create-json.py

# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import time
from tqdm import tqdm
import pandas as pd
path = r'C:\Users\train.tsv'
def local_time():
    return str(time.strftime("%H:%M:%S", time.localtime()))
print(local_time() + " Starting script " )
columns = ['author','num1','content','str1','str2','num2','subreddit']
df = pd.read_csv(path, sep='\t', names=columns, header=None)
print(local_time() + " File has been read " )
df_authors = pd.DataFrame(df['author'])
df_content = pd.DataFrame(df['content'])
df_file = pd.concat([df_authors,df_content], axis=1)
print(local_time() + " Data needed has been concatenated ")
users_group = df_file.groupby('author')
group0 = df_file.groupby(['author','content'])
group1 = pd.Series(users_group.size())
users = (group1.index).to_numpy()
print(local_time() + " users been formatted ")
num_samples = group1.values
print(local_time() + " num_samples has been formatted ")
user_data_dict = {i: {'x': list()} for i in tqdm(users)}
for i in tqdm(range(len(df_file))):
    if df_file['content'][i] not in user_data_dict[df_file['author'][i]]['x']:
        user_data_dict[df_file['author'][i]]['x'].append(df_file['content'][i])
f = open(r'C:\Users\train.json', "w")
new_data = {'users': users.tolist(), 'num_samples': num_samples.tolist(), 'user_data': user_data_dict}
json.dump(new_data, f)
f.close()
print(local_time() + " end of script ")
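# A sketch of the JSON written above (illustrative, with made-up authors
# and comments):
#
#   {
#     "users": ["author_a", "author_b"],
#     "num_samples": [2, 1],
#     "user_data": {
#       "author_a": {"x": ["first comment", "second comment"]},
#       "author_b": {"x": ["another comment"]}
#     }
#   }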

mod.rs

//! The renderer is the "workhorse" of the engine: it draws scenes (both 3D and 2D), the user
//! interface and debug geometry, and lets you add user-defined render passes. The current
//! renderer implementation is not very flexible, but should cover 95% of use cases.
//!
//! # Implementation details
//!
//! Renderer is based on OpenGL 3.3+ Core.
#![warn(missing_docs)]
#![deny(unsafe_code)]
// Framework is 100% unsafe internally due to FFI calls.
#[allow(unsafe_code)]
pub mod framework;
pub mod cache;
pub mod debug_renderer;
pub mod renderer2d;
mod batch;
mod bloom;
mod flat_shader;
mod forward_renderer;
mod fxaa;
mod gbuffer;
mod hdr;
mod light;
mod light_volume;
mod particle_system_renderer;
mod shadow;
mod skybox_shader;
mod sprite_renderer;
mod ssao;
mod ui_renderer;
use crate::{
core::{
algebra::{Matrix4, Vector2, Vector3},
color::Color,
instant,
math::Rect,
pool::Handle,
scope_profile,
},
gui::{draw::DrawingContext, message::MessageData, Control, UserInterface},
material::{shader::SamplerFallback, Material, PropertyValue},
renderer::{
batch::BatchStorage,
bloom::BloomRenderer,
cache::shader::ShaderCache,
cache::{geometry::GeometryCache, texture::TextureCache, CacheEntry},
debug_renderer::DebugRenderer,
flat_shader::FlatShader,
forward_renderer::{ForwardRenderContext, ForwardRenderer},
framework::{
error::FrameworkError,
framebuffer::{Attachment, AttachmentKind, DrawParameters, FrameBuffer},
geometry_buffer::{DrawCallStatistics, GeometryBuffer},
gpu_program::GpuProgramBinding,
gpu_texture::{
Coordinate, GpuTexture, GpuTextureKind, MagnificationFilter, MinificationFilter,
PixelKind, WrapMode,
},
state::{PipelineState, PipelineStatistics},
},
fxaa::FxaaRenderer,
gbuffer::{GBuffer, GBufferRenderContext},
hdr::HighDynamicRangeRenderer,
light::{DeferredLightRenderer, DeferredRendererContext, LightingStatistics},
particle_system_renderer::{ParticleSystemRenderContext, ParticleSystemRenderer},
renderer2d::Renderer2d,
sprite_renderer::{SpriteRenderContext, SpriteRenderer},
ui_renderer::{UiRenderContext, UiRenderer},
},
resource::texture::{Texture, TextureKind},
scene::{camera::Camera, mesh::surface::SurfaceData, node::Node, Scene, SceneContainer},
scene2d::Scene2dContainer,
};
use serde::{Deserialize, Serialize};
use std::{
cell::RefCell,
collections::{hash_map::Entry, HashMap},
fmt::{Display, Formatter},
rc::Rc,
sync::{
mpsc::{Receiver, Sender},
Arc, Mutex,
},
};
/// Renderer statistics for one frame; also includes the current frames-per-second
/// count.
#[derive(Copy, Clone)]
pub struct Statistics {
/// Shows how many pipeline state changes were made per frame.
pub pipeline: PipelineStatistics,
/// Shows how many lights and shadow maps were rendered.
pub lighting: LightingStatistics,
/// Shows how many draw calls were made and how many triangles were rendered.
pub geometry: RenderPassStatistics,
/// Real time consumed to render a frame. Time given in **seconds**.
pub pure_frame_time: f32,
/// Total time the renderer took to process a single frame; usually includes the
/// time the renderer spent waiting for the buffer swap (which can include vsync).
/// Time given in **seconds**.
pub capped_frame_time: f32,
/// Total number of frames rendered in one second.
pub frames_per_second: usize,
frame_counter: usize,
frame_start_time: instant::Instant,
last_fps_commit_time: instant::Instant,
}
impl Display for Statistics {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"FPS: {}\n\
Pure Frame Time: {:.2} ms\n\
Capped Frame Time: {:.2} ms\n\
{}\n\
{}\n\
{}\n",
self.frames_per_second,
self.pure_frame_time * 1000.0,
self.capped_frame_time * 1000.0,
self.geometry,
self.lighting,
self.pipeline
)
}
}
/// GPU statistics for a single frame.
#[derive(Copy, Clone)]
pub struct RenderPassStatistics {
/// Number of draw calls per frame - the lower the better.
pub draw_calls: usize,
/// Number of triangles rendered per frame.
pub triangles_rendered: usize,
}
impl Display for RenderPassStatistics {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Draw Calls: {}\n\
Triangles Rendered: {}",
self.draw_calls, self.triangles_rendered
)
}
}
impl Default for RenderPassStatistics {
fn default() -> Self {
Self {
draw_calls: 0,
triangles_rendered: 0,
}
}
}
impl std::ops::AddAssign for RenderPassStatistics {
fn add_assign(&mut self, rhs: Self) {
self.draw_calls += rhs.draw_calls;
self.triangles_rendered += rhs.triangles_rendered;
}
}
impl std::ops::AddAssign<DrawCallStatistics> for RenderPassStatistics {
fn add_assign(&mut self, rhs: DrawCallStatistics) {
self.draw_calls += 1;
self.triangles_rendered += rhs.triangles;
}
}
impl std::ops::AddAssign<RenderPassStatistics> for Statistics {
fn add_assign(&mut self, rhs: RenderPassStatistics) {
self.geometry += rhs;
}
}
/// Shadow map precision allows you to select a compromise between quality and performance.
#[derive(Copy, Clone, Hash, PartialOrd, PartialEq, Eq, Ord, Debug, Serialize, Deserialize)]
pub enum ShadowMapPrecision {
/// Shadow map will use half the memory by switching to a 16-bit pixel format,
/// but "shadow acne" may occur.
Half,
/// Shadow map will use a 32-bit pixel format. This option gives the highest quality,
/// but could be less performant than `Half`.
Full,
}
/// Quality settings allow you to find an optimal balance between performance and
/// graphics quality.
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct QualitySettings {
/// Point shadows
/// Size of each cube map face of the point shadow map texture, in pixels.
pub point_shadow_map_size: usize,
/// Whether to use percentage-closer filtering (smoothing) for point shadows.
pub point_soft_shadows: bool,
/// Point shadows enabled or not.
pub point_shadows_enabled: bool,
/// Maximum distance from camera to draw shadows.
pub point_shadows_distance: f32,
/// Point shadow map precision. Allows you to select a compromise between
/// quality and performance.
pub point_shadow_map_precision: ShadowMapPrecision,
/// Spot shadows
/// Size of the square spot shadow map texture, in pixels.
pub spot_shadow_map_size: usize,
/// Whether to use percentage-closer filtering (smoothing) for spot shadows.
pub spot_soft_shadows: bool,
/// Spot shadows enabled or not.
pub spot_shadows_enabled: bool,
/// Maximum distance from camera to draw shadows.
pub spot_shadows_distance: f32,
/// Spot shadow map precision. Allows you to select a compromise between
/// quality and performance.
pub spot_shadow_map_precision: ShadowMapPrecision,
/// Whether to use screen space ambient occlusion or not.
pub use_ssao: bool,
/// Radius of the sampling hemisphere used in SSAO; it defines how much ambient
/// occlusion there will be in your scene.
pub ssao_radius: f32,
/// Global switch to enable or disable light scattering. Each light can have
/// its own scatter switch, but this one can disable scattering globally.
pub light_scatter_enabled: bool,
/// Whether to use Fast Approximate Anti-Aliasing (FXAA) or not.
pub fxaa: bool,
/// Whether to use Parallax Mapping or not.
pub use_parallax_mapping: bool,
/// Whether to use bloom effect.
pub use_bloom: bool,
}
impl Default for QualitySettings {
fn default() -> Self {
Self::high()
}
}
impl QualitySettings {
/// Highest possible graphics quality. Requires very powerful GPU.
pub fn ultra() -> Self {
Self {
point_shadow_map_size: 2048,
point_shadows_distance: 20.0,
point_shadows_enabled: true,
point_soft_shadows: true,
spot_shadow_map_size: 2048,
spot_shadows_distance: 20.0,
spot_shadows_enabled: true,
spot_soft_shadows: true,
use_ssao: true,
ssao_radius: 0.5,
light_scatter_enabled: true,
point_shadow_map_precision: ShadowMapPrecision::Full,
spot_shadow_map_precision: ShadowMapPrecision::Full,
fxaa: true,
use_bloom: true,
use_parallax_mapping: false, // TODO: Enable when it is fixed!
}
}
/// High graphics quality, includes all graphical effects. Requires powerful GPU.
pub fn high() -> Self {
Self {
point_shadow_map_size: 1024,
point_shadows_distance: 15.0,
point_shadows_enabled: true,
point_soft_shadows: true,
spot_shadow_map_size: 1024,
spot_shadows_distance: 15.0,
spot_shadows_enabled: true,
spot_soft_shadows: true,
use_ssao: true,
ssao_radius: 0.5,
light_scatter_enabled: true,
point_shadow_map_precision: ShadowMapPrecision::Full,
spot_shadow_map_precision: ShadowMapPrecision::Full,
fxaa: true,
use_bloom: true,
use_parallax_mapping: false, // TODO: Enable when it is fixed!
}
}
/// Medium graphics quality, some of effects are disabled, shadows will have sharp edges.
pub fn medium() -> Self {
Self {
point_shadow_map_size: 512,
point_shadows_distance: 5.0,
point_shadows_enabled: true,
point_soft_shadows: false,
spot_shadow_map_size: 512,
spot_shadows_distance: 5.0,
spot_shadows_enabled: true,
spot_soft_shadows: false,
use_ssao: true,
ssao_radius: 0.5,
light_scatter_enabled: false,
point_shadow_map_precision: ShadowMapPrecision::Half,
spot_shadow_map_precision: ShadowMapPrecision::Half,
fxaa: true,
use_bloom: true,
use_parallax_mapping: false,
}
}
/// Lowest graphics quality, all effects are disabled.
pub fn low() -> Self {
Self {
point_shadow_map_size: 1, // Zero is unsupported.
point_shadows_distance: 0.0,
point_shadows_enabled: false,
point_soft_shadows: false,
spot_shadow_map_size: 1,
spot_shadows_distance: 0.0,
spot_shadows_enabled: false,
spot_soft_shadows: false,
use_ssao: false,
ssao_radius: 0.5,
light_scatter_enabled: false,
point_shadow_map_precision: ShadowMapPrecision::Half,
spot_shadow_map_precision: ShadowMapPrecision::Half,
fxaa: false,
use_bloom: false,
use_parallax_mapping: false,
}
}
}
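// A minimal sketch (not part of the engine's API): selecting one of the presets
// above by name, falling back to `high()`, which is also what `Default` returns.
#[allow(dead_code)]
fn quality_preset_by_name(name: &str) -> QualitySettings {
    match name {
        "ultra" => QualitySettings::ultra(),
        "medium" => QualitySettings::medium(),
        "low" => QualitySettings::low(),
        _ => QualitySettings::high(),
    }
}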
impl Statistics {
/// Must be called before rendering anything.
fn begin_frame(&mut self) {
self.frame_start_time = instant::Instant::now();
self.geometry = Default::default();
self.lighting = Default::default();
}
/// Must be called before SwapBuffers but after all rendering is done.
fn end_frame(&mut self) {
let current_time = instant::Instant::now();
self.pure_frame_time = current_time
.duration_since(self.frame_start_time)
.as_secs_f32();
self.frame_counter += 1;
if current_time
.duration_since(self.last_fps_commit_time)
.as_secs_f32()
>= 1.0
{
self.last_fps_commit_time = current_time;
self.frames_per_second = self.frame_counter;
self.frame_counter = 0;
}
}
/// Must be called after SwapBuffers to get capped frame time.
fn finalize(&mut self) {
self.capped_frame_time = instant::Instant::now()
.duration_since(self.frame_start_time)
.as_secs_f32();
}
}
impl Default for Statistics {
fn default() -> Self {
Self {
pipeline: Default::default(),
lighting: Default::default(),
geometry: Default::default(),
pure_frame_time: 0.0,
capped_frame_time: 0.0,
frames_per_second: 0,
frame_counter: 0,
frame_start_time: instant::Instant::now(),
last_fps_commit_time: instant::Instant::now(),
}
}
}
/// A sending point for textures that should be uploaded to GPU memory.
#[derive(Clone)]
pub struct TextureUploadSender {
sender: Sender<Texture>,
}
impl TextureUploadSender {
/// Requests an upload of the texture to GPU memory.
pub fn request_upload(&self, texture: Texture) {
self.sender
.send(texture)
.expect("Texture upload receiver must be alive while renderer is alive")
}
}
struct AssociatedSceneData {
/// G-Buffer of the scene.
pub gbuffer: GBuffer,
/// Intermediate high dynamic range frame buffer.
pub hdr_scene_framebuffer: FrameBuffer,
/// Final frame of the scene. Tone mapped + gamma corrected.
pub ldr_scene_framebuffer: FrameBuffer,
/// Additional frame buffer for post processing.
pub ldr_temp_framebuffer: FrameBuffer,
/// The HDR renderer has to be created per scene, because it contains
/// the scene luminance.
pub hdr_renderer: HighDynamicRangeRenderer,
/// The bloom buffer contains only overly bright pixels, which create the light
/// bleeding (glow) effect.
pub bloom_renderer: BloomRenderer,
}
impl AssociatedSceneData {
pub fn new(
state: &mut PipelineState,
width: usize,
height: usize,
) -> Result<Self, FrameworkError> {
let mut depth_stencil_texture = GpuTexture::new(
state,
GpuTextureKind::Rectangle { width, height },
PixelKind::D24S8,
MinificationFilter::Nearest,
MagnificationFilter::Nearest,
1,
None,
)?;
depth_stencil_texture
.bind_mut(state, 0)
.set_wrap(Coordinate::S, WrapMode::ClampToEdge)
.set_wrap(Coordinate::T, WrapMode::ClampToEdge);
let depth_stencil = Rc::new(RefCell::new(depth_stencil_texture));
let hdr_frame_texture = GpuTexture::new(
state,
GpuTextureKind::Rectangle { width, height },
// Intermediate scene frame will be rendered in HDR render target.
PixelKind::RGBA16F,
MinificationFilter::Linear,
MagnificationFilter::Linear,
1,
None,
)?;
let hdr_scene_framebuffer = FrameBuffer::new(
state,
Some(Attachment {
kind: AttachmentKind::DepthStencil,
texture: depth_stencil.clone(),
}),
vec![Attachment {
kind: AttachmentKind::Color,
texture: Rc::new(RefCell::new(hdr_frame_texture)),
}],
)?;
let ldr_frame_texture = GpuTexture::new(
state,
GpuTextureKind::Rectangle { width, height },
// Final scene frame is in standard sRGB space.
PixelKind::RGBA8,
MinificationFilter::Linear,
MagnificationFilter::Linear,
1,
None,
)?;
let ldr_scene_framebuffer = FrameBuffer::new(
state,
Some(Attachment {
kind: AttachmentKind::DepthStencil,
texture: depth_stencil.clone(),
}),
vec![Attachment {
kind: AttachmentKind::Color,
texture: Rc::new(RefCell::new(ldr_frame_texture)),
}],
)?;
let ldr_temp_texture = GpuTexture::new(
state,
GpuTextureKind::Rectangle { width, height },
// Final scene frame is in standard sRGB space.
PixelKind::RGBA8,
MinificationFilter::Linear,
MagnificationFilter::Linear,
1,
None,
)?;
let ldr_temp_framebuffer = FrameBuffer::new(
state,
Some(Attachment {
kind: AttachmentKind::DepthStencil,
texture: depth_stencil,
}),
vec![Attachment {
kind: AttachmentKind::Color,
texture: Rc::new(RefCell::new(ldr_temp_texture)),
}],
)?;
Ok(Self {
gbuffer: GBuffer::new(state, width, height)?,
hdr_renderer: HighDynamicRangeRenderer::new(state)?,
bloom_renderer: BloomRenderer::new(state, width, height)?,
hdr_scene_framebuffer,
ldr_scene_framebuffer,
ldr_temp_framebuffer,
})
}
fn copy_depth_stencil_to_scene_framebuffer(&mut self, state: &mut PipelineState) {
state.blit_framebuffer(
self.gbuffer.framebuffer().id(),
self.hdr_scene_framebuffer.id(),
0,
0,
self.gbuffer.width,
self.gbuffer.height,
0,
0,
self.gbuffer.width,
self.gbuffer.height,
false,
true,
true,
);
}
pub fn hdr_scene_frame_texture(&self) -> Rc<RefCell<GpuTexture>> {
self.hdr_scene_framebuffer.color_attachments()[0]
.texture
.clone()
}
pub fn ldr_scene_frame_texture(&self) -> Rc<RefCell<GpuTexture>> {
self.ldr_scene_framebuffer.color_attachments()[0]
.texture
.clone()
}
pub fn ldr_temp_frame_texture(&self) -> Rc<RefCell<GpuTexture>> {
self.ldr_temp_framebuffer.color_attachments()[0]
.texture
.clone()
}
}
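// Builds a matrix that stretches a unit XY quad over the whole viewport: the
// orthographic projection maps pixel coordinates to clip space (with the Y axis
// flipped), and the non-uniform scaling expands the unit quad to the viewport's
// pixel size.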
pub(in crate) fn make_viewport_matrix(viewport: Rect<i32>) -> Matrix4<f32> {
Matrix4::new_orthographic(
0.0,
viewport.w() as f32,
viewport.h() as f32,
0.0,
-1.0,
1.0,
) * Matrix4::new_nonuniform_scaling(&Vector3::new(
viewport.w() as f32,
viewport.h() as f32,
0.0,
))
}
/// See module docs.
pub struct Renderer {
backbuffer: FrameBuffer,
scene_render_passes: Vec<Arc<Mutex<dyn SceneRenderPass>>>,
deferred_light_renderer: DeferredLightRenderer,
flat_shader: FlatShader,
sprite_renderer: SpriteRenderer,
particle_system_renderer: ParticleSystemRenderer,
// Dummy white one-pixel texture which will be used as a stub when rendering
// something without a texture specified.
white_dummy: Rc<RefCell<GpuTexture>>,
black_dummy: Rc<RefCell<GpuTexture>>,
environment_dummy: Rc<RefCell<GpuTexture>>,
// Dummy one-pixel texture with a (0, 1, 0) vector which is used as a stub when
// rendering something without a normal map.
normal_dummy: Rc<RefCell<GpuTexture>>,
// Dummy one-pixel texture used as a stub when rendering something without a
// metallic texture. Default metalness is 0.0.
metallic_dummy: Rc<RefCell<GpuTexture>>,
ui_renderer: UiRenderer,
statistics: Statistics,
quad: SurfaceData,
frame_size: (u32, u32),
quality_settings: QualitySettings,
/// Debug renderer instance; can be used for debugging purposes.
pub debug_renderer: DebugRenderer,
scene_data_map: HashMap<Handle<Scene>, AssociatedSceneData>,
backbuffer_clear_color: Color,
texture_cache: TextureCache,
shader_cache: ShaderCache,
geometry_cache: GeometryCache,
batch_storage: BatchStorage,
forward_renderer: ForwardRenderer,
fxaa_renderer: FxaaRenderer,
renderer2d: Renderer2d,
texture_upload_receiver: Receiver<Texture>,
texture_upload_sender: Sender<Texture>,
// TextureId -> FrameBuffer mapping. This mapping is used for temporary frame buffers
// like the ones used to render UI instances.
ui_frame_buffers: HashMap<usize, FrameBuffer>,
// MUST BE LAST! Otherwise you'll get a crash, because other parts of the renderer
// contain **pointers** to the pipeline state. It must be dropped last!
state: Box<PipelineState>,
}
fn make_ui_frame_buffer(
frame_size: Vector2<f32>,
state: &mut PipelineState,
) -> Result<FrameBuffer, FrameworkError> {
let color_texture = Rc::new(RefCell::new(GpuTexture::new(
state,
GpuTextureKind::Rectangle {
width: frame_size.x as usize,
height: frame_size.y as usize,
},
PixelKind::RGBA8,
MinificationFilter::Linear,
MagnificationFilter::Linear,
1,
None,
)?));
let depth_stencil = Rc::new(RefCell::new(GpuTexture::new(
state,
GpuTextureKind::Rectangle {
width: frame_size.x as usize,
height: frame_size.y as usize,
},
PixelKind::D24S8,
MinificationFilter::Nearest,
MagnificationFilter::Nearest,
1,
None,
)?));
FrameBuffer::new(
state,
Some(Attachment {
kind: AttachmentKind::DepthStencil,
texture: depth_stencil,
}),
vec![Attachment {
kind: AttachmentKind::Color,
texture: color_texture,
}],
)
}
/// A context for custom scene render passes.
pub struct SceneRenderPassContext<'a, 'b> {
/// A pipeline state that is used as a wrapper to underlying graphics API.
pub pipeline_state: &'a mut PipelineState,
/// A texture cache that uploads engine's `Texture` as internal `GpuTexture` to GPU.
/// Use this to get a corresponding GPU texture by an instance of a `Texture`.
pub texture_cache: &'a mut TextureCache,
/// A geometry cache that uploads engine's `SurfaceData` as internal `GeometryBuffer` to GPU.
/// Use this to get a corresponding GPU geometry buffer (essentially it is just a VAO) by an
/// instance of a `SurfaceData`.
pub geometry_cache: &'a mut GeometryCache,
/// A storage that contains "pre-compiled" groups of render data (batches).
pub batch_storage: &'a BatchStorage,
/// Current quality settings of the renderer.
pub quality_settings: &'a QualitySettings,
/// Current framebuffer to which the scene is being rendered.
pub framebuffer: &'a mut FrameBuffer,
/// A scene being rendered.
pub scene: &'b Scene,
/// A camera from the scene that is used as "eyes".
pub camera: &'b Camera,
/// A viewport of the camera.
pub viewport: Rect<i32>,
/// A handle of the scene being rendered.
pub scene_handle: Handle<Scene>,
/// A 1x1 white pixel texture that can be used as a stub when there is no texture.
pub white_dummy: Rc<RefCell<GpuTexture>>,
/// A 1x1 pixel texture with a (0, 1, 0) vector that can be used as a stub when
/// there is no normal map.
pub normal_dummy: Rc<RefCell<GpuTexture>>,
/// A 1x1 pixel texture with a 0.0 metalness factor that can be used as a stub when
/// there is no metallic map.
pub metallic_dummy: Rc<RefCell<GpuTexture>>,
/// A 1x1 black cube map texture that can be used as a stub when there is no environment
/// texture.
pub environment_dummy: Rc<RefCell<GpuTexture>>,
/// A 1x1 black pixel texture that can be used as a stub when there is no texture.
pub black_dummy: Rc<RefCell<GpuTexture>>,
/// A texture with depth values from G-Buffer.
///
/// # Important notes
///
/// Keep in mind that the G-Buffer cannot be modified in custom render passes, so you
/// cannot write to this texture. However, you can still write to the frame buffer's
/// depth as you normally would.
pub depth_texture: Rc<RefCell<GpuTexture>>,
/// A texture with world-space normals from G-Buffer.
///
/// # Important notes
///
/// Keep in mind that the G-Buffer cannot be modified in custom render passes, so you
/// cannot write to this texture.
pub normal_texture: Rc<RefCell<GpuTexture>>,
/// A texture with ambient lighting values from G-Buffer.
///
/// # Important notes
///
/// Keep in mind that the G-Buffer cannot be modified in custom render passes, so you
/// cannot write to this texture.
pub ambient_texture: Rc<RefCell<GpuTexture>>,
}
/// A trait for custom scene rendering pass. It could be used to add your own rendering techniques.
pub trait SceneRenderPass {
/// Main rendering method. It will be called for **each** scene registered in the engine, but
/// you can filter out scenes by their handles.
fn render(
&mut self,
ctx: SceneRenderPassContext,
) -> Result<RenderPassStatistics, FrameworkError>;
}
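// A minimal sketch of a custom render pass: it draws nothing and returns empty
// statistics. `EmptyRenderPass` is an illustrative name, not part of the engine;
// it could be registered with `renderer.add_render_pass(Arc::new(Mutex::new(EmptyRenderPass)))`.
#[allow(dead_code)]
struct EmptyRenderPass;

impl SceneRenderPass for EmptyRenderPass {
    fn render(
        &mut self,
        _ctx: SceneRenderPassContext,
    ) -> Result<RenderPassStatistics, FrameworkError> {
        Ok(RenderPassStatistics::default())
    }
}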
fn blit_pixels(
state: &mut PipelineState,
framebuffer: &mut FrameBuffer,
texture: Rc<RefCell<GpuTexture>>,
shader: &FlatShader,
viewport: Rect<i32>,
quad: &GeometryBuffer,
) -> DrawCallStatistics {
framebuffer.draw(
quad,
state,
viewport,
&shader.program,
&DrawParameters {
cull_face: None,
color_write: Default::default(),
depth_write: true,
stencil_test: None,
depth_test: false,
blend: None,
stencil_op: Default::default(),
},
|mut program_binding| {
program_binding
.set_matrix4(&shader.wvp_matrix, &{
Matrix4::new_orthographic(
0.0,
viewport.w() as f32,
viewport.h() as f32,
0.0,
-1.0,
1.0,
) * Matrix4::new_nonuniform_scaling(&Vector3::new(
viewport.w() as f32,
viewport.h() as f32,
0.0,
))
})
.set_texture(&shader.diffuse_texture, &texture);
},
)
}
pub(in crate) struct MaterialContext<'a, 'b> {
pub material: &'a Material,
pub program_binding: &'a mut GpuProgramBinding<'b>,
pub texture_cache: &'a mut TextureCache,
// Built-in uniforms.
pub world_matrix: &'a Matrix4<f32>,
pub wvp_matrix: &'a Matrix4<f32>,
pub bone_matrices: &'a [Matrix4<f32>],
pub use_skeletal_animation: bool,
pub camera_position: &'a Vector3<f32>,
pub use_pom: bool,
pub light_position: &'a Vector3<f32>,
// Fallback samplers.
pub normal_dummy: Rc<RefCell<GpuTexture>>,
pub white_dummy: Rc<RefCell<GpuTexture>>,
pub black_dummy: Rc<RefCell<GpuTexture>>,
}
pub(in crate) fn apply_material(ctx: MaterialContext) {
// Apply values for built-in uniforms.
if let Some(location) = ctx.program_binding.uniform_location("rg3d_worldMatrix") {
ctx.program_binding.set_matrix4(&location, ctx.world_matrix);
}
if let Some(location) = ctx
.program_binding
.uniform_location("rg3d_worldViewProjection")
{
ctx.program_binding.set_matrix4(&location, ctx.wvp_matrix);
}
if let Some(location) = ctx.program_binding.uniform_location("rg3d_boneMatrices") {
ctx.program_binding
.set_matrix4_array(&location, ctx.bone_matrices);
}
if let Some(location) = ctx
.program_binding
.uniform_location("rg3d_useSkeletalAnimation")
{
ctx.program_binding
.set_bool(&location, ctx.use_skeletal_animation);
}
if let Some(location) = ctx.program_binding.uniform_location("rg3d_cameraPosition") {
ctx.program_binding
.set_vector3(&location, ctx.camera_position);
}
if let Some(location) = ctx.program_binding.uniform_location("rg3d_usePOM") {
ctx.program_binding.set_bool(&location, ctx.use_pom);
}
if let Some(location) = ctx.program_binding.uniform_location("rg3d_lightPosition") {
ctx.program_binding
.set_vector3(&location, ctx.light_position);
}
// Apply material properties.
for (name, value) in ctx.material.properties() {
if let Some(uniform) = ctx.program_binding.uniform_location(name) {
match value {
PropertyValue::Float(v) => {
ctx.program_binding.set_f32(&uniform, *v);
}
PropertyValue::Int(v) => {
ctx.program_binding.set_i32(&uniform, *v);
}
PropertyValue::UInt(v) => {
ctx.program_binding.set_u32(&uniform, *v);
}
PropertyValue::Vector2(v) => {
ctx.program_binding.set_vector2(&uniform, v);
}
PropertyValue::Vector3(v) => {
ctx.program_binding.set_vector3(&uniform, v);
}
PropertyValue::Vector4(v) => {
ctx.program_binding.set_vector4(&uniform, v);
}
PropertyValue::Matrix2(v) => {
ctx.program_binding.set_matrix2(&uniform, v);
}
PropertyValue::Matrix3(v) => {
ctx.program_binding.set_matrix3(&uniform, v);
}
PropertyValue::Matrix4(v) => {
ctx.program_binding.set_matrix4(&uniform, v);
}
PropertyValue::Color(v) => {
ctx.program_binding.set_srgb_color(&uniform, v);
}
PropertyValue::Bool(v) => {
ctx.program_binding.set_bool(&uniform, *v);
}
PropertyValue::Sampler { value, fallback } => {
let texture = value
.as_ref()
.and_then(|t| ctx.texture_cache.get(ctx.program_binding.state, t))
.unwrap_or_else(|| match fallback {
SamplerFallback::White => ctx.white_dummy.clone(),
SamplerFallback::Normal => ctx.normal_dummy.clone(),
SamplerFallback::Black => ctx.black_dummy.clone(),
});
ctx.program_binding.set_texture(&uniform, &texture);
}
PropertyValue::FloatArray(v) => {
ctx.program_binding.set_f32_slice(&uniform, v);
}
PropertyValue::IntArray(v) => {
ctx.program_binding.set_i32_slice(&uniform, v);
}
PropertyValue::UIntArray(v) => {
ctx.program_binding.set_u32_slice(&uniform, v);
}
PropertyValue::Vector2Array(v) => {
ctx.program_binding.set_vector2_slice(&uniform, v);
}
PropertyValue::Vector3Array(v) => {
ctx.program_binding.set_vector3_slice(&uniform, v);
}
PropertyValue::Vector4Array(v) => {
ctx.program_binding.set_vector4_slice(&uniform, v);
}
PropertyValue::Matrix2Array(v) => {
ctx.program_binding.set_matrix2_array(&uniform, v);
}
PropertyValue::Matrix3Array(v) => {
ctx.program_binding.set_matrix3_array(&uniform, v);
}
PropertyValue::Matrix4Array(v) => {
ctx.program_binding.set_matrix4_array(&uniform, v);
}
}
}
}
}
impl Renderer {
pub(in crate) fn new(
context: glow::Context,
frame_size: (u32, u32),
) -> Result<Self, FrameworkError> {
let settings = QualitySettings::default();
let (texture_upload_sender, texture_upload_receiver) = std::sync::mpsc::channel();
// Box the pipeline state because we'll store pointers to it inside the framework's
// entities and it must have a constant address.
let mut state = Box::new(PipelineState::new(context));
Ok(Self {
backbuffer: FrameBuffer::backbuffer(&mut state),
frame_size,
deferred_light_renderer: DeferredLightRenderer::new(&mut state, frame_size, &settings)?,
flat_shader: FlatShader::new(&mut state)?,
sprite_renderer: SpriteRenderer::new(&mut state)?,
white_dummy: Rc::new(RefCell::new(GpuTexture::new(
&mut state,
GpuTextureKind::Rectangle {
width: 1,
height: 1,
},
PixelKind::RGBA8,
MinificationFilter::Linear,
MagnificationFilter::Linear,
1,
Some(&[255u8, 255u8, 255u8, 255u8]),
)?)),
black_dummy: Rc::new(RefCell::new(GpuTexture::new(
&mut state,
GpuTextureKind::Rectangle {
width: 1,
height: 1,
},
PixelKind::RGBA8,
MinificationFilter::Linear,
MagnificationFilter::Linear,
1,
Some(&[0u8, 0u8, 0u8, 255u8]),
)?)),
environment_dummy: Rc::new(RefCell::new(GpuTexture::new(
&mut state,
GpuTextureKind::Cube {
width: 1,
height: 1,
},
PixelKind::RGBA8,
MinificationFilter::Linear,
MagnificationFilter::Linear,
1,
Some(&[
0u8, 0u8, 0u8, 255u8, // pos-x
0u8, 0u8, 0u8, 255u8, // neg-x
0u8, 0u8, 0u8, 255u8, // pos-y
0u8, 0u8, 0u8, 255u8, // neg-y
0u8, 0u8, 0u8, 255u8, // pos-z
0u8, 0u8, 0u8, 255u8, // neg-z
]),
)?)),
normal_dummy: Rc::new(RefCell::new(GpuTexture::new(
&mut state,
GpuTextureKind::Rectangle {
width: 1,
height: 1,
},
PixelKind::RGBA8,
MinificationFilter::Linear,
MagnificationFilter::Linear,
1,
Some(&[128u8, 128u8, 255u8, 255u8]),
)?)),
metallic_dummy: Rc::new(RefCell::new(GpuTexture::new(
&mut state,
GpuTextureKind::Rectangle {
width: 1,
height: 1,
},
PixelKind::RGBA8,
MinificationFilter::Linear,
MagnificationFilter::Linear,
1,
Some(&[0u8, 0u8, 0u8, 0u8]),
)?)),
quad: SurfaceData::make_unit_xy_quad(),
ui_renderer: UiRenderer::new(&mut state)?,
particle_system_renderer: ParticleSystemRenderer::new(&mut state)?,
quality_settings: settings,
debug_renderer: DebugRenderer::new(&mut state)?,
scene_data_map: Default::default(),
backbuffer_clear_color: Color::BLACK,
texture_cache: Default::default(),
geometry_cache: Default::default(),
batch_storage: Default::default(),
forward_renderer: ForwardRenderer::new(),
ui_frame_buffers: Default::default(),
fxaa_renderer: FxaaRenderer::new(&mut state)?,
statistics: Statistics::default(),
renderer2d: Renderer2d::new(&mut state)?,
texture_upload_receiver,
texture_upload_sender,
state,
shader_cache: ShaderCache::default(),
scene_render_passes: Default::default(),
})
}
/// Adds a custom render pass.
pub fn add_render_pass(&mut self, pass: Arc<Mutex<dyn SceneRenderPass>>) {
self.scene_render_passes.push(pass);
}
/// Returns statistics for the last frame.
pub fn get_statistics(&self) -> Statistics {
self.statistics
}
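// Illustrative: `Statistics` implements `Display`, so the per-frame numbers can
// be logged with e.g. `println!("{}", renderer.get_statistics());`.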
/// Unloads texture from GPU memory.
pub fn unload_texture(&mut self, texture: Texture) {
self.texture_cache.unload(texture)
}
/// Sets the color which will be used to fill the screen when there is nothing to render.
pub fn set_backbuffer_clear_color(&mut self, color: Color) {
self.backbuffer_clear_color = color;
}
/// Returns a reference to current pipeline state.
pub fn pipeline_state(&mut self) -> &mut PipelineState {
&mut self.state
}
pub(in crate) fn upload_sender(&self) -> TextureUploadSender {
TextureUploadSender {
sender: self.texture_upload_sender.clone(),
}
}
/// Sets a new frame size; should be called when a Resize event is received.
///
/// # Notes
///
/// Input values will be clamped to 1 pixel if the new size is 0. Rendering cannot
/// be performed into a 0x0 texture.
pub fn set_frame_size(&mut self, new_size: (u32, u32)) -> Result<(), FrameworkError> {
self.frame_size.0 = new_size.0.max(1);
self.frame_size.1 = new_size.1.max(1);
self.deferred_light_renderer
.set_frame_size(&mut self.state, new_size)?;
Ok(())
}
/// Returns current (width, height) pair of back buffer size.
pub fn get_frame_size(&self) -> (u32, u32) {
self.frame_size
}
/// Returns current bounds of back buffer.
pub fn get_frame_bounds(&self) -> Vector2<f32> {
Vector2::new(self.frame_size.0 as f32, self.frame_size.1 as f32)
}
/// Sets new quality settings for the renderer. Never call this method in a loop, otherwise
/// you may get **significant** lag. Always check whether the current quality settings differ
/// from the new ones first!
pub fn set_quality_settings(
&mut self,
settings: &QualitySettings,
) -> Result<(), FrameworkError> {
self.quality_settings = *settings;
self.deferred_light_renderer
.set_quality_settings(&mut self.state, settings)
}
/// Returns current quality settings.
pub fn get_quality_settings(&self) -> QualitySettings {
self.quality_settings
}
/// Removes all cached GPU data and forces the renderer to re-upload data to the GPU.
/// Do not call this method unless you absolutely need to! It may cause **significant**
/// performance lag!
pub fn flush(&mut self) {
self.texture_cache.clear();
self.geometry_cache.clear();
self.renderer2d.flush();
}
/// Renders the given UI into the specified render target. This method is especially useful
/// if you need off-screen UIs (like the interactive touch screens in Doom 3, Dead Space, etc.).
pub fn render_ui_to_texture<M: MessageData, C: Control<M, C>>(
&mut self,
render_target: Texture,
ui: &mut UserInterface<M, C>,
) -> Result<(), FrameworkError> {
let new_width = ui.screen_size().x as usize;
let new_height = ui.screen_size().y as usize;
// Create or reuse existing frame buffer.
let frame_buffer = match self.ui_frame_buffers.entry(render_target.key()) {
Entry::Occupied(entry) => {
let frame_buffer = entry.into_mut();
let frame = frame_buffer.color_attachments().first().unwrap();
let color_texture_kind = frame.texture.borrow().kind();
if let GpuTextureKind::Rectangle { width, height } = color_texture_kind {
if width != new_width || height != new_height {
*frame_buffer = make_ui_frame_buffer(ui.screen_size(), &mut self.state)?;
}
} else {
panic!("ui can be rendered only in rectangle texture!")
}
frame_buffer
}
Entry::Vacant(entry) => {
entry.insert(make_ui_frame_buffer(ui.screen_size(), &mut self.state)?)
}
};
let viewport = Rect::new(0, 0, new_width as i32, new_height as i32);
frame_buffer.clear(
&mut self.state,
viewport,
Some(Color::TRANSPARENT),
Some(0.0),
Some(0),
);
self.statistics += self.ui_renderer.render(UiRenderContext {
state: &mut self.state,
viewport,
frame_buffer,
frame_width: ui.screen_size().x,
frame_height: ui.screen_size().y,
drawing_context: ui.draw(),
white_dummy: self.white_dummy.clone(),
texture_cache: &mut self.texture_cache,
})?;
// Finally, register the texture in the cache so it becomes available as a texture
// in the deferred/forward renderer.
self.texture_cache.map.insert(
render_target.key(),
CacheEntry {
value: frame_buffer
.color_attachments()
.first()
.unwrap()
.texture
.clone(),
time_to_live: f32::INFINITY,
value_hash: 0, // TODO
},
);
Ok(())
}
fn update_texture_cache(&mut self, dt: f32) {
// Maximum amount of textures uploaded to GPU per frame. This defines throughput **only** for
// requests from the resource manager. This is needed to prevent a huge lag when there are tons
// of requests, so it acts as a kind of workload balancer.
const THROUGHPUT: usize = 5;
let mut uploaded = 0;
while let Ok(texture) = self.texture_upload_receiver.try_recv() {
// Just "touch" texture in the cache and it will load texture to GPU.
if self.texture_cache.get(&mut self.state, &texture).is_some() {
uploaded += 1;
if uploaded >= THROUGHPUT {
break;
}
}
}
self.texture_cache.update(dt);
}
pub(in crate) fn update(&mut self, dt: f32) {
// Update caches - this will remove timed out resources.
self.update_texture_cache(dt);
self.geometry_cache.update(dt);
self.renderer2d.update(dt);
}
fn render_frame(
&mut self,
scenes: &SceneContainer,
drawing_context: &DrawingContext,
scenes2d: &Scene2dContainer,
) -> Result<(), FrameworkError> {
scope_profile!();
// Make sure to drop associated data for destroyed scenes.
self.scene_data_map
.retain(|h, _| scenes.is_valid_handle(*h));
// We have to invalidate the resource bindings cache because textures, programs or other
// GL resources can be destroyed and new resources created under their "names"; the cache
// would still think the resource is correctly bound even though it is a different object
// with the same name.
self.state.invalidate_resource_bindings_cache();
let dt = self.statistics.capped_frame_time;
self.statistics.begin_frame();
let window_viewport = Rect::new(0, 0, self.frame_size.0 as i32, self.frame_size.1 as i32);
self.backbuffer.clear(
&mut self.state,
window_viewport,
Some(self.backbuffer_clear_color),
Some(1.0),
Some(0),
);
let backbuffer_width = self.frame_size.0 as f32;
let backbuffer_height = self.frame_size.1 as f32;
for (scene_handle, scene) in scenes.pair_iter().filter(|(_, s)| s.enabled) {
let graph = &scene.graph;
let frame_size = scene
.render_target
.as_ref()
.map_or_else(
// Use either backbuffer size
|| Vector2::new(backbuffer_width, backbuffer_height),
// Or framebuffer size
|rt| {
if let TextureKind::Rectangle { width, height } = rt.data_ref().kind() {
Vector2::new(width as f32, height as f32)
} else {
panic!("only rectangle textures can be used as render target!")
}
},
)
// Clamp to [1.0; infinity] range.
.sup(&Vector2::new(1.0, 1.0));
let state = &mut self.state;
self.batch_storage.generate_batches(graph);
let scene_associated_data = self
.scene_data_map
.entry(scene_handle)
.and_modify(|data| {
if data.gbuffer.width != frame_size.x as i32
|| data.gbuffer.height != frame_size.y as i32
{
let width = frame_size.x as usize;
let height = frame_size.y as usize;
*data = AssociatedSceneData::new(state, width, height).unwrap();
}
})
.or_insert_with(|| {
let width = frame_size.x as usize;
let height = frame_size.y as usize;
AssociatedSceneData::new(state, width, height).unwrap()
});
// If we specified a texture to draw to, we have to register it in the texture cache
// so it can be used later on as a texture. This is useful in case you need to draw
// something offscreen and then draw it on some mesh.
// TODO: However it can be dangerous to use the frame texture, as it may still be bound
// to the pipeline.
if let Some(rt) = scene.render_target.clone() {
self.texture_cache.map.insert(
rt.key(),
CacheEntry {
value: scene_associated_data.ldr_scene_frame_texture(),
time_to_live: f32::INFINITY,
value_hash: 0, // TODO
},
);
}
for camera in graph.linear_iter().filter_map(|node| {
if let Node::Camera(camera) = node {
if camera.is_enabled() {
Some(camera)
} else {
None
}
} else {
None
}
}) {
let viewport = camera.viewport_pixels(frame_size);
self.statistics += scene_associated_data.gbuffer.fill(GBufferRenderContext {
state,
camera,
geom_cache: &mut self.geometry_cache,
batch_storage: &self.batch_storage,
texture_cache: &mut self.texture_cache,
shader_cache: &mut self.shader_cache,
environment_dummy: self.environment_dummy.clone(),
use_parallax_mapping: self.quality_settings.use_parallax_mapping,
normal_dummy: self.normal_dummy.clone(),
white_dummy: self.white_dummy.clone(),
black_dummy: self.black_dummy.clone(),
graph,
});
scene_associated_data.copy_depth_stencil_to_scene_framebuffer(state);
scene_associated_data.hdr_scene_framebuffer.clear(
state,
viewport,
Some(Color::from_rgba(0, 0, 0, 255)),
None, // Keep depth, we've just copied valid data in it.
Some(0),
);
let (pass_stats, light_stats) =
self.deferred_light_renderer
.render(DeferredRendererContext {
state,
scene,
camera,
gbuffer: &mut scene_associated_data.gbuffer,
white_dummy: self.white_dummy.clone(),
ambient_color: scene.ambient_lighting_color,
settings: &self.quality_settings,
textures: &mut self.texture_cache,
geometry_cache: &mut self.geometry_cache,
batch_storage: &self.batch_storage,
frame_buffer: &mut scene_associated_data.hdr_scene_framebuffer,
shader_cache: &mut self.shader_cache,
normal_dummy: self.normal_dummy.clone(),
black_dummy: self.black_dummy.clone(),
});
self.statistics.lighting += light_stats;
self.statistics.geometry += pass_stats;
let depth = scene_associated_data.gbuffer.depth();
self.statistics +=
self.particle_system_renderer
.render(ParticleSystemRenderContext {
state,
framebuffer: &mut scene_associated_data.hdr_scene_framebuffer,
graph,
camera,
white_dummy: self.white_dummy.clone(),
depth,
frame_width: frame_size.x,
frame_height: frame_size.y,
viewport,
texture_cache: &mut self.texture_cache,
});
self.statistics += self.sprite_renderer.render(SpriteRenderContext {
state,
framebuffer: &mut scene_associated_data.hdr_scene_framebuffer,
graph,
camera,
white_dummy: self.white_dummy.clone(),
viewport,
textures: &mut self.texture_cache,
geom_map: &mut self.geometry_cache,
});
self.statistics += self.forward_renderer.render(ForwardRenderContext {
state,
camera,
geom_cache: &mut self.geometry_cache,
texture_cache: &mut self.texture_cache,
shader_cache: &mut self.shader_cache,
batch_storage: &self.batch_storage,
framebuffer: &mut scene_associated_data.hdr_scene_framebuffer,
viewport,
quality_settings: &self.quality_settings,
white_dummy: self.white_dummy.clone(),
normal_dummy: self.normal_dummy.clone(),
black_dummy: self.black_dummy.clone(),
});
for render_pass in self.scene_render_passes.iter() {
self.statistics +=
render_pass.lock().unwrap().render(SceneRenderPassContext {
pipeline_state: state,
texture_cache: &mut self.texture_cache,
geometry_cache: &mut self.geometry_cache,
quality_settings: &self.quality_settings,
batch_storage: &self.batch_storage,
viewport,
scene,
camera,
scene_handle,
white_dummy: self.white_dummy.clone(),
normal_dummy: self.normal_dummy.clone(),
metallic_dummy: self.metallic_dummy.clone(),
environment_dummy: self.environment_dummy.clone(),
black_dummy: self.black_dummy.clone(),
depth_texture: scene_associated_data.gbuffer.depth(),
normal_texture: scene_associated_data.gbuffer.normal_texture(),
ambient_texture: scene_associated_data.gbuffer.ambient_texture(),
framebuffer: &mut scene_associated_data.hdr_scene_framebuffer,
})?;
}
let quad = self.geometry_cache.get(state, &self.quad);
// Prepare glow map.
self.statistics.geometry += scene_associated_data.bloom_renderer.render(
state,
quad,
scene_associated_data.hdr_scene_frame_texture(),
);
// Convert high dynamic range frame to low dynamic range (sRGB) with tone mapping and gamma correction.
self.statistics.geometry += scene_associated_data.hdr_renderer.render(
state,
scene_associated_data.hdr_scene_frame_texture(),
scene_associated_data.bloom_renderer.result(),
&mut scene_associated_data.ldr_scene_framebuffer,
viewport,
quad,
dt,
camera.exposure(),
camera.color_grading_lut_ref(),
camera.color_grading_enabled(),
&mut self.texture_cache,
);
// Apply FXAA if needed.
if self.quality_settings.fxaa {
self.statistics.geometry += self.fxaa_renderer.render(
state,
viewport,
scene_associated_data.ldr_scene_frame_texture(),
&mut scene_associated_data.ldr_temp_framebuffer,
&mut self.geometry_cache,
);
let quad = self.geometry_cache.get(state, &self.quad);
let temp_frame_texture = scene_associated_data.ldr_temp_frame_texture();
self.statistics.geometry += blit_pixels(
state,
&mut scene_associated_data.ldr_scene_framebuffer,
temp_frame_texture,
&self.flat_shader,
viewport,
quad,
);
}
// Render debug geometry in the LDR frame buffer.
self.statistics += self.debug_renderer.render(
state,
viewport,
&mut scene_associated_data.ldr_scene_framebuffer,
&scene.drawing_context,
camera,
);
// Optionally render everything into back buffer.
if scene.render_target.is_none() {
let quad = self.geometry_cache.get(state, &self.quad);
self.statistics.geometry += blit_pixels(
state,
&mut self.backbuffer,
scene_associated_data.ldr_scene_frame_texture(),
&self.flat_shader,
viewport,
quad,
);
}
}
}
// TODO: 2D renderer requires its own HDR pipeline.
self.statistics += self.renderer2d.render(
&mut self.state,
&mut self.backbuffer,
Vector2::new(backbuffer_width, backbuffer_height),
scenes2d,
&mut self.texture_cache,
self.white_dummy.clone(),
)?;
// Render UI on top of everything without gamma correction.
self.statistics += self.ui_renderer.render(UiRenderContext {
state: &mut self.state,
viewport: window_viewport,
frame_buffer: &mut self.backbuffer,
frame_width: backbuffer_width,
frame_height: backbuffer_height,
drawing_context,
white_dummy: self.white_dummy.clone(),
texture_cache: &mut self.texture_cache,
})?;
Ok(())
}
#[cfg(not(target_arch = "wasm32"))]
pub(in crate) fn render_and_swap_buffers(
&mut self,
scenes: &SceneContainer,
drawing_context: &DrawingContext,
scenes2d: &Scene2dContainer,
context: &glutin::WindowedContext<glutin::PossiblyCurrent>,
) -> Result<(), FrameworkError> {
self.render_frame(scenes, drawing_context, scenes2d)?;
self.statistics.end_frame();
context.swap_buffers()?;
self.state.check_error();
self.statistics.finalize();
self.statistics.pipeline = self.state.pipeline_statistics();
Ok(())
}
#[cfg(target_arch = "wasm32")]
pub(in crate) fn render_and_swap_buffers(
&mut self,
scenes: &SceneContainer,
drawing_context: &DrawingContext,
scenes2d: &Scene2dContainer,
) -> Result<(), FrameworkError> {
self.render_frame(scenes, drawing_context, scenes2d)?;
self.statistics.end_frame();
self.state.check_error();
self.statistics.finalize();
self.statistics.pipeline = self.state.pipeline_statistics();
Ok(())
}
}

utils_test.go

// Copyright 2015 Sorint.lab
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package postgresql
import (
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/gravitational/stolon/pkg/cluster"
)
func TestParseTimeLineHistory(t *testing.T) {
tests := []struct {
contents string
tlsh cluster.PostgresTimeLinesHistory
err error
}{
{
contents: "",
tlsh: cluster.PostgresTimeLinesHistory{},
err: nil,
},
{
contents: `1 0/5000090 no recovery target specified`,
tlsh: cluster.PostgresTimeLinesHistory{
{
TimelineID: 1,
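// 83886224 == 0x5000090, i.e. the "0/5000090" XLog switch point
// (high << 32 | low) from the contents line above.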
SwitchPoint: 83886224,
Reason: "no recovery target specified",
},
},
err: nil,
},
}
for i, tt := range tests {
tlsh, err := parseTimeLinesHistory(tt.contents)
t.Logf("test #%d", i)
if tt.err != nil {
if err == nil {
t.Errorf("got no error, wanted error: %v", tt.err)
} else if tt.err.Error() != err.Error() {
t.Errorf("got error: %v, wanted error: %v", err, tt.err)
}
} else {
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(tlsh, tt.tlsh) {
t.Errorf(spew.Sprintf("#%d: wrong timeline history: got: %#+v, want: %#+v", i, tlsh, tt.tlsh))
}
}
}
}
func TestValidReplSlotName(t *testing.T) {
tests := []struct {
name string
valid bool
}{
{"aaaaaaaa", true},
{"a12345aa", true},
{"_a1_2345aa_", true},
{"", false},
{"a-aaaaaaa", false},
{"_a1_-2345aa_", false},
{"ABC123", false},
{"$123", false},
}
for i, tt := range tests {
valid := IsValidReplSlotName(tt.name)
if valid != tt.valid {
t.Errorf("%d: replication slot name %q got valid: %t but wanted valid: %t", i, tt.name, valid, tt.valid)
}
}
}

virtualnode.go

/*
Copyright AppsCode Inc. and Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
"time"
v1alpha1 "kubeform.dev/provider-aws-api/apis/appmesh/v1alpha1"
scheme "kubeform.dev/provider-aws-api/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// VirtualNodesGetter has a method to return a VirtualNodeInterface.
// A group's client should implement this interface.
type VirtualNodesGetter interface {
VirtualNodes(namespace string) VirtualNodeInterface
}
// VirtualNodeInterface has methods to work with VirtualNode resources.
type VirtualNodeInterface interface {
Create(ctx context.Context, virtualNode *v1alpha1.VirtualNode, opts v1.CreateOptions) (*v1alpha1.VirtualNode, error)
Update(ctx context.Context, virtualNode *v1alpha1.VirtualNode, opts v1.UpdateOptions) (*v1alpha1.VirtualNode, error)
UpdateStatus(ctx context.Context, virtualNode *v1alpha1.VirtualNode, opts v1.UpdateOptions) (*v1alpha1.VirtualNode, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VirtualNode, error)
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VirtualNodeList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VirtualNode, err error)
VirtualNodeExpansion
}
// virtualNodes implements VirtualNodeInterface
type virtualNodes struct {
client rest.Interface
ns string
}
// newVirtualNodes returns a VirtualNodes
func newVirtualNodes(c *AppmeshV1alpha1Client, namespace string) *virtualNodes {
return &virtualNodes{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the virtualNode, and returns the corresponding virtualNode object, and an error if there is any.
func (c *virtualNodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VirtualNode, err error) {
result = &v1alpha1.VirtualNode{}
err = c.client.Get().
Namespace(c.ns).
Resource("virtualnodes").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of VirtualNodes that match those selectors.
func (c *virtualNodes) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VirtualNodeList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.VirtualNodeList{}
err = c.client.Get().
Namespace(c.ns).
Resource("virtualnodes").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested virtualNodes.
func (c *virtualNodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("virtualnodes").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a virtualNode and creates it. Returns the server's representation of the virtualNode, and an error, if there is any.
func (c *virtualNodes) Create(ctx context.Context, virtualNode *v1alpha1.VirtualNode, opts v1.CreateOptions) (result *v1alpha1.VirtualNode, err error) {
result = &v1alpha1.VirtualNode{}
err = c.client.Post().
Namespace(c.ns).
Resource("virtualnodes").
VersionedParams(&opts, scheme.ParameterCodec).
Body(virtualNode).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a virtualNode and updates it. Returns the server's representation of the virtualNode, and an error, if there is any.
func (c *virtualNodes) Update(ctx context.Context, virtualNode *v1alpha1.VirtualNode, opts v1.UpdateOptions) (result *v1alpha1.VirtualNode, err error) {
result = &v1alpha1.VirtualNode{}
err = c.client.Put().
Namespace(c.ns).
Resource("virtualnodes").
Name(virtualNode.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(virtualNode).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *virtualNodes) UpdateStatus(ctx context.Context, virtualNode *v1alpha1.VirtualNode, opts v1.UpdateOptions) (result *v1alpha1.VirtualNode, err error) {
result = &v1alpha1.VirtualNode{}
err = c.client.Put().
Namespace(c.ns).
Resource("virtualnodes").
Name(virtualNode.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(virtualNode).
Do(ctx).
Into(result)
return
}
// Delete takes name of the virtualNode and deletes it. Returns an error if one occurs.
func (c *virtualNodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("virtualnodes").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *virtualNodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("virtualnodes").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched virtualNode.
func (c *virtualNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VirtualNode, err error) {
result = &v1alpha1.VirtualNode{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("virtualnodes").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
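// Illustrative usage sketch (not generated code; `clientset` and the accessor
// name are assumptions based on the usual client-gen layout):
//
//	vn, err := clientset.AppmeshV1alpha1().VirtualNodes("default").Get(ctx, "example", v1.GetOptions{})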

main.rs

// Silence some warnings so they don't distract from the exercise.
#![allow(unused_mut)]
fn main() {
// This fancy stuff either gets the first argument as a String, or prints
// usage and exits if an argument was not supplied to the program.
let mut arg: String = std::env::args().nth(1).unwrap_or_else(|| {
println!("Please supply an argument to this program.");
std::process::exit(-1);
});
// 1. Write a function `inspect` that takes a reference to a String, returns nothing, but
// prints whether the contents of the String is plural or singular. Then uncomment and run this
// code with `cargo run apple` and `cargo run apples`. Hint: use `.ends_with("s")` on the
// String reference
//
inspect(&arg);
fn inspect(s: &String) {
if s.ends_with("s") {
println!("{} is plural!", s);
} else {
println!("{} is singular!", s);
}
}
// 2. Write a function `change` that takes a *mutable* reference to a String and adds an "s" to
// the String if it doesn't already end with "s". Then uncomment and run the code below with
// `cargo run apple`. Hint: use `.push_str("s")` on the mutable String reference to add an "s".
//
change(&mut arg);
println!("I have many {}", arg);
fn change(s: &mut String) {
if !s.ends_with("s") {
s.push_str("s");
println!("{} becomes plural!", s);
}
}
// 3. Write a function `eat` that accepts ownership of (consumes) a String and returns a bool
// indicating whether or not the String both starts with a "b" AND contains an "a".
// Hint 1: use `.starts_with("b")` and `.contains("a")`
// Hint 2: `&&` is the boolean "AND" operator
//
if eat(arg) {
println!("Might be bananas");
} else {
println!("Not bananas");
}
fn eat(s: String) -> bool {
s.starts_with("b") && s.contains("a")
}
// Try running this program with "boat", "banana", and "grapes" as the arguments :-)
// Challenge: Write a function "add" that takes *references* to two integer arguments,
// dereferences them and adds them together, and returns the result.
//
println!("1 + 2 = {}, even via references", add(&1, &2));
fn add(x: &i32, y: &i32) -> i32 {
*x + *y // equal to x + y
}
}

input_text_stream.rs

use crate::open_input::{open_input, Input};
use crate::{MediaType, Pseudonym};
use basic_text::{ReadText, ReadTextLayered, TextReader, TextSubstr};
use clap::TryFromOsArg;
use io_streams::StreamReader;
use layered_io::{Bufferable, LayeredReader, ReadLayered, Status};
use std::ffi::OsStr;
use std::fmt::{self, Debug, Formatter};
use std::io::{self, IoSliceMut, Read};
use terminal_io::TerminalReader;
use utf8_io::{ReadStr, ReadStrLayered, Utf8Reader};
/// An input stream for plain text input.
///
/// An `InputTextStream` implements `Read` so it supports `read`,
/// `read_to_end`, `read_to_str`, etc. and can be used anywhere a
/// `Read`-implementing object is needed.
///
/// `InputTextStream` is unbuffered (even when it is stdin), so wrapping
/// it in a [`std::io::BufReader`] is recommended for performance and
/// ease of use.
///
/// The primary way to construct an `InputTextStream` is to use it as
/// a type in a `kommand` argument or `clap_derive` struct. Command-line
/// arguments will then be automatically converted into input streams.
/// Currently supported syntaxes include:
/// - Names starting with `https:` or `http:`, which are interpreted as URLs
/// to open.
/// - Names starting with `data:` are interpreted as data URLs providing the
/// data in their payload.
/// - Names starting with `file:` are interpreted as local filesystem URLs
/// providing paths to files to open.
/// - "-" is interpreted as standard input.
/// - "(...)" runs a command with a pipe from the child process' stdout, on
/// platforms whch support it.
/// - Names which don't parse as URLs are interpreted as plain local
/// filesystem paths. To force a string to be interpreted as a plain local
/// path, arrange for it to begin with `./` or `/`.
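///
/// A minimal sketch of the intended usage (names are assumptions, not verified
/// against `kommand`'s API):
///
/// ```ignore
/// #[kommand::main]
/// fn main(input: InputTextStream) {
///     println!("initial size: {:?}", input.initial_size());
/// }
/// ```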
pub struct InputTextStream {
name: String,
reader: TextReader<Utf8Reader<LayeredReader<TerminalReader<StreamReader>>>>,
media_type: MediaType,
initial_size: Option<u64>,
}
impl InputTextStream {
/// If the input stream metadata implies a particular media type, also
/// known as MIME type, return it. Many input streams know their type,
/// though some do not. This is strictly based on available metadata, and
/// not on examining any of the contents of the stream, and there's no
/// guarantee the contents are valid.
pub fn media_type(&self) -> &MediaType {
&self.media_type
}
/// Return the initial size of the stream, in bytes. This is strictly based
/// on available metadata, and not on examining any of the contents of the
/// stream, and the stream could end up being shorter or longer if the
/// source is concurrently modified or it produces content which must be
/// adapted to meet the "plain text" requirements.
pub fn initial_size(&self) -> Option<u64> {
self.initial_size
}
/// Return a `Pseudonym` which encapsulates this stream's name (typically
/// its filesystem path or its URL). This allows it to be written to an
/// `OutputByteStream` while otherwise remaining entirely opaque.
pub fn pseudonym(&self) -> Pseudonym {
Pseudonym::new(self.name.clone())
}
fn from_input(input: Input) -> Self {
        // Build the same reader stack as the field type above: terminal
        // detection, then layered I/O, then UTF-8 validation, then Basic Text.
        let reader = TerminalReader::with_handle(input.reader);
        let reader = LayeredReader::new(reader);
        let reader = Utf8Reader::new(reader);
        let reader = TextReader::new(reader);
let media_type = input.media_type.union(MediaType::text());
Self {
name: input.name,
reader,
media_type,
initial_size: input.initial_size,
}
}
}
/// Implement `TryFromOsArg` so that `clap_derive` can parse `InputTextStream`
/// arguments automatically.
///
/// This is hidden from the documentation as it opens resources from
/// strings using ambient authorities.
#[doc(hidden)]
impl TryFromOsArg for InputTextStream {
type Error = anyhow::Error;
#[inline]
    fn try_from_os_str_arg(os: &OsStr) -> anyhow::Result<Self> {
open_input(os).map(Self::from_input)
}
}
impl ReadLayered for InputTextStream {
#[inline]
fn read_with_status(&mut self, buf: &mut [u8]) -> io::Result<(usize, Status)> {
self.reader.read_with_status(buf)
}
#[inline]
fn read_vectored_with_status(
&mut self,
bufs: &mut [IoSliceMut<'_>],
) -> io::Result<(usize, Status)> {
self.reader.read_vectored_with_status(bufs)
}
}
impl Read for InputTextStream {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.reader.read(buf)
}
#[inline]
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
self.reader.read_vectored(bufs)
}
#[cfg(can_vector)]
#[inline]
fn is_read_vectored(&self) -> bool {
self.reader.is_read_vectored()
}
#[inline]
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
self.reader.read_to_end(buf)
}
#[inline]
fn read_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
self.reader.read_to_string(buf)
}
#[inline]
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
self.reader.read_exact(buf)
}
}
impl Bufferable for InputTextStream {
#[inline]
fn abandon(&mut self) {
self.reader.abandon()
}
}
impl ReadStr for InputTextStream {
#[inline]
fn read_str(&mut self, buf: &mut str) -> io::Result<usize> {
self.reader.read_str(buf)
}
}
impl ReadStrLayered for InputTextStream {
#[inline]
fn read_str_with_status(&mut self, buf: &mut str) -> io::Result<(usize, Status)> {
self.reader.read_str_with_status(buf)
}
}
impl ReadText for InputTextStream {
#[inline]
fn read_text_substr(&mut self, buf: &mut TextSubstr) -> io::Result<usize> {
self.reader.read_text_substr(buf)
}
#[inline]
fn read_exact_text_substr(&mut self, buf: &mut TextSubstr) -> io::Result<()> {
self.reader.read_exact_text_substr(buf)
}
}
impl ReadTextLayered for InputTextStream {
#[inline]
fn read_text_substr_with_status(
&mut self,
buf: &mut TextSubstr,
) -> io::Result<(usize, Status)> {
self.reader.read_text_substr_with_status(buf)
}
#[inline]
fn read_exact_text_substr_using_status(&mut self, buf: &mut TextSubstr) -> io::Result<Status> {
self.reader.read_exact_text_substr_using_status(buf)
}
}
impl Debug for InputTextStream {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
// Don't print the name here, as that's an implementation detail.
let mut b = f.debug_struct("InputTextStream");
b.field("media_type", &self.media_type);
b.field("initial_size", &self.initial_size);
b.finish()
}
}
#[test]
fn data_url_plain() {
let mut s = String::new();
InputTextStream::try_from_os_str_arg("data:,Hello%2C%20World!".as_ref())
.unwrap()
.read_to_string(&mut s)
.unwrap();
assert_eq!(s, "Hello, World!\n");
}
#[test]
fn data_url_base64() {
let mut s = String::new();
InputTextStream::try_from_os_str_arg("data:text/plain;base64,SGVsbG8sIFdvcmxkIQ==".as_ref())
.unwrap()
.read_to_string(&mut s)
.unwrap();
assert_eq!(s, "Hello, World!\n");
}
bitcoin_hr.ts
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.0" language="hr">
<defaultcodec>UTF-8</defaultcodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Ecologicalsquare</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+39"/>
<source><b>Ecologicalsquare</b> version</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+41"/>
<source>Copyright © 2009-2014 The Bitcoin developers
Copyright © 2012-2014 The NovaCoin developers
Copyright © 2015 Ecologicalsquare Coin developers</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+15"/>
<source><html><head/><body><p><br/></p></body></html></source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+22"/>
<source>Double-click to edit address or label</source>
<translation>Dvostruki klik za uređivanje adrese ili oznake</translation>
</message>
<message>
<location line="+24"/>
<source>Create a new address</source>
<translation>Dodajte novu adresu</translation>
</message>
<message>
<location line="+10"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Kopiraj trenutno odabranu adresu u međuspremnik</translation>
</message>
<message>
<location line="-7"/>
<source>&New Address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-43"/>
<source>These are your Ecologicalsquare addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+53"/>
<source>&Copy Address</source>
<translation>&Kopirati adresu</translation>
</message>
<message>
<location line="+7"/>
<source>Show &QR Code</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>Sign a message to prove you own a Ecologicalsquare address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+17"/>
<source>Delete the currently selected address from the list</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-10"/>
<source>Verify a message to ensure it was signed with a specified Ecologicalsquare address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation type="unfinished">&Potvrdite poruku</translation>
</message>
<message>
<location line="+10"/>
<source>&Delete</source>
<translation>&Brisanje</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+66"/>
<source>Copy &Label</source>
<translation>Kopirati &oznaku</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>&Izmjeniti</translation>
</message>
<message>
<location line="+248"/>
<source>Export Address Book Data</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
        <translation>Datoteka vrijednosti odvojenih zarezom (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+145"/>
<source>Label</source>
<translation>Oznaka</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adresa</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(bez oznake)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Unesite lozinku</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Nova lozinka</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Ponovite novu lozinku</translation>
</message>
<message>
<location line="+33"/>
<source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>For staking only</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+38"/>
<source>Encrypt wallet</source>
<translation>Šifriranje novčanika</translation>
</message>
<message>
<location line="+7"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Ova operacija treba lozinku vašeg novčanika kako bi se novčanik otključao.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Otključaj novčanik</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Ova operacija treba lozinku vašeg novčanika kako bi se novčanik dešifrirao.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Dešifriranje novčanika.</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Promjena lozinke</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Unesite staru i novu lozinku za novčanik.</translation>
</message>
<message>
<location line="+45"/>
<source>Confirm wallet encryption</source>
<translation>Potvrdi šifriranje novčanika</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR COINS</b>!</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>Jeste li sigurni da želite šifrirati svoj novčanik?</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+103"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>Upozorenje: Tipka Caps Lock je uključena!</translation>
</message>
<message>
<location line="-133"/>
<location line="+60"/>
<source>Wallet encrypted</source>
<translation>Novčanik šifriran</translation>
</message>
<message>
<location line="-140"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>ten or more random characters</b>, or <b>eight or more words</b>.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+82"/>
<source>Ecologicalsquare will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+44"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Šifriranje novčanika nije uspjelo</translation>
</message>
<message>
<location line="-56"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Šifriranje novčanika nije uspjelo zbog interne pogreške. Vaš novčanik nije šifriran.</translation>
</message>
<message>
<location line="+7"/>
<location line="+50"/>
<source>The supplied passphrases do not match.</source>
<translation>Priložene lozinke se ne podudaraju.</translation>
</message>
<message>
<location line="-38"/>
<source>Wallet unlock failed</source>
<translation>Otključavanje novčanika nije uspjelo</translation>
</message>
<message>
<location line="+1"/>
<location line="+12"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>Lozinka za dešifriranje novčanika nije točna.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Dešifriranje novčanika nije uspjelo</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>Lozinka novčanika je uspješno promijenjena.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+302"/>
<source>Sign &message...</source>
<translation>&Potpišite poruku...</translation>
</message>
<message>
<location line="-68"/>
<source>Show general overview of wallet</source>
<translation>Prikaži opći pregled novčanika</translation>
</message>
<message>
<location line="+17"/>
<source>&Transactions</source>
<translation>&Transakcije</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Pretraži povijest transakcija</translation>
</message>
<message>
<location line="+5"/>
<source>&Address Book</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Edit the list of stored addresses and labels</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-18"/>
<source>Show the list of addresses for receiving payments</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+34"/>
<source>E&xit</source>
<translation>&Izlaz</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Izlazak iz programa</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about Ecologicalsquare</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>About &Qt</source>
<translation>Više o &Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Prikaži informacije o Qt</translation>
</message>
<message>
<location line="+3"/>
<source>&Options...</source>
<translation>&Postavke</translation>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation>&Šifriraj novčanik...</translation>
</message>
<message>
<location line="+2"/>
<source>&Backup Wallet...</source>
<translation>&Backup novčanika...</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&Promijena lozinke...</translation>
</message>
<message>
<location line="+9"/>
<source>&Export...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-59"/>
<source>Send coins to a Ecologicalsquare address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+41"/>
<source>Modify configuration options for Ecologicalsquare</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+19"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-13"/>
<source>Encrypt or decrypt wallet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Backup wallet to another location</source>
<translation>Napravite sigurnosnu kopiju novčanika na drugoj lokaciji</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Promijenite lozinku za šifriranje novčanika</translation>
</message>
<message>
<location line="+10"/>
<source>&Debug window</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-5"/>
<source>&Verify message...</source>
<translation>&Potvrdite poruku...</translation>
</message>
<message>
<location line="-218"/>
<location line="+579"/>
<source>Ecologicalsquare</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-579"/>
<source>Wallet</source>
<translation>Novčanik</translation>
</message>
<message>
<location line="+193"/>
<source>&About Ecologicalsquare</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+13"/>
<source>&Show / Hide</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+8"/>
<source>Unlock wallet</source>
<translation type="unfinished">Otključaj novčanik</translation>
</message>
<message>
<location line="+1"/>
<source>&Lock Wallet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Lock wallet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+35"/>
<source>&File</source>
<translation>&Datoteka</translation>
</message>
<message>
<location line="+8"/>
<source>&Settings</source>
<translation>&Konfiguracija</translation>
</message>
<message>
<location line="+9"/>
<source>&Help</source>
<translation>&Pomoć</translation>
</message>
<message>
<location line="+17"/>
<source>Tabs toolbar</source>
<translation>Traka kartica</translation>
</message>
<message>
<location line="+46"/>
<location line="+9"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+0"/>
<location line="+58"/>
<source>Ecologicalsquare client</source>
<translation type="unfinished"></translation>
</message>
<message numerus="yes">
<location line="+86"/>
<source>%n active connection(s) to Ecologicalsquare network</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location line="+488"/>
<source>Staking.<br>Your weight is %1<br>Network weight is %2<br>Expected time to earn reward is %3</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+6"/>
<source>Not staking because wallet is locked</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is offline</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because wallet is syncing</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Not staking because you don't have mature coins</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-836"/>
<source>&Dashboard</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+6"/>
<source>&Receive</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+6"/>
<source>&Send</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+44"/>
<location line="+1"/>
<location line="+263"/>
<location line="+1"/>
<source>Gen Pos</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-256"/>
<source>&Unlock Wallet...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+11"/>
<source>Ctrl+D</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+238"/>
<location line="+1"/>
<source>Stop Pos</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+47"/>
<source>Up to date</source>
<translation>Ažurno</translation>
</message>
<message>
<location line="+43"/>
<source>Catching up...</source>
<translation>Ažuriranje...</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+27"/>
<source>Sent transaction</source>
<translation>Poslana transakcija</translation>
</message>
<message>
<location line="+1"/>
<source>Incoming transaction</source>
<translation>Dolazna transakcija</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Datum:%1
Iznos:%2
Tip:%3
Adresa:%4
</translation>
</message>
<message>
<location line="+100"/>
<location line="+15"/>
<source>URI handling</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-15"/>
<location line="+15"/>
<source>URI can not be parsed! This can be caused by an invalid Ecologicalsquare address or malformed URI parameters.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+9"/>
<source>Wallet is <b>not encrypted</b></source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Novčanik je <b>šifriran</b> i trenutno <b>otključan</b></translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Novčanik je <b>šifriran</b> i trenutno <b>zaključan</b></translation>
</message>
<message>
<location line="+24"/>
<source>Backup Wallet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"></translation>
</message>
<message numerus="yes">
<location line="+91"/>
<source>%n second(s)</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n minute(s)</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message numerus="yes">
<location line="-429"/>
<location line="+433"/>
<source>%n hour(s)</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location line="-456"/>
<source>Processed %1 blocks of transaction history.</source>
<translation type="unfinished"></translation>
</message>
<message numerus="yes">
<location line="+27"/>
<location line="+433"/>
<source>%n day(s)</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message numerus="yes">
<location line="-429"/>
<location line="+6"/>
<source>%n week(s)</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location line="+0"/>
<source>%1 and %2</source>
<translation type="unfinished"></translation>
</message>
<message numerus="yes">
<location line="+0"/>
<source>%n year(s)</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location line="+5"/>
<source>%1 behind</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+15"/>
<source>Last received block was generated %1 ago.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+23"/>
<source>Error</source>
<translation type="unfinished">Greška</translation>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+69"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+324"/>
<source>Not staking</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+104"/>
<source>A fatal error occurred. Ecologicalsquare can no longer continue safely and will quit.</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+110"/>
<source>Network Alert</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>CoinControlDialog</name>
<message>
<location filename="../forms/coincontroldialog.ui" line="+14"/>
<source>Coin Control</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+31"/>
<source>Quantity:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+32"/>
<source>Bytes:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+48"/>
<source>Amount:</source>
<translation>Iznos:</translation>
</message>
<message>
<location line="+32"/>
<source>Priority:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+48"/>
<source>Fee:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="+537"/>
<source>no</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../forms/coincontroldialog.ui" line="+51"/>
<source>After Fee:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+35"/>
<source>Change:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+69"/>
<source>(un)select all</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+13"/>
<source>Tree mode</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+16"/>
<source>List mode</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+45"/>
<source>Amount</source>
<translation>Iznos</translation>
</message>
<message>
<location line="+5"/>
<source>Label</source>
<translation type="unfinished">Oznaka</translation>
</message>
<message>
<location line="+5"/>
<source>Address</source>
<translation>Adresa</translation>
</message>
<message>
<location line="+5"/>
<source>Date</source>
<translation>Datum</translation>
</message>
<message>
<location line="+5"/>
<source>Confirmations</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed</source>
<translation>Potvrđeno</translation>
</message>
<message>
<location line="+5"/>
<source>Priority</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../coincontroldialog.cpp" line="-500"/>
<source>Copy address</source>
<translation>Kopirati adresu</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Kopirati oznaku</translation>
</message>
<message>
<location line="+1"/>
<location line="+26"/>
<source>Copy amount</source>
<translation>Kopiraj iznos</translation>
</message>
<message>
<location line="-25"/>
<source>Copy transaction ID</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+24"/>
<source>Copy quantity</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Copy fee</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+317"/>
<source>highest</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>high</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>medium-high</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>medium</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+4"/>
<source>low-medium</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>low</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>lowest</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+140"/>
<source>DUST</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>yes</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+10"/>
<source>This label turns red, if the transaction size is bigger than 10000 bytes.
This means a fee of at least %1 per kb is required.
Can vary +/- 1 Byte per input.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Transactions with higher priority get more likely into a block.
This label turns red, if the priority is smaller than "medium".
This means a fee of at least %1 per kb is required.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if any recipient receives an amount smaller than %1.
This means a fee of at least %2 is required.
Amounts below 0.546 times the minimum relay fee are shown as DUST.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>This label turns red, if the change is smaller than %1.
This means a fee of at least %2 is required.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+36"/>
<location line="+66"/>
<source>(no label)</source>
<translation>(bez oznake)</translation>
</message>
<message>
<location line="-9"/>
<source>change from %1 (%2)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>(change)</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Izmjeni adresu</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Oznaka</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Adresa</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>Nova adresa za primanje</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Nova adresa za slanje</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Uredi adresu za primanje</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Uredi adresu za slanje</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>Upisana adresa "%1" je već u adresaru.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Ecologicalsquare address.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>Ne mogu otključati novčanik.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Stvaranje novog ključa nije uspjelo.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+428"/>
<location line="+12"/>
<source>Ecologicalsquare</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation type="unfinished">Upotreba:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>Ecologicalsquare-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+171"/>
<source>Ecologicalsquare version</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Usage:</source>
<translation>Upotreba:</translation>
</message>
<message>
<location line="+1"/>
<source>Send command to -server or Ecologicalsquared</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>List commands</source>
<translation>Prikaži komande</translation>
</message>
<message>
<location line="+1"/>
<source>Get help for a command</source>
<translation>Potraži pomoć za komandu</translation>
</message>
<message>
<location line="-145"/>
<source>Options:</source>
<translation>Postavke:</translation>
</message>
<message>
<location line="+2"/>
<source>Specify configuration file (default: Ecologicalsquare.conf)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Specify pid file (default: Ecologicalsquared.pid)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Specify wallet file (within data directory)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Odredi direktorij za datoteke</translation>
</message>
<message>
<location line="-25"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=Ecologicalsquarerpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Ecologicalsquare Alert" admin@foo.com
</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+27"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>Postavi cache za bazu podataka u MB (zadano:25)</translation>
</message>
<message>
<location line="+1"/>
<source>Set database disk log size in megabytes (default: 100)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>Listen for connections on <port> (default: 15714 or testnet: 25714)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>Održavaj najviše <n> veza sa članovima (default: 125)</translation>
</message>
<message>
<location line="+3"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Specify your own public address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+4"/>
<source>Bind to given address. Use [host]:port notation for IPv6</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Always query for peer addresses via DNS lookup (default: 0)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+4"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Prag za odspajanje članova koji se čudno ponašaju (default: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Broj sekundi koliko se članovima koji se čudno ponašaju neće dopustiti da se opet spoje (default: 86400)</translation>
</message>
<message>
<location line="-35"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+62"/>
<source>Listen for JSON-RPC connections on <port> (default: 15715 or testnet: 25715)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-16"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Prihvati komande iz tekst moda i JSON-RPC</translation>
</message>
<message>
<location line="+1"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Izvršavaj u pozadini kao uslužnik i prihvaćaj komande</translation>
</message>
<message>
<location line="+1"/>
<source>Use the test network</source>
<translation>Koristi test mrežu</translation>
</message>
<message>
<location line="-23"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-28"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+93"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+15"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Upozorenje: -paytxfee je podešen na preveliki iznos. To je iznos koji ćete platiti za obradu transakcije.</translation>
</message>
<message>
<location line="-103"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Ecologicalsquare will not work properly.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+130"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-16"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-34"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>Block creation options:</source>
<translation>Opcije za kreiranje bloka:</translation>
</message>
<message>
<location line="-67"/>
<source>Connect only to the specified node(s)</source>
<translation>Poveži se samo sa određenim nodom</translation>
</message>
<message>
<location line="+4"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+101"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-2"/>
<source>Invalid -tor address: '%s'</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+4"/>
<source>Invalid amount for -reservebalance=<amount></source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-89"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-16"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+30"/>
<source>Prepend debug output with timestamp</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+40"/>
<source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source>
<translation>SSL postavke: (za detalje o podešavanju SSL opcija vidi Bitcoin Wiki)</translation>
</message>
<message>
<location line="-38"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Šalji trace/debug informacije na konzolu umjesto u debug.log datoteku</translation>
</message>
<message>
<location line="+34"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>Podesite minimalnu veličinu bloka u bajtovima (default: 0)</translation>
</message>
<message>
<location line="-34"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-41"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>Odredi vremenski prozor za spajanje na mrežu u milisekundama (ugrađeni izbor: 5000)</translation>
</message>
<message>
<location line="+28"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>Pokušaj koristiti UPnP da otvoriš port za uslugu (default: 0)</translation>
</message>
<message>
<location line="-1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Pokušaj koristiti UPnP da otvoriš port za uslugu (default: 1 when listening)</translation>
</message>
<message>
<location line="-25"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+45"/>
<source>Username for JSON-RPC connections</source>
<translation>Korisničko ime za JSON-RPC veze</translation>
</message>
<message>
<location line="+54"/>
<source>Verifying database integrity...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+42"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>Warning</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Information</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-52"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-59"/>
<source>Password for JSON-RPC connections</source>
<translation>Lozinka za JSON-RPC veze</translation>
</message>
<message>
<location line="-47"/>
<source>Connect through SOCKS5 proxy</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+17"/>
<source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+12"/>
<source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+6"/>
<source>Output debugging information (default: 0, supplying <category> is optional)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>If <category> is not supplied, output all debugging information.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source><category> can be:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+4"/>
<source>Enter regression test mode, which uses a special chain in which blocks can be solved instantly. This is intended for regression testing tools and app development.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+8"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>Dozvoli JSON-RPC povezivanje s određene IP adrese</translation>
</message>
<message>
<location line="+1"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Pošalji komande nodu na adresi <ip> (ugrađeni izbor: 127.0.0.1)</translation>
</message>
<message>
<location line="+1"/>
<source>Wait for RPC server to start</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Izvršite naredbu kada se najbolji blok promjeni (%s u cmd je zamjenjen sa block hash)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Require a confirmations for change (default: 0)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Upgrade wallet to latest format</source>
<translation>Nadogradite novčanik u posljednji format.</translation>
</message>
<message>
<location line="+1"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Podesi memorijski prostor za ključeve na <n> (ugrađeni izbor: 100)</translation>
</message>
<message>
<location line="+1"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Ponovno pretraži lanac blokova za transakcije koje nedostaju</translation>
</message>
<message>
<location line="+3"/>
<source>How thorough the block verification is (0-6, default: 1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Imports blocks from external blk000?.dat file</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Keep at most <n> MiB of unconnectable blocks in memory (default: %u)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+8"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Koristi OpenSSL (https) za JSON-RPC povezivanje</translation>
</message>
<message>
<location line="+1"/>
<source>Server certificate file (default: server.cert)</source>
<translation>Uslužnikov SSL certifikat (ugrađeni izbor: server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>Uslužnikov privatni ključ (ugrađeni izbor: server.pem)</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Unsupported argument -socks found. Setting SOCKS version isn't possible anymore, only SOCKS5 proxies are supported.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+8"/>
<source>Initialization sanity check failed. Ecologicalsquare is shutting down.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+20"/>
<source>Error loading block database</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+28"/>
<source>Error: Wallet unlocked for staking only, unable to create transaction.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+16"/>
<source>Error: Disk space is low!</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-168"/>
<source>This help message</source>
<translation>Ova poruka za pomoć</translation>
</message>
<message>
<location line="+104"/>
<source>Wallet %s resides outside data directory %s.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+35"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>Program ne može koristiti %s na ovom računalu (bind returned error %d, %s)</translation>
</message>
<message>
<location line="-129"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Dozvoli DNS upite za dodavanje nodova i povezivanje</translation>
</message>
<message>
<location line="+125"/>
<source>Loading addresses...</source>
<translation>Učitavanje adresa...</translation>
</message>
<message>
<location line="-10"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Greška kod učitavanja wallet.dat: Novčanik pokvaren</translation>
</message>
<message>
<location line="+4"/>
<source>Error loading wallet.dat: Wallet requires newer version of Ecologicalsquare</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Wallet needed to be rewritten: restart Ecologicalsquare to complete</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat</source>
<translation>Greška kod učitavanja wallet.dat</translation>
</message>
<message>
<location line="-15"/>
<source>Invalid -proxy address: '%s'</source>
<translation>Nevaljala -proxy adresa: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-22"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>Nevaljali iznos za opciju -paytxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+58"/>
<source>Sending...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>Invalid amount</source>
<translation>Nevaljali iznos za opciju</translation>
</message>
<message>
<location line="+1"/>
<source>Insufficient funds</source>
<translation>Nedovoljna sredstva</translation>
</message>
<message>
<location line="-40"/>
<source>Loading block index...</source>
<translation>Učitavanje indeksa blokova...</translation>
</message>
<message>
<location line="-109"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
        <translation>Unesite nod s kojim se želite spojiti i pokušajte održati vezu otvorenom</translation>
</message>
<message>
<location line="+124"/>
<source>Unable to bind to %s on this computer. Ecologicalsquare is probably already running.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-101"/>
<source>Fee per KB to add to transactions you send</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+33"/>
<source>Minimize weight consumption (experimental) (default: 0)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+8"/>
<source>How many blocks to check at startup (default: 500, 0 = all)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+14"/>
<source>Acceptable ciphers (default: TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!3DES:@STRENGTH)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Deprecated argument -debugnet ignored, use -debug=net</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+8"/>
<source>Invalid amount for -mininput=<amount>: '%s'</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. Ecologicalsquare is probably already running.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+4"/>
<source>Error initializing wallet database environment %s!</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+15"/>
<source>Loading wallet...</source>
<translation>Učitavanje novčanika...</translation>
</message>
<message>
<location line="+8"/>
<source>Cannot downgrade wallet</source>
<translation>Nije moguće novčanik vratiti na prijašnju verziju.</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot write default address</source>
<translation>Nije moguće upisati zadanu adresu.</translation>
</message>
<message>
<location line="+1"/>
<source>Rescanning...</source>
<translation>Rescaniranje...</translation>
</message>
<message>
<location line="+2"/>
<source>Done loading</source>
<translation>Učitavanje gotovo</translation>
</message>
<message>
<location line="-159"/>
<source>To use the %s option</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+186"/>
<source>Error</source>
<translation>Greška</translation>
</message>
<message>
<location line="-18"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Postavke</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>&Glavno</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>Plati &naknadu za transakciju</translation>
</message>
<message>
<location line="+31"/>
<source>Reserved amount does not participate in staking and is therefore spendable at any time.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+15"/>
<source>Reserve</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start Ecologicalsquare after logging in to the system.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>&Start Ecologicalsquare on system login</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+21"/>
<source>&Network</source>
<translation>&Mreža</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Ecologicalsquare client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>Mapiraj port koristeći &UPnP</translation>
</message>
<message>
<location line="+19"/>
<source>Proxy &IP:</source>
<translation>Proxy &IP:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Port od proxy-a (npr. 9050)</translation>
</message>
<message>
<location line="-57"/>
<source>Connect to the Ecologicalsquare network through a SOCKS5 proxy (e.g. when connecting through Tor).</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS5 proxy:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+90"/>
<source>&Window</source>
<translation>&Prozor</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Prikaži samo ikonu u sistemskoj traci nakon minimiziranja prozora</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&Minimiziraj u sistemsku traku umjesto u traku programa</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Minimizirati umjesto izaći iz aplikacije kada je prozor zatvoren. Kada je ova opcija omogućena, aplikacija će biti zatvorena tek nakon odabira Izlaz u izborniku.</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>M&inimiziraj kod zatvaranja</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&Prikaz</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Ecologicalsquare.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>&Jedinica za prikazivanje iznosa:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Izaberite željeni najmanji dio bitcoina koji će biti prikazan u sučelju i koji će se koristiti za plaćanje.</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show coin control features or not.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Display coin &control features (experts only!)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>Whether to select the coin outputs randomly or with minimal coin age.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Minimize weight consumption (experimental)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>Use black visual theme (requires restart)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&U redu</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&Odustani</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+47"/>
<source>default</source>
<translation>standardne vrijednosti</translation>
</message>
<message>
<location line="+148"/>
<location line="+9"/>
<source>Warning</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Ecologicalsquare.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Oblik</translation>
</message>
<message>
<location line="+46"/>
<location line="+247"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Ecologicalsquare network after a connection is established, but this process has not completed yet.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-173"/>
<source>Stake:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+32"/>
<source>Unconfirmed:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-113"/>
<source>Wallet</source>
<translation>Novčanik</translation>
</message>
<message>
<location line="+49"/>
<source>Spendable:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+16"/>
<source>Your current spendable balance</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+80"/>
<source>Immature:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+23"/>
<source>Total:</source>
<translation>Ukupno:</translation>
</message>
<message>
<location line="+16"/>
<source>Your current total balance</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+50"/>
<source><b>Recent transactions</b></source>
<translation><b>Nedavne transakcije</b></translation>
</message>
<message>
<location line="-118"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-32"/>
<source>Total of coins that was staked, and do not yet count toward the current balance</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start Ecologicalsquare: click-to-pay handler</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation type="unfinished">Iznos:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
    <name>RPCConsole</name>
    <message>
        <location filename="../forms/rpcconsole.ui" line="+46"/>
        <source>Client name</source>
        <translation>Ime klijenta</translation>
    </message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<source>N/A</source>
<translation>N/A</translation>
</message>
<message>
<location line="-194"/>
<source>Client version</source>
<translation>Verzija klijenta</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>&Informacija</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>Koristim OpenSSL verziju</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Mreža</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>Broj konekcija</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>Lanac blokova</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>Trenutni broj blokova</translation>
</message>
<message>
<location line="+197"/>
<source>&Network Traffic</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+52"/>
<source>&Clear</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+13"/>
<source>Totals</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+64"/>
<source>In:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+80"/>
<source>Out:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-383"/>
<source>Last block time</source>
<translation>Posljednje vrijeme bloka</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&Otvori</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>Show the Ecologicalsquare help message to get a list with possible Ecologicalsquare command-line options.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&Konzola</translation>
</message>
<message>
<location line="-237"/>
<source>Build date</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-104"/>
<source>Ecologicalsquare - Debug window</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+25"/>
<source>Ecologicalsquare Core</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+256"/>
<source>Debug log file</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>Open the Ecologicalsquare debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>Očisti konzolu</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="+325"/>
<source>Welcome to the Ecologicalsquare RPC console.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+127"/>
<source>%1 B</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>%1 KB</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>%1 MB</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>%1 GB</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>%1 m</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>%1 h</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>%1 h %2 m</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+182"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Slanje novca</translation>
</message>
<message>
<location line="+76"/>
<source>Coin Control Features</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+20"/>
<source>Inputs...</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>automatically selected</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+19"/>
<source>Insufficient funds!</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+77"/>
<source>Quantity:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+22"/>
<location line="+35"/>
<source>0</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-19"/>
<source>Bytes:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+51"/>
<source>Amount:</source>
<translation>Iznos:</translation>
</message>
<message>
<location line="+35"/>
<source>Priority:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+19"/>
<source>medium</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+32"/>
<source>Fee:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+35"/>
<source>Low Output:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+19"/>
<source>no</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+32"/>
<source>After Fee:</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+35"/>
<source>Change</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+50"/>
<source>custom change address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+106"/>
<source>Send to multiple recipients at once</source>
<translation>Pošalji k nekoliko primatelja odjednom</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>&Dodaj primatelja</translation>
</message>
<message>
<location line="+16"/>
<source>Remove all transaction fields</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>Obriši &sve</translation>
</message>
<message>
<location line="+24"/>
<source>Balance:</source>
<translation>Stanje:</translation>
</message>
<message>
<location line="+47"/>
<source>Confirm the send action</source>
<translation>Potvrdi akciju slanja</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>&Pošalji</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-174"/>
<source>Enter a Ecologicalsquare address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+15"/>
<source>Copy quantity</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Kopiraj iznos</translation>
</message>
<message>
<location line="+1"/>
<source>Copy fee</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy after fee</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy bytes</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy priority</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy low output</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Copy change</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+87"/>
<source><b>%1</b> to %2 (%3)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Potvrdi slanje novca</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+29"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>Adresa primatelja je nevaljala, molimo provjerite je ponovo.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>Iznos mora biti veći od 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>Iznos je veći od stanja računa.</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>Iznos je veći od stanja računa kad se doda naknada za transakcije od %1.</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Pronašli smo adresu koja se ponavlja. U svakom plaćanju program može svaku adresu koristiti samo jedanput.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+247"/>
<source>WARNING: Invalid Ecologicalsquare address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+13"/>
<source>(no label)</source>
<translation>(bez oznake)</translation>
</message>
<message>
<location line="+4"/>
<source>WARNING: unknown change address</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished">Oblik</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>&Iznos:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>&Primatelj plaćanja:</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Unesite oznaku za ovu adresu kako bi ju dodali u vaš adresar</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>&Oznaka:</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Zalijepi adresu iz međuspremnika</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Ecologicalsquare address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+13"/>
<location line="+184"/>
<source>&Sign Message</source>
<translation>&Potpišite poruku</translation>
</message>
<message>
<location line="-178"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>Možete potpisati poruke sa svojom adresom kako bi dokazali da ih posjedujete. Budite oprezni da ne potpisujete ništa mutno, jer bi vas phishing napadi mogli na prevaru natjerati da prepišete svoj identitet njima. Potpisujte samo detaljno objašnjene izjave sa kojima se slažete.</translation>
</message>
<message>
<location line="+30"/>
<source>The address to sign the message with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+22"/>
<location line="+263"/>
<source>Choose an address from the address book</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-253"/>
<location line="+263"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-241"/>
<source>Paste address from clipboard</source>
<translation>Zalijepi adresu iz međuspremnika</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>Upišite poruku koju želite potpisati ovdje</translation>
</message>
<message>
<location line="+48"/>
<source>Copy the current signature to the system clipboard</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Ecologicalsquare address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+17"/>
<source>Reset all sign message fields</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<location line="+170"/>
<source>Clear &All</source>
<translation>Obriši &sve</translation>
</message>
<message>
<location line="-111"/>
<location line="+94"/>
<source>&Verify Message</source>
<translation>&Potvrdite poruku</translation>
</message>
<message>
<location line="-88"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+33"/>
<source>The address the message was signed with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+52"/>
<source>Verify the message to ensure it was signed with the specified Ecologicalsquare address</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+17"/>
<source>Reset all verify message fields</source>
<translation type="unfinished"></translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Ecologicalsquare address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Enter Ecologicalsquare signature</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+85"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>Otključavanje novčanika je otkazano.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>Poruka je potpisana.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>TrafficGraphWidget</name>
<message>
<location filename="../trafficgraphwidget.cpp" line="+75"/>
<source>KB/s</source>
<translation type="unfinished"></translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+25"/>
<source>Open until %1</source>
<translation>Otvoren do %1</translation>
</message>
<message>
<location line="+6"/>
<source>conflicted</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+2"/>
<source>%1/offline</source>
<translation>%1 nije dostupan</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/nepotvrđeno</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 potvrda</translation>
</message>
<message>
<location line="+17"/>
<source>Status</source>
<translation>Status</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Datum</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>Izvor</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>Generiran</translation>
</message>
<message>
<location line="+5"/>
<location line="+13"/>
<source>From</source>
<translation>Od</translation>
</message>
<message>
<location line="+1"/>
<location line="+19"/>
<location line="+58"/>
<source>To</source>
<translation>Za</translation>
</message>
<message>
<location line="-74"/>
<location line="+2"/>
<source>own address</source>
<translation>vlastita adresa</translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>oznaka</translation>
</message>
<message>
<location line="+34"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>Uplaćeno</translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>Nije prihvaćeno</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>Zaduženje</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>Naknada za transakciju</translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>Neto iznos</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>Poruka</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>Komentar</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>ID transakcije</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature %1 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>Transakcija</translation>
</message>
<message>
<location line="+5"/>
<source>Inputs</source>
<translation>Unosi</translation>
</message>
<message>
<location line="+21"/>
<source>Amount</source>
<translation>Iznos</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="-202"/>
<source>, has not been successfully broadcast yet</source>
<translation>, još nije bio uspješno emitiran</translation>
</message>
<message numerus="yes">
<location line="-36"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location line="+67"/>
<source>unknown</source>
<translation>nepoznato</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Detalji transakcije</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Ova panela prikazuje detaljni opis transakcije</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+231"/>
<source>Date</source>
<translation>Datum</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Tip</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Adresa</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Iznos</translation>
</message>
<message>
<location line="+52"/>
<source>Open until %1</source>
<translation>Otvoren do %1</translation>
</message>
<message>
<location line="+12"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Potvrđen (%1 potvrda)</translation>
</message>
<message numerus="yes">
<location line="-15"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished">
<numerusform></numerusform>
<numerusform></numerusform>
<numerusform></numerusform>
</translation>
</message>
<message>
<location line="+6"/>
<source>Offline</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Confirming (%1 of %2 recommended confirmations)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+6"/>
<source>Conflicted</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>Immature (%1 confirmations, will be available after %2)</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+3"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>Generirano - Upozorenje: ovaj blok nije bio primljen od strane bilo kojeg drugog noda i vjerojatno neće biti prihvaćen!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Generirano, ali nije prihvaćeno</translation>
</message>
<message>
<location line="+42"/>
<source>Received with</source>
<translation>Primljeno s</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Primljeno od</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Poslano za</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Plaćanje samom sebi</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Rudareno</translation>
</message>
<message>
<location line="+2"/>
<source>Pos Mined</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+42"/>
<source>(n/a)</source>
<translation>(n/d)</translation>
</message>
<message>
<location line="+196"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>Status transakcije</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Datum i vrijeme kad je transakcija primljena</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Vrsta transakcije.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>Odredište transakcije</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Iznos odbijen od ili dodan k saldu.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+54"/>
<location line="+17"/>
<source>All</source>
<translation>Sve</translation>
</message>
<message>
<location line="-16"/>
<source>Today</source>
<translation>Danas</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Ovaj tjedan</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Ovaj mjesec</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Prošli mjesec</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Ove godine</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Raspon...</translation>
</message>
<message>
<location line="+12"/>
<source>Received with</source>
<translation>Primljeno s</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Poslano za</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>Tebi</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Rudareno</translation>
</message>
<message>
<location line="+1"/>
<source>Pos Mined</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Ostalo</translation>
</message>
<message>
<location line="+6"/>
<source>Enter address or label to search</source>
<translation>Unesite adresu ili oznaku za pretraživanje</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Min iznos</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Kopirati adresu</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Kopirati oznaku</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Kopiraj iznos</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Izmjeniti oznaku</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+138"/>
<source>Export Transaction Data</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Datoteka podataka odvojenih zarezima (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Potvrđeno</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Datum</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Tip</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Oznaka</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Adresa</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Iznos</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"></translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>Raspon:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>za</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+212"/>
<source>Sending...</source>
<translation type="unfinished"></translation>
</message>
</context>
</TS>
pull_external.py

import yaml
import os
import shutil
import re
import toml
from typing import List, Set, Tuple, Pattern, Match
from urllib.parse import urlparse
from pathlib import Path
CHECKOUT_DIR = "checkouts"
GIT_CLONE_CMD = "git clone {{}} ./{}/{{}}/{{}}".format(CHECKOUT_DIR)
# raw strings avoid Python's "invalid escape sequence" warnings for regex escapes
RE_EXTRACT_TITLE: Pattern[str] = re.compile(r"([#\s]*)(?P<title>.*)")
RE_EXTRACT_IMAGES: Pattern[str] = re.compile(r"!\[(?P<alt>.*)\]\((?P<url>.*)\)")
RE_EXTRACT_LINKS: Pattern[str] = re.compile(
    r"\[(?P<alt>[^\]]*)\]\((?P<rel>[./]*)(?P<url>(?P<domain>https?://[a-zA-Z.0-9-]+)?(?!#)\S+)\)"
)
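# Illustrative examples (hypothetical inputs) of what these patterns capture:
#   RE_EXTRACT_IMAGES on "![diagram](img/a.png)" -> alt="diagram", url="img/a.png"
#   RE_EXTRACT_LINKS on "[docs](./guide/intro.md)" -> alt="docs", rel="./",
#   url="guide/intro.md", domain=None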
# holds the git URL and the new path for links between pulled in files
internal_links: dict = {}
config: dict = toml.load("config.toml")
def main():
    os.system("rm -rf ./{}/".format(CHECKOUT_DIR))
    yaml_external = _read_yaml("external.yaml")
    repos_to_clone: Set[str] = set()
    directories_to_create: List[str] = []
    content: dict
    for directory, content in yaml_external.items():
        directories_to_create.append(directory)
        repo = _get_repo_url_from_pull_url(content.get("source"))
        repos_to_clone.add(repo)
    _clone_repos(repos_to_clone)
    # pull_directories(yaml_external)
    generated_files = _pull_files(yaml_external)
    _generate_gitignore(generated_files)
def _read_yaml(file_name: str) -> dict:
with open(file_name, "r", encoding="utf-8") as stream:
yaml_file = yaml.safe_load(stream)
return yaml_file
def _get_repo_url_from_pull_url(url: str) -> str:
parsed = urlparse(url)
repo_owner, repo_name = _get_canonical_repo_from_url(url)
return "https://{}/{}/{}".format(parsed.netloc, repo_owner, repo_name)
def _get_canonical_repo_from_url(url: str) -> Tuple[str, str]:
parsed = urlparse(url)
repo_owner, repo_name = parsed.path[1:].split("/")[:2]
return repo_owner, repo_name
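# e.g. (illustrative) "https://github.com/some-owner/some-repo/tree/master/docs"
# -> ("some-owner", "some-repo")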
def _get_file_content(filename: str, remove_heading=False) -> Tuple[str, str]:
with open(filename, "r") as f:
raw = f.readlines()
    if not remove_heading:
        # callers always unpack a (content, heading) tuple
        return "".join(raw), None
heading = None
for i in range(len(raw)):
if raw[i].startswith("#"):
heading = RE_EXTRACT_TITLE.match(raw[i]).group("title")
heading = '"' + heading.replace('"', '\\"') + '"'
continue
        if not raw[i].startswith("#") and not raw[i].strip() == "":
            return "".join(raw[i:]), heading
    # fallback: the file contained only headings and blank lines
    return "", heading
def _generate_yaml_front_matter(front_matter: dict = {}) -> List[str]:
fm = ["---"]
for key, value in front_matter.items():
if type(value) == dict:
fm.append(yaml.dump({key: value}).strip())
else:
fm.append("{}: {}".format(key, value))
fm.append("---")
fm = [l + "\n" for l in fm]
return fm
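# Illustrative usage: _generate_yaml_front_matter({"title": '"Intro"'}) yields
# ['---\n', 'title: "Intro"\n', '---\n'], i.e. a minimal YAML front matter block.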
def _clone_repos(repos: List[str]):
for repo_url in repos:
repo_owner, repo_name = _get_canonical_repo_from_url(repo_url)
cmd = GIT_CLONE_CMD.format(repo_url, repo_owner, repo_name)
os.system(cmd)
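# e.g. (illustrative) cloning "https://github.com/some-owner/some-repo" runs:
#   git clone https://github.com/some-owner/some-repo ./checkouts/some-owner/some-repo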
# TODO: This is currently not being used
# def pull_directories(yaml_external: dict):
# content: dict
# for target_dir, content in yaml_external.items():
# pull_dir = content.get("pullDir", None)
# if not pull_dir:
# continue
# # abs_target_path = get_abs_dir_path(target_dir)
# repo_owner, repo_name = get_canonical_repo_from_url(content.get("source"))
# repo_checkout_base_path = os.path.join(CHECKOUT_DIR, repo_owner, repo_name)
# repo_checkout_pull_path = os.path.join(repo_checkout_base_path, pull_dir)
# for root, _, files in os.walk(repo_checkout_pull_path):
# for file in files:
# relative_path = os.path.join(
# root[len(repo_checkout_pull_path) + 1 :], file
# )
# copy_file(
# base_src_path=repo_checkout_base_path,
# pull_dir=pull_dir,
# rel_file_path=relative_path,
# target_dir=target_dir,
# transform_file=content.get("transform", {}).get(file, None),
# remove_heading=True,
# )
def _pull_files(yaml_external: dict) -> List[str]:
generated_files: List[str] = []
content: dict
# collects all the URLs and new file paths for the pulled in files.
# we need to do this as a prep step before processing each file
# so we can redirect internally
for target_dir, content in yaml_external.items():
for rel_file in content.get("pullFiles", []):
full_url = "{}/blob/master/{}".format(content.get("source"), rel_file)
file_name = os.path.basename(rel_file)
file_path = os.path.join(target_dir, file_name)
rel_path_to_target_file = ("".join(file_path.split(".")[:-1])).lower()
internal_links[full_url] = "/docs/latest/{}/".format(
rel_path_to_target_file
)
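    # e.g. (illustrative) pulling "docs/intro.md" from source
    # "https://github.com/some-owner/some-repo" into target dir "tutorials" maps
    # "https://github.com/some-owner/some-repo/blob/master/docs/intro.md"
    # -> "/docs/latest/tutorials/intro/"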
for target_dir, content in yaml_external.items():
pull_files: List[str] = content.get("pullFiles", [])
repo_owner, repo_name = _get_canonical_repo_from_url(content.get("source"))
# processes and copies content from the git checkout to the desired location
        for rel_file in pull_files:
            filename = os.path.basename(rel_file)
            # this is the repository checkout directory, not a path to a file
            abs_path_to_repo_checkout_dir = os.path.abspath(
                os.path.join(CHECKOUT_DIR, repo_owner, repo_name)
            )
            abs_path_to_target_file = _copy_file(
                abs_path_to_repo_checkout_dir=abs_path_to_repo_checkout_dir,
                rel_path_to_source_file=rel_file,
                target_dir=target_dir,
                transform_file=content.get("transform", {}).get(filename, None),
                remove_heading=True,
                repo_owner=repo_owner,
                repo_name=repo_name,
            )
generated_files.append(abs_path_to_target_file)
return generated_files
def _generate_gitignore(paths_to_include: List[str]):
with open("content/.gitignore", "w") as gitignore:
gitignore.write("# THIS FILE IS AUTO-GENERATED. DO NOT MODIFY BY HAND\n\n")
for f in paths_to_include:
gitignore.write(os.path.relpath(f, "content") + "\n")
def _copy_file(
abs_path_to_repo_checkout_dir: str,
rel_path_to_source_file: str,
target_dir: str,
transform_file: dict = {},
remove_heading: bool = True,
repo_owner: str = "",
repo_name: str = "",
) -> str:
file_name = os.path.basename(rel_path_to_source_file)
rel_path_to_target_file = os.path.join(target_dir, file_name)
abs_path_to_target_file = os.path.abspath(
os.path.join("content/docs/latest", rel_path_to_target_file)
)
path_to_target_file = Path(os.path.dirname(abs_path_to_target_file))
path_to_target_file.mkdir(parents=True, exist_ok=True)
# we just copy files that aren't markdown
abs_path_to_source_file = os.path.join(
abs_path_to_repo_checkout_dir, rel_path_to_source_file
)
    if os.path.splitext(abs_path_to_target_file)[1] != ".md":
        shutil.copyfile(abs_path_to_source_file, abs_path_to_target_file)
        # return the target path so it still ends up in the generated .gitignore
        return abs_path_to_target_file
# copy file content
with open(abs_path_to_target_file, "w") as target_file:
content, heading = _get_file_content(abs_path_to_source_file, remove_heading)
        front_matter = None
        if heading:
            front_matter = {"title": heading}
        if transform_file:
            # merge, tolerating the case where no heading was extracted
            front_matter = {**(front_matter or {}), **transform_file.get("frontMatter", {})}
if front_matter:
target_file.writelines(_generate_yaml_front_matter(front_matter))
final_content = _process_content(
content=content,
abs_path_to_source_dir=abs_path_to_repo_checkout_dir,
rel_path_to_source_file=rel_path_to_source_file,
repo_owner=repo_owner,
repo_name=repo_name,
)
target_file.write(final_content)
return abs_path_to_target_file
def _process_content(
content: str,
abs_path_to_source_dir: str,
rel_path_to_source_file: str,
repo_owner: str,
repo_name: str,
):
def repl_images(m: Match[str]):
url = m.group("url")
alt = m.group("alt")
new_url = _copy_asset(
url_path=url,
abs_path_to_source_dir=abs_path_to_source_dir,
rel_path_to_source_file=rel_path_to_source_file,
repo_owner=repo_owner,
repo_name=repo_name,
)
figure = '{{{{< figure src="{}" caption="{}" width="100" >}}}}'.format(
new_url, alt
)
return figure
def repl_links(m: Match[str]):
alt = m.group("alt")
rel = m.group("rel")
url = m.group("url")
domain = m.group("domain")
rel_url = url
# this is an external url
if domain is not None:
# that points to this site
if domain == config.get("publicUrl"):
new_url = url[len(config.get("publicUrl")) :]
else:
new_url = url
# this is an internal relative url
else:
if rel == "/":
rel_url = url
elif rel == "./" or rel == "":
rel_url = os.path.join(os.path.dirname(rel_path_to_source_file), url)
new_url = "https://github.com/{}/{}/blob/master/{}".format(
repo_owner, repo_name, rel_url
)
# if this file has been already pulled in, we can use the new internal URL
# instead of pointing to the original github location
if new_url in internal_links:
new_url = internal_links[new_url]
new_link = "[{}]({})".format(alt, new_url)
return new_link
parsed_content = RE_EXTRACT_IMAGES.sub(repl_images, content)
parsed_content = RE_EXTRACT_LINKS.sub(repl_links, parsed_content)
return parsed_content
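# Illustrative effect (hypothetical input): "[guide](./guide.md)" inside
# "docs/intro.md" of some-owner/some-repo is rewritten to
# "https://github.com/some-owner/some-repo/blob/master/docs/guide.md", unless
# that file was pulled in as well, in which case its internal URL is used.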
def _copy_asset(
url_path: str,
abs_path_to_source_dir: str,
rel_path_to_source_file: str,
repo_owner: str,
repo_name: str,
) -> str:
if url_path.startswith(os.path.sep):
rel_path_to_asset = url_path[1:]
else:
rel_path_to_source_dir = os.path.dirname(rel_path_to_source_file)
rel_path_to_asset = os.path.join(rel_path_to_source_dir, url_path)
path_to_source_asset = os.path.join(abs_path_to_source_dir, rel_path_to_asset)
path_to_target_asset = Path(
os.path.join("./static/img/checkouts", repo_owner, repo_name, rel_path_to_asset)
)
path_to_target_asset.parent.mkdir(parents=True, exist_ok=True)
shutil.copyfile(path_to_source_asset, path_to_target_asset.absolute())
rel_target_url_path = os.path.join(
"/img/checkouts", repo_owner, repo_name, rel_path_to_asset
)
return rel_target_url_path
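# e.g. (illustrative) an asset "assets/arch.png" referenced from "docs/intro.md"
# of some-owner/some-repo is copied to
# "static/img/checkouts/some-owner/some-repo/docs/assets/arch.png" and the
# returned URL path is "/img/checkouts/some-owner/some-repo/docs/assets/arch.png".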
if __name__ == "__main__":
main()
runtime.rs

//! A Rust interface for the functionality of the Objective-C runtime.
//!
//! For more information on foreign functions, see Apple's documentation:
//! <https://developer.apple.com/library/mac/documentation/Cocoa/Reference/ObjCRuntimeRef/index.html>
use core::ffi::c_void;
use core::fmt;
use core::hash;
use core::panic::{RefUnwindSafe, UnwindSafe};
use core::ptr;
use core::str;
#[cfg(feature = "malloc")]
use malloc_buf::Malloc;
use std::ffi::{CStr, CString};
#[cfg(feature = "malloc")]
use std::os::raw::c_uint;
pub use super::bool::Bool;
use crate::{ffi, Encode, Encoding, RefEncode};
/// Use [`Bool`] or [`ffi::BOOL`] instead.
#[deprecated = "Use `Bool` or `ffi::BOOL` instead"]
#[allow(non_upper_case_globals)]
pub type BOOL = ffi::BOOL;
/// Use [`Bool::YES`] or [`ffi::YES`] instead.
#[deprecated = "Use `Bool::YES` or `ffi::YES` instead"]
pub const YES: ffi::BOOL = ffi::YES;
/// Use [`Bool::NO`] or [`ffi::NO`] instead.
#[deprecated = "Use `Bool::NO` or `ffi::NO` instead"]
pub const NO: ffi::BOOL = ffi::NO;
/// A type that represents a method selector.
#[repr(transparent)]
// ffi::sel_isEqual is just pointer comparison, so just generate PartialEq
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct Sel {
ptr: *const ffi::objc_selector,
}
/// A type that represents an instance variable.
#[repr(C)]
pub struct Ivar(ffi::objc_ivar);
/// A type that represents a method in a class definition.
#[repr(C)]
pub struct Method(ffi::objc_method);
/// A type that represents an Objective-C class.
#[repr(C)]
pub struct Class(ffi::objc_class);
/// A type that represents an Objective-C protocol.
#[repr(C)]
pub struct Protocol(ffi::objc_protocol);
macro_rules! standard_pointer_impls {
($($name:ident),*) => {
$(
impl PartialEq for $name {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.as_ptr() == other.as_ptr()
}
}
impl Eq for $name {}
impl hash::Hash for $name {
#[inline]
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.as_ptr().hash(state)
}
}
)*
}
}
// Implement PartialEq, Eq and Hash using pointer semantics; there's not
// really a better way to do it.
standard_pointer_impls!(Ivar, Method, Class);
/// A type that represents an instance of a class.
///
/// Note: This is intentionally neither [`Sync`], [`Send`], [`UnwindSafe`],
/// [`RefUnwindSafe`] nor [`Unpin`], since it is something that changes
/// depending on the specific subclass.
///
/// Examples: `NSAutoreleasePool` is not `Send`, since it has to be
/// deallocated on the same thread on which it was created. `NSLock` is not
/// `Send` either.
#[repr(C)]
pub struct Object(ffi::objc_object);
/// A pointer to the start of a method implementation.
pub type Imp = unsafe extern "C" fn();
impl Sel {
/// Registers a method with the Objective-C runtime system,
/// maps the method name to a selector, and returns the selector value.
pub fn register(name: &str) -> Self {
let name = CString::new(name).unwrap();
Self {
ptr: unsafe { ffi::sel_registerName(name.as_ptr()) },
}
}
/// Returns the name of the method specified by self.
pub fn name(&self) -> &str {
let name = unsafe { CStr::from_ptr(ffi::sel_getName(self.ptr)) };
str::from_utf8(name.to_bytes()).unwrap()
}
/// Wraps a raw pointer to a selector into a [`Sel`] object.
///
/// # Safety
///
/// The pointer must be a valid, registered selector.
///
/// This is almost never what you want; use [`Sel::register`] instead.
#[inline]
pub unsafe fn from_ptr(ptr: *const c_void) -> Self {
Self {
ptr: ptr as *const _,
}
}
/// Returns a pointer to the raw selector.
#[inline]
pub fn as_ptr(&self) -> *const c_void {
self.ptr as *const _
}
}
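// Illustrative sketch, not from the original source: registering a selector
// and using it for a method lookup. "NSObject" is an assumption that only
// holds when an Objective-C runtime with Foundation loaded is available.
#[allow(dead_code)]
fn _sel_usage_sketch() {
    let sel = Sel::register("description");
    assert_eq!(sel.name(), "description");
    if let Some(cls) = Class::get("NSObject") {
        // A registered selector can be used to look up a method on a class.
        let _method = cls.instance_method(sel);
    }
}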
unsafe impl Encode for Sel {
const ENCODING: Encoding<'static> = Encoding::Sel;
}
// RefEncode is not implemented for Sel, because there is literally no API
// that takes &Sel, but the user could easily get confused and accidentally
// attempt that.
// SAFETY: Sel is immutable (and can be retrieved from any thread using the
// `sel!` macro).
unsafe impl Sync for Sel {}
unsafe impl Send for Sel {}
impl UnwindSafe for Sel {}
impl RefUnwindSafe for Sel {}
impl fmt::Debug for Sel {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.name())
}
}
impl fmt::Pointer for Sel {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Pointer::fmt(&self.ptr, f)
}
}
impl Ivar {
pub(crate) fn as_ptr(&self) -> *const ffi::objc_ivar {
self as *const Self as *const _
}
/// Returns the name of self.
pub fn name(&self) -> &str {
let name = unsafe { CStr::from_ptr(ffi::ivar_getName(self.as_ptr())) };
str::from_utf8(name.to_bytes()).unwrap()
}
/// Returns the offset of self.
pub fn offset(&self) -> isize {
let offset = unsafe { ffi::ivar_getOffset(self.as_ptr()) };
offset as isize
}
/// Returns the `Encoding` of self.
pub fn type_encoding(&self) -> &str {
let encoding = unsafe { CStr::from_ptr(ffi::ivar_getTypeEncoding(self.as_ptr())) };
str::from_utf8(encoding.to_bytes()).unwrap()
}
}
// SAFETY: Ivar is immutable (and can be retrieved from Class anyhow).
unsafe impl Sync for Ivar {}
unsafe impl Send for Ivar {}
impl UnwindSafe for Ivar {}
impl RefUnwindSafe for Ivar {}
impl Method {
pub(crate) fn as_ptr(&self) -> *const ffi::objc_method {
self as *const Self as *const _
}
/// Returns the name of self.
pub fn name(&self) -> Sel {
Sel {
ptr: unsafe { ffi::method_getName(self.as_ptr()) },
}
}
/// Returns the `Encoding` of self's return type.
#[cfg(feature = "malloc")]
pub fn return_type(&self) -> Malloc<str> {
unsafe {
let encoding = ffi::method_copyReturnType(self.as_ptr());
Malloc::from_c_str(encoding).unwrap()
}
}
/// Returns the `Encoding` of a single parameter type of self, or
/// [`None`] if self has no parameter at the given index.
#[cfg(feature = "malloc")]
pub fn argument_type(&self, index: usize) -> Option<Malloc<str>> {
unsafe {
let encoding = ffi::method_copyArgumentType(self.as_ptr(), index as c_uint);
ptr::NonNull::new(encoding)
.map(|encoding| Malloc::from_c_str(encoding.as_ptr()).unwrap())
}
}
// method_getTypeEncoding, efficient version of:
// -> return_type() + sum(argument_type(i) for i in arguments_count())
/// Returns the number of arguments accepted by self.
pub fn arguments_count(&self) -> usize {
unsafe { ffi::method_getNumberOfArguments(self.as_ptr()) as usize }
}
/// Returns the implementation of self.
pub fn implementation(&self) -> Imp {
unsafe { ffi::method_getImplementation(self.as_ptr()).expect("Null IMP") }
}
// unsafe fn set_implementation(&mut self, imp: Imp) -> Imp;
// unsafe fn exchange_implementation(&mut self, other: &mut Method);
}
// SAFETY: Method is immutable (and can be retrieved from Class anyhow).
unsafe impl Sync for Method {}
unsafe impl Send for Method {}
impl UnwindSafe for Method {}
impl RefUnwindSafe for Method {}
impl Class {
pub(crate) fn as_ptr(&self) -> *const ffi::objc_class {
self as *const Self as *const _
}
/// Returns the class definition of a specified class, or [`None`] if the
/// class is not registered with the Objective-C runtime.
pub fn get(name: &str) -> Option<&'static Self> {
let name = CString::new(name).unwrap();
let cls = unsafe { ffi::objc_getClass(name.as_ptr()) };
unsafe { cls.cast::<Self>().as_ref() }
}
// Same as `get`, but ...
// fn lookup(name: &str) -> Option<&'static Self>;
/// Obtains the list of registered class definitions.
#[cfg(feature = "malloc")]
pub fn classes() -> Malloc<[&'static Self]> {
unsafe {
let mut count: c_uint = 0;
let classes = ffi::objc_copyClassList(&mut count);
Malloc::from_array(classes as *mut _, count as usize)
}
}
/// Returns the total number of registered classes.
pub fn classes_count() -> usize {
unsafe { ffi::objc_getClassList(ptr::null_mut(), 0) as usize }
}
/// Returns the name of the class.
pub fn name(&self) -> &str {
let name = unsafe { CStr::from_ptr(ffi::class_getName(self.as_ptr())) };
str::from_utf8(name.to_bytes()).unwrap()
}
/// Returns the superclass of self, or [`None`] if self is a root class.
pub fn superclass(&self) -> Option<&Class> {
unsafe {
let superclass = ffi::class_getSuperclass(self.as_ptr());
superclass.cast::<Class>().as_ref()
}
}
/// Returns the metaclass of self.
pub fn metaclass(&self) -> &Self {
unsafe { &*(ffi::object_getClass(self.as_ptr() as *const _) as *const Self) }
}
// objc_getMetaClass -> Same as `Class::get(name).metaclass()`
#[allow(unused)]
fn is_metaclass(&self) -> bool {
unsafe { Bool::from_raw(ffi::class_isMetaClass(self.as_ptr())).as_bool() }
}
/// Returns the size of instances of self.
pub fn instance_size(&self) -> usize {
unsafe { ffi::class_getInstanceSize(self.as_ptr()) as usize }
}
/// Returns a specified instance method for self, or [`None`] if self and
/// its superclasses do not contain an instance method with the specified
/// selector.
pub fn instance_method(&self, sel: Sel) -> Option<&Method> {
unsafe {
let method = ffi::class_getInstanceMethod(self.as_ptr(), sel.ptr);
method.cast::<Method>().as_ref()
}
}
// fn class_method(&self, sel: Sel) -> Option<&Method>;
/// Returns the ivar for a specified instance variable of self, or
/// [`None`] if self has no ivar with the given name.
pub fn instance_variable(&self, name: &str) -> Option<&Ivar> {
let name = CString::new(name).unwrap();
unsafe {
let ivar = ffi::class_getInstanceVariable(self.as_ptr(), name.as_ptr());
ivar.cast::<Ivar>().as_ref()
}
}
#[allow(unused)]
fn instance_variable_layout(&self) -> Option<&[u8]> {
let layout = unsafe { ffi::class_getIvarLayout(self.as_ptr()) };
if layout.is_null() {
None
} else {
Some(unsafe { CStr::from_ptr(layout as *const _) }.to_bytes())
}
}
#[allow(unused)]
fn class_variable(&self, name: &str) -> Option<&Ivar> {
let name = CString::new(name).unwrap();
let ivar = unsafe { ffi::class_getClassVariable(self.as_ptr(), name.as_ptr()) };
// SAFETY: TODO
unsafe { ivar.cast::<Ivar>().as_ref() }
}
/// Describes the instance methods implemented by self.
#[cfg(feature = "malloc")]
pub fn instance_methods(&self) -> Malloc<[&Method]> {
unsafe {
let mut count: c_uint = 0;
let methods = ffi::class_copyMethodList(self.as_ptr(), &mut count);
Malloc::from_array(methods as *mut _, count as usize)
}
}
/// Checks whether this class conforms to the specified protocol.
pub fn conforms_to(&self, proto: &Protocol) -> bool {
unsafe {
Bool::from_raw(ffi::class_conformsToProtocol(self.as_ptr(), proto.as_ptr())).as_bool()
}
}
/// Get a list of the protocols to which this class conforms.
#[cfg(feature = "malloc")]
pub fn adopted_protocols(&self) -> Malloc<[&Protocol]> {
unsafe {
let mut count: c_uint = 0;
let protos = ffi::class_copyProtocolList(self.as_ptr(), &mut count);
Malloc::from_array(protos as *mut _, count as usize)
}
}
/// Describes the instance variables declared by self.
#[cfg(feature = "malloc")]
pub fn | (&self) -> Malloc<[&Ivar]> {
unsafe {
let mut count: c_uint = 0;
let ivars = ffi::class_copyIvarList(self.as_ptr(), &mut count);
Malloc::from_array(ivars as *mut _, count as usize)
}
}
// fn property(&self, name: &str) -> Option<&Property>;
// fn properties(&self) -> Malloc<[&Property]>;
// unsafe fn replace_method(&self, name: Sel, imp: Imp, types: &str) -> Imp;
// unsafe fn replace_property(&self, name: &str, attributes: &[ffi::objc_property_attribute_t]);
// unsafe fn set_ivar_layout(&mut self, layout: &[u8]);
// fn method_imp(&self, name: Sel) -> Imp; // + _stret
// fn responds_to(&self, sel: Sel) -> bool;
// fn get_version(&self) -> u32;
// unsafe fn set_version(&mut self, version: u32);
}
// SAFETY: Class is immutable (and can be retrieved from any thread using the
// `class!` macro).
unsafe impl Sync for Class {}
unsafe impl Send for Class {}
impl UnwindSafe for Class {}
impl RefUnwindSafe for Class {}
// Note that Unpin is not applicable.
unsafe impl RefEncode for Class {
const ENCODING_REF: Encoding<'static> = Encoding::Class;
}
impl fmt::Debug for Class {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.name())
}
}
impl Protocol {
pub(crate) fn as_ptr(&self) -> *const ffi::objc_protocol {
self as *const Self as *const _
}
/// Returns the protocol definition of a specified protocol, or [`None`]
/// if the protocol is not registered with the Objective-C runtime.
pub fn get(name: &str) -> Option<&'static Protocol> {
let name = CString::new(name).unwrap();
unsafe {
let proto = ffi::objc_getProtocol(name.as_ptr());
proto.cast::<Self>().as_ref()
}
}
/// Obtains the list of registered protocol definitions.
#[cfg(feature = "malloc")]
pub fn protocols() -> Malloc<[&'static Protocol]> {
unsafe {
let mut count: c_uint = 0;
let protocols = ffi::objc_copyProtocolList(&mut count);
Malloc::from_array(protocols as *mut _, count as usize)
}
}
/// Get a list of the protocols to which this protocol conforms.
#[cfg(feature = "malloc")]
pub fn adopted_protocols(&self) -> Malloc<[&Protocol]> {
unsafe {
let mut count: c_uint = 0;
let protocols = ffi::protocol_copyProtocolList(self.as_ptr(), &mut count);
Malloc::from_array(protocols as *mut _, count as usize)
}
}
/// Checks whether this protocol conforms to the specified protocol.
pub fn conforms_to(&self, proto: &Protocol) -> bool {
unsafe {
Bool::from_raw(ffi::protocol_conformsToProtocol(
self.as_ptr(),
proto.as_ptr(),
))
.as_bool()
}
}
/// Returns the name of self.
pub fn name(&self) -> &str {
let name = unsafe { CStr::from_ptr(ffi::protocol_getName(self.as_ptr())) };
str::from_utf8(name.to_bytes()).unwrap()
}
}
impl PartialEq for Protocol {
/// Check whether the protocols are equal, or conform to each other.
#[inline]
fn eq(&self, other: &Protocol) -> bool {
unsafe { Bool::from_raw(ffi::protocol_isEqual(self.as_ptr(), other.as_ptr())).as_bool() }
}
}
unsafe impl RefEncode for Protocol {
// Protocol is an object internally
const ENCODING_REF: Encoding<'static> = Encoding::Object;
}
impl Eq for Protocol {}
impl fmt::Debug for Protocol {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.name())
}
}
// SAFETY: Protocol is immutable (and can be retrieved from Class anyhow).
unsafe impl Sync for Protocol {}
unsafe impl Send for Protocol {}
impl UnwindSafe for Protocol {}
impl RefUnwindSafe for Protocol {}
// Note that Unpin is not applicable.
fn ivar_offset<T: Encode>(cls: &Class, name: &str) -> isize {
match cls.instance_variable(name) {
Some(ivar) => {
assert!(T::ENCODING.equivalent_to_str(ivar.type_encoding()));
ivar.offset()
}
None => panic!("Ivar {} not found on class {:?}", name, cls),
}
}
impl Object {
pub(crate) fn as_ptr(&self) -> *const ffi::objc_object {
self as *const Self as *const _
}
/// Dynamically find the class of this object.
pub fn class(&self) -> &Class {
unsafe { &*(ffi::object_getClass(self.as_ptr()) as *const Class) }
}
/// Returns a shared reference to the ivar with the given name.
///
/// # Panics
///
/// Panics if the object has no ivar with the given name, or the type
/// encoding of the ivar differs from the type encoding of `T`.
///
/// # Safety
///
/// The caller must ensure that the ivar is actually of type `T`.
///
/// Library implementors should expose a safe interface to the ivar.
pub unsafe fn ivar<T: Encode>(&self, name: &str) -> &T {
let offset = ivar_offset::<T>(self.class(), name);
// `offset` is given in bytes, so we work with a `*const u8` pointer
let ptr = self as *const Self as *const u8;
let ptr = unsafe { ptr.offset(offset) } as *const T;
unsafe { &*ptr }
}
/// Use [`ivar`][`Self::ivar`] instead.
///
/// # Safety
///
/// Same as [`ivar`][`Self::ivar`].
#[deprecated = "Use `Object::ivar` instead."]
pub unsafe fn get_ivar<T: Encode>(&self, name: &str) -> &T {
// SAFETY: Upheld by caller
unsafe { self.ivar(name) }
}
/// Returns a mutable reference to the ivar with the given name.
///
/// # Panics
///
/// Panics if the object has no ivar with the given name, or the type
/// encoding of the ivar differs from the type encoding of `T`.
///
/// # Safety
///
/// The caller must ensure that the ivar is actually of type `T`.
///
/// Library implementors should expose a safe interface to the ivar.
pub unsafe fn ivar_mut<T: Encode>(&mut self, name: &str) -> &mut T {
let offset = ivar_offset::<T>(self.class(), name);
// `offset` is given in bytes, so we work with a `*mut u8` pointer
let ptr = self as *mut Self as *mut u8;
let ptr = unsafe { ptr.offset(offset) } as *mut T;
unsafe { &mut *ptr }
}
/// Use [`ivar_mut`](`Self::ivar_mut`) instead.
///
/// # Safety
///
/// Same as [`ivar_mut`][`Self::ivar_mut`].
#[deprecated = "Use `Object::ivar_mut` instead."]
pub unsafe fn get_mut_ivar<T: Encode>(&mut self, name: &str) -> &mut T {
// SAFETY: Upheld by caller
unsafe { self.ivar_mut(name) }
}
/// Sets the value of the ivar with the given name.
///
/// # Panics
///
/// Panics if the object has no ivar with the given name, or the type
/// encoding of the ivar differs from the type encoding of `T`.
///
/// # Safety
///
/// The caller must ensure that the ivar is actually of type `T`.
///
/// Library implementors should expose a safe interface to the ivar.
pub unsafe fn set_ivar<T: Encode>(&mut self, name: &str, value: T) {
// SAFETY: Invariants upheld by caller
unsafe { *self.ivar_mut::<T>(name) = value };
}
// objc_setAssociatedObject
// objc_getAssociatedObject
// objc_removeAssociatedObjects
}
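// Minimal sketch of the accessors above (hypothetical ivar layout): the
// caller must guarantee that `_foo` exists on the object's class and really
// is a `u32`, as documented on `ivar` and `set_ivar`.
#[allow(dead_code)]
unsafe fn _ivar_usage_sketch(obj: &mut Object) {
    // SAFETY: assumed upheld by the (hypothetical) caller.
    unsafe { obj.set_ivar::<u32>("_foo", 7) };
    assert_eq!(unsafe { *obj.ivar::<u32>("_foo") }, 7);
}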
unsafe impl RefEncode for Object {
const ENCODING_REF: Encoding<'static> = Encoding::Object;
}
impl fmt::Debug for Object {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "<{:?}: {:p}>", self.class(), self.as_ptr())
}
}
#[cfg(test)]
mod tests {
use alloc::string::ToString;
use super::{Bool, Class, Imp, Ivar, Method, Object, Protocol, Sel};
use crate::test_utils;
use crate::Encode;
#[test]
fn test_selector() {
macro_rules! test_sel {
($s:literal, $($tt:tt)+) => {{
let sel = sel!($($tt)*);
let expected = Sel::register($s);
assert_eq!(sel, expected);
assert_eq!(sel.name(), $s);
}}
}
test_sel!("abc", abc);
test_sel!("abc:", abc:);
test_sel!("abc:def:", abc:def:);
}
#[test]
fn test_empty_selector() {
let sel = Sel::register("");
assert_eq!(sel.name(), "");
let sel = Sel::register(":");
assert_eq!(sel.name(), ":");
}
#[test]
#[should_panic]
fn test_sel_register_null() {
let _ = Sel::register("\0");
}
#[test]
fn test_ivar() {
let cls = test_utils::custom_class();
let ivar = cls.instance_variable("_foo").unwrap();
assert_eq!(ivar.name(), "_foo");
assert!(<u32>::ENCODING.equivalent_to_str(ivar.type_encoding()));
assert!(ivar.offset() > 0);
#[cfg(feature = "malloc")]
assert!(cls.instance_variables().len() > 0);
}
#[test]
fn test_method() {
let cls = test_utils::custom_class();
let sel = Sel::register("foo");
let method = cls.instance_method(sel).unwrap();
assert_eq!(method.name().name(), "foo");
assert_eq!(method.arguments_count(), 2);
#[cfg(feature = "malloc")]
{
assert!(<u32>::ENCODING.equivalent_to_str(&method.return_type()));
assert!(Sel::ENCODING.equivalent_to_str(&method.argument_type(1).unwrap()));
let methods = cls.instance_methods();
assert!(methods.len() > 0);
}
}
#[test]
fn test_class() {
let cls = test_utils::custom_class();
assert_eq!(cls.name(), "CustomObject");
assert!(cls.instance_size() > 0);
assert!(cls.superclass().is_none());
assert_eq!(Class::get(cls.name()), Some(cls));
let metaclass = cls.metaclass();
// The metaclass of a root class is a subclass of the root class
assert_eq!(metaclass.superclass().unwrap(), cls);
let subclass = test_utils::custom_subclass();
assert_eq!(subclass.superclass().unwrap(), cls);
}
#[test]
fn test_classes_count() {
assert!(Class::classes_count() > 0);
}
#[test]
#[cfg(feature = "malloc")]
fn test_classes() {
let classes = Class::classes();
assert!(classes.len() > 0);
}
#[test]
fn test_protocol() {
let proto = test_utils::custom_protocol();
assert_eq!(proto.name(), "CustomProtocol");
let class = test_utils::custom_class();
assert!(class.conforms_to(proto));
#[cfg(feature = "malloc")]
assert!(class.adopted_protocols().len() > 0);
}
#[test]
fn test_protocol_method() {
let class = test_utils::custom_class();
let result: i32 = unsafe { msg_send![class, addNumber: 1, toNumber: 2] };
assert_eq!(result, 3);
}
#[test]
fn test_subprotocols() {
let sub_proto = test_utils::custom_subprotocol();
let super_proto = test_utils::custom_protocol();
assert!(sub_proto.conforms_to(super_proto));
#[cfg(feature = "malloc")]
assert_eq!(sub_proto.adopted_protocols()[0], super_proto);
}
#[test]
fn test_protocols() {
// Ensure that a protocol has been registered on linux
let _ = test_utils::custom_protocol();
#[cfg(feature = "malloc")]
assert!(Protocol::protocols().len() > 0);
}
#[test]
fn test_object() {
let mut obj = test_utils::custom_object();
assert_eq!(obj.class(), test_utils::custom_class());
let result: u32 = unsafe {
obj.set_ivar("_foo", 4u32);
*obj.ivar("_foo")
};
assert_eq!(result, 4);
}
#[test]
fn test_encode() {
fn assert_enc<T: Encode>(expected: &str) {
assert_eq!(&T::ENCODING.to_string(), expected);
}
assert_enc::<&Object>("@");
assert_enc::<*mut Object>("@");
assert_enc::<&Class>("#");
assert_enc::<Sel>(":");
assert_enc::<Imp>("^?");
assert_enc::<Option<Imp>>("^?");
assert_enc::<&Protocol>("@");
}
#[test]
fn test_send_sync() {
fn assert_send_sync<T: Send + Sync + ?Sized>() {}
assert_send_sync::<Bool>();
assert_send_sync::<Class>();
assert_send_sync::<Ivar>();
assert_send_sync::<Method>();
assert_send_sync::<Protocol>();
assert_send_sync::<Sel>();
}
}
| instance_variables |
PaypalStragtegy.ts | import IPaymentStrategy from '../Configuration/IPaymentStrategy';
class PaypalStrategy implements IPaymentStrategy {
pay(): void {
console.log('Implementing the PayPal strategy right now...'); | }
}
export default PaypalStrategy; | return; |
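// Usage sketch (illustrative): strategies are interchangeable at the call site.
//
//   const strategy: IPaymentStrategy = new PaypalStrategy();
//   strategy.pay();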
node_editor_widget.py | import logging
import os
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from nodeeditor.node_edge import Edge, EDGE_TYPE_BEZIER
from nodeeditor.node_graphics_view import QDMGraphicsView
from nodeeditor.node_node import Node
from nodeeditor.node_scene import Scene, InvalidFile
class NodeEditorWidget(QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.filename = None
self.initUI()
def initUI(self):
self.layout = QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.layout)
# create graphics scene
self.scene = Scene()
# create graphics view
self.view = QDMGraphicsView(self.scene.grScene, self)
self.layout.addWidget(self.view)
def isModified(self):
return self.scene.isModified()
def isFilenameSet(self):
return self.filename is not None
def getSelectedItems(self):
return self.scene.getSelectedItems()
def hasSelectedItems(self):
return self.getSelectedItems() != []
def canUndo(self):
return self.scene.history.canUndo()
def canRedo(self):
return self.scene.history.canRedo()
def getUserFriendlyFilename(self):
name = os.path.basename(self.filename) if self.isFilenameSet() else "New Graph"
return name + ("*" if self.isModified() else "")
def fileNew(self):
self.scene.clear()
self.filename = None
self.scene.history.clear()
self.scene.history.storeInitialHistoryStamp()
def fileLoad(self, filename):
QApplication.setOverrideCursor(Qt.WaitCursor)
try: | self.scene.history.clear()
self.scene.history.storeInitialHistoryStamp()
return True
except InvalidFile as e:
logging.error(e)
QApplication.restoreOverrideCursor()
QMessageBox.warning(self, "Error loading %s" % os.path.basename(filename), str(e))
return False
finally:
QApplication.restoreOverrideCursor()
def fileSave(self, filename=None):
# when called with empty parameter, we won't store the filename
if filename is not None: self.filename = filename
QApplication.setOverrideCursor(Qt.WaitCursor)
self.scene.saveToFile(self.filename)
QApplication.restoreOverrideCursor()
return True | self.scene.loadFromFile(filename)
self.filename = filename |
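# Usage sketch (illustrative only; the file name is an assumption and a
# running QApplication is required):
#
#   widget = NodeEditorWidget()
#   if widget.fileLoad("graph.json"):
#       widget.fileSave()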
grpc_connector_benchmark_test.go | package plumbing_test
import (
"io/ioutil"
"log"
"net"
"testing"
"time"
"code.cloudfoundry.org/loggregator/metricemitter/testhelper"
"code.cloudfoundry.org/loggregator/plumbing"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/grpclog"
)
func BenchmarkGRPCConnectorParallel(b *testing.B) {
log.SetOutput(ioutil.Discard)
grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
spyDopplerA := newSpyDoppler()
spyDopplerB := newSpyDoppler()
pool := plumbing.NewPool(20, grpc.WithInsecure())
finder := newMockFinder()
finder.NextOutput.Ret0 <- plumbing.Event{
GRPCDopplers: []string{
spyDopplerA.addr.String(),
spyDopplerB.addr.String(),
},
}
metricClient := testhelper.NewMetricClient()
connector := plumbing.NewGRPCConnector(5, pool, finder, metricClient)
time.Sleep(2 * time.Second)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
var data [][]byte
for pb.Next() {
data = connector.ContainerMetrics(context.Background(), "an-app-id")
}
_ = data
})
}
type spyDoppler struct {
addr net.Addr
grpcServer *grpc.Server
}
func newSpyDoppler() *spyDoppler |
func (m *spyDoppler) Subscribe(r *plumbing.SubscriptionRequest, s plumbing.Doppler_SubscribeServer) error {
<-s.Context().Done()
return nil
}
func (m *spyDoppler) BatchSubscribe(r *plumbing.SubscriptionRequest, s plumbing.Doppler_BatchSubscribeServer) error {
<-s.Context().Done()
return nil
}
func (m *spyDoppler) ContainerMetrics(context.Context, *plumbing.ContainerMetricsRequest) (*plumbing.ContainerMetricsResponse, error) {
return &plumbing.ContainerMetricsResponse{
Payload: [][]byte{},
}, nil
}
func (m *spyDoppler) RecentLogs(context.Context, *plumbing.RecentLogsRequest) (*plumbing.RecentLogsResponse, error) {
return &plumbing.RecentLogsResponse{
Payload: [][]byte{},
}, nil
}
func (m *spyDoppler) Stop() {
m.grpcServer.Stop()
}
| {
lis, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
panic(err)
}
spy := &spyDoppler{
addr: lis.Addr(),
grpcServer: grpc.NewServer(),
}
plumbing.RegisterDopplerServer(spy.grpcServer, spy)
go func() {
log.Println(spy.grpcServer.Serve(lis))
}()
return spy
} |
payload.go | package attestation
import (
"encoding/json"
"github.com/docker/distribution/reference"
"github.com/opencontainers/go-digest"
)
const payloadType = "Google cloud binauthz container signature"
// PayloadIdentity represents the identity block in a Payload message.
type PayloadIdentity struct {
DockerReference string `json:"docker-reference"`
}
// PayloadImage represents the image block in a Payload message.
type PayloadImage struct {
DockerManifestDigest digest.Digest `json:"docker-manifest-digest"`
}
// PayloadCritical represents the critical block in the Payload message.
type PayloadCritical struct {
Identity PayloadIdentity `json:"identity"`
Image PayloadImage `json:"image"`
Type string `json:"type"`
}
// Payload represents a Payload message.
type Payload struct {
Critical PayloadCritical `json:"critical"`
}
// ToString returns the payload as a JSON encoded string, or returns an error.
func (p Payload) ToString() (string, error) {
b, err := json.Marshal(p)
if err != nil {
return "", err
}
return string(b), nil
} | // NewPayload creates a new Binauthz-specific payload for the image at
// the passed canonical reference.
func NewPayload(reference reference.Canonical) Payload {
payload := Payload{
Critical: PayloadCritical{
Identity: PayloadIdentity{
DockerReference: reference.Name(),
},
Image: PayloadImage{
DockerManifestDigest: reference.Digest(),
},
Type: payloadType,
},
}
return payload
} | |
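// Illustrative sketch (hypothetical values, not part of the original file):
// building a payload by hand and serializing it with ToString. The reference
// string and digest below are made-up examples.
//
//	p := Payload{
//		Critical: PayloadCritical{
//			Identity: PayloadIdentity{DockerReference: "gcr.io/example/app"},
//			Image:    PayloadImage{DockerManifestDigest: digest.Digest("sha256:...")},
//			Type:     payloadType,
//		},
//	}
//	s, err := p.ToString()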
streamer.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0-devel
// protoc v3.14.0
// source: pkg/grpcstreamer/streamer.proto
package grpcstreamer
import (
v1 "github.com/bookingcom/nanotube/pkg/opentelemetry/proto/metrics/v1"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) | state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ReceivedCount uint32 `protobuf:"varint,1,opt,name=receivedCount,proto3" json:"receivedCount,omitempty"`
}
func (x *Result) Reset() {
*x = Result{}
if protoimpl.UnsafeEnabled {
mi := &file_pkg_grpcstreamer_streamer_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Result) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Result) ProtoMessage() {}
func (x *Result) ProtoReflect() protoreflect.Message {
mi := &file_pkg_grpcstreamer_streamer_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Result.ProtoReflect.Descriptor instead.
func (*Result) Descriptor() ([]byte, []int) {
return file_pkg_grpcstreamer_streamer_proto_rawDescGZIP(), []int{0}
}
func (x *Result) GetReceivedCount() uint32 {
if x != nil {
return x.ReceivedCount
}
return 0
}
var File_pkg_grpcstreamer_streamer_proto protoreflect.FileDescriptor
var file_pkg_grpcstreamer_streamer_proto_rawDesc = []byte{
0x0a, 0x1f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
0x65, 0x72, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x1a, 0x30, 0x70, 0x6b, 0x67, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d,
0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69,
0x63, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x22, 0x2e, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x24, 0x0a,
0x0d, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x43, 0x6f,
0x75, 0x6e, 0x74, 0x32, 0x49, 0x0a, 0x08, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x65, 0x72, 0x12,
0x3d, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
0x63, 0x1a, 0x07, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x00, 0x28, 0x01, 0x42, 0x12,
0x5a, 0x10, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_pkg_grpcstreamer_streamer_proto_rawDescOnce sync.Once
file_pkg_grpcstreamer_streamer_proto_rawDescData = file_pkg_grpcstreamer_streamer_proto_rawDesc
)
func file_pkg_grpcstreamer_streamer_proto_rawDescGZIP() []byte {
file_pkg_grpcstreamer_streamer_proto_rawDescOnce.Do(func() {
file_pkg_grpcstreamer_streamer_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_grpcstreamer_streamer_proto_rawDescData)
})
return file_pkg_grpcstreamer_streamer_proto_rawDescData
}
var file_pkg_grpcstreamer_streamer_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_pkg_grpcstreamer_streamer_proto_goTypes = []interface{}{
(*Result)(nil), // 0: Result
(*v1.Metric)(nil), // 1: opentelemetry.proto.metrics.v1.Metric
}
var file_pkg_grpcstreamer_streamer_proto_depIdxs = []int32{
1, // 0: Streamer.Stream:input_type -> opentelemetry.proto.metrics.v1.Metric
0, // 1: Streamer.Stream:output_type -> Result
1, // [1:2] is the sub-list for method output_type
0, // [0:1] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_pkg_grpcstreamer_streamer_proto_init() }
func file_pkg_grpcstreamer_streamer_proto_init() {
if File_pkg_grpcstreamer_streamer_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_pkg_grpcstreamer_streamer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Result); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_pkg_grpcstreamer_streamer_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_pkg_grpcstreamer_streamer_proto_goTypes,
DependencyIndexes: file_pkg_grpcstreamer_streamer_proto_depIdxs,
MessageInfos: file_pkg_grpcstreamer_streamer_proto_msgTypes,
}.Build()
File_pkg_grpcstreamer_streamer_proto = out.File
file_pkg_grpcstreamer_streamer_proto_rawDesc = nil
file_pkg_grpcstreamer_streamer_proto_goTypes = nil
file_pkg_grpcstreamer_streamer_proto_depIdxs = nil
} | )
type Result struct { |
reader.rs | use std::num::{ParseFloatError, ParseIntError};
use crate::types::*;
fn is_whitespace(ch: char) -> bool {
match ch {
' ' => true,
'\t' => true,
_ => false,
}
}
macro_rules! save_token {
($tokens:expr, $token:expr) => {{
let t_token = $token.trim();
if !t_token.is_empty() {
$tokens.push(t_token.to_string());
$token = String::new();
}
}};
}
fn char_to_hex_num(ch: char) -> u8 {
    if ch >= '0' && ch <= '9' {
ch as u8 - b'0'
} else {
match ch {
'a' => 10,
'b' => 11,
'c' => 12,
'd' => 13,
'e' => 14,
'f' => 15,
_ => 0,
}
}
}
fn escape_to_char(escape_code: &[char]) -> char {
let mut ch_n: u8 = 0;
if escape_code.len() > 1 {
ch_n = (char_to_hex_num(escape_code[0]) * 16) + (char_to_hex_num(escape_code[1]));
} else if escape_code.len() == 1 {
ch_n = char_to_hex_num(escape_code[0]);
}
ch_n as char
}
fn | (
mut token: String,
ch: char,
last_ch: &mut char,
in_escape_code: &mut bool,
escape_code: &mut Vec<char>,
) -> String {
let mut set_last_char = false;
if !(ch == '\\' && *last_ch != '\\') {
// skip a standalone \ for now
if *in_escape_code {
escape_code.push(ch);
if escape_code.len() == 2 {
token.push(escape_to_char(escape_code));
escape_code.clear();
*in_escape_code = false;
}
} else if *last_ch == '\\' {
match ch {
'n' => token.push('\n'),
'r' => token.push('\r'),
't' => token.push('\t'),
'"' => token.push('"'),
'x' => {
*in_escape_code = true;
}
'\\' => {
// These \ are consumed so do not use again.
*last_ch = ' ';
set_last_char = true;
token.push('\\');
}
_ => {
token.push('\\');
token.push(ch);
}
}
} else {
token.push(ch);
}
}
if !set_last_char {
*last_ch = ch;
}
token
}
fn handle_char(
tokens: &mut Vec<String>,
mut token: String,
ch: char,
last_ch: char,
last_comma: &mut bool,
) -> String {
if ch == '(' {
save_token!(tokens, token);
tokens.push("(".to_string());
} else if ch == ')' {
save_token!(tokens, token);
tokens.push(")".to_string());
} else if ch == '\'' && (last_ch == ' ' || last_ch == '(' || last_ch == '\'' || last_ch == '`')
{
save_token!(tokens, token);
tokens.push("'".to_string());
} else if ch == '`' && (last_ch == ' ' || last_ch == '(' || last_ch == '\'' || last_ch == '`') {
save_token!(tokens, token);
tokens.push("`".to_string());
} else if ch == ',' && (last_ch == ' ' || last_ch == '(') {
*last_comma = true;
} else if is_whitespace(ch) {
save_token!(tokens, token);
} else if ch == '\\' && last_ch != '\\' {
// Do nothing...
} else {
token.push(ch);
}
token
}
fn tokenize(text: &str) -> Vec<String> {
let mut tokens: Vec<String> = Vec::new();
let mut in_string = false;
let mut token = String::new();
let mut last_ch = ' ';
let mut in_comment = false;
let mut last_comma = false;
let mut escape_code: Vec<char> = Vec::with_capacity(2);
let mut in_escape_code = false;
if text.starts_with("#!") {
// Work with shebanged scripts.
in_comment = true;
}
for ch in text.chars() {
if last_comma {
last_comma = false;
save_token!(tokens, token);
if ch == '@' {
tokens.push(",@".to_string());
last_ch = ch;
continue;
} else {
tokens.push(",".to_string());
}
}
if in_comment {
if ch == '\n' {
in_comment = false;
}
continue;
}
if ch == '\n' && last_ch == '\\' {
// Line ended on \ so combine with next line.
token.push('\n');
last_ch = ch;
continue;
}
if ch == '\"' && last_ch != '\\' {
// Kakoune bug "
in_string = !in_string;
token.push(ch);
if !in_string {
tokens.push(token);
token = String::new();
} else {
in_escape_code = false;
escape_code.clear();
}
last_ch = ch;
continue;
}
if in_string {
token = do_in_string(
token,
ch,
&mut last_ch,
&mut in_escape_code,
&mut escape_code,
);
} else {
if ch == ';' {
// Comment, ignore the rest of the line.
in_comment = true;
continue;
}
token = handle_char(&mut tokens, token, ch, last_ch, &mut last_comma);
last_ch = ch;
}
}
let token = token.trim();
if !token.is_empty() {
tokens.push(token.to_string());
}
tokens
}
fn parse_atom(token: &str) -> Expression {
if token.is_empty() {
return Expression::Atom(Atom::Nil);
}
if token.len() > 1 && token.starts_with('\"') && token.ends_with('\"') {
// Kakoune bug "
let string = token[1..token.len() - 1].to_string();
return Expression::Atom(Atom::String(string));
}
if token == "t" {
Expression::Atom(Atom::True)
} else if token == "nil" {
Expression::Atom(Atom::Nil)
} else {
let potential_int: Result<i64, ParseIntError> = token.parse();
match potential_int {
Ok(v) => Expression::Atom(Atom::Int(v)),
Err(_) => {
let potential_float: Result<f64, ParseFloatError> = token.parse();
match potential_float {
Ok(v) => Expression::Atom(Atom::Float(v)),
Err(_) => Expression::Atom(Atom::Symbol(token.to_string())),
}
}
}
}
}
fn close_list(level: i32, stack: &mut Vec<Vec<Expression>>) -> Result<(), ParseError> {
if level < 0 {
return Err(ParseError {
reason: "Unexpected `)`".to_string(),
});
}
if level > 0 {
match stack.pop() {
Some(v) => match stack.pop() {
Some(mut v2) => {
v2.push(Expression::List(v));
stack.push(v2);
}
None => {
stack.push(v);
}
},
None => {
return Err(ParseError {
reason: "Unexpected `)`".to_string(),
});
}
}
}
Ok(())
}
fn parse(tokens: &[String]) -> Result<Expression, ParseError> {
if tokens.is_empty() {
return Err(ParseError {
reason: "No tokens".to_string(),
});
}
if tokens[0] != "(" && tokens[0] != "'" && tokens[0] != "`" {
return Err(ParseError {
reason: "Not a list".to_string(),
});
}
let mut stack: Vec<Vec<Expression>> = Vec::new();
let mut level = 0;
let mut qexits: Vec<i32> = Vec::new();
let mut backtick_level = 0;
for token in tokens {
match &token[..] {
"'" => {
level += 1;
qexits.push(level);
let mut quoted = Vec::<Expression>::new();
quoted.push(Expression::Atom(Atom::Symbol("quote".to_string())));
stack.push(quoted);
}
"`" => {
level += 1;
qexits.push(level);
let mut quoted = Vec::<Expression>::new();
if backtick_level > 0 {
quoted.push(Expression::Atom(Atom::Symbol("quote".to_string())));
} else {
quoted.push(Expression::Atom(Atom::Symbol("bquote".to_string())));
backtick_level = level;
}
stack.push(quoted);
}
"(" => {
level += 1;
stack.push(Vec::<Expression>::new());
}
")" => {
level -= 1;
close_list(level, &mut stack)?;
while let Some(quote_exit_level) = qexits.pop() {
if level == quote_exit_level {
if level == backtick_level {
backtick_level = 0;
}
level -= 1;
close_list(level, &mut stack)?;
} else {
qexits.push(quote_exit_level);
break;
}
}
}
_ => match stack.pop() {
Some(mut v) => {
v.push(parse_atom(token));
stack.push(v);
if let Some(quote_exit_level) = qexits.pop() {
if level == quote_exit_level {
if level == backtick_level {
backtick_level = 0;
}
level -= 1;
close_list(level, &mut stack)?;
} else {
qexits.push(quote_exit_level);
}
}
}
None => {
return Err(ParseError {
reason: "Found symbol without containing list".to_string(),
});
}
},
}
}
if !qexits.is_empty() {
qexits.reverse();
for quote_exit_level in qexits.drain(..) {
if level == quote_exit_level {
level -= 1;
close_list(level, &mut stack)?;
}
}
}
if level != 0 {
return Err(ParseError {
reason: "Unclosed list(s)".to_string(),
});
}
if stack.len() > 1 {
let mut v: Vec<Expression> = Vec::new();
for s in stack {
v.push(Expression::List(s));
}
Ok(Expression::List(v))
} else {
match stack.pop() {
Some(v) => Ok(Expression::List(v)),
None => Err(ParseError {
reason: "Empty results".to_string(),
}),
}
}
}
pub fn read(text: &str) -> Result<Expression, ParseError> {
let tokens = tokenize(text);
parse(&tokens)
}
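// Small usage sketch of `read` (illustrative, not from the original file).
#[allow(dead_code)]
fn _read_sketch() {
    let expr = read("(list 1 2.0 \"three\")").expect("parse failed");
    if let Expression::List(forms) = expr {
        // A symbol plus three atoms parse into one top-level list.
        assert_eq!(forms.len(), 4);
    }
}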
| do_in_string |
lib.rs | mod components;
mod resources;
mod systems;
use hotham::{
components::Visible,
hecs::{Entity, World},
schedule_functions::{
apply_haptic_feedback, begin_frame, begin_pbr_renderpass, end_frame, end_pbr_renderpass,
physics_step,
},
systems::{
audio_system, collision_system, draw_gui_system, rendering_system,
update_parent_transform_matrix_system, update_rigid_body_transforms_system,
update_transform_matrix_system,
},
systems::{pointers_system, Queries},
xr::{self, SessionState},
Engine, HothamResult,
};
use resources::{
game_context::{add_songs, add_sound_effects, GameState},
GameContext,
};
use systems::{game::game_system, sabers_system, CrabSaberQueries};
#[cfg_attr(target_os = "android", ndk_glue::main(backtrace = "on"))]
pub fn main() {
println!("[CRAB_SABER] MAIN!");
real_main().expect("[CRAB_SABER] ERROR IN MAIN!");
}
pub fn real_main() -> HothamResult<()> {
let mut engine = Engine::new();
let (mut world, mut game_context) = init(&mut engine);
let mut hotham_queries = Default::default();
let mut crab_saber_queries = Default::default();
while let Ok((previous_state, current_state)) = engine.update() {
tick(
previous_state,
current_state,
&mut engine,
&mut world,
&mut hotham_queries,
&mut crab_saber_queries,
&mut game_context,
);
}
Ok(())
}
fn tick(
previous_state: xr::SessionState,
current_state: xr::SessionState,
engine: &mut Engine,
world: &mut World,
hotham_queries: &mut Queries,
crab_saber_queries: &mut CrabSaberQueries,
game_context: &mut GameContext,
) |
fn handle_state_change(
previous_state: SessionState,
current_state: SessionState,
audio_context: &mut hotham::resources::AudioContext,
game_context: &mut GameContext,
world: &mut World,
) {
match (previous_state, current_state) {
(SessionState::VISIBLE, SessionState::FOCUSED) => {
audio_context.resume_music_track();
match game_context.state {
GameState::Init => {}
GameState::MainMenu | GameState::GameOver => {
show(world, game_context.pointer);
}
GameState::Playing(_) => {
show(world, game_context.blue_saber);
show(world, game_context.red_saber);
}
}
}
(SessionState::FOCUSED, SessionState::VISIBLE) => {
audio_context.pause_music_track();
match game_context.state {
GameState::Init => {}
GameState::MainMenu | GameState::GameOver => {
hide(world, game_context.pointer);
}
GameState::Playing(_) => {
hide(world, game_context.blue_saber);
hide(world, game_context.red_saber);
}
}
}
_ => {}
}
}
fn init(engine: &mut Engine) -> (World, GameContext) {
let mut world = World::default();
let mut game_context = GameContext::new(engine, &mut world);
add_songs(&mut engine.audio_context, &mut game_context);
add_sound_effects(&mut engine.audio_context, &mut game_context);
(world, game_context)
}
fn hide(world: &mut World, entity: Entity) {
if world.remove_one::<Visible>(entity).is_err() {
println!(
"[STATE_CHANGE] Tried to make {:?} hidden but it had no Visible component",
entity
)
}
}
fn show(world: &mut World, entity: Entity) {
world.insert_one(entity, Visible {}).unwrap();
}
| {
let xr_context = &mut engine.xr_context;
let vulkan_context = &engine.vulkan_context;
let render_context = &mut engine.render_context;
let physics_context = &mut engine.physics_context;
let gui_context = &mut engine.gui_context;
let haptic_context = &mut engine.haptic_context;
let audio_context = &mut engine.audio_context;
// If we're not in a session, don't run the frame loop.
match current_state {
xr::SessionState::IDLE | xr::SessionState::EXITING | xr::SessionState::STOPPING => return,
_ => {}
}
// Frame start
begin_frame(xr_context, vulkan_context, render_context);
handle_state_change(
previous_state,
current_state,
audio_context,
game_context,
world,
);
// Core logic tasks - these are only necessary when in a FOCUSED state.
if current_state == xr::SessionState::FOCUSED {
// Handle input
sabers_system(
&mut crab_saber_queries.sabers_query,
world,
xr_context,
physics_context,
);
pointers_system(
&mut hotham_queries.pointers_query,
world,
xr_context,
physics_context,
);
// Physics
physics_step(physics_context);
collision_system(&mut hotham_queries.collision_query, world, physics_context);
// Game logic
game_system(
crab_saber_queries,
world,
game_context,
audio_context,
physics_context,
haptic_context,
);
// Update the world
update_rigid_body_transforms_system(
&mut hotham_queries.update_rigid_body_transforms_query,
world,
physics_context,
);
update_transform_matrix_system(&mut hotham_queries.update_transform_matrix_query, world);
update_parent_transform_matrix_system(
&mut hotham_queries.parent_query,
&mut hotham_queries.roots_query,
world,
);
// Draw GUI
draw_gui_system(
&mut hotham_queries.draw_gui_query,
world,
vulkan_context,
&xr_context.frame_index,
render_context,
gui_context,
haptic_context,
);
// Haptics
apply_haptic_feedback(xr_context, haptic_context);
// Audio
audio_system(
&mut hotham_queries.audio_query,
world,
audio_context,
physics_context,
xr_context,
);
}
// Rendering tasks - only necessary if we are in at least the visible state
if current_state == xr::SessionState::VISIBLE || current_state == xr::SessionState::FOCUSED {
begin_pbr_renderpass(xr_context, vulkan_context, render_context);
rendering_system(
&mut hotham_queries.rendering_query,
world,
vulkan_context,
xr_context.frame_index,
render_context,
);
end_pbr_renderpass(xr_context, vulkan_context, render_context);
}
// End the frame
end_frame(xr_context, vulkan_context, render_context);
} |
test_hdfs_racks_extended.py | # coding: utf-8
|
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_1
from isi_sdk_8_1_1.models.hdfs_racks_extended import HdfsRacksExtended # noqa: E501
from isi_sdk_8_1_1.rest import ApiException
class TestHdfsRacksExtended(unittest.TestCase):
"""HdfsRacksExtended unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testHdfsRacksExtended(self):
"""Test HdfsRacksExtended"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_1.models.hdfs_racks_extended.HdfsRacksExtended() # noqa: E501
pass
if __name__ == '__main__':
unittest.main() | """
Isilon SDK |
Config.go | /*
Copyright 2020 Reactive Tech Limited.
"Reactive Tech Limited" is a limited company with number 09234118 and located in England, United Kingdom.
https://www.reactive-tech.io
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package parser
type Config struct {
SourceDir string `yaml:"sourceDir,omitempty"`
OutputDir string `yaml:"outputDir,omitempty"`
PatternFilesToParse string `yaml:"patternFilesToParse,omitempty"`
PatternFilesToExclude string `yaml:"patternFilesToExclude,omitempty"`
PatternDirsToExclude string `yaml:"patternDirsToExclude,omitempty"`
}
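// Illustrative YAML matching the struct tags above (values are examples only):
//
//	sourceDir: .
//	outputDir: output
//	patternFilesToParse: "*.html"
//	patternDirsToExclude: .idea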
| var DefaultConfig = Config{
SourceDir: ".",
OutputDir: "output",
PatternFilesToParse: "*.html",
PatternFilesToExclude: "gigo",
PatternDirsToExclude: ".idea",
} | |
mode.js | // TODO: open stream
import { setOptions } from './kcp'
const NAMES = [
'nodelay',
'interval',
'fastresend',
'nocwnd',
]
// nodelay, interval, fastresend, nocwnd
export const MODES = {
normal: [0, 40, 2, 1],
fast: [0, 30, 2, 1],
fast2: [1, 20, 2, 1],
fast3: [1, 10, 2, 1],
}
export function | (kcp, mode = 'fast') {
const modeArgs = MODES[mode]
if (!modeArgs) {
throw new Error(`invalid mode: ${mode}`)
}
const options = {}
NAMES.forEach((key, i) => {
options[key] = modeArgs[i]
})
return setOptions(kcp, options)
}
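// Usage sketch (illustrative; `kcp` stands for an existing KCP session):
//
//   setKCPMode(kcp, 'fast2') // nodelay=1, interval=20, fastresend=2, nocwnd=1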
| setKCPMode |
asgd.py | import math
import torch
from .optimizer import Optimizer
class ASGD(Optimizer):
"""Implements Averaged Stochastic Gradient Descent.
It has been proposed in `Acceleration of stochastic approximation by
averaging`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
lambd (float, optional): decay term (default: 1e-4)
alpha (float, optional): power for eta update (default: 0.75)
t0 (float, optional): point at which to start averaging (default: 1e6)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
.. _Acceleration of stochastic approximation by averaging:
http://dl.acm.org/citation.cfm?id=131098
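Example (illustrative sketch; assumes ``model``, ``input``, ``target`` and
``loss_fn`` are defined elsewhere):
    >>> optimizer = ASGD(model.parameters(), lr=1e-2)
    >>> optimizer.zero_grad()
    >>> loss_fn(model(input), target).backward()
    >>> optimizer.step()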
"""
def __init__(self, params, lr=1e-2, lambd=1e-4, alpha=0.75, t0=1e6, weight_decay=0):
defaults = dict(lr=lr, lambd=lambd, alpha=alpha, t0=t0,
weight_decay=weight_decay)
super(ASGD, self).__init__(params, defaults)
| Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('ASGD does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['eta'] = group['lr']
state['mu'] = 1
state['ax'] = torch.zeros_like(p.data)
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
# decay term
p.data.mul_(1 - group['lambd'] * state['eta'])
# update parameter
p.data.add_(-state['eta'], grad)
# averaging
if state['mu'] != 1:
state['ax'].add_(p.data.sub(state['ax']).mul(state['mu']))
else:
state['ax'].copy_(p.data)
# update eta and mu
state['eta'] = (group['lr'] /
math.pow((1 + group['lambd'] * group['lr'] * state['step']), group['alpha']))
state['mu'] = 1 / max(1, state['step'] - group['t0'])
return loss | def step(self, closure=None):
"""Performs a single optimization step.
|
register_or_cluster.rs | use cluster::Cluster;
use errors::*;
use is_similar::{IsSimilar, IsSimilarOptions};
use register::Register;
use register_properties_group::RegisterPropertiesGroup;
use types::*;
use xmltree;
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum RegisterOrCluster {
Register(Register),
Cluster(Cluster),
}
impl RegisterOrCluster {
pub fn | (element: &xmltree::Element) -> Result<RegisterOrCluster> {
match &*element.name {
"register" => Ok(RegisterOrCluster::Register(try!(Register::from_element(element)))),
"cluster" => Ok(RegisterOrCluster::Cluster(try!(Cluster::from_element(element)))),
_ => {
Err(ErrorKind::UnexpectedValue("one of register or cluster",
element.name.to_string())
.into())
}
}
}
pub fn derived_from(&self) -> &Option<IdentifierType> {
match *self {
RegisterOrCluster::Register(ref r) => &r.derived_from,
RegisterOrCluster::Cluster(ref c) => &c.derived_from,
}
}
pub fn name(&self) -> &str {
match *self {
RegisterOrCluster::Register(ref r) => &*r.name,
RegisterOrCluster::Cluster(ref c) => &*c.name,
}
}
pub fn propagate_register_properties(&mut self,
register_properties: &RegisterPropertiesGroup) {
match *self {
RegisterOrCluster::Register(ref mut r) => {
r.register_properties = r.register_properties.merge(register_properties)
}
RegisterOrCluster::Cluster(ref mut c) => {
c.propagate_register_properties(register_properties)
}
}
}
pub fn merge_derived_from(&mut self, derived_from: &RegisterOrCluster) {
match (self, derived_from) {
(&mut RegisterOrCluster::Register(ref mut r1),
&RegisterOrCluster::Register(ref r2)) => r1.merge_derived_from(r2),
(&mut RegisterOrCluster::Cluster(ref mut c1), &RegisterOrCluster::Cluster(ref c2)) => {
c1.merge_derived_from(c2)
}
_ => (),
}
}
pub fn propagate_derived_from(&mut self) {
match *self {
RegisterOrCluster::Register(ref mut r) => {
r.propagate_derived_from();
}
RegisterOrCluster::Cluster(ref mut c) => {
c.propagate_derived_from();
}
}
}
}
impl<'a, 'b> IsSimilar<&'a RegisterOrCluster> for &'b RegisterOrCluster {
fn is_similar(self, other: &RegisterOrCluster, options: &IsSimilarOptions) -> bool {
match (self, other) {
(&RegisterOrCluster::Register(ref r1), &RegisterOrCluster::Register(ref r2)) => {
r1.is_similar(r2, options)
}
(&RegisterOrCluster::Cluster(ref c1), &RegisterOrCluster::Cluster(ref c2)) => {
c1.is_similar(c2, options)
}
_ => false,
}
}
}
| from_element |
geomath.py | #
# geomath.py
#
# some geo coordinate math that I found on the internet
#
# [email protected]
#
import math
def HeadingStr(heading):
"""
Gives a heading string given the heading float
"""
headstr = "?"
if heading != None:
if heading < 22.5 or heading >= 337.5:
headstr = "N"
elif heading >=22.5 and heading < 67.5:
headstr = "NE"
elif heading >= 67.5 and heading < 112.5:
headstr = "E"
elif heading >= 112.5 and heading < 157.5:
headstr = "SE"
elif heading >= 157.5 and heading < 202.5:
headstr = "S"
elif heading >= 202.5 and heading < 247.5:
headstr = "SW"
elif heading >= 247.5 and heading < 292.5:
headstr = "W"
elif heading >= 292.5 and heading < 337.5:
headstr = "NW"
return headstr
def knot2mph(k):
"""
Converts knots to miles per hour.
"""
if k == None:
return None
return k * 1.15078
def mi2km(mi):
"""
Converts to miles to kilometers.
"""
if mi == None:
return None
return mi * 1.60934
def mi2nm(mi):
"""
Converts miles to nautical miles
"""
if mi == None:
return None
return mi * 0.868976
def ft2m(ft):
|
def distance(pointA, pointB):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
http://stackoverflow.com/questions/15736995/how-can-i-quickly-estimate-the-distance-between-two-latitude-longitude-points
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(math.radians, [pointA[1], pointA[0], pointB[1], pointB[0]])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
c = 2 * math.asin(math.sqrt(a))
r = 3956 # Radius of earth in miles. Use 6371 for kilometers
return c * r
def bearing(pointA, pointB):
"""
Calculates the bearing between two points.
Found here: https://gist.github.com/jeromer/2005586
The formulae used is the following:
θ = atan2(sin(Δlong).cos(lat2),
cos(lat1).sin(lat2) − sin(lat1).cos(lat2).cos(Δlong))
:Parameters:
- `pointA: The tuple representing the latitude/longitude for the
first point. Latitude and longitude must be in decimal degrees
- `pointB: The tuple representing the latitude/longitude for the
second point. Latitude and longitude must be in decimal degrees
:Returns:
The bearing in degrees
:Returns Type:
float
"""
if (type(pointA) != tuple) or (type(pointB) != tuple):
raise TypeError("Only tuples are supported as arguments")
lat1 = math.radians(pointA[0])
lat2 = math.radians(pointB[0])
diffLong = math.radians(pointB[1] - pointA[1])
x = math.sin(diffLong) * math.cos(lat2)
y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1)
* math.cos(lat2) * math.cos(diffLong))
initial_bearing = math.atan2(x, y)
# Now we have the initial bearing but math.atan2 return values
# from -180° to + 180° which is not what we want for a compass bearing
# The solution is to normalize the initial bearing as shown below
initial_bearing = math.degrees(initial_bearing)
compass_bearing = (initial_bearing + 360) % 360
return compass_bearing
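# Usage sketch (illustrative coordinates only):
#
#   seattle = (47.6062, -122.3321)
#   portland = (45.5152, -122.6784)
#   print("%.1f mi" % distance(seattle, portland))
#   print(HeadingStr(bearing(seattle, portland)))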
| """
Converts feet to meters.
"""
if ft == None:
return None
return ft * 0.3048 |
bootstrap.rs | //! # Building tari-based applications CLI
//!
//! To help with building tari-enabled CLI from scratch as easy as possible this crate exposes
//! [`ConfigBootstrap`] struct. ConfigBootstrap implements [`structopt::StructOpt`] trait, all CLI options
//! required for initializing configs can be embedded in any StructOpt derived struct.
//!
//! After loading ConfigBootstrap parameters it is necessary to call [`ConfigBootstrap::init_dirs()`] call
//! which would create necessary configuration files based on input parameters. This usually followed by:
//! - [`ConfigBootstrap::initialize_logging()`] would initialize log4rs logging.
//! - [`ConfigBootstrap::load_configuration()`] which would load [config::Config] from .tari config file.
//!
//! ## Example - CLI which is loading and deserializing the global config file
//!
//! ```ignore
//! # use tempdir::TempDir;
//! use tari_common::ConfigBootstrap;
//! use structopt::StructOpt;
//!
//! #[derive(StructOpt)]
//! /// The reference Tari cryptocurrency base node implementation
//! struct Arguments {
//! /// Create and save new node identity if one doesn't exist
//! #[structopt(long)]
//! id: bool,
//! #[structopt(flatten)]
//! bootstrap: ConfigBootstrap,
//! }
//!
//! let mut args = Arguments::from_args();
//! # let temp_dir = TempDir::new(string(8).as_str()).unwrap();
//! # args.bootstrap.base_path = temp_dir.path().to_path_buf();
//! # args.bootstrap.init = true;
//! args.bootstrap.init_dirs();
//! args.bootstrap.initialize_logging();
//! let config = args.bootstrap.load_configuration();
//! assert_eq!(config.network, Network::MainNet);
//! assert_eq!(config.blocking_threads, 4);
//! # std::fs::remove_dir_all(&dir_utils::default_subdir("", Some(dir))).unwrap();
//! ```
//!
//! ```shell
//! > main -h
//! main 0.0.0
//! The reference Tari cryptocurrency base node implementation
//!
//! USAGE:
//! main [FLAGS] [OPTIONS]
//!
//! FLAGS:
//! -h, --help Prints help information
//! --create Create and save new node identity if one doesn't exist
//! --init Create a default configuration file if it doesn't exist
//! -V, --version Prints version information
//!
//! OPTIONS:
//! --base-path <base-path> A path to a directory to store your files
//! --config <config> A path to the configuration file to use (config.toml)
//! --log-config <log-config> The path to the log configuration file. It is set using the following precedence
//! set: [env: TARI_LOG_CONFIGURATION=]
//! ```
use super::{
error::ConfigError,
utils::{install_default_config_file, load_configuration},
};
use crate::{dir_utils, initialize_logging, logging, DEFAULT_CONFIG, DEFAULT_LOG_CONFIG};
use std::{
io,
path::{Path, PathBuf},
};
use structopt::{clap::ArgMatches, StructOpt};
#[derive(StructOpt, Debug)]
pub struct ConfigBootstrap {
/// A path to a directory to store your files
#[structopt(short, long, alias("base_dir"), hide_default_value(true), default_value = "")]
pub base_path: PathBuf,
/// A path to the configuration file to use (config.toml)
#[structopt(short, long, hide_default_value(true), default_value = "")]
pub config: PathBuf,
/// The path to the log configuration file. It is set using the following precedence set
#[structopt(
short,
long,
alias("log_config"),
env = "TARI_LOG_CONFIGURATION",
hide_default_value(true),
default_value = ""
)]
pub log_config: PathBuf,
/// Create a default configuration file if it doesn't exist
#[structopt(long)]
pub init: bool,
}
impl Default for ConfigBootstrap {
fn default() -> Self {
ConfigBootstrap {
base_path: dir_utils::default_path("", None),
config: dir_utils::default_path(DEFAULT_CONFIG, None),
log_config: dir_utils::default_path(DEFAULT_LOG_CONFIG, None),
init: false,
}
}
}
impl ConfigBootstrap {
const ARGS: &'static [&'static str] = &["base-path", "base_dir", "config", "init", "log-config", "log_config"];
/// Initialize configuration and directories based on ConfigBootstrap options.
///
    /// If not present, it will create the base directory (default ~/.tari/, depending on OS).
/// Log and tari configs will be initialized in the base directory too.
///
    /// Without the `--init` flag, configuration and directories will be created only
    /// after the user's confirmation.
pub fn init_dirs(&mut self) -> Result<(), ConfigError> {
if self.base_path.to_str() == Some("") {
self.base_path = dir_utils::default_path("", None);
}
// Create the tari data directory
dir_utils::create_data_directory(Some(&self.base_path)).map_err(|err| {
ConfigError::new(
"We couldn't create a default Tari data directory and have to quit now. This makes us sad :(",
Some(err.to_string()),
)
})?;
if self.config.to_str() == Some("") {
self.config = dir_utils::default_path(DEFAULT_CONFIG, Some(&self.base_path));
}
let log_config = if self.log_config.to_str() == Some("") {
None
} else {
Some(self.log_config.clone())
};
self.log_config = logging::get_log_configuration_path(log_config);
if !self.config.exists() {
let install = if !self.init {
prompt("Config file does not exist. We can create a default one for you now, or you can say 'no' here, \
and generate a customised one at https://config.tari.com.\n\
Would you like to try the default configuration (Y/n)?")
} else {
true
};
if install {
println!(
"Installing new config file at {}",
self.config.to_str().unwrap_or("[??]")
);
install_configuration(&self.config, install_default_config_file);
}
}
if !self.log_config.exists() {
let install = if !self.init {
prompt("Logging configuration file does not exist. Would you like to create a new one (Y/n)?")
} else {
true
};
if install {
println!(
"Installing new logfile configuration at {}",
self.log_config.to_str().unwrap_or("[??]")
);
install_configuration(&self.log_config, logging::install_default_logfile_config);
}
};
Ok(())
}
/// Fill in ConfigBootstrap from clap ArgMatches.
///
/// ## Example:
/// ```edition2018
/// # use structopt::clap::clap_app;
/// # use tari_common::*;
/// let matches = clap_app!(myapp =>
/// (@arg base_path: -b --("base-path") +takes_value "A path to a directory to store your files")
/// (@arg config: -c --config +takes_value "A path to the configuration file to use (config.toml)")
/// (@arg log_config: -l --("log-config") +takes_value "A path to the logfile configuration (log4rs.yml))")
/// (@arg init: -i --init "Create a default configuration file if it doesn't exist")
/// ).get_matches();
/// let bootstrap = ConfigBootstrap::from_matches(&matches);
/// ```
pub fn from_matches(matches: &ArgMatches) -> Result<Self, ConfigError> {
let iter = matches
.args
.keys()
.flat_map(|arg| match Self::ARGS.binary_search(arg) {
Ok(_) => vec![
Some(std::ffi::OsString::from(format!("--{}", arg))),
matches.value_of_os(arg).map(|s| s.to_os_string()),
],
_ => vec![],
})
.filter_map(|arg| arg);
let mut vals: Vec<std::ffi::OsString> = iter.collect();
vals.insert(0, "".into());
Ok(ConfigBootstrap::from_iter_safe(vals.iter())?)
}
/// Set up application-level logging using the Log4rs configuration file
/// based on supplied CLI arguments
pub fn initialize_logging(&self) -> Result<(), ConfigError> {
match initialize_logging(&self.log_config) {
true => Ok(()),
            false => Err(ConfigError::new("failed to initialize logging", None)),
}
}
/// Load configuration from files located based on supplied CLI arguments
pub fn load_configuration(&self) -> Result<config::Config, ConfigError> {
load_configuration(self).map_err(|source| ConfigError::new("failed to load configuration", Some(source)))
}
}
/// Fill in ConfigBootstrap from clap ArgMatches
///
/// ```rust
/// # use structopt::clap::clap_app;
/// # use tari_common::*;
/// let matches = clap_app!(myapp =>
/// (version: "0.0.10")
/// (author: "The Tari Community")
/// (about: "The reference Tari cryptocurrency base node implementation")
/// (@arg base_path: -b --("base-path") +takes_value "A path to a directory to store your files")
/// (@arg config: -c --config +takes_value "A path to the configuration file to use (config.toml)")
/// (@arg log_config: -l --("log-config") +takes_value "A path to the logfile configuration (log4rs.yml))")
/// (@arg init: -i --init "Create a default configuration file if it doesn't exist")
/// (@arg create_id: --("create-id") "Create and save new node identity if one doesn't exist ")
/// ).get_matches();
/// let bootstrap = bootstrap_config_from_cli(&matches);
/// ```
/// ## Caveats
/// It will exit with code 1 if no base dir and fails to create one
pub fn bootstrap_config_from_cli(matches: &ArgMatches) -> ConfigBootstrap {
let mut bootstrap = ConfigBootstrap::from_matches(matches).expect("failed to extract matches");
match bootstrap.init_dirs() {
Err(err) => {
println!("{}", err);
std::process::exit(1);
},
Ok(_) => bootstrap,
}
}
fn prompt(question: &str) {
println!("{}", question);
let mut input = "".to_string();
io::stdin().read_line(&mut input).unwrap();
let input = input.trim().to_lowercase();
input == "y" || input.is_empty()
}
pub fn install_configuration<F>(path: &Path, installer: F)
where F: Fn(&Path) -> Result<(), std::io::Error> {
if let Err(e) = installer(path) {
println!(
"We could not install a new configuration file in {}: {}",
path.to_str().unwrap_or("?"),
e.to_string()
)
}
}
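// A minimal usage sketch for `install_configuration` (the path and installer
// closure below are illustrative only; any `Fn(&Path) -> io::Result<()>` fits):
//
//     use std::path::Path;
//     install_configuration(Path::new("/tmp/custom.toml"), |p| {
//         std::fs::write(p, "# my defaults\n")
//     });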
#[cfg(test)]
mod test {
use super::ConfigBootstrap;
use crate::{bootstrap_config_from_cli, dir_utils, dir_utils::default_subdir, load_configuration};
use structopt::{clap::clap_app, StructOpt};
use tari_test_utils::random::string;
use tempdir::TempDir;
#[test]
fn test_bootstrap_from_matches() {
// Create command line test data
let app = clap_app!(myapp =>
(@arg base_dir: -b --base_dir +takes_value "A path to a directory to store your files")
(@arg config: -c --config +takes_value "A path to the configuration file to use (config.toml)")
(@arg log_config: -l--log_config +takes_value "A path to the logfile configuration (log4rs.yml))")
(@arg init: --init "Create a default configuration file if it doesn't exist")
);
let matches = app.clone().get_matches_from(vec![
"",
"--log_config",
"no-file-created",
"--config",
"no-file-created",
"--base_dir",
"no-dir-created",
"--init",
]);
let bootstrap = ConfigBootstrap::from_matches(&matches).expect("failed to extract matches");
assert!(bootstrap.init);
assert_eq!(bootstrap.base_path.to_str(), Some("no-dir-created"));
assert_eq!(bootstrap.log_config.to_str(), Some("no-file-created"));
assert_eq!(bootstrap.config.to_str(), Some("no-file-created"));
// Check aliases too
let app = clap_app!(myapp =>
(@arg ("base-path"): -b --("base-path") +takes_value "A path to a directory to store your files")
(@arg config: -c --config +takes_value "A path to the configuration file to use (config.toml)")
(@arg ("log-config"): -l --("log-config") +takes_value "A path to the logfile configuration (log4rs.yml))")
(@arg init: --init "Create a default configuration file if it doesn't exist")
);
let matches = app.get_matches_from(vec![
"",
"--log-config",
"no-file-created",
"--base-path",
"no-dir-created",
]);
let bootstrap = ConfigBootstrap::from_matches(&matches).expect("failed to extract matches");
assert!(!bootstrap.init);
assert_eq!(bootstrap.base_path.to_str(), Some("no-dir-created"));
assert_eq!(bootstrap.log_config.to_str(), Some("no-file-created"));
assert_eq!(bootstrap.config.to_str(), Some(""));
}
#[test]
fn test_bootstrap_config_from_cli_and_load_configuration() {
let temp_dir = TempDir::new(string(8).as_str()).unwrap();
let dir = &temp_dir.path().to_path_buf();
// Create test folder
dir_utils::create_data_directory(Some(dir)).unwrap();
// Create command line test data
let matches = clap_app!(myapp =>
(version: "0.0.10")
(author: "The Tari Community")
(about: "The reference Tari cryptocurrency base node implementation")
(@arg base_dir: -b --base_dir +takes_value "A path to a directory to store your files")
(@arg config: -c --config +takes_value "A path to the configuration file to use (config.toml)")
(@arg log_config: -l --log_config +takes_value "A path to the logfile configuration (log4rs.yml))")
(@arg init: --init "Create a default configuration file if it doesn't exist")
(@arg create_id: --create_id "Create and save new node identity if one doesn't exist ")
)
.get_matches_from(vec![
"",
"--base_dir",
default_subdir("", Some(dir)).as_str(),
"--init",
"--create_id",
]);
let bootstrap = ConfigBootstrap::from_matches(&matches).expect("failed to extract matches");
assert!(bootstrap.init);
assert_eq!(&bootstrap.base_path, dir);
// Load bootstrap via former API
let bootstrap = bootstrap_config_from_cli(&matches);
let config_exists = std::path::Path::new(&bootstrap.config).exists();
let log_config_exists = std::path::Path::new(&bootstrap.log_config).exists();
// Load and apply configuration file
let cfg = load_configuration(&bootstrap);
// Cleanup test data
if std::path::Path::new(&dir_utils::default_subdir("", Some(dir))).exists() {
std::fs::remove_dir_all(&dir_utils::default_subdir("", Some(dir))).expect("failed to cleanup dirs");
}
// Assert results
assert!(config_exists);
assert!(log_config_exists);
assert!(&cfg.is_ok());
}
#[test]
fn test_bootstrap_config_from_structopt_derive() {
let temp_dir = TempDir::new(string(8).as_str()).unwrap();
let dir = &temp_dir.path().to_path_buf();
// Create test folder
dir_utils::create_data_directory(Some(dir)).unwrap();
#[derive(StructOpt)]
/// The reference Tari cryptocurrency base node implementation
struct Arguments {
/// Create and save new node identity if one doesn't exist
#[structopt(long = "create_id")]
create_id: bool,
#[structopt(flatten)]
bootstrap: super::ConfigBootstrap,
}
// Create command line test data
let mut args = Arguments::from_iter_safe(vec![
"",
"--base_dir",
default_subdir("", Some(dir)).as_str(),
"--init",
"--create_id",
])
.expect("failed to process arguments");
// Init bootstrap dirs
args.bootstrap.init_dirs().expect("failed to initialize dirs");
// Load and apply configuration file
let cfg = load_configuration(&args.bootstrap);
// Cleanup test data
if std::path::Path::new(&dir_utils::default_subdir("", Some(dir))).exists() {
std::fs::remove_dir_all(&dir_utils::default_subdir("", Some(dir))).unwrap();
}
// Assert results
assert!(args.bootstrap.init);
assert!(args.create_id);
assert!(&cfg.is_ok());
}
#[test]
fn check_homedir_is_used_by_default() {
dir_utils::create_data_directory(None).unwrap();
assert_eq!(
dirs::home_dir().unwrap().join(".tari"),
dir_utils::default_path("", None)
);
}
}
scoring.py | """ Runs the scoring procedure for the challenge.
It assumes that there exists a ./model_dir folder containing both the
submission code and the saved learner.
It will create a folder named ./scoring_output (default) in which a txt file
will contain the average score over 600 episodes. You can change the folder
name via the score_dir flag.
Usage example executed from the metadl/ repository :
python -m metadl.core.scoring.scoring --meta_test_dir=<path_dataset.meta_test>
"""
import os
from sys import path
import scipy.stats
import gin
import numpy as np
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from metadl.data.dataset import DataGenerator
from metadl.core.ingestion.ingestion import get_gin_path, show_dir
FLAGS = flags.FLAGS
flags.DEFINE_string('meta_test_dir',
'/Users/adrian/GitInria/meta-dataset/records/',
('Directory of the meta-test dataset. This directory '
+ 'should contain records and a json spec file.'))
flags.DEFINE_string('saved_model_dir',
'./model_dir',
('Directory path that contains the participant\'s code '
+ 'along with the serialized learner from meta-fit.'))
flags.DEFINE_string('score_dir',
'./scoring_output',
'Path to the score directory.')
flags.DEFINE_string('evaltype',
'test',
'Data type on which to perform evaluation. [train, val, test]')
tf.random.set_seed(1234)
def NwayKshot_accuracy(predictions, ground_truth, metric):
""" N-way, K-shot accuracy which corresponds to the accuracy in a
multi-classification context with N classes.
Args:
predictions : tensors, sparse tensors corresponding to the predicted
labels.
ground_truth : tensors, sparse tensors corresponding the ground truth
labels.
metric : keras.metrics , the metric we use to evaluate the
classification performance of the meta-learning algorithm. We use
the SparseCategoricalAccuracy in this challenge.
    Returns:
score : Float, the resulting performance using the given metric.
"""
ground_truth = tf.expand_dims(ground_truth, axis = 1)
predictions = tf.expand_dims(predictions, axis = 1)
logging.debug('Predictions shape : {} - Ground truth shape : {}'.format(
predictions.shape, ground_truth.shape))
metric.update_state(ground_truth, predictions)
score = metric.result()
logging.debug('An episode score: {}'.format(score))
metric.reset_states()
return score
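# Illustrative sketch of the accuracy computation above (not part of the
# scoring pipeline): with 5-way logits and sparse integer labels, the metric
# counts argmax hits.
#
#   metric = tf.metrics.SparseCategoricalAccuracy()
#   preds = tf.constant([[0.9, 0.1, 0.0, 0.0, 0.0],
#                        [0.2, 0.7, 0.1, 0.0, 0.0]])
#   labels = tf.constant([0, 1])
#   NwayKshot_accuracy(preds, labels, metric)  # -> 1.0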
def is_one_hot_vector(x, axis=None, keepdims=False):
"""Check if a vector 'x' is one-hot (i.e. one entry is 1 and others 0)."""
norm_1 = np.linalg.norm(x, ord=1, axis=axis, keepdims=keepdims)
norm_inf = np.linalg.norm(x, ord=np.inf, axis=axis, keepdims=keepdims)
return np.logical_and(norm_1 == 1, norm_inf == 1)
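# Worked example: [0, 1, 0] has L1 norm 1 and L-inf norm 1, so it is one-hot,
# while [0.5, 0.5, 0] has L1 norm 1 but L-inf norm 0.5 and is rejected.
#
#   is_one_hot_vector(np.array([0., 1., 0.]))   # -> True
#   is_one_hot_vector(np.array([0.5, 0.5, 0.])) # -> False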
def write_score(score, conf_int, file_score, duration=-1):
"""Write score of the k-th task in the given file_score."""
file_score.write('set1_score: {:.6f}\n'.format(float(score)))
file_score.write('conf_int: {:.3f}\n'.format(float(conf_int)))
file_score.write('Duration: {:.6f}\n'.format(float(duration)))
def extract_elapsed_time(saved_model_dir):
""" Extracts elapsed time from the metadata file. It corresponds to the
meta-training time, the duration of the ingestion process.
"""
if not os.path.isdir(saved_model_dir):
        raise ValueError('Saved model directory does not exist.')
if os.path.isfile(os.path.join(saved_model_dir, 'metadata')):
with open(os.path.join(saved_model_dir, 'metadata'), 'r') as f :
lines = f.readlines()
for line in lines :
splitted_line = line.split(' ')
for k, word in enumerate(splitted_line):
if 'elapsed' in splitted_line[k]:
elapsed_time = float(splitted_line[k+1])
return elapsed_time
return -1
def process_task(task):
"""We are using the meta-dataset code to generate episodes from a dataset.
    Generated episodes have a specific format. Each is processed such that
    the support and query sets are ready to be used by the participants. Each
set is returned as a tf.data.Dataset object.
The que_labs are kept hidden.
Returns :
support_dataset : tf.data.Dataset containing the support examples and
labels.
query_dataset : tf.data.Dataset containing the query examples
que_labs : tuple (query_batch_size, 1), the query examples labels
i.e. the ground truth labels.
"""
sup_set = tf.data.Dataset.from_tensor_slices(\
(task[0][1], task[0][0]))
dim = task[0][4].shape[1]
arr = np.arange(dim)
    np.random.shuffle(arr) # shuffling arr
    query_labs = task[0][4]
    query_imgs = task[0][3]
query_labs_s = tf.gather(query_labs, arr, axis=1)
query_imgs_s = tf.gather(query_imgs, arr, axis=1)
que_set = tf.data.Dataset.from_tensor_slices(
(query_labs_s, query_imgs_s)
)
new_ds = tf.data.Dataset.zip((sup_set, que_set))
for ((supp_labs, supp_img), (que_labs, que_img)) \
in new_ds :
logging.debug('Supp labs : {}'.format(supp_labs))
logging.debug('Query labs : {}'.format(que_labs))
support_set = tf.data.Dataset.from_tensor_slices(\
(supp_img, supp_labs))
query_set = tf.data.Dataset.from_tensor_slices(\
(que_img,))
support_set = support_set.batch(5)
query_set = query_set.batch(95)
return support_set, query_set, que_labs
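# Shape note, inferred from the episode_config [28, 5, 1, 19] used in
# scoring() below: episodes are 5-way, so the support set holds
# 5 classes x 1 shot = 5 examples and the query set 5 x 19 = 95 examples,
# which is why the two datasets above are batched to 5 and 95 respectively.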
def scoring(argv):
"""
For each task, load and fit the Learner with the support set and evaluate
the submission performance with the query set.
A directory 'scoring_output' is created and contains a txt file that
contains the submission score and duration. Note that the former is the
time elapsed during the ingestion program and hence the meta-fit()
duration.
The metric considered here is the Sparse Categorical Accuracy for a
5 classes image classification problem.
"""
del argv
saved_model_dir = FLAGS.saved_model_dir
meta_test_dir = FLAGS.meta_test_dir
eval_type = FLAGS.evaltype
# Making eval type compatible with DataGenerator specs
if eval_type == 'train' or eval_type == 'val':
data_generator_eval_type = 'train'
    elif eval_type == 'test':
        data_generator_eval_type = 'test'
    else:
        raise ValueError('Wrong eval_type : {}'.format(eval_type))
# Use CodaLab's path `run/input/ref` in parallel with `run/input/res`
if not os.path.isdir(meta_test_dir):
meta_test_dir = os.path.join(saved_model_dir, os.pardir, 'ref')
# Evaluation type scenario: if meta_test is specified -> act as normal
# scoring on meta_test data
if (eval_type == 'train' or eval_type == 'val') and 'meta_test' in meta_test_dir:
raise ValueError('Cannot perform train/val evaluation on meta-test data!')
#if 'meta_test' not in meta_test_dir:
# if eval_type == 'test':
# meta_test_dir = os.path.join(meta_test_dir, 'meta_test')
# else:
# meta_test_dir = os.path.join(meta_test_dir, 'meta_train')
code_dir = os.path.join(saved_model_dir, 'code_dir')
score_dir = FLAGS.score_dir
path.append(code_dir)
from model import MyLearner
if(os.path.exists(os.path.join(code_dir, 'model.gin'))):
gin.parse_config_file(os.path.join(code_dir, 'model.gin'))
logging.info('Ingestion done! Starting scoring process ... ')
logging.info('Creating the meta-test episode generator ... \n ')
generator = DataGenerator(path_to_records=meta_test_dir,
batch_config=None,
episode_config=[28, 5, 1, 19],
pool= data_generator_eval_type,
mode='episode')
if eval_type == 'test':
meta_test_dataset = generator.meta_test_pipeline
elif eval_type == 'train':
meta_test_dataset = generator.meta_train_pipeline
elif eval_type == 'val':
meta_test_dataset = generator.meta_valid_pipeline
else:
raise ValueError('Wrong eval_type : {}'.format(eval_type))
logging.info('Evaluating performance on episodes ... ')
meta_test_dataset = meta_test_dataset.batch(1)
meta_test_dataset = meta_test_dataset.prefetch(5)
learner = MyLearner()
if (not os.path.isdir(score_dir)):
os.mkdir(score_dir)
score_file = os.path.join(score_dir, 'scores.txt')
results = []
metric = tf.metrics.SparseCategoricalAccuracy()
nbr_episodes = 600
for k , task in enumerate(meta_test_dataset) :
support_set, query_set, ground_truth = process_task(task)
learner.load(saved_model_dir)
predictor = learner.fit(support_set)
predictions = predictor.predict(query_set)
score = NwayKshot_accuracy(predictions, ground_truth, metric)
results.append(score)
logging.debug('Score on {} : {}'.format(k, score))
logging.debug('Results : {}'.format(results[:20]))
        if k + 1 >= nbr_episodes:
            break
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, h
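    # Worked example: with n = 600 episodes and confidence = 0.95, the t
    # quantile is about 1.96, so h ~= 1.96 * sem(results) and m +/- h is an
    # approximate 95% confidence interval on the mean episode accuracy.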
m, conf_int = mean_confidence_interval(results)
with open(score_file, 'w') as f :
write_score(m,
conf_int,
f,
extract_elapsed_time(saved_model_dir))
logging.info(('Scoring done! The average score over {} '
+ 'episodes is : {:.3%}').format(nbr_episodes,
sum(results)/len(results))
)
if __name__ == '__main__':
np.random.seed(seed=1234)
tf.get_logger().setLevel('ERROR')
    app.run(scoring)
key-store.go | package login
import (
"crypto/rsa"
"encoding/base64"
"encoding/json"
"fmt"
"log"
"math/big"
"net/http"
"sync"
"time"
)
type keyStore struct {
keys map[string]*rsa.PublicKey
expiry time.Time
mux sync.Mutex
}
type jwkKey struct {
Kty string
Alg string
Use string
Kid string
N string
E string
}
type jwkKeys struct {
Keys []jwkKey
}
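// For reference, the JWKS endpoint queried in fetchKeys returns a document
// shaped roughly like this (field values illustrative only):
//
//	{"keys": [{"kty": "RSA", "alg": "RS256", "use": "sig",
//	           "kid": "abc123", "n": "<base64url modulus>", "e": "AQAB"}]}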
func (ks *keyStore) fetchKeys() (err error) {
if len(ks.keys) > 0 && ks.expiry.After(time.Now()) {
log.Println("Keys still active")
return nil
}
var r *http.Response
if r, err = http.Get("https://www.googleapis.com/oauth2/v3/certs"); err != nil {
return
}
defer r.Body.Close()
decoder := json.NewDecoder(r.Body)
var keys jwkKeys
if err = decoder.Decode(&keys); err != nil {
return
}
var expiry time.Time
if expiry, err = time.Parse("Mon, 02 Jan 2006 15:04:05 MST", r.Header.Get("expires")); err != nil {
return
}
newKeys := make(map[string]*rsa.PublicKey)
for _, key := range keys.Keys {
if key.Kty != "RSA" || key.Alg != "RS256" || key.Use != "sig" {
log.Printf("Key %v not a signing RSA 256 Key (%v/%v/%v)\n", key.Kid, key.Kty, key.Alg, key.Use)
continue
}
var nBytes, eBytes []byte
nBytes, err = base64.RawURLEncoding.DecodeString(key.N)
if err != nil {
return
}
eBytes, err = base64.RawURLEncoding.DecodeString(key.E)
		if err != nil {
			return
		}
var n, e big.Int
n.SetBytes(nBytes)
e.SetBytes(eBytes)
newKeys[key.Kid] = &rsa.PublicKey{
N: &n,
E: int(e.Int64()),
}
}
log.Println("New keys, expiring at ", expiry)
ks.keys = newKeys
ks.expiry = expiry
return nil
}
func (ks *keyStore) lookupKey(id string) (key *rsa.PublicKey, err error) {
ks.mux.Lock()
defer ks.mux.Unlock()
if err = ks.fetchKeys(); err != nil {
return
}
var keyFound bool
if key, keyFound = ks.keys[id]; !keyFound {
err = fmt.Errorf("Key \"%v\" not found", id)
return
}
return
}
part1.rs | //! # Advent of Code - Day 16 - Part One
pub fn part1() -> usize {
return 0;
}
#[cfg(test)]
mod day16 {
use super::*;
#[test]
fn test_part1() {
assert_eq!(part1(), 0);
}
}
apiPolicy.ts | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
import * as pulumi from "@pulumi/pulumi";
import * as utilities from "../../utilities";
/**
* Policy Contract details.
*/
export class ApiPolicy extends pulumi.CustomResource {
/**
* Get an existing ApiPolicy resource's state with the given name, ID, and optional extra
* properties used to qualify the lookup.
*
* @param name The _unique_ name of the resulting resource.
* @param id The _unique_ provider ID of the resource to lookup.
* @param opts Optional settings to control the behavior of the CustomResource.
*/
public static get(name: string, id: pulumi.Input<pulumi.ID>, opts?: pulumi.CustomResourceOptions): ApiPolicy {
return new ApiPolicy(name, undefined as any, { ...opts, id: id });
}
/** @internal */
public static readonly __pulumiType = 'azure-native:apimanagement/v20170301:ApiPolicy';
/**
* Returns true if the given object is an instance of ApiPolicy. This is designed to work even
* when multiple copies of the Pulumi SDK have been loaded into the same process.
*/
public static isInstance(obj: any): obj is ApiPolicy {
if (obj === undefined || obj === null) {
return false;
}
return obj['__pulumiType'] === ApiPolicy.__pulumiType;
}
/**
* Resource name.
*/
public /*out*/ readonly name!: pulumi.Output<string>;
/**
* Json escaped Xml Encoded contents of the Policy.
*/
public readonly policyContent!: pulumi.Output<string>;
/**
* Resource type for API Management resource.
*/
public /*out*/ readonly type!: pulumi.Output<string>;
/**
* Create a ApiPolicy resource with the given unique name, arguments, and options.
*
* @param name The _unique_ name of the resource.
* @param args The arguments to use to populate this resource's properties.
* @param opts A bag of options that control this resource's behavior.
*/
constructor(name: string, args: ApiPolicyArgs, opts?: pulumi.CustomResourceOptions) {
let inputs: pulumi.Inputs = {};
opts = opts || {};
if (!opts.id) {
if ((!args || args.apiId === undefined) && !opts.urn) {
throw new Error("Missing required property 'apiId'");
}
if ((!args || args.policyContent === undefined) && !opts.urn) {
throw new Error("Missing required property 'policyContent'");
}
if ((!args || args.resourceGroupName === undefined) && !opts.urn) {
throw new Error("Missing required property 'resourceGroupName'");
}
if ((!args || args.serviceName === undefined) && !opts.urn) {
throw new Error("Missing required property 'serviceName'");
}
inputs["apiId"] = args ? args.apiId : undefined;
inputs["policyContent"] = args ? args.policyContent : undefined;
inputs["policyId"] = args ? args.policyId : undefined;
inputs["resourceGroupName"] = args ? args.resourceGroupName : undefined;
inputs["serviceName"] = args ? args.serviceName : undefined;
inputs["name"] = undefined /*out*/;
inputs["type"] = undefined /*out*/;
} else {
inputs["name"] = undefined /*out*/;
inputs["policyContent"] = undefined /*out*/;
inputs["type"] = undefined /*out*/;
}
if (!opts.version) {
opts = pulumi.mergeOptions(opts, { version: utilities.getVersion()});
}
const aliasOpts = { aliases: [{ type: "azure-nextgen:apimanagement/v20170301:ApiPolicy" }, { type: "azure-native:apimanagement:ApiPolicy" }, { type: "azure-nextgen:apimanagement:ApiPolicy" }, { type: "azure-native:apimanagement/v20180101:ApiPolicy" }, { type: "azure-nextgen:apimanagement/v20180101:ApiPolicy" }, { type: "azure-native:apimanagement/v20180601preview:ApiPolicy" }, { type: "azure-nextgen:apimanagement/v20180601preview:ApiPolicy" }, { type: "azure-native:apimanagement/v20190101:ApiPolicy" }, { type: "azure-nextgen:apimanagement/v20190101:ApiPolicy" }, { type: "azure-native:apimanagement/v20191201:ApiPolicy" }, { type: "azure-nextgen:apimanagement/v20191201:ApiPolicy" }, { type: "azure-native:apimanagement/v20191201preview:ApiPolicy" }, { type: "azure-nextgen:apimanagement/v20191201preview:ApiPolicy" }, { type: "azure-native:apimanagement/v20200601preview:ApiPolicy" }, { type: "azure-nextgen:apimanagement/v20200601preview:ApiPolicy" }, { type: "azure-native:apimanagement/v20201201:ApiPolicy" }, { type: "azure-nextgen:apimanagement/v20201201:ApiPolicy" }, { type: "azure-native:apimanagement/v20210101preview:ApiPolicy" }, { type: "azure-nextgen:apimanagement/v20210101preview:ApiPolicy" }] };
opts = pulumi.mergeOptions(opts, aliasOpts);
super(ApiPolicy.__pulumiType, name, inputs, opts);
}
}
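// A minimal usage sketch (resource names and policy XML are illustrative):
//
//     const policy = new ApiPolicy("examplePolicy", {
//         apiId: "echo-api",
//         policyContent: "<policies><inbound /><backend /><outbound /></policies>",
//         resourceGroupName: "rg1",
//         serviceName: "apimService1",
//     });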
/**
* The set of arguments for constructing a ApiPolicy resource.
*/
export interface ApiPolicyArgs {
/**
* API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
*/
readonly apiId: pulumi.Input<string>;
/**
* Json escaped Xml Encoded contents of the Policy.
*/
readonly policyContent: pulumi.Input<string>;
/**
* The identifier of the Policy.
     */
    readonly policyId?: pulumi.Input<string>;
/**
* The name of the resource group.
*/
readonly resourceGroupName: pulumi.Input<string>;
/**
* The name of the API Management service.
*/
readonly serviceName: pulumi.Input<string>;
}
libo_shark.rs | use super::{ButtplugDeviceResultFuture, ButtplugProtocol, ButtplugProtocolCommandHandler};
use crate::{
core::messages::{self, ButtplugDeviceCommandMessageUnion, DeviceMessageAttributesMap},
device::{
protocol::{generic_command_manager::GenericCommandManager, ButtplugProtocolProperties},
DeviceImpl,
DeviceWriteCmd,
Endpoint,
},
};
use std::sync::Arc;
use tokio::sync::Mutex;
#[derive(ButtplugProtocolProperties)]
pub struct LiboShark {
name: String,
message_attributes: DeviceMessageAttributesMap,
manager: Arc<Mutex<GenericCommandManager>>,
stop_commands: Vec<ButtplugDeviceCommandMessageUnion>,
}
impl ButtplugProtocol for LiboShark {
fn new_protocol(
name: &str,
message_attributes: DeviceMessageAttributesMap,
) -> Box<dyn ButtplugProtocol> {
let manager = GenericCommandManager::new(&message_attributes);
Box::new(Self {
name: name.to_owned(),
message_attributes,
stop_commands: manager.get_stop_commands(),
manager: Arc::new(Mutex::new(manager)),
})
}
}
impl ButtplugProtocolCommandHandler for LiboShark {
fn handle_vibrate_cmd(
&self,
device: Arc<DeviceImpl>,
message: messages::VibrateCmd,
) -> ButtplugDeviceResultFuture {
// Store off result before the match, so we drop the lock ASAP.
let manager = self.manager.clone();
Box::pin(async move {
let result = manager.lock().await.update_vibration(&message, true)?;
if let Some(cmds) = result {
let mut data = 0u8;
if let Some(speed) = cmds[0] {
data |= (speed as u8) << 4;
}
if let Some(speed) = cmds[1] {
data |= speed as u8;
}
device
.write_value(DeviceWriteCmd::new(Endpoint::Tx, vec![data], false))
.await?;
}
Ok(messages::Ok::default().into())
})
}
}
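// Worked example of the nibble packing in handle_vibrate_cmd: speeds (2, 2)
// pack to (2 << 4) | 2 = 0x22, and (2, 3) to 0x23 -- exactly the payloads the
// test below expects on the Tx endpoint.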
#[cfg(all(test, feature = "server"))]
mod test {
use crate::{
core::messages::{StopDeviceCmd, VibrateCmd, VibrateSubcommand},
device::{DeviceImplCommand, DeviceWriteCmd, Endpoint},
test::{check_test_recv_empty, check_test_recv_value, new_bluetoothle_test_device},
util::async_manager,
};
#[test]
pub fn test_libo_shark_protocol() {
async_manager::block_on(async move {
let (device, test_device) = new_bluetoothle_test_device("ShaYu").await.unwrap();
let command_receiver_tx = test_device.get_endpoint_receiver(&Endpoint::Tx).unwrap();
let command_receiver_tx_mode = test_device
.get_endpoint_receiver(&Endpoint::TxMode)
.unwrap();
device
.parse_message(
VibrateCmd::new(
0,
vec![
VibrateSubcommand::new(0, 0.5),
VibrateSubcommand::new(1, 0.5),
],
)
.into(),
)
.await
.unwrap();
check_test_recv_value(
&command_receiver_tx,
DeviceImplCommand::Write(DeviceWriteCmd::new(Endpoint::Tx, vec![0x22], false)),
);
assert!(check_test_recv_empty(&command_receiver_tx));
device
.parse_message(VibrateCmd::new(0, vec![VibrateSubcommand::new(1, 1.0)]).into())
.await
.unwrap();
check_test_recv_value(
&command_receiver_tx,
DeviceImplCommand::Write(DeviceWriteCmd::new(Endpoint::Tx, vec![0x23], false)),
);
assert!(check_test_recv_empty(&command_receiver_tx));
device
.parse_message(VibrateCmd::new(0, vec![VibrateSubcommand::new(0, 0.5)]).into())
.await
.unwrap();
assert!(check_test_recv_empty(&command_receiver_tx));
device
.parse_message(StopDeviceCmd::new(0).into())
.await
.unwrap();
check_test_recv_value(
&command_receiver_tx,
DeviceImplCommand::Write(DeviceWriteCmd::new(Endpoint::Tx, vec![0x00], false)),
);
assert!(check_test_recv_empty(&command_receiver_tx));
assert!(check_test_recv_empty(&command_receiver_tx_mode));
});
}
}
sigcache_test.go | // Copyright (c) 2015-2016 The btcsuite developers
// Copyright (c) 2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package txscript
import (
"crypto/rand"
"math/big"
"testing"
"github.com/decred/dcrd/chaincfg/chainec"
"github.com/decred/dcrd/chaincfg/chainhash"
)
// genRandomSig returns a random message, a signature of the message under the
// public key and the public key. This function is used to generate randomized
// test data.
func genRandomSig() (*chainhash.Hash, chainec.Signature, chainec.PublicKey, error) {
secp256k1 := chainec.Secp256k1
privBytes, pubX, pubY, err := secp256k1.GenerateKey(rand.Reader)
if err != nil {
return nil, nil, nil, err
}
priv := secp256k1.NewPrivateKey(new(big.Int).SetBytes(privBytes))
pub := secp256k1.NewPublicKey(pubX, pubY)
var msgHash chainhash.Hash
if _, err := rand.Read(msgHash[:]); err != nil {
return nil, nil, nil, err
}
r, s, err := secp256k1.Sign(priv, msgHash[:])
if err != nil {
return nil, nil, nil, err
}
sig := secp256k1.NewSignature(r, s)
return &msgHash, sig, pub, nil
}
// TestSigCacheAddExists tests the ability to add, and later check the
// existence of a signature triplet in the signature cache.
func TestSigCacheAddExists(t *testing.T) {
sigCache := NewSigCache(200)
// Generate a random sigCache entry triplet.
msg1, sig1, key1, err := genRandomSig()
if err != nil {
t.Errorf("unable to generate random signature test data")
}
// Add the triplet to the signature cache.
sigCache.Add(*msg1, sig1, key1)
// The previously added triplet should now be found within the sigcache.
sig1Copy, _ := chainec.Secp256k1.ParseSignature(sig1.Serialize())
key1Copy, _ := chainec.Secp256k1.ParsePubKey(key1.SerializeCompressed())
if !sigCache.Exists(*msg1, sig1Copy, key1Copy) {
t.Errorf("previously added item not found in signature cache")
}
}
// TestSigCacheAddEvictEntry tests the eviction case where a new signature
// triplet is added to a full signature cache which should trigger randomized
// eviction, followed by adding the new element to the cache.
func TestSigCacheAddEvictEntry(t *testing.T) {
// Create a sigcache that can hold up to 100 entries.
sigCacheSize := uint(100)
sigCache := NewSigCache(sigCacheSize)
// Fill the sigcache up with some random sig triplets.
for i := uint(0); i < sigCacheSize; i++ {
msg, sig, key, err := genRandomSig()
if err != nil {
t.Fatalf("unable to generate random signature test data")
}
sigCache.Add(*msg, sig, key)
sigCopy, _ := chainec.Secp256k1.ParseSignature(sig.Serialize())
keyCopy, _ := chainec.Secp256k1.ParsePubKey(key.SerializeCompressed())
if !sigCache.Exists(*msg, sigCopy, keyCopy) {
t.Errorf("previously added item not found in signature" +
"cache")
}
}
// The sigcache should now have sigCacheSize entries within it.
if uint(len(sigCache.validSigs)) != sigCacheSize {
t.Fatalf("sigcache should now have %v entries, instead it has %v",
sigCacheSize, len(sigCache.validSigs))
}
// Add a new entry, this should cause eviction of a randomly chosen
// previous entry.
msgNew, sigNew, keyNew, err := genRandomSig()
if err != nil {
t.Fatalf("unable to generate random signature test data")
}
sigCache.Add(*msgNew, sigNew, keyNew)
	// The sigcache should still have sigCacheSize entries.
if uint(len(sigCache.validSigs)) != sigCacheSize {
t.Fatalf("sigcache should now have %v entries, instead it has %v",
sigCacheSize, len(sigCache.validSigs))
}
// The entry added above should be found within the sigcache.
sigNewCopy, _ := chainec.Secp256k1.ParseSignature(sigNew.Serialize())
keyNewCopy, _ := chainec.Secp256k1.ParsePubKey(keyNew.SerializeCompressed())
if !sigCache.Exists(*msgNew, sigNewCopy, keyNewCopy) {
t.Fatalf("previously added item not found in signature cache")
}
}
// TestSigCacheAddMaxEntriesZeroOrNegative tests that if a sigCache is created
// with a max size <= 0, then no entries are added to the sigcache at all.
func TestSigCacheAddMaxEntriesZeroOrNegative(t *testing.T) {
// Create a sigcache that can hold up to 0 entries.
sigCache := NewSigCache(0)
// Generate a random sigCache entry triplet.
msg1, sig1, key1, err := genRandomSig()
	if err != nil {
		t.Errorf("unable to generate random signature test data")
	}
// Add the triplet to the signature cache.
sigCache.Add(*msg1, sig1, key1)
// The generated triplet should not be found.
sig1Copy, _ := chainec.Secp256k1.ParseSignature(sig1.Serialize())
key1Copy, _ := chainec.Secp256k1.ParsePubKey(key1.SerializeCompressed())
if sigCache.Exists(*msg1, sig1Copy, key1Copy) {
t.Errorf("previously added signature found in sigcache, but" +
"shouldn't have been")
}
// There shouldn't be any entries in the sigCache.
if len(sigCache.validSigs) != 0 {
t.Errorf("%v items found in sigcache, no items should have"+
"been added", len(sigCache.validSigs))
}
}
rtp_codec.rs | use super::*;
use crate::api::media_engine::*;
use crate::error::Error;
use crate::media::rtp::fmtp::*;
use anyhow::Result;
use std::fmt;
/// RTPCodecType determines the type of a codec
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum RTPCodecType {
Unspecified = 0,
/// RTPCodecTypeAudio indicates this is an audio codec
Audio = 1,
/// RTPCodecTypeVideo indicates this is a video codec
Video = 2,
}
impl Default for RTPCodecType {
fn default() -> Self {
RTPCodecType::Unspecified
}
}
impl From<&str> for RTPCodecType {
fn from(raw: &str) -> Self {
match raw {
"audio" => RTPCodecType::Audio,
"video" => RTPCodecType::Video,
_ => RTPCodecType::Unspecified,
}
}
}
impl From<u8> for RTPCodecType {
fn from(v: u8) -> Self {
match v {
1 => RTPCodecType::Audio,
2 => RTPCodecType::Video,
_ => RTPCodecType::Unspecified,
}
}
}
impl fmt::Display for RTPCodecType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let s = match *self {
RTPCodecType::Audio => "audio",
RTPCodecType::Video => "video",
RTPCodecType::Unspecified => crate::UNSPECIFIED_STR,
};
write!(f, "{}", s)
}
}
/// RTPCodecCapability provides information about codec capabilities.
/// https://w3c.github.io/webrtc-pc/#dictionary-rtcrtpcodeccapability-members
#[derive(Default, Debug, Clone, PartialEq)]
pub struct RTPCodecCapability {
pub mime_type: String,
pub clock_rate: u32,
pub channels: u16,
pub sdp_fmtp_line: String,
pub rtcp_feedback: Vec<RTCPFeedback>,
}
impl RTPCodecCapability {
pub(crate) fn payloader_for_codec(
&self,
) -> Result<Box<dyn rtp::packetizer::Payloader + Send + Sync>> {
let mime_type = self.mime_type.to_lowercase();
if mime_type == MIME_TYPE_H264.to_lowercase() {
Ok(Box::new(rtp::codecs::h264::H264Payloader))
} else if mime_type == MIME_TYPE_VP8.to_lowercase() {
Ok(Box::new(rtp::codecs::vp8::Vp8Payloader))
} else if mime_type == MIME_TYPE_VP9.to_lowercase() {
Ok(Box::new(rtp::codecs::vp9::Vp9Payloader))
} else if mime_type == MIME_TYPE_OPUS.to_lowercase() {
Ok(Box::new(rtp::codecs::opus::OpusPayloader))
} else if mime_type == MIME_TYPE_G722.to_lowercase()
|| mime_type == MIME_TYPE_PCMU.to_lowercase()
|| mime_type == MIME_TYPE_PCMA.to_lowercase()
{
Ok(Box::new(rtp::codecs::g7xx::G7xxPayloader))
} else {
Err(Error::ErrNoPayloaderForCodec.into())
}
}
}
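// Usage sketch (field values illustrative): a capability whose MIME type is
// MIME_TYPE_OPUS yields an Opus payloader, while an unrecognized MIME type
// surfaces Error::ErrNoPayloaderForCodec.
//
//     let caps = RTPCodecCapability {
//         mime_type: MIME_TYPE_OPUS.to_owned(),
//         ..Default::default()
//     };
//     let payloader = caps.payloader_for_codec()?;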
/// RTPHeaderExtensionCapability is used to define a RFC5285 RTP header extension supported by the codec.
/// https://w3c.github.io/webrtc-pc/#dom-rtcrtpcapabilities-headerextensions
#[derive(Default, Debug, Clone)]
pub struct RTPHeaderExtensionCapability {
pub uri: String,
}
/// RTPHeaderExtensionParameter represents a negotiated RFC5285 RTP header extension.
/// https://w3c.github.io/webrtc-pc/#dictionary-rtcrtpheaderextensionparameters-members
#[derive(Default, Debug, Clone, PartialEq)]
pub struct RTPHeaderExtensionParameter {
pub uri: String,
pub id: isize,
}
/// RTPCodecParameters is a sequence containing the media codecs that an RtpSender
/// will choose from, as well as entries for RTX, RED and FEC mechanisms. This also
/// includes the PayloadType that has been negotiated
/// https://w3c.github.io/webrtc-pc/#rtcrtpcodecparameters
#[derive(Default, Debug, Clone, PartialEq)]
pub struct RTPCodecParameters {
pub capability: RTPCodecCapability,
pub payload_type: PayloadType,
pub stats_id: String,
}
/// RTPParameters is a list of negotiated codecs and header extensions
/// https://w3c.github.io/webrtc-pc/#dictionary-rtcrtpparameters-members
#[derive(Default, Debug, Clone)]
pub struct RTPParameters {
pub header_extensions: Vec<RTPHeaderExtensionParameter>,
pub codecs: Vec<RTPCodecParameters>,
}
#[derive(Debug, Copy, Clone, PartialEq)]
pub(crate) enum CodecMatch {
None = 0,
Partial = 1,
Exact = 2,
}
impl Default for CodecMatch {
    fn default() -> Self {
        CodecMatch::None
    }
}
/// Do a fuzzy find for a codec in the list of codecs
/// Used for lookup up a codec in an existing list to find a match
/// Returns codecMatchExact, codecMatchPartial, or codecMatchNone
pub(crate) fn codec_parameters_fuzzy_search(
needle: &RTPCodecParameters,
haystack: &[RTPCodecParameters],
) -> (RTPCodecParameters, CodecMatch) {
let needle_fmtp = parse_fmtp(&needle.capability.sdp_fmtp_line);
//TODO: add unicode case-folding equal support
// First attempt to match on mime_type + sdpfmtp_line
for c in haystack {
if c.capability.mime_type.to_uppercase() == needle.capability.mime_type.to_uppercase()
&& fmtp_consist(&needle_fmtp, &parse_fmtp(&c.capability.sdp_fmtp_line))
{
return (c.clone(), CodecMatch::Exact);
}
}
// Fallback to just mime_type
for c in haystack {
if c.capability.mime_type.to_uppercase() == needle.capability.mime_type.to_uppercase() {
return (c.clone(), CodecMatch::Partial);
}
}
    (RTPCodecParameters::default(), CodecMatch::None)
}
pls_recall.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the PL/SQL Recall event formatter."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import pls_recall
from tests.formatters import test_lib
class PlsRecallFormatterTest(test_lib.EventFormatterTestCase):
  """Tests for the PL/SQL Recall file container event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = pls_recall.PlsRecallFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = pls_recall.PlsRecallFormatter()
expected_attribute_names = [
'sequence_number',
'username',
'database_name',
'query']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
if __name__ == '__main__':
  unittest.main()
dmca.component.ts | import { Component, OnInit } from '@angular/core';
@Component({
selector: 'app-dmca',
templateUrl: './dmca.component.html',
styleUrls: ['./dmca.component.css']
})
export class DmcaComponent implements OnInit {
constructor() { }
  ngOnInit() {
  }
}
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'BachelorETL.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| """Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'BachelorETL.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv) |
enums.go | package costmanagement
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// FormatType enumerates the values for format type.
type FormatType string
const (
// Csv ...
Csv FormatType = "Csv"
)
// PossibleFormatTypeValues returns an array of possible values for the FormatType const type.
func PossibleFormatTypeValues() []FormatType {
return []FormatType{Csv}
}
// GranularityType enumerates the values for granularity type.
type GranularityType string
const (
// Daily ...
Daily GranularityType = "Daily"
)
// PossibleGranularityTypeValues returns an array of possible values for the GranularityType const type.
func PossibleGranularityTypeValues() []GranularityType {
return []GranularityType{Daily}
}
// RecurrenceType enumerates the values for recurrence type.
type RecurrenceType string
const (
// RecurrenceTypeAnnually ...
RecurrenceTypeAnnually RecurrenceType = "Annually"
// RecurrenceTypeDaily ...
RecurrenceTypeDaily RecurrenceType = "Daily"
// RecurrenceTypeMonthly ...
RecurrenceTypeMonthly RecurrenceType = "Monthly"
// RecurrenceTypeWeekly ...
RecurrenceTypeWeekly RecurrenceType = "Weekly"
)
// PossibleRecurrenceTypeValues returns an array of possible values for the RecurrenceType const type.
func PossibleRecurrenceTypeValues() []RecurrenceType {
return []RecurrenceType{RecurrenceTypeAnnually, RecurrenceTypeDaily, RecurrenceTypeMonthly, RecurrenceTypeWeekly}
}
// ReportConfigColumnType enumerates the values for report config column type.
type ReportConfigColumnType string
const (
// ReportConfigColumnTypeDimension ...
ReportConfigColumnTypeDimension ReportConfigColumnType = "Dimension"
// ReportConfigColumnTypeTag ...
ReportConfigColumnTypeTag ReportConfigColumnType = "Tag"
)
// PossibleReportConfigColumnTypeValues returns an array of possible values for the ReportConfigColumnType const type.
func PossibleReportConfigColumnTypeValues() []ReportConfigColumnType {
return []ReportConfigColumnType{ReportConfigColumnTypeDimension, ReportConfigColumnTypeTag}
}
// StatusType enumerates the values for status type.
type StatusType string
const (
// Active ...
Active StatusType = "Active"
// Inactive ...
Inactive StatusType = "Inactive"
)
// PossibleStatusTypeValues returns an array of possible values for the StatusType const type.
func PossibleStatusTypeValues() []StatusType {
return []StatusType{Active, Inactive}
}
// TimeframeType enumerates the values for timeframe type.
type TimeframeType string
const (
// Custom ...
Custom TimeframeType = "Custom"
// MonthToDate ...
MonthToDate TimeframeType = "MonthToDate"
// WeekToDate ...
WeekToDate TimeframeType = "WeekToDate"
// YearToDate ...
YearToDate TimeframeType = "YearToDate"
)
// PossibleTimeframeTypeValues returns an array of possible values for the TimeframeType const type.
func PossibleTimeframeTypeValues() []TimeframeType {
	return []TimeframeType{Custom, MonthToDate, WeekToDate, YearToDate}
}
co2_data.py | """ Regression demos (AR, autocorrelation, GP, Kalman filter) on the CO2 dataset.
Author: Leonard Berrada
Date: 5 Nov 2015
"""
import sys
sys.path.append("../")
import matplotlib.pyplot as plt
from Regression import AutoRegressive, AutoCorrelation, GaussianProcess, KalmanFilter
from process_data import data_from_file
file_name = "co2.mat"
data_dict = data_from_file(file_name)
# model = "GP"
model = "AR"
# model = "AC"
# model = "KF"
if model.lower() == 'kf':
p = 25
kf = KalmanFilter(data_dict, p)
kf.fit()
kf.display(out="./co2_kf.png")
if model.lower() == "ar":
p = 50
my_ar = AutoRegressive(data_dict, p)
my_ar.fit()
my_ar.predict()
    my_ar.display(out="./co2_ar.png")
if model.lower() == "ac":
    p = 50
my_ac = AutoCorrelation(data_dict, p)
my_ac.fit()
my_ac.predict()
my_ac.display(out="./co2_ac.png")
my_ac.spectrum()
if model.lower() == "gp":
Q = 3
use_kernels = "exponential_quadratic* cosine"
for _ in range(Q - 1):
use_kernels += "+ exponential_quadratic * cosine"
# use_kernels = 'rational_quadratic + periodic'
use_means = "constant"
estimator = "MLE"
my_gp = GaussianProcess(data_dict=data_dict,
use_kernels=use_kernels,
use_means=use_means,
estimator=estimator,
sequential_mode=False)
my_gp.predict()
my_gp.compute_score()
    my_gp.show_prediction(out="./co2_gp.png")
channel.go | package main
import (
"fmt"
"time"
)
var ch = make(chan int)
func print(s string) {
for _, ch := range s {
fmt.Printf("%c", ch)
time.Sleep(300 * time.Millisecond)
}
}
func main() {
go person1()
	go person2()
	go person3()
	<-ch
}
func person1() {
print("hello")
ch <- 88
}
func person2() {
<-ch
print("world")
}
func person3() {
<-ch
print("zhangsan")
}
userPrintMoudleService.go | package printService
import (
"github.com/astaxie/beego/orm"
"new_erp_agent_by_go/helper/error_message"
"new_erp_agent_by_go/models/print"
"time"
)
func UpdateOrAddUserPrintMoudle(param *print.JccUserMould) error {
	// Check whether a record already exists
exist, err := print.ExistUserMoudle(param.CompanyId)
if err != nil {
		return error_message.ErrMessage("failed to query user print template", err)
}
if exist > 0 {
		// A record exists, so update it
id, err := print.QueryUserMoudleId(param.CompanyId)
if err != nil {
			return error_message.ErrMessage("failed to query user print template", err)
}
param.Id = id
param.UpdatedAt = time.Now().Unix()
db := orm.NewOrm()
_, err = print.UpdateUserMoudle(param, db)
if err != nil {
			return error_message.ErrMessage("failed to update user print template", err)
}
} else {
		// No record exists, so insert a new one
param.CreatedAt = time.Now().Unix()
_, err := print.AddUserPrintMoudle(param)
if err != nil {
			return error_message.ErrMessage("failed to add user print template", err)
}
}
return nil
}
example.py | # MIT License
#
# Copyright (c) 2020 Genesis Cloud Ltd. <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors:
# Oz Tiram <[email protected]>
"""
An example script to show how to start a Genesis Cloud GPU instance
with custom user data to install the NVIDIA GPU driver.
Grab your API key from the UI and save it in a safe place.
on the shell before running this script
$ export GENESISCLOUD_API_KEY=secretkey
"""
import os
import textwrap
import time
import subprocess as sp
from genesiscloud.client import Client, INSTANCE_TYPES
def simple_startup_script():
"""see the documentation of cloud init"""
return textwrap.dedent("""
#cloud-config
hostname: mytestubuntu
runcmd:
- [ "apt", "install", "-y", "vim" ]
""")
def get_startup_script():
return """#!/bin/bash
set -eux
IS_INSTALLED=false
NVIDIA_SHORT_VERSION=430
manual_fetch_install() {
__nvidia_full_version="430_430.50-0ubuntu2"
for i in $(seq 1 5)
do
echo "Connecting to http://archive.ubuntu.com site for $i time"
if curl -s --head --request GET http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-"${NVIDIA_SHORT_VERSION}" | grep "HTTP/1.1" > /dev/null ;
then
echo "Connected to http://archive.ubuntu.com. Start downloading and installing the NVIDIA driver..."
__tempdir="$(mktemp -d)"
apt-get install -y --no-install-recommends "linux-headers-$(uname -r)" dkms
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/nvidia-kernel-common-${__nvidia_full_version}_amd64.deb
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/nvidia-kernel-source-${__nvidia_full_version}_amd64.deb
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/nvidia-dkms-${__nvidia_full_version}_amd64.deb
dpkg -i "${__tempdir}"/nvidia-kernel-common-${__nvidia_full_version}_amd64.deb "${__tempdir}"/nvidia-kernel-source-${__nvidia_full_version}_amd64.deb "${__tempdir}"/nvidia-dkms-${__nvidia_full_version}_amd64.deb
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/nvidia-utils-${__nvidia_full_version}_amd64.deb
wget -P "${__tempdir}" http://archive.ubuntu.com/ubuntu/pool/restricted/n/nvidia-graphics-drivers-${NVIDIA_SHORT_VERSION}/libnvidia-compute-${__nvidia_full_version}_amd64.deb
dpkg -i "${__tempdir}"/nvidia-utils-${__nvidia_full_version}_amd64.deb "${__tempdir}"/libnvidia-compute-${__nvidia_full_version}_amd64.deb
IS_INSTALLED=true
rm -r "${__tempdir}"
break
fi
sleep 2
done
}
apt_fetch_install() {
add-apt-repository -s -u -y restricted
# Ubuntu has only a single version in the repository marked as "latest" of
# this series.
for _ in $(seq 1 5)
do
if apt-get install -y --no-install-recommends nvidia-utils-${NVIDIA_SHORT_VERSION} libnvidia-compute-${NVIDIA_SHORT_VERSION} \
nvidia-kernel-common-${NVIDIA_SHORT_VERSION} \
nvidia-kernel-source-${NVIDIA_SHORT_VERSION} \
nvidia-dkms-${NVIDIA_SHORT_VERSION} \
"linux-headers-$(uname -r)" dkms; then
IS_INSTALLED=true
break
fi
sleep 2
done
}
main() {
apt-get update
if grep xenial /etc/os-release; then
manual_fetch_install
else
apt_fetch_install
fi
# remove the module if it is inserted, blacklist it
rmmod nouveau || echo "nouveau kernel module not loaded ..."
echo "blacklist nouveau" > /etc/modprobe.d/nouveau.conf
# log insertion of the nvidia module
# this should always succeed on customer instances
if modprobe -vi nvidia; then
nvidia-smi
modinfo nvidia
gpu_found=true
else
gpu_found=false
fi
if [ "${IS_INSTALLED}" = true ]; then
echo "NVIDIA driver has been successfully installed."
else
echo "NVIDIA driver has NOT been installed."
fi
if [ "${gpu_found}" ]; then
echo "NVIDIA GPU device is found and ready"
else
echo "WARNING: NVIDIA GPU device is not found or is failed"
fi
}
main
"""
def create_instance():
    client = Client(os.getenv("GENESISCLOUD_API_KEY"))
    # before we continue to create objects, we check that we can communicate
    # with the API, if the connect method does not succeed it will throw an
    # error and the script will terminate
    if client.connect():
        pass
    # To create an instance you will need an SSH public key.
    # Upload it via the Web UI; you can then find it with:
    # replace this to match your key
    SSHKEYNAME = 'YourKeyName'
    # genesiscloud.client.Resource.find methods return generators - that is,
    # they are lazy per-default.
    sshkey_gen = client.SSHKeys.find({"name": SSHKEYNAME})
    sshkey = list(sshkey_gen)[0]
    # You need to tell the client which OS should be used for your instance
    # One can use a snapshot or a base-os to create a new instance
    ubuntu_18 = [image for image in client.Images.find({"name": 'Ubuntu 18.04'})][0]
    # choose the most simple instance type
    # to see the instance properties, use
    # list(INSTANCE_TYPES.items())[0]
    #
    # ('vcpu-4_memory-12g_disk-80g_nvidia1080ti-1',
    #  {'vCPUs': 4, 'RAM': 12, 'Disk': 80, 'GPU': 1})
    instance_type = list(INSTANCE_TYPES.keys())[0]
    # To create an instance use Instances.create
    # You must pass a ssh key to SSH into the machine. Currently, only one
    # SSH key is supported. If you need more use the command
    # `ssh-import-id-gh oz123`
    # it can fetch public key from github.com/oz123.keys
    # *Obviously* __replace__ my user name with YOURS or anyone you TRUST.
    # You should put this in the user_data script. You can add this in the
    # text block that the function `get_startup_script` returns.
    # NOTE:
    # you can also create an instance with SSH password enabled, but you should
    # prefer SSH key authentication. If you choose to use password, you should
    # not pass ssh_keys
    my_instance = client.Instances.create(
        name="demo",
        hostname="demo",
        ssh_keys=[sshkey.id],  # comment this to enable password
        image=ubuntu_18.id,
        type=instance_type,
        metadata={"startup_script":
                  simple_startup_script()},
        #password="yourSekretPassword#12!"
    )
    # my_instance is a dictionary containing information about the instance
    # that was just created.
    print(my_instance)
    while my_instance['status'] != 'active':
        time.sleep(1)
        my_instance = client.Instances.get(my_instance.id)
        print(f"{my_instance['status']}\r", end="")
    print("")
    # yay! the instance is active
    # let's ssh to the public IP of the instance
    public_ip = my_instance.public_ip
    print(f"The ssh address of the Instance is: {public_ip}")
    # wait for ssh to become available, this returns exit code other
    # than 0 as long as the ssh connection isn't available
    while sp.run(
            ("ssh -l ubuntu -o StrictHostKeyChecking=accept-new "
             "-o ConnectTimeout=50 "
             f"{public_ip} hostname"), shell=True).returncode:
        time.sleep(1)
    print("Congratulations! Your genesiscloud instance has been created!")
    print("You can ssh to it with:")
    print(f"ssh -l ubuntu {public_ip}")
    print("Some interesting commands to try at first:")
    print("cloud-init stats # if this is still running, NVIDIA driver is still"
          " installing")
    print("use the following to see cloud-init output in real time:")
    print("sudo tail -f /var/log/cloud-init-output.log")
    return my_instance
def destroy(instance_id):
# finally destory this instance, when you no longer need it
client = Client(os.getenv("GENESISCLOUD_API_KEY"))
client.Instances.delete(id=instance_id)
if __name__ == "__main__":
instance = create_instance()
instance_id = instance['id']
    # destroy(instance_id)
Degree.ts | import { firestore } from 'firebase-admin'
import LocalizedString from './LocalizedString'
import ManagedFirestoreDocument from './ManagedFirestoreDocument'
class Degree extends ManagedFirestoreDocument {
name: LocalizedString
regulations: number
semesters: number
constructor(id: string, name: LocalizedString, regulations: number, semesters: number) {
super(id)
this.name = name
this.regulations = regulations
this.semesters = semesters
}
static converter: firestore.FirestoreDataConverter<Degree> = {
toFirestore(course: Degree): firestore.DocumentData {
return course
},
fromFirestore(snapshot: firestore.QueryDocumentSnapshot): Degree {
const data = snapshot.data()
return new Degree(snapshot.id, data.name, data.regulations, data.semesters)
}
}
toJSON(): Record<string, unknown> {
return {
id: this.idURI,
name: this.name,
regulations: this.regulations,
			semesters: this.semesters
		}
	}
get idURI(): string {
return `/degrees/${this.id}`
}
}
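// Usage sketch (collection name illustrative): the converter above lets
// Firestore reads come back as typed Degree instances.
//
//     const snap = await db.collection('degrees').withConverter(Degree.converter).get();
//     const degrees = snap.docs.map((d) => d.data());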
export default Degree
JsPrettier.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import fnmatch
import os
from sys import version_info
from re import match
from subprocess import PIPE
from subprocess import Popen
import sublime
import sublime_plugin
if version_info[0] == 2:
# st-v2x with py-v2x
from jsprettier.const import IS_ST3
from jsprettier.const import PLUGIN_CMD_NAME
from jsprettier.const import PLUGIN_NAME
from jsprettier.const import PLUGIN_PATH
from jsprettier.const import PRETTIER_OPTION_CLI_MAP
from jsprettier.const import SETTINGS_FILENAME
from jsprettier.const import SYNTAX_ERROR_RE
from jsprettier.sthelper import debug_enabled
from jsprettier.sthelper import expand_var
from jsprettier.sthelper import get_setting
from jsprettier.sthelper import get_st_project_path
from jsprettier.sthelper import get_sub_setting
from jsprettier.sthelper import has_selection
from jsprettier.sthelper import is_file_auto_formattable
from jsprettier.sthelper import log_debug
from jsprettier.sthelper import log_error
from jsprettier.sthelper import log_warn
from jsprettier.sthelper import parse_additional_cli_args
from jsprettier.sthelper import resolve_node_path
from jsprettier.sthelper import resolve_prettier_cli_path
from jsprettier.sthelper import scroll_view_to
from jsprettier.sthelper import st_status_message
from jsprettier.util import contains
from jsprettier.util import find_prettier_config
from jsprettier.util import format_debug_message
from jsprettier.util import format_error_message
from jsprettier.util import get_cli_arg_value
from jsprettier.util import get_file_abs_dir
from jsprettier.util import get_proc_env
from jsprettier.util import in_source_file_path_or_project_root
from jsprettier.util import is_bool_str
from jsprettier.util import is_str_empty_or_whitespace_only
from jsprettier.util import is_str_none_or_empty
from jsprettier.util import is_windows
from jsprettier.util import list_to_str
from jsprettier.util import resolve_prettier_ignore_path
from jsprettier.util import trim_trailing_ws_and_lines
else:
# st3x with py-v3x
from .jsprettier.const import IS_ST3
from .jsprettier.const import PLUGIN_CMD_NAME
from .jsprettier.const import PLUGIN_NAME
from .jsprettier.const import PLUGIN_PATH
from .jsprettier.const import PRETTIER_OPTION_CLI_MAP
from .jsprettier.const import SETTINGS_FILENAME
from .jsprettier.const import SYNTAX_ERROR_RE
from .jsprettier.sthelper import debug_enabled
from .jsprettier.sthelper import expand_var
from .jsprettier.sthelper import get_setting
from .jsprettier.sthelper import get_st_project_path
from .jsprettier.sthelper import get_sub_setting
from .jsprettier.sthelper import has_selection
from .jsprettier.sthelper import is_file_auto_formattable
from .jsprettier.sthelper import log_debug
from .jsprettier.sthelper import log_error
from .jsprettier.sthelper import log_warn
from .jsprettier.sthelper import parse_additional_cli_args
from .jsprettier.sthelper import resolve_node_path
from .jsprettier.sthelper import resolve_prettier_cli_path
from .jsprettier.sthelper import scroll_view_to
from .jsprettier.sthelper import st_status_message
from .jsprettier.util import contains
from .jsprettier.util import find_prettier_config
from .jsprettier.util import format_debug_message
from .jsprettier.util import format_error_message
from .jsprettier.util import get_cli_arg_value
from .jsprettier.util import get_file_abs_dir
from .jsprettier.util import get_proc_env
from .jsprettier.util import in_source_file_path_or_project_root
from .jsprettier.util import is_bool_str
from .jsprettier.util import is_str_empty_or_whitespace_only
from .jsprettier.util import is_str_none_or_empty
from .jsprettier.util import is_windows
from .jsprettier.util import list_to_str
from .jsprettier.util import resolve_prettier_ignore_path
from .jsprettier.util import trim_trailing_ws_and_lines
class JsPrettierCommand(sublime_plugin.TextCommand):
_error_message = None
@property
def has_error(self):
if not self._error_message:
return False
return True
@property
def error_message(self):
return self._error_message
@error_message.setter
def error_message(self, message=None):
self._error_message = message
@property
def node_path(self):
return expand_var(self.view.window(), get_setting(self.view, 'node_path'))
@property
def tab_size(self):
|
@property
def use_tabs(self):
translate_tabs_to_spaces = self.view.settings().get('translate_tabs_to_spaces', True)
return not translate_tabs_to_spaces
@property
def allow_inline_formatting(self):
return get_setting(self.view, 'allow_inline_formatting', False)
@property
def disable_tab_width_auto_detection(self):
return get_setting(self.view, 'disable_tab_width_auto_detection', False)
@property
def additional_cli_args(self):
return get_setting(self.view, 'additional_cli_args', {})
@property
def max_file_size_limit(self):
return int(get_setting(self.view, 'max_file_size_limit', -1))
@property
def disable_prettier_cursor_offset(self):
return get_setting(self.view, 'disable_prettier_cursor_offset', False)
def exceeds_max_file_size_limit(self, source_file):
if self.max_file_size_limit == -1:
return False
if os.path.getsize(source_file) > self.max_file_size_limit:
return True
return False
def try_find_prettier_config(self, view):
source_file_dir = get_file_abs_dir(view.file_name())
st_project_path = get_st_project_path()
#
# 1. Check if defined in 'additional_cli_args':
additional_cli_arg_config = get_cli_arg_value(self.additional_cli_args, '--config')
if not is_str_none_or_empty(additional_cli_arg_config):
additional_cli_arg_config = os.path.normpath(additional_cli_arg_config)
if not os.path.isabs(additional_cli_arg_config):
additional_cli_arg_config = in_source_file_path_or_project_root(
source_file_dir, st_project_path, additional_cli_arg_config)
if additional_cli_arg_config and os.path.exists(additional_cli_arg_config):
log_debug(view, "Using Prettier config file defined in additional_cli_args '{0}'"
.format(additional_cli_arg_config), True)
return additional_cli_arg_config
log_warn("Could not find Prettier config file defined in additional_cli_args '{0}'"
.format(str(additional_cli_arg_config)), True)
return None
#
# 2. Attempt to automatically resolve:
resolved_prettier_config = find_prettier_config(source_file_dir)
if resolved_prettier_config and os.path.exists(resolved_prettier_config):
log_debug(view, "Found Prettier config file '{0}'".format(resolved_prettier_config))
return resolved_prettier_config
log_debug(view, "Could not resolve Prettier config file, will use options defined in Sublime Text.", True)
return None
def run(self, edit, save_file=False, auto_format_prettier_config_path=None):
view = self.view
source_file_path = view.file_name()
if source_file_path is None:
#
# Handle file must first be saved:
if IS_ST3:
# sublime text 3+: show dialog that includes a save option:
result = sublime.yes_no_cancel_dialog(
'{0}\n\n'
'File must first be Saved.'.format(PLUGIN_NAME),
'Save...', "Don't Save")
if result == sublime.DIALOG_YES:
view.run_command('save')
else:
# sublime text 2x: limited dialog support, just show error:
return sublime.error_message(
'{0} Error\n\n'
'File must first be saved.'.format(PLUGIN_NAME))
#
# set paths
if source_file_path is None:
# Re-check if file was saved, in case user canceled or closed the save dialog:
return st_status_message('Save canceled.')
#
# Max file size check
if self.exceeds_max_file_size_limit(source_file_path):
return st_status_message('Ignored - file too large to format (max_file_size_limit).')
source_file_dir = get_file_abs_dir(source_file_path)
st_project_path = str(get_st_project_path())
#
# cd to the active sublime text project dir:
os.chdir(st_project_path)
#
# if a `--config <path>` option is set in 'additional_cli_args',
# no action is necessary. otherwise, try to sniff the config
# file path:
parsed_additional_cli_args = parse_additional_cli_args(view.window(), self.additional_cli_args)
has_custom_config_defined = parsed_additional_cli_args.count('--config') > 0
has_no_config_defined = parsed_additional_cli_args.count('--no-config') > 0
has_config_precedence_defined = parsed_additional_cli_args.count('--config-precedence') > 0
prettier_config_path = None
# only try to resolve prettier config if '--no-config' or '--config' are NOT in 'additional_cli_args'
if not has_no_config_defined and not has_custom_config_defined:
if save_file and auto_format_prettier_config_path and os.path.exists(auto_format_prettier_config_path):
prettier_config_path = auto_format_prettier_config_path
if not prettier_config_path:
resolved_prettier_config = self.try_find_prettier_config(view)
if resolved_prettier_config and os.path.exists(resolved_prettier_config):
prettier_config_path = resolved_prettier_config
if not prettier_config_path or not os.path.exists(prettier_config_path):
prettier_config_path = ''
#
# Get node and prettier command paths:
node_path = self.node_path
prettier_cli_path = resolve_prettier_cli_path(view, PLUGIN_PATH, st_project_path)
if not prettier_cli_path:
log_error(
"Ensure 'prettier' is installed in your environment PATH, "
"or manually specify an absolute path in your '{0}' file "
"and the 'prettier_cli_path' setting.".format(SETTINGS_FILENAME))
return st_status_message('Prettier not found. See console for more details.')
# try to find a '.prettierignore' file path in the project root
# if the '--ignore-path' option isn't specified in 'additional_cli_args':
prettier_ignore_filepath = None
if not parsed_additional_cli_args.count('--ignore-path') > 0:
prettier_ignore_filepath = resolve_prettier_ignore_path(source_file_dir, st_project_path)
#
# Parse prettier options:
prettier_options = self.parse_prettier_options(
view, parsed_additional_cli_args, prettier_config_path,
has_custom_config_defined, has_no_config_defined,
has_config_precedence_defined, prettier_ignore_filepath,
source_file_path)
#
# Format entire file:
if not has_selection(view) or save_file is True:
region = sublime.Region(0, view.size())
source_text = view.substr(region)
if is_str_empty_or_whitespace_only(source_text):
return st_status_message('Nothing to format.')
result = self.format_code(
source_text, node_path, prettier_cli_path, prettier_options, view,
provide_cursor=self.disable_prettier_cursor_offset is False, is_selection=False)
if self.has_error:
self.format_console_error()
return self.show_status_bar_error()
new_cursor = None
if self.disable_prettier_cursor_offset is True:
prettified_text = result
else:
prettified_text, new_cursor = result
# sanity check to ensure textual content was returned from cmd
# stdout, not necessarily caught in OSError try/catch
# exception handler
if is_str_empty_or_whitespace_only(prettified_text):
self.error_message = 'No content returned by stdout'
return self.show_status_bar_error()
source_modified = False
prettified_text = trim_trailing_ws_and_lines(prettified_text)
# Store viewport position to prevent screen jumping (#171):
previous_position = view.viewport_position()
if prettified_text:
if prettified_text == trim_trailing_ws_and_lines(source_text):
if self.ensure_newline_at_eof(view, edit) is True:
# no formatting changes applied, however, a line
# break was needed/inserted at the end of the file:
source_modified = True
else:
view.replace(edit, region, prettified_text)
self.ensure_newline_at_eof(view, edit)
source_modified = True
else:
view.replace(edit, region, prettified_text)
self.ensure_newline_at_eof(view, edit)
source_modified = True
# Restore viewport position to prevent screen jumping (#171)
view.set_viewport_position((0, 0), False)
view.set_viewport_position(previous_position, False)
if source_modified:
if not self.disable_prettier_cursor_offset and new_cursor:
view.sel().clear()
view.sel().add(sublime.Region(new_cursor))
            # re-run indentation detection
view.run_command('detect_indentation')
st_status_message('File formatted.')
else:
st_status_message('File already formatted.')
return
#
# Format each selection:
atleast_one_selection_formatted = False
for region in view.sel():
if region.empty():
continue
source_text = view.substr(region)
if is_str_empty_or_whitespace_only(source_text):
st_status_message('Nothing to format in selection.')
continue
prettified_text = self.format_code(
source_text, node_path, prettier_cli_path, prettier_options, view,
provide_cursor=False, is_selection=True)
if self.has_error:
self.format_console_error()
return self.show_status_bar_error()
# sanity check to ensure textual content was returned from cmd
# stdout, not necessarily caught in OSError try/catch
# exception handler
if is_str_empty_or_whitespace_only(prettified_text):
self.error_message = 'No content returned by stdout'
return self.show_status_bar_error()
prettified_text = trim_trailing_ws_and_lines(prettified_text)
if prettified_text and prettified_text == trim_trailing_ws_and_lines(source_text):
st_status_message('Selection(s) already formatted.')
else:
atleast_one_selection_formatted = True
view.replace(edit, region, prettified_text)
        # re-run indentation detection
if atleast_one_selection_formatted:
view.run_command('detect_indentation')
st_status_message('Selection(s) formatted.')
def format_code(self, source, node_path, prettier_cli_path, prettier_options, view, provide_cursor=False,
is_selection=False):
self._error_message = None
cursor = None
if provide_cursor:
cursor = view.sel()[0].a
prettier_options += ['--cursor-offset', str(cursor)]
if is_windows() and is_str_none_or_empty(node_path) and prettier_cli_path.endswith(".js"):
            # on windows, when a custom 'node_path' is not specified and 'prettier_cli_path' is
            # presumably a .js script (e.g. 'bin-prettier.js')...
            # automatically prepend the environment-detected node[.exe|.cmd] path to
            # the generated command (see #146 --no-bin-links).
cmd = [resolve_node_path(view.file_name())] \
+ [prettier_cli_path] \
+ ['--stdin'] \
+ prettier_options
elif is_str_none_or_empty(node_path):
cmd = [prettier_cli_path] \
+ ['--stdin'] \
+ prettier_options
else:
cmd = [node_path] \
+ [prettier_cli_path] \
+ ['--stdin'] \
+ prettier_options
try:
format_debug_message('Prettier CLI Command', list_to_str(cmd), debug_enabled(view))
proc = Popen(
cmd, stdin=PIPE,
stderr=PIPE,
stdout=PIPE,
env=get_proc_env(),
shell=is_windows())
stdout, stderr = proc.communicate(input=source.encode('utf-8'))
if proc.returncode != 0:
error_output = stderr.decode('utf-8')
self.error_message = format_error_message(error_output, str(proc.returncode))
# detect and scroll to 'Syntax Errors' (if not formatting a selection):
if not is_selection:
_, _, error_line, error_col = self.has_syntax_error(error_output)
if error_line != -1 and error_col != -1:
scroll_view_to(view, error_line, error_col)
return None
new_cursor = None
if stderr:
stderr_output = stderr.decode('utf-8')
if provide_cursor:
stderr_lines = stderr_output.splitlines()
stderr_output, new_cursor = '\n'.join(stderr_lines[:-1]), stderr_lines[-1]
# allow warnings to pass-through
if stderr_output:
print(format_error_message(stderr_output, str(proc.returncode)))
if provide_cursor:
if not new_cursor and cursor is not None:
new_cursor = cursor
try:
new_cursor = int(new_cursor)
except ValueError:
log_warn(view, 'Adjusted cursor position could not be parsed (int).')
return stdout.decode('utf-8'), None
return stdout.decode('utf-8'), new_cursor
return stdout.decode('utf-8')
except OSError as ex:
sublime.error_message('{0} - {1}'.format(PLUGIN_NAME, ex))
raise
def should_show_plugin(self):
view = self.view
if not view.window() or view.is_scratch() or view.is_read_only():
return False
if self.allow_inline_formatting is True:
return True
if self.is_source_js(view) is True:
return True
if self.is_css(view) is True:
return True
if self.is_angular_html(view) is True:
return True
if self.is_mdx(view) is True:
return True
if self.is_markdown(view) is True:
return True
if self.is_yaml(view) is True:
return True
if self.is_html(view) is True:
return True
if self.is_php(view) is True:
return True
if is_file_auto_formattable(view) is True:
return True
return False
def is_visible(self):
return self.should_show_plugin()
def is_enabled(self):
return self.should_show_plugin()
def parse_prettier_options(self, view, parsed_additional_cli_args,
prettier_config_path, has_custom_config_defined,
has_no_config_defined, has_config_precedence_defined,
prettier_ignore_filepath, file_name):
prettier_options = []
#
# Check for prettier config file:
prettier_config_exists = not is_str_none_or_empty(prettier_config_path)
if prettier_config_exists:
if not has_custom_config_defined:
# only add the '--config <path>' option if it's not
# already specified as an additional cli arg:
prettier_options.append('--config')
prettier_options.append(prettier_config_path)
# set config-precedence to 'cli-override' if
# the key wasn't defined in additional_cli_args:
if not has_config_precedence_defined:
prettier_options.append('--config-precedence')
prettier_options.append('cli-override')
else:
if not has_no_config_defined and not has_custom_config_defined:
# only add the '--no-config' option if it's not
# already specified as an additional cli arg:
prettier_options.append('--no-config')
#
# Iterate over option map:
for mapping in PRETTIER_OPTION_CLI_MAP:
option_name = mapping['option']
cli_option_name = mapping['cli']
option_value = get_sub_setting(view, option_name)
if option_name == 'parser':
if self.is_typescript(view):
prettier_options.append(cli_option_name)
prettier_options.append('typescript')
continue
elif self.is_package_or_composer_json(view):
prettier_options.append(cli_option_name)
prettier_options.append('json-stringify')
continue
elif self.is_json(view):
prettier_options.append(cli_option_name)
prettier_options.append('json')
continue
elif self.is_graphql(view):
prettier_options.append(cli_option_name)
prettier_options.append('graphql')
continue
elif self.is_mdx(view):
prettier_options.append(cli_option_name)
prettier_options.append('mdx')
continue
elif self.is_markdown(view):
prettier_options.append(cli_option_name)
prettier_options.append('markdown')
continue
elif self.is_yaml(view):
prettier_options.append(cli_option_name)
prettier_options.append('yaml')
continue
elif self.is_vue(view):
prettier_options.append(cli_option_name)
prettier_options.append('vue')
continue
elif self.is_svelte(view):
prettier_options.append(cli_option_name)
prettier_options.append('svelte')
continue
elif self.is_angular_html(view):
prettier_options.append(cli_option_name)
prettier_options.append('angular')
continue
elif self.is_source_js(view) or self.is_es_module(view):
prettier_options.append(cli_option_name)
prettier_options.append('babel')
continue
elif self.is_css(view):
prettier_options.append(cli_option_name)
prettier_options.append('css')
continue
elif self.is_html(view):
prettier_options.append(cli_option_name)
prettier_options.append('html')
continue
elif self.is_php(view):
prettier_options.append(cli_option_name)
prettier_options.append('php')
continue
else:
# parser couldn't be detected... let Prettier try to infer it via --stdin-filepath:
continue
if not prettier_config_exists and not has_custom_config_defined:
# add the cli args or the respective defaults:
if option_value is None or str(option_value) == '':
option_value = mapping['default']
option_value = str(option_value).strip()
# special handling for "tabWidth":
if option_name == 'tabWidth':
has_additional_cli_for_tab_width = parsed_additional_cli_args.count('--tab-width') > 0
if not has_additional_cli_for_tab_width:
if self.disable_tab_width_auto_detection is False:
# set `tabWidth` from st "tab_size" setting (default behavior)
prettier_options.append(cli_option_name)
prettier_options.append(str(self.tab_size))
else:
if not has_additional_cli_for_tab_width:
prettier_options.append(cli_option_name)
prettier_options.append(option_value)
else:
if not has_additional_cli_for_tab_width:
prettier_options.append(cli_option_name)
prettier_options.append(option_value)
continue
# handle bool types:
if is_bool_str(option_value):
option_value = option_value.lower()
# append the opt/val:
prettier_options.append(cli_option_name)
prettier_options.append(option_value)
# set the `useTabs` option based on the current view:
prettier_options.append('--use-tabs')
prettier_options.append(str(self.use_tabs).lower())
if prettier_ignore_filepath is not None:
prettier_options.append('--ignore-path')
prettier_options.append(prettier_ignore_filepath)
        # add the current file name to `--stdin-filepath`, only when
        # the current file being edited is NOT html, in order to
        # detect and format css/js selection(s) within html files:
# if not self.is_html(view):
prettier_options.append('--stdin-filepath')
prettier_options.append(file_name)
if debug_enabled(view):
if not parsed_additional_cli_args.count('--loglevel') > 0:
# set prettier's log level to debug, when the plug-in's debug setting is enabled:
prettier_options.append('--loglevel')
prettier_options.append('debug')
# Append any additional specified arguments:
prettier_options.extend(parsed_additional_cli_args)
return prettier_options
def format_console_error(self):
print('\n------------------\n {0} ERROR \n------------------\n'
'{1}'.format(PLUGIN_NAME, self.error_message))
@staticmethod
def has_syntax_error(error_output):
error = None
message = ''
line = -1
col = -1
match_groups = SYNTAX_ERROR_RE.search(error_output)
if match_groups:
error = match_groups.group('error')
message = match_groups.group('message')
line = int(match_groups.group('line'))
col = int(match_groups.group('col'))
return error, message, line, col
@staticmethod
def is_source_js(view):
scopename = view.scope_name(view.sel()[0].b)
if scopename.startswith('source.js') or contains('source.js.embedded.html', scopename) \
or contains('source.css.embedded.js', scopename):
return True
return False
@staticmethod
def is_css(view):
filename = view.file_name()
if not filename:
return False
scopename = view.scope_name(view.sel()[0].b)
if scopename.startswith('source.css') or filename.endswith('.css') \
or contains('meta.selector.css', scopename) or contains('source.css.embedded.html', scopename):
return True
if scopename.startswith('source.scss') or filename.endswith('.scss'):
return True
if scopename.startswith('source.less') or filename.endswith('.less'):
return True
return False
@staticmethod
def is_typescript(view):
filename = view.file_name()
if not filename:
return False
scopename = view.scope_name(0)
if scopename.startswith('source.ts') or filename.endswith('.ts'):
return True
if scopename.startswith('source.tsx') or filename.endswith('.tsx'):
return True
return False
@staticmethod
def is_json(view):
filename = view.file_name()
if not filename:
return False
scopename = view.scope_name(0)
if scopename.startswith('source.json') or filename.endswith('.json'):
return True
return False
@staticmethod
def is_package_or_composer_json(view):
filename = view.file_name()
if not filename:
return False
filename = os.path.basename(filename)
if filename == 'package.json' or filename == 'composer.json':
return True
return False
@staticmethod
def is_es_module(view):
filename = view.file_name()
if not filename:
return False
if filename.endswith('.mjs'):
return True
return False
@staticmethod
def is_graphql(view):
filename = view.file_name()
if not filename:
return False
if filename.endswith('.graphql') or filename.endswith('.gql'):
return True
return False
@staticmethod
def is_html(view):
filename = view.file_name()
if not filename:
return False
scopename = view.scope_name(0)
if scopename.startswith('text.html.markdown') \
or scopename.startswith('text.html.vue') \
or filename.endswith('component.html'):
return False
if scopename.startswith('text.html') or filename.endswith('.html') or filename.endswith('.htm'):
return True
return False
@staticmethod
def is_markdown(view):
filename = view.file_name()
if not filename:
return False
scopename = view.scope_name(0)
if scopename.startswith('text.html.markdown') or filename.endswith('.md'):
return True
return False
@staticmethod
def is_mdx(view):
filename = view.file_name()
if not filename:
return False
if filename.endswith('.mdx'):
return True
return False
@staticmethod
def is_yaml(view):
filename = view.file_name()
if not filename:
return False
scopename = view.scope_name(0)
if scopename.startswith('source.yaml') or filename.endswith('.yml'):
return True
return False
@staticmethod
def is_vue(view):
filename = view.file_name()
if not filename:
return False
scopename = view.scope_name(0)
if scopename.startswith('text.html.vue') or filename.endswith('.vue'):
return True
return False
@staticmethod
def is_svelte(view):
filename = view.file_name()
if not filename:
return False
scopename = view.scope_name(0)
if scopename.startswith('text.html.svelte') or filename.endswith('.svelte'):
return True
return False
@staticmethod
def is_angular_html(view):
filename = view.file_name()
if not filename:
return False
if filename.endswith('.component.html'):
return True
return False
@staticmethod
def is_php(view):
filename = view.file_name()
if not filename:
return False
scopename = view.scope_name(0)
if contains('source.php', scopename) or filename.endswith('.php'):
return True
return False
@staticmethod
def show_status_bar_error():
st_status_message('Format failed! Open the console window to inspect errors.')
@staticmethod
def ensure_newline_at_eof(view, edit):
new_line_inserted = False
if view.size() > 0 and view.substr(view.size() - 1) != '\n':
new_line_inserted = True
view.insert(edit, view.size(), '\n')
return new_line_inserted
class CommandOnSave(sublime_plugin.EventListener):
def on_pre_save(self, view):
if self.is_allowed(view) and self.is_enabled(view) and self.is_excluded(view):
if self.get_auto_format_on_save_requires_prettier_config(view) is True:
resolved_prettier_config = self.try_find_prettier_config(view)
if not resolved_prettier_config:
return
view.run_command(PLUGIN_CMD_NAME, {
'save_file': True,
'auto_format_prettier_config_path': resolved_prettier_config
})
return
else:
view.run_command(PLUGIN_CMD_NAME, {
'save_file': True,
'auto_format_prettier_config_path': None
})
def try_find_prettier_config(self, view):
source_file_dir = get_file_abs_dir(view.file_name())
st_project_path = get_st_project_path()
#
# 1. Check if defined in 'additional_cli_args':
additional_cli_arg_config = get_cli_arg_value(self.get_additional_cli_args(view), '--config')
if not is_str_none_or_empty(additional_cli_arg_config):
additional_cli_arg_config = os.path.normpath(additional_cli_arg_config)
if not os.path.isabs(additional_cli_arg_config):
additional_cli_arg_config = in_source_file_path_or_project_root(
source_file_dir, st_project_path, additional_cli_arg_config)
if additional_cli_arg_config and os.path.exists(additional_cli_arg_config):
return additional_cli_arg_config
return None
#
# 2. Attempt to automatically resolve:
resolved_prettier_config = find_prettier_config(source_file_dir)
if resolved_prettier_config and os.path.exists(resolved_prettier_config):
return resolved_prettier_config
return None
@staticmethod
def get_auto_format_on_save(view):
return bool(get_setting(view, 'auto_format_on_save', False))
@staticmethod
def get_auto_format_on_save_excludes(view):
return get_setting(view, 'auto_format_on_save_excludes', [])
@staticmethod
def get_custom_file_extensions(view):
return get_setting(view, 'custom_file_extensions', [])
@staticmethod
def get_auto_format_on_save_requires_prettier_config(view):
return bool(get_setting(view, 'auto_format_on_save_requires_prettier_config', False))
@staticmethod
def is_allowed(view):
return is_file_auto_formattable(view)
@staticmethod
def get_additional_cli_args(view):
return dict(get_setting(view, 'additional_cli_args', {}))
def is_enabled(self, view):
return self.get_auto_format_on_save(view)
    def is_excluded(self, view):
        # NOTE: despite its name, this returns True when the file is NOT
        # excluded (formatting may proceed) and False as soon as an
        # 'auto_format_on_save_excludes' pattern matches the file path.
filename = view.file_name()
if not filename:
return False
excludes = self.get_auto_format_on_save_excludes(view)
regmatch_ef = [fnmatch.translate(os.path.normpath(pattern)) for pattern in excludes]
for regmatch in regmatch_ef:
if match(regmatch, filename):
return False
return True
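
A standalone sketch of the glob matching that `is_excluded` performs above: each pattern from 'auto_format_on_save_excludes' is normalized, translated to a regular expression with `fnmatch.translate`, and matched against the absolute file path. Stdlib only, runnable on its own.

import fnmatch
import os
import re

def path_matches_excludes(filename, excludes):
    # True when any exclude glob matches; note the plugin's is_excluded()
    # above returns the inverse (True means "not excluded, go ahead").
    patterns = [fnmatch.translate(os.path.normpath(p)) for p in excludes]
    return any(re.match(p, filename) for p in patterns)

# On POSIX: path_matches_excludes('/proj/dist/bundle.js', ['*/dist/*']) -> True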
| return int(self.view.settings().get('tab_size', 2)) |
Resistor.ts | import {ResistorColorEntry} from './ResistorColorEntry';
export class | {
// Name, Color, Multiplier, Value, Tolerance
static readonly BLACK = new ResistorColorEntry('Black', '#000000', 1, 0);
static readonly BROWN = new ResistorColorEntry('Brown', '#8B4513', 10, 1, 1);
static readonly RED = new ResistorColorEntry('Red', '#FF0000', 100, 2, 2);
static readonly ORANGE = new ResistorColorEntry('Orange', '#EE7420', 1000, 3);
static readonly YELLOW = new ResistorColorEntry(
'Yellow',
'#FFFF00',
10000,
4
);
static readonly GREEN = new ResistorColorEntry(
'Green',
'#008000',
100000,
5,
0.5
);
static readonly BLUE = new ResistorColorEntry(
'Blue',
'#0000FF',
1000000,
6,
0.25
);
static readonly VIOLET = new ResistorColorEntry(
'Violet',
'#800080',
10000000,
7,
0.1
);
static readonly GREY = new ResistorColorEntry(
'Grey',
'#808080',
100000000,
8
);
static readonly WHITE = new ResistorColorEntry(
'White',
'#FFFFFF',
1000000000,
9
);
static readonly GOLD = new ResistorColorEntry(
'Gold',
'#AB8D3F',
0.1,
undefined,
5
);
static readonly SILVER = new ResistorColorEntry(
'Silver',
'#C0C0C0',
0.01,
undefined,
10
);
static readonly colorTable: ResistorColorEntry[] = [
Resistor.BLACK,
Resistor.BROWN,
Resistor.RED,
Resistor.ORANGE,
Resistor.YELLOW,
Resistor.GREEN,
Resistor.BLUE,
Resistor.VIOLET,
Resistor.GREY,
Resistor.WHITE,
Resistor.GOLD,
Resistor.SILVER,
];
static readonly INVALID_RESISTOR = -1;
/**
* getValue assumes there is no tolerance band, as tolerance is not part of
* the value calculation
*/
static getValue(colors: ResistorColorEntry[]) {
if (colors.length < 3 || colors.length > 4) {
throw new RangeError('Invalid resistor size');
}
// Should we throw an error if the first color is black? In resistors, it's
// technically not allowed, but in puzzle events who knows what they'll do.
// The math works out fine (it's just a leading zero), so for now it's
// allowed.
    // Iterate through all but the final band (the multiplier) and extract the
// values
let value = 0;
for (let i = 0; i < colors.length - 1; ++i) {
const currentColor = colors[i];
if (currentColor.value === undefined) {
return Resistor.INVALID_RESISTOR;
}
value *= 10;
value += currentColor.value;
}
return Resistor.applyMultiplier(
value,
colors[colors.length - 1].multiplier
);
}
static getDisplayValue(value: number) {
if (value >= 1000000000) {
return `${value / 1000000000}G`;
} else if (value >= 1000000) {
return `${value / 1000000}M`;
} else if (value >= 1000) {
return `${value / 1000}k`;
} else {
return value.toString();
}
}
  private static applyMultiplier(value: number, multiplier: number) {
    if (multiplier >= 1) {
      return value * multiplier;
    } else if (multiplier > 0) {
      // Avoid floating point multiplication issues
      return value / (1 / multiplier);
    } else {
      throw new RangeError('Invalid multiplier');
}
}
}
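
The digit loop in `getValue` above is ordinary positional arithmetic: every band except the last contributes a decimal digit, and the last band scales the result. A worked example in Python; the digit/multiplier tables are a hedged stand-in that mirrors the values in `colorTable` above.

# Worked example of Resistor.getValue's arithmetic.
DIGIT = {'black': 0, 'brown': 1, 'red': 2, 'orange': 3, 'yellow': 4,
         'green': 5, 'blue': 6, 'violet': 7, 'grey': 8, 'white': 9}
MULTIPLIER = {**{c: 10 ** v for c, v in DIGIT.items()}, 'gold': 0.1, 'silver': 0.01}

def resistor_value(bands):
    value = 0
    for band in bands[:-1]:        # all but the final (multiplier) band
        value = value * 10 + DIGIT[band]
    return value * MULTIPLIER[bands[-1]]

# red-violet = 27, yellow multiplier = 10**4, i.e. 270 kOhm
assert resistor_value(['red', 'violet', 'yellow']) == 270_000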
| Resistor |
main.rs | mod chunk;
mod compiler;
mod object;
mod opcode;
mod scanner;
mod value;
mod vm;
use std::env;
use std::fs;
use std::io::{self, BufRead, Write};
use std::process;
use vm::VMError;
use vm::VM;
fn main() |
fn repl() {
let mut vm = VM::new();
let stdin = io::stdin();
loop {
print!("> ");
io::stdout().flush().ok().expect("Could not flush stdout");
let mut line = String::new();
stdin
.lock()
.read_line(&mut line)
.expect("Could not read a line from stdin");
match vm.interpret(line.as_ref()) {
Ok(()) => (),
Err(e) => {
println!(
"Error in line \n\t{}\n{}",
line,
match e {
                        VMError::Runtime => "A runtime error occurred",
                        VMError::Compile => "An error related to compiling your code occurred",
}
);
}
}
}
}
fn run_file(file: &str) {
fn interpret(file: &str) -> Result<(), VMError> {
let mut vm = VM::new();
let contents =
fs::read_to_string(file).expect(format!("Could not open file {}\n", file).as_ref());
vm.interpret(contents.as_ref())
}
match interpret(file) {
Ok(()) => (),
Err(e) => {
println!("Error running your file {}", e);
process::exit(3);
}
}
}
| {
let args: Vec<String> = env::args().collect();
if args.len() == 1 {
repl();
} else if args.len() == 2 {
run_file(&args[1].as_ref());
} else {
println!("Usage: kurisu [path]");
process::exit(1);
}
} |
DummySmartChargingIntegration.ts | /* eslint-disable @typescript-eslint/no-unused-vars */
import { ChargingProfile } from '../../../types/ChargingProfile';
import SiteArea from '../../../types/SiteArea';
import SmartChargingIntegration from '../SmartChargingIntegration';
import Tenant from '../../../types/Tenant';
export default class | <SmartChargingSetting> extends SmartChargingIntegration<SmartChargingSetting> {
constructor(tenant: Tenant, setting: SmartChargingSetting) {
super(tenant, setting);
}
public async buildChargingProfiles(siteArea: SiteArea): Promise<ChargingProfile[]> {
return null;
}
public async checkConnection() {
}
}
| DummySapSmartChargingIntegration |
zlib.rs | pub(crate) use zlib::make_module;
#[pymodule]
mod zlib {
use crate::common::lock::PyMutex;
use crate::vm::{
builtins::{PyBaseExceptionRef, PyBytes, PyBytesRef, PyIntRef, PyTypeRef},
function::{ArgBytesLike, OptionalArg, OptionalOption},
PyPayload, PyResult, VirtualMachine,
};
use adler32::RollingAdler32 as Adler32;
use crossbeam_utils::atomic::AtomicCell;
use flate2::{
write::ZlibEncoder, Compress, Compression, Decompress, FlushCompress, FlushDecompress,
Status,
};
use std::io::Write;
#[cfg(not(feature = "zlib"))]
mod constants {
pub const Z_NO_COMPRESSION: i32 = 0;
pub const Z_BEST_COMPRESSION: i32 = 9;
pub const Z_BEST_SPEED: i32 = 1;
pub const Z_DEFAULT_COMPRESSION: i32 = -1;
pub const Z_NO_FLUSH: i32 = 0;
pub const Z_PARTIAL_FLUSH: i32 = 1;
pub const Z_SYNC_FLUSH: i32 = 2;
pub const Z_FULL_FLUSH: i32 = 3;
// not sure what the value here means, but it's the only compression method zlibmodule
// supports, so it doesn't really matter
pub const Z_DEFLATED: i32 = 8;
}
#[cfg(feature = "zlib")]
use libz_sys as constants;
#[pyattr]
use constants::{
Z_BEST_COMPRESSION, Z_BEST_SPEED, Z_DEFAULT_COMPRESSION, Z_DEFLATED as DEFLATED,
Z_FULL_FLUSH, Z_NO_COMPRESSION, Z_NO_FLUSH, Z_PARTIAL_FLUSH, Z_SYNC_FLUSH,
};
#[cfg(feature = "zlib")]
#[pyattr]
use libz_sys::{
Z_BLOCK, Z_DEFAULT_STRATEGY, Z_FILTERED, Z_FINISH, Z_FIXED, Z_HUFFMAN_ONLY, Z_RLE, Z_TREES,
};
// copied from zlibmodule.c (commit 530f506ac91338)
#[pyattr]
const MAX_WBITS: u8 = 15;
#[pyattr]
const DEF_BUF_SIZE: usize = 16 * 1024;
#[pyattr]
const DEF_MEM_LEVEL: u8 = 8;
#[pyattr(once)]
fn error(vm: &VirtualMachine) -> PyTypeRef {
vm.ctx.new_exception_type(
"zlib",
"error",
Some(vec![vm.ctx.exceptions.exception_type.clone()]),
)
}
/// Compute an Adler-32 checksum of data.
#[pyfunction]
fn adler32(data: ArgBytesLike, begin_state: OptionalArg<PyIntRef>) -> u32 {
data.with_ref(|data| {
let begin_state = begin_state.map_or(1, |i| i.as_u32_mask());
let mut hasher = Adler32::from_value(begin_state);
hasher.update_buffer(data);
hasher.hash()
})
}
/// Compute a CRC-32 checksum of data.
#[pyfunction]
fn crc32(data: ArgBytesLike, begin_state: OptionalArg<PyIntRef>) -> u32 {
crate::binascii::crc32(data, begin_state)
}
fn compression_from_int(level: Option<i32>) -> Option<Compression> {
match level.unwrap_or(Z_DEFAULT_COMPRESSION) {
Z_DEFAULT_COMPRESSION => Some(Compression::default()),
valid_level @ Z_NO_COMPRESSION..=Z_BEST_COMPRESSION => {
Some(Compression::new(valid_level as u32))
}
_ => None,
}
}
#[derive(FromArgs)]
struct PyFuncCompressArgs {
#[pyarg(positional)]
data: ArgBytesLike,
#[pyarg(any, optional)]
level: OptionalOption<i32>,
}
/// Returns a bytes object containing compressed data.
#[pyfunction]
fn compress(args: PyFuncCompressArgs, vm: &VirtualMachine) -> PyResult<PyBytesRef> {
let data = args.data;
let level = args.level;
let compression = compression_from_int(level.flatten())
.ok_or_else(|| new_zlib_error("Bad compression level", vm))?;
let mut encoder = ZlibEncoder::new(Vec::new(), compression);
data.with_ref(|input_bytes| encoder.write_all(input_bytes).unwrap());
let encoded_bytes = encoder.finish().unwrap();
Ok(vm.ctx.new_bytes(encoded_bytes))
}
enum InitOptions {
Standard {
header: bool,
// [De]Compress::new_with_window_bits is only enabled for zlib; miniz_oxide doesn't
// support wbits (yet?)
#[cfg(feature = "zlib")]
wbits: u8,
},
#[cfg(feature = "zlib")]
Gzip { wbits: u8 },
}
impl InitOptions {
fn decompress(self) -> Decompress {
match self {
#[cfg(not(feature = "zlib"))]
Self::Standard { header } => Decompress::new(header),
#[cfg(feature = "zlib")]
Self::Standard { header, wbits } => Decompress::new_with_window_bits(header, wbits),
#[cfg(feature = "zlib")]
Self::Gzip { wbits } => Decompress::new_gzip(wbits),
}
}
fn compress(self, level: Compression) -> Compress {
match self {
#[cfg(not(feature = "zlib"))]
Self::Standard { header } => Compress::new(level, header),
#[cfg(feature = "zlib")]
Self::Standard { header, wbits } => {
Compress::new_with_window_bits(level, header, wbits)
}
#[cfg(feature = "zlib")]
Self::Gzip { wbits } => Compress::new_gzip(level, wbits),
}
}
}
fn header_from_wbits(wbits: OptionalArg<i8>, vm: &VirtualMachine) -> PyResult<InitOptions> {
let wbits = wbits.unwrap_or(MAX_WBITS as i8);
let header = wbits > 0;
let wbits = wbits.unsigned_abs();
match wbits {
9..=15 => Ok(InitOptions::Standard {
header,
#[cfg(feature = "zlib")]
wbits,
}),
#[cfg(feature = "zlib")]
25..=31 => Ok(InitOptions::Gzip { wbits: wbits - 16 }),
_ => Err(vm.new_value_error("Invalid initialization option".to_owned())),
}
}
fn _decompress(
mut data: &[u8],
d: &mut Decompress,
bufsize: usize,
max_length: Option<usize>,
is_flush: bool,
vm: &VirtualMachine,
) -> PyResult<(Vec<u8>, bool)> {
if data.is_empty() {
return Ok((Vec::new(), true));
}
let mut buf = Vec::new();
loop {
let final_chunk = data.len() <= CHUNKSIZE;
let chunk = if final_chunk {
data
} else {
&data[..CHUNKSIZE]
};
// if this is the final chunk, finish it
let flush = if is_flush {
if final_chunk {
FlushDecompress::Finish
} else {
FlushDecompress::None
}
} else {
FlushDecompress::Sync
};
loop {
let additional = if let Some(max_length) = max_length {
std::cmp::min(bufsize, max_length - buf.capacity())
} else {
bufsize
};
if additional == 0 {
return Ok((buf, false));
}
buf.reserve_exact(additional);
let prev_in = d.total_in();
let status = d
.decompress_vec(chunk, &mut buf, flush)
.map_err(|_| new_zlib_error("invalid input data", vm))?;
let consumed = d.total_in() - prev_in;
data = &data[consumed as usize..];
let stream_end = status == Status::StreamEnd;
if stream_end || data.is_empty() {
// we've reached the end of the stream, we're done
buf.shrink_to_fit();
return Ok((buf, stream_end));
} else if !chunk.is_empty() && consumed == 0 {
// we're gonna need a bigger buffer
continue;
} else {
// next chunk
break;
}
}
}
}
#[derive(FromArgs)]
struct PyFuncDecompressArgs {
#[pyarg(positional)]
data: ArgBytesLike,
#[pyarg(any, optional)]
wbits: OptionalArg<i8>,
#[pyarg(any, optional)]
bufsize: OptionalArg<usize>,
}
/// Returns a bytes object containing the uncompressed data.
#[pyfunction]
fn decompress(arg: PyFuncDecompressArgs, vm: &VirtualMachine) -> PyResult<Vec<u8>> {
let data = arg.data;
let wbits = arg.wbits;
let bufsize = arg.bufsize;
data.with_ref(|data| {
let bufsize = bufsize.unwrap_or(DEF_BUF_SIZE);
let mut d = header_from_wbits(wbits, vm)?.decompress();
_decompress(data, &mut d, bufsize, None, false, vm).and_then(|(buf, stream_end)| { | Ok(buf)
} else {
Err(new_zlib_error(
"Error -5 while decompressing data: incomplete or truncated stream",
vm,
))
}
})
})
}
#[pyfunction]
fn decompressobj(args: DecompressobjArgs, vm: &VirtualMachine) -> PyResult<PyDecompress> {
#[allow(unused_mut)]
let mut decompress = header_from_wbits(args.wbits, vm)?.decompress();
#[cfg(feature = "zlib")]
if let OptionalArg::Present(dict) = args.zdict {
dict.with_ref(|d| decompress.set_dictionary(d).unwrap());
}
Ok(PyDecompress {
decompress: PyMutex::new(decompress),
eof: AtomicCell::new(false),
unused_data: PyMutex::new(PyBytes::from(vec![]).into_ref(vm)),
unconsumed_tail: PyMutex::new(PyBytes::from(vec![]).into_ref(vm)),
})
}
#[pyattr]
#[pyclass(name = "Decompress")]
#[derive(Debug, PyPayload)]
struct PyDecompress {
decompress: PyMutex<Decompress>,
eof: AtomicCell<bool>,
unused_data: PyMutex<PyBytesRef>,
unconsumed_tail: PyMutex<PyBytesRef>,
}
#[pyimpl]
impl PyDecompress {
#[pyproperty]
fn eof(&self) -> bool {
self.eof.load()
}
#[pyproperty]
fn unused_data(&self) -> PyBytesRef {
self.unused_data.lock().clone()
}
#[pyproperty]
fn unconsumed_tail(&self) -> PyBytesRef {
self.unconsumed_tail.lock().clone()
}
fn save_unused_input(
&self,
d: &mut Decompress,
data: &[u8],
stream_end: bool,
orig_in: u64,
vm: &VirtualMachine,
) {
let leftover = &data[(d.total_in() - orig_in) as usize..];
if stream_end && !leftover.is_empty() {
let mut unused_data = self.unused_data.lock();
let unused: Vec<_> = unused_data
.as_bytes()
.iter()
.chain(leftover)
.copied()
.collect();
*unused_data = vm.new_pyref(unused);
}
}
#[pymethod]
fn decompress(&self, args: DecompressArgs, vm: &VirtualMachine) -> PyResult<Vec<u8>> {
let max_length = if args.max_length == 0 {
None
} else {
Some(args.max_length)
};
let data = args.data.borrow_buf();
let data = &*data;
let mut d = self.decompress.lock();
let orig_in = d.total_in();
let (ret, stream_end) =
match _decompress(data, &mut d, DEF_BUF_SIZE, max_length, false, vm) {
Ok((buf, true)) => {
self.eof.store(true);
(Ok(buf), true)
}
Ok((buf, false)) => (Ok(buf), false),
Err(err) => (Err(err), false),
};
self.save_unused_input(&mut d, data, stream_end, orig_in, vm);
let leftover = if stream_end {
b""
} else {
&data[(d.total_in() - orig_in) as usize..]
};
let mut unconsumed_tail = self.unconsumed_tail.lock();
if !leftover.is_empty() || !unconsumed_tail.is_empty() {
*unconsumed_tail = PyBytes::from(leftover.to_owned()).into_ref(vm);
}
ret
}
#[pymethod]
fn flush(&self, length: OptionalArg<isize>, vm: &VirtualMachine) -> PyResult<Vec<u8>> {
let length = match length {
OptionalArg::Present(l) if l <= 0 => {
return Err(vm.new_value_error("length must be greater than zero".to_owned()));
}
OptionalArg::Present(l) => l as usize,
OptionalArg::Missing => DEF_BUF_SIZE,
};
let mut data = self.unconsumed_tail.lock();
let mut d = self.decompress.lock();
let orig_in = d.total_in();
let (ret, stream_end) = match _decompress(&data, &mut d, length, None, true, vm) {
Ok((buf, stream_end)) => (Ok(buf), stream_end),
Err(err) => (Err(err), false),
};
self.save_unused_input(&mut d, &data, stream_end, orig_in, vm);
*data = PyBytes::from(Vec::new()).into_ref(vm);
// TODO: drop the inner decompressor, somehow
// if stream_end {
//
// }
ret
}
}
#[derive(FromArgs)]
struct DecompressArgs {
#[pyarg(positional)]
data: ArgBytesLike,
#[pyarg(any, default = "0")]
max_length: usize,
}
#[derive(FromArgs)]
struct DecompressobjArgs {
#[pyarg(any, optional)]
wbits: OptionalArg<i8>,
#[cfg(feature = "zlib")]
#[pyarg(any, optional)]
zdict: OptionalArg<ArgBytesLike>,
}
#[pyfunction]
fn compressobj(
level: OptionalArg<i32>,
// only DEFLATED is valid right now, it's w/e
_method: OptionalArg<i32>,
wbits: OptionalArg<i8>,
// these aren't used.
_mem_level: OptionalArg<i32>, // this is memLevel in CPython
_strategy: OptionalArg<i32>,
_zdict: OptionalArg<ArgBytesLike>,
vm: &VirtualMachine,
) -> PyResult<PyCompress> {
let level = compression_from_int(level.into_option())
.ok_or_else(|| vm.new_value_error("invalid initialization option".to_owned()))?;
let compress = header_from_wbits(wbits, vm)?.compress(level);
Ok(PyCompress {
inner: PyMutex::new(CompressInner {
compress,
unconsumed: Vec::new(),
}),
})
}
#[derive(Debug)]
struct CompressInner {
compress: Compress,
unconsumed: Vec<u8>,
}
#[pyattr]
#[pyclass(name = "Compress")]
#[derive(Debug, PyPayload)]
struct PyCompress {
inner: PyMutex<CompressInner>,
}
#[pyimpl]
impl PyCompress {
#[pymethod]
fn compress(&self, data: ArgBytesLike, vm: &VirtualMachine) -> PyResult<Vec<u8>> {
let mut inner = self.inner.lock();
data.with_ref(|b| inner.compress(b, vm))
}
// TODO: mode argument isn't used
#[pymethod]
fn flush(&self, _mode: OptionalArg<i32>, vm: &VirtualMachine) -> PyResult<Vec<u8>> {
self.inner.lock().flush(vm)
}
// TODO: This is an optional feature of Compress
// #[pymethod]
// #[pymethod(magic)]
// #[pymethod(name = "__deepcopy__")]
// fn copy(&self) -> Self {
// todo!("<flate2::Compress as Clone>")
// }
}
const CHUNKSIZE: usize = u32::MAX as usize;
impl CompressInner {
fn compress(&mut self, data: &[u8], vm: &VirtualMachine) -> PyResult<Vec<u8>> {
let orig_in = self.compress.total_in() as usize;
let mut cur_in = 0;
let unconsumed = std::mem::take(&mut self.unconsumed);
let mut buf = Vec::new();
'outer: for chunk in unconsumed.chunks(CHUNKSIZE).chain(data.chunks(CHUNKSIZE)) {
while cur_in < chunk.len() {
buf.reserve(DEF_BUF_SIZE);
let status = self
.compress
.compress_vec(&chunk[cur_in..], &mut buf, FlushCompress::None)
.map_err(|_| {
self.unconsumed.extend_from_slice(&data[cur_in..]);
new_zlib_error("error while compressing", vm)
})?;
cur_in = (self.compress.total_in() as usize) - orig_in;
match status {
Status::Ok => continue,
Status::StreamEnd => break 'outer,
_ => break,
}
}
}
self.unconsumed.extend_from_slice(&data[cur_in..]);
buf.shrink_to_fit();
Ok(buf)
}
// TODO: flush mode (FlushDecompress) parameter
fn flush(&mut self, vm: &VirtualMachine) -> PyResult<Vec<u8>> {
let data = std::mem::take(&mut self.unconsumed);
let mut data_it = data.chunks(CHUNKSIZE);
let mut buf = Vec::new();
loop {
let chunk = data_it.next().unwrap_or(&[]);
if buf.len() == buf.capacity() {
buf.reserve(DEF_BUF_SIZE);
}
let status = self
.compress
.compress_vec(chunk, &mut buf, FlushCompress::Finish)
.map_err(|_| new_zlib_error("error while compressing", vm))?;
match status {
Status::StreamEnd => break,
_ => continue,
}
}
buf.shrink_to_fit();
Ok(buf)
}
}
fn new_zlib_error(message: &str, vm: &VirtualMachine) -> PyBaseExceptionRef {
vm.new_exception_msg(vm.class("zlib", "error"), message.to_owned())
}
} | if stream_end { |
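
Since the module above re-implements CPython's `zlib`, the reference behaviour can be exercised with the standard library directly; in particular, the `wbits` ranges accepted by `header_from_wbits` (9-15 for a zlib-wrapped stream, 25-31 for gzip) correspond to the stdlib arguments like this:

import zlib

data = b"hello " * 1000

# One-shot round trip with the default level and a zlib header.
assert zlib.decompress(zlib.compress(data)) == data

# The two checksums exposed above.
print(zlib.adler32(data), zlib.crc32(data))

# Streaming objects with an explicit wbits: 15 = zlib header,
# 31 (i.e. 15 + 16) = gzip header, mirroring header_from_wbits.
c = zlib.compressobj(level=9, wbits=31)
blob = c.compress(data) + c.flush()
d = zlib.decompressobj(wbits=31)
assert d.decompress(blob) + d.flush() == data and d.eof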
test_text_to_speech_request.py | # coding: utf-8
"""
speechapi
Speech APIs enable you to recognize speech and convert it to text using advanced machine learning, and also to convert text to speech. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudmersive_voicerecognition_api_client
from cloudmersive_voicerecognition_api_client.models.text_to_speech_request import TextToSpeechRequest # noqa: E501
from cloudmersive_voicerecognition_api_client.rest import ApiException
class | (unittest.TestCase):
"""TextToSpeechRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTextToSpeechRequest(self):
"""Test TextToSpeechRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = cloudmersive_voicerecognition_api_client.models.text_to_speech_request.TextToSpeechRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| TestTextToSpeechRequest |
nddataset.py | # -*- coding: utf-8 -*-
#
# ======================================================================================================================
# Copyright (©) 2015-2019 LCS
# Laboratoire Catalyse et Spectrochimie, Caen, France.
# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT
# See full LICENSE agreement in the root directory
# ======================================================================================================================
"""
This module implements the |NDDataset| class.
"""
__all__ = ['NDDataset']
import textwrap
import warnings
import sys
import numpy as np
from traitlets import HasTraits, Instance, Bool, Float, validate, default, Dict, Union
from traittypes import Array
from spectrochempy.core.project.baseproject import AbstractProject
from spectrochempy.core.dataset.ndarray import NDArray, DEFAULT_DIM_NAME
from spectrochempy.core.dataset.ndcomplex import NDComplexArray
from spectrochempy.core.dataset.coord import Coord, LinearCoord
from spectrochempy.core.dataset.coordset import CoordSet
from spectrochempy.core.dataset.ndmath import NDMath, _set_ufuncs, _set_operators
from spectrochempy.core.dataset.ndio import NDIO
from spectrochempy.core.dataset.ndplot import NDPlot
from spectrochempy.core import error_, warning_
from spectrochempy.utils import (colored_output, SpectroChemPyException, SpectroChemPyWarning, MaskedConstant)
HAS_XARRAY = False
try:
import xarray as xr
HAS_XARRAY = True # pragma: no cover
except ImportError:
xr = None # pragma: no cover
# ======================================================================================================================
# NDDataset class definition
# ======================================================================================================================
class NDDataset(NDIO, NDPlot, NDMath, NDComplexArray):
# coordinates
_coordset = Instance(CoordSet, allow_none=True)
# model data (e.g., for fit)
_modeldata = Array(Float(), allow_none=True)
# some setting for NDDataset
_copy = Bool(False)
_labels_allowed = Bool(False) # no labels for NDDataset
# dataset can be members of a project.
# we use the abstract class to avoid circular imports.
_parent = Instance(AbstractProject, allow_none=True)
# For the GUI interface
# parameters state
_state = Dict()
# processed data (for GUI)
_processeddata = Array(Float(), allow_none=True)
# processed mask (for GUI)
_processedmask = Union((Bool(), Array(Bool()), Instance(MaskedConstant)))
# baseline data (for GUI)
_baselinedata = Array(Float(), allow_none=True)
# reference data (for GUI)
_referencedata = Array(Float(), allow_none=True)
| def __init__(self, data=None, coordset=None, coordunits=None, coordtitles=None, **kwargs):
"""
The main N-dimensional dataset class used by |scpy|.
The NDDataset is the main object used by SpectroChemPy. Like numpy ndarrays, NDDataset objects can be
sliced, sorted and subjected to mathematical operations. In addition, an NDDataset may have units, can be
masked, and each dimension can have coordinates, also with units. This makes NDDataset aware of unit
compatibility, e.g., for binary operations such as addition or subtraction, or during the application of
mathematical operations.
In addition to, or in replacement of, numerical data for coordinates, NDDataset can also have labeled
coordinates where labels can be different kinds of objects (strings, datetime, numpy ndarray or other
NDDatasets, etc.).
Parameters
----------
data : array of floats
Data array contained in the object. The data can be a list, a tuple, a |ndarray|, a ndarray-like,
a |NDArray| or any subclass of |NDArray|. Any size or shape of data is accepted. If not given, an empty
|NDArray| will be initialized.
At initialisation the provided data will eventually be cast to a numpy ndarray.
If a subclass of |NDArray| is passed which already contains some mask, labels, or units, these elements
will be used to set those of the created object accordingly. If possible, the provided data will not be
copied for `data` input, but will be passed by reference, so you should make a copy of the `data` before
passing it if that's the desired behavior, or set the `copy` argument to True.
coordset : An instance of |CoordSet|, optional
`coordset` contains the coordinates for the different dimensions of the `data`. If `coordset` is provided,
it must specify the `coord` and `labels` for all dimensions of the `data`.
Multiple `coord`'s can be specified in an |CoordSet| instance for each dimension.
coordunits : list, optional
A list of units corresponding to the dimensions in the order of the coordset.
coordtitles : list, optional
A list of titles corresponding to the dimensions in the order of the coordset.
**kwargs : dict
See other parameters.
Other Parameters
----------------
dtype : str or dtype, optional, default=np.float64
If specified, the data will be cast to this dtype, else the data will be cast to float64 or complex128.
dims : list of chars, optional
If specified, the list must have a length equal to the number of data dimensions (ndim) and the chars
must be taken among x,y,z,u,v,w or t. If not specified, the dimension names are automatically attributed
in this order.
name : str, optional
A user friendly name for this object. If not given, the automatic `id` given at the object creation will be
used as a name.
labels : array of objects, optional
Labels for the `data`. labels can be used only for 1D-datasets.
The labels array may have an additional dimension, meaning several series of labels for the same data.
The given array can be a list, a tuple, a |ndarray|, a ndarray-like, a |NDArray| or any subclass of
|NDArray|.
mask : array of bool or `NOMASK`, optional
Mask for the data. The mask array must have the same shape as the data. The given array can be a list,
a tuple, or a |ndarray|. Each value in the array must be `False` where the data are *valid* and `True` where
they are not (as in numpy masked arrays). If `data` is already a :class:`~numpy.ma.MaskedArray`, or any
array object (such as a |NDArray| or subclass of it), providing a `mask` here will cause the mask from the
masked array to be ignored.
units : |Unit| instance or str, optional
Units of the data. If data is a |Quantity| then `units` is set to the unit of the `data`; if a unit is also
explicitly provided an error is raised. Handling of units use the `pint <https://pint.readthedocs.org/>`_
package.
title : str, optional
The title of the dimension. It will later be used for instance for labelling plots of the data.
It is optional but recommended to give a title to each ndarray.
dlabel : str, optional
Alias of `title`.
meta : dict-like object, optional
Additional metadata for this object. Must be dict-like but no
further restriction is placed on meta.
author : str, optional
Name(s) of the author(s) of this dataset. By default, the name of the computer node where this dataset is
created.
description : str, optional
An optional description of the nd-dataset. A shorter alias is `desc`.
history : str, optional
A string to add to the object history.
copy : bool, optional
Perform a copy of the passed object. Default is False.
See Also
--------
Coord : Explicit coordinates object.
LinearCoord : Implicit coordinates objet.
CoordSet : Set of coordinates.
Notes
-----
The underlying array in a |NDDataset| object can be accessed through the `data` attribute, which will return
a conventional |ndarray|.
Examples
--------
Usage by an end-user
>>> from spectrochempy import *
>>> x = NDDataset([1, 2, 3])
>>> print(x.data) # doctest: +NORMALIZE_WHITESPACE
[ 1 2 3]
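An illustrative creation with coordinate units and titles (a sketch that only uses
the parameters documented above; the array values are arbitrary):
>>> nd = NDDataset([[1., 2., 3.], [4., 5., 6.]], coordunits=['s', 'cm'], coordtitles=['time', 'position'])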
"""
super().__init__(data, **kwargs)
self._parent = None
# eventually set the coordinates with optional units and title
if isinstance(coordset, CoordSet):
self.set_coordset(**coordset)
else:
if coordset is None:
coordset = [None] * self.ndim
if coordunits is None:
coordunits = [None] * self.ndim
if coordtitles is None:
coordtitles = [None] * self.ndim
_coordset = []
for c, u, t in zip(coordset, coordunits, coordtitles):
if not isinstance(c, CoordSet):
if isinstance(c, LinearCoord):
coord = LinearCoord(c)
else:
coord = Coord(c)
if u is not None:
coord.units = u
if t is not None:
coord.title = t
else:
if u: # pragma: no cover
                        warning_('units have been set for a CoordSet, but this will be ignored '
                                 '(units are only defined at the coordinate level)')
                    if t:  # pragma: no cover
                        warning_('titles will be ignored as they are only defined at the coordinate level')
coord = c
_coordset.append(coord)
            if _coordset and set(_coordset) != {Coord()}:  # if there are no coordinates, do nothing
self.set_coordset(*_coordset)
# ------------------------------------------------------------------------------------------------------------------
# special methods
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
def __dir__(self):
        # WARNING: be careful to keep the present order of the first three elements! Needed for save/load operations
return ['dims', 'coordset', 'data', 'name', 'title', 'mask', 'units', 'meta', 'preferences',
'author', 'description', 'history', 'date', 'modified', 'origin', 'roi', 'offset', 'transposed',
'modeldata', 'processeddata', 'baselinedata', 'referencedata', 'state'] + NDIO().__dir__()
# ..................................................................................................................
def __getitem__(self, items):
saveditems = items
# coordinate selection to test first
if isinstance(items, str):
try:
return self._coordset[items]
except Exception:
pass
# slicing
new, items = super().__getitem__(items, return_index=True)
if new is None:
return None
if self._coordset is not None:
names = self._coordset.names # all names of the current coordinates
new_coords = [None] * len(names)
for i, item in enumerate(items):
# get the corresponding dimension name in the dims list
name = self.dims[i]
# get the corresponding index in the coordinate's names list
idx = names.index(name)
if self._coordset[idx].is_empty:
new_coords[idx] = Coord(None, name=name)
elif isinstance(item, slice):
# add the slice on the corresponding coordinates on the dim to the new list of coordinates
if not isinstance(self._coordset[idx], CoordSet):
new_coords[idx] = self._coordset[idx][item]
else:
# we must slice all internal coordinates
newc = []
for c in self._coordset[idx]:
newc.append(c[item])
                        new_coords[idx] = CoordSet(*newc[::-1], name=name)  # we reverse to be sure
                        # the order will be kept for internal coordinates
new_coords[idx]._default = self._coordset[idx]._default # set the same default coord
new_coords[idx]._is_same_dim = self._coordset[idx]._is_same_dim
elif isinstance(item, (np.ndarray, list)):
new_coords[idx] = self._coordset[idx][item]
new.set_coordset(*new_coords, keepnames=True)
new.history = f'Slice extracted: ({saveditems})'
return new
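    # Slicing sketch (illustrative): for a coordinated 2D dataset `ds`,
    #     ds[0:2]      returns the first two rows, with the coordinates sliced accordingly;
    #     ds['water']  returns the coordinate selected by name or title (the string case above).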
# ..................................................................................................................
def __getattr__(self, item):
# when the attribute was not found
if item in ["__numpy_ufunc__", "interface", '_pytestfixturefunction', '__dataclass_fields__',
'_ipython_canary_method_should_not_exist_', '_baseclass', '_fill_value', '_ax_lines', '_axcb',
'clevels', '__wrapped__', 'coords', '__await__',
'__aiter__'] or '_validate' in item or '_changed' in item:
# raise an error so that traits, ipython operation and more ... will be handled correctly
raise AttributeError
# syntax such as ds.x, ds.y, etc...
if item[0] in self.dims or self._coordset:
# look also properties
attribute = None
index = 0
# print(item)
if len(item) > 2 and item[1] == '_':
attribute = item[1:]
item = item[0]
index = self.dims.index(item)
if self._coordset:
try:
c = self._coordset[item]
if isinstance(c, str) and c in self.dims:
                        # probably a reference to another coordinate name
c = self._coordset[c]
if c.name in self.dims or c._parent_dim in self.dims:
if attribute is not None:
# get the attribute
return getattr(c, attribute)
else:
return c
else:
raise AttributeError
except Exception as err:
if item in self.dims:
return None
else:
raise err
elif attribute is not None:
if attribute == 'size':
# we want the size but there is no coords, get it from the data shape
return self.shape[index]
else:
raise AttributeError(f'Can not find `{attribute}` when no coordinate is defined')
return None
raise AttributeError
def __setattr__(self, key, value):
if key in DEFAULT_DIM_NAME: # syntax such as ds.x, ds.y, etc...
# Note the above test is important to avoid errors with traitlets
            # even if it looks redundant with the following
if key in self.dims:
if self._coordset is None:
# we need to create a coordset first
self.set_coordset(dict((self.dims[i], None) for i in range(self.ndim)))
idx = self._coordset.names.index(key)
_coordset = self._coordset
listcoord = False
if isinstance(value, list):
listcoord = all([isinstance(item, Coord) for item in value])
if listcoord:
_coordset[idx] = list(CoordSet(value).to_dict().values())[0]
_coordset[idx].name = key
_coordset[idx]._is_same_dim = True
elif isinstance(value, CoordSet):
if len(value) > 1:
value = CoordSet(value)
_coordset[idx] = list(value.to_dict().values())[0]
_coordset[idx].name = key
_coordset[idx]._is_same_dim = True
elif isinstance(value, (Coord, LinearCoord)):
value.name = key
_coordset[idx] = value
else:
_coordset[idx] = Coord(value, name=key)
_coordset = self._valid_coordset(_coordset)
self._coordset.set(_coordset)
else:
raise AttributeError(f'Coordinate `{key}` is not used.')
else:
super().__setattr__(key, value)
# ..................................................................................................................
def __eq__(self, other, attrs=None):
attrs = self.__dir__()
for attr in (
'filename', 'preferences', 'name', 'description', 'history', 'date', 'modified', 'origin',
'show_datapoints', 'roi', 'offset', 'modeldata', 'processeddata', 'baselinedata', 'referencedata',
'state'):
            # these attributes are not used for comparison (comparison is based on data and units!)
try:
attrs.remove(attr)
except ValueError:
pass
return super().__eq__(other, attrs)
# ..................................................................................................................
def __hash__(self):
        # combine the parent class hash with that of the coordset, so that instances can be compared
        return super().__hash__() + hash(self._coordset)
# ------------------------------------------------------------------------------------------------------------------
# Default values
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
@default('_coordset')
def _coordset_default(self):
return None
# ..................................................................................................................
@default('_modeldata')
def _modeldata_default(self):
return None
# ..................................................................................................................
@default('_processeddata')
def _processeddata_default(self):
return None
# ..................................................................................................................
@default('_baselinedata')
def _baselinedata_default(self):
return None
# ..................................................................................................................
@default('_referencedata')
def _referencedata_default(self):
return None
# ------------------------------------------------------------------------------------------------------------------
# GUI options
# ------------------------------------------------------------------------------------------------------------------
# TODO: refactor the spectrochempy preference system to have a common basis
@property
def state(self):
# state of the controller window for this dataset
return self._state
@state.setter
def state(self, val):
self._state = val
@property
def processeddata(self):
return self._processeddata
@processeddata.setter
def processeddata(self, val):
self._processeddata = val
@property
def processedmask(self):
return self._processedmask
@processedmask.setter
def processedmask(self, val):
self._processedmask = val
@property
def baselinedata(self):
return self._baselinedata
@baselinedata.setter
def baselinedata(self, val):
self._baselinedata = val
@property
def referencedata(self):
return self._referencedata
@referencedata.setter
def referencedata(self, val):
self._referencedata = val
# ------------------------------------------------------------------------------------------------------------------
# Validators
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
@validate('_coordset')
def _coordset_validate(self, proposal):
coords = proposal['value']
return self._valid_coordset(coords)
def _valid_coordset(self, coords):
        # used in _coordset_validate and __setattr__
if coords is None:
return
for k, coord in enumerate(coords):
if coord is not None and not isinstance(coord, CoordSet) and coord.data is None:
continue
# For coord to be acceptable, we require at least a NDArray, a NDArray subclass or a CoordSet
if not isinstance(coord, (LinearCoord, Coord, CoordSet)):
if isinstance(coord, NDArray):
coord = coords[k] = Coord(coord)
else:
                    raise TypeError('Coordinates must be an instance or a subclass of Coord or NDArray, or of '
                                    f'CoordSet, but an instance of {type(coord)} has been passed')
if self.dims and coord.name in self.dims:
                # check the validity of the given coordinates in terms of size (if it corresponds to one of the dims)
size = coord.size
if self.implements('NDDataset'):
idx = self._get_dims_index(coord.name)[0] # idx in self.dims
if size != self._data.shape[idx]:
raise ValueError(f'the size of a coordinates array must be None or be equal'
f' to that of the respective `{coord.name}`'
f' data dimension but coordinate size={size} != data shape[{idx}]='
f'{self._data.shape[idx]}')
else:
pass # bypass this checking for any other derived type (should be done in the subclass)
coords._parent = self
return coords
# ..................................................................................................................
@property
def _dict_dims(self):
_dict = {}
for index, dim in enumerate(self.dims):
if dim not in _dict:
_dict[dim] = {'size': self.shape[index], 'coord': getattr(self, dim)}
return _dict
# ------------------------------------------------------------------------------------------------------------------
# public methods
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
def add_coordset(self, *coords, dims=None, **kwargs):
"""
Add one or a set of coordinates from a dataset.
Parameters
----------
*coords : iterable
Coordinates object(s).
dims : list
Name of the coordinates.
**kwargs : dict
Keywords passed to the coordset.
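        Examples
        --------
        A minimal sketch (the coordinate values are illustrative):
        >>> ds = NDDataset(np.ones((2, 3)))  # doctest: +SKIP
        >>> ds.add_coordset(Coord([0., 1.]), Coord([10., 20., 30.]))  # doctest: +SKIP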
"""
if not coords and not kwargs:
# reset coordinates
self._coordset = None
return
if self._coordset is None:
# make the whole coordset at once
self._coordset = CoordSet(*coords, dims=dims, **kwargs)
else:
# add one coordinate
self._coordset._append(*coords, **kwargs)
if self._coordset:
# set a notifier to the updated traits of the CoordSet instance
HasTraits.observe(self._coordset, self._dims_update, '_updated')
# force it one time after this initialization
self._coordset._updated = True
# ..................................................................................................................
def coord(self, dim='x'):
"""
Return the coordinates along the given dimension.
Parameters
----------
dim : int or str
A dimension index or name, default index = `x`.
If an integer is provided, it is equivalent to the `axis` parameter for numpy array.
Returns
-------
|Coord|
Coordinates along the given axis.
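        Examples
        --------
        A sketch, assuming a 2D dataset `ds` with dims ['y', 'x']:
        >>> ds.coord('x')  # doctest: +SKIP
        >>> ds.coord(0)  # equivalent to ds.coord(dim=ds.dims[0])  # doctest: +SKIP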
"""
idx = self._get_dims_index(dim)[0] # should generate an error if the
# dimension name is not recognized
if idx is None:
return None
if self._coordset is None:
return None
# idx is not necessarily the position of the coordinates in the CoordSet
# indeed, transposition may have taken place. So we need to retrieve the coordinates by its name
name = self.dims[idx]
if name in self._coordset.names:
idx = self._coordset.names.index(name)
return self._coordset[idx]
else:
            error_(f'could not find this dimension name: `{name}`')
return None
# ..................................................................................................................
@property
def coordset(self):
"""
|CoordSet| instance.
Contains the coordinates of the various dimensions of the dataset.
        It's a readonly property. Use `set_coordset` to change one or more coordinates at once.
"""
if self._coordset and all(c.is_empty for c in self._coordset):
# all coordinates are empty, this is equivalent to None for the coordset
return None
return self._coordset
# ..................................................................................................................
@coordset.setter
def coordset(self, coords):
if isinstance(coords, CoordSet):
self.set_coordset(**coords)
else:
self.set_coordset(coords)
# ..................................................................................................................
@property
def coordnames(self):
"""
List of the |Coord| names.
Read only property.
"""
if self._coordset is not None:
return self._coordset.names
# ..................................................................................................................
@property
def coordtitles(self):
"""
List of the |Coord| titles.
        Read only property. Use `set_coordtitles` to eventually set titles.
"""
if self._coordset is not None:
return self._coordset.titles
# ..................................................................................................................
@property
def coordunits(self):
"""
List of the |Coord| units.
Read only property. Use set_coordunits to eventually set units.
"""
if self._coordset is not None:
return self._coordset.units
# ..................................................................................................................
@property
def data(self):
"""
The ``data`` array.
If there is no data but labels, then the labels are returned instead of data.
"""
return super().data
# ..................................................................................................................
@data.setter
def data(self, data):
# as we can't write super().data = data, we call _set_data
# see comment in the data.setter of NDArray
super()._set_data(data)
# ..................................................................................................................
def delete_coordset(self):
"""
Delete all coordinate settings.
"""
self._coordset = None
# ..................................................................................................................
def implements(self, name=None):
"""
Check if the current object implements `NDDataset`.
Rather than isinstance(obj, NDDataset) use object.implements('NDDataset').
This is useful to check type without importing the module
Parameters
----------
name : str
Name of the object class. If None, the function returns the class name.
        If name is given, it checks if it corresponds to the current class name.
Returns
-------
str or bool
If name is given, a bool is returned
If name is None, the classname is returned
Examples
--------
>>> from spectrochempy import NDDataset, Coord
>>> co = Coord([1., 2., 3.])
>>> co.implements('NDDataset')
False
>>> co.implements('Coord')
True
>>> ds = NDDataset([1., 2., 3.])
>>> ds.implements()
'NDDataset'
"""
if name is None:
return 'NDDataset'
else:
return name == 'NDDataset'
# ..................................................................................................................
@property
def labels(self):
# not valid for NDDataset
# There is no label for nd-dataset
raise NotImplementedError # pragma: no cover
# ..................................................................................................................
@property
def modeldata(self):
"""
|ndarray| - models data.
Data eventually generated by modelling of the data.
"""
return self._modeldata
# ..................................................................................................................
@modeldata.setter
def modeldata(self, data):
self._modeldata = data
# ..................................................................................................................
@property
def parent(self):
"""
|Project| instance
The parent project of the dataset.
"""
return self._parent
# ..................................................................................................................
@parent.setter
def parent(self, value):
if self._parent is not None:
            # A parent project already exists for this dataset, but the
            # entered value gives a different parent. This is not allowed,
            # as it can produce unpredictable results. We will first remove
            # the dataset from the current project.
self._parent.remove_dataset(self.name)
self._parent = value
# ..................................................................................................................
def set_coordset(self, *args, **kwargs):
"""
Set one or more coordinates at once.
Warnings
--------
        This method replaces all existing coordinates.
See Also
--------
        add_coordset, set_coordtitles, set_coordunits
"""
self._coordset = None
self.add_coordset(*args, dims=self.dims, **kwargs)
# ..................................................................................................................
def set_coordtitles(self, *args, **kwargs):
"""
Set titles of the one or more coordinates.
"""
self._coordset.set_titles(*args, **kwargs)
# ..................................................................................................................
def set_coordunits(self, *args, **kwargs):
"""
Set units of the one or more coordinates.
"""
self._coordset.set_units(*args, **kwargs)
# ..................................................................................................................
def sort(self, **kwargs):
"""
        Return the dataset sorted along a given dimension
        (by default, the last dimension [axis=-1]), using the numeric or label values.
Parameters
----------
dim : str or int, optional, default=-1
dimension index or name along which to sort.
pos : int , optional
            If labels are multidimensional, allows sorting on a given
            row of labels: labels[pos]. Experimental: not yet checked.
by : str among ['value', 'label'], optional, default=``value``
Indicate if the sorting is following the order of labels or
numeric coord values.
descend : `bool`, optional, default=`False`
If true the dataset is sorted in a descending direction. Default is False except if coordinates
are reversed.
inplace : bool, optional, default=`False`
Flag to say that the method return a new object (default)
or not (inplace=True).
Returns
-------
sorted_dataset
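        Examples
        --------
        A sketch (the dimension names used are illustrative):
        >>> sorted_ds = ds.sort(dim='x', descend=True)  # doctest: +SKIP
        >>> ds.sort(dim='y', by='label', pos=0, inplace=True)  # doctest: +SKIP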
"""
inplace = kwargs.get('inplace', False)
if not inplace:
new = self.copy()
else:
new = self
# parameter for selecting the level of labels (default None or 0)
pos = kwargs.pop('pos', None)
# parameter to say if selection is done by values or by labels
by = kwargs.pop('by', 'value')
# determine which axis is sorted (dims or axis can be passed in kwargs)
# it will return a tuple with axis and dim
axis, dim = self.get_axis(**kwargs)
if axis is None:
axis, dim = self.get_axis(axis=0)
        # get the corresponding coordinates (remember their order can be different from the order
        # of dimensions in dims, so we cannot just take the coord from the index)
coord = getattr(self, dim) # get the coordinate using the syntax such as self.x
descend = kwargs.pop('descend', None)
if descend is None:
            # when not specified, the default is False (except for reversed coordinates)
descend = coord.reversed
indexes = []
for i in range(self.ndim):
if i == axis:
if not coord.has_data:
# sometimes we have only label for Coord objects.
# in this case, we sort labels if they exist!
if coord.is_labeled:
by = 'label'
else:
# nothing to do for sorting
# return self itself
return self
args = coord._argsort(by=by, pos=pos, descend=descend)
setattr(new, dim, coord[args])
indexes.append(args)
else:
indexes.append(slice(None))
new._data = new._data[tuple(indexes)]
if new.is_masked:
new._mask = new._mask[tuple(indexes)]
return new
# ..................................................................................................................
def squeeze(self, *dims, inplace=False):
"""
Remove single-dimensional entries from the shape of a NDDataset.
Parameters
----------
dim : None or int or tuple of ints, optional
Selects a subset of the single-dimensional entries in the
shape. If a dimension (dim) is selected with shape entry greater than
one, an error is raised.
inplace : bool, optional, default=`False`
Flag to say that the method return a new object (default)
or not (inplace=True).
Returns
-------
squeezed
The input array, but with all or a subset of the
dimensions of length 1 removed.
Raises
------
ValueError
If `dim` is not `None`, and the dimension being squeezed is not
of length 1.
"""
# make a copy of the original dims
old = self.dims[:]
# squeeze the data and determine which axis must be squeezed
new, axis = super().squeeze(*dims, inplace=inplace, return_axis=True)
if axis is not None and new._coordset is not None:
# if there are coordinates they have to be squeezed as well (remove
# coordinate for the squeezed axis)
for i in axis:
dim = old[i]
del new._coordset[dim]
return new
def expand_dims(self, dim=None):
"""
Expand the shape of an array.
Insert a new axis that will appear at the `axis` position in the expanded array shape.
Parameters
----------
dim : int or str
Position in the expanded axes where the new axis (or axes) is placed.
Returns
-------
result : ndarray
View of `a` with the number of dimensions increased.
See Also
--------
squeeze : The inverse operation, removing singleton dimensions
""" # TODO
# ..................................................................................................................
def swapdims(self, dim1, dim2, inplace=False):
"""
Interchange two dimensions of a NDDataset.
Parameters
----------
dim1 : int
First axis.
dim2 : int
Second axis.
inplace : bool, optional, default=`False`
Flag to say that the method return a new object (default)
or not (inplace=True).
Returns
-------
        swapped_dataset
See Also
--------
transpose
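        Examples
        --------
        A minimal sketch:
        >>> ndd = NDDataset(np.ones((2, 3)))  # doctest: +SKIP
        >>> ndd.swapdims(0, 1).shape  # doctest: +SKIP
        (3, 2)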
"""
new = super().swapdims(dim1, dim2, inplace=inplace)
new.history = f'Data swapped between dims {dim1} and {dim2}'
return new
# ..................................................................................................................
@property
def T(self):
"""
Transposed |NDDataset|.
The same object is returned if `ndim` is less than 2.
"""
return self.transpose()
# ..................................................................................................................
def take(self, indices, **kwargs):
"""
Take elements from an array
Parameters
----------
indices
kwargs
Returns
-------
"""
# handle the various syntax to pass the axis
dims = self._get_dims_from_args(**kwargs)
axis = self._get_dims_index(dims)
axis = axis[0] if axis else None
# indices = indices.tolist()
if axis is None:
# just do a fancy indexing
return self[indices]
if axis < 0:
axis = self.ndim + axis
index = tuple([...] + [indices] + [slice(None) for i in range(self.ndim - 1 - axis)])
new = self[index]
return new
def to_array(self):
"""
Return a numpy masked array (i.e., other NDDataset attributes are lost.
Examples
========
>>> import spectrochempy as scp
>>> dataset = scp.read('wodger.spg')
>>> a = scp.to_array(dataset)
equivalent to:
>>> a = np.ma.array(dataset)
or
>>> a= dataset.masked_data
"""
return np.ma.array(self)
# ..................................................................................................................
def to_xarray(self, **kwargs):
"""
Convert a NDDataset instance to an `~xarray.DataArray` object
( the xarray library must be available )
Parameters
Returns
-------
object : a xarray.DataArray object
"""
# Information about DataArray from the DataArray docstring
#
# Attributes
# ----------
# dims: tuple
# Dimension names associated with this array.
# values: np.ndarray
# Access or modify DataArray values as a numpy array.
# coords: dict-like
# Dictionary of DataArray objects that label values along each dimension.
# name: str or None
# Name of this array.
# attrs: OrderedDict
# Dictionary for holding arbitrary metadata.
# Init docstring
#
# Parameters
# ----------
# data: array_like
# Values for this array. Must be an ``numpy.ndarray``, ndarray like,
# or castable to an ``ndarray``.
# coords: sequence or dict of array_like objects, optional
# Coordinates (tick labels) to use for indexing along each dimension.
# If dict-like, should be a mapping from dimension names to the
# corresponding coordinates. If sequence-like, should be a sequence
# of tuples where the first element is the dimension name and the
# second element is the corresponding coordinate array_like object.
# dims: str or sequence of str, optional
# Name(s) of the data dimension(s). Must be either a string (only
# for 1D data) or a sequence of strings with length equal to the
# number of dimensions. If this argument is omitted, dimension names
# are taken from ``coords`` (if possible) and otherwise default to
# ``['dim_0', ... 'dim_n']``.
# name: str or None, optional
# Name of this array.
# attrs: dict_like or None, optional
# Attributes to assign to the new instance. By default, an empty
# attribute dictionary is initialized.
# encoding: dict_like or None, optional
# Dictionary specifying how to encode this array's data into a
# serialized format like netCDF4. Currently used keys (for netCDF)
# include '_FillValue', 'scale_factor', 'add_offset', 'dtype',
# 'units' and 'calendar' (the later two only for datetime arrays).
# Unrecognized keys are ignored.
if not HAS_XARRAY:
warnings.warn('Xarray is not available! This function can not be used', SpectroChemPyWarning)
return None
x, y = self.x, self.y
tx = x.title
if y:
ty = y.title
da = xr.DataArray(np.array(self.data, dtype=np.float64), coords=[(ty, y.data), (tx, x.data)], )
da.attrs['units'] = self.units
else:
da = xr.DataArray(np.array(self.data, dtype=np.float64), coords=[(tx, x.data)], )
da.attrs['units'] = self.units
da.attrs['title'] = self.title
return da
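    # Usage sketch for `to_xarray` (illustrative; requires the optional xarray dependency):
    #     da = dataset.to_xarray()
    #     assert da.attrs['title'] == dataset.title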
# ..................................................................................................................
def transpose(self, *dims, inplace=False):
"""
Permute the dimensions of a NDDataset.
Parameters
----------
dims : sequence of dimension indexes or names, optional
By default, reverse the dimensions, otherwise permute the dimensions
according to the values given.
inplace : bool, optional, default=`False`
Flag to say that the method return a new object (default)
or not (inplace=True).
Returns
-------
transposed_array
See Also
--------
swapdims : Interchange two dimensions of a NDDataset.
"""
new = super().transpose(*dims, inplace=inplace)
new.history = f'Data transposed between dims: {dims}' if dims else ''
return new
# ------------------------------------------------------------------------------------------------------------------
# private methods
# ------------------------------------------------------------------------------------------------------------------
# ..................................................................................................................
def _cstr(self):
# Display the metadata of the object and partially the data
out = ''
out += ' name: {}\n'.format(self.name)
out += ' author: {}\n'.format(self.author)
out += ' created: {}\n'.format(self._date)
# out += ' modified: {}\n'.format(self._modified) if (self.modified - self.date).seconds > 1 else ''
wrapper1 = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 15, replace_whitespace=True,
width=self._text_width)
pars = self.description.strip().splitlines()
if pars:
out += ' description: '
desc = ''
if pars:
desc += '{}\n'.format(wrapper1.fill(pars[0]))
for par in pars[1:]:
desc += '{}\n'.format(textwrap.indent(par, ' ' * 15))
# the three escaped null characters are here to facilitate
# the generation of html outputs
desc = '\0\0\0{}\0\0\0\n'.format(desc.rstrip())
out += desc
if self._history:
pars = self.history
out += ' history: '
hist = ''
if pars:
hist += '{}\n'.format(wrapper1.fill(pars[0]))
for par in pars[1:]:
hist += '{}\n'.format(textwrap.indent(par, ' ' * 15))
# the three escaped null characters are here to facilitate
# the generation of html outputs
hist = '\0\0\0{}\0\0\0\n'.format(hist.rstrip())
out += hist
out += '{}\n'.format(self._str_value().rstrip())
out += '{}\n'.format(self._str_shape().rstrip()) if self._str_shape() else ''
out += '{}\n'.format(self._str_dims().rstrip())
if not out.endswith('\n'):
out += '\n'
out += '\n'
if not self._html_output:
return colored_output(out.rstrip())
else:
return out.rstrip()
# ..................................................................................................................
def _loc2index(self, loc, dim=-1):
# Return the index of a location (label or coordinates) along the dim
# This can work only if `coords` exists.
if self._coordset is None:
raise SpectroChemPyException('No coords have been defined. Slicing or selection'
' by location ({}) needs coords definition.'.format(loc))
coord = self.coord(dim)
return coord._loc2index(loc)
# ..................................................................................................................
def _str_dims(self):
if self.is_empty:
return ''
if len(self.dims) < 1 or not hasattr(self, "_coordset"):
return ''
if not self._coordset or len(self._coordset) < 1:
return ''
        self._coordset._html_output = self._html_output  # transfer the html flag if necessary: False by default
txt = self._coordset._cstr()
txt = txt.rstrip() # remove the trailing '\n'
return txt
_repr_dims = _str_dims
# ------------------------------------------------------------------------------------------------------------------
# events
# ------------------------------------------------------------------------------------------------------------------
def _dims_update(self, change=None):
        # when notified that coordinate names have been updated
_ = self.dims # fire an update
# ..................................................................................................................
# ======================================================================================================================
# module function
# ======================================================================================================================
# make some NDDataset operation accessible from the spectrochempy API
thismodule = sys.modules[__name__]
api_funcs = ['sort', 'copy', 'squeeze', 'swapdims', 'transpose', 'to_array', 'to_xarray', 'take', 'set_complex',
'set_quaternion', 'set_hypercomplex', 'component', 'to', 'to_base_units', 'to_reduced_units', 'ito',
'ito_base_units', 'ito_reduced_units', 'is_units_compatible', 'remove_masks']
# TODO: check the fact that some functions are also defined in ndmath
for funcname in api_funcs:
setattr(thismodule, funcname, getattr(NDDataset, funcname))
thismodule.__all__.append(funcname)
# load one method from NDIO
load = NDDataset.load
__all__ += ['load']
# ======================================================================================================================
# Set the operators
# ======================================================================================================================
_set_operators(NDDataset, priority=100000)
_set_ufuncs(NDDataset)
log.go | // Package log wrapped logrus functions
package log
import (
"encoding/json"
"io"
"github.com/sirupsen/logrus"
"github.com/labstack/gommon/log"
)
// MyLogger extends logrus.Logger
type MyLogger struct {
*logrus.Logger
}
// Singleton logger
var singletonLogger = &MyLogger{
Logger: logrus.New(),
}
// Logger return singleton logger
func Logger() *MyLogger {
return singletonLogger
}
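// Usage sketch (illustrative; the import path of this package is hypothetical):
//
//	import (
//		gommonlog "github.com/labstack/gommon/log"
//		"example.com/project/log"
//	)
//
//	log.Logger().SetLevel(gommonlog.INFO)
//	log.Infof("server started on %s", addr)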
// Print output message of print level
func Print(i ...interface{}) {
singletonLogger.Print(i...)
}
// Printf output format message of print level
func Printf(format string, i ...interface{}) {
singletonLogger.Printf(format, i...)
}
// Printj output json of print level
func Printj(j log.JSON) {
singletonLogger.Printj(j)
}
// Debug output message of debug level
func Debug(i ...interface{}) {
singletonLogger.Debug(i...)
}
// Debugf output format message of debug level
func Debugf(format string, args ...interface{}) {
singletonLogger.Debugf(format, args...)
}
// Debugj output json of debug level
func Debugj(j log.JSON) {
singletonLogger.Debugj(j)
}
// Info output message of info level
func Info(i ...interface{}) {
singletonLogger.Info(i...)
}
// Infof output format message of info level
func Infof(format string, args ...interface{}) {
singletonLogger.Infof(format, args...)
}
// Infoj output json of info level
func Infoj(j log.JSON) {
singletonLogger.Infoj(j)
}
// Warn output message of warn level
func Warn(i ...interface{}) {
singletonLogger.Warn(i...)
}
// Warnf output format message of warn level
func Warnf(format string, args ...interface{}) {
singletonLogger.Warnf(format, args...)
}
// Warnj output json of warn level
func Warnj(j log.JSON) {
singletonLogger.Warnj(j)
}
// Error output message of error level
func Error(i ...interface{}) {
singletonLogger.Error(i...)
}
// Errorf output format message of error level
func Errorf(format string, args ...interface{}) {
singletonLogger.Errorf(format, args...)
}
// Errorj output json of error level
func Errorj(j log.JSON) {
singletonLogger.Errorj(j)
}
// Fatal output message of fatal level
func Fatal(i ...interface{}) {
singletonLogger.Fatal(i...)
}
// Fatalf output format message of fatal level
func Fatalf(format string, args ...interface{}) {
singletonLogger.Fatalf(format, args...)
}
// Fatalj output json of fatal level
func Fatalj(j log.JSON) {
singletonLogger.Fatalj(j)
}
// Panic output message of panic level
func Panic(i ...interface{}) {
singletonLogger.Panic(i...)
}
// Panicf output format message of panic level
func Panicf(format string, args ...interface{}) {
singletonLogger.Panicf(format, args...)
}
// Panicj output json of panic level
func Panicj(j log.JSON) {
singletonLogger.Panicj(j)
}
// toLogrusLevel converts a gommon log.Lvl to a logrus.Level
func toLogrusLevel(level log.Lvl) logrus.Level {
switch level {
case log.DEBUG:
return logrus.DebugLevel
case log.INFO:
return logrus.InfoLevel
case log.WARN:
return logrus.WarnLevel
case log.ERROR:
return logrus.ErrorLevel
}
return logrus.InfoLevel
}
// toEchoLevel converts a logrus.Level to a gommon log.Lvl
func toEchoLevel(level logrus.Level) log.Lvl {
switch level {
case logrus.DebugLevel:
return log.DEBUG
case logrus.InfoLevel:
return log.INFO
case logrus.WarnLevel:
return log.WARN
case logrus.ErrorLevel:
return log.ERROR
}
return log.OFF
}
// Output return logger io.Writer
func (l *MyLogger) Output() io.Writer {
return l.Out
}
// SetOutput logger io.Writer
func (l *MyLogger) SetOutput(w io.Writer) {
l.Out = w
}
// Level return logger level
func (l *MyLogger) Level() log.Lvl {
return toEchoLevel(l.Logger.Level)
}
// SetLevel logger level
func (l *MyLogger) SetLevel(v log.Lvl) {
l.Logger.Level = toLogrusLevel(v)
}
// SetHeader logger header
// Managed by Logrus itself
// This function does nothing
func (l *MyLogger) SetHeader(h string) {
// do nothing
}
// Formatter return logger formatter
func (l *MyLogger) Formatter() logrus.Formatter {
return l.Logger.Formatter
}
// SetFormatter logger formatter
// Only supports logrus formatters
func (l *MyLogger) SetFormatter(formatter logrus.Formatter) {
l.Logger.Formatter = formatter
}
// Prefix return logger prefix
// This function does nothing
func (l *MyLogger) Prefix() string {
return ""
}
// SetPrefix logger prefix
// This function does nothing
func (l *MyLogger) SetPrefix(p string) {
// do nothing
}
// Print output message of print level
func (l *MyLogger) Print(i ...interface{}) {
l.Logger.Print(i...)
}
// Printf output format message of print level
func (l *MyLogger) Printf(format string, args ...interface{}) {
l.Logger.Printf(format, args...)
}
// Printj output json of print level
func (l *MyLogger) Printj(j log.JSON) {
b, err := json.Marshal(j)
if err != nil {
panic(err)
}
l.Logger.Println(string(b))
}
// Debug output message of debug level
func (l *MyLogger) Debug(i ...interface{}) {
l.Logger.Debug(i...)
}
// Debugf output format message of debug level
func (l *MyLogger) Debugf(format string, args ...interface{}) {
l.Logger.Debugf(format, args...)
}
// Debugj output json of debug level
func (l *MyLogger) Debugj(j log.JSON) {
b, err := json.Marshal(j)
if err != nil {
panic(err)
}
l.Logger.Debugln(string(b))
}
// Info output message of info level
func (l *MyLogger) Info(i ...interface{}) {
l.Logger.Info(i...)
}
// Infof output format message of info level
func (l *MyLogger) Infof(format string, args ...interface{}) {
l.Logger.Infof(format, args...)
}
// Infoj output json of info level
func (l *MyLogger) Infoj(j log.JSON) {
b, err := json.Marshal(j)
if err != nil {
panic(err)
}
l.Logger.Infoln(string(b))
}
// Warn output message of warn level
func (l *MyLogger) Warn(i ...interface{}) {
l.Logger.Warn(i...)
}
// Warnf output format message of warn level
func (l *MyLogger) Warnf(format string, args ...interface{}) {
l.Logger.Warnf(format, args...)
}
// Warnj output json of warn level
func (l *MyLogger) Warnj(j log.JSON) {
b, err := json.Marshal(j)
if err != nil {
panic(err)
}
l.Logger.Warnln(string(b))
}
// Error output message of error level
func (l *MyLogger) Error(i ...interface{}) {
l.Logger.Error(i...)
}
// Errorf output format message of error level
func (l *MyLogger) Errorf(format string, args ...interface{}) {
l.Logger.Errorf(format, args...)
}
// Errorj output json of error level
func (l *MyLogger) Errorj(j log.JSON) {
b, err := json.Marshal(j)
if err != nil {
panic(err)
}
l.Logger.Errorln(string(b))
}
// Fatal output message of fatal level
func (l *MyLogger) Fatal(i ...interface{}) {
l.Logger.Fatal(i...)
}
// Fatalf output format message of fatal level
func (l *MyLogger) Fatalf(format string, args ...interface{}) {
l.Logger.Fatalf(format, args...)
}
// Fatalj output json of fatal level
func (l *MyLogger) Fatalj(j log.JSON) {
b, err := json.Marshal(j)
if err != nil {
panic(err)
}
l.Logger.Fatalln(string(b))
}
// Panic output message of panic level
func (l *MyLogger) Panic(i ...interface{}) {
l.Logger.Panic(i...)
}
// Panicf output format message of panic level
func (l *MyLogger) Panicf(format string, args ...interface{}) {
l.Logger.Panicf(format, args...)
}
// Panicj output json of panic level
func (l *MyLogger) Panicj(j log.JSON) {
b, err := json.Marshal(j)
if err != nil {
panic(err)
}
l.Logger.Panicln(string(b))
}
test_parser.rs | #[derive(Debug)]
pub enum ParseError {
MalformedBinary(&'static str),
DuplicateBinary(&'static str),
NoOutput,
ErrorsAndIo,
}
impl ::std::fmt::Display for ParseError {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
use self::ParseError::*;
match *self {
MalformedBinary(name) => write!(f, "annotation `{}` is malformed", name),
DuplicateBinary(name) => write!(f, "annotation `{}` appears multiple times", name),
NoOutput => write!(f, "input is provided but not output"),
ErrorsAndIo => write!(f, "test provides both build errors and io"),
}
}
}
#[derive(Debug)]
pub struct Error {
pub line: u32,
pub message: String,
}
#[derive(Debug)]
pub enum Expectation {
/// Build should fail, and all given errors must be present.
BuildErrors(Vec<Error>),
/// Build should succeed, and when ran with given
/// input program should produce given output.
Io { input: Vec<u8>, output: Vec<u8> },
/// Build should succeed, but program execution is not tested.
BuildSuccess,
}
fn get_errors(source: &str) -> Vec<Error> {
const ANNOTATION: &'static str = "// ERROR: ";
let mut errors = Vec::new();
for (line_num, line) in source.lines().enumerate() {
if let Some((index, _)) = line.match_indices(ANNOTATION).next() {
let message_from = index + ANNOTATION.len();
let message = line[message_from..].into();
errors.push(Error {
line: line_num as u32,
message,
})
}
}
errors
}
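// For example (illustrative), a source line such as
//     let x: u8 = 300; // ERROR: literal out of range
// yields Error { line: <zero-based line number>, message: "literal out of range" }.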
fn decode_bytes(mut from: &str) -> Result<Vec<u8>, ()> {
let mut result = Vec::new();
while let Some(ch) = from.chars().next() {
match ch {
'\\' => {
if from.chars().skip(1).next() != Some('x') {
return Err(());
}
let b1 = from
.chars()
.skip(2)
.next()
.ok_or(())?
.to_digit(16)
.ok_or(())? as u8;
let b2 = from
.chars()
.skip(3)
.next()
.ok_or(())?
.to_digit(16)
.ok_or(())? as u8;
result.push((b1 << 4) + b2);
from = &from[4..];
}
_ => {
if ch as u32 >= 127 || (ch as u32) < 32 {
return Err(());
}
result.push(ch as u8);
from = &from[1..];
}
}
}
Ok(result)
}
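// For example, the input string `ab\x0a` decodes to Ok(vec![b'a', b'b', b'\n']):
// printable ASCII passes through unchanged, and `\xNN` escapes become single hex-decoded bytes.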
fn get_io(source: &str, name: &'static str) -> Result<Option<Vec<u8>>, ParseError> {
let mut annotation = None;
let pattern = format!("// {}: ", name);
for line in source.lines() {
if let Some((index, _)) = line.match_indices(&pattern).next() {
let from = index + pattern.len();
let bytes =
decode_bytes(&line[from..]).map_err(|()| ParseError::MalformedBinary(name))?;
if annotation.is_some() {
return Err(ParseError::DuplicateBinary(name));
}
annotation = Some(bytes);
}
}
Ok(annotation)
}
pub fn parse_test(source: &str) -> Result<Expectation, ParseError> {
let errors = get_errors(source);
let input = get_io(source, "INPUT")?;
let output = get_io(source, "OUTPUT")?;
    if output.is_none() && input.is_some() {
        return Err(ParseError::NoOutput);
    }
if (output.is_some() || input.is_some()) && !errors.is_empty() {
return Err(ParseError::ErrorsAndIo);
}
Ok(if !errors.is_empty() {
Expectation::BuildErrors(errors)
} else if input.is_some() || output.is_some() {
Expectation::Io {
input: input.unwrap_or_else(Vec::new),
output: output.unwrap_or_else(Vec::new),
}
} else {
Expectation::BuildSuccess
})
}
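// A minimal sanity check of the parser (an illustrative sketch, not part of the original suite).
#[cfg(test)]
mod sketch_tests {
    use super::*;
    #[test]
    fn parses_io_expectation() {
        let src = "// INPUT: a\n// OUTPUT: b\nfn main() {}\n";
        match parse_test(src).unwrap() {
            Expectation::Io { input, output } => {
                assert_eq!(input, b"a");
                assert_eq!(output, b"b");
            }
            other => panic!("expected Io, got {:?}", other),
        }
    }
}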
enforcement_stats.py | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ipaddress
import os
from collections import defaultdict, namedtuple
from datetime import datetime, timedelta
from subprocess import check_output
import ryu.app.ofctl.api as ofctl_api
from lte.protos.pipelined_pb2 import RuleModResult
from lte.protos.policydb_pb2 import FlowDescription
from lte.protos.session_manager_pb2 import (
RuleRecord,
RuleRecordTable,
UPFSessionState,
)
from magma.common.sentry import EXCLUDE_FROM_ERROR_MONITORING
from magma.pipelined.app.base import (
ControllerType,
MagmaController,
global_epoch,
)
from magma.pipelined.app.policy_mixin import (
DROP_FLOW_STATS,
IGNORE_STATS,
PROCESS_STATS,
PolicyMixin,
)
from magma.pipelined.app.restart_mixin import DefaultMsgsMap, RestartMixin
from magma.pipelined.imsi import decode_imsi, encode_imsi
from magma.pipelined.ipv6_prefix_store import get_ipv6_prefix
from magma.pipelined.ng_manager.session_state_manager import SessionStateManager
from magma.pipelined.openflow import flows
from magma.pipelined.openflow.exceptions import (
MagmaDPDisconnectedError,
MagmaOFError,
)
from magma.pipelined.openflow.magma_match import MagmaMatch
from magma.pipelined.openflow.messages import MessageHub, MsgChannel
from magma.pipelined.openflow.registers import (
DIRECTION_REG,
IMSI_REG,
NG_SESSION_ID_REG,
REG_ZERO_VAL,
RULE_NUM_REG,
RULE_VERSION_REG,
SCRATCH_REGS,
Direction,
)
from magma.pipelined.policy_converters import (
convert_ipv4_str_to_ip_proto,
convert_ipv6_str_to_ip_proto,
get_eth_type,
get_ue_ip_match_args,
)
from magma.pipelined.utils import Utils
from ryu.app.ofctl.exception import (
InvalidDatapath,
OFError,
UnexpectedMultiReply,
)
from ryu.controller import dpset, ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, set_ev_cls
from ryu.lib import hub
from ryu.ofproto.ofproto_v1_4 import OFPMPF_REPLY_MORE
ETH_FRAME_SIZE_BYTES = 14
class EnforcementStatsController(PolicyMixin, RestartMixin, MagmaController):
"""
This openflow controller installs flows for aggregating policy usage
statistics, which are sent to sessiond for tracking.
    It periodically polls OVS for flow stats on its table and reports the
usage records to session manager via RPC. Flows are deleted when their
version (reg4 match) is different from the current version of the rule for
the subscriber maintained by the rule version mapper.
"""
APP_NAME = 'enforcement_stats'
APP_TYPE = ControllerType.LOGICAL
SESSIOND_RPC_TIMEOUT = 10
# 0xffffffffffffffff is reserved in openflow
DEFAULT_FLOW_COOKIE = 0xfffffffffffffffe
INIT_SLEEP_TIME = 3
MAX_DELAY_INTERVALS = 20
ng_config = namedtuple(
'ng_config',
['ng_service_enabled', 'sessiond_setinterface'],
)
_CONTEXTS = {
'dpset': dpset.DPSet,
}
def __init__(self, *args, **kwargs):
super(EnforcementStatsController, self).__init__(*args, **kwargs)
self.tbl_num = self._service_manager.get_table_num(self.APP_NAME)
self.next_table = \
self._service_manager.get_next_table_num(self.APP_NAME)
self.dpset = kwargs['dpset']
self.loop = kwargs['loop']
# Spawn a thread to poll for flow stats
poll_interval = kwargs['config']['enforcement']['poll_interval']
# Create a rpc channel to sessiond
self.sessiond = kwargs['rpc_stubs']['sessiond']
self._msg_hub = MessageHub(self.logger)
self.unhandled_stats_msgs = [] # Store multi-part responses from ovs
self.total_usage = {} # Store total usage
self._clean_restart = kwargs['config']['clean_restart']
self._redis_enabled = kwargs['config'].get('redis_enabled', False)
self._unmatched_bytes = 0 # Store bytes matched by default rule if any
self._default_drop_flow_name = \
kwargs['config']['enforcement']['default_drop_flow_name']
self.flow_stats_thread = hub.spawn(self._monitor, poll_interval)
self._print_grpc_payload = os.environ.get('MAGMA_PRINT_GRPC_PAYLOAD')
self._last_poll_time = datetime.now()
self._last_report_timestamp = datetime.now()
self._bridge_name = kwargs['config']['bridge_name']
self._periodic_stats_reporting = kwargs['config']['enforcement'].get('periodic_stats_reporting', True)
if self._print_grpc_payload is None:
self._print_grpc_payload = \
kwargs['config'].get('magma_print_grpc_payload', False)
self._restart_info_store = kwargs['restart_info_store']
self._ovs_restarted = self._was_ovs_restarted()
self.ng_config = self._get_ng_config(kwargs['config'], kwargs['rpc_stubs'])
self._prefix_mapper = kwargs['interface_to_prefix_mapper']
def _get_ng_config(self, config_dict, rpc_stub_dict):
ng_service_enabled = config_dict.get('enable5g_features', None)
sessiond_setinterface = rpc_stub_dict.get('sessiond_setinterface')
return self.ng_config(ng_service_enabled=ng_service_enabled, sessiond_setinterface=sessiond_setinterface)
def delete_all_flows(self, datapath):
flows.delete_all_flows_from_table(datapath, self.tbl_num)
def cleanup_state(self):
"""
When we remove/reinsert flows we need to remove old usage maps as new
flows will have reset stat counters
"""
self.unhandled_stats_msgs = []
self.total_usage = {}
self._unmatched_bytes = 0
def initialize_on_connect(self, datapath):
"""
Install the default flows on datapath connect event.
Args:
datapath: ryu datapath struct
"""
self._datapath = datapath
def _get_default_flow_msgs(self, datapath) -> DefaultMsgsMap:
"""
Gets the default flow msg that drops traffic
Args:
datapath: ryu datapath struct
Returns:
The list of default msgs to add
"""
match = MagmaMatch()
msg = flows.get_add_drop_flow_msg(
datapath, self.tbl_num, match,
priority=flows.MINIMUM_PRIORITY,
cookie=self.DEFAULT_FLOW_COOKIE,
)
return {self.tbl_num: [msg]}
def cleanup_on_disconnect(self, datapath):
"""
Cleanup flows on datapath disconnect event.
Args:
datapath: ryu datapath struct
"""
if self._clean_restart:
self.delete_all_flows(datapath)
def _install_flow_for_rule(
self, imsi, msisdn: bytes, uplink_tunnel: int, ip_addr, apn_ambr, rule, version, shard_id,
local_f_teid_ng: int,
):
"""
Install a flow to get stats for a particular rule. Flows will match on
IMSI, cookie (the rule num), in/out direction
Args:
imsi (string): subscriber to install rule for
msisdn (bytes): subscriber MSISDN
uplink_tunnel (int): tunnel ID of the subscriber.
ip_addr (string): subscriber session ipv4 address
rule (PolicyRule): policy rule proto
"""
def fail(err):
self.logger.error(
"Failed to install rule %s for subscriber %s: %s",
rule.id, imsi, err,
)
return RuleModResult.FAILURE
msgs = self._get_rule_match_flow_msgs(
imsi, msisdn, uplink_tunnel,
ip_addr, apn_ambr, rule, version, shard_id,
local_f_teid_ng,
)
try:
chan = self._msg_hub.send(msgs, self._datapath)
except MagmaDPDisconnectedError:
self.logger.error(
"Datapath disconnected, failed to install rule %s"
"for imsi %s", rule, imsi, extra=EXCLUDE_FROM_ERROR_MONITORING,
)
return RuleModResult.FAILURE
for _ in range(len(msgs)):
try:
result = chan.get()
except MsgChannel.Timeout:
return fail("No response from OVS")
if not result.ok():
return fail(result.exception())
return RuleModResult.SUCCESS
@set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
def _handle_barrier(self, ev):
self._msg_hub.handle_barrier(ev)
@set_ev_cls(ofp_event.EventOFPErrorMsg, MAIN_DISPATCHER)
def _handle_error(self, ev):
self._msg_hub.handle_error(ev)
# pylint: disable=protected-access,unused-argument
def _get_rule_match_flow_msgs(self, imsi, _, __, ip_addr, ambr, rule, version, shard_id, local_f_teid_ng):
"""
Returns flow add messages used for rule matching.
"""
rule_num = self._rule_mapper.get_or_create_rule_num(rule.id)
self.logger.debug(
'Installing flow for %s with rule num %s (version %s)', imsi,
rule_num, version,
)
inbound_rule_match = _generate_rule_match(
imsi, ip_addr, rule_num,
version, Direction.IN,
local_f_teid_ng,
)
outbound_rule_match = _generate_rule_match(
imsi, ip_addr, rule_num,
version, Direction.OUT,
local_f_teid_ng,
)
flow_actions = [flow.action for flow in rule.flow_list]
msgs = []
if FlowDescription.PERMIT in flow_actions:
inbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = PROCESS_STATS
outbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = PROCESS_STATS
msgs.extend([
flows.get_add_drop_flow_msg(
self._datapath,
self.tbl_num,
inbound_rule_match,
priority=flows.DEFAULT_PRIORITY,
cookie=shard_id,
),
flows.get_add_drop_flow_msg(
self._datapath,
self.tbl_num,
outbound_rule_match,
priority=flows.DEFAULT_PRIORITY,
cookie=shard_id,
),
])
else:
inbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = DROP_FLOW_STATS
outbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = DROP_FLOW_STATS
msgs.extend([
flows.get_add_drop_flow_msg(
self._datapath,
self.tbl_num,
inbound_rule_match,
priority=flows.DEFAULT_PRIORITY,
cookie=shard_id,
),
flows.get_add_drop_flow_msg(
self._datapath,
self.tbl_num,
outbound_rule_match,
priority=flows.DEFAULT_PRIORITY,
cookie=shard_id,
),
])
if rule.app_name:
inbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = IGNORE_STATS
outbound_rule_match._match_kwargs[SCRATCH_REGS[1]] = IGNORE_STATS
msgs.extend([
flows.get_add_drop_flow_msg(
self._datapath,
self.tbl_num,
inbound_rule_match,
priority=flows.DEFAULT_PRIORITY,
cookie=shard_id,
),
flows.get_add_drop_flow_msg(
self._datapath,
self.tbl_num,
outbound_rule_match,
priority=flows.DEFAULT_PRIORITY,
cookie=shard_id,
),
])
return msgs
def _get_default_flow_msgs_for_subscriber(self, imsi, ip_addr, local_f_teid_ng):
match_in = _generate_rule_match(
imsi, ip_addr, 0, 0,
Direction.IN, local_f_teid_ng,
)
match_out = _generate_rule_match(
imsi, ip_addr, 0, 0,
Direction.OUT, local_f_teid_ng,
)
return [
flows.get_add_drop_flow_msg(
self._datapath, self.tbl_num, match_in,
priority=Utils.DROP_PRIORITY,
),
flows.get_add_drop_flow_msg(
self._datapath, self.tbl_num, match_out,
priority=Utils.DROP_PRIORITY,
),
]
def _install_redirect_flow(self, imsi, ip_addr, rule, version):
pass
def _install_default_flow_for_subscriber(self, imsi, ip_addr, local_f_teid_ng):
"""
Add a low priority flow to drop a subscriber's traffic.
Args:
imsi (string): subscriber id
ip_addr (string): subscriber ip_addr
"""
msgs = self._get_default_flow_msgs_for_subscriber(imsi, ip_addr, local_f_teid_ng)
if msgs:
chan = self._msg_hub.send(msgs, self._datapath)
self._wait_for_responses(chan, len(msgs))
def get_policy_usage(self, fut):
record_table = RuleRecordTable(
records=self.total_usage.values(),
epoch=global_epoch,
)
fut.set_result(record_table)
def _monitor(self, poll_interval):
"""
Main thread that sends a stats request at the configured interval in
seconds.
"""
while not self.init_finished:
# Still send an empty report -> for pipelined setup
self._report_usage({})
hub.sleep(self.INIT_SLEEP_TIME)
if not self._periodic_stats_reporting:
return
while True:
hub.sleep(poll_interval)
now = datetime.now()
delta = get_adjusted_delta(self._last_report_timestamp, now)
if delta > poll_interval * self.MAX_DELAY_INTERVALS:
self.logger.info(
'Previous update missing, current time %s, last '
'report timestamp %s, last poll timestamp %s',
now.strftime("%H:%M:%S"),
self._last_report_timestamp.strftime("%H:%M:%S"),
self._last_poll_time.strftime("%H:%M:%S"),
)
self._last_report_timestamp = now
hub.sleep(poll_interval / 2)
continue
if delta < poll_interval:
continue
self._last_poll_time = now
self.logger.debug(
'Started polling: %s',
now.strftime("%H:%M:%S"),
)
self._poll_stats(self._datapath)
def _poll_stats(self, datapath, cookie: int = 0, cookie_mask: int = 0):
"""
Send a FlowStatsRequest message to the datapath
Raises:
MagmaOFError: if we can't poll datapath stats
"""
try:
flows.send_stats_request(
datapath, self.tbl_num,
cookie, cookie_mask,
)
except MagmaOFError as e:
self.logger.warning("Couldn't poll datapath stats: %s", e)
except Exception as e: # pylint: disable=broad-except
self.logger.warning("Couldn't poll datapath stats: %s", e)
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def _flow_stats_reply_handler(self, ev):
"""
Schedule the flow stats handling in the main event loop, so as to
unblock the ryu event loop
"""
if not self.init_finished:
self.logger.debug('Setup not finished, skipping stats reply')
return
if self._datapath_id != ev.msg.datapath.id:
self.logger.debug('Ignoring stats from different bridge')
return
self.unhandled_stats_msgs.append(ev.msg.body)
if ev.msg.flags == OFPMPF_REPLY_MORE:
            # Wait for more multi-part responses that are received for the
            # single stats request.
return
self.loop.call_soon_threadsafe(
self._handle_flow_stats, self.unhandled_stats_msgs,
)
self.unhandled_stats_msgs = []
def _handle_flow_stats(self, stats_msgs):
"""
Aggregate flow stats by rule, and report to session manager
"""
stat_count = sum(len(flow_stats) for flow_stats in stats_msgs)
if stat_count == 0:
return
self.logger.debug("Processing %s stats responses", len(stats_msgs))
# Aggregate flows into rule records
aggregated_msgs = []
for flow_stats in stats_msgs:
aggregated_msgs += flow_stats
self.logger.debug("Processing stats of %d flows", len(aggregated_msgs))
try:
current_usage = self._get_usage_from_flow_stat(aggregated_msgs)
except ConnectionError:
self.logger.error('Failed processing stats, redis unavailable')
self.unhandled_stats_msgs.append(stats_msgs)
return
# Send report even if usage is empty. Sessiond uses empty reports to
# recognize when flows have ended
self._report_usage(current_usage)
# This is done primarily for CWF integration tests, TODO rm
self.total_usage = current_usage
        # Report only if there is no change in version
        if self.ng_config.ng_service_enabled:
self._prepare_session_config_report(stats_msgs)
def deactivate_default_flow(self, imsi, ip_addr, local_f_teid_ng=0):
if self._datapath is None:
self.logger.error('Datapath not initialized')
return
match_in = _generate_rule_match(
imsi, ip_addr, 0, 0,
Direction.IN, local_f_teid_ng,
)
match_out = _generate_rule_match(
imsi, ip_addr, 0, 0,
Direction.OUT, local_f_teid_ng,
)
flows.delete_flow(self._datapath, self.tbl_num, match_in)
flows.delete_flow(self._datapath, self.tbl_num, match_out)
def _report_usage(self, usage):
"""
Report usage to sessiond using rpc
"""
record_table = RuleRecordTable(
records=usage.values(),
epoch=global_epoch,
update_rule_versions=self._ovs_restarted,
)
if self._print_grpc_payload:
record_msg = 'Sending RPC payload: {0}{{\n{1}}}'.format(
record_table.DESCRIPTOR.name, str(record_table),
)
self.logger.info(record_msg)
future = self.sessiond.ReportRuleStats.future(
record_table, self.SESSIOND_RPC_TIMEOUT,
)
future.add_done_callback(
lambda future: self.loop.call_soon_threadsafe(
self._report_usage_done, future, usage.values(),
),
)
def _report_usage_done(self, future, records):
"""
Callback after sessiond RPC completion
"""
self._last_report_timestamp = datetime.now()
self.logger.debug(
'Finished reporting: %s',
self._last_report_timestamp.strftime("%H:%M:%S"),
)
err = future.exception()
if err:
self.logger.error("Couldn't send flow records to sessiond: %s", err)
return
try:
self._delete_old_flows(records)
except ConnectionError:
self.logger.error('Failed to remove old flows, redis unavailable')
return
def _get_usage_from_flow_stat(self, flow_stats):
"""
Update the rule record map with the flow stat and return the
updated map.
"""
current_usage = defaultdict(RuleRecord)
for flow_stat in flow_stats:
if flow_stat.table_id != self.tbl_num:
# this update is not intended for policy
continue
rule_id = self._get_rule_id(flow_stat)
# Rule not found, must be default flow
if rule_id == "":
default_flow_matched = \
flow_stat.cookie == self.DEFAULT_FLOW_COOKIE
if default_flow_matched:
if flow_stat.byte_count != 0 and \
self._unmatched_bytes != flow_stat.byte_count:
self.logger.debug(
'%s bytes total not reported.',
flow_stat.byte_count,
)
self._unmatched_bytes = flow_stat.byte_count
continue
else:
# This must be the default drop flow
rule_id = self._default_drop_flow_name
# If this is a pass through app name flow ignore stats
if _get_policy_type(flow_stat.match) == IGNORE_STATS:
continue
sid = _get_sid(flow_stat)
if not sid:
continue
ipv4_addr = _get_ipv4(flow_stat)
ipv6_addr = self._get_ipv6(flow_stat)
local_f_teid_ng = _get_ng_local_f_id(flow_stat)
# use a compound key to separate flows for the same rule but for
# different subscribers
key = sid + "|" + rule_id
if ipv4_addr:
key += "|" + ipv4_addr
elif ipv6_addr:
key += "|" + ipv6_addr
rule_version = _get_version(flow_stat)
if not rule_version:
rule_version = 0
key += "|" + str(rule_version)
current_usage[key].rule_id = rule_id
current_usage[key].sid = sid
current_usage[key].rule_version = rule_version
if ipv4_addr:
current_usage[key].ue_ipv4 = ipv4_addr
elif ipv6_addr:
current_usage[key].ue_ipv6 = ipv6_addr
if local_f_teid_ng:
current_usage[key].teid = local_f_teid_ng
bytes_rx = 0
bytes_tx = 0
if flow_stat.match[DIRECTION_REG] == Direction.IN:
# HACK decrement byte count for downlink packets by the length
# of an ethernet frame. Only IP and below should be counted towards
# a user's data. Uplink does this already because the GTP port is
# an L3 port.
bytes_rx = _get_downlink_byte_count(flow_stat)
else:
bytes_tx = flow_stat.byte_count
if _get_policy_type(flow_stat.match) == PROCESS_STATS:
current_usage[key].bytes_rx += bytes_rx
current_usage[key].bytes_tx += bytes_tx
else:
current_usage[key].dropped_rx += bytes_rx
current_usage[key].dropped_tx += bytes_tx
return current_usage
def _delete_old_flows(self, records):
"""
Check if the version of any record is older than the current version.
If so, delete the flow.
"""
for record in records:
ip_addr = None
if record.ue_ipv4:
ip_addr = convert_ipv4_str_to_ip_proto(record.ue_ipv4)
elif record.ue_ipv6:
ip_addr = convert_ipv6_str_to_ip_proto(record.ue_ipv6)
current_ver = self._session_rule_version_mapper.get_version(
record.sid, ip_addr, record.rule_id,
)
local_f_teid_ng = 0
if record.teid:
local_f_teid_ng = record.teid
if current_ver == record.rule_version:
continue
try:
self._delete_flow(
record.sid, ip_addr,
record.rule_id, record.rule_version, local_f_teid_ng,
)
except MagmaOFError as e:
self.logger.error(
'Failed to delete rule %s for subscriber %s ('
'version: %s): %s', record.rule_id,
record.sid, record.rule_version, e,
)
def _delete_flow(self, imsi, ip_addr, rule_id, rule_version, local_f_teid_ng=0):
rule_num = self._rule_mapper.get_or_create_rule_num(rule_id)
match_in = _generate_rule_match(
imsi, ip_addr, rule_num, rule_version,
Direction.IN, local_f_teid_ng,
)
match_out = _generate_rule_match(
imsi, ip_addr, rule_num, rule_version,
Direction.OUT, local_f_teid_ng,
)
flows.delete_flow(
self._datapath,
self.tbl_num,
match_in,
)
flows.delete_flow(
self._datapath,
self.tbl_num,
match_out,
)
def _was_ovs_restarted(self):
try:
ovs_pid = int(check_output(["pidof", "ovs-vswitchd"]).decode())
except Exception as e: # pylint: disable=broad-except
self.logger.warning("Couldn't get ovs pid: %s", e)
ovs_pid = 0
stored_ovs_pid = self._restart_info_store["ovs-vswitchd"]
self._restart_info_store["ovs-vswitchd"] = ovs_pid
self.logger.info(
"Stored ovs_pid %d, new ovs pid %d",
stored_ovs_pid, ovs_pid,
)
return ovs_pid != stored_ovs_pid
def _get_rule_id(self, flow):
"""
Return the rule id from the rule cookie
"""
# the default rule will have a rule num of 0 (the default flow cookie)
rule_num = flow.match.get(RULE_NUM_REG, 0)
if rule_num == 0 or rule_num == self.DEFAULT_FLOW_COOKIE:
return ""
try:
return self._rule_mapper.get_rule_id(rule_num)
except KeyError as e:
self.logger.error(
'Could not find rule id for num %d: %s',
rule_num, e,
)
return ""
def get_stats(self, cookie: int = 0, cookie_mask: int = 0):
"""
Use Ryu API to send a stats request containing cookie and cookie mask, retrieve a response and
convert to a Rule Record Table and remove old flows
"""
if not self._datapath:
self.logger.error("Could not initialize datapath for stats retrieval")
return RuleRecordTable()
parser = self._datapath.ofproto_parser
message = parser.OFPFlowStatsRequest(
datapath=self._datapath,
table_id=self.tbl_num,
cookie=cookie,
cookie_mask=cookie_mask,
)
try:
response = ofctl_api.send_msg(
self, message, reply_cls=parser.OFPFlowStatsReply,
reply_multi=True,
)
if not response:
self.logger.error("No rule records match the specified cookie and cookie mask")
return RuleRecordTable()
aggregated_msgs = []
for r in response:
aggregated_msgs += r.body
usage = self._get_usage_from_flow_stat(aggregated_msgs)
self.loop.call_soon_threadsafe(self._delete_old_flows, usage.values())
record_table = RuleRecordTable(
records=usage.values(),
epoch=global_epoch,
)
return record_table
except (InvalidDatapath, OFError, UnexpectedMultiReply):
self.logger.error("Could not obtain rule records due to either InvalidDatapath, OFError or UnexpectedMultiReply")
return RuleRecordTable()
def _prepare_session_config_report(self, stats_msgs):
session_config_dict = {}
for flow_stats in stats_msgs:
for stat in flow_stats:
if stat.table_id != self.tbl_num:
continue
local_f_teid_ng = _get_ng_local_f_id(stat)
if not local_f_teid_ng or local_f_teid_ng == REG_ZERO_VAL:
continue
# Already present
if local_f_teid_ng in session_config_dict:
if local_f_teid_ng != session_config_dict[local_f_teid_ng].local_f_teid:
self.logger.error("Mismatch local TEID value. Need to investigate")
continue
sid = _get_sid(stat)
if not sid:
continue
rule_version = _get_version(stat)
if rule_version == 0:
continue
session_config_dict[local_f_teid_ng] = \
UPFSessionState(
subscriber_id=sid,
session_version=rule_version,
local_f_teid=local_f_teid_ng,
)
SessionStateManager.report_session_config_state(
session_config_dict,
self.ng_config.sessiond_setinterface,
)
def _get_ipv6(self, flow):
if DIRECTION_REG not in flow.match:
return None
if flow.match[DIRECTION_REG] == Direction.OUT:
ip_register = 'ipv6_src'
else:
ip_register = 'ipv6_dst'
if ip_register not in flow.match:
return None
ipv6 = flow.match[ip_register]
# masked value returned as tuple
if isinstance(ipv6, tuple):
ipv6_addr = ipv6[0]
else:
ipv6_addr = ipv6
prefix = get_ipv6_prefix(ipv6_addr)
interface = self._prefix_mapper.get_interface(prefix)
if interface is None:
return ipv6_addr
# Rebuild UE IPv6 address from prefix map
subnet = ipaddress.ip_address(prefix)
host_id = ipaddress.ip_address(interface)
ue_ip = ipaddress.ip_address(int(subnet) | int(host_id))
self.logger.debug("recalc ue_ip: %s sub: %s host: %s", ue_ip, prefix, host_id)
return str(ue_ip)
def _generate_rule_match(imsi, ip_addr, rule_num, version, direction, local_f_teid_ng=0):
"""
Return a MagmaMatch that matches on the rule num and the version.
"""
ip_match = get_ue_ip_match_args(ip_addr, direction)
return MagmaMatch(
imsi=encode_imsi(imsi), eth_type=get_eth_type(ip_addr),
direction=direction, rule_num=rule_num,
rule_version=version, local_f_teid_ng=local_f_teid_ng,
**ip_match,
)
def _get_sid(flow):
if IMSI_REG not in flow.match:
return None
return decode_imsi(flow.match[IMSI_REG])
def _get_ipv4(flow):
if DIRECTION_REG not in flow.match:
return None
if flow.match[DIRECTION_REG] == Direction.OUT:
ip_register = 'ipv4_src'
else:
ip_register = 'ipv4_dst'
if ip_register not in flow.match:
return None
ipv4 = flow.match[ip_register]
# masked value returned as tuple
if isinstance(ipv4, tuple):
return ipv4[0]
else:
return ipv4
def _get_version(flow):
if RULE_VERSION_REG not in flow.match:
return None
return flow.match[RULE_VERSION_REG]
def _get_downlink_byte_count(flow_stat):
total_bytes = flow_stat.byte_count
packet_count = flow_stat.packet_count
return total_bytes - ETH_FRAME_SIZE_BYTES * packet_count
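# e.g. 10 downlink packets totalling 15000 bytes report
# 15000 - 14 * 10 = 14860 bytes, assuming ETH_FRAME_SIZE_BYTES == 14
# (the standard Ethernet header size; defined elsewhere in this module).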
def _get_policy_type(match):
if SCRATCH_REGS[1] not in match:
return None
return match[SCRATCH_REGS[1]]
def get_adjusted_delta(begin, end):
# Add on a bit of time to compensate for grpc
return (end - begin + timedelta(milliseconds=150)).total_seconds()
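# e.g. begin 12:00:00.000 and end 12:00:01.000 yield
# (1s + 150ms).total_seconds() == 1.15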
def _get_ng_local_f_id(flow):
if NG_SESSION_ID_REG not in flow.match:
return None
return flow.match[NG_SESSION_ID_REG] | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and |
GatewayPlacement.py | import IoTSensor
import LORAGateway
class GatewayPlacement:
def __init__(self, sensor_list):
|
def add_gateway(self, gateway):
self._gateway_list.append(gateway)
def remove_gateway(self, gateway):
self._gateway_list.remove(gateway)
def sensors_covered(self):
curr_placement_coverage = []
for g in self._gateway_list:
curr_gateway_coverage = g.get_coverage(self._sensor_list)
for s in curr_gateway_coverage:
if s.get_id() not in curr_placement_coverage:
curr_placement_coverage.append(s.get_id())
covers = True
for s in self._sensor_list:
if s.get_id() not in curr_placement_coverage:
covers = False
break
return covers
def energy_consumption(self, time):
energy = 0.0
for s in self._sensor_list:
energy += s.get_total_consumption(time, s.get_closest_gateway(self._gateway_list))
for g in self._gateway_list:
energy += g.get_energy_consumption(time)
return energy
def get_gateways_number(self):
return len(self._gateway_list)
| self._sensor_list = sensor_list
self._gateway_list = [] |
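# Illustrative usage sketch for GatewayPlacement (the IoTSensor/LORAGateway
# constructors and their arguments are assumptions, not part of this file):
#
#   sensors = [IoTSensor.IoTSensor(i) for i in range(10)]
#   placement = GatewayPlacement(sensors)
#   placement.add_gateway(LORAGateway.LORAGateway())
#   if placement.sensors_covered():
#       print(placement.energy_consumption(time=3600.0))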
server.go | package main
import (
"time"
"github.com/lesismal/arpc"
"github.com/lesismal/arpc/log"
"github.com/lesismal/arpc/middleware/router"
)
func main() {
svr := arpc.NewServer()
svr.Handler.Use(router.Recover)
svr.Handler.Use(router.Logger)
// register router
svr.Handler.Handle("/panic", func(ctx *arpc.Context) {
ctx.Write(ctx.Body())
log.Info("/panic handler")
panic(string(ctx.Body()))
})
// register router
svr.Handler.Handle("/logger", func(ctx *arpc.Context) {
ctx.Write(ctx.Body())
log.Info("/logger handler")
time.Sleep(time.Millisecond) |
svr.Run(":8888")
} | }) |
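// Illustrative client sketch for the server above (API names follow the
// lesismal/arpc README; treat the exact signatures as assumptions):
//
//   client, err := arpc.NewClient(func() (net.Conn, error) {
//       return net.DialTimeout("tcp", "localhost:8888", time.Second*3)
//   })
//   if err == nil {
//       defer client.Stop()
//       rsp := ""
//       client.Call("/logger", "hello", &rsp, time.Second*5)
//   }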
spi.rs | use crate::gpio::{gpioa::*, gpiob::*, gpioc::*, gpiod::*, AltFunction, DefaultMode};
use crate::rcc::*;
use crate::stm32::{SPI1, SPI2};
use crate::time::Hertz;
use core::ptr;
pub use hal::spi::{Mode, Phase, Polarity, MODE_0, MODE_1, MODE_2, MODE_3};
/// SPI error
#[derive(Debug)]
pub enum | {
/// Overrun occurred
Overrun,
/// Mode fault occurred
ModeFault,
/// CRC error
Crc,
}
/// A filler type for when the SCK pin is unnecessary
pub struct NoSck;
/// A filler type for when the Miso pin is unnecessary
pub struct NoMiso;
/// A filler type for when the Mosi pin is unnecessary
pub struct NoMosi;
pub trait Pins<SPI> {
fn setup(&self);
fn release(self) -> Self;
}
pub trait PinSck<SPI> {
fn setup(&self);
fn release(self) -> Self;
}
pub trait PinMiso<SPI> {
fn setup(&self);
fn release(self) -> Self;
}
pub trait PinMosi<SPI> {
fn setup(&self);
fn release(self) -> Self;
}
impl<SPI, SCK, MISO, MOSI> Pins<SPI> for (SCK, MISO, MOSI)
where
SCK: PinSck<SPI>,
MISO: PinMiso<SPI>,
MOSI: PinMosi<SPI>,
{
fn setup(&self) {
self.0.setup();
self.1.setup();
self.2.setup();
}
fn release(self) -> Self {
(self.0.release(), self.1.release(), self.2.release())
}
}
#[derive(Debug)]
pub struct Spi<SPI, PINS> {
spi: SPI,
pins: PINS,
}
pub trait SpiExt: Sized {
fn spi<PINS, T>(self, pins: PINS, mode: Mode, freq: T, rcc: &mut Rcc) -> Spi<Self, PINS>
where
PINS: Pins<Self>,
T: Into<Hertz>;
}
macro_rules! spi {
($SPIX:ident, $spiX:ident,
sck: [ $(($SCK:ty, $SCK_AF:expr),)+ ],
miso: [ $(($MISO:ty, $MISO_AF:expr),)+ ],
mosi: [ $(($MOSI:ty, $MOSI_AF:expr),)+ ],
) => {
impl PinSck<$SPIX> for NoSck {
fn setup(&self) {}
fn release(self) -> Self {
self
}
}
impl PinMiso<$SPIX> for NoMiso {
fn setup(&self) {}
fn release(self) -> Self {
self
}
}
impl PinMosi<$SPIX> for NoMosi {
fn setup(&self) {}
fn release(self) -> Self {
self
}
}
$(
impl PinSck<$SPIX> for $SCK {
fn setup(&self) {
self.set_alt_mode($SCK_AF);
}
fn release(self) -> Self {
self.into_analog()
}
}
)*
$(
impl PinMiso<$SPIX> for $MISO {
fn setup(&self) {
self.set_alt_mode($MISO_AF);
}
fn release(self) -> Self {
self.into_analog()
}
}
)*
$(
impl PinMosi<$SPIX> for $MOSI {
fn setup(&self) {
self.set_alt_mode($MOSI_AF);
}
fn release(self) -> Self {
self.into_analog()
}
}
)*
impl<PINS: Pins<$SPIX>> Spi<$SPIX, PINS> {
pub fn $spiX<T>(
spi: $SPIX,
pins: PINS,
mode: Mode,
speed: T,
rcc: &mut Rcc
) -> Self
where
T: Into<Hertz>
{
$SPIX::enable(rcc);
$SPIX::reset(rcc);
// disable SS output
spi.cr2.write(|w| w.ssoe().clear_bit());
let spi_freq = speed.into().0;
let apb_freq = rcc.clocks.apb_clk.0;
let br = match apb_freq / spi_freq {
0 => unreachable!(),
1..=2 => 0b000,
3..=5 => 0b001,
6..=11 => 0b010,
12..=23 => 0b011,
24..=47 => 0b100,
48..=95 => 0b101,
96..=191 => 0b110,
_ => 0b111,
};
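// e.g. apb_freq 16 MHz with spi_freq 1 MHz gives a ratio of 16,
// so br = 0b011 (fPCLK / 16).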
spi.cr2.write(|w| unsafe {
w.frxth().set_bit().ds().bits(0b111).ssoe().clear_bit()
});
// Enable pins
pins.setup();
spi.cr1.write(|w| unsafe {
w.cpha()
.bit(mode.phase == Phase::CaptureOnSecondTransition)
.cpol()
.bit(mode.polarity == Polarity::IdleHigh)
.mstr()
.set_bit()
.br()
.bits(br)
.lsbfirst()
.clear_bit()
.ssm()
.set_bit()
.ssi()
.set_bit()
.rxonly()
.clear_bit()
.dff()
.clear_bit()
.bidimode()
.clear_bit()
.ssi()
.set_bit()
.spe()
.set_bit()
});
Spi { spi, pins }
}
pub fn data_size(&mut self, nr_bits: u8) {
self.spi.cr2.modify(|_, w| unsafe {
w.ds().bits(nr_bits-1)
});
}
pub fn half_duplex_enable(&mut self, enable: bool) {
self.spi.cr1.modify(|_, w|
w.bidimode().bit(enable)
);
}
pub fn half_duplex_output_enable(&mut self, enable: bool) {
self.spi.cr1.modify(|_, w|
w.bidioe().bit(enable)
);
}
pub fn release(self) -> ($SPIX, PINS) {
(self.spi, self.pins.release())
}
}
impl SpiExt for $SPIX {
fn spi<PINS, T>(self, pins: PINS, mode: Mode, freq: T, rcc: &mut Rcc) -> Spi<$SPIX, PINS>
where
PINS: Pins<$SPIX>,
T: Into<Hertz>
{
Spi::$spiX(self, pins, mode, freq, rcc)
}
}
impl<PINS> hal::spi::FullDuplex<u8> for Spi<$SPIX, PINS> {
type Error = Error;
fn read(&mut self) -> nb::Result<u8, Error> {
let sr = self.spi.sr.read();
Err(if sr.ovr().bit_is_set() {
nb::Error::Other(Error::Overrun)
} else if sr.modf().bit_is_set() {
nb::Error::Other(Error::ModeFault)
} else if sr.crcerr().bit_is_set() {
nb::Error::Other(Error::Crc)
} else if sr.rxne().bit_is_set() {
// NOTE(read_volatile) read only 1 byte (the svd2rust API only allows
// reading a half-word)
return Ok(unsafe {
ptr::read_volatile(&self.spi.dr as *const _ as *const u8)
});
} else {
nb::Error::WouldBlock
})
}
fn send(&mut self, byte: u8) -> nb::Result<(), Error> {
let sr = self.spi.sr.read();
Err(if sr.ovr().bit_is_set() {
nb::Error::Other(Error::Overrun)
} else if sr.modf().bit_is_set() {
nb::Error::Other(Error::ModeFault)
} else if sr.crcerr().bit_is_set() {
nb::Error::Other(Error::Crc)
} else if sr.txe().bit_is_set() {
// NOTE(write_volatile) see note above
unsafe { ptr::write_volatile(&self.spi.dr as *const _ as *mut u8, byte) }
return Ok(());
} else {
nb::Error::WouldBlock
})
}
}
impl<PINS> ::hal::blocking::spi::transfer::Default<u8> for Spi<$SPIX, PINS> {}
impl<PINS> ::hal::blocking::spi::write::Default<u8> for Spi<$SPIX, PINS> {}
}
}
spi!(
SPI1,
spi1,
sck: [
(PA1<DefaultMode>, AltFunction::AF0),
(PA5<DefaultMode>, AltFunction::AF0),
(PB3<DefaultMode>, AltFunction::AF0),
(PD8<DefaultMode>, AltFunction::AF1),
],
miso: [
(PA6<DefaultMode>, AltFunction::AF0),
(PA11<DefaultMode>, AltFunction::AF0),
(PB4<DefaultMode>, AltFunction::AF0),
(PD5<DefaultMode>, AltFunction::AF1),
],
mosi: [
(PA2<DefaultMode>, AltFunction::AF0),
(PA7<DefaultMode>, AltFunction::AF0),
(PA12<DefaultMode>, AltFunction::AF0),
(PB5<DefaultMode>, AltFunction::AF0),
(PD6<DefaultMode>, AltFunction::AF1),
],
);
spi!(
SPI2,
spi2,
sck: [
(PA0<DefaultMode>, AltFunction::AF0),
(PB8<DefaultMode>, AltFunction::AF1),
(PB10<DefaultMode>, AltFunction::AF5),
(PB13<DefaultMode>, AltFunction::AF0),
(PD1<DefaultMode>, AltFunction::AF1),
],
miso: [
(PA3<DefaultMode>, AltFunction::AF0),
(PA9<DefaultMode>, AltFunction::AF4),
(PB2<DefaultMode>, AltFunction::AF1),
(PB6<DefaultMode>, AltFunction::AF4),
(PB14<DefaultMode>, AltFunction::AF0),
(PC2<DefaultMode>, AltFunction::AF1),
(PD3<DefaultMode>, AltFunction::AF1),
],
mosi: [
(PA4<DefaultMode>, AltFunction::AF1),
(PA10<DefaultMode>, AltFunction::AF0),
(PB7<DefaultMode>, AltFunction::AF1),
(PB11<DefaultMode>, AltFunction::AF0),
(PB15<DefaultMode>, AltFunction::AF0),
(PC3<DefaultMode>, AltFunction::AF1),
(PD4<DefaultMode>, AltFunction::AF1),
],
);
| Error |
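// Illustrative usage sketch for the Spi wrapper above (pin choices follow
// the spi! tables; `dp`, `rcc` and the `.mhz()` frequency helper are assumed
// from the surrounding HAL crate):
//
//     let spi = dp.SPI1.spi(
//         (sck_pa5, miso_pa6, mosi_pa7),   // hypothetical configured pins
//         MODE_0,
//         1.mhz(),
//         &mut rcc,
//     );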
managers.py | import base64
import json
from fireo.fields import NestedModel
from fireo.fields.errors import FieldNotFound
from fireo.queries import query_set as queries
class ManagerError(Exception):
pass
class ManagerDescriptor:
"""Restrict user to get `Manager` from model instance and from abstract model"""
def __init__(self, manager):
self.manager = manager
def __get__(self, instance, owner):
# reset parent key
self.manager._parent_key = None
if instance is not None:
raise ManagerError(f'Manager "{self.manager.name}" can not accessible via {owner.__name__} instance')
if owner._meta.abstract:
raise ManagerError(f'Manager "{self.manager.name}" is not accessible via {owner.__name__} abstract model')
return self.manager
class Manager:
"""Manager are used to perform firestore action directly from model class without instance
Default manager can be accessible via `collection` from model class
Examples
-------
.. code-block:: python
class User(Model):
name = TextField()
user = User.collection.create(name="Azeem")
Attributes
----------
queryset:
Read only property, provide operations related to firestore
_parent_key:
Parent key if any
name:
Name of the manager
model_cls:
Model to which this manager is attached
Methods
-------
contribute_to_model(model_cls, name="collection"):
Attach manager to model class
create(mutable_instance, kwargs): Model instance
create new document in firestore collection
_update(mutable_instance, kwargs): Model instance
Update existing document in firestore collection
get(key): Model instance
Get document from firestore
get_all(key_list): Model instance
Get All documents according to key list
parent(key):
Parent key if any
filter(): Model instance
Get filtered documents from firestore
fetch(limit) : generator
Fetch document from firestore, limit is optional here
group_fetch(limit) : generator
Use a collection group query to retrieve documents from a collection group
transaction():
Firestore transaction
batch():
Firestore batch writes
limit(count):
Set limit for query
offset(num_to_skip)
Set offset for query
order(field_name):
Order document by field_name
delete(key, child=False)
Delete document from firestore, key is optional
delete_all(key_list, batch=None, child=False)
Delete all documents according to given keys
cursor(c):
Start query from specific point
start_after(key, **kwargs):
Start document after this key or after the matching fields
start_at(key, **kwargs):
Start document at this key or at the matching fields
end_before(key, **kwargs):
End document before this key or before the matching fields
end_at(key, **kwargs):
End document at this key or at the matching fields
"""
def __init__(self):
self.model_cls = None
self.name = None
self._parent_key = None
def contribute_to_model(self, model_cls, name="collection"):
"""Attach manager to model class
This method attach manager to model class
Parameters
----------
model_cls : Model
In which model this manager will be attached
name : str
What is the name of this manager when it is attaching with model and
later can be accessible with this name
"""
self.name = name
self.model_cls = model_cls
setattr(model_cls, name, ManagerDescriptor(self))
@property
def queryset(self):
"""provide operations related to firestore"""
return queries.QuerySet(self.model_cls)
def create(self, mutable_instance=None, transaction=None, batch=None, **kwargs,):
"""create new document in firestore collection
Parameters
---------
mutable_instance: Model instance
Existing model instance to mutate; after the Firestore action completes,
this instance is updated in place with things like id, key, etc.
transaction:
Firestore transaction
batch:
Firestore batch
"""
field_list = {}
# If mutable_instance is None the user is creating the document directly
# from the manager, e.g. User.collection.create(name="Azeem").
# In that case check whether the model defines any NestedModel fields
# and, if so, take their values from the nested model instances.
if mutable_instance is None:
for k, v in kwargs.items():
try:
# fields not defined on the model (e.g. a custom id) are stored as-is
f = self.model_cls._meta.get_field(k)
except FieldNotFound:
field_list[k] = v
continue
if isinstance(f, NestedModel):
model_instance = v
if f.valid_model(model_instance):
field_list[f.name] = model_instance._get_fields()
else:
field_list[k] = v
# Create instance for nested model
for f in self.model_cls._meta.field_list.values():
if isinstance(f, NestedModel):
field_list[f.name] = f.nested_model()._get_fields()
else:
field_list = kwargs
return self.queryset.create(mutable_instance, transaction, batch, **field_list)
def _update(self, mutable_instance=None, transaction=None, batch=None, **kwargs):
"""Update existing document in firestore collection
Parameters
---------
mutable_instance: Model instance
Existing model instance to mutate; after the Firestore action completes,
this instance is updated in place with things like id, key, etc.
transaction:
Firestore transaction
batch:
Firestore batch
"""
return self.queryset.update(mutable_instance, transaction, batch, **kwargs)
def get(self, key, transaction=None):
"""Get document from firestore"""
return self.queryset.get(key, transaction)
def get_all(self, key_list):
"""Get All documents according to key list"""
for key in key_list:
yield self.queryset.get(key)
def parent(self, key):
"""Parent collection"""
self._parent_key = key
return self
def filter(self, *args, **kwargs):
"""Get filter document from firestore"""
return self.queryset.filter(self._parent_key, *args, **kwargs)
def fetch(self, limit=None):
"""Fetch document from collection"""
return self.queryset.filter(self._parent_key).fetch(limit)
def group_fetch(self, limit=None):
"""A collection group consists of all collections with the same ID.
By default, queries retrieve results from a single collection in your database.
Use a collection group query to retrieve documents from a collection group
instead of from a single collection."""
return self.queryset.filter(self._parent_key).group_fetch(limit)
def transaction(self, t):
"""Firestore transaction"""
return self.queryset.filter(self._parent_key).transaction(t)
def batch(self, b):
"""Firestore batch"""
return self.queryset.filter(self._parent_key).batch(b)
def limit(self, count):
"""Limit the document"""
return self.queryset.filter(self._parent_key).limit(count)
def offset(self, num_to_skip):
"""Set offset for query"""
return self.queryset.filter(self._parent_key).offset(num_to_skip)
def order(self, field_name):
"""Order the document by field name"""
return self.queryset.filter(self._parent_key).order(field_name)
def delete(self, key=None, transaction=None, batch=None, child=False):
"""Delete document from firestore
if child is True then delete child collection and documents also
"""
if key:
self.queryset.delete(key, transaction, batch, child=child)
else:
self.queryset.filter(self._parent_key).delete(child=child)
def delete_all(self, key_list, batch=None, child=False):
"""Delete all documents according to given keys"""
for key in key_list:
self.queryset.delete(key, batch=batch, child=child)
def cursor(self, cursor):
"""Start query from specific point
Cursor define where to start the query
"""
parent = self._parent_key
cursor_dict = json.loads(base64.b64decode(cursor))
if 'parent' in cursor_dict:
parent = cursor_dict['parent']
query = self.queryset.filter(parent)
if 'filters' in cursor_dict:
for filter in cursor_dict['filters']:
query.filter(*filter)
if 'order' in cursor_dict:
query.order(cursor_dict['order'])
if 'limit' in cursor_dict:
|
# check if last doc key is available or not
if 'last_doc_key' in cursor_dict:
query.start_after(key=cursor_dict['last_doc_key'])
else:
query.offset(cursor_dict['offset'])
return query
def start_after(self, key=None, **kwargs):
"""Start document after this key or after that matching fields"""
return self.queryset.filter(self._parent_key).start_after(key, **kwargs)
def start_at(self, key=None, **kwargs):
"""Start document at this key or at that matching fields"""
return self.queryset.filter(self._parent_key).start_at(key, **kwargs)
def end_before(self, key=None, **kwargs):
"""End document after this key or after that matching fields"""
return self.queryset.filter(self._parent_key).end_before(key, **kwargs)
def end_at(self, key=None, **kwargs):
"""End document at this key or at that matching fields"""
return self.queryset.filter(self._parent_key).end_at(key, **kwargs) | query.limit(cursor_dict['limit']) |
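# Illustrative cursor payload for Manager.cursor() above (hypothetical
# values) -- a base64-encoded JSON document:
#
#   payload = {"filters": [["age", ">", 25]], "order": "-age",
#              "limit": 10, "last_doc_key": "user/azeem"}
#   cursor = base64.b64encode(json.dumps(payload).encode())
#   query = User.collection.cursor(cursor)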
home.js | $(document).ready(function() { | $('.tools .col').addClass('hidden').viewportChecker({
classToAdd: 'visible animated fadeInUp',
});
}); | |
emoji.rs | use std::fmt::{Display, Formatter, Result as FmtResult, Write as FmtWrite};
#[cfg(all(feature = "cache", feature = "model"))]
use serde_json::json;
#[cfg(all(feature = "cache", feature = "model"))]
use crate::cache::Cache;
use crate::http::Http;
#[cfg(all(feature = "cache", feature = "model"))]
use crate::internal::prelude::*;
#[cfg(all(feature = "cache", feature = "model"))]
use crate::model::id::GuildId;
use crate::model::id::{EmojiId, RoleId};
use crate::model::user::User;
use crate::model::utils::default_true;
#[cfg(all(feature = "cache", feature = "model"))]
use crate::model::ModelError;
/// Represents a custom guild emoji, which can either be created using the API,
/// or via an integration. Emojis created using the API only work within the
/// guild they were created in.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[non_exhaustive]
pub struct Emoji {
/// Whether the emoji is animated.
#[serde(default)]
pub animated: bool,
/// Whether the emoji can be used. This may be false when the guild loses boosts,
/// reducing the emoji limit.
#[serde(default = "default_true")]
pub available: bool,
/// The Id of the emoji.
pub id: EmojiId,
/// The name of the emoji. It must be at least 2 characters long and can
/// only contain alphanumeric characters and underscores.
pub name: String,
/// Whether the emoji is managed via an [`Integration`] service.
///
/// [`Integration`]: super::Integration
#[serde(default)]
pub managed: bool,
/// Whether the emoji name needs to be surrounded by colons in order to be
/// used by the client.
#[serde(default)]
pub require_colons: bool,
/// A list of [`Role`]s that are allowed to use the emoji. If there are no
/// roles specified, then usage is unrestricted.
///
/// [`Role`]: super::Role
#[serde(default)]
pub roles: Vec<RoleId>,
/// The user who created the emoji.
pub user: Option<User>,
}
#[cfg(feature = "model")]
impl Emoji {
/// Deletes the emoji.
/// This method requires the cache to fetch the guild ID.
///
/// **Note**: The [Manage Emojis] permission is required.
///
/// # Examples
///
/// Delete a given emoji:
///
/// ```rust,no_run
/// # use serde_json::json;
/// # use serenity::framework::standard::{CommandResult, macros::command};
/// # use serenity::client::Context;
/// # use serenity::model::prelude::{EmojiId, Emoji, Role};
/// #
/// # #[command]
/// # async fn example(ctx: &Context) -> CommandResult {
/// # let mut emoji = serde_json::from_value::<Emoji>(json!({
/// # "animated": false,
/// # "id": EmojiId(7),
/// # "name": "blobface",
/// # "managed": false,
/// # "require_colons": false,
/// # "roles": Vec::<Role>::new(),
/// # }))?;
/// #
/// // assuming emoji has been set already
/// match emoji.delete(&ctx).await {
/// Ok(()) => println!("Emoji deleted."),
/// Err(_) => println!("Could not delete emoji.")
/// }
/// # Ok(())
/// # }
/// ```
///
/// # Errors
///
/// Returns [`Error::Http`] if the current user lacks permission,
/// or may return [`ModelError::ItemMissing`] if the emoji is not
/// in the cache.
///
/// [Manage Emojis]: crate::model::permissions::Permissions::MANAGE_EMOJIS
#[cfg(feature = "cache")]
#[inline]
pub async fn delete<T: AsRef<Cache> + AsRef<Http>>(&self, cache_http: T) -> Result<()> {
match self.find_guild_id(&cache_http).await {
Some(guild_id) => {
AsRef::<Http>::as_ref(&cache_http).delete_emoji(guild_id.0, self.id.0).await
},
None => Err(Error::Model(ModelError::ItemMissing)),
}
}
/// Edits the emoji by updating it with a new name.
/// This method requires the cache to fetch the guild ID.
///
/// **Note**: The [Manage Emojis] permission is required.
///
/// # Errors
///
/// Returns [`Error::Http`] if the current user lacks permission,
/// or if an invalid name is given.
///
/// [Manage Emojis]: crate::model::permissions::Permissions::MANAGE_EMOJIS
#[cfg(feature = "cache")]
pub async fn edit<T: AsRef<Cache> + AsRef<Http>>(
&mut self,
cache_http: T,
name: &str,
) -> Result<()> {
match self.find_guild_id(&cache_http).await {
Some(guild_id) => | ,
None => Err(Error::Model(ModelError::ItemMissing)),
}
}
/// Finds the [`Guild`] that owns the emoji by looking through the Cache.
///
/// [`Guild`]: super::Guild
///
/// # Examples
///
/// Print the guild id that owns this emoji:
///
/// ```rust,no_run
/// # use serde_json::json;
/// # use serenity::{cache::Cache, model::{guild::{Emoji, Role}, id::EmojiId}};
/// # use tokio::sync::RwLock;
/// # use std::sync::Arc;
/// #
/// # async fn run() {
/// # let cache = Cache::default();
/// #
/// # let mut emoji = serde_json::from_value::<Emoji>(json!({
/// # "animated": false,
/// # "id": EmojiId(7),
/// # "name": "blobface",
/// # "managed": false,
/// # "require_colons": false,
/// # "roles": Vec::<Role>::new(),
/// # })).unwrap();
/// #
/// // assuming emoji has been set already
/// if let Some(guild_id) = emoji.find_guild_id(&cache).await {
/// println!("{} is owned by {}", emoji.name, guild_id);
/// }
/// # }
/// ```
#[cfg(feature = "cache")]
pub async fn find_guild_id(&self, cache: impl AsRef<Cache>) -> Option<GuildId> {
for guild in cache.as_ref().guilds.read().await.values() {
if guild.emojis.contains_key(&self.id) {
return Some(guild.id);
}
}
None
}
/// Generates a URL to the emoji's image.
///
/// # Examples
///
/// Print the direct link to the given emoji:
///
/// ```rust,no_run
/// # extern crate serde_json;
/// # extern crate serenity;
/// #
/// # use serde_json::json;
/// # use serenity::model::{guild::{Emoji, Role}, id::EmojiId};
/// #
/// # fn main() {
/// # let mut emoji = serde_json::from_value::<Emoji>(json!({
/// # "animated": false,
/// # "id": EmojiId(7),
/// # "name": "blobface",
/// # "managed": false,
/// # "require_colons": false,
/// # "roles": Vec::<Role>::new(),
/// # })).unwrap();
/// #
/// // assuming emoji has been set already
/// println!("Direct link to emoji image: {}", emoji.url());
/// # }
/// ```
#[inline]
pub fn url(&self) -> String {
let extension = if self.animated { "gif" } else { "png" };
format!(cdn!("/emojis/{}.{}"), self.id, extension)
}
}
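// Illustrative: with the defaults above, an animated EmojiId(7) named
// "blobface" yields "https://cdn.discordapp.com/emojis/7.gif" (CDN host
// assumed from the cdn! macro).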
impl Display for Emoji {
/// Formats the emoji into a string that will cause Discord clients to
/// render the emoji.
///
/// This is in the format of either `<:NAME:EMOJI_ID>` for normal emojis,
/// or `<a:NAME:EMOJI_ID>` for animated emojis.
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
if self.animated {
f.write_str("<a:")?;
} else {
f.write_str("<:")?;
}
f.write_str(&self.name)?;
FmtWrite::write_char(f, ':')?;
Display::fmt(&self.id, f)?;
FmtWrite::write_char(f, '>')
}
}
impl From<Emoji> for EmojiId {
/// Gets the Id of an `Emoji`.
fn from(emoji: Emoji) -> EmojiId {
emoji.id
}
}
impl<'a> From<&'a Emoji> for EmojiId {
/// Gets the Id of an `Emoji`.
fn from(emoji: &Emoji) -> EmojiId {
emoji.id
}
}
| {
let map = json!({
"name": name,
});
*self = AsRef::<Http>::as_ref(&cache_http)
.edit_emoji(guild_id.0, self.id.0, &map)
.await?;
Ok(())
} |
nn_ops.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for primitive Neural Net (NN) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
# Aliases for some automatically-generated names.
local_response_normalization = gen_nn_ops.lrn
# pylint: disable=protected-access
def _non_atrous_convolution(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
data_format=None, # pylint: disable=redefined-builtin
strides=None,
name=None):
"""Computes sums of N-D convolutions (actually cross correlation).
It is required that 1 <= N <= 3.
This is used to implement the more generic `convolution` function, which
extends the interface of this function with a `dilation_rate` parameter.
Args:
input: Rank N+2 tensor of type T of shape
`[batch_size] + input_spatial_shape + [in_channels]` if `data_format`
does not start with `"NC"`, or
`[batch_size, in_channels] + input_spatial_shape` if `data_format` starts
with `"NC"`.
filter: Rank N+2 tensor of type T of shape
`filter_spatial_shape + [in_channels, out_channels]`. Rank of either
`input` or `filter` must be known.
padding: Padding method to use, must be either "VALID" or "SAME".
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
strides: Sequence of N positive integers, defaults to `[1] * N`.
name: Name prefix to use.
Returns:
Rank N+2 tensor of type T of shape
`[batch_size] + output_spatial_shape + [out_channels]`, where
if padding == "SAME":
output_spatial_shape = input_spatial_shape
if padding == "VALID":
output_spatial_shape = input_spatial_shape - filter_spatial_shape + 1.
Raises:
ValueError: if ranks are incompatible.
"""
with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.get_shape()
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
filter_shape = filter.get_shape()
op = _NonAtrousConvolution(
input_shape,
filter_shape=filter_shape,
padding=padding,
data_format=data_format,
strides=strides,
name=scope)
return op(input, filter)
class _NonAtrousConvolution(object):
"""Helper class for _non_atrous_convolution.
Note that this class assumes that shapes of input and filter passed to
__call__ are compatible with input_shape and filter_shape passed to the
constructor.
Arguments:
input_shape: static input shape, i.e. input.get_shape().
filter_shape: static filter shape, i.e. filter.get_shape().
padding: see _non_atrous_convolution.
data_format: see _non_atrous_convolution.
strides: see _non_atrous_convolution.
name: see _non_atrous_convolution.
"""
def __init__(
self,
input_shape,
filter_shape, # pylint: disable=redefined-builtin
padding,
data_format=None,
strides=None,
name=None):
filter_shape = filter_shape.with_rank(input_shape.ndims)
self.padding = padding
self.name = name
input_shape = input_shape.with_rank(filter_shape.ndims)
if input_shape.ndims is None:
raise ValueError("Rank of convolution must be known")
if input_shape.ndims < 3 or input_shape.ndims > 5:
raise ValueError(
"`input` and `filter` must have rank at least 3 and at most 5")
conv_dims = input_shape.ndims - 2
if strides is None:
strides = [1] * conv_dims
elif len(strides) != conv_dims:
raise ValueError("len(strides)=%d, but should be %d" % (len(strides),
conv_dims))
if conv_dims == 1:
# conv1d uses the 2-d data format names
if data_format is None:
data_format = "NWC"
elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
self.strides = strides[0]
self.data_format = data_format
self.conv_op = self._conv1d
elif conv_dims == 2:
if data_format is None or data_format == "NHWC":
data_format = "NHWC"
strides = [1] + list(strides) + [1]
elif data_format == "NCHW":
strides = [1, 1] + list(strides)
else:
raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")
self.strides = strides
self.data_format = data_format
self.conv_op = conv2d
elif conv_dims == 3:
if data_format is None or data_format == "NDHWC":
strides = [1] + list(strides) + [1]
elif data_format == "NCDHW":
strides = [1, 1] + list(strides)
else:
raise ValueError("data_format must be \"NDHWC\" or \"NCDHW\". Have: %s"
% data_format)
self.strides = strides
self.data_format = data_format
self.conv_op = gen_nn_ops.conv3d
# Note that we need this adapter since argument names for conv1d don't match
# those for gen_nn_ops.conv2d and gen_nn_ops.conv3d.
# pylint: disable=redefined-builtin
def _conv1d(self, input, filter, strides, padding, data_format, name):
return conv1d(
value=input,
filters=filter,
stride=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
return self.conv_op(
input=inp,
filter=filter,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
name=self.name)
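# Illustrative shape arithmetic for _non_atrous_convolution (hypothetical
# values): a [1, 32, 32, 3] NHWC input with a [5, 5, 3, 8] filter and
# "VALID" padding yields [1, 28, 28, 8], since 32 - 5 + 1 = 28.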
@tf_export("nn.dilation2d", v1=[])
def dilation2d_v2(
input, # pylint: disable=redefined-builtin
filters, # pylint: disable=redefined-builtin
strides,
padding,
data_format,
dilations,
name=None):
"""Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors.
The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
`filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the output
tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D dilation is the max-sum correlation
(for consistency with `conv2d`, we use unmirrored filters):
output[b, y, x, c] =
max_{dy, dx} input[b,
strides[1] * y + rates[1] * dy,
strides[2] * x + rates[2] * dx,
c] +
filters[dy, dx, c]
Max-pooling is a special case when the filter has size equal to the pooling
kernel size and contains all zeros.
Note on duality: The dilation of `input` by the `filters` is equal to the
negation of the erosion of `-input` by the reflected `filters`.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
`uint32`, `uint64`.
4-D with shape `[batch, in_height, in_width, depth]`.
filters: A `Tensor`. Must have the same type as `input`.
3-D with shape `[filter_height, filter_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
The stride of the sliding window for each dimension of the input
tensor. Must be: `[1, stride_height, stride_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: A `string`, only `"NCHW"` is currently supported.
dilations: A list of `ints` that has length `>= 4`.
The input stride for atrous morphological dilation. Must be:
`[1, rate_height, rate_width, 1]`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if data_format != "NCHW":
raise ValueError("Data formats other than NCHW are not yet supported")
return gen_nn_ops.dilation2d(input=input,
filter=filters,
strides=strides,
rates=dilations,
padding=padding,
name=name)
@tf_export("nn.with_space_to_batch")
def with_space_to_batch(
input, # pylint: disable=redefined-builtin
dilation_rate,
padding,
op,
filter_shape=None,
spatial_dims=None,
data_format=None):
"""Performs `op` on the space-to-batch representation of `input`.
This has the effect of transforming sliding window operations into the
corresponding "atrous" operation in which the input is sampled at the
specified `dilation_rate`.
In the special case that `dilation_rate` is uniformly 1, this simply returns:
op(input, num_spatial_dims, padding)
Otherwise, it returns:
batch_to_space_nd(
op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings),
num_spatial_dims,
"VALID")
adjusted_dilation_rate,
adjusted_crops),
where:
adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)],
adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2]
defined as follows:
We first define two int64 tensors `paddings` and `crops` of shape
`[num_spatial_dims, 2]` based on the value of `padding` and the spatial
dimensions of the `input`:
If `padding = "VALID"`, then:
paddings, crops = required_space_to_batch_paddings(
input_shape[spatial_dims],
dilation_rate)
If `padding = "SAME"`, then:
dilated_filter_shape =
filter_shape + (filter_shape - 1) * (dilation_rate - 1)
paddings, crops = required_space_to_batch_paddings(
input_shape[spatial_dims],
dilation_rate,
[(dilated_filter_shape - 1) // 2,
dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2])
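For example (hypothetical numbers): with input_shape[spatial_dims] = [7],
dilation_rate = [2] and filter_shape = [3], the dilated filter shape is
3 + (3 - 1) * (2 - 1) = 5, giving base paddings of [[2, 2]].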
Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial
dimensions are contiguous starting at the second dimension, but the specified
`spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and
`crops` in order to be usable with these operations. For a given dimension,
if the block size is 1, and both the starting and ending padding and crop
amounts are 0, then space_to_batch_nd effectively leaves that dimension alone,
which is what is needed for dimensions not part of `spatial_dims`.
Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case
efficiently for any number of leading and trailing dimensions.
For 0 <= i < len(spatial_dims), we assign:
adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i]
adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :]
adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :]
All unassigned values of `adjusted_dilation_rate` default to 1, while all
unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0.
Note in the case that `dilation_rate` is not uniformly 1, specifying "VALID"
padding is equivalent to specifying `padding = "SAME"` with a filter_shape of
`[1]*N`.
Advanced usage. Note the following optimization: A sequence of
`with_space_to_batch` operations with identical (not uniformly 1)
`dilation_rate` parameters and "VALID" padding
net = with_space_to_batch(net, dilation_rate, "VALID", op_1)
...
net = with_space_to_batch(net, dilation_rate, "VALID", op_k)
can be combined into a single `with_space_to_batch` operation as follows:
def combined_op(converted_input, num_spatial_dims, _):
result = op_1(converted_input, num_spatial_dims, "VALID")
...
result = op_k(result, num_spatial_dims, "VALID")
net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)
This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and
`batch_to_space_nd`.
Similarly, a sequence of `with_space_to_batch` operations with identical (not
uniformly 1) `dilation_rate` parameters, "SAME" padding, and odd filter
dimensions
net = with_space_to_batch(net, dilation_rate, "SAME", op_1, filter_shape_1)
...
net = with_space_to_batch(net, dilation_rate, "SAME", op_k, filter_shape_k)
can be combined into a single `with_space_to_batch` operation as follows:
def combined_op(converted_input, num_spatial_dims, _):
result = op_1(converted_input, num_spatial_dims, "SAME")
...
result = op_k(result, num_spatial_dims, "SAME")
net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)
Args:
input: Tensor of rank > max(spatial_dims).
dilation_rate: int32 Tensor of *known* shape [num_spatial_dims].
padding: str constant equal to "VALID" or "SAME"
op: Function that maps (input, num_spatial_dims, padding) -> output
filter_shape: If padding = "SAME", specifies the shape of the convolution
kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims].
If padding = "VALID", filter_shape is ignored and need not be specified.
spatial_dims: Monotonically increasing sequence of `num_spatial_dims`
integers (which are >= 1) specifying the spatial dimensions of `input`
and output. Defaults to: `range(1, num_spatial_dims+1)`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
The output Tensor as described above, dimensions will vary based on the op
provided.
Raises:
ValueError: if `padding` is invalid or the arguments are incompatible.
ValueError: if `spatial_dims` are invalid.
"""
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.get_shape()
def build_op(num_spatial_dims, padding):
return lambda inp, _: op(inp, num_spatial_dims, padding)
new_op = _WithSpaceToBatch(
input_shape,
dilation_rate,
padding,
build_op,
filter_shape=filter_shape,
spatial_dims=spatial_dims,
data_format=data_format)
return new_op(input, None)
class _WithSpaceToBatch(object):
"""Helper class for with_space_to_batch.
Note that this class assumes that shapes of input and filter passed to
__call__ are compatible with input_shape and filter_shape passed to the
constructor.
Arguments
input_shape: static shape of input. i.e. input.get_shape().
dilation_rate: see with_space_to_batch
padding: see with_space_to_batch
build_op: Function that maps (num_spatial_dims, paddings) -> (function that
maps (input, filter) -> output).
filter_shape: see with_space_to_batch
spatial_dims: see with_space_to_batch
data_format: see with_space_to_batch
"""
def __init__(self,
input_shape,
dilation_rate,
padding,
build_op,
filter_shape=None,
spatial_dims=None,
data_format=None):
"""Helper class for _with_space_to_batch."""
dilation_rate = ops.convert_to_tensor(
dilation_rate, dtypes.int32, name="dilation_rate")
try:
rate_shape = dilation_rate.get_shape().with_rank(1)
except ValueError:
raise ValueError("rate must be rank 1")
if not dilation_rate.get_shape().is_fully_defined():
raise ValueError("rate must have known shape")
num_spatial_dims = rate_shape.dims[0].value
if data_format is not None and data_format.startswith("NC"):
starting_spatial_dim = 2
else:
starting_spatial_dim = 1
if spatial_dims is None:
spatial_dims = range(starting_spatial_dim,
num_spatial_dims + starting_spatial_dim)
orig_spatial_dims = list(spatial_dims)
spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))
if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):
raise ValueError(
"spatial_dims must be a montonically increasing sequence of positive "
"integers") # pylint: disable=line-too-long
if data_format is not None and data_format.startswith("NC"):
expected_input_rank = spatial_dims[-1]
else:
expected_input_rank = spatial_dims[-1] + 1
try:
input_shape.with_rank_at_least(expected_input_rank)
except ValueError:
raise ValueError(
"input tensor must have rank %d at least" % (expected_input_rank))
const_rate = tensor_util.constant_value(dilation_rate)
rate_or_const_rate = dilation_rate
if const_rate is not None:
rate_or_const_rate = const_rate
if np.any(const_rate < 1):
raise ValueError("dilation_rate must be positive")
if np.all(const_rate == 1):
self.call = build_op(num_spatial_dims, padding)
return
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
if filter_shape is None:
raise ValueError("filter_shape must be specified for SAME padding")
filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape")
const_filter_shape = tensor_util.constant_value(filter_shape)
if const_filter_shape is not None:
filter_shape = const_filter_shape
self.base_paddings = _with_space_to_batch_base_paddings(
const_filter_shape, num_spatial_dims, rate_or_const_rate)
else:
self.num_spatial_dims = num_spatial_dims
self.rate_or_const_rate = rate_or_const_rate
self.base_paddings = None
elif padding == "VALID":
self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32)
else:
raise ValueError("Invalid padding method %r" % padding)
self.input_shape = input_shape
self.spatial_dims = spatial_dims
self.dilation_rate = dilation_rate
self.data_format = data_format
self.op = build_op(num_spatial_dims, "VALID")
self.call = self._with_space_to_batch_call
def _with_space_to_batch_call(self, inp, filter): # pylint: disable=redefined-builtin
"""Call functionality for with_space_to_batch."""
# Handle input whose shape is unknown during graph creation.
input_spatial_shape = None
input_shape = self.input_shape
spatial_dims = self.spatial_dims
if input_shape.ndims is not None:
input_shape_list = input_shape.as_list()
input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
if input_spatial_shape is None or None in input_spatial_shape:
input_shape_tensor = array_ops.shape(inp)
input_spatial_shape = array_ops.stack(
[input_shape_tensor[i] for i in spatial_dims])
base_paddings = self.base_paddings
if base_paddings is None:
# base_paddings could not be computed at build time since static filter
# shape was not fully defined.
filter_shape = array_ops.shape(filter)
base_paddings = _with_space_to_batch_base_paddings(
filter_shape, self.num_spatial_dims, self.rate_or_const_rate)
paddings, crops = array_ops.required_space_to_batch_paddings(
input_shape=input_spatial_shape,
base_paddings=base_paddings,
block_shape=self.dilation_rate)
dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
spatial_dims)
paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
input_converted = array_ops.space_to_batch_nd(
input=inp, block_shape=dilation_rate, paddings=paddings)
result = self.op(input_converted, filter)
result_converted = array_ops.batch_to_space_nd(
input=result, block_shape=dilation_rate, crops=crops)
# Recover channel information for output shape if channels are not last.
if self.data_format is not None and self.data_format.startswith("NC"):
if not result_converted.shape.dims[1].value and filter is not None:
output_shape = result_converted.shape.as_list()
output_shape[1] = filter.shape[-1]
result_converted.set_shape(output_shape)
return result_converted
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
return self.call(inp, filter)
def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,
rate_or_const_rate):
"""Helper function to compute base_paddings."""
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_spatial_shape = filter_shape[:num_spatial_dims]
dilated_filter_spatial_shape = (
filter_spatial_shape + (filter_spatial_shape - 1) *
(rate_or_const_rate - 1))
pad_extra_shape = dilated_filter_spatial_shape - 1
# When full_padding_shape is odd, we pad more at end, following the same
# convention as conv2d.
pad_extra_start = pad_extra_shape // 2
pad_extra_end = pad_extra_shape - pad_extra_start
base_paddings = array_ops.stack(
[[pad_extra_start[i], pad_extra_end[i]] for i in range(num_spatial_dims)])
return base_paddings
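# e.g. filter_spatial_shape [3, 3] at rate 2 dilates to [5, 5], so
# pad_extra_shape is [4, 4] and base_paddings is [[2, 2], [2, 2]].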
def _with_space_to_batch_adjust(orig, fill_value, spatial_dims):
"""Returns an `adjusted` version of `orig` based on `spatial_dims`.
Tensor of the same type as `orig` and with shape
`[max(spatial_dims), ...]` where:
adjusted[spatial_dims[i] - 1, ...] = orig[i, ...]
for 0 <= i < len(spatial_dims), and
adjusted[j, ...] = fill_value
for j != spatial_dims[i] - 1 for some i.
If `orig` is a constant value, then the result will be a constant value.
Args:
orig: Tensor of rank > max(spatial_dims).
fill_value: Numpy scalar (of same data type as `orig`) specifying the fill
value for non-spatial dimensions.
spatial_dims: See with_space_to_batch.
Returns:
`adjusted` tensor.
"""
fill_dims = orig.get_shape().as_list()[1:]
dtype = orig.dtype.as_numpy_dtype
parts = []
const_orig = tensor_util.constant_value(orig)
const_or_orig = const_orig if const_orig is not None else orig
prev_spatial_dim = 0
i = 0
while i < len(spatial_dims):
start_i = i
start_spatial_dim = spatial_dims[i]
if start_spatial_dim > 1:
# Fill in any gap from the previous spatial dimension (or dimension 1 if
# this is the first spatial dimension) with `fill_value`.
parts.append(
np.full(
[start_spatial_dim - 1 - prev_spatial_dim] + fill_dims,
fill_value,
dtype=dtype))
# Find the largest value of i such that:
# [spatial_dims[start_i], ..., spatial_dims[i]]
# == [start_spatial_dim, ..., start_spatial_dim + i - start_i],
# i.e. the end of a contiguous group of spatial dimensions.
while (i + 1 < len(spatial_dims) and
spatial_dims[i + 1] == spatial_dims[i] + 1):
i += 1
parts.append(const_or_orig[start_i:i + 1])
prev_spatial_dim = spatial_dims[i]
i += 1
if const_orig is not None:
return np.concatenate(parts)
else:
return array_ops.concat(parts, 0)
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
"""Helper function for verifying strides and dilation_rate arguments.
This is used by `convolution` and `pool`.
Args:
num_spatial_dims: int
strides: Optional. List of N ints >= 1. Defaults to [1]*N. If any value
of strides is > 1, then all values of dilation_rate must be 1.
dilation_rate: Optional. List of N ints >= 1. Defaults to [1]*N. If any
value of dilation_rate is > 1, then all values of strides must be 1.
Returns:
Normalized (strides, dilation_rate) as int32 numpy arrays of shape
[num_spatial_dims].
Raises:
ValueError: if the parameters are invalid.
"""
if dilation_rate is None:
dilation_rate = [1] * num_spatial_dims
elif len(dilation_rate) != num_spatial_dims:
raise ValueError("len(dilation_rate)=%d but should be %d" %
(len(dilation_rate), num_spatial_dims))
dilation_rate = np.array(dilation_rate, dtype=np.int32)
if np.any(dilation_rate < 1):
raise ValueError("all values of dilation_rate must be positive")
if strides is None:
strides = [1] * num_spatial_dims
elif len(strides) != num_spatial_dims:
raise ValueError("len(strides)=%d but should be %d" % (len(strides),
num_spatial_dims))
strides = np.array(strides, dtype=np.int32)
if np.any(strides < 1):
raise ValueError("all values of strides must be positive")
if np.any(strides > 1) and np.any(dilation_rate > 1):
raise ValueError(
"strides > 1 not supported in conjunction with dilation_rate > 1")
return strides, dilation_rate
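# Example (illustrative): _get_strides_and_dilation_rate(2, None, [2, 2])
# returns (array([1, 1]), array([2, 2])) -- strides default to 1 when a
# dilation rate is given, and mixing strides > 1 with dilation_rate > 1
# raises ValueError.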
@tf_export(v1=["nn.convolution"])
def convolution(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
strides=None,
dilation_rate=None,
name=None,
data_format=None):
# pylint: disable=line-too-long
"""Computes sums of N-D convolutions (actually cross-correlation).
This also supports either output striding via the optional `strides` parameter
or atrous convolution (also known as convolution with holes or dilated
convolution, based on the French word "trous" meaning holes in English) via
the optional `dilation_rate` parameter. Currently, however, output striding
is not supported for atrous convolutions.
Specifically, in the case that `data_format` does not start with "NC", given
a rank (N+2) `input` Tensor of shape
[num_batches,
input_spatial_shape[0],
...,
input_spatial_shape[N-1],
num_input_channels],
a rank (N+2) `filter` Tensor of shape
[spatial_filter_shape[0],
...,
spatial_filter_shape[N-1],
num_input_channels,
num_output_channels],
an optional `dilation_rate` tensor of shape [N] (defaulting to [1]*N)
specifying the filter upsampling/input downsampling rate, and an optional list
  of N `strides` (defaulting to [1]*N), this computes for each N-D spatial output
position (x[0], ..., x[N-1]):
```
output[b, x[0], ..., x[N-1], k] =
sum_{z[0], ..., z[N-1], q}
filter[z[0], ..., z[N-1], q, k] *
padded_input[b,
x[0]*strides[0] + dilation_rate[0]*z[0],
...,
x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1],
q]
```
where b is the index into the batch, k is the output channel number, q is the
input channel number, and z is the N-D spatial offset within the filter. Here,
`padded_input` is obtained by zero padding the input using an effective
spatial filter shape of `(spatial_filter_shape-1) * dilation_rate + 1` and
output striding `strides` as described in the
[comment here](https://tensorflow.org/api_guides/python/nn#Convolution).
In the case that `data_format` does start with `"NC"`, the `input` and output
(but not the `filter`) are simply transposed as follows:
convolution(input, data_format, **kwargs) =
tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
It is required that 1 <= N <= 3.
Args:
input: An (N+2)-D `Tensor` of type `T`, of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
filter: An (N+2)-D `Tensor` with the same type as `input` and shape
`spatial_filter_shape + [in_channels, out_channels]`.
padding: A string, either `"VALID"` or `"SAME"`. The padding algorithm.
strides: Optional. Sequence of N ints >= 1. Specifies the output stride.
Defaults to [1]*N. If any value of strides is > 1, then all values of
dilation_rate must be 1.
dilation_rate: Optional. Sequence of N ints >= 1. Specifies the filter
upsampling/input downsampling rate. In the literature, the same parameter
is sometimes called `input stride` or `dilation`. The effective filter
size used for the convolution will be `spatial_filter_shape +
(spatial_filter_shape - 1) * (rate - 1)`, obtained by inserting
(dilation_rate[i]-1) zeros between consecutive elements of the original
filter in each spatial dimension i. If any value of dilation_rate is > 1,
then all values of strides must be 1.
name: Optional name for the returned tensor.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
A `Tensor` with the same type as `input` of shape
`[batch_size] + output_spatial_shape + [out_channels]`
if data_format is None or does not start with "NC", or
`[batch_size, out_channels] + output_spatial_shape`
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of `padding`.
If padding == "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding == "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] -
(spatial_filter_shape[i]-1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: If input/output depth does not match `filter` shape, if padding
is other than `"VALID"` or `"SAME"`, or if data_format is invalid.
"""
# pylint: enable=line-too-long
with ops.name_scope(name, "convolution", [input, filter]) as name:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.get_shape()
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
filter_shape = filter.get_shape()
op = Convolution(
input_shape,
filter_shape,
padding,
strides=strides,
dilation_rate=dilation_rate,
name=name,
data_format=data_format)
return op(input, filter)
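# A minimal usage sketch of `convolution` (illustrative only; this helper is
# not part of the module and is never called, and the shapes are assumptions
# chosen for the example).
def _example_convolution():
  """Sketch: 2-D dilated convolution of an NHWC input with an HWIO filter."""
  inp = ops.convert_to_tensor(np.ones([1, 8, 8, 3], np.float32))
  filt = ops.convert_to_tensor(np.ones([3, 3, 3, 16], np.float32))
  # "SAME" padding preserves the 8x8 spatial shape; dilation_rate=[2, 2]
  # gives the 3x3 filter an effective 5x5 footprint without extra weights.
  return convolution(inp, filt, padding="SAME",
                     dilation_rate=[2, 2])  # shape [1, 8, 8, 16]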
@tf_export("nn.convolution", v1=[])
def convolution_v2(
input, # pylint: disable=redefined-builtin
filters,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None):
return convolution(
input, # pylint: disable=redefined-builtin
filters,
padding=padding,
strides=strides,
dilation_rate=dilations,
name=name,
data_format=data_format)
convolution_v2.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
convolution.__doc__, "dilation_rate", "dilations"),
"filter", "filters")
class Convolution(object):
"""Helper class for convolution.
Note that this class assumes that shapes of input and filter passed to
__call__ are compatible with input_shape and filter_shape passed to the
constructor.
Arguments
input_shape: static shape of input. i.e. input.get_shape().
filter_shape: static shape of the filter. i.e. filter.get_shape().
padding: see convolution.
strides: see convolution.
dilation_rate: see convolution.
name: see convolution.
data_format: see convolution.
"""
def __init__(self,
input_shape,
filter_shape,
padding,
strides=None,
dilation_rate=None,
name=None,
data_format=None):
"""Helper function for convolution."""
num_total_dims = filter_shape.ndims
if num_total_dims is None:
num_total_dims = input_shape.ndims
if num_total_dims is None:
raise ValueError("rank of input or filter must be known")
num_spatial_dims = num_total_dims - 2
try:
input_shape.with_rank(num_spatial_dims + 2)
except ValueError:
raise ValueError(
"input tensor must have rank %d" % (num_spatial_dims + 2))
try:
filter_shape.with_rank(num_spatial_dims + 2)
except ValueError:
raise ValueError(
"filter tensor must have rank %d" % (num_spatial_dims + 2))
if data_format is None or not data_format.startswith("NC"):
input_channels_dim = tensor_shape.dimension_at_index(
input_shape, num_spatial_dims + 1)
spatial_dims = range(1, num_spatial_dims + 1)
else:
input_channels_dim = tensor_shape.dimension_at_index(input_shape, 1)
spatial_dims = range(2, num_spatial_dims + 2)
if not input_channels_dim.is_compatible_with(
filter_shape[num_spatial_dims]):
raise ValueError(
"number of input channels does not match corresponding dimension of "
"filter, {} != {}".format(input_channels_dim,
filter_shape[num_spatial_dims]))
strides, dilation_rate = _get_strides_and_dilation_rate(
num_spatial_dims, strides, dilation_rate)
self.input_shape = input_shape
self.filter_shape = filter_shape
self.data_format = data_format
self.strides = strides
self.name = name
self.conv_op = _WithSpaceToBatch(
input_shape,
dilation_rate=dilation_rate,
padding=padding,
build_op=self._build_op,
filter_shape=filter_shape,
spatial_dims=spatial_dims,
data_format=data_format)
def _build_op(self, _, padding):
return _NonAtrousConvolution(
self.input_shape,
filter_shape=self.filter_shape,
padding=padding,
data_format=self.data_format,
strides=self.strides,
name=self.name)
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
return self.conv_op(inp, filter)
@tf_export(v1=["nn.pool"])
def pool(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
padding,
dilation_rate=None,
strides=None,
name=None,
data_format=None):
# pylint: disable=line-too-long
"""Performs an N-D pooling operation.
In the case that `data_format` does not start with "NC", computes for
0 <= b < batch_size,
0 <= x[i] < output_spatial_shape[i],
0 <= c < num_channels:
```
output[b, x[0], ..., x[N-1], c] =
REDUCE_{z[0], ..., z[N-1]}
input[b,
x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
...
x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
c],
```
where the reduction function REDUCE depends on the value of `pooling_type`,
and pad_before is defined based on the value of `padding` as described in
the "returns" section of `tf.nn.convolution` for details.
The reduction never includes out-of-bounds positions.
In the case that `data_format` starts with `"NC"`, the `input` and output are
simply transposed as follows:
```
pool(input, data_format, **kwargs) =
tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
```
Args:
input: Tensor of rank N+2, of shape
`[batch_size] + input_spatial_shape + [num_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
window_shape: Sequence of N ints >= 1.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
padding: The padding algorithm, must be "SAME" or "VALID".
See the "returns" section of `tf.nn.convolution` for details.
dilation_rate: Optional. Dilation rate. List of N ints >= 1.
Defaults to [1]*N. If any value of dilation_rate is > 1, then all values
of strides must be 1.
strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N.
If any value of strides is > 1, then all values of dilation_rate must be
1.
name: Optional. Name of the op.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
Tensor of rank N+2, of shape
[batch_size] + output_spatial_shape + [num_channels]
if data_format is None or does not start with "NC", or
[batch_size, num_channels] + output_spatial_shape
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: if arguments are invalid.
"""
# pylint: enable=line-too-long
with ops.name_scope(name, "%s_pool" % (pooling_type.lower()),
[input]) as scope:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
num_spatial_dims = len(window_shape)
if num_spatial_dims < 1 or num_spatial_dims > 3:
raise ValueError("It is required that 1 <= num_spatial_dims <= 3.")
input.get_shape().with_rank(num_spatial_dims + 2)
strides, dilation_rate = _get_strides_and_dilation_rate(
num_spatial_dims, strides, dilation_rate)
if padding == "SAME" and np.any(dilation_rate > 1):
raise ValueError(
"pooling with SAME padding is not implemented for dilation_rate > 1")
if np.any(strides > window_shape):
raise ValueError(
"strides > window_shape not supported due to inconsistency between "
"CPU and GPU implementations")
pooling_ops = {
("MAX", 1): max_pool,
("MAX", 2): max_pool,
("MAX", 3): max_pool3d, # pylint: disable=undefined-variable
("AVG", 1): avg_pool,
("AVG", 2): avg_pool,
("AVG", 3): avg_pool3d, # pylint: disable=undefined-variable
}
op_key = (pooling_type, num_spatial_dims)
if op_key not in pooling_ops:
raise ValueError("%d-D %s pooling is not supported." % (op_key[1],
op_key[0]))
if data_format is None or not data_format.startswith("NC"):
adjusted_window_shape = [1] + list(window_shape) + [1]
adjusted_strides = [1] + list(strides) + [1]
spatial_dims = range(1, num_spatial_dims + 1)
else:
adjusted_window_shape = [1, 1] + list(window_shape)
adjusted_strides = [1, 1] + list(strides)
spatial_dims = range(2, num_spatial_dims + 2)
if num_spatial_dims == 1:
if data_format is None or data_format == "NWC":
data_format_kwargs = dict(data_format="NHWC")
elif data_format == "NCW":
data_format_kwargs = dict(data_format="NCHW")
else:
raise ValueError("data_format must be either \"NWC\" or \"NCW\".")
adjusted_window_shape = [1] + adjusted_window_shape
adjusted_strides = [1] + adjusted_strides
else:
data_format_kwargs = dict(data_format=data_format)
def op(converted_input, _, converted_padding): # pylint: disable=missing-docstring
if num_spatial_dims == 1:
converted_input = array_ops.expand_dims(converted_input,
spatial_dims[0])
result = pooling_ops[op_key](
converted_input,
adjusted_window_shape,
adjusted_strides,
converted_padding,
name=scope,
**data_format_kwargs)
if num_spatial_dims == 1:
result = array_ops.squeeze(result, [spatial_dims[0]])
return result
return with_space_to_batch(
input=input,
dilation_rate=dilation_rate,
padding=padding,
op=op,
spatial_dims=spatial_dims,
filter_shape=window_shape)
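# A minimal usage sketch of `pool` (illustrative only; never called, and the
# shapes and window are assumptions for the example).
def _example_pool():
  """Sketch: 2x2 max pooling with stride 2 over an NHWC input."""
  inp = ops.convert_to_tensor(np.ones([1, 8, 8, 3], np.float32))
  # VALID padding with window 2 and stride 2 halves each spatial dimension,
  # so the result has shape [1, 4, 4, 3].
  return pool(inp, window_shape=[2, 2], pooling_type="MAX", padding="VALID",
              strides=[2, 2])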
@tf_export("nn.pool", v1=[])
def pool_v2(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None):
# pylint: disable=line-too-long
"""Performs an N-D pooling operation.
In the case that `data_format` does not start with "NC", computes for
0 <= b < batch_size,
0 <= x[i] < output_spatial_shape[i],
0 <= c < num_channels:
```
output[b, x[0], ..., x[N-1], c] =
REDUCE_{z[0], ..., z[N-1]}
input[b,
x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
...
x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
c],
```
where the reduction function REDUCE depends on the value of `pooling_type`,
and pad_before is defined based on the value of `padding` as described in
the "returns" section of `tf.nn.convolution` for details.
The reduction never includes out-of-bounds positions.
In the case that `data_format` starts with `"NC"`, the `input` and output are
simply transposed as follows:
```
pool(input, data_format, **kwargs) =
tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
```
Args:
input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]` if data_format does not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
window_shape: Sequence of N ints >= 1.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N. If any value of
strides is > 1, then all values of dilation_rate must be 1.
    padding: The padding algorithm, must be "SAME" or "VALID". Defaults to "VALID".
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilations: Optional. Dilation rate. List of N ints >= 1. Defaults to
[1]*N. If any value of dilation_rate is > 1, then all values of strides
must be 1.
name: Optional. Name of the op.
Returns:
Tensor of rank N+2, of shape
[batch_size] + output_spatial_shape + [num_channels]
if data_format is None or does not start with "NC", or
[batch_size, num_channels] + output_spatial_shape
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: if arguments are invalid.
"""
return pool(
input=input,
window_shape=window_shape,
pooling_type=pooling_type,
padding=padding,
dilation_rate=dilations,
strides=strides,
name=name,
data_format=data_format)
@tf_export("nn.atrous_conv2d")
def atrous_conv2d(value, filters, rate, padding, name=None):
"""Atrous convolution (a.k.a. convolution with holes or dilated convolution).
This function is a simpler wrapper around the more general
`tf.nn.convolution`, and exists only for backwards compatibility. You can
use `tf.nn.convolution` to perform 1-D, 2-D, or 3-D atrous convolution.
Computes a 2-D atrous convolution, also known as convolution with holes or
dilated convolution, given 4-D `value` and `filters` tensors. If the `rate`
parameter is equal to one, it performs regular 2-D convolution. If the `rate`
parameter is greater than one, it performs convolution with holes, sampling
the input values every `rate` pixels in the `height` and `width` dimensions.
This is equivalent to convolving the input with a set of upsampled filters,
produced by inserting `rate - 1` zeros between two consecutive values of the
filters along the `height` and `width` dimensions, hence the name atrous
convolution or convolution with holes (the French word trous means holes in
English).
More specifically:
```
output[batch, height, width, out_channel] =
sum_{dheight, dwidth, in_channel} (
filters[dheight, dwidth, in_channel, out_channel] *
value[batch, height + rate*dheight, width + rate*dwidth, in_channel]
)
```
Atrous convolution allows us to explicitly control how densely to compute
feature responses in fully convolutional networks. Used in conjunction with
bilinear interpolation, it offers an alternative to `conv2d_transpose` in
dense prediction tasks such as semantic image segmentation, optical flow
computation, or depth estimation. It also allows us to effectively enlarge
the field of view of filters without increasing the number of parameters or
the amount of computation.
For a description of atrous convolution and how it can be used for dense
feature extraction, please see: [Semantic Image Segmentation with Deep
Convolutional Nets and Fully Connected CRFs](http://arxiv.org/abs/1412.7062).
The same operation is investigated further in [Multi-Scale Context Aggregation
by Dilated Convolutions](http://arxiv.org/abs/1511.07122). Previous works
that effectively use atrous convolution in different ways are, among others,
[OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks](http://arxiv.org/abs/1312.6229) and [Fast Image
Scanning with Deep Max-Pooling Convolutional Neural
Networks](http://arxiv.org/abs/1302.1700).
Atrous convolution is also closely related to the so-called noble identities
in multi-rate signal processing.
There are many different ways to implement atrous convolution (see the refs
above). The implementation here reduces
```python
atrous_conv2d(value, filters, rate, padding=padding)
```
to the following three operations:
```python
paddings = ...
net = space_to_batch(value, paddings, block_size=rate)
net = conv2d(net, filters, strides=[1, 1, 1, 1], padding="VALID")
crops = ...
net = batch_to_space(net, crops, block_size=rate)
```
Advanced usage. Note the following optimization: A sequence of `atrous_conv2d`
operations with identical `rate` parameters, 'SAME' `padding`, and filters
  with odd heights/widths:
```python
net = atrous_conv2d(net, filters1, rate, padding="SAME")
net = atrous_conv2d(net, filters2, rate, padding="SAME")
...
net = atrous_conv2d(net, filtersK, rate, padding="SAME")
```
  can be performed equivalently, and more cheaply in computation and memory, as:
```python
pad = ... # padding so that the input dims are multiples of rate
net = space_to_batch(net, paddings=pad, block_size=rate)
net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
...
net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
net = batch_to_space(net, crops=pad, block_size=rate)
```
because a pair of consecutive `space_to_batch` and `batch_to_space` ops with
the same `block_size` cancel out when their respective `paddings` and `crops`
inputs are identical.
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default "NHWC"
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, in_channels, out_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
      inserting `rate - 1` zeros between consecutive elements across the
`filters`' spatial dimensions.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Output shape with `'VALID'` padding is:
      [batch, height - rate * (filter_height - 1),
       width - rate * (filter_width - 1), out_channels].
Output shape with `'SAME'` padding is:
[batch, height, width, out_channels].
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
return convolution(
input=value,
filter=filters,
padding=padding,
dilation_rate=np.broadcast_to(rate, (2,)),
name=name)
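# A minimal usage sketch of `atrous_conv2d` (illustrative only; never
# called, shapes are assumptions for the example).
def _example_atrous_conv2d():
  """Sketch: 2-D atrous convolution with rate 2."""
  value = ops.convert_to_tensor(np.ones([1, 16, 16, 3], np.float32))
  filters = ops.convert_to_tensor(np.ones([3, 3, 3, 8], np.float32))
  # With rate=2 the 3x3 filter has an effective 5x5 footprint; "SAME"
  # padding keeps the spatial shape, so the result is [1, 16, 16, 8].
  return atrous_conv2d(value, filters, rate=2, padding="SAME")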
def _convert_padding(padding):
"""Converts Python padding to C++ padding for ops which take EXPLICIT padding.
Args:
padding: the `padding` argument for a Python op which supports EXPLICIT
padding.
Returns:
(padding, explicit_paddings) pair, which should be passed as attributes to a
C++ op.
Raises:
ValueError: If padding is invalid.
"""
explicit_paddings = []
if padding == "EXPLICIT":
# Give a better error message if EXPLICIT is passed.
raise ValueError('"EXPLICIT" is not a valid value for the padding '
"parameter. To use explicit padding, the padding "
"parameter must be a list.")
if isinstance(padding, (list, tuple)):
for i, dim_paddings in enumerate(padding):
if not isinstance(dim_paddings, (list, tuple)):
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding is not a list/tuple" % i)
if len(dim_paddings) != 2:
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding has size %d" % (i, len(dim_paddings)))
explicit_paddings.extend(dim_paddings)
if len(padding) != 4:
raise ValueError("When padding is a list, it must be of size 4. Got "
"padding of size: %d" % len(padding))
padding = "EXPLICIT"
return padding, explicit_paddings
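# Example (illustrative): _convert_padding("SAME") returns ("SAME", []),
# while _convert_padding([[0, 0], [1, 2], [3, 4], [0, 0]]) returns
# ("EXPLICIT", [0, 0, 1, 2, 3, 4, 0, 0]).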
@tf_export("nn.conv2d", v1=[])
def conv2d_v2(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
data_format="NHWC",
dilations=None,
name=None):
# pylint: disable=line-too-long
r"""Computes a 2-D convolution given 4-D `input` and `filters` tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`, this op
performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
A 4-D tensor. The dimension order is interpreted according to the value
of `data_format`, see below for details.
filters: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: A list of `ints`.
1-D tensor of length 4. The stride of the sliding window for each
dimension of `input`. The dimension order is determined by the value of
`data_format`, see below for details.
    padding: Either the string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by the
value of `data_format`, see above for details. Dilations in the batch and
depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
# pylint: enable=line-too-long
if dilations is None:
dilations = [1, 1, 1, 1]
return conv2d(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv2d"])
def conv2d( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter,
strides,
padding,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`, this op
performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q]
* filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
A 4-D tensor. The dimension order is interpreted according to the value
of `data_format`, see below for details.
filter: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: A list of `ints`.
1-D tensor of length 4. The stride of the sliding window for each
dimension of `input`. The dimension order is determined by the value of
`data_format`, see below for details.
    padding: Either the string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by the
value of `data_format`, see above for details. Dilations in the batch and
depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
padding, explicit_paddings = _convert_padding(padding)
return gen_nn_ops.conv2d(input, # pylint: disable=redefined-builtin
filter,
strides,
padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
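# A minimal usage sketch of `conv2d` with explicit padding (illustrative
# only; never called, shapes are assumptions for the example).
def _example_conv2d():
  """Sketch: 2-D convolution with explicit per-dimension padding."""
  inp = ops.convert_to_tensor(np.ones([1, 8, 8, 3], np.float32))
  filt = ops.convert_to_tensor(np.ones([3, 3, 3, 16], np.float32))
  # One pixel of zero padding on every spatial edge (NHWC order), which
  # reproduces "SAME" for a 3x3 filter with unit strides: [1, 8, 8, 16].
  return conv2d(inp, filt, strides=[1, 1, 1, 1],
                padding=[[0, 0], [1, 1], [1, 1], [0, 0]])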
@tf_export("nn.conv2d_backprop_filter", v1=[])
def conv2d_backprop_filter_v2(input, # pylint: disable=redefined-builtin
filter_sizes,
out_backprop,
strides,
padding,
data_format="NHWC",
dilations=None,
name=None):
r"""Computes the gradients of convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape `[batch, in_height, in_width, in_channels]`.
filter_sizes: A `Tensor` of type `int32`.
An integer vector representing the tensor shape of `filter`,
where `filter` is a 4-D
`[filter_height, filter_width, in_channels, out_channels]` tensor.
out_backprop: A `Tensor`. Must have the same type as `input`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
    padding: Either the string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if dilations is None:
dilations = [1, 1, 1, 1]
return conv2d_backprop_filter(input, # pylint: disable=redefined-builtin
filter_sizes,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv2d_backprop_filter"])
def conv2d_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter_sizes,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape `[batch, in_height, in_width, in_channels]`.
filter_sizes: A `Tensor` of type `int32`.
An integer vector representing the tensor shape of `filter`,
where `filter` is a 4-D
`[filter_height, filter_width, in_channels, out_channels]` tensor.
out_backprop: A `Tensor`. Must have the same type as `input`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
    padding: Either the string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
padding, explicit_paddings = _convert_padding(padding)
return gen_nn_ops.conv2d_backprop_filter(
input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu,
explicit_paddings, data_format, dilations, name)
@tf_export("nn.conv2d_backprop_input", v1=[])
def conv2d_backprop_input_v2(input_sizes,
filters,
out_backprop,
strides,
padding,
data_format="NHWC",
dilations=None,
name=None):
r"""Computes the gradients of convolution with respect to the input.
Args:
input_sizes: A `Tensor` of type `int32`.
An integer vector representing the shape of `input`,
where `input` is a 4-D `[batch, height, width, channels]` tensor.
filters: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.
out_backprop: A `Tensor`. Must have the same type as `filters`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
    padding: Either the string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `filters`.
"""
if dilations is None:
dilations = [1, 1, 1, 1]
return conv2d_backprop_input(input_sizes,
filters,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv2d_backprop_input"])
def conv2d_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value
input_sizes,
filter,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of convolution with respect to the input.
Args:
input_sizes: A `Tensor` of type `int32`.
An integer vector representing the shape of `input`,
where `input` is a 4-D `[batch, height, width, channels]` tensor.
filter: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.
out_backprop: A `Tensor`. Must have the same type as `filter`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
    padding: Either the string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `filter`.
"""
padding, explicit_paddings = _convert_padding(padding)
return gen_nn_ops.conv2d_backprop_input(
input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu,
explicit_paddings, data_format, dilations, name)
@tf_export(v1=["nn.conv2d_transpose"])
def conv2d_transpose(
value,
filter, # pylint: disable=redefined-builtin
output_shape,
strides,
padding="SAME",
data_format="NHWC",
name=None):
"""The transpose of `conv2d`.
This operation is sometimes called "deconvolution" after [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `conv2d` rather than an actual
deconvolution.
Args:
value: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]` for `NHWC` data format or
`[batch, in_channels, height, width]` for `NCHW` data format.
filter: A 4-D `Tensor` with the same type as `value` and shape
`[height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.name_scope(name, "conv2d_transpose",
[value, filter, output_shape]) as name:
if data_format not in ("NCHW", "NHWC"):
raise ValueError("data_format has to be either NCHW or NHWC.")
value = ops.convert_to_tensor(value, name="value")
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
axis = 3 if data_format == "NHWC" else 1
if not value.get_shape().dims[axis].is_compatible_with(
filter.get_shape()[3]):
raise ValueError("input channels does not match filter's input channels, "
"{} != {}".format(value.get_shape()[axis],
filter.get_shape()[3]))
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
raise ValueError("output_shape must have shape (4,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [4] if reached this point.
if not filter.get_shape().dims[2].is_compatible_with(
output_shape[axis]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[axis],
filter.get_shape()[2]))
if padding != "VALID" and padding != "SAME":
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
return gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape_,
filter=filter,
out_backprop=value,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
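# A minimal usage sketch of `conv2d_transpose` (illustrative only; never
# called, shapes are assumptions for the example).
def _example_conv2d_transpose():
  """Sketch: 2x spatial upsampling with the transpose of conv2d."""
  value = ops.convert_to_tensor(np.ones([1, 4, 4, 8], np.float32))
  # Note the filter layout: [height, width, output_channels, in_channels].
  filt = ops.convert_to_tensor(np.ones([3, 3, 16, 8], np.float32))
  # Stride 2 with "SAME" padding doubles each spatial dimension.
  return conv2d_transpose(value, filt, output_shape=[1, 8, 8, 16],
                          strides=[1, 2, 2, 1])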
# pylint: disable=redefined-builtin
@tf_export("nn.conv2d_transpose", v1=[])
def conv2d_transpose_v2(
input,
filters, # pylint: disable=redefined-builtin
output_shape,
strides,
padding="SAME",
data_format="NHWC",
name=None):
return conv2d_transpose(
input,
filters,
output_shape,
strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
conv2d_transpose_v2.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
conv2d_transpose.__doc__, "filter", "filters"),
"value", "input")
@tf_export("nn.atrous_conv2d_transpose")
def atrous_conv2d_transpose(value,
filters,
output_shape,
rate,
padding,
name=None):
"""The transpose of `atrous_conv2d`.
This operation is sometimes called "deconvolution" after [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `atrous_conv2d` rather than an actual
deconvolution.
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC`
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, out_channels, in_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
      inserting `rate - 1` zeros between consecutive elements across the
`filters`' spatial dimensions.
    output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less
than one, or if the output_shape is not a tensor with 4 elements.
"""
with ops.name_scope(name, "atrous_conv2d_transpose",
[value, filters, output_shape]) as name:
value = ops.convert_to_tensor(value, name="value")
filters = ops.convert_to_tensor(filters, name="filters")
if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]):
raise ValueError(
"value's input channels does not match filters' input channels, "
"{} != {}".format(value.get_shape()[3],
filters.get_shape()[3]))
if rate < 1:
raise ValueError("rate {} cannot be less than one".format(rate))
if rate == 1:
return conv2d_transpose(
value,
filters,
output_shape,
strides=[1, 1, 1, 1],
padding=padding,
data_format="NHWC")
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
raise ValueError("output_shape must have shape (4,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [4] if reached this point.
if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[3],
filters.get_shape()[2]))
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
# Handle filters whose shape is unknown during graph creation.
if filters.get_shape().is_fully_defined():
filter_shape = filters.get_shape().as_list()
else:
filter_shape = array_ops.shape(filters)
filter_height, filter_width = filter_shape[0], filter_shape[1]
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
pad_height = filter_height_up - 1
pad_width = filter_width_up - 1
# When pad_height (pad_width) is odd, we pad more to bottom (right),
# following the same convention as conv2d().
pad_top = pad_height // 2
pad_bottom = pad_height - pad_top
pad_left = pad_width // 2
pad_right = pad_width - pad_left
elif padding == "VALID":
pad_top = 0
pad_bottom = 0
pad_left = 0
pad_right = 0
else:
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
in_height = output_shape[1] + pad_top + pad_bottom
in_width = output_shape[2] + pad_left + pad_right
# More padding so that rate divides the height and width of the input.
pad_bottom_extra = (rate - in_height % rate) % rate
pad_right_extra = (rate - in_width % rate) % rate
# The paddings argument to space_to_batch is just the extra padding
# component.
space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]]
value = array_ops.space_to_batch(
input=value, paddings=space_to_batch_pad, block_size=rate)
input_sizes = [
rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate,
(in_width + pad_right_extra) // rate, output_shape[3]
]
value = gen_nn_ops.conv2d_backprop_input(
input_sizes=input_sizes,
filter=filters,
out_backprop=value,
strides=[1, 1, 1, 1],
padding="VALID",
data_format="NHWC")
# The crops argument to batch_to_space includes both padding components.
batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra],
[pad_left, pad_right + pad_right_extra]]
return array_ops.batch_to_space(
input=value, crops=batch_to_space_crop, block_size=rate)
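# Worked example of the two padding contributions above (illustrative): with
# a 3x3 filter, rate=2 and "SAME" padding, filter_height_up = 5, so
# pad_top = pad_bottom = 2. For an output height of 7, in_height = 11 is not
# divisible by rate, so pad_bottom_extra = 1 makes the padded height 12
# before space_to_batch.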
@tf_export("nn.conv3d", v1=[])
def conv3d_v2(input, # pylint: disable=redefined-builtin,missing-docstring
filters,
strides,
padding,
data_format="NDHWC",
dilations=None,
name=None):
if dilations is None:
dilations = [1, 1, 1, 1, 1]
return gen_nn_ops.conv3d(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
data_format=data_format,
dilations=dilations,
name=name)
tf_export(v1=["nn.conv3d"])(gen_nn_ops.conv3d)
conv3d_v2.__doc__ = deprecation.rewrite_argument_docstring(
gen_nn_ops.conv3d.__doc__, "filter", "filters")
@tf_export(v1=["nn.conv3d_transpose"])
def conv3d_transpose(
value,
filter, # pylint: disable=redefined-builtin
output_shape,
strides,
padding="SAME",
data_format="NDHWC",
name=None):
"""The transpose of `conv3d`.
This operation is sometimes called "deconvolution" after [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `conv3d` rather than an actual
deconvolution.
Args:
value: A 5-D `Tensor` of type `float` and shape
`[batch, depth, height, width, in_channels]`.
filter: A 5-D `Tensor` with the same type as `value` and shape
`[depth, height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string, either `'NDHWC'` or `'NCDHW`' specifying the layout
of the input and output tensors. Defaults to `'NDHWC'`.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.name_scope(name, "conv3d_transpose",
[value, filter, output_shape]) as name:
value = ops.convert_to_tensor(value, name="value")
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
axis = 1 if data_format == "NCDHW" else 4
if not value.get_shape().dims[axis].is_compatible_with(
filter.get_shape()[4]):
raise ValueError("input channels does not match filter's input channels, "
"{} != {}".format(value.get_shape()[axis],
filter.get_shape()[4]))
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(5)):
raise ValueError("output_shape must have shape (5,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [5] if reached this point.
if not filter.get_shape().dims[3].is_compatible_with(
output_shape[axis]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[axis],
filter.get_shape()[3]))
if padding != "VALID" and padding != "SAME":
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
return gen_nn_ops.conv3d_backprop_input_v2(
input_sizes=output_shape_,
filter=filter,
out_backprop=value,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.conv3d_transpose", v1=[])
def conv3d_transpose_v2(
input,
filters,
output_shape,
strides,
padding="SAME",
data_format="NDHWC",
name=None):
return conv3d_transpose(
input,
filters,
output_shape,
strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
conv3d_transpose_v2.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
conv3d_transpose.__doc__, "filter", "filters"),
"value", "input")
@tf_export("nn.bias_add")
def bias_add(value, bias, data_format=None, name=None):
"""Adds `bias` to `value`.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
"""
with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
if not context.executing_eagerly():
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
return gen_nn_ops.bias_add(value, bias, data_format=data_format, name=name)
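# A minimal usage sketch of `bias_add` (illustrative only; never called).
def _example_bias_add():
  """Sketch: adding a per-channel bias to an NHWC activation."""
  value = ops.convert_to_tensor(np.zeros([1, 2, 2, 3], np.float32))
  bias = ops.convert_to_tensor(np.array([1., 2., 3.], np.float32))
  # The 1-D bias broadcasts across the batch and spatial dimensions, so
  # every spatial position becomes [1., 2., 3.].
  return bias_add(value, bias)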
def bias_add_v1(value, bias, name=None):
"""Adds `bias` to `value`.
  This is a deprecated version of bias_add and will soon be removed.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
"""
with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
return gen_nn_ops.bias_add_v1(value, bias, name=name)
@tf_export(v1=["nn.crelu"])
def crelu(features, name=None, axis=-1):
"""Computes Concatenated ReLU.
Concatenates a ReLU which selects only the positive part of the activation
with a ReLU which selects only the *negative* part of the activation.
Note that as a result this non-linearity doubles the depth of the activations.
Source: [Understanding and Improving Convolutional Neural Networks via
Concatenated Rectified Linear Units. W. Shang, et
al.](https://arxiv.org/abs/1603.05201)
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
axis: The axis that the output values are concatenated along. Default is -1.
Returns:
A `Tensor` with the same type as `features`.
"""
with ops.name_scope(name, "CRelu", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
c = array_ops.concat([features, -features], axis, name=name)
return gen_nn_ops.relu(c)
@tf_export("nn.crelu", v1=[])
def crelu_v2(features, axis=-1, name=None):
return crelu(features, name=name, axis=axis)
crelu_v2.__doc__ = crelu.__doc__
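# Example (illustrative sketch; assumes the public `tf` namespace). Note how
# the output depth doubles:
#
#   f = tf.constant([-1., 2., -3.])
#   tf.nn.crelu(f)  # concat(relu(f), relu(-f)) -> [0., 2., 0., 1., 0., 3.]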
@tf_export("nn.relu6")
def relu6(features, name=None):
"""Computes Rectified Linear 6: `min(max(features, 0), 6)`.
Source: [Convolutional Deep Belief Networks on CIFAR-10. A.
Krizhevsky](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf)
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `features`.
"""
with ops.name_scope(name, "Relu6", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
return gen_nn_ops.relu6(features, name=name)
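# Example (illustrative sketch; assumes the public `tf` namespace):
#
#   tf.nn.relu6(tf.constant([-3., 1., 8.]))  # -> [0., 1., 6.]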
@tf_export("nn.leaky_relu")
def leaky_relu(features, alpha=0.2, name=None):
"""Compute the Leaky ReLU activation function.
"Rectifier Nonlinearities Improve Neural Network Acoustic Models"
AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013
https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf
Args:
features: A `Tensor` representing preactivation values. Must be one of
the following types: `float16`, `float32`, `float64`, `int32`, `int64`.
alpha: Slope of the activation function at x < 0.
name: A name for the operation (optional).
Returns:
The activation value.
"""
with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name:
features = ops.convert_to_tensor(features, name="features")
if features.dtype.is_integer:
features = math_ops.to_float(features)
if compat.forward_compatible(2018, 11, 1):
if isinstance(alpha, np.ndarray):
alpha = np.asscalar(alpha)
return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)
alpha = ops.convert_to_tensor(alpha, dtype=features.dtype, name="alpha")
return math_ops.maximum(alpha * features, features, name=name)
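# Example (illustrative sketch; assumes the public `tf` namespace):
#
#   tf.nn.leaky_relu(tf.constant([-2., 0., 3.]), alpha=0.2)
#   # -> [-0.4, 0., 3.], i.e. max(alpha * x, x) elementwise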
def _flatten_outer_dims(logits):
"""Flattens logits' outer dimensions and keep its last dimension."""
rank = array_ops.rank(logits)
last_dim_size = array_ops.slice(
array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))
# Set output shape if known.
if not context.executing_eagerly():
shape = logits.get_shape()
if shape is not None and shape.dims is not None:
shape = shape.as_list()
product = 1
product_valid = True
for d in shape[:-1]:
if d is None:
product_valid = False
break
else:
product *= d
if product_valid:
output_shape = [product, shape[-1]]
output.set_shape(output_shape)
return output
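# Illustrative sketch of the helper above: a [2, 3, 5] tensor becomes [6, 5],
# so the softmax helpers below only ever operate on a 2-D batch-of-rows
# layout.
#
#   logits = tf.zeros([2, 3, 5])
#   _flatten_outer_dims(logits).shape  # -> [6, 5]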
def _softmax(logits, compute_op, dim=-1, name=None):
"""Helper function for softmax and log_softmax.
It reshapes and transposes the input logits into a 2-D Tensor and then invokes
the tf.nn._softmax or tf.nn._log_softmax function. The output would be
transposed and reshaped back.
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
compute_op: Either gen_nn_ops.softmax or gen_nn_ops.log_softmax
dim: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `dim` is beyond the last
dimension of `logits`.
"""
def _swap_axis(logits, dim_index, last_index, name=None):
"""Swaps logits's dim_index and last_index."""
return array_ops.transpose(
logits,
array_ops.concat([
math_ops.range(dim_index), [last_index],
math_ops.range(dim_index + 1, last_index), [dim_index]
], 0),
name=name)
logits = ops.convert_to_tensor(logits)
# We need its original shape for shape inference.
shape = logits.get_shape()
# Use value equality (not `is`) when comparing against the integer -1.
is_last_dim = (dim == -1) or (dim == shape.ndims - 1)
if is_last_dim:
return compute_op(logits, name=name)
dim_val = dim
if isinstance(dim, ops.Tensor):
dim_val = tensor_util.constant_value(dim)
if dim_val is not None and (dim_val < -shape.ndims or dim_val >= shape.ndims):
raise errors_impl.InvalidArgumentError(
None, None,
"Dimension (%d) must be in the range [%d, %d) where %d is the number of"
" dimensions in the input." % (dim_val, -shape.ndims, shape.ndims,
shape.ndims))
# If dim is not the last dimension, we have to do a transpose so that we can
# still perform softmax on its last dimension.
# In case dim is negative (and is not last dimension -1), add shape.ndims
ndims = array_ops.rank(logits)
if not isinstance(dim, ops.Tensor):
if dim < 0:
dim += ndims
else:
dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)
# Swap logits' dimension of dim and its last dimension.
input_rank = array_ops.rank(logits)
dim_axis = dim % shape.ndims
logits = _swap_axis(logits, dim_axis, math_ops.subtract(input_rank, 1))
# Do the actual softmax on its last dimension.
output = compute_op(logits)
output = _swap_axis(
output, dim_axis, math_ops.subtract(input_rank, 1), name=name)
# Make shape inference work since transpose may erase its static shape.
output.set_shape(shape)
return output
@tf_export(v1=["nn.softmax", "math.softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax(logits, axis=None, name=None, dim=None):
"""Computes softmax activations.
This function performs the equivalent of
softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for `axis`.
Returns:
A `Tensor`. Has the same type and shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.softmax, axis, name)
@tf_export("nn.softmax", "math.softmax", v1=[])
def softmax_v2(logits, axis=None, name=None):
"""Computes softmax activations.
This function performs the equivalent of
softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type and shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.softmax, axis, name)
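# Example (illustrative sketch; assumes the public `tf` namespace):
#
#   logits = tf.constant([[2.0, 1.0, 0.1]])
#   tf.nn.softmax(logits)  # -> ~[[0.659, 0.242, 0.099]]
#   # e.g. exp(2.0) / (exp(2.0) + exp(1.0) + exp(0.1)) ~= 7.39 / 11.21 ~= 0.659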
@tf_export(v1=["nn.log_softmax", "math.log_softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def log_softmax(logits, axis=None, name=None, dim=None):
"""Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for `axis`.
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.log_softmax, axis, name)
@tf_export("nn.log_softmax", "math.log_softmax", v1=[])
def log_softmax_v2(logits, axis=None, name=None):
"""Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.log_softmax, axis, name)
def _ensure_xent_args(name, sentinel, labels, logits):
# Make sure that all arguments were passed as named arguments.
if sentinel is not None:
raise ValueError("Only call `%s` with "
"named arguments (labels=..., logits=..., ...)" % name)
if labels is None or logits is None:
raise ValueError("Both labels and logits must be provided.")
@tf_export("nn.softmax_cross_entropy_with_logits", v1=[])
def softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `axis` argument specifying the class dimension.
`logits` and `labels` must have the same dtype (either `float16`, `float32`,
or `float64`).
Backpropagation will happen into both `logits` and `labels`. To disallow
backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
before feeding it to this function.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Unscaled log probabilities.
axis: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
return softmax_cross_entropy_with_logits_v2_helper(
labels=labels, logits=logits, axis=axis, name=name)
@tf_export(v1=["nn.softmax_cross_entropy_with_logits_v2"])
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax_cross_entropy_with_logits_v2_helper(
labels, logits, axis=None, name=None, dim=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `axis` argument specifying the class dimension.
`logits` and `labels` must have the same dtype (either `float16`, `float32`,
or `float64`).
Backpropagation will happen into both `logits` and `labels`. To disallow
backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
before feeding it to this function.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Unscaled log probabilities.
axis: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for axis.
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
# TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This
# could break users who call this with bad labels, but disregard the bad
# results.
axis = deprecated_argument_lookup("axis", axis, "dim", dim)
del dim
if axis is None:
axis = -1
with ops.name_scope(name, "softmax_cross_entropy_with_logits",
[logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
convert_to_float32 = (
logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16)
precise_logits = math_ops.cast(
logits, dtypes.float32) if convert_to_float32 else logits
# labels and logits must be of the same type
labels = math_ops.cast(labels, precise_logits.dtype)
input_rank = array_ops.rank(precise_logits)
# For shape inference.
shape = logits.get_shape()
# Move the dim to the end if dim is not the last dimension.
if axis != -1:
def _move_dim_to_end(tensor, dim_index, rank):
return array_ops.transpose(
tensor,
array_ops.concat([
math_ops.range(dim_index),
math_ops.range(dim_index + 1, rank), [dim_index]
], 0))
precise_logits = _move_dim_to_end(precise_logits, axis, input_rank)
labels = _move_dim_to_end(labels, axis, input_rank)
input_shape = array_ops.shape(precise_logits)
# Make precise_logits and labels into matrices.
precise_logits = _flatten_outer_dims(precise_logits)
labels = _flatten_outer_dims(labels)
# Do the actual op computation.
# The second output tensor contains the gradients. We use it in
# _CrossEntropyGrad() in nn_grad but not here.
cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
# The output cost shape should be the input minus axis.
output_shape = array_ops.slice(input_shape, [0],
[math_ops.subtract(input_rank, 1)])
cost = array_ops.reshape(cost, output_shape)
# Make shape inference work since reshape and transpose may erase its static
# shape.
if not context.executing_eagerly(
) and shape is not None and shape.dims is not None:
shape = shape.as_list()
del shape[axis]
cost.set_shape(shape)
if convert_to_float32:
return math_ops.cast(cost, logits.dtype)
else:
return cost
_XENT_DEPRECATION = """
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See `tf.nn.softmax_cross_entropy_with_logits_v2`.
"""
@tf_export(v1=["nn.softmax_cross_entropy_with_logits"])
@deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION)
def softmax_cross_entropy_with_logits(
_sentinel=None, # pylint: disable=invalid-name
labels=None,
logits=None,
dim=-1,
name=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `dim` argument specifying the class dimension.
Backpropagation will happen only into `logits`. To calculate a cross entropy
loss that allows backpropagation into both `logits` and `labels`, see
`tf.nn.softmax_cross_entropy_with_logits_v2`.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
_sentinel: Used to prevent positional parameters. Internal, do not use.
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Unscaled log probabilities.
dim: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
_ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels,
logits)
with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg",
[logits, labels]) as name:
labels = array_ops.stop_gradient(labels, name="labels_stop_gradient")
return softmax_cross_entropy_with_logits_v2(
labels=labels, logits=logits, axis=dim, name=name)
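# Example (illustrative sketch; assumes the public `tf` namespace). The loss
# is the negative log-probability the softmax assigns to the labeled class:
#
#   labels = tf.constant([[0., 1., 0.]])
#   logits = tf.constant([[1., 3., 1.]])
#   tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
#   # == -reduce_sum(labels * log_softmax(logits), axis=-1) -> ~[0.24]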
@tf_export("nn.sparse_softmax_cross_entropy_with_logits")
def sparse_softmax_cross_entropy_with_logits(
_sentinel=None, # pylint: disable=invalid-name
labels=None,
logits=None,
name=None):
"""Computes sparse softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** For this operation, the probability of a given label is considered
exclusive. That is, soft classes are not allowed, and the `labels` vector
must provide a single specific index for the true class for each row of
`logits` (each minibatch entry). For soft softmax classification with
a probability distribution for each entry, see
`softmax_cross_entropy_with_logits_v2`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits of shape
`[batch_size, num_classes]` and have labels of shape
`[batch_size]`, but higher dimensions are supported, in which
case the `dim`-th dimension is assumed to be of size `num_classes`.
`logits` must have the dtype of `float16`, `float32`, or `float64`, and
`labels` must have the dtype of `int32` or `int64`.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
_sentinel: Used to prevent positional parameters. Internal, do not use.
labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
`labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
must be an index in `[0, num_classes)`. Other values will raise an
exception when this op is run on CPU, and return `NaN` for corresponding
loss and gradient rows on GPU.
logits: Unscaled log probabilities of shape
`[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or
`float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `labels` and of the same type as `logits`
with the softmax cross entropy loss.
Raises:
ValueError: If logits are scalars (need to have rank >= 1) or if the rank
of the labels is not equal to the rank of the logits minus one.
"""
_ensure_xent_args("sparse_softmax_cross_entropy_with_logits", _sentinel,
labels, logits)
# TODO(pcmurray) Raise an error when the label is not an index in
# [0, num_classes). Note: This could break users who call this with bad
# labels, but disregard the bad results.
# Reshape logits and labels to rank 2.
with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits",
[labels, logits]):
labels = ops.convert_to_tensor(labels)
logits = ops.convert_to_tensor(logits)
precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype(
logits.dtype) == dtypes.float16) else logits
# Store label shape for result later.
labels_static_shape = labels.get_shape()
labels_shape = array_ops.shape(labels)
static_shapes_fully_defined = (
labels_static_shape.is_fully_defined() and
logits.get_shape()[:-1].is_fully_defined())
if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
raise ValueError(
"Logits cannot be scalars - received shape %s." % logits.get_shape())
if logits.get_shape().ndims is not None and (
labels_static_shape.ndims is not None and
labels_static_shape.ndims != logits.get_shape().ndims - 1):
raise ValueError("Rank mismatch: Rank of labels (received %s) should "
"equal rank of logits minus 1 (received %s)." %
(labels_static_shape.ndims, logits.get_shape().ndims))
if (static_shapes_fully_defined and
labels_static_shape != logits.get_shape()[:-1]):
raise ValueError("Shape mismatch: The shape of labels (received %s) "
"should equal the shape of logits except for the last "
"dimension (received %s)." % (labels_static_shape,
logits.get_shape()))
# Check if no reshapes are required.
if logits.get_shape().ndims == 2:
cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
if logits.dtype == dtypes.float16:
return math_ops.cast(cost, dtypes.float16)
else:
return cost
# Perform a check of the dynamic shapes if the static shapes are not fully
# defined.
shape_checks = []
if not static_shapes_fully_defined:
shape_checks.append(
check_ops.assert_equal(
array_ops.shape(labels),
array_ops.shape(logits)[:-1]))
with ops.control_dependencies(shape_checks):
# Reshape logits to 2 dim, labels to 1 dim.
num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1]
precise_logits = array_ops.reshape(precise_logits, [-1, num_classes])
labels = array_ops.reshape(labels, [-1])
# The second output tensor contains the gradients. We use it in
# _CrossEntropyGrad() in nn_grad but not here.
cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
cost = array_ops.reshape(cost, labels_shape)
cost.set_shape(labels_static_shape)
if logits.dtype == dtypes.float16:
return math_ops.cast(cost, dtypes.float16)
else:
return cost
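# Example (illustrative sketch; assumes the public `tf` namespace). Labels
# are class indices rather than one-hot rows:
#
#   labels = tf.constant([1, 0])              # shape [2]
#   logits = tf.constant([[1., 3., 1.],
#                         [2., 0., 0.]])      # shape [2, 3]
#   tf.nn.sparse_softmax_cross_entropy_with_logits(
#       labels=labels, logits=logits)         # per-example losses, shape [2]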
@tf_export("nn.avg_pool")
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `value`.
Args:
value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: A list or tuple of 4 ints. The size of the window for each dimension
of the input tensor.
strides: A list or tuple of 4 ints. The stride of the sliding window for
each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` with the same type as `value`. The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool", [value]) as name:
value = ops.convert_to_tensor(value, name="input")
return gen_nn_ops.avg_pool(
value,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
@tf_export("nn.max_pool")
def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
"""Performs the max pooling on the input.
Args:
value: A 4-D `Tensor` of the format specified by `data_format`.
ksize: A list or tuple of 4 ints. The size of the window for each dimension
of the input tensor.
strides: A list or tuple of 4 ints. The stride of the sliding window for
each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
with ops.name_scope(name, "MaxPool", [value]) as name:
value = ops.convert_to_tensor(value, name="input")
return gen_nn_ops.max_pool(
value,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
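# Example (illustrative sketch; assumes the public `tf` namespace). A 2x2
# window with stride 2 halves each spatial dimension:
#
#   x = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
#   tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
#                  padding="VALID")
#   # -> values [[5., 7.], [13., 15.]] in a [1, 2, 2, 1] tensor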
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool_with_argmax", v1=[])
def max_pool_with_argmax_v2(input,
ksize,
strides,
padding,
data_format="NHWC",
output_dtype=dtypes.int64,
name=None):
"""Performs max pooling on the input and outputs both max values and indices.
The indices in `argmax` are flattened, so that a maximum value at position
`[b, y, x, c]` becomes flattened index
`((b * height + y) * width + x) * channels + c`.
The indices returned are always in `[0, height) x [0, width)` before
flattening, even if padding is involved and the mathematically correct answer
is outside (either negative or too large). This is a bug, but fixing it is
difficult to do in a safe backwards compatible way, especially due to
flattening.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
`uint32`, `uint64`.
4-D with shape `[batch, height, width, channels]`. Input to pool over.
ksize: A list of `ints` that has length `>= 4`.
The size of the window for each dimension of the input tensor.
strides: A list of `ints` that has length `>= 4`.
The stride of the sliding window for each dimension of the
input tensor.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string`, must be set to `"NHWC"`. Defaults to
`"NHWC"`.
Specify the data format of the input and output data.
output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`.
Defaults to `tf.int64`.
The dtype of the returned argmax tensor.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, argmax).
output: A `Tensor`. Has the same type as `input`.
argmax: A `Tensor` of type `output_dtype`.
"""
if data_format != "NHWC":
raise ValueError("Data formats other than 'NHWC' are not yet supported")
return gen_nn_ops.max_pool_with_argmax(input=input,
ksize=ksize,
strides=strides,
padding=padding,
Targmax=output_dtype,
name=name)
# pylint: enable=redefined-builtin
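# Worked example of the flattened-index formula above: for an input of shape
# [batch, height=4, width=4, channels=1], a maximum at position
# [b=0, y=1, x=1, c=0] gets argmax ((0 * 4 + 1) * 4 + 1) * 1 + 0 = 5.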
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
"""Calculates the compute resources needed for Conv2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
filter_in_depth = int(filter_shape[2])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats(
"flops",
(output_count * filter_in_depth * filter_height * filter_width * 2))
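# Worked example of the estimate above: a Conv2D producing a
# [1, 56, 56, 128] output from a 3x3 filter with 64 input channels counts
# (1 * 56 * 56 * 128) * 64 * 3 * 3 * 2 = 462,422,016 flops
# (one multiply plus one add per filter tap, hence the factor of 2).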
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
"""Calculates the compute resources needed for DepthwiseConv2dNative."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
"""Calculates the computing needed for BiasAdd."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
input_count = np.prod(input_shape.as_list())
return ops.OpStats("flops", input_count)
@tf_export(v1=["nn.xw_plus_b"])
def xw_plus_b(x, weights, biases, name=None): # pylint: disable=invalid-name
"""Computes matmul(x, weights) + biases.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add(mm, biases, name=name)
def xw_plus_b_v1(x, weights, biases, name=None): # pylint: disable=invalid-name
"""Computes matmul(x, weights) + biases.
This is a deprecated version of `xw_plus_b` that will soon be removed.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b_v1" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add_v1(mm, biases, name=name)
def _get_noise_shape(x, noise_shape):
# If noise_shape is none return immediately.
if noise_shape is None:
return array_ops.shape(x)
try:
# Best effort to figure out the intended shape.
# If not possible, let the op handle it.
# In eager mode an exception will show up.
noise_shape_ = tensor_shape.as_shape(noise_shape)
except (TypeError, ValueError):
return noise_shape
if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims):
new_dims = []
for i, dim in enumerate(x.shape.dims):
if noise_shape_.dims[i].value is None and dim.value is not None:
new_dims.append(dim.value)
else:
new_dims.append(noise_shape_.dims[i].value)
return tensor_shape.TensorShape(new_dims)
return noise_shape
@tf_export(v1=["nn.dropout"])
@deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. "
"Rate should be set to `rate = 1 - keep_prob`.",
"keep_prob")
def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,
rate=None): # pylint: disable=invalid-name
"""Computes dropout.
For each element of `x`, with probability `rate`, outputs `0`, and otherwise
scales up the input by `1 / (1-rate)`. The scaling is such that the expected
sum is unchanged.
By default, each element is kept or dropped independently. If `noise_shape`
is specified, it must be
[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
kept independently and each row and column will be kept or not kept together.
Args:
x: A floating point tensor.
keep_prob: (deprecated) A deprecated alias for `(1-rate)`.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
`tf.set_random_seed` for behavior.
name: A name for this operation (optional).
rate: A scalar `Tensor` with the same type as `x`. The probability that each
element of `x` is discarded.
Returns:
A Tensor of the same shape of `x`.
Raises:
ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
point tensor.
"""
try:
keep = 1. - keep_prob if keep_prob is not None else None
except TypeError:
raise ValueError("keep_prob must be a floating point number or Tensor "
"(got %r)" % keep_prob)
rate = deprecation.deprecated_argument_lookup(
"rate", rate,
"keep_prob", keep)
if rate is None:
raise ValueError("You must provide a rate to dropout.")
return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
@tf_export("nn.dropout", v1=[])
def dropout_v2(x, rate, noise_shape=None, seed=None, name=None): # pylint: disable=invalid-name
"""Computes dropout.
With probability `rate`, drops elements of `x`. Inputs that are kept are
scaled up by `1 / (1 - rate)`, so that the expected sum is unchanged;
dropped elements are output as `0`.
By default, each element is kept or dropped independently. If `noise_shape`
is specified, it must be
[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
kept independently and each row and column will be kept or not kept together.
Args:
x: A floating point tensor.
rate: A scalar `Tensor` with the same type as x. The probability
that each element is dropped. For example, setting rate=0.1 would drop
10% of input elements.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
`tf.set_random_seed`
for behavior.
name: A name for this operation (optional).
Returns:
A Tensor of the same shape of `x`.
Raises:
ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
point tensor.
"""
with ops.name_scope(name, "dropout", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if not x.dtype.is_floating:
raise ValueError("x has to be a floating point tensor since it's going to"
" be scaled. Got a %s tensor instead." % x.dtype)
if isinstance(rate, numbers.Real) and not (rate >= 0 and rate < 1):
raise ValueError("rate must be a scalar tensor or a float in the "
"range [0, 1), got %g" % rate)
# Early return if nothing needs to be dropped.
if isinstance(rate, numbers.Real) and rate == 0:
return x
if context.executing_eagerly():
if isinstance(rate, ops.EagerTensor):
if rate.numpy() == 0:
return x
else:
rate = ops.convert_to_tensor(
rate, dtype=x.dtype, name="rate")
rate.get_shape().assert_is_compatible_with(tensor_shape.scalar())
# Do nothing if we know rate == 0
if tensor_util.constant_value(rate) == 0:
return x
noise_shape = _get_noise_shape(x, noise_shape)
keep_prob = 1 - rate
# uniform [keep_prob, 1.0 + keep_prob)
random_tensor = keep_prob
random_tensor += random_ops.random_uniform(
noise_shape, seed=seed, dtype=x.dtype)
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = math_ops.floor(random_tensor)
ret = math_ops.divide(x, keep_prob) * binary_tensor
if not context.executing_eagerly():
ret.set_shape(x.get_shape())
return ret
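# Example (illustrative sketch; assumes the public `tf` namespace). With
# rate=0.25, kept elements are scaled by 1 / (1 - 0.25) = 4/3 so the
# expected sum is preserved:
#
#   x = tf.ones([2, 4])
#   tf.nn.dropout(x, rate=0.25, seed=1)
#   # each kept entry is ~1.333, each dropped entry is 0.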
@tf_export("math.top_k", "nn.top_k")
def top_k(input, k=1, sorted=True, name=None): # pylint: disable=redefined-builtin
"""Finds values and indices of the `k` largest entries for the last dimension.
If the input is a vector (rank=1), finds the `k` largest entries in the vector
and outputs their values and indices as vectors. Thus `values[j]` is the
`j`-th largest entry in `input`, and its index is `indices[j]`.
For matrices (resp. higher rank input), computes the top `k` entries in each
row (resp. vector along the last dimension). Thus,
values.shape = indices.shape = input.shape[:-1] + [k]
If two elements are equal, the lower-index element appears first.
Args:
input: 1-D or higher `Tensor` with last dimension at least `k`.
k: 0-D `int32` `Tensor`. Number of top elements to look for along the last
dimension (along each row for matrices).
sorted: If true the resulting `k` elements will be sorted by the values in
descending order.
name: Optional name for the operation.
Returns:
values: The `k` largest elements along each last dimensional slice.
indices: The indices of `values` within the last dimension of `input`.
"""
return gen_nn_ops.top_kv2(input, k=k, sorted=sorted, name=name)
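# Example (illustrative sketch; assumes the public `tf` namespace):
#
#   values, indices = tf.nn.top_k(tf.constant([1., 4., 2., 4.]), k=2)
#   # values  -> [4., 4.]
#   # indices -> [1, 3]  (equal values resolve toward the lower index)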
def nth_element(input, n, reverse=False, name=None): # pylint: disable=redefined-builtin
r"""Finds values of the `n`-th order statistic for the last dmension.
If the input is a vector (rank-1), finds the entries which is the nth-smallest
value in the vector and outputs their values as scalar tensor.
For matrices (resp. higher rank input), computes the entries which is the
nth-smallest value in each row (resp. vector along the last dimension). Thus,
values.shape = input.shape[:-1]
Args:
input: 1-D or higher `Tensor` with last dimension at least `n+1`.
n: A `Tensor` of type `int32`.
0-D. Position of sorted vector to select along the last dimension (along
each row for matrices). Valid range of n is `[0, input.shape[:-1])`
reverse: An optional `bool`. Defaults to `False`.
When set to True, finds the nth-largest value in the vector instead of
the nth-smallest.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
The `n`-th order statistic along each last dimensional slice.
"""
return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)
@tf_export(v1=["nn.fractional_max_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
"args are deprecated. Use fractional_max_pool_v2.")
def fractional_max_pool(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
deterministic=False,
seed=0,
seed2=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional max pooling on the input.
This is a deprecated version of `fractional_max_pool`.
Fractional max pooling is slightly different than regular max pooling. In
regular max pooling, you downsize an input set by taking the maximum value of
smaller N x N subsections of the set (often 2x2), and try to reduce the set by
a factor of N, where N is an integer. Fractional max pooling, as you might
expect from the word "fractional", means that the overall reduction ratio N
does not have to be an integer.
The sizes of the pooling regions are generated randomly but are fairly
uniform. For example, let's look at the height dimension, and the constraints
on the list of rows that will be pool boundaries.
First we define the following:
1. input_row_length : the number of rows from the input set
2. output_row_length : which will be smaller than the input
3. alpha = input_row_length / output_row_length : our reduction ratio
4. K = floor(alpha)
5. row_pooling_sequence : this is the result list of pool boundary rows
Then, row_pooling_sequence should satisfy:
1. a[0] = 0 : the first value of the sequence is 0
2. a[end] = input_row_length : the last value of the sequence is the size
3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
4. length(row_pooling_sequence) = output_row_length+1
For more details on fractional max pooling, see this paper: [Benjamin Graham,
Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
each dimension of `value`, currently only supports row and col dimension
and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check paper [Benjamin Graham, Fractional
Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
twice. The result would be [20, 16] for fractional max pooling.
deterministic: An optional `bool`. Deprecated; use `fractional_max_pool_v2`
instead.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
seed2: An optional `int`. Deprecated; use `fractional_max_pool_v2` instead.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional max pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
"""
return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic, seed, seed2,
name)
@tf_export("nn.fractional_max_pool", v1=[])
def fractional_max_pool_v2(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
seed=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional max pooling on the input.
Fractional max pooling is slightly different than regular max pooling. In
regular max pooling, you downsize an input set by taking the maximum value of
smaller N x N subsections of the set (often 2x2), and try to reduce the set by
a factor of N, where N is an integer. Fractional max pooling, as you might
expect from the word "fractional", means that the overall reduction ratio N
does not have to be an integer.
The sizes of the pooling regions are generated randomly but are fairly
uniform. For example, let's look at the height dimension, and the constraints
on the list of rows that will be pool boundaries.
First we define the following:
1. input_row_length : the number of rows from the input set
2. output_row_length : which will be smaller than the input
3. alpha = input_row_length / output_row_length : our reduction ratio
4. K = floor(alpha)
5. row_pooling_sequence : this is the result list of pool boundary rows
Then, row_pooling_sequence should satisfy:
1. a[0] = 0 : the first value of the sequence is 0
2. a[end] = input_row_length : the last value of the sequence is the size
3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
4. length(row_pooling_sequence) = output_row_length+1
For more details on fractional max pooling, see this paper: [Benjamin Graham,
Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
each dimension of `value`, currently only supports row and col dimension
and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check paper [Benjamin Graham, Fractional
Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
twice. The result would be [20, 16] for fractional max pooling.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional max pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
"""
if seed == 0:
return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=False,
seed=0, seed2=0, name=name)
else:
seed1, seed2 = random_seed.get_seed(seed)
return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=True,
seed=seed1, seed2=seed2, name=name)
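# Worked example of the boundary constraints above: for input_row_length = 10
# and output_row_length = 6, alpha = 10 / 6 ~= 1.67 and K = floor(alpha) = 1,
# so every pooling interval has size 1 or 2. One valid row_pooling_sequence
# is [0, 2, 3, 5, 7, 8, 10]: output_row_length + 1 = 7 boundaries, starting
# at 0 and ending at 10.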
@tf_export(v1=["nn.fractional_avg_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
"args are deprecated. Use fractional_avg_pool_v2.")
def fractional_avg_pool(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
deterministic=False,
seed=0,
seed2=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional average pooling on the input.
This is a deprecated version of `fractional_avg_pool`.
Fractional average pooling is similar to Fractional max pooling in the pooling
region generation step. The only difference is that after pooling regions are
generated, a mean operation is performed instead of a max operation in each
pooling region.
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
each dimension of `value`, currently only supports row and col dimension
and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check paper [Benjamin Graham, Fractional
Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
twice. The result would be [20, 16] for fractional avg pooling.
deterministic: An optional `bool`. Deprecated; use `fractional_avg_pool_v2`
instead.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
seed2: An optional `int`. Deprecated; use `fractional_avg_pool_v2` instead.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional avg pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
"""
return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic, seed, seed2,
name=name)
@tf_export("nn.fractional_avg_pool", v1=[])
def fractional_avg_pool_v2(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
seed=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional average pooling on the input.
Fractional average pooling is similar to Fractional max pooling in the pooling
region generation step. The only difference is that after pooling regions are
generated, a mean operation is performed instead of a max operation in each
pooling region.
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
each dimension of `value`, currently only supports row and col dimension
and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check paper [Benjamin Graham, Fractional
Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
twice. The result would be [20, 16] for fractional avg pooling.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional avg pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
"""
if seed == 0:
return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=False,
seed=0, seed2=0, name=name)
else:
seed1, seed2 = random_seed.get_seed(seed)
return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=True,
seed=seed1, seed2=seed2, name=name)
@tf_export(v1=["nn.conv1d"])
@deprecation.deprecated_arg_values(
None,
"`NCHW` for data_format is deprecated, use `NCW` instead",
warn_once=True,
data_format="NCHW")
@deprecation.deprecated_arg_values(
None,
"`NHWC` for data_format is deprecated, use `NWC` instead",
warn_once=True,
data_format="NHWC")
def conv1d(value,
filters,
stride,
padding,
use_cudnn_on_gpu=None,
data_format=None,
name=None):
r"""Computes a 1-D convolution given 3-D input and filter tensors.
Given an input tensor of shape
[batch, in_width, in_channels]
if data_format is "NWC", or
[batch, in_channels, in_width]
if data_format is "NCW",
and a filter / kernel tensor of shape
[filter_width, in_channels, out_channels], this op reshapes
the arguments to pass them to conv2d to perform the equivalent
convolution operation.
Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
For example, if `data_format` does not start with "NC", a tensor of shape
[batch, in_width, in_channels]
is reshaped to
[batch, 1, in_width, in_channels],
and the filter is reshaped to
[1, filter_width, in_channels, out_channels].
The result is then reshaped back to
[batch, out_width, out_channels]
\(where out_width is a function of the stride and padding as in conv2d\) and
returned to the caller.
Args:
value: A 3D `Tensor`. Must be of type `float16`, `float32`, or `float64`.
filters: A 3D `Tensor`. Must have the same type as `value`.
stride: An `integer`. The number of entries by which
the filter is moved right at each step.
padding: 'SAME' or 'VALID'
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from `"NWC", "NCW"`. Defaults
to `"NWC"`, the data is stored in the order of
[batch, in_width, in_channels]. The `"NCW"` format stores
data as [batch, in_channels, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as input.
Raises:
ValueError: if `data_format` is invalid.
"""
with ops.name_scope(name, "conv1d", [value, filters]) as name:
# Reshape the input tensor to [batch, 1, in_width, in_channels]
if data_format is None or data_format == "NHWC" or data_format == "NWC":
data_format = "NHWC"
spatial_start_dim = 1
strides = [1, 1, stride, 1]
elif data_format == "NCHW" or data_format == "NCW":
data_format = "NCHW"
spatial_start_dim = 2
strides = [1, 1, 1, stride]
else:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
value = array_ops.expand_dims(value, spatial_start_dim)
filters = array_ops.expand_dims(filters, 0)
result = gen_nn_ops.conv2d(
value,
filters,
strides,
padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format)
return array_ops.squeeze(result, [spatial_start_dim])
@tf_export("nn.conv1d", v1=[])
def conv1d_v2(input, # pylint: disable=redefined-builtin
filters,
stride,
padding,
data_format=None,
name=None):
r"""Computes a 1-D convolution given 3-D input and filter tensors.
Given an input tensor of shape
[batch, in_width, in_channels]
if data_format is "NWC", or
[batch, in_channels, in_width]
if data_format is "NCW",
and a filter / kernel tensor of shape
[filter_width, in_channels, out_channels], this op reshapes
the arguments to pass them to conv2d to perform the equivalent
convolution operation.
Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
For example, if `data_format` does not start with "NC", a tensor of shape
[batch, in_width, in_channels]
is reshaped to
[batch, 1, in_width, in_channels],
and the filter is reshaped to
[1, filter_width, in_channels, out_channels].
The result is then reshaped back to
[batch, out_width, out_channels]
\(where out_width is a function of the stride and padding as in conv2d\) and
returned to the caller.
Args:
input: A 3D `Tensor`. Must be of type `float16`, `float32`, or `float64`.
filters: A 3D `Tensor`. Must have the same type as `input`.
stride: An `integer`. The number of entries by which
the filter is moved right at each step.
padding: 'SAME' or 'VALID'
data_format: An optional `string` from `"NWC", "NCW"`. Defaults
to `"NWC"`, the data is stored in the order of
[batch, in_width, in_channels]. The `"NCW"` format stores
data as [batch, in_channels, in_width].
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as input.
Raises:
ValueError: if `data_format` is invalid.
"""
return conv1d(input, # pylint: disable=redefined-builtin
filters,
stride,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
name=name)
def conv1d_transpose(
value,
filter, # pylint: disable=redefined-builtin
output_shape,
stride,
padding="SAME",
data_format="NWC",
name=None):
"""The transpose of `conv1d`.
This operation is sometimes called "deconvolution" after [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `conv1d` rather than an actual
deconvolution.
Args:
value: A 3-D `Tensor` of type `float` and shape
`[batch, in_width, in_channels]` for `NWC` data format or
`[batch, in_channels, in_width]` for `NCW` data format.
filter: A 3-D `Tensor` with the same type as `value` and shape
`[filter_width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
stride: An `integer`. The number of entries by which
the filter is moved right at each step.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NWC' and 'NCW' are supported.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.name_scope(name, "conv1d_transpose",
[value, filter, output_shape]) as name:
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(3)):
raise ValueError("output_shape must have shape (3,), got {}".format(
output_shape_.get_shape()))
# The format could be either NWC or NCW, map to NHWC or NCHW
if data_format is None or data_format == "NWC":
data_format_2d = "NHWC"
axis = 2
elif data_format == "NCW":
data_format_2d = "NCHW"
axis = 1
else:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
if not value.get_shape().dims[axis].is_compatible_with(
filter.get_shape()[2]):
raise ValueError("input channels does not match filter's input channels, "
"{} != {}".format(value.get_shape()[axis],
filter.get_shape()[2]))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [3] if reached this point.
if not filter.get_shape().dims[1].is_compatible_with(
output_shape[axis]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[axis],
filter.get_shape()[1]))
if padding != "VALID" and padding != "SAME":
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
# Reshape the input tensor to [batch, 1, in_width, in_channels]
if data_format_2d == "NHWC":
output_shape_ = array_ops.concat(
[output_shape_[:1], [1], output_shape_[1:]], axis=0)
spatial_start_dim = 1
strides = [1, 1, stride, 1]
else:
output_shape_ = array_ops.concat(
[output_shape_[:2], [1], output_shape_[2:]], axis=0)
spatial_start_dim = 2
strides = [1, 1, 1, stride]
value = array_ops.expand_dims(value, spatial_start_dim)
filter = array_ops.expand_dims(filter, 0) # pylint: disable=redefined-builtin
result = gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape_,
filter=filter,
out_backprop=value,
strides=strides,
padding=padding,
data_format=data_format_2d,
name=name)
return array_ops.squeeze(result, [spatial_start_dim])
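# Usage sketch (shapes are illustrative): undoing a stride-2 conv1d that mapped
# width 8 -> 4. The output shape must be supplied explicitly because several
# input widths can collapse to the same convolved width.
#
#   y = tf.random.normal([2, 4, 5])   # [batch, width, channels]
#   w = tf.random.normal([3, 3, 5])   # [filter_width, output_channels, in_channels]
#   x = conv1d_transpose(y, w, output_shape=[2, 8, 3], stride=2)
#   # x.shape == [2, 8, 3]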
@ops.RegisterStatistics("Dilation2D", "flops")
def _calc_dilation2d_flops(graph, node):
"""Calculates the compute resources needed for Dilation2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
@tf_export(v1=["nn.erosion2d"])
def erosion2d(value, kernel, strides, rates, padding, name=None):
"""Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors.
The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
`kernel` tensor has shape `[kernel_height, kernel_width, depth]`, i.e.,
each input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the
output tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D erosion is given by:
output[b, y, x, c] =
min_{dy, dx} value[b,
strides[1] * y - rates[1] * dy,
strides[2] * x - rates[2] * dx,
c] -
kernel[dy, dx, c]
Duality: The erosion of `value` by the `kernel` is equal to the negation of
the dilation of `-value` by the reflected `kernel`.
Args:
value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
kernel: A `Tensor`. Must have the same type as `value`.
3-D with shape `[kernel_height, kernel_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
rates: A list of `ints` that has length `>= 4`.
1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
name: A name for the operation (optional). If not specified "erosion2d"
is used.
Returns:
A `Tensor`. Has the same type as `value`.
4-D with shape `[batch, out_height, out_width, depth]`.
Raises:
ValueError: If the `value` depth does not match `kernel`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.name_scope(name, "erosion2d", [value, kernel]) as name:
# Reduce erosion to dilation by duality.
return math_ops.negative(
gen_nn_ops.dilation2d(
input=math_ops.negative(value),
filter=array_ops.reverse_v2(kernel, [0, 1]),
strides=strides,
rates=rates,
padding=padding,
name=name))
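# Duality sketch (illustrative, not part of the public contract beyond the
# docstring above): erosion is computed as a negated dilation of the negated
# input with a spatially reflected kernel, so these agree elementwise:
#
#   a = erosion2d(value, kernel, strides=[1, 1, 1, 1],
#                 rates=[1, 1, 1, 1], padding="SAME")
#   b = -tf.nn.dilation2d(-value, tf.reverse(kernel, [0, 1]),
#                         strides=[1, 1, 1, 1], rates=[1, 1, 1, 1],
#                         padding="SAME")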
@tf_export("nn.erosion2d", v1=[])
def erosion2d_v2(value,
filters,
strides,
padding,
data_format,
dilations,
name=None):
"""Computes the grayscale erosion of 4-D `value` and 3-D `filters` tensors.
The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
`filters` tensor has shape `[filters_height, filters_width, depth]`, i.e.,
each input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the
output tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D erosion is given by:
output[b, y, x, c] =
min_{dy, dx} value[b,
strides[1] * y - dilations[1] * dy,
strides[2] * x - dilations[2] * dx,
c] -
filters[dy, dx, c]
Duality: The erosion of `value` by the `filters` is equal to the negation of
the dilation of `-value` by the reflected `filters`.
Args:
value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
filters: A `Tensor`. Must have the same type as `value`.
3-D with shape `[filters_height, filters_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: A `string`, only `"NHWC"` is currently supported.
dilations: A list of `ints` that has length `>= 4`.
1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
name: A name for the operation (optional). If not specified "erosion2d"
is used.
Returns:
A `Tensor`. Has the same type as `value`.
4-D with shape `[batch, out_height, out_width, depth]`.
Raises:
ValueError: If the `value` depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
if data_format != "NHWC":
raise ValueError("Data formats other than NHWC are not yet supported")
with ops.name_scope(name, "erosion2d", [value, filters]) as name:
# Reduce erosion to dilation by duality.
return math_ops.negative(
gen_nn_ops.dilation2d(
input=math_ops.negative(value),
filter=array_ops.reverse_v2(filters, [0, 1]),
strides=strides,
rates=dilations,
padding=padding,
name=name))
@tf_export(v1=["math.in_top_k", "nn.in_top_k"])
def in_top_k(predictions, targets, k, name=None):
r"""Says whether the targets are in the top `K` predictions.
This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
prediction for the target class is among the top `k` predictions among
all predictions for example `i`. Note that the behavior of `InTopK` differs
from the `TopK` op in its handling of ties; if multiple classes have the
same prediction value and straddle the top-`k` boundary, all of those
classes are considered to be in the top `k`.
More formally, let
\\(predictions_i\\) be the predictions for all classes for example `i`,
\\(targets_i\\) be the target class for example `i`,
\\(out_i\\) be the output for example `i`,
$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
Args:
predictions: A `Tensor` of type `float32`.
A `batch_size` x `classes` tensor.
targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A `batch_size` vector of class ids.
k: An `int`. Number of top elements to look at for computing precision.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`. Computed Precision at `k` as a `bool Tensor`.
"""
with ops.name_scope(name, "in_top_k"):
return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)
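# Tie-handling sketch (values are illustrative; v2 argument order): a tie that
# straddles the top-k boundary counts every tied class as "in the top k".
#
#   predictions = tf.constant([[0.8, 0.8, 0.1]], dtype=tf.float32)
#   targets = tf.constant([1])
#   tf.math.in_top_k(targets, predictions, k=1)   # -> [True]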
@tf_export("math.in_top_k", "nn.in_top_k", v1=[])
def in_top_k_v2(targets, predictions, k, name=None):
return in_top_k(predictions, targets, k, name)
in_top_k_v2.__doc__ = in_top_k.__doc__
tf_export(v1=["nn.quantized_avg_pool"])(gen_nn_ops.quantized_avg_pool)
tf_export(v1=["nn.quantized_conv2d"])(gen_nn_ops.quantized_conv2d)
tf_export(v1=["nn.quantized_relu_x"])(gen_nn_ops.quantized_relu_x)
tf_export(v1=["nn.quantized_max_pool"])(gen_nn_ops.quantized_max_pool) | |
trainer.py | import logging
import pickle
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Union
from numpy.typing import NDArray
from sklearn.preprocessing import LabelEncoder
from predictions.face_recognizer import ModelType
from settings import output
logger = logging.getLogger(__name__)
def read_pickle(path: str) -> Dict[str, NDArray[Any]]:
"""Loading pickled object from path."""
with open(path, "rb") as fr:
return pickle.load(fr)
EmbsDictOrPath = Union[str, Dict[str, List[NDArray[Any]]]]
class Trainer(ABC):
"""Model trainer class template."""
def __init__(self, model: ModelType, embeddings: EmbsDictOrPath) -> None:
self._model = model
self._embeddings: Optional[List[Any]] = None
self._labels: Optional[List[Any]] = None
self.label_encoder = LabelEncoder()
self.load_embeddings(embeddings)
def load_embeddings(self, embeddings: EmbsDictOrPath) -> None:
if isinstance(embeddings, str):
logger.info("Loading embeddings from path %s.", embeddings)
data = read_pickle(embeddings)
self._embeddings = data["vectors"]
self._labels = data["classes"]
elif isinstance(embeddings, dict):
logger.info("Loading embeddings from dictionary.")
self._embeddings = embeddings["vectors"]
self._labels = embeddings["classes"]
else:
raise TypeError("Input must be a dictionary or path to a pickled dict!")
@abstractmethod
def train(self) -> None:
pass
def store_model(self, fn: str = "model.pickle") -> None:
"""Saves model in directory as pickle."""
with open(output / fn, "wb") as fw:
pickle.dump(self._model, fw, protocol=pickle.HIGHEST_PROTOCOL)
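# Usage sketch (hypothetical subclass; the estimator type is illustrative):
#
#   class SVCTrainer(Trainer):
#       def train(self) -> None:
#           labels = self.label_encoder.fit_transform(self._labels)
#           self._model.fit(self._embeddings, labels)
#
#   trainer = SVCTrainer(SVC(probability=True), "embeddings.pickle")
#   trainer.train()
#   trainer.store_model("svc_model.pickle")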
DocumentManifest.js | const DomainResource = require('./DomainResource');
const Identifier = require('./Identifier');
const CodeableConcept = require('./CodeableConcept');
const Reference = require('./Reference');
const DocumentManifest_Content = require('./DocumentManifest_Content');
const DocumentManifest_Related = require('./DocumentManifest_Related');
class DocumentManifest extends DomainResource {
constructor ( opts ) {
super();
Object.assign(this, opts);
}
static get __resourceType () {
return 'DocumentManifest';
}
// This is a DocumentManifest resource
get resourceType () {
return this._resourceType;
}
set resourceType ( new_value ) {
// Throw if new value is not in the allowed values
let allowed_values = ['DocumentManifest'];
if ( allowed_values.indexOf(new_value) === -1 ) {
throw new Error(`Expected one of ${allowed_values}, got ${new_value} for field resourceType`);
}
this._resourceType = new_value;
}
// A single identifier that uniquely identifies this manifest. Principally used to refer to the manifest in non-FHIR contexts.
get masterIdentifier () {
return this._masterIdentifier;
}
set masterIdentifier ( new_value ) {
this._masterIdentifier = new Identifier(new_value);
}
// Other identifiers associated with the document manifest, including version independent identifiers.
get identifier () {
return this._identifier;
}
set identifier ( new_value ) {
this._identifier = Array.isArray(new_value) ? new_value.map(val => new Identifier(val)) : [new Identifier(new_value)];
}
// The status of this document manifest.
get status () {
return this._status;
}
set status ( new_value ) {
// Throw if new value is not in the allowed values
let allowed_values = ['current', 'superseded', 'entered-in-error'];
if ( allowed_values.indexOf(new_value) === -1 ) {
throw new Error(`Expected one of ${allowed_values}, got ${new_value} for field status`);
}
this._status = new_value;
}
// Specifies the kind of this set of documents (e.g. Patient Summary, Discharge Summary, Prescription, etc.). The type of a set of documents may be the same as one of the documents in it - especially if there is only one - but it may be wider.
get type () {
return this._type;
}
set type ( new_value ) {
this._type = new CodeableConcept(new_value);
}
// Who or what the set of documents is about. The documents can be about a person, (patient or healthcare practitioner), a device (i.e. machine) or even a group of subjects (such as a document about a herd of farm animals, or a set of patients that share a common exposure). If the documents cross more than one subject, then more than one subject is allowed here (unusual use case).
get subject () {
return this._subject;
}
set subject ( new_value ) {
this._subject = new Reference(new_value);
}
// When the document manifest was created for submission to the server (not necessarily the same thing as the actual resource last modified time, since it may be modified, replicated, etc.).
get created () {
return this._created;
}
set created ( new_value ) {
// Throw if new value does not match the pattern
let pattern = /-?[0-9]{4}(-(0[1-9]|1[0-2])(-(0[0-9]|[1-2][0-9]|3[0-1])(T([01][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9](\.[0-9]+)?(Z|(\+|-)((0[0-9]|1[0-3]):[0-5][0-9]|14:00)))?)?)?/;
if ( !pattern.test(new_value) ) {
throw new Error(`Invalid format for ${new_value} on field created`);
}
this._created = new_value;
}
// Identifies who is responsible for creating the manifest, and adding documents to it.
get author () {
return this._author;
}
set author ( new_value ) {
this._author = Array.isArray(new_value) ? new_value.map(val => new Reference(val)) : [new Reference(new_value)];
}
// A patient, practitioner, or organization for which this set of documents is intended.
get recipient () {
return this._recipient;
}
set recipient ( new_value ) {
this._recipient = Array.isArray(new_value) ? new_value.map(val => new Reference(val)) : [new Reference(new_value)];
}
// Identifies the source system, application, or software that produced the document manifest.
get source () {
return this._source;
}
set source ( new_value ) {
this._source = new_value;
}
// Human-readable description of the source document. This is sometimes known as the "title".
get description () {
return this._description;
}
set description ( new_value ) {
this._description = new_value;
}
// The list of Documents included in the manifest.
get content () {
return this._content;
}
set content ( new_value ) {
this._content = Array.isArray(new_value) ? new_value.map(val => new DocumentManifest_Content(val)) : [new DocumentManifest_Content(new_value)];
}
// Related identifiers or resources associated with the DocumentManifest.
get related () {
return this._related;
}
set related ( new_value ) {
this._related = Array.isArray(new_value) ? new_value.map(val => new DocumentManifest_Related(val)) : [new DocumentManifest_Related(new_value)];
}
toJSON () {
return Object.assign(super.toJSON(), {
resourceType: this._resourceType,
masterIdentifier: this._masterIdentifier,
identifier: this._identifier,
status: this._status,
type: this._type,
subject: this._subject,
created: this._created,
author: this._author,
recipient: this._recipient,
source: this._source,
description: this._description,
content: this._content,
related: this._related
});
}
}
module.exports = DocumentManifest;
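// Usage sketch (field values are illustrative):
//
//   const manifest = new DocumentManifest({
//     resourceType: 'DocumentManifest',
//     status: 'current',
//     content: [{ /* DocumentManifest_Content fields */ }],
//   });
//   const json = manifest.toJSON();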
build.rs | #[cfg(not(feature = "binary"))]
fn main() {}
#[cfg(feature = "binary")]
#[derive(Default)]
struct BootloaderConfig {
physical_memory_offset: Option<u64>,
kernel_stack_address: Option<u64>,
kernel_stack_size: Option<u64>,
boot_info_address: Option<u64>,
}
#[cfg(feature = "binary")]
fn parse_aligned_addr(key: &str, value: &str) -> u64 {
let num = if value.starts_with("0x") {
u64::from_str_radix(&value[2..], 16)
} else {
u64::from_str_radix(&value, 10)
};
let num = num.expect(&format!(
"`{}` in the kernel manifest must be an integer (is `{}`)",
key, value
));
if num % 0x1000 != 0 {
panic!(
"`{}` in the kernel manifest must be aligned to 4KiB (is `{}`)",
key, value
);
} else {
num
}
}
#[cfg(feature = "binary")]
fn parse_to_config(cfg: &mut BootloaderConfig, table: &toml::value::Table) {
use toml::Value;
for (key, value) in table {
match (key.as_str(), value.clone()) {
("kernel-stack-address", Value::Integer(i))
| ("physical-memory-offset", Value::Integer(i))
| ("boot-info-address", Value::Integer(i)) => {
panic!(
"`{0}` in the kernel manifest must be given as a string, \
as toml does not support unsigned 64-bit integers (try `{0} = \"{1}\"`)",
key.as_str(),
i
);
}
("kernel-stack-address", Value::String(s)) => {
cfg.kernel_stack_address = Some(parse_aligned_addr(key.as_str(), &s));
}
("boot-info-address", Value::String(s)) => {
cfg.boot_info_address = Some(parse_aligned_addr(key.as_str(), &s));
}
#[cfg(not(feature = "map_physical_memory"))]
("physical-memory-offset", Value::String(_)) => {
panic!(
"`physical-memory-offset` is only supported when the `map_physical_memory` \
feature of the crate is enabled"
);
}
#[cfg(feature = "map_physical_memory")]
("physical-memory-offset", Value::String(s)) => {
cfg.physical_memory_offset = Some(parse_aligned_addr(key.as_str(), &s));
}
("kernel-stack-size", Value::Integer(i)) => {
if i <= 0 {
panic!("`kernel-stack-size` in kernel manifest must be positive");
} else {
cfg.kernel_stack_size = Some(i as u64);
}
}
(s, _) => {
panic!(
"unknown key '{}' in kernel manifest \
- you may need to update the bootloader crate",
s
);
}
}
}
}
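// Sketch of the kernel manifest section this parser consumes (all addresses
// are illustrative; address-valued keys must be strings, as enforced above):
//
//     [package.metadata.bootloader]
//     kernel-stack-address = "0xFFFFFF8000000000"
//     kernel-stack-size = 128
//     boot-info-address = "0xFFFFFFFF80000000"
//     physical-memory-offset = "0xFFFF800000000000"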
#[cfg(feature = "binary")]
fn main() {
use std::{
env,
fs::{self, File},
io::Write,
path::{Path, PathBuf},
process::{self, Command},
};
use toml::Value;
let target = env::var("TARGET").expect("TARGET not set");
if Path::new(&target)
.file_stem()
.expect("target has no file stem")
!= "x86_64-bootloader"
{
panic!("The bootloader must be compiled for the `x86_64-bootloader.json` target.");
}
let out_dir = PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR not set"));
let kernel = PathBuf::from(match env::var("KERNEL") {
Ok(kernel) => kernel,
Err(_) => {
eprintln!(
"The KERNEL environment variable must be set for building the bootloader.\n\n\
If you use `bootimage` for building you need at least version 0.7.0. You can \
update `bootimage` by running `cargo install bootimage --force`."
);
process::exit(1);
}
});
let kernel_file_name = kernel
.file_name()
.expect("KERNEL has no valid file name")
.to_str()
.expect("kernel file name not valid utf8");
// check that the kernel file exists
assert!(
kernel.exists(),
"KERNEL does not exist: {}",
kernel.display()
);
// get access to llvm tools shipped in the llvm-tools-preview rustup component
let llvm_tools = match llvm_tools::LlvmTools::new() {
Ok(tools) => tools,
Err(llvm_tools::Error::NotFound) => {
eprintln!("Error: llvm-tools not found");
eprintln!("Maybe the rustup component `llvm-tools-preview` is missing?");
eprintln!(" Install it through: `rustup component add llvm-tools-preview`");
process::exit(1);
}
Err(err) => {
eprintln!("Failed to retrieve llvm-tools component: {:?}", err);
process::exit(1);
}
};
// check that kernel executable has code in it
let llvm_size = llvm_tools
.tool(&llvm_tools::exe("llvm-size"))
.expect("llvm-size not found in llvm-tools");
let mut cmd = Command::new(llvm_size);
cmd.arg(&kernel);
let output = cmd.output().expect("failed to run llvm-size");
let output_str = String::from_utf8_lossy(&output.stdout);
let second_line_opt = output_str.lines().skip(1).next();
let second_line = second_line_opt.expect("unexpected llvm-size line output");
let text_size_opt = second_line.split_ascii_whitespace().next();
let text_size = text_size_opt.expect("unexpected llvm-size output");
if text_size == "0" {
panic!("Kernel executable has an empty text section. Perhaps the entry point was set incorrectly?\n\n\
Kernel executable at `{}`\n", kernel.display());
}
// strip debug symbols from kernel for faster loading
let stripped_kernel_file_name = format!("kernel_stripped-{}", kernel_file_name);
let stripped_kernel = out_dir.join(&stripped_kernel_file_name);
let objcopy = llvm_tools
.tool(&llvm_tools::exe("llvm-objcopy"))
.expect("llvm-objcopy not found in llvm-tools");
let mut cmd = Command::new(&objcopy);
cmd.arg("--strip-debug");
cmd.arg(&kernel);
cmd.arg(&stripped_kernel);
let exit_status = cmd
.status()
.expect("failed to run objcopy to strip debug symbols");
if !exit_status.success() {
eprintln!("Error: Stripping debug symbols failed");
process::exit(1);
}
// wrap the kernel executable as binary in a new ELF file
let stripped_kernel_file_name_replaced = stripped_kernel_file_name
.replace('-', "_")
.replace('.', "_");
let kernel_bin = out_dir.join(format!("kernel_bin-{}.o", kernel_file_name));
let kernel_archive = out_dir.join(format!("libkernel_bin-{}.a", kernel_file_name));
let mut cmd = Command::new(&objcopy);
cmd.arg("-I").arg("binary");
cmd.arg("-O").arg("elf64-x86-64");
cmd.arg("--binary-architecture=i386:x86-64");
cmd.arg("--rename-section").arg(".data=.kernel");
cmd.arg("--redefine-sym").arg(format!(
"_binary_{}_start=_kernel_start_addr",
stripped_kernel_file_name_replaced
));
cmd.arg("--redefine-sym").arg(format!(
"_binary_{}_end=_kernel_end_addr",
stripped_kernel_file_name_replaced
));
cmd.arg("--redefine-sym").arg(format!(
"_binary_{}_size=_kernel_size",
stripped_kernel_file_name_replaced
));
cmd.current_dir(&out_dir);
cmd.arg(&stripped_kernel_file_name);
cmd.arg(&kernel_bin);
let exit_status = cmd.status().expect("failed to run objcopy");
if !exit_status.success() {
eprintln!("Error: Running objcopy failed");
process::exit(1);
}
// create an archive for linking
let ar = llvm_tools
.tool(&llvm_tools::exe("llvm-ar"))
.unwrap_or_else(|| {
eprintln!("Failed to retrieve llvm-ar component");
eprint!("This component is available since nightly-2019-03-29, ");
eprintln!("so try updating your toolchain if you're using an older nightly");
process::exit(1);
});
let mut cmd = Command::new(ar);
cmd.arg("crs");
cmd.arg(&kernel_archive);
cmd.arg(&kernel_bin);
let exit_status = cmd.status().expect("failed to run ar");
if !exit_status.success() {
eprintln!("Error: Running ar failed");
process::exit(1);
}
// Parse the kernel's Cargo.toml which is given to us by bootimage
let mut bootloader_config = BootloaderConfig::default();
match env::var("KERNEL_MANIFEST") {
Err(env::VarError::NotPresent) => {
panic!("The KERNEL_MANIFEST environment variable must be set for building the bootloader.\n\n\
If you use `bootimage` for building you need at least version 0.7.7. You can \
update `bootimage` by running `cargo install bootimage --force`.");
}
Err(env::VarError::NotUnicode(_)) => {
panic!("The KERNEL_MANIFEST environment variable contains invalid unicode")
}
Ok(path) => {
println!("cargo:rerun-if-changed={}", path);
let contents = fs::read_to_string(&path).expect(&format!(
"failed to read kernel manifest file (path: {})",
path
));
let manifest = contents
.parse::<Value>()
.expect("failed to parse kernel's Cargo.toml");
let table = manifest
.get("package")
.and_then(|table| table.get("metadata"))
.and_then(|table| table.get("bootloader"))
.and_then(|table| table.as_table());
if let Some(table) = table {
parse_to_config(&mut bootloader_config, table);
}
}
}
// Configure constants for the bootloader
// We leave some variables as Option<T> rather than hardcoding their defaults so that they
// can be calculated dynamically by the bootloader.
let file_path = out_dir.join("bootloader_config.rs");
let mut file = File::create(file_path).expect("failed to create bootloader_config.rs");
file.write_all(
format!(
"const PHYSICAL_MEMORY_OFFSET: Option<u64> = {:?};
const KERNEL_STACK_ADDRESS: Option<u64> = {:?};
const KERNEL_STACK_SIZE: u64 = {};
const BOOT_INFO_ADDRESS: Option<u64> = {:?};",
bootloader_config.physical_memory_offset,
bootloader_config.kernel_stack_address,
bootloader_config.kernel_stack_size.unwrap_or(512), // size is in number of pages
bootloader_config.boot_info_address,
)
.as_bytes(),
)
.expect("write to bootloader_config.rs failed");
// pass link arguments to rustc
println!("cargo:rustc-link-search=native={}", out_dir.display());
println!(
"cargo:rustc-link-lib=static=kernel_bin-{}",
kernel_file_name
);
println!("cargo:rerun-if-env-changed=KERNEL");
println!("cargo:rerun-if-env-changed=KERNEL_MANIFEST");
println!("cargo:rerun-if-changed={}", kernel.display());
println!("cargo:rerun-if-changed=build.rs");
}
azure_backoff.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"net/http"
"regexp"
"strings"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
"github.com/Azure/go-autorest/autorest/to"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog/v2"
azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
"sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
const (
// not active means the instance is being deleted from Azure VMSS.
vmssVMNotActiveErrorMessage = "not an active Virtual Machine Scale Set VM instanceId"
// operationCanceledErrorMessage means the operation is canceled by another new operation.
operationCanceledErrorMessage = "canceledandsupersededduetoanotheroperation"
cannotDeletePublicIPErrorMessageCode = "PublicIPAddressCannotBeDeleted"
referencedResourceNotProvisionedMessageCode = "ReferencedResourceNotProvisioned"
)
var (
pipErrorMessageRE = regexp.MustCompile(`(?:.*)/subscriptions/(?:.*)/resourceGroups/(.*)/providers/Microsoft.Network/publicIPAddresses/([^\s]+)(?:.*)`)
)
// RequestBackoff returns the configured resource request backoff.
// If backoff is disabled in the cloud provider, it returns a Backoff with
// Steps = 1, so that the requested command executes at least once.
func (az *Cloud) RequestBackoff() (resourceRequestBackoff wait.Backoff) {
if az.CloudProviderBackoff {
return az.ResourceRequestBackoff
}
resourceRequestBackoff = wait.Backoff{
Steps: 1,
}
return resourceRequestBackoff
}
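// Usage sketch (illustrative): callers hand the result to
// wait.ExponentialBackoff, so even with backoff disabled the wrapped call
// still executes exactly once:
//
//   err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
//       // issue the Azure request; return true to stop, false to retry
//       return true, nil
//   })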
// Event creates an event for the specified object.
func (az *Cloud) Event(obj runtime.Object, eventType, reason, message string) {
if obj != nil && reason != "" {
az.eventRecorder.Event(obj, eventType, reason, message)
}
}
// GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry
func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName, crt azcache.AzureCacheReadType) (compute.VirtualMachine, error) {
var machine compute.VirtualMachine
var retryErr error
err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
machine, retryErr = az.getVirtualMachine(name, crt)
if retryErr == cloudprovider.InstanceNotFound {
return true, cloudprovider.InstanceNotFound
}
if retryErr != nil {
klog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
return false, nil
}
klog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name)
return true, nil
})
if err == wait.ErrWaitTimeout {
err = retryErr
}
return machine, err
}
// ListVirtualMachines invokes az.VirtualMachinesClient.List with exponential backoff retry
func (az *Cloud) ListVirtualMachines(resourceGroup string) ([]compute.VirtualMachine, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
allNodes, rerr := az.VirtualMachinesClient.List(ctx, resourceGroup)
if rerr != nil {
klog.Errorf("VirtualMachinesClient.List(%v) failure with err=%v", resourceGroup, rerr)
return nil, rerr.Error()
}
klog.V(2).Infof("VirtualMachinesClient.List(%v) success", resourceGroup)
return allNodes, nil
}
// getPrivateIPsForMachine is a wrapper, with optional backoff, for getting
// the private IP list of a node by name.
func (az *Cloud) getPrivateIPsForMachine(nodeName types.NodeName) ([]string, error) {
return az.getPrivateIPsForMachineWithRetry(nodeName)
}
func (az *Cloud) getPrivateIPsForMachineWithRetry(nodeName types.NodeName) ([]string, error) {
var privateIPs []string
err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
var retryErr error
privateIPs, retryErr = az.VMSet.GetPrivateIPsByNodeName(string(nodeName))
if retryErr != nil {
// won't retry since the instance doesn't exist on Azure.
if retryErr == cloudprovider.InstanceNotFound {
return true, retryErr
}
klog.Errorf("GetPrivateIPsByNodeName(%s): backoff failure, will retry, err=%v", nodeName, retryErr)
return false, nil
}
klog.V(3).Infof("GetPrivateIPsByNodeName(%s): backoff success", nodeName)
return true, nil
})
return privateIPs, err
}
func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, string, error) {
return az.GetIPForMachineWithRetry(nodeName)
}
// GetIPForMachineWithRetry invokes az.getIPForMachine with exponential backoff retry
func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string, error) {
var ip, publicIP string
err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
var retryErr error
ip, publicIP, retryErr = az.VMSet.GetIPByNodeName(string(name))
if retryErr != nil {
klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
return false, nil
}
klog.V(3).Infof("GetIPForMachineWithRetry(%s): backoff success", name)
return true, nil
})
return ip, publicIP, err
}
// CreateOrUpdateSecurityGroup invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateSecurityGroup(sg network.SecurityGroup) error {
ctx, cancel := getContextWithCancel()
defer cancel()
rerr := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.SecurityGroupResourceGroup, *sg.Name, sg, to.String(sg.Etag))
klog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
if rerr == nil {
// Invalidate the cache right after updating
_ = az.nsgCache.Delete(*sg.Name)
return nil
}
// Invalidate the cache because ETAG precondition mismatch.
if rerr.HTTPStatusCode == http.StatusPreconditionFailed {
klog.V(3).Infof("SecurityGroup cache for %s is cleaned up because of http.StatusPreconditionFailed", *sg.Name)
_ = az.nsgCache.Delete(*sg.Name)
}
// Invalidate the cache because another new operation has canceled the current request.
if strings.Contains(strings.ToLower(rerr.Error().Error()), operationCanceledErrorMessage) {
klog.V(3).Infof("SecurityGroup cache for %s is cleaned up because CreateOrUpdateSecurityGroup is canceled by another operation", *sg.Name)
_ = az.nsgCache.Delete(*sg.Name)
}
return rerr.Error()
}
// CreateOrUpdateLB invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) error {
ctx, cancel := getContextWithCancel()
defer cancel()
rgName := az.getLoadBalancerResourceGroup()
rerr := az.LoadBalancerClient.CreateOrUpdate(ctx, rgName, to.String(lb.Name), lb, to.String(lb.Etag))
klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
if rerr == nil {
// Invalidate the cache right after updating
_ = az.lbCache.Delete(*lb.Name)
return nil
}
// Invalidate the cache because ETAG precondition mismatch.
if rerr.HTTPStatusCode == http.StatusPreconditionFailed {
klog.V(3).Infof("LoadBalancer cache for %s is cleaned up because of http.StatusPreconditionFailed", to.String(lb.Name))
_ = az.lbCache.Delete(*lb.Name)
}
retryErrorMessage := rerr.Error().Error()
// Invalidate the cache because another new operation has canceled the current request.
if strings.Contains(strings.ToLower(retryErrorMessage), operationCanceledErrorMessage) {
klog.V(3).Infof("LoadBalancer cache for %s is cleaned up because CreateOrUpdate is canceled by another operation", to.String(lb.Name))
_ = az.lbCache.Delete(*lb.Name)
}
// The LB update may fail because the referenced PIP is not in the Succeeded provisioning state
if strings.Contains(strings.ToLower(retryErrorMessage), strings.ToLower(referencedResourceNotProvisionedMessageCode)) {
matches := pipErrorMessageRE.FindStringSubmatch(retryErrorMessage)
if len(matches) != 3 {
klog.Warningf("Failed to parse the retry error message %s", retryErrorMessage)
return rerr.Error()
}
pipRG, pipName := matches[1], matches[2]
klog.V(3).Infof("The public IP %s referenced by load balancer %s is not in Succeeded provisioning state, will try to update it", pipName, to.String(lb.Name))
pip, _, err := az.getPublicIPAddress(pipRG, pipName)
if err != nil {
klog.Warningf("Failed to get the public IP %s in resource group %s: %v", pipName, pipRG, err)
return rerr.Error()
}
// Perform a dummy update to fix the provisioning state
err = az.CreateOrUpdatePIP(service, pipRG, pip)
if err != nil {
klog.Warningf("Failed to update the public IP %s in resource group %s: %v", pipName, pipRG, err)
return rerr.Error()
}
// Invalidate the LB cache, return the error, and the controller manager
// would retry the LB update in the next reconcile loop
_ = az.lbCache.Delete(*lb.Name)
}
return rerr.Error()
}
// ListLB invokes az.LoadBalancerClient.List with exponential backoff retry
func (az *Cloud) ListLB(service *v1.Service) ([]network.LoadBalancer, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
rgName := az.getLoadBalancerResourceGroup()
allLBs, rerr := az.LoadBalancerClient.List(ctx, rgName)
if rerr != nil {
az.Event(service, v1.EventTypeWarning, "ListLoadBalancers", rerr.Error().Error())
klog.Errorf("LoadBalancerClient.List(%v) failure with err=%v", rgName, rerr)
return nil, rerr.Error()
}
klog.V(2).Infof("LoadBalancerClient.List(%v) success", rgName)
return allLBs, nil
}
// ListPIP list the PIP resources in the given resource group
func (az *Cloud) ListPIP(service *v1.Service, pipResourceGroup string) ([]network.PublicIPAddress, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
allPIPs, rerr := az.PublicIPAddressesClient.List(ctx, pipResourceGroup)
if rerr != nil {
az.Event(service, v1.EventTypeWarning, "ListPublicIPs", rerr.Error().Error())
klog.Errorf("PublicIPAddressesClient.List(%v) failure with err=%v", pipResourceGroup, rerr)
return nil, rerr.Error()
}
klog.V(2).Infof("PublicIPAddressesClient.List(%v) success", pipResourceGroup)
return allPIPs, nil
}
// CreateOrUpdatePIP invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdatePIP(service *v1.Service, pipResourceGroup string, pip network.PublicIPAddress) error {
ctx, cancel := getContextWithCancel()
defer cancel()
rerr := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, to.String(pip.Name), pip)
klog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, to.String(pip.Name))
if rerr != nil {
klog.Errorf("PublicIPAddressesClient.CreateOrUpdate(%s, %s) failed: %s", pipResourceGroup, to.String(pip.Name), rerr.Error().Error())
az.Event(service, v1.EventTypeWarning, "CreateOrUpdatePublicIPAddress", rerr.Error().Error())
return rerr.Error()
}
return nil
}
// CreateOrUpdateInterface invokes az.InterfacesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateInterface(service *v1.Service, nic network.Interface) error {
ctx, cancel := getContextWithCancel()
defer cancel()
rerr := az.InterfacesClient.CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, nic)
klog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name)
if rerr != nil {
klog.Errorf("InterfacesClient.CreateOrUpdate(%s) failed: %s", *nic.Name, rerr.Error().Error())
az.Event(service, v1.EventTypeWarning, "CreateOrUpdateInterface", rerr.Error().Error())
return rerr.Error()
}
return nil
}
// DeletePublicIP invokes az.PublicIPAddressesClient.Delete with exponential backoff retry
func (az *Cloud) DeletePublicIP(service *v1.Service, pipResourceGroup string, pipName string) error {
ctx, cancel := getContextWithCancel()
defer cancel()
rerr := az.PublicIPAddressesClient.Delete(ctx, pipResourceGroup, pipName)
if rerr != nil {
klog.Errorf("PublicIPAddressesClient.Delete(%s) failed: %s", pipName, rerr.Error().Error())
az.Event(service, v1.EventTypeWarning, "DeletePublicIPAddress", rerr.Error().Error())
if strings.Contains(rerr.Error().Error(), cannotDeletePublicIPErrorMessageCode) {
klog.Warningf("DeletePublicIP for public IP %s failed with error %v, this is because other resources are referencing the public IP. The deletion of the service will continue.", pipName, rerr.Error())
return nil
}
return rerr.Error()
}
return nil
}
// DeleteLB invokes az.LoadBalancerClient.Delete with exponential backoff retry
func (az *Cloud) DeleteLB(service *v1.Service, lbName string) error {
ctx, cancel := getContextWithCancel()
defer cancel()
rgName := az.getLoadBalancerResourceGroup()
rerr := az.LoadBalancerClient.Delete(ctx, rgName, lbName)
if rerr == nil {
// Invalidate the cache right after updating
_ = az.lbCache.Delete(lbName)
return nil
}
klog.Errorf("LoadBalancerClient.Delete(%s) failed: %s", lbName, rerr.Error().Error())
az.Event(service, v1.EventTypeWarning, "DeleteLoadBalancer", rerr.Error().Error())
return rerr.Error()
}
// CreateOrUpdateRouteTable invokes az.RouteTablesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateRouteTable(routeTable network.RouteTable) error {
ctx, cancel := getContextWithCancel()
defer cancel()
rerr := az.RouteTablesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeTable, to.String(routeTable.Etag))
if rerr == nil {
// Invalidate the cache right after updating
_ = az.rtCache.Delete(*routeTable.Name)
return nil
}
// Invalidate the cache because etag mismatch.
if rerr.HTTPStatusCode == http.StatusPreconditionFailed {
klog.V(3).Infof("Route table cache for %s is cleaned up because of http.StatusPreconditionFailed", *routeTable.Name)
_ = az.rtCache.Delete(*routeTable.Name)
}
// Invalidate the cache because another new operation has canceled the current request.
if strings.Contains(strings.ToLower(rerr.Error().Error()), operationCanceledErrorMessage) {
klog.V(3).Infof("Route table cache for %s is cleaned up because CreateOrUpdateRouteTable is canceled by another operation", *routeTable.Name)
_ = az.rtCache.Delete(*routeTable.Name)
}
klog.Errorf("RouteTablesClient.CreateOrUpdate(%s) failed: %v", az.RouteTableName, rerr.Error())
return rerr.Error()
}
// CreateOrUpdateRoute invokes az.RoutesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateRoute(route network.Route) error {
ctx, cancel := getContextWithCancel()
defer cancel()
rerr := az.RoutesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, *route.Name, route, to.String(route.Etag))
klog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name)
if rerr == nil {
_ = az.rtCache.Delete(az.RouteTableName)
return nil
}
if rerr.HTTPStatusCode == http.StatusPreconditionFailed {
klog.V(3).Infof("Route cache for %s is cleaned up because of http.StatusPreconditionFailed", *route.Name)
_ = az.rtCache.Delete(az.RouteTableName)
}
// Invalidate the cache because another new operation has canceled the current request.
if strings.Contains(strings.ToLower(rerr.Error().Error()), operationCanceledErrorMessage) {
klog.V(3).Infof("Route cache for %s is cleaned up because CreateOrUpdateRoute is canceled by another operation", *route.Name)
_ = az.rtCache.Delete(az.RouteTableName)
}
return rerr.Error()
}
// DeleteRouteWithName invokes az.RoutesClient.Delete with exponential backoff retry
func (az *Cloud) DeleteRouteWithName(routeName string) error {
ctx, cancel := getContextWithCancel()
defer cancel()
rerr := az.RoutesClient.Delete(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeName)
klog.V(10).Infof("RoutesClient.Delete(%s,%s): end", az.RouteTableName, routeName)
if rerr == nil {
return nil
}
klog.Errorf("RoutesClient.Delete(%s, %s) failed: %v", az.RouteTableName, routeName, rerr.Error())
return rerr.Error()
}
// CreateOrUpdateVMSS invokes az.VirtualMachineScaleSetsClient.Update().
func (az *Cloud) CreateOrUpdateVMSS(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) *retry.Error {
ctx, cancel := getContextWithCancel()
defer cancel()
// When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error.
// Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it.
klog.V(3).Infof("CreateOrUpdateVMSS: verify the status of the vmss being created or updated")
vmss, rerr := az.VirtualMachineScaleSetsClient.Get(ctx, resourceGroupName, VMScaleSetName)
if rerr != nil {
klog.Errorf("CreateOrUpdateVMSS: error getting vmss(%s): %v", VMScaleSetName, rerr)
return rerr
}
if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, virtualMachineScaleSetsDeallocating) {
klog.V(3).Infof("CreateOrUpdateVMSS: found vmss %s being deleted, skipping", VMScaleSetName)
return nil
}
rerr = az.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, resourceGroupName, VMScaleSetName, parameters)
klog.V(10).Infof("CreateOrUpdateVMSS: VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", VMScaleSetName)
if rerr != nil {
klog.Errorf("CreateOrUpdateVMSS: error CreateOrUpdate vmss(%s): %v", VMScaleSetName, rerr)
return rerr
}
return nil
}
lib.rs | use std::path;
mod alphabet;
mod history;
mod modification;
mod testspace;
mod testspacefile;
mod testspacetextfile;
pub struct TestSpace {
working_directory: path::PathBuf,
history: history::FileHistory,
allow_cleanup: bool,
}
pub struct TestSpaceFile {
path_to_file: path::PathBuf,
history: history::FileHistory,
allow_cleanup: bool,
}
#[derive(Copy, Clone)]
pub enum Alphabet {
Arabic,
Chinese,
Cyrillic,
Latin,
}
pub enum LineModification {
Insert(usize, String),
Remove(usize),
Changed(usize, String, String),
}
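// Sketch (illustrative): recording an insert, a removal, and an in-place
// change as line modifications:
//
//     let _changes = vec![
//         LineModification::Insert(3, String::from("new line")),
//         LineModification::Remove(7),
//         LineModification::Changed(9, String::from("old"), String::from("new")),
//     ];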
pub struct TestSpaceTextFile {
path_to_file: path::PathBuf,
alphabet: Alphabet,
lines_of_text: Vec<String>,
auto_flush: bool, // After each edit do we automatically write the changes to disk
}
pub struct TextModifier {
original: Vec<String>,
modified: Vec<String>,
changes: Vec<LineModification>,
lines_modified: Vec<usize>,
}
http_service_test.rs | // Copyright 2020 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//use std::net::SocketAddr;
//use std::sync::Arc;
//
use std::fs::File;
use std::io::Read;
use common_exception::Result;
use common_runtime::tokio;
use crate::api::HttpService;
use crate::clusters::Cluster;
use crate::configs::Config;
use crate::servers::Server;
use crate::tests::tls_constants::TEST_CA_CERT;
use crate::tests::tls_constants::TEST_CN_NAME;
use crate::tests::tls_constants::TEST_SERVER_CERT;
use crate::tests::tls_constants::TEST_SERVER_KEY;
use crate::tests::tls_constants::TEST_TLS_CA_CERT;
use crate::tests::tls_constants::TEST_TLS_CLIENT_IDENTITY;
use crate::tests::tls_constants::TEST_TLS_CLIENT_PASSWORD;
// need to support local_addr, but axum_server does not have a local_addr callback
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_http_service_tls_server() -> Result<()> {
let mut conf = Config::default();
conf.query.api_tls_server_key = TEST_SERVER_KEY.to_owned();
conf.query.api_tls_server_cert = TEST_SERVER_CERT.to_owned();
let addr_str = "127.0.0.1:30001";
let cluster = Cluster::create_global(conf.clone())?;
let mut srv = HttpService::create(conf.clone(), cluster.clone());
let listening = srv.start(addr_str.parse()?).await?;
let port = listening.port();
// test cert is issued for "localhost"
let url = format!("https://{}:{}/v1/health", TEST_CN_NAME, port);
// load cert
let mut buf = Vec::new();
File::open(TEST_CA_CERT)?.read_to_end(&mut buf)?;
let cert = reqwest::Certificate::from_pem(&buf).unwrap();
// kick off
let client = reqwest::Client::builder()
.add_root_certificate(cert)
.build()
.unwrap();
let resp = client.get(url).send().await;
assert!(resp.is_ok());
let resp = resp.unwrap();
assert!(resp.status().is_success());
assert_eq!("/v1/health", resp.url().path());
Ok(())
}
// client cannot communicate with server without ca certificate
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_http_service_tls_server_failed_case_1() -> Result<()> {
let mut conf = Config::default();
conf.query.api_tls_server_key = TEST_SERVER_KEY.to_owned();
conf.query.api_tls_server_cert = TEST_SERVER_CERT.to_owned();
let addr_str = "127.0.0.1:30010";
let cluster = Cluster::create_global(conf.clone())?;
let mut srv = HttpService::create(conf.clone(), cluster.clone());
let listening = srv.start(addr_str.parse()?).await?;
let port = listening.port();
// test cert is issued for "localhost"
let url = format!("https://{}:{}/v1/health", TEST_CN_NAME, port);
// kick off
let client = reqwest::Client::builder().build().unwrap();
let resp = client.get(url).send().await;
assert!(resp.is_err());
Ok(())
}
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_http_service_tls_server_mutual_tls() -> Result<()> {
use crate::tests::tls_constants::TEST_TLS_SERVER_CERT;
use crate::tests::tls_constants::TEST_TLS_SERVER_KEY;
let mut conf = Config::default();
conf.query.api_tls_server_key = TEST_TLS_SERVER_KEY.to_owned();
conf.query.api_tls_server_cert = TEST_TLS_SERVER_CERT.to_owned();
conf.query.api_tls_server_root_ca_cert = TEST_TLS_CA_CERT.to_owned();
let addr_str = "127.0.0.1:30011";
let cluster = Cluster::create_global(conf.clone())?;
let mut srv = HttpService::create(conf.clone(), cluster.clone());
let listening = srv.start(addr_str.parse()?).await?;
let port = listening.port();
// test cert is issued for "localhost"
let url = format!("https://{}:{}/v1/health", TEST_CN_NAME, port);
// get identity
let mut buf = Vec::new();
File::open(TEST_TLS_CLIENT_IDENTITY)?.read_to_end(&mut buf)?;
let pkcs12 = reqwest::Identity::from_pkcs12_der(&buf, TEST_TLS_CLIENT_PASSWORD).unwrap();
let mut buf = Vec::new();
File::open(TEST_TLS_CA_CERT)?.read_to_end(&mut buf)?;
let cert = reqwest::Certificate::from_pem(&buf).unwrap();
// kick off
let client = reqwest::Client::builder()
.identity(pkcs12)
.add_root_certificate(cert)
.build()
.expect("preconfigured rustls tls");
let resp = client.get(url).send().await;
assert!(resp.is_ok());
let resp = resp.unwrap();
assert!(resp.status().is_success());
assert_eq!("/v1/health", resp.url().path());
Ok(())
}
// cannot connect with the server unless it has a CA-signed identity
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_http_service_tls_server_mutual_tls_failed() -> Result<()> {
use crate::tests::tls_constants::TEST_TLS_SERVER_CERT;
use crate::tests::tls_constants::TEST_TLS_SERVER_KEY;
let mut conf = Config::default();
conf.query.api_tls_server_key = TEST_TLS_SERVER_KEY.to_owned();
conf.query.api_tls_server_cert = TEST_TLS_SERVER_CERT.to_owned();
conf.query.api_tls_server_root_ca_cert = TEST_TLS_CA_CERT.to_owned();
let addr_str = "127.0.0.1:30012";
let cluster = Cluster::create_global(conf.clone())?;
let mut srv = HttpService::create(conf.clone(), cluster.clone());
let listening = srv.start(addr_str.parse()?).await?;
let port = listening.port();
// test cert is issued for "localhost"
let url = format!("https://{}:{}/v1/health", TEST_CN_NAME, port);
let mut buf = Vec::new();
File::open(TEST_TLS_CA_CERT)?.read_to_end(&mut buf)?;
let cert = reqwest::Certificate::from_pem(&buf).unwrap();
// kick off
let client = reqwest::Client::builder()
.add_root_certificate(cert)
.build()
.expect("preconfigured rustls tls");
let resp = client.get(url).send().await;
assert!(resp.is_err());
Ok(())
}
time_converter.py | from django.template.defaulttags import register
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
@register.filter
def get_item_dict(dictionary, key):
return {'data': dictionary.get(key)}
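# Template usage sketch (names are illustrative):
#   {{ my_dict|get_item:some_key }}
#   {{ my_dict|get_item_dict:some_key }}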
flink.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Task Flink."""
from typing import Dict, Optional
from pydolphinscheduler.constants import TaskType
from pydolphinscheduler.core.task import Task
from pydolphinscheduler.java_gateway import launch_gateway
class ProgramType(str):
"""Type of program flink runs; for now it just contains `JAVA`, `SCALA` and `PYTHON`."""
JAVA = "JAVA"
SCALA = "SCALA"
PYTHON = "PYTHON"
class FlinkVersion(str):
"""Flink version; for now it just contains `HIGHT` and `LOW`."""
LOW_VERSION = "<1.10"
HIGHT_VERSION = ">=1.10"
class DeployMode(str):
"""Flink deploy mode; for now it just contains `LOCAL` and `CLUSTER`."""
LOCAL = "local"
CLUSTER = "cluster"
class Flink(Task):
"""Task flink object, declare behavior for flink task to dolphinscheduler."""
_task_custom_attr = {
"main_class",
"main_jar",
"deploy_mode",
"flink_version",
"slot",
"task_manager",
"job_manager_memory",
"task_manager_memory",
"app_name",
"program_type",
"parallelism",
"main_args",
"others",
}
def __init__(
self,
name: str,
main_class: str,
main_package: str,
program_type: Optional[ProgramType] = ProgramType.SCALA,
deploy_mode: Optional[DeployMode] = DeployMode.CLUSTER,
flink_version: Optional[FlinkVersion] = FlinkVersion.LOW_VERSION,
app_name: Optional[str] = None,
job_manager_memory: Optional[str] = "1G",
task_manager_memory: Optional[str] = "2G",
slot: Optional[int] = 1,
task_manager: Optional[int] = 2,
parallelism: Optional[int] = 1,
main_args: Optional[str] = None,
others: Optional[str] = None,
*args,
**kwargs
):
super().__init__(name, TaskType.FLINK, *args, **kwargs)
self.main_class = main_class
self.main_package = main_package
self.program_type = program_type
self.deploy_mode = deploy_mode
self.flink_version = flink_version
self.app_name = app_name
self.job_manager_memory = job_manager_memory
self.task_manager_memory = task_manager_memory
self.slot = slot
self.task_manager = task_manager
self.parallelism = parallelism
self.main_args = main_args
self.others = others
self._resource = {}
@property
def main_jar(self) -> Dict:
"""Return main package of dict."""
resource_info = self.get_resource_info(self.program_type, self.main_package)
return {"id": resource_info.get("id")}
def get_resource_info(self, program_type, main_package) -> Dict:
"""Get resource info from java gateway, contains resource id, name."""
if not self._resource:
self._resource = launch_gateway().entry_point.getResourcesFileInfo(
program_type,
main_package,
)
return self._resource
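# Usage sketch (names and jar are illustrative):
#
#   task = Flink(
#       name="wordcount",
#       main_class="org.example.WordCount",
#       main_package="wordcount.jar",
#       program_type=ProgramType.JAVA,
#       deploy_mode=DeployMode.CLUSTER,
#   )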
differentiate.py | # Third party imports
from gmprocess.metrics.transform.transform import Transform
from gmprocess.stationstream import StationStream
from gmprocess.stationtrace import StationTrace
class Differentiate(Transform):
"""Class for computing the derivative."""
def __init__(self, transform_data, damping=None, period=None, times=None):
"""
Args:
transform_data (obspy.core.stream.Stream or numpy.ndarray): Intensity
measurement component.
damping (float): Damping for spectral amplitude calculations.
Default is None.
period (float): Period for spectral amplitude calculations.
Default is None.
times (numpy.ndarray): Times for the spectral amplitude calculations.
Default is None.
"""
super().__init__(transform_data, damping=None, period=None, times=None)
self.result = self.get_derivative()
def get_derivative(self):
"""
Calculates the derivative of each trace's data.
Returns:
stream: StationStream with the differentiated data.
"""
stream = StationStream([])
for trace in self.transform_data:
differentiated_trace = trace.differentiate()
differentiated_trace.stats['units'] = 'acc'
strace = StationTrace(data=differentiated_trace.data,
header=differentiated_trace.stats)
stream.append(strace)
return stream
TaobaoDeActivityLuckydrawRequest.go | package gameact
import (
"net/url"
"github.com/bububa/opentaobao/model"
)
/*
Lucky draw APIRequest
taobao.de.activity.luckydraw
Used by the incentive platform to offer a lucky-draw capability externally, including but not limited to Jifenbao, red envelopes, Baodian, Taobao gold coins, Taobao lottery, etc.
*/
type TaobaoDeActivityLuckydrawRequest struct {
model.Params
// Unique event identifier agreed between operations and the content provider
eventKey string
// Timestamp
sequenceId int64
// The user's string ID
accountId string
// Machine/device number
machineId string
// Confirmation signature key
confirmKey string
// Behavior key
behaviorKey string
// Channel
channel string
// Market in which the app is used
market string
// Box model number
deviceModel string
// Magic box distribution channel
distribChannel string
// Magic box UUID
uuid string
}
func NewTaobaoDeActivityLuckydrawRequest() *TaobaoDeActivityLuckydrawRequest {
return &TaobaoDeActivityLuckydrawRequest{
Params: model.NewParams(),
}
}
func (r TaobaoDeActivityLuckydrawRequest) GetApiMethodName() string {
return "taobao.de.activity.luckydraw"
}
func (r TaobaoDeActivityLuckydrawRequest) GetApiParams() url.Values {
params := url.Values{}
for k, v := range r.GetRawParams() {
params.Set(k, v.String())
}
return params
}
func (r *TaobaoDeActivityLuckydrawRequest) SetEventKey(eventKey string) error {
r.eventKey = eventKey
r.Set("event_key", eventKey)
return nil
}
func (r TaobaoDeActivityLuckydrawRequest) GetEventKey() string {
return r.eventKey
}
func (r *TaobaoDeActivityLuckydrawRequest) SetSequenceId(sequenceId int64) error {
r.sequenceId = sequenceId
r.Set("sequence_id", sequenceId)
return nil
}
func (r TaobaoDeActivityLuckydrawRequest) GetSequenceId() int64 {
return r.sequenceId
}
func (r *TaobaoDeActivityLuckydrawRequest) SetAccountId(accountId string) error {
r.accountId = accountId
r.Set("account_id", accountId)
return nil
}
func (r TaobaoDeActivityLuckydrawRequest) GetAccountId() string {
return r.accountId
}
func (r *TaobaoDeActivityLuckydrawRequest) SetMachineId(machineId string) error {
r.machineId = machineId
r.Set("machine_id", machineId)
return nil
}
func (r TaobaoDeActivityLuckydrawRequest) GetMachineId() string {
return r.machineId
}
func (r *TaobaoDeActivityLuckydrawRequest) SetConfirmKey(confirmKey string) error {
r.confirmKey = confirmKey
r.Set("confirm_key", confirmKey)
return nil
}
func (r TaobaoDeActivityLuckydrawRequest) GetConfirmKey() string {
return r.confirmKey
}
func (r *TaobaoDeActivityLuckydrawRequest) SetBehaviorKey(behaviorKey string) error {
r.behaviorKey = behaviorKey
r.Set("behavior_key", behaviorKey)
return nil
}
func (r TaobaoDeActivityLuckydrawRequest) GetBehaviorKey() string {
return r.behaviorKey
}
func (r *TaobaoDeActivityLuckydrawRequest) SetChannel(channel string) error {
r.channel = channel
r.Set("channel", channel)
return nil
}
func (r TaobaoDeActivityLuckydrawRequest) GetChannel() string {
return r.channel
}
func (r *TaobaoDeActivityLuckydrawRequest) SetMarket(market string) error {
r.market = market
r.Set("market", market)
return nil
}
func (r TaobaoDeActivityLuckydrawRequest) GetMarket() string {
return r.market
}
func (r *TaobaoDeActivityLuckydrawRequest) SetDeviceModel(deviceModel string) error {
r.deviceModel = deviceModel
r.Set("device_model", deviceModel)
return nil
}
func (r TaobaoDeActivityLuckydrawRequest) GetDeviceModel() string {
return r.deviceModel
}
func (r *TaobaoDeActivityLuckydrawRequest) SetDistribChannel(distribChannel string) error {
r.distribChannel = distribChannel
r.Set("distrib_channel", distribChannel)
return nil
}
func (r TaobaoDeActivityLuckydrawRequest) GetDistribChannel() string {
return r.distribChannel
}
func (r *TaobaoDeActivityLuckydrawRequest) SetUuid(uuid string) error {
r.uuid = uuid
r.Set("uuid", uuid)
return nil
}
func (r TaobaoDeActivityLuckydrawRequest) GetUuid() string {
return r.uuid
}
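// Usage sketch (added for illustration, not from the original source): build a
// request, set a few params, then read the method name and wire-format values.
// All literal values below are placeholders; the fmt import is assumed.
func ExampleLuckydraw() {
	req := NewTaobaoDeActivityLuckydrawRequest()
	_ = req.SetEventKey("demo-event")    // unique event id agreed with the CP
	_ = req.SetSequenceId(1620000000000) // timestamp-style sequence id
	_ = req.SetAccountId("demo-account")
	fmt.Println(req.GetApiMethodName()) // taobao.de.activity.luckydraw
	fmt.Println(req.GetApiParams().Encode())
}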
radio-group.tsx | import { ThemingProps, chakra, PropsOf, forwardRef } from "@chakra-ui/system"
import { createContext, __DEV__, cx } from "@chakra-ui/utils"
import * as React from "react"
import {
useRadioGroup,
UseRadioGroupProps,
UseRadioGroupReturn,
} from "./use-radio-group"
export interface RadioGroupContext
extends Pick<UseRadioGroupReturn, "onChange" | "value" | "name">,
Omit<ThemingProps, "orientation"> {}
const [RadioGroupProvider, useRadioGroupContext] = createContext<
RadioGroupContext
>({
name: "RadioGroupContext",
strict: false,
})
export { useRadioGroupContext }
export interface RadioGroupProps
extends UseRadioGroupProps,
Omit<
PropsOf<typeof chakra.div>,
"onChange" | "value" | "defaultValue" | "children"
>,
Omit<ThemingProps, "orientation"> {
children: React.ReactNode
}
/**
* Used for multiple radios which are bound in one group,
* and it indicates which option is selected.
*
* @see Docs https://chakra-ui.com/components/radio
*/
export const RadioGroup = forwardRef<RadioGroupProps, "div">(
function RadioGroup(props, ref) {
const { colorScheme, size, variant, children, className, ...rest } = props
const { value, onChange, getRootProps, name, htmlProps } = useRadioGroup(
rest,
)
const group = React.useMemo(
() => ({
name,
size,
onChange,
colorScheme,
value,
variant,
}),
[size, name, onChange, colorScheme, value, variant],
)
const groupProps = getRootProps(htmlProps, ref)
const _className = cx("chakra-radio-group", className)
return (
<RadioGroupProvider value={group}>
<chakra.div {...groupProps} className={_className}>
{children}
</chakra.div>
</RadioGroupProvider>
)
},
)
if (__DEV__) {
RadioGroup.displayName = "RadioGroup"
}
utils.go | package utils
import (
"encoding/json"
"log"
"net/http"
"github.com/TheGolurk/infraApi/models"
)
func DisplayMessage(w http.ResponseWriter, m models.Message) {
	JSON, err := json.Marshal(m)
	if err != nil {
		// Don't log.Fatalf here: a marshalling failure on one response
		// should not bring down the whole server.
		log.Printf("error converting the message: %s", err)
		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
		return
	}
	w.WriteHeader(m.Code)
	w.Write(JSON)
}
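// Usage sketch (added for illustration): wiring DisplayMessage into a handler.
// Assumes models.Message exposes at least the Code field used above; any
// other field would be hypothetical.
func healthHandler(w http.ResponseWriter, r *http.Request) {
	DisplayMessage(w, models.Message{Code: http.StatusOK})
}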
|
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'deploy_ml.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
data_source.py | import requests
import os
import time
def get_page(i):
    url = r'https://shr32taah3.execute-api.us-east-1.amazonaws.com/Prod/applications/browse?pageSize=12&pageNumber=%d&searchText=&category=&runtime=&verified=&sortFields='
    # Parse the JSON body directly; eval() on the raw text with true/false
    # string replacement was fragile and unsafe.
    return requests.get(url % i).json()
data = get_page(3)
for i in range(1, 4):
data = get_page(i)
for item in data["applications"]:
print(item["deploymentCount"], end="\t")
print(item["name"])
print(item["homePageUrl"]) | # time.sleep(3) | print()
# os.popen("git clone " + item["homePageUrl"]) |
threefish256_ref.go | // Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
package threefish
func (t *threefish256) Encrypt(dst, src []byte) {
var block [4]uint64
bytesToBlock256(&block, src)
Encrypt256(&block, &(t.keys), &(t.tweak))
block256ToBytes(dst, &block)
}
func (t *threefish256) Decrypt(dst, src []byte) {
var block [4]uint64
bytesToBlock256(&block, src)
Decrypt256(&block, &(t.keys), &(t.tweak))
block256ToBytes(dst, &block)
}
func newCipher256(tweak *[TweakSize]byte, key []byte) *threefish256 {
c := new(threefish256)
c.tweak[0] = uint64(tweak[0]) | uint64(tweak[1])<<8 | uint64(tweak[2])<<16 | uint64(tweak[3])<<24 |
uint64(tweak[4])<<32 | uint64(tweak[5])<<40 | uint64(tweak[6])<<48 | uint64(tweak[7])<<56
c.tweak[1] = uint64(tweak[8]) | uint64(tweak[9])<<8 | uint64(tweak[10])<<16 | uint64(tweak[11])<<24 |
uint64(tweak[12])<<32 | uint64(tweak[13])<<40 | uint64(tweak[14])<<48 | uint64(tweak[15])<<56
c.tweak[2] = c.tweak[0] ^ c.tweak[1]
for i := range c.keys[:4] {
j := i * 8
c.keys[i] = uint64(key[j]) | uint64(key[j+1])<<8 | uint64(key[j+2])<<16 | uint64(key[j+3])<<24 |
uint64(key[j+4])<<32 | uint64(key[j+5])<<40 | uint64(key[j+6])<<48 | uint64(key[j+7])<<56
}
c.keys[4] = C240 ^ c.keys[0] ^ c.keys[1] ^ c.keys[2] ^ c.keys[3]
return c
}
// Encrypt256 encrypts the 4 words of block using the expanded 256 bit key and
// the 128 bit tweak. The keys[4] must be keys[0] xor keys[1] xor ... keys[3] xor C240.
// The tweak[2] must be tweak[0] xor tweak[1].
func Encrypt256(block *[4]uint64, keys *[5]uint64, tweak *[3]uint64) {
b0, b1, b2, b3 := block[0], block[1], block[2], block[3]
for r := 0; r < 17; r++ {
b0 += keys[r%5]
b1 += keys[(r+1)%5] + tweak[r%3]
b2 += keys[(r+2)%5] + tweak[(r+1)%3]
b3 += keys[(r+3)%5] + uint64(r)
b0 += b1
b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0
b2 += b3
b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2
b0 += b3
b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0
b2 += b1
b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2
b0 += b1
b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0
b2 += b3
b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2
b0 += b3
b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0
b2 += b1
b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2
r++
b0 += keys[r%5]
b1 += keys[(r+1)%5] + tweak[r%3]
b2 += keys[(r+2)%5] + tweak[(r+1)%3]
b3 += keys[(r+3)%5] + uint64(r)
b0 += b1
b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0
b2 += b3
b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2
b0 += b3
b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0
b2 += b1
b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2
b0 += b1
b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0
b2 += b3
b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2
b0 += b3
b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0
b2 += b1
b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2
}
b0 += keys[3]
b1 += keys[4] + tweak[0]
b2 += keys[0] + tweak[1]
b3 += keys[1] + uint64(18)
block[0], block[1], block[2], block[3] = b0, b1, b2, b3
}
// Decrypt256 decrypts the 4 words of block using the expanded 256 bit key and
// the 128 bit tweak. The keys[4] must be keys[0] xor keys[1] xor ... keys[3] xor C240.
// The tweak[2] must be tweak[0] xor tweak[1].
func Decrypt256(block *[4]uint64, keys *[5]uint64, tweak *[3]uint64) {
	b0, b1, b2, b3 := block[0], block[1], block[2], block[3]
	var tmp uint64
	for r := 18; r > 1; r-- {
		b0 -= keys[r%5]
		b1 -= keys[(r+1)%5] + tweak[r%3]
		b2 -= keys[(r+2)%5] + tweak[(r+1)%3]
		b3 -= keys[(r+3)%5] + uint64(r)
		tmp = b1 ^ b2
		b1 = (tmp >> 32) | (tmp << (64 - 32))
		b2 -= b1
		tmp = b3 ^ b0
		b3 = (tmp >> 32) | (tmp << (64 - 32))
		b0 -= b3
		tmp = b3 ^ b2
		b3 = (tmp >> 22) | (tmp << (64 - 22))
		b2 -= b3
		tmp = b1 ^ b0
		b1 = (tmp >> 58) | (tmp << (64 - 58))
		b0 -= b1
		tmp = b1 ^ b2
		b1 = (tmp >> 12) | (tmp << (64 - 12))
		b2 -= b1
		tmp = b3 ^ b0
		b3 = (tmp >> 46) | (tmp << (64 - 46))
		b0 -= b3
		tmp = b3 ^ b2
		b3 = (tmp >> 33) | (tmp << (64 - 33))
		b2 -= b3
		tmp = b1 ^ b0
		b1 = (tmp >> 25) | (tmp << (64 - 25))
		b0 -= b1
		r--
		b0 -= keys[r%5]
		b1 -= keys[(r+1)%5] + tweak[r%3]
		b2 -= keys[(r+2)%5] + tweak[(r+1)%3]
		b3 -= keys[(r+3)%5] + uint64(r)
		tmp = b1 ^ b2
		b1 = (tmp >> 37) | (tmp << (64 - 37))
		b2 -= b1
		tmp = b3 ^ b0
		b3 = (tmp >> 5) | (tmp << (64 - 5))
		b0 -= b3
		tmp = b3 ^ b2
		b3 = (tmp >> 40) | (tmp << (64 - 40))
		b2 -= b3
		tmp = b1 ^ b0
		b1 = (tmp >> 23) | (tmp << (64 - 23))
		b0 -= b1
		tmp = b1 ^ b2
		b1 = (tmp >> 57) | (tmp << (64 - 57))
		b2 -= b1
		tmp = b3 ^ b0
		b3 = (tmp >> 52) | (tmp << (64 - 52))
		b0 -= b3
		tmp = b3 ^ b2
		b3 = (tmp >> 16) | (tmp << (64 - 16))
		b2 -= b3
		tmp = b1 ^ b0
		b1 = (tmp >> 14) | (tmp << (64 - 14))
		b0 -= b1
	}
	b0 -= keys[0]
	b1 -= keys[1] + tweak[0]
	b2 -= keys[2] + tweak[1]
	b3 -= keys[3]
	block[0], block[1], block[2], block[3] = b0, b1, b2, b3
}
// UBI256 does a Threefish256 encryption of the given block using
// the chain values hVal and the tweak.
// The chain values are updated through hVal[i] = block[i] ^ Enc(block)[i]
func UBI256(block *[4]uint64, hVal *[5]uint64, tweak *[3]uint64) {
b0, b1, b2, b3 := block[0], block[1], block[2], block[3]
hVal[4] = C240 ^ hVal[0] ^ hVal[1] ^ hVal[2] ^ hVal[3]
tweak[2] = tweak[0] ^ tweak[1]
Encrypt256(block, hVal, tweak)
hVal[0] = block[0] ^ b0
hVal[1] = block[1] ^ b1
hVal[2] = block[2] ^ b2
hVal[3] = block[3] ^ b3
}
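// Round-trip sketch (added for illustration, not part of the original file):
// exercises the Encrypt256/Decrypt256 pair with the invariants stated in the
// doc comments, keys[4] = keys[0]^keys[1]^keys[2]^keys[3]^C240 and
// tweak[2] = tweak[0]^tweak[1]. Returns true when decryption inverts
// encryption; the key, tweak and block values are placeholders.
func roundTrip256() bool {
	keys := [5]uint64{1, 2, 3, 4, 0}
	keys[4] = C240 ^ keys[0] ^ keys[1] ^ keys[2] ^ keys[3]
	tweak := [3]uint64{10, 20, 0}
	tweak[2] = tweak[0] ^ tweak[1]
	block := [4]uint64{0xdead, 0xbeef, 0xcafe, 0xf00d}
	orig := block
	Encrypt256(&block, &keys, &tweak)
	Decrypt256(&block, &keys, &tweak)
	return block == orig
}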
resource.ts | import {Observable, of} from 'rxjs';
import {catchError, concatAll, map} from 'rxjs/operators';
/**
* Resource - Parameterized wrapper for an async loading resource.
* Initial loading state and possible success, failure and empty outcomes.
*/
export class Resource<T> {
get isFailure(): boolean {
return !this.isLoading && this.error !== null;
}
get isEmpty(): boolean {
if (this.isLoading || this.isFailure) {
return false;
}
if (this.data === null || this.data === undefined) {
return true;
}
if (typeof this.data === 'string') {
return this.data.length === 0;
}
if (Array.isArray(this.data)) {
return this.data.length === 0;
}
return false;
}
get isSuccess(): boolean {
return !this.isLoading && !this.isFailure && !this.isEmpty;
}
readonly isLoading: boolean;
readonly data: T;
readonly error: any;
constructor(data: T, error: any = null, loading: boolean = false) {
this.data = data;
this.error = error;
this.isLoading = loading;
}
/**
* Dynamically sets triggers for possible resource outcomes based on a PartialChecker structure
* @param options - PartialChecker given for the current resource
*/
on(options: PartialResourceCallbacks<T>): void {
if (options.loading && this.isLoading) {
options.loading();
}
if (options.success && this.isSuccess) {
options.success(this.data);
}
if (options.failure && this.isFailure) {
options.failure(this.error);
}
if (options.empty && this.isEmpty) {
options.empty();
}
if (options.always && !this.isLoading) {
options.always();
}
}
}
// https://stackoverflow.com/questions/48230773/how-to-create-a-partial-like-that-requires-a-single-property-to-be-set
type AtLeastOne<T, U = {[K in keyof T]: Pick<T, K> }> = Partial<T> & U[keyof U];
type ResourceCallbacks<T> = {
success: (data: T) => void;
failure: (err: any) => void;
empty: () => void;
loading: () => void;
always: () => void;
}
/**
 * Partial type that expects at least one attribute from ResourceCallbacks<T>
*/
type PartialResourceCallbacks<T> = AtLeastOne<ResourceCallbacks<T>>
/**
* Maps PartialChecker options to a function.
* To be used on a subscription
* @param options - Options to check
*/
export function onResource<T>(options: PartialResourceCallbacks<T>): ((res: Resource<T>) => void) {
return (res: Resource<T>) => res.on(options);
}
/**
* Transforms any element to a resource.
* Alternative to constructor that can be used as a map pipe in Observables.
* @param data T
*/
export function toResource<T>(data: T): Resource<T> {
return new Resource(data);
}
/**
* Transforms any error to a failure resource.
* Alternative to constructor that can be used as a map pipe in Observables.
* @param error any
*/
export function toFailure<T>(error: any): Resource<T> {
return new Resource<T>(null, error);
}
export const FAILURE = new Resource<any>(null, true);
export const LOADING = new Resource<any>(null, null, true);
/* Observable functions */
/**
* Wrap resource logic around an HTTP observable
* First response is always loading, followed up by a resource or a failure resource
* @param request Observable - Request observable
*/
export function resourceRequestObservable<T>(request: Observable<T>): Observable<Resource<T>> {
return of(
of(LOADING),
request
.pipe(
map(toResource),
catchError((error: any) => of(toFailure<T>(error)))
)
).pipe(concatAll());
}
server.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""server.py initialises the appengine server for ClusterFuzz."""
import urllib
import webapp2
from webapp2_extras import routes
from base import utils
from config import local_config
from handlers import base_handler
from handlers import bots
from handlers import commit_range
from handlers import configuration
from handlers import corpora
from handlers import coverage_report
from handlers import crash_stats
from handlers import domain_verifier
from handlers import download
from handlers import fuzzer_stats
from handlers import fuzzers
from handlers import gcs_redirector
from handlers import help_redirector
from handlers import home
from handlers import issue_redirector
from handlers import jobs
from handlers import parse_stacktrace
from handlers import report_csp_failure
from handlers import revisions_info
from handlers import testcase_list
from handlers import upload_testcase
from handlers import viewer
from handlers.cron import backup
from handlers.cron import build_crash_stats
from handlers.cron import cleanup
from handlers.cron import corpus_backup
from handlers.cron import fuzzer_weights
from handlers.cron import load_bigquery_stats
from handlers.cron import manage_vms
from handlers.cron import ml_train
from handlers.cron import oss_fuzz_apply_ccs
from handlers.cron import oss_fuzz_build_status
from handlers.cron import oss_fuzz_setup
from handlers.cron import predator_pull
from handlers.cron import recurring_tasks
from handlers.cron import schedule_corpus_pruning
from handlers.cron import triage
from handlers.performance_report import (show as show_performance_report)
from handlers.testcase_detail import (crash_stats as crash_stats_on_testcase)
from handlers.testcase_detail import (show as show_testcase)
from handlers.testcase_detail import create_issue
from handlers.testcase_detail import delete
from handlers.testcase_detail import download_testcase
from handlers.testcase_detail import find_similar_issues
from handlers.testcase_detail import mark_fixed
from handlers.testcase_detail import mark_security
from handlers.testcase_detail import mark_unconfirmed
from handlers.testcase_detail import redo
from handlers.testcase_detail import remove_duplicate
from handlers.testcase_detail import remove_group
from handlers.testcase_detail import remove_issue
from handlers.testcase_detail import update_from_trunk
from handlers.testcase_detail import update_issue
class _TrailingSlashRemover(webapp2.RequestHandler):
def get(self, url):
self.redirect(url)
# TODO(aarya): Remove after all /v2 links are deprecated.
class _V2Remover(webapp2.RequestHandler):
def get(self, url):
self.redirect('/%s?%s' % (url, urllib.urlencode(self.request.params)))
def redirect_to(to_domain):
"""Create a redirect handler to a domain."""
class RedirectHandler(webapp2.RequestHandler):
"""Handler to redirect to domain."""
def get(self, _):
self.redirect(
'https://' + to_domain + self.request.path_qs, permanent=True)
return RedirectHandler
# Add item to the navigation menu. Order is important.
base_handler.add_menu('Testcases', '/testcases')
base_handler.add_menu('Fuzzer Statistics', '/fuzzer-stats')
base_handler.add_menu('Crash Statistics', '/crash-stats')
base_handler.add_menu('Upload Testcase', '/upload-testcase')
if utils.is_chromium():
base_handler.add_menu('Crashes by range', '/commit-range')
if not utils.is_oss_fuzz():
base_handler.add_menu('Fuzzers', '/fuzzers')
base_handler.add_menu('Corpora', '/corpora')
base_handler.add_menu('Bots', '/bots')
base_handler.add_menu('Jobs', '/jobs')
base_handler.add_menu('Configuration', '/configuration')
base_handler.add_menu('Report Bug', '/report-bug')
base_handler.add_menu('Documentation', '/docs')
# We need to separate routes for cron to avoid redirection.
_CRON_ROUTES = [
('/backup', backup.Handler),
('/build-crash-stats', build_crash_stats.Handler),
('/cleanup', cleanup.Handler),
('/corpus-backup/make-public', corpus_backup.MakePublicHandler),
('/fuzzer-stats/cache', fuzzer_stats.RefreshCacheHandler),
('/fuzzer-stats/preload', fuzzer_stats.PreloadHandler),
('/fuzzer-weights', fuzzer_weights.Handler),
('/home-cache', home.RefreshCacheHandler),
('/load-bigquery-stats', load_bigquery_stats.Handler),
('/manage-vms', manage_vms.Handler),
('/oss-fuzz-apply-ccs', oss_fuzz_apply_ccs.Handler),
('/oss-fuzz-build-status', oss_fuzz_build_status.Handler),
('/oss-fuzz-setup', oss_fuzz_setup.Handler),
('/predator-pull', predator_pull.Handler),
('/schedule-corpus-pruning', schedule_corpus_pruning.Handler),
('/schedule-impact-tasks', recurring_tasks.ImpactTasksScheduler),
('/schedule-ml-train-tasks', ml_train.Handler),
('/schedule-progression-tasks', recurring_tasks.ProgressionTasksScheduler),
('/schedule-upload-reports-tasks',
recurring_tasks.UploadReportsTaskScheduler),
('/testcases/cache', testcase_list.CacheHandler),
('/triage', triage.Handler),
]
_ROUTES = [
('/', home.Handler),
('(.*)/$', _TrailingSlashRemover),
('/v2/(.*)', _V2Remover),
(r'/(google.+\.html)$', domain_verifier.Handler),
('/bots', bots.Handler),
('/bots/dead', bots.DeadBotsHandler),
('/commit-range', commit_range.Handler),
('/commit-range/load', commit_range.JsonHandler),
('/configuration', configuration.Handler),
('/add-external-user-permission', configuration.AddExternalUserPermission),
('/delete-external-user-permission',
configuration.DeleteExternalUserPermission),
('/coverage-report/([^/]+)/([^/]+)/([^/]+)(/.*)?', coverage_report.Handler),
('/crash-stats/load', crash_stats.JsonHandler),
('/crash-stats', crash_stats.Handler),
('/corpora', corpora.Handler),
('/corpora/create', corpora.CreateHandler),
    ('/corpora/delete', corpora.DeleteHandler),
    ('/docs', help_redirector.DocumentationHandler),
    ('/download/?([^/]+)?', download.Handler),
    ('/fuzzers', fuzzers.Handler),
    ('/fuzzers/create', fuzzers.CreateHandler),
    ('/fuzzers/delete', fuzzers.DeleteHandler),
('/fuzzers/edit', fuzzers.EditHandler),
('/fuzzers/log/([^/]+)', fuzzers.LogHandler),
('/fuzzer-stats/load', fuzzer_stats.LoadHandler),
('/fuzzer-stats', fuzzer_stats.Handler),
('/fuzzer-stats/.*', fuzzer_stats.Handler),
('/gcs-redirect', gcs_redirector.Handler),
('/issue/([0-9]+)/(.+)', issue_redirector.Handler),
('/jobs', jobs.Handler),
('/jobs/.*', jobs.Handler),
('/update-job', jobs.UpdateJob),
('/update-job-template', jobs.UpdateJobTemplate),
('/parse_stacktrace', parse_stacktrace.Handler),
('/performance-report/(.+)/(.+)/(.+)', show_performance_report.Handler),
('/report-csp-failure', report_csp_failure.ReportCspFailureHandler),
('/testcase', show_testcase.DeprecatedHandler),
('/testcase-detail/([0-9]+)', show_testcase.Handler),
('/testcase-detail/crash-stats', crash_stats_on_testcase.Handler),
('/testcase-detail/create-issue', create_issue.Handler),
('/testcase-detail/delete', delete.Handler),
('/testcase-detail/download-testcase', download_testcase.Handler),
('/testcase-detail/find-similar-issues', find_similar_issues.Handler),
('/testcase-detail/mark-fixed', mark_fixed.Handler),
('/testcase-detail/mark-security', mark_security.Handler),
('/testcase-detail/mark-unconfirmed', mark_unconfirmed.Handler),
('/testcase-detail/redo', redo.Handler),
('/testcase-detail/refresh', show_testcase.RefreshHandler),
('/testcase-detail/remove-duplicate', remove_duplicate.Handler),
('/testcase-detail/remove-issue', remove_issue.Handler),
('/testcase-detail/remove-group', remove_group.Handler),
('/testcase-detail/update-from-trunk', update_from_trunk.Handler),
('/testcase-detail/update-issue', update_issue.Handler),
('/testcases', testcase_list.Handler),
('/testcases/load', testcase_list.JsonHandler),
('/upload-testcase', upload_testcase.Handler),
('/upload-testcase/get-url-oauth', upload_testcase.UploadUrlHandlerOAuth),
('/upload-testcase/prepare', upload_testcase.PrepareUploadHandler),
('/upload-testcase/load', upload_testcase.JsonHandler),
('/upload-testcase/upload', upload_testcase.UploadHandler),
('/upload-testcase/upload-oauth', upload_testcase.UploadHandlerOAuth),
('/revisions', revisions_info.Handler),
('/report-bug', help_redirector.ReportBugHandler),
('/viewer', viewer.Handler),
]
config = local_config.GAEConfig()
main_domain = config.get('domains.main')
redirect_domains = config.get('domains.redirects')
_DOMAIN_ROUTES = []
if main_domain and redirect_domains:
for redirect_domain in redirect_domains:
_DOMAIN_ROUTES.append(
routes.DomainRoute(redirect_domain, [
webapp2.Route('<:.*>', redirect_to(main_domain)),
]))
app = webapp2.WSGIApplication(
    _CRON_ROUTES + _DOMAIN_ROUTES + _ROUTES, debug=True)
test_deregister_request_data.py | import pytest
from app.request_schemes.deregister_request_data import DeregisterRequestData
pytestmark = pytest.mark.asyncio
@pytest.mark.usefixtures('unstub')
class TestDeregisterRequestData:
    @pytest.mark.parametrize("data", [
{'eventType': ''},
        {'eventType': '', 'jobName': ''},
{'eventType': '', 'caller': ''},
{'jobName': '', 'caller': ''},
{'jobName': '', 'jenkins_url': '', 'caller': ''}
])
async def test__when_data_does_not_have_mandatory_keys__should_not_be_valid(self, data):
assert not DeregisterRequestData.is_valid_deregister_request_data(data)
async def test__when_data_has_mandatory_keys__should_be_valid(self):
data = {'eventType': '', 'repository': '', 'jobName': '', 'caller': '', 'jenkins_url': ''}
assert DeregisterRequestData.is_valid_deregister_request_data(data) |
|
servicenamer.go | /*
* Copyright IBM Corporation 2021
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package transformer
import (
"path/filepath"
"strings"
"github.com/konveyor/move2kube/internal/common"
"github.com/sirupsen/logrus"
plantypes "github.com/konveyor/move2kube/types/plan"
"github.com/konveyor/move2kube/types/transformer/artifacts"
)
type project struct {
path string
pathsuffix string
}
func nameServices(projName string, nServices map[string]plantypes.Service, sts []plantypes.Transformer) (services map[string]plantypes.Service) {
	services = nServices
	// Collate services by project path or shared common base dir
	servicePaths := make(map[string][]plantypes.Transformer)
	for _, st := range sts {
		pps, ok := st.Paths[artifacts.ProjectPathPathType]
		bpp := common.CleanAndFindCommonDirectory(pps)
		if !ok {
			paths := []string{}
			for _, p := range st.Paths {
				paths = append(paths, p...)
			}
			if len(paths) > 0 {
				bpp = common.CleanAndFindCommonDirectory(paths)
			} else {
				logrus.Errorf("No paths in the transformer. Ignoring transformer : %+v", st)
				continue
			}
		}
		if ts, ok := servicePaths[bpp]; ok {
			servicePaths[bpp] = append(ts, st)
		} else {
			servicePaths[bpp] = []plantypes.Transformer{st}
		}
	}
	// Find if base dir is a git repo, and has only one service or many services
	gitRepoNames := make(map[string][]string) // [repoName][]basePath
	basePathRepos := make(map[string]string)
	for sp := range servicePaths {
		repoName, _, _, repoUrl, _, err := common.GatherGitInfo(sp)
		if err != nil {
			logrus.Debugf("Unable to find any git repo for directory %s : %s", sp, err)
			continue
		}
		if repoName == "" {
			logrus.Debugf("No repo name found for repo at %s", repoUrl)
			continue
		}
		if bps, ok := gitRepoNames[repoName]; ok {
			gitRepoNames[repoName] = append(bps, sp)
		} else {
			gitRepoNames[repoName] = []string{sp}
		}
		basePathRepos[sp] = repoName
	}
	for repoName, basePaths := range gitRepoNames {
		if len(basePaths) == 1 {
			// Only one service in repo
			services[repoName] = servicePaths[basePaths[0]]
			delete(servicePaths, basePaths[0])
		}
	}
	// Only one set of unnamed services, use project name
	if len(nServices) == 0 && len(servicePaths) == 1 {
		for _, ts := range servicePaths {
			services[projName] = ts
		}
		return services
	}
	repoProjects := map[string][]project{}
	for sp := range servicePaths {
		repo, ok := basePathRepos[sp]
		if !ok {
			repo = projName
		}
		p := project{sp, sp}
		if ps, ok := repoProjects[repo]; !ok {
			repoProjects[repo] = []project{p}
		} else {
			repoProjects[repo] = append(ps, p)
		}
	}
	sProjects := map[string][]project{}
	for repo, projects := range repoProjects {
		if len(projects) == 1 {
			sProjects[repo] = []project{projects[0]}
			continue
		}
		for k, v := range bucketProjects(projects) {
			separator := "-"
			if repo == "" || k == "" {
				separator = ""
			}
			nk := repo + separator + k
			if v1, ok := sProjects[nk]; ok {
				sProjects[nk] = append(v, v1...)
			} else {
				sProjects[nk] = v
			}
		}
	}
	//TODO: Consider whether we should take into consideration pre-existing serviceNames
	svcs := map[string]plantypes.Service{}
	for sn, ps := range sProjects {
		for _, p := range ps {
			svcs[sn] = servicePaths[p.path]
		}
	}
	return plantypes.MergeServices(services, svcs)
}
func bucketProjects(projects []project) map[string][]project {
nProjects := map[string][]project{}
commonPath := findCommonPrefix(projects)
if commonPath != "." {
projects = trimPrefix(projects, commonPath)
}
for _, df := range projects {
parts := strings.Split(df.pathsuffix, string(filepath.Separator))
prefix := ""
if len(parts) == 0 {
prefix = ""
} else if len(parts) > 0 {
prefix = parts[0]
}
if pdfs, ok := nProjects[prefix]; !ok {
nProjects[prefix] = []project{df}
} else {
nProjects[prefix] = append(pdfs, df)
}
}
sProjects := map[string][]project{}
for p, paths := range nProjects {
if len(paths) == 1 {
sProjects[p] = []project{paths[0]}
} else if p == "" {
for _, v := range paths {
if v1, ok := sProjects[p]; ok {
sProjects[p] = append(v1, v)
} else {
sProjects[p] = []project{v}
}
}
} else {
for k, v := range bucketProjects(paths) {
separator := "-"
if p == "" || k == "" {
separator = ""
}
nk := p + separator + k
if v1, ok := sProjects[nk]; ok {
sProjects[nk] = append(v, v1...)
} else {
sProjects[nk] = v
}
}
}
}
return sProjects
}
func findCommonPrefix(files []project) string {
paths := make([]string, len(files))
for i, file := range files {
paths[i] = file.pathsuffix
}
return common.CleanAndFindCommonDirectory(paths)
}
func trimPrefix(files []project, prefix string) []project {
for i, f := range files {
files[i].pathsuffix = strings.TrimPrefix(f.pathsuffix, prefix+string(filepath.Separator))
}
return files
}
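// Illustrative sketch (added, not from the original source): bucketProjects
// trims the common prefix and groups by the first remaining path component,
// recursing when a bucket holds several projects. For the inputs below the
// keys come out roughly as "svc-a-api", "svc-a-worker" and "svc-b" (the exact
// values depend on common.CleanAndFindCommonDirectory).
func exampleBuckets() {
	ps := []project{
		{path: "/repo/svc-a/api", pathsuffix: "/repo/svc-a/api"},
		{path: "/repo/svc-a/worker", pathsuffix: "/repo/svc-a/worker"},
		{path: "/repo/svc-b", pathsuffix: "/repo/svc-b"},
	}
	logrus.Infof("buckets: %+v", bucketProjects(ps))
}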
bitcou2.go | package bitcou
import (
b64 "encoding/base64"
"encoding/json"
"errors"
"github.com/grupokindynos/common/ladon"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"time"
)
type ServiceV2 struct {
BitcouURL string
BitcouToken string
ImageMap map[int]ProviderImage
}
func InitServiceV2() *ServiceV2 {
service := &ServiceV2{
BitcouURL: os.Getenv("BITCOU_URL_DEV_V2"),
BitcouToken: os.Getenv("BITCOU_TOKEN_V2"),
ImageMap: make(map[int]ProviderImage),
}
return service
}
func (bs *ServiceV2) GetListV2(dev bool) ([]VoucherV2, error) {
var url string
if dev {
url = os.Getenv("BITCOU_URL_DEV_V2") + "voucher/availableVouchers/"
} else {
url = os.Getenv("BITCOU_URL_PROD_V2") + "voucher/availableVouchers/"
}
log.Println("Getting products using url: ", url)
token := "Bearer " + os.Getenv("BITCOU_TOKEN_V2")
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
req.Header.Add("Authorization", token)
client := &http.Client{Timeout: 500 * time.Second}
res, err := client.Do(req)
if err != nil {
return nil, err
}
defer func() {
_ = res.Body.Close()
}()
contents, _ := ioutil.ReadAll(res.Body)
var response BaseResponse
err = json.Unmarshal(contents, &response)
if err != nil {
return nil, err
}
var vouchersList []VoucherV2
dataBytes, err := json.Marshal(response.Data)
if err != nil {
return nil, err
}
err = json.Unmarshal(dataBytes, &vouchersList)
if err != nil {
return nil, err
}
return vouchersList, nil
}
func (bs *ServiceV2) GetProvidersV2(dev bool) ([]Provider, error) {
var url string
if dev {
url = os.Getenv("BITCOU_URL_DEV_V2") + "voucher/providers"
} else {
url = os.Getenv("BITCOU_URL_PROD_V2") + "voucher/providers"
}
log.Println("Getting providers using url: ", url)
token := "Bearer " + os.Getenv("BITCOU_TOKEN_V2")
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
req.Header.Add("Authorization", token)
client := &http.Client{Timeout: 20 * time.Second}
res, err := client.Do(req)
if err != nil {
return nil, err
}
defer func() {
_ = res.Body.Close()
}()
contents, _ := ioutil.ReadAll(res.Body)
var response BaseResponse
err = json.Unmarshal(contents, &response)
if err != nil {
return nil, err
}
var providerList []Provider
dataBytes, err := json.Marshal(response.Data)
if err != nil {
return nil, err
}
err = json.Unmarshal(dataBytes, &providerList)
if err != nil {
return nil, err
}
return providerList, nil
}
func (bs *ServiceV2) GetProviderImage(providerId int, dev bool) (imageInfo ProviderImage, err error) {
if val, ok := bs.ImageMap[providerId]; ok {
//log.Println("using cached image for ", providerId)
return val, nil
}
var url string
if dev {
url = os.Getenv("BITCOU_URL_DEV_V2") + "voucher/providerImage"
} else {
url = os.Getenv("BITCOU_URL_PROD_V2") + "voucher/providerImage"
}
token := "Bearer " + os.Getenv("BITCOU_TOKEN_V2")
req, err := http.NewRequest("GET", url + "?provider_id=" + strconv.Itoa(providerId), nil)
if err != nil {
return imageInfo, err
}
req.Header.Add("Authorization", token)
client := &http.Client{Timeout: 20 * time.Second}
res, err := client.Do(req)
if err != nil {
return imageInfo, err
}
defer func() {
_ = res.Body.Close()
}()
contents, _ := ioutil.ReadAll(res.Body)
var response BaseResponse
err = json.Unmarshal(contents, &response)
if err != nil {
return imageInfo, err
}
if len(response.Data) > 0 {
dataBytes, err := json.Marshal(response.Data[0])
if err != nil {
return imageInfo, err
}
err = json.Unmarshal(dataBytes, &imageInfo)
if err != nil {
return imageInfo, err
}
bs.ImageMap[providerId] = imageInfo
return imageInfo, nil
} else {
return imageInfo, errors.New("image unavailable")
}
}
func (bs *ServiceV2) GetProviderImageBase64(imageUrl string, providerId int) (imageInfo ladon.ProviderImageApp, err error) {
token := "Bearer " + os.Getenv("BITCOU_TOKEN_V2")
req, err := http.NewRequest("GET", imageUrl, nil)
if err != nil {
return imageInfo, err
}
req.Header.Add("Authorization", token)
client := &http.Client{Timeout: 20 * time.Second}
res, err := client.Do(req)
if err != nil {
return imageInfo, err
}
contents, _ := ioutil.ReadAll(res.Body)
_ = res.Body.Close()
uEnc := b64.StdEncoding.EncodeToString(contents)
imageInfo.Image = uEnc
imageInfo.ProviderId = providerId
imageInfo.Url = imageUrl
return
}
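// Usage sketch (added for illustration): initialise the service and pull the
// dev voucher list. Assumes the BITCOU_URL_DEV_V2 and BITCOU_TOKEN_V2
// environment variables referenced above are set.
func exampleFetchVouchers() {
	svc := InitServiceV2()
	vouchers, err := svc.GetListV2(true) // true selects the dev endpoint
	if err != nil {
		log.Println("bitcou voucher list failed:", err)
		return
	}
	log.Printf("fetched %d vouchers", len(vouchers))
}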
models.py | import datetime
# from sqlalchemy import Column, Integer, String
import hashlib
from server.extensions import Base, argon2, bcrypt
class User(Base):
__table__ = Base.metadata.tables["users"]
__table_args__ = {"autoload": True}
# Attribute names to help out with functions
# id = Column(Integer, primary_key=True, unique=True)
# ip_address = Column(String(64))
# username = Column(String(64), index=True, unique=True)
# email = Column(String(120), index=True, unique=True)
# password = Column(String(240))
# activation_selector = Column(String(120))#Unique
# activation_code = Column(String(120))
# forgotten_password_selector = Column(String(120))#Unique
# forgotten_password_code = Column(String(120))
# forgotten_password_time = Column(String(120))
# remember_selector = Column(String(120))#Unique
# remember_code = Column(String(120))
# created_on = Column(String(120))
# last_login = Column(String(120))
# active = Column(String(120))
# first_name = Column(String(120))
# last_name = Column(String(120))
# company = Column(String(120))
# phone = Column(String(120))
# country = Column(String(120))
# image = Column(String(120))
# bio = Column(String(240))
# core = Column(String(240))
# external_source = Column(String(120))
# external_id = Column(String(120))
# session_hash = Column(String(120))# session hash is API key
# password_hash = Column(String(120))
def set_password(self, password):
self.password = argon2.generate_password_hash(password)
def check_password(self, passwd):
"""
Check if the passwordhash is in Argon2 or Bcrypt(old) format
Resets the password hash to argon2 format if stored in bcrypt
Returns value for login route
"""
        bpass = False  # ensure bpass is bound even when bcrypt returns False
        try:
            if bcrypt.check_password_hash(self.password, passwd):
                bpass = True
        except ValueError as error:
            # Raised when the stored hash is not in bcrypt format
            print(error)
if argon2.check_password_hash(self.password, passwd):
return True
elif not argon2.check_password_hash(self.password, passwd) and not bpass:
return False
elif not argon2.check_password_hash(self.password, passwd) and bpass:
self.set_password(passwd)
return True
def update_bio(self, new_bio):
self.bio = new_bio
def update_email(self, email):
        self.email = email
    def update_first_name(self, first_name):
        self.first_name = first_name
    def update_last_name(self, last_name):
        self.last_name = last_name
def update_forgotten_code(self, code):
self.forgotten_password_code = code
def update_activation_code(self, code):
self.activation_code = code
def update_activation(self):
self.active = "1"
print("user activated successfully")
def update_forgotten_time(self, time):
self.forgotten_password_time = time
def set_session_hash(self):
timestamp = datetime.datetime.now()
timestamp1 = timestamp.strftime("%Y-%m-%d %H:%M:%S")
md5_digest = hashlib.md5(timestamp1.encode()).hexdigest()
self.session_hash = md5_digest
def update_image_address(self, path):
self.image = path
def __repr__(self):
return "<User {}>".format(self.username)
class UserGroups(Base):
__table__ = Base.metadata.tables["users_groups"]
__table_args__ = {"autoload": True}
def set_group(self):
self.group_id = 2
print('group updated')
def __repr__(self):
return "<User {}>".format(self.username) | def update_first_name(self, first_name):
self.first_name = first_name
def update_last_name(self, last_name): |
inject-feature-toggles.tsx | import React from 'react';
import {
wrapDisplayName,
setDisplayName,
DEFAULT_FLAGS_PROP_KEY,
} from '@flopflip/react';
import { useFlagVariations } from '../../hooks';
import { FlagName, Flags } from '@flopflip/types';
type InjectedProps = {
[propKey: string]: Flags;
};
export default <OwnProps extends object>(
flagNames: FlagName[],
propKey: string = DEFAULT_FLAGS_PROP_KEY
) => (
Component: React.ComponentType
): React.ComponentType<OwnProps & InjectedProps> => {
const WrappedComponent = (ownProps: OwnProps) => {
const flagVariations = useFlagVariations(flagNames);
    const flags = Object.fromEntries(
      flagNames.map((flagName, indexOfFlagName) => [
flagName,
flagVariations[indexOfFlagName],
])
);
const props = {
...ownProps,
[propKey]: flags,
};
return <Component {...props} />;
};
setDisplayName(wrapDisplayName(Component, 'injectFeatureToggles'));
return WrappedComponent;
}; | |
index.js | /******/ (() => { // webpackBootstrap
/******/ var __webpack_modules__ = ({
/***/ "./node_modules/@babel/runtime/regenerator/index.js":
/*!**********************************************************!*\
!*** ./node_modules/@babel/runtime/regenerator/index.js ***!
\**********************************************************/
/***/ ((module, __unused_webpack_exports, __webpack_require__) => {
module.exports = __webpack_require__(/*! regenerator-runtime */ "./node_modules/regenerator-runtime/runtime.js");
/***/ }),
/***/ "./node_modules/regenerator-runtime/runtime.js":
/*!*****************************************************!*\
!*** ./node_modules/regenerator-runtime/runtime.js ***!
\*****************************************************/
/***/ ((module) => {
/**
* Copyright (c) 2014-present, Facebook, Inc.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
var runtime = (function (exports) {
"use strict";
var Op = Object.prototype;
var hasOwn = Op.hasOwnProperty;
var undefined; // More compressible than void 0.
var $Symbol = typeof Symbol === "function" ? Symbol : {};
var iteratorSymbol = $Symbol.iterator || "@@iterator";
var asyncIteratorSymbol = $Symbol.asyncIterator || "@@asyncIterator";
var toStringTagSymbol = $Symbol.toStringTag || "@@toStringTag";
function define(obj, key, value) {
Object.defineProperty(obj, key, {
value: value,
enumerable: true,
configurable: true,
writable: true
});
return obj[key];
}
try {
// IE 8 has a broken Object.defineProperty that only works on DOM objects.
define({}, "");
} catch (err) {
define = function(obj, key, value) {
return obj[key] = value;
};
}
function wrap(innerFn, outerFn, self, tryLocsList) {
// If outerFn provided and outerFn.prototype is a Generator, then outerFn.prototype instanceof Generator.
var protoGenerator = outerFn && outerFn.prototype instanceof Generator ? outerFn : Generator;
var generator = Object.create(protoGenerator.prototype);
var context = new Context(tryLocsList || []);
// The ._invoke method unifies the implementations of the .next,
// .throw, and .return methods.
generator._invoke = makeInvokeMethod(innerFn, self, context);
return generator;
}
exports.wrap = wrap;
// Try/catch helper to minimize deoptimizations. Returns a completion
// record like context.tryEntries[i].completion. This interface could
// have been (and was previously) designed to take a closure to be
// invoked without arguments, but in all the cases we care about we
// already have an existing method we want to call, so there's no need
// to create a new function object. We can even get away with assuming
// the method takes exactly one argument, since that happens to be true
// in every case, so we don't have to touch the arguments object. The
// only additional allocation required is the completion record, which
// has a stable shape and so hopefully should be cheap to allocate.
function tryCatch(fn, obj, arg) {
try {
return { type: "normal", arg: fn.call(obj, arg) };
} catch (err) {
return { type: "throw", arg: err };
}
}
var GenStateSuspendedStart = "suspendedStart";
var GenStateSuspendedYield = "suspendedYield";
var GenStateExecuting = "executing";
var GenStateCompleted = "completed";
// Returning this object from the innerFn has the same effect as
// breaking out of the dispatch switch statement.
var ContinueSentinel = {};
// Dummy constructor functions that we use as the .constructor and
// .constructor.prototype properties for functions that return Generator
// objects. For full spec compliance, you may wish to configure your
// minifier not to mangle the names of these two functions.
function Generator() {}
function GeneratorFunction() {}
function GeneratorFunctionPrototype() {}
// This is a polyfill for %IteratorPrototype% for environments that
// don't natively support it.
var IteratorPrototype = {};
define(IteratorPrototype, iteratorSymbol, function () {
return this;
});
var getProto = Object.getPrototypeOf;
var NativeIteratorPrototype = getProto && getProto(getProto(values([])));
if (NativeIteratorPrototype &&
NativeIteratorPrototype !== Op &&
hasOwn.call(NativeIteratorPrototype, iteratorSymbol)) {
// This environment has a native %IteratorPrototype%; use it instead
// of the polyfill.
IteratorPrototype = NativeIteratorPrototype;
}
var Gp = GeneratorFunctionPrototype.prototype =
Generator.prototype = Object.create(IteratorPrototype);
GeneratorFunction.prototype = GeneratorFunctionPrototype;
define(Gp, "constructor", GeneratorFunctionPrototype);
define(GeneratorFunctionPrototype, "constructor", GeneratorFunction);
GeneratorFunction.displayName = define(
GeneratorFunctionPrototype,
toStringTagSymbol,
"GeneratorFunction"
);
// Helper for defining the .next, .throw, and .return methods of the
// Iterator interface in terms of a single ._invoke method.
function defineIteratorMethods(prototype) {
["next", "throw", "return"].forEach(function(method) {
define(prototype, method, function(arg) {
return this._invoke(method, arg);
});
});
}
exports.isGeneratorFunction = function(genFun) {
var ctor = typeof genFun === "function" && genFun.constructor;
return ctor
? ctor === GeneratorFunction ||
// For the native GeneratorFunction constructor, the best we can
// do is to check its .name property.
(ctor.displayName || ctor.name) === "GeneratorFunction"
: false;
};
exports.mark = function(genFun) {
if (Object.setPrototypeOf) {
Object.setPrototypeOf(genFun, GeneratorFunctionPrototype);
} else {
genFun.__proto__ = GeneratorFunctionPrototype;
define(genFun, toStringTagSymbol, "GeneratorFunction");
}
genFun.prototype = Object.create(Gp);
return genFun;
};
// Within the body of any async function, `await x` is transformed to
// `yield regeneratorRuntime.awrap(x)`, so that the runtime can test
// `hasOwn.call(value, "__await")` to determine if the yielded value is
// meant to be awaited.
exports.awrap = function(arg) {
return { __await: arg };
};
function AsyncIterator(generator, PromiseImpl) {
function invoke(method, arg, resolve, reject) {
var record = tryCatch(generator[method], generator, arg);
if (record.type === "throw") {
reject(record.arg);
} else {
var result = record.arg;
var value = result.value;
if (value &&
typeof value === "object" &&
hasOwn.call(value, "__await")) {
return PromiseImpl.resolve(value.__await).then(function(value) {
invoke("next", value, resolve, reject);
}, function(err) {
invoke("throw", err, resolve, reject);
});
}
return PromiseImpl.resolve(value).then(function(unwrapped) {
// When a yielded Promise is resolved, its final value becomes
// the .value of the Promise<{value,done}> result for the
// current iteration.
result.value = unwrapped;
resolve(result);
}, function(error) {
// If a rejected Promise was yielded, throw the rejection back
// into the async generator function so it can be handled there.
return invoke("throw", error, resolve, reject);
});
}
}
var previousPromise;
function enqueue(method, arg) {
function callInvokeWithMethodAndArg() {
return new PromiseImpl(function(resolve, reject) {
invoke(method, arg, resolve, reject);
});
}
return previousPromise =
// If enqueue has been called before, then we want to wait until
// all previous Promises have been resolved before calling invoke,
// so that results are always delivered in the correct order. If
// enqueue has not been called before, then it is important to
// call invoke immediately, without waiting on a callback to fire,
// so that the async generator function has the opportunity to do
// any necessary setup in a predictable way. This predictability
// is why the Promise constructor synchronously invokes its
// executor callback, and why async functions synchronously
// execute code before the first await. Since we implement simple
// async functions in terms of async generators, it is especially
// important to get this right, even though it requires care.
previousPromise ? previousPromise.then(
callInvokeWithMethodAndArg,
// Avoid propagating failures to Promises returned by later
// invocations of the iterator.
callInvokeWithMethodAndArg
) : callInvokeWithMethodAndArg();
}
// Define the unified helper method that is used to implement .next,
// .throw, and .return (see defineIteratorMethods).
this._invoke = enqueue;
}
defineIteratorMethods(AsyncIterator.prototype);
define(AsyncIterator.prototype, asyncIteratorSymbol, function () {
return this;
});
exports.AsyncIterator = AsyncIterator;
// Note that simple async functions are implemented on top of
// AsyncIterator objects; they just return a Promise for the value of
// the final result produced by the iterator.
exports.async = function(innerFn, outerFn, self, tryLocsList, PromiseImpl) {
if (PromiseImpl === void 0) PromiseImpl = Promise;
var iter = new AsyncIterator(
wrap(innerFn, outerFn, self, tryLocsList),
PromiseImpl
);
return exports.isGeneratorFunction(outerFn)
? iter // If outerFn is a generator, return the full iterator.
: iter.next().then(function(result) {
return result.done ? result.value : iter.next();
});
};
function makeInvokeMethod(innerFn, self, context) {
var state = GenStateSuspendedStart;
return function invoke(method, arg) {
if (state === GenStateExecuting) {
throw new Error("Generator is already running");
}
if (state === GenStateCompleted) {
if (method === "throw") {
throw arg;
}
// Be forgiving, per 25.3.3.3.3 of the spec:
// https://people.mozilla.org/~jorendorff/es6-draft.html#sec-generatorresume
return doneResult();
}
context.method = method;
context.arg = arg;
while (true) {
var delegate = context.delegate;
if (delegate) {
var delegateResult = maybeInvokeDelegate(delegate, context);
if (delegateResult) {
if (delegateResult === ContinueSentinel) continue;
return delegateResult;
}
}
if (context.method === "next") {
// Setting context._sent for legacy support of Babel's
// function.sent implementation.
context.sent = context._sent = context.arg;
} else if (context.method === "throw") {
if (state === GenStateSuspendedStart) {
state = GenStateCompleted;
throw context.arg;
}
context.dispatchException(context.arg);
} else if (context.method === "return") {
context.abrupt("return", context.arg);
}
state = GenStateExecuting;
var record = tryCatch(innerFn, self, context);
if (record.type === "normal") {
// If an exception is thrown from innerFn, we leave state ===
// GenStateExecuting and loop back for another invocation.
state = context.done
? GenStateCompleted
: GenStateSuspendedYield;
if (record.arg === ContinueSentinel) {
continue;
}
return {
value: record.arg,
done: context.done
};
} else if (record.type === "throw") {
state = GenStateCompleted;
// Dispatch the exception by looping back around to the
// context.dispatchException(context.arg) call above.
context.method = "throw";
context.arg = record.arg;
}
}
};
}
// Call delegate.iterator[context.method](context.arg) and handle the
// result, either by returning a { value, done } result from the
// delegate iterator, or by modifying context.method and context.arg,
// setting context.delegate to null, and returning the ContinueSentinel.
function maybeInvokeDelegate(delegate, context) {
var method = delegate.iterator[context.method];
if (method === undefined) {
// A .throw or .return when the delegate iterator has no .throw
// method always terminates the yield* loop.
context.delegate = null;
if (context.method === "throw") {
// Note: ["return"] must be used for ES3 parsing compatibility.
if (delegate.iterator["return"]) {
// If the delegate iterator has a return method, give it a
// chance to clean up.
context.method = "return";
context.arg = undefined;
maybeInvokeDelegate(delegate, context);
if (context.method === "throw") {
// If maybeInvokeDelegate(context) changed context.method from
// "return" to "throw", let that override the TypeError below.
return ContinueSentinel;
}
}
context.method = "throw";
context.arg = new TypeError(
"The iterator does not provide a 'throw' method");
}
return ContinueSentinel;
}
var record = tryCatch(method, delegate.iterator, context.arg);
if (record.type === "throw") {
context.method = "throw";
context.arg = record.arg;
context.delegate = null;
return ContinueSentinel;
}
var info = record.arg;
if (! info) {
context.method = "throw";
context.arg = new TypeError("iterator result is not an object");
context.delegate = null;
return ContinueSentinel;
}
if (info.done) {
// Assign the result of the finished delegate to the temporary
// variable specified by delegate.resultName (see delegateYield).
context[delegate.resultName] = info.value;
// Resume execution at the desired location (see delegateYield).
context.next = delegate.nextLoc;
// If context.method was "throw" but the delegate handled the
// exception, let the outer generator proceed normally. If
// context.method was "next", forget context.arg since it has been
// "consumed" by the delegate iterator. If context.method was
// "return", allow the original .return call to continue in the
// outer generator.
if (context.method !== "return") {
context.method = "next";
context.arg = undefined;
}
} else {
// Re-yield the result returned by the delegate method.
return info;
}
// The delegate iterator is finished, so forget it and continue with
// the outer generator.
context.delegate = null;
return ContinueSentinel;
}
// Define Generator.prototype.{next,throw,return} in terms of the
// unified ._invoke helper method.
defineIteratorMethods(Gp);
define(Gp, toStringTagSymbol, "Generator");
// A Generator should always return itself as the iterator object when the
// @@iterator function is called on it. Some browsers' implementations of the
// iterator prototype chain incorrectly implement this, causing the Generator
// object to not be returned from this call. This ensures that doesn't happen.
// See https://github.com/facebook/regenerator/issues/274 for more details.
define(Gp, iteratorSymbol, function() {
return this;
});
define(Gp, "toString", function() {
return "[object Generator]";
});
function pushTryEntry(locs) {
var entry = { tryLoc: locs[0] };
if (1 in locs) {
entry.catchLoc = locs[1];
}
if (2 in locs) {
entry.finallyLoc = locs[2];
entry.afterLoc = locs[3];
}
this.tryEntries.push(entry);
}
function resetTryEntry(entry) {
var record = entry.completion || {};
record.type = "normal";
delete record.arg;
entry.completion = record;
}
function Context(tryLocsList) {
// The root entry object (effectively a try statement without a catch
// or a finally block) gives us a place to store values thrown from
// locations where there is no enclosing try statement.
this.tryEntries = [{ tryLoc: "root" }];
tryLocsList.forEach(pushTryEntry, this);
this.reset(true);
}
exports.keys = function(object) {
var keys = [];
for (var key in object) {
keys.push(key);
}
keys.reverse();
// Rather than returning an object with a next method, we keep
// things simple and return the next function itself.
return function next() {
while (keys.length) {
var key = keys.pop();
if (key in object) {
next.value = key;
next.done = false;
return next;
}
}
// To avoid creating an additional object, we just hang the .value
// and .done properties off the next function object itself. This
// also ensures that the minifier will not anonymize the function.
next.done = true;
return next;
};
};
function values(iterable) {
if (iterable) {
var iteratorMethod = iterable[iteratorSymbol];
if (iteratorMethod) {
return iteratorMethod.call(iterable);
}
if (typeof iterable.next === "function") {
return iterable;
}
if (!isNaN(iterable.length)) {
var i = -1, next = function next() {
while (++i < iterable.length) {
if (hasOwn.call(iterable, i)) {
next.value = iterable[i];
next.done = false;
return next;
}
}
next.value = undefined;
next.done = true;
return next;
};
return next.next = next;
}
}
// Return an iterator with no values.
return { next: doneResult };
}
exports.values = values;
function doneResult() {
return { value: undefined, done: true };
}
Context.prototype = {
constructor: Context,
reset: function(skipTempReset) {
this.prev = 0;
this.next = 0;
// Resetting context._sent for legacy support of Babel's
// function.sent implementation.
this.sent = this._sent = undefined;
this.done = false;
this.delegate = null;
this.method = "next";
this.arg = undefined;
this.tryEntries.forEach(resetTryEntry);
if (!skipTempReset) {
for (var name in this) {
// Not sure about the optimal order of these conditions:
if (name.charAt(0) === "t" &&
hasOwn.call(this, name) &&
!isNaN(+name.slice(1))) {
this[name] = undefined;
}
}
}
},
stop: function() {
this.done = true;
var rootEntry = this.tryEntries[0];
var rootRecord = rootEntry.completion;
if (rootRecord.type === "throw") {
throw rootRecord.arg;
}
return this.rval;
},
dispatchException: function(exception) {
if (this.done) {
throw exception;
}
var context = this;
function handle(loc, caught) {
record.type = "throw";
record.arg = exception;
context.next = loc;
if (caught) {
// If the dispatched exception was caught by a catch block,
// then let that catch block handle the exception normally.
context.method = "next";
context.arg = undefined;
}
return !! caught;
}
for (var i = this.tryEntries.length - 1; i >= 0; --i) {
var entry = this.tryEntries[i];
var record = entry.completion;
if (entry.tryLoc === "root") {
// Exception thrown outside of any try block that could handle
// it, so set the completion value of the entire function to
// throw the exception.
return handle("end");
}
if (entry.tryLoc <= this.prev) {
var hasCatch = hasOwn.call(entry, "catchLoc");
var hasFinally = hasOwn.call(entry, "finallyLoc");
if (hasCatch && hasFinally) {
if (this.prev < entry.catchLoc) {
return handle(entry.catchLoc, true);
} else if (this.prev < entry.finallyLoc) {
return handle(entry.finallyLoc);
}
} else if (hasCatch) {
if (this.prev < entry.catchLoc) {
return handle(entry.catchLoc, true);
}
} else if (hasFinally) {
if (this.prev < entry.finallyLoc) {
return handle(entry.finallyLoc);
}
} else {
throw new Error("try statement without catch or finally");
}
}
}
},
abrupt: function(type, arg) {
for (var i = this.tryEntries.length - 1; i >= 0; --i) {
var entry = this.tryEntries[i];
if (entry.tryLoc <= this.prev &&
hasOwn.call(entry, "finallyLoc") &&
this.prev < entry.finallyLoc) {
var finallyEntry = entry;
break;
}
}
if (finallyEntry &&
(type === "break" ||
type === "continue") &&
finallyEntry.tryLoc <= arg &&
arg <= finallyEntry.finallyLoc) {
// Ignore the finally entry if control is not jumping to a
// location outside the try/catch block.
finallyEntry = null;
}
var record = finallyEntry ? finallyEntry.completion : {};
record.type = type;
record.arg = arg;
if (finallyEntry) {
this.method = "next";
this.next = finallyEntry.finallyLoc;
return ContinueSentinel;
}
return this.complete(record);
},
complete: function(record, afterLoc) {
if (record.type === "throw") {
throw record.arg;
}
if (record.type === "break" ||
record.type === "continue") {
this.next = record.arg;
} else if (record.type === "return") {
this.rval = this.arg = record.arg;
this.method = "return";
this.next = "end";
} else if (record.type === "normal" && afterLoc) {
this.next = afterLoc;
}
return ContinueSentinel;
},
finish: function(finallyLoc) {
for (var i = this.tryEntries.length - 1; i >= 0; --i) {
var entry = this.tryEntries[i];
if (entry.finallyLoc === finallyLoc) {
this.complete(entry.completion, entry.afterLoc);
resetTryEntry(entry);
return ContinueSentinel;
}
}
},
"catch": function(tryLoc) {
for (var i = this.tryEntries.length - 1; i >= 0; --i) {
var entry = this.tryEntries[i];
if (entry.tryLoc === tryLoc) {
var record = entry.completion;
if (record.type === "throw") {
var thrown = record.arg;
resetTryEntry(entry);
}
return thrown;
}
}
// The context.catch method must only be called with a location
// argument that corresponds to a known catch block.
throw new Error("illegal catch attempt");
},
delegateYield: function(iterable, resultName, nextLoc) {
this.delegate = {
iterator: values(iterable),
resultName: resultName,
nextLoc: nextLoc
};
if (this.method === "next") {
// Deliberately forget the last sent value so that we don't
// accidentally pass it on to the delegate.
this.arg = undefined;
}
return ContinueSentinel;
}
};
// Regardless of whether this script is executing as a CommonJS module
// or not, return the runtime object so that we can declare the variable
// regeneratorRuntime in the outer scope, which allows this module to be
// injected easily by `bin/regenerator --include-runtime script.js`.
return exports;
}(
// If this script is executing as a CommonJS module, use module.exports
// as the regeneratorRuntime namespace. Otherwise create a new empty
// object. Either way, the resulting object will be used to initialize
// the regeneratorRuntime variable at the top of this file.
true ? module.exports : 0
));
try {
regeneratorRuntime = runtime;
} catch (accidentalStrictMode) {
// This module should not be running in strict mode, so the above
// assignment should always work unless something is misconfigured. Just
// in case runtime.js accidentally runs in strict mode, in modern engines
// we can explicitly access globalThis. In older engines we can escape
// strict mode using a global Function call. This could conceivably fail
// if a Content Security Policy forbids using Function, but in that case
// the proper solution is to fix the accidental strict mode problem. If
// you've misconfigured your bundler to force strict mode and applied a
// CSP to forbid Function, and you're not willing to fix either of those
// problems, please detail your unique predicament in a GitHub issue.
if (typeof globalThis === "object") {
globalThis.regeneratorRuntime = runtime;
} else {
Function("r", "regeneratorRuntime = r")(runtime);
}
}
/***/ }),
/***/ "./node_modules/vue/dist/vue.js":
/*!**************************************!*\
!*** ./node_modules/vue/dist/vue.js ***!
\**************************************/
/***/ (function(module, __unused_webpack_exports, __webpack_require__) {
/*!
* Vue.js v2.6.14
* (c) 2014-2021 Evan You
* Released under the MIT License.
*/
(function (global, factory) {
true ? module.exports = factory() :
0;
}(this, function () { 'use strict';
/* */
var emptyObject = Object.freeze({});
// These helpers produce better VM code in JS engines due to their
// explicitness and function inlining.
function isUndef (v) {
return v === undefined || v === null
}
function isDef (v) {
return v !== undefined && v !== null
}
function isTrue (v) {
return v === true
}
function isFalse (v) {
return v === false
}
/**
* Check if value is primitive.
*/
function isPrimitive (value) {
return (
typeof value === 'string' ||
typeof value === 'number' ||
// $flow-disable-line
typeof value === 'symbol' ||
typeof value === 'boolean'
)
}
/**
* Quick object check - this is primarily used to tell
* Objects from primitive values when we know the value
* is a JSON-compliant type.
*/
function isObject (obj) {
return obj !== null && typeof obj === 'object'
}
/**
* Get the raw type string of a value, e.g., [object Object].
*/
var _toString = Object.prototype.toString;
function toRawType (value) {
return _toString.call(value).slice(8, -1)
}
/**
* Strict object type check. Only returns true
* for plain JavaScript objects.
*/
function isPlainObject (obj) {
return _toString.call(obj) === '[object Object]'
}
function isRegExp (v) {
return _toString.call(v) === '[object RegExp]'
}
/**
* Check if val is a valid array index.
*/
function isValidArrayIndex (val) {
var n = parseFloat(String(val));
return n >= 0 && Math.floor(n) === n && isFinite(val)
}
function isPromise (val) {
return (
isDef(val) &&
typeof val.then === 'function' &&
typeof val.catch === 'function'
)
}
/**
* Convert a value to a string that is actually rendered.
*/
function toString (val) {
return val == null
? ''
: Array.isArray(val) || (isPlainObject(val) && val.toString === _toString)
? JSON.stringify(val, null, 2)
: String(val)
}
/**
* Convert an input value to a number for persistence.
* If the conversion fails, return original string.
*/
function toNumber (val) {
var n = parseFloat(val);
return isNaN(n) ? val : n
}
/**
* Make a map and return a function for checking if a key
* is in that map.
*/
function makeMap (
str,
expectsLowerCase
) {
var map = Object.create(null);
var list = str.split(',');
for (var i = 0; i < list.length; i++) {
map[list[i]] = true;
}
return expectsLowerCase
? function (val) { return map[val.toLowerCase()]; }
: function (val) { return map[val]; }
}
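// Illustrative usage of makeMap (a sketch, not executed by the bundle):
//   var isFruit = makeMap('apple,banana');
//   isFruit('apple')            // => true
//   isFruit('pear')             // => undefined (falsy)
//   makeMap('a,b', true)('A')   // => true (input is lower-cased before lookup)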
/**
* Check if a tag is a built-in tag.
*/
var isBuiltInTag = makeMap('slot,component', true);
/**
* Check if an attribute is a reserved attribute.
*/
var isReservedAttribute = makeMap('key,ref,slot,slot-scope,is');
/**
* Remove an item from an array.
*/
function remove (arr, item) {
if (arr.length) {
var index = arr.indexOf(item);
if (index > -1) {
return arr.splice(index, 1)
}
}
}
/**
* Check whether an object has the property.
*/
var hasOwnProperty = Object.prototype.hasOwnProperty;
function hasOwn (obj, key) {
return hasOwnProperty.call(obj, key)
}
/**
* Create a cached version of a pure function.
*/
function cached (fn) {
var cache = Object.create(null);
return (function cachedFn (str) {
var hit = cache[str];
return hit || (cache[str] = fn(str))
})
}
/**
* Camelize a hyphen-delimited string.
*/
var camelizeRE = /-(\w)/g;
var camelize = cached(function (str) {
return str.replace(camelizeRE, function (_, c) { return c ? c.toUpperCase() : ''; })
});
/**
* Capitalize a string.
*/
var capitalize = cached(function (str) {
return str.charAt(0).toUpperCase() + str.slice(1)
});
/**
* Hyphenate a camelCase string.
*/
var hyphenateRE = /\B([A-Z])/g;
var hyphenate = cached(function (str) {
return str.replace(hyphenateRE, '-$1').toLowerCase()
});
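// Illustrative results for the cached string helpers above (sketch):
//   camelize('my-prop')   // => 'myProp'
//   capitalize('vue')     // => 'Vue'
//   hyphenate('myProp')   // => 'my-prop'
// Repeated calls hit the per-function cache created by cached().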
/**
* Simple bind polyfill for environments that do not support it,
* e.g., PhantomJS 1.x. Technically, we don't need this anymore
* since native bind is now performant enough in most browsers.
* But removing it would mean breaking code that was able to run in
* PhantomJS 1.x, so this must be kept for backward compatibility.
*/
/* istanbul ignore next */
function polyfillBind (fn, ctx) {
function boundFn (a) {
var l = arguments.length;
return l
? l > 1
? fn.apply(ctx, arguments)
: fn.call(ctx, a)
: fn.call(ctx)
}
boundFn._length = fn.length;
return boundFn
}
function nativeBind (fn, ctx) {
return fn.bind(ctx)
}
var bind = Function.prototype.bind
? nativeBind
: polyfillBind;
/**
* Convert an Array-like object to a real Array.
*/
function toArray (list, start) {
start = start || 0;
var i = list.length - start;
var ret = new Array(i);
while (i--) {
ret[i] = list[i + start];
}
return ret
}
/**
* Mix properties into target object.
*/
function extend (to, _from) {
for (var key in _from) {
to[key] = _from[key];
}
return to
}
/**
* Merge an Array of Objects into a single Object.
*/
function toObject (arr) {
var res = {};
for (var i = 0; i < arr.length; i++) {
if (arr[i]) {
extend(res, arr[i]);
}
}
return res
}
/* eslint-disable no-unused-vars */
/**
* Perform no operation.
* Stubbing args to make Flow happy without leaving useless transpiled code
* with ...rest (https://flow.org/blog/2017/05/07/Strict-Function-Call-Arity/).
*/
function noop (a, b, c) {}
/**
* Always return false.
*/
var no = function (a, b, c) { return false; };
/* eslint-enable no-unused-vars */
/**
* Return the same value.
*/
var identity = function (_) { return _; };
/**
* Generate a string containing static keys from compiler modules.
*/
function genStaticKeys (modules) {
return modules.reduce(function (keys, m) {
return keys.concat(m.staticKeys || [])
}, []).join(',')
}
/**
* Check if two values are loosely equal - that is,
* if they are plain objects, do they have the same shape?
*/
function looseEqual (a, b) {
if (a === b) { return true }
var isObjectA = isObject(a);
var isObjectB = isObject(b);
if (isObjectA && isObjectB) {
try {
var isArrayA = Array.isArray(a);
var isArrayB = Array.isArray(b);
if (isArrayA && isArrayB) {
return a.length === b.length && a.every(function (e, i) {
return looseEqual(e, b[i])
})
} else if (a instanceof Date && b instanceof Date) {
return a.getTime() === b.getTime()
} else if (!isArrayA && !isArrayB) {
var keysA = Object.keys(a);
var keysB = Object.keys(b);
return keysA.length === keysB.length && keysA.every(function (key) {
return looseEqual(a[key], b[key])
})
} else {
/* istanbul ignore next */
return false
}
} catch (e) {
/* istanbul ignore next */
return false
}
} else if (!isObjectA && !isObjectB) {
return String(a) === String(b)
} else {
return false
}
}
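// Illustrative looseEqual results (sketch):
//   looseEqual({ a: 1 }, { a: 1 })  // => true (same shape)
//   looseEqual([1, [2]], [1, [2]])  // => true (deep array compare)
//   looseEqual(1, '1')              // => true (String(a) === String(b))
//   looseEqual({ a: 1 }, [1])       // => false (object vs. array)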
/**
* Return the first index at which a loosely equal value can be
* found in the array (if value is a plain object, the array must
* contain an object of the same shape), or -1 if it is not present.
*/
function looseIndexOf (arr, val) {
for (var i = 0; i < arr.length; i++) {
if (looseEqual(arr[i], val)) { return i }
}
return -1
}
/**
* Ensure a function is called only once.
*/
function once (fn) {
var called = false;
return function () {
if (!called) {
called = true;
fn.apply(this, arguments);
}
}
}
var SSR_ATTR = 'data-server-rendered';
var ASSET_TYPES = [
'component',
'directive',
'filter'
];
var LIFECYCLE_HOOKS = [
'beforeCreate',
'created',
'beforeMount',
'mounted',
'beforeUpdate',
'updated',
'beforeDestroy',
'destroyed',
'activated',
'deactivated',
'errorCaptured',
'serverPrefetch'
];
/* */
var config = ({
/**
* Option merge strategies (used in core/util/options)
*/
// $flow-disable-line
optionMergeStrategies: Object.create(null),
/**
* Whether to suppress warnings.
*/
silent: false,
/**
* Show production mode tip message on boot?
*/
productionTip: "development" !== 'production',
/**
* Whether to enable devtools
*/
devtools: "development" !== 'production',
/**
* Whether to record perf
*/
performance: false,
/**
* Error handler for watcher errors
*/
errorHandler: null,
/**
* Warn handler for watcher warns
*/
warnHandler: null,
/**
* Ignore certain custom elements
*/
ignoredElements: [],
/**
* Custom user key aliases for v-on
*/
// $flow-disable-line
keyCodes: Object.create(null),
/**
* Check if a tag is reserved so that it cannot be registered as a
* component. This is platform-dependent and may be overwritten.
*/
isReservedTag: no,
/**
* Check if an attribute is reserved so that it cannot be used as a component
* prop. This is platform-dependent and may be overwritten.
*/
isReservedAttr: no,
/**
* Check if a tag is an unknown element.
* Platform-dependent.
*/
isUnknownElement: no,
/**
* Get the namespace of an element
*/
getTagNamespace: noop,
/**
* Parse the real tag name for the specific platform.
*/
parsePlatformTagName: identity,
/**
* Check if an attribute must be bound using property, e.g. value
* Platform-dependent.
*/
mustUseProp: no,
/**
* Perform updates asynchronously. Intended to be used by Vue Test Utils.
* This will significantly reduce performance if set to false.
*/
async: true,
/**
* Exposed for legacy reasons
*/
_lifecycleHooks: LIFECYCLE_HOOKS
});
/* */
/**
* Unicode letters used for parsing HTML tags, component names, and property paths.
* Using https://www.w3.org/TR/html53/semantics-scripting.html#potentialcustomelementname
* Skipping \u10000-\uEFFFF because it freezes up PhantomJS
*/
var unicodeRegExp = /a-zA-Z\u00B7\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u037D\u037F-\u1FFF\u200C-\u200D\u203F-\u2040\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD/;
/**
* Check if a string starts with $ or _
*/
function isReserved (str) {
var c = (str + '').charCodeAt(0);
return c === 0x24 || c === 0x5F
}
/**
* Define a property.
*/
function def (obj, key, val, enumerable) {
Object.defineProperty(obj, key, {
value: val,
enumerable: !!enumerable,
writable: true,
configurable: true
});
}
/**
* Parse simple path.
*/
var bailRE = new RegExp(("[^" + (unicodeRegExp.source) + ".$_\\d]"));
function parsePath (path) {
if (bailRE.test(path)) {
return
}
var segments = path.split('.');
return function (obj) {
for (var i = 0; i < segments.length; i++) {
if (!obj) { return }
obj = obj[segments[i]];
}
return obj
}
}
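// Illustrative parsePath usage (sketch):
//   var getter = parsePath('a.b.c');
//   getter({ a: { b: { c: 1 } } })  // => 1
//   parsePath('a-b')                // => undefined (bails on invalid chars)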
/* */
// can we use __proto__?
var hasProto = '__proto__' in {};
// Browser environment sniffing
var inBrowser = typeof window !== 'undefined';
var inWeex = typeof WXEnvironment !== 'undefined' && !!WXEnvironment.platform;
var weexPlatform = inWeex && WXEnvironment.platform.toLowerCase();
var UA = inBrowser && window.navigator.userAgent.toLowerCase();
var isIE = UA && /msie|trident/.test(UA);
var isIE9 = UA && UA.indexOf('msie 9.0') > 0;
var isEdge = UA && UA.indexOf('edge/') > 0;
var isAndroid = (UA && UA.indexOf('android') > 0) || (weexPlatform === 'android');
var isIOS = (UA && /iphone|ipad|ipod|ios/.test(UA)) || (weexPlatform === 'ios');
var isChrome = UA && /chrome\/\d+/.test(UA) && !isEdge;
var isPhantomJS = UA && /phantomjs/.test(UA);
var isFF = UA && UA.match(/firefox\/(\d+)/);
// Firefox has a "watch" function on Object.prototype...
var nativeWatch = ({}).watch;
var supportsPassive = false;
if (inBrowser) {
try {
var opts = {};
Object.defineProperty(opts, 'passive', ({
get: function get () {
/* istanbul ignore next */
supportsPassive = true;
}
})); // https://github.com/facebook/flow/issues/285
window.addEventListener('test-passive', null, opts);
} catch (e) {}
}
// this needs to be lazy-evaled because vue may be required before
// vue-server-renderer can set VUE_ENV
var _isServer;
var isServerRendering = function () {
if (_isServer === undefined) {
/* istanbul ignore if */
if (!inBrowser && !inWeex && typeof __webpack_require__.g !== 'undefined') {
// detect presence of vue-server-renderer and avoid
// Webpack shimming the process
_isServer = __webpack_require__.g['process'] && __webpack_require__.g['process'].env.VUE_ENV === 'server';
} else {
_isServer = false;
}
}
return _isServer
};
// detect devtools
var devtools = inBrowser && window.__VUE_DEVTOOLS_GLOBAL_HOOK__;
/* istanbul ignore next */
function isNative (Ctor) {
return typeof Ctor === 'function' && /native code/.test(Ctor.toString())
}
var hasSymbol =
typeof Symbol !== 'undefined' && isNative(Symbol) &&
typeof Reflect !== 'undefined' && isNative(Reflect.ownKeys);
var _Set;
/* istanbul ignore if */ // $flow-disable-line
if (typeof Set !== 'undefined' && isNative(Set)) {
// use native Set when available.
_Set = Set;
} else {
// a non-standard Set polyfill that only works with primitive keys.
_Set = /*@__PURE__*/(function () {
function Set () {
this.set = Object.create(null);
}
Set.prototype.has = function has (key) {
return this.set[key] === true
};
Set.prototype.add = function add (key) {
this.set[key] = true;
};
Set.prototype.clear = function clear () {
this.set = Object.create(null);
};
return Set;
}());
}
/* */
var warn = noop;
var tip = noop;
var generateComponentTrace = (noop); // work around flow check
var formatComponentName = (noop);
{
var hasConsole = typeof console !== 'undefined';
var classifyRE = /(?:^|[-_])(\w)/g;
var classify = function (str) { return str
.replace(classifyRE, function (c) { return c.toUpperCase(); })
.replace(/[-_]/g, ''); };
warn = function (msg, vm) {
var trace = vm ? generateComponentTrace(vm) : '';
if (config.warnHandler) {
config.warnHandler.call(null, msg, vm, trace);
} else if (hasConsole && (!config.silent)) {
console.error(("[Vue warn]: " + msg + trace));
}
};
tip = function (msg, vm) {
if (hasConsole && (!config.silent)) {
console.warn("[Vue tip]: " + msg + (
vm ? generateComponentTrace(vm) : ''
));
}
};
formatComponentName = function (vm, includeFile) {
if (vm.$root === vm) {
return '<Root>'
}
var options = typeof vm === 'function' && vm.cid != null
? vm.options
: vm._isVue
? vm.$options || vm.constructor.options
: vm;
var name = options.name || options._componentTag;
var file = options.__file;
if (!name && file) {
var match = file.match(/([^/\\]+)\.vue$/);
name = match && match[1];
}
return (
(name ? ("<" + (classify(name)) + ">") : "<Anonymous>") +
(file && includeFile !== false ? (" at " + file) : '')
)
};
var repeat = function (str, n) {
var res = '';
while (n) {
if (n % 2 === 1) { res += str; }
if (n > 1) { str += str; }
n >>= 1;
}
return res
};
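// repeat doubles str while halving n, so it needs only O(log n) appends:
//   repeat('ab', 3) // => 'ababab'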
generateComponentTrace = function (vm) {
if (vm._isVue && vm.$parent) {
var tree = [];
var currentRecursiveSequence = 0;
while (vm) {
if (tree.length > 0) {
var last = tree[tree.length - 1];
if (last.constructor === vm.constructor) {
currentRecursiveSequence++;
vm = vm.$parent;
continue
} else if (currentRecursiveSequence > 0) {
tree[tree.length - 1] = [last, currentRecursiveSequence];
currentRecursiveSequence = 0;
}
}
tree.push(vm);
vm = vm.$parent;
}
return '\n\nfound in\n\n' + tree
.map(function (vm, i) { return ("" + (i === 0 ? '---> ' : repeat(' ', 5 + i * 2)) + (Array.isArray(vm)
? ((formatComponentName(vm[0])) + "... (" + (vm[1]) + " recursive calls)")
: formatComponentName(vm))); })
.join('\n')
} else {
return ("\n\n(found in " + (formatComponentName(vm)) + ")")
}
};
}
/* */
var uid = 0;
/**
* A dep is an observable that can have multiple
* directives subscribing to it.
*/
var Dep = function Dep () {
this.id = uid++;
this.subs = [];
};
Dep.prototype.addSub = function addSub (sub) {
this.subs.push(sub);
};
Dep.prototype.removeSub = function removeSub (sub) {
remove(this.subs, sub);
};
Dep.prototype.depend = function depend () {
if (Dep.target) {
Dep.target.addDep(this);
}
};
Dep.prototype.notify = function notify () {
// stabilize the subscriber list first
var subs = this.subs.slice();
if (!config.async) {
// subs aren't sorted in the scheduler if not running async;
// we need to sort them now to make sure they fire in the
// correct order
subs.sort(function (a, b) { return a.id - b.id; });
}
for (var i = 0, l = subs.length; i < l; i++) {
subs[i].update();
}
};
// The current target watcher being evaluated.
// This is globally unique because only one watcher
// can be evaluated at a time.
Dep.target = null;
var targetStack = [];
function pushTarget (target) {
targetStack.push(target);
Dep.target = target;
}
function popTarget () {
targetStack.pop();
Dep.target = targetStack[targetStack.length - 1];
}
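// Sketch of the collection cycle these pieces implement (simplified):
//   pushTarget(watcher);  // watcher becomes Dep.target
//   dep.depend();         // a reactive getter runs -> watcher.addDep(dep)
//   popTarget();          // restore the previous watcher
//   dep.notify();         // a setter runs -> every subscriber's update() fires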
/* */
var VNode = function VNode (
tag,
data,
children,
text,
elm,
context,
componentOptions,
asyncFactory
) {
this.tag = tag;
this.data = data;
this.children = children;
this.text = text;
this.elm = elm;
this.ns = undefined;
this.context = context;
this.fnContext = undefined;
this.fnOptions = undefined;
this.fnScopeId = undefined;
this.key = data && data.key;
this.componentOptions = componentOptions;
this.componentInstance = undefined;
this.parent = undefined;
this.raw = false;
this.isStatic = false;
this.isRootInsert = true;
this.isComment = false;
this.isCloned = false;
this.isOnce = false;
this.asyncFactory = asyncFactory;
this.asyncMeta = undefined;
this.isAsyncPlaceholder = false;
};
var prototypeAccessors = { child: { configurable: true } };
// DEPRECATED: alias for componentInstance for backwards compat.
/* istanbul ignore next */
prototypeAccessors.child.get = function () {
return this.componentInstance
};
Object.defineProperties( VNode.prototype, prototypeAccessors );
var createEmptyVNode = function (text) {
if ( text === void 0 ) text = '';
var node = new VNode();
node.text = text;
node.isComment = true;
return node
};
function createTextVNode (val) {
return new VNode(undefined, undefined, undefined, String(val))
}
// optimized shallow clone
// used for static nodes and slot nodes because they may be reused across
// multiple renders; cloning them avoids errors when DOM manipulations rely
// on their elm reference.
function cloneVNode (vnode) {
var cloned = new VNode(
vnode.tag,
vnode.data,
// #7975
// clone children array to avoid mutating original in case of cloning
// a child.
vnode.children && vnode.children.slice(),
vnode.text,
vnode.elm,
vnode.context,
vnode.componentOptions,
vnode.asyncFactory
);
cloned.ns = vnode.ns;
cloned.isStatic = vnode.isStatic;
cloned.key = vnode.key;
cloned.isComment = vnode.isComment;
cloned.fnContext = vnode.fnContext;
cloned.fnOptions = vnode.fnOptions;
cloned.fnScopeId = vnode.fnScopeId;
cloned.asyncMeta = vnode.asyncMeta;
cloned.isCloned = true;
return cloned
}
/*
* not type checking this file because flow doesn't play well with
* dynamically accessing methods on Array prototype
*/
var arrayProto = Array.prototype;
var arrayMethods = Object.create(arrayProto);
var methodsToPatch = [
'push',
'pop',
'shift',
'unshift',
'splice',
'sort',
'reverse'
];
/**
* Intercept mutating methods and emit events
*/
methodsToPatch.forEach(function (method) {
// cache original method
var original = arrayProto[method];
def(arrayMethods, method, function mutator () {
var args = [], len = arguments.length;
while ( len-- ) args[ len ] = arguments[ len ];
var result = original.apply(this, args);
var ob = this.__ob__;
var inserted;
switch (method) {
case 'push':
case 'unshift':
inserted = args;
break
case 'splice':
inserted = args.slice(2);
break
}
if (inserted) { ob.observeArray(inserted); }
// notify change
ob.dep.notify();
return result
});
});
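// Illustrative effect of the patched methods (sketch, assuming an observed array):
//   var arr = [1];
//   observe(arr);  // defined below; attaches an Observer and patches arr
//   arr.push(2);   // mutator observes the inserted [2], then ob.dep.notify()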
/* */
var arrayKeys = Object.getOwnPropertyNames(arrayMethods);
/**
* In some cases we may want to disable observation inside a component's
* update computation.
*/
var shouldObserve = true;
function toggleObserving (value) {
shouldObserve = value;
}
/**
* Observer class that is attached to each observed
* object. Once attached, the observer converts the target
* object's property keys into getter/setters that
* collect dependencies and dispatch updates.
*/
var Observer = function Observer (value) {
this.value = value;
this.dep = new Dep();
this.vmCount = 0;
def(value, '__ob__', this);
if (Array.isArray(value)) {
if (hasProto) {
protoAugment(value, arrayMethods);
} else {
copyAugment(value, arrayMethods, arrayKeys);
}
this.observeArray(value);
} else {
this.walk(value);
}
};
/**
* Walk through all properties and convert them into
* getter/setters. This method should only be called when
* value type is Object.
*/
Observer.prototype.walk = function walk (obj) {
var keys = Object.keys(obj);
for (var i = 0; i < keys.length; i++) {
defineReactive$$1(obj, keys[i]);
}
};
/**
* Observe a list of Array items.
*/
Observer.prototype.observeArray = function observeArray (items) {
for (var i = 0, l = items.length; i < l; i++) {
observe(items[i]);
}
};
// helpers
/**
* Augment a target Object or Array by intercepting
* the prototype chain using __proto__
*/
function protoAugment (target, src) {
/* eslint-disable no-proto */
target.__proto__ = src;
/* eslint-enable no-proto */
}
/**
* Augment a target Object or Array by defining
* hidden properties.
*/
/* istanbul ignore next */
function copyAugment (target, src, keys) {
for (var i = 0, l = keys.length; i < l; i++) {
var key = keys[i];
def(target, key, src[key]);
}
}
/**
* Attempt to create an observer instance for a value,
* returns the new observer if successfully observed,
* or the existing observer if the value already has one.
*/
function observe (value, asRootData) {
if (!isObject(value) || value instanceof VNode) {
return
}
var ob;
if (hasOwn(value, '__ob__') && value.__ob__ instanceof Observer) {
ob = value.__ob__;
} else if (
shouldObserve &&
!isServerRendering() &&
(Array.isArray(value) || isPlainObject(value)) &&
Object.isExtensible(value) &&
!value._isVue
) {
ob = new Observer(value);
}
if (asRootData && ob) {
ob.vmCount++;
}
return ob
}
/**
* Define a reactive property on an Object.
*/
function defineReactive$$1 (
obj,
key,
val,
customSetter,
shallow
) {
var dep = new Dep();
var property = Object.getOwnPropertyDescriptor(obj, key);
if (property && property.configurable === false) {
return
}
// cater for pre-defined getter/setters
var getter = property && property.get;
var setter = property && property.set;
if ((!getter || setter) && arguments.length === 2) {
val = obj[key];
}
var childOb = !shallow && observe(val);
Object.defineProperty(obj, key, {
enumerable: true,
configurable: true,
get: function reactiveGetter () {
var value = getter ? getter.call(obj) : val;
if (Dep.target) {
dep.depend();
if (childOb) {
childOb.dep.depend();
if (Array.isArray(value)) {
dependArray(value);
}
}
}
return value
},
set: function reactiveSetter (newVal) {
var value = getter ? getter.call(obj) : val;
/* eslint-disable no-self-compare */
if (newVal === value || (newVal !== newVal && value !== value)) {
return
}
/* eslint-enable no-self-compare */
if (customSetter) {
customSetter();
}
// #7981: for accessor properties without setter
if (getter && !setter) { return }
if (setter) {
setter.call(obj, newVal);
} else {
val = newVal;
}
childOb = !shallow && observe(newVal);
dep.notify();
}
});
}
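// Illustrative defineReactive$$1 usage (sketch):
//   var obj = {};
//   defineReactive$$1(obj, 'msg', 'hi');
//   obj.msg;        // getter: dep.depend() while a watcher is Dep.target
//   obj.msg = 'yo'; // setter: observes the new value, then dep.notify()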
/**
* Set a property on an object. Adds the new property and
* triggers change notification if the property doesn't
* already exist.
*/
function set (target, key, val) {
if (isUndef(target) || isPrimitive(target)
) {
warn(("Cannot set reactive property on undefined, null, or primitive value: " + ((target))));
}
if (Array.isArray(target) && isValidArrayIndex(key)) {
target.length = Math.max(target.length, key);
target.splice(key, 1, val);
return val
}
if (key in target && !(key in Object.prototype)) {
target[key] = val;
return val
}
var ob = (target).__ob__;
if (target._isVue || (ob && ob.vmCount)) {
warn(
'Avoid adding reactive properties to a Vue instance or its root $data ' +
'at runtime - declare it upfront in the data option.'
);
return val
}
if (!ob) {
target[key] = val;
return val
}
defineReactive$$1(ob.value, key, val);
ob.dep.notify();
return val
}
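// Illustrative set() behavior (sketch; `state` is a hypothetical observed object):
//   set(state.list, 1, 'x');  // array path: splice(1, 1, 'x') stays reactive
//   set(state, 'age', 30);    // new key: defineReactive$$1 + ob.dep.notify()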
/**
* Delete a property and trigger change if necessary.
*/
function del (target, key) {
if (isUndef(target) || isPrimitive(target)
) {
warn(("Cannot delete reactive property on undefined, null, or primitive value: " + ((target))));
}
if (Array.isArray(target) && isValidArrayIndex(key)) {
target.splice(key, 1);
return
}
var ob = (target).__ob__;
if (target._isVue || (ob && ob.vmCount)) {
warn(
'Avoid deleting properties on a Vue instance or its root $data ' +
'- just set it to null.'
);
return
}
if (!hasOwn(target, key)) {
return
}
delete target[key];
if (!ob) {
return
}
ob.dep.notify();
}
/**
* Collect dependencies on array elements when the array is touched, since
* we cannot intercept array element access like property getters.
*/
function dependArray (value) {
for (var e = (void 0), i = 0, l = value.length; i < l; i++) {
e = value[i];
e && e.__ob__ && e.__ob__.dep.depend();
if (Array.isArray(e)) {
dependArray(e);
}
}
}
/* */
/**
* Option overwriting strategies are functions that handle
* how to merge a parent option value and a child option
* value into the final value.
*/
var strats = config.optionMergeStrategies;
/**
* Options with restrictions
*/
{
strats.el = strats.propsData = function (parent, child, vm, key) {
if (!vm) {
warn(
"option \"" + key + "\" can only be used during instance " +
'creation with the `new` keyword.'
);
}
return defaultStrat(parent, child)
};
}
/**
* Helper that recursively merges two data objects together.
*/
function mergeData (to, from) {
if (!from) { return to }
var key, toVal, fromVal;
var keys = hasSymbol
? Reflect.ownKeys(from)
: Object.keys(from);
for (var i = 0; i < keys.length; i++) {
key = keys[i];
// in case the object is already observed...
if (key === '__ob__') { continue }
toVal = to[key];
fromVal = from[key];
if (!hasOwn(to, key)) {
set(to, key, fromVal);
} else if (
toVal !== fromVal &&
isPlainObject(toVal) &&
isPlainObject(fromVal)
) {
mergeData(toVal, fromVal);
}
}
return to
}
/**
* Data
*/
function mergeDataOrFn (
parentVal,
childVal,
vm
) {
if (!vm) {
// in a Vue.extend merge, both should be functions
if (!childVal) {
return parentVal
}
if (!parentVal) {
return childVal
}
// when parentVal & childVal are both present,
// we need to return a function that returns the
// merged result of both functions... no need to
// check if parentVal is a function here because
// it has to be a function to pass previous merges.
return function mergedDataFn () {
return mergeData(
typeof childVal === 'function' ? childVal.call(this, this) : childVal,
typeof parentVal === 'function' ? parentVal.call(this, this) : parentVal
)
}
} else {
return function mergedInstanceDataFn () {
// instance merge
var instanceData = typeof childVal === 'function'
? childVal.call(vm, vm)
: childVal;
var defaultData = typeof parentVal === 'function'
? parentVal.call(vm, vm)
: parentVal;
if (instanceData) {
return mergeData(instanceData, defaultData)
} else {
return defaultData
}
}
}
}
strats.data = function (
parentVal,
childVal,
vm
) {
if (!vm) {
if (childVal && typeof childVal !== 'function') {
warn(
'The "data" option should be a function ' +
'that returns a per-instance value in component ' +
'definitions.',
vm
);
return parentVal
}
return mergeDataOrFn(parentVal, childVal)
}
return mergeDataOrFn(parentVal, childVal, vm)
};
/**
* Hooks and props are merged as arrays.
*/
function mergeHook (
parentVal,
childVal
) {
var res = childVal
? parentVal
? parentVal.concat(childVal)
: Array.isArray(childVal)
? childVal
: [childVal]
: parentVal;
return res
? dedupeHooks(res)
: res
}
function dedupeHooks (hooks) {
var res = [];
for (var i = 0; i < hooks.length; i++) {
if (res.indexOf(hooks[i]) === -1) {
res.push(hooks[i]);
}
}
return res
}
LIFECYCLE_HOOKS.forEach(function (hook) {
strats[hook] = mergeHook;
});
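// Illustrative hook merging (sketch; fnA/fnB are hypothetical hooks):
//   mergeHook([fnA], fnB)     // => [fnA, fnB]
//   mergeHook(undefined, fnB) // => [fnB]
// Duplicates are dropped by dedupeHooks.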
/**
* Assets
*
* When a vm is present (instance creation), we need to do
* a three-way merge between constructor options, instance
* options and parent options.
*/
function mergeAssets (
parentVal,
childVal,
vm,
key
) {
var res = Object.create(parentVal || null);
if (childVal) {
assertObjectType(key, childVal, vm);
return extend(res, childVal)
} else {
return res
}
}
ASSET_TYPES.forEach(function (type) {
strats[type + 's'] = mergeAssets;
});
/**
* Watchers.
*
* Watcher hashes should not overwrite one
* another, so we merge them as arrays.
*/
strats.watch = function (
parentVal,
childVal,
vm,
key
) {
// work around Firefox's Object.prototype.watch...
if (parentVal === nativeWatch) { parentVal = undefined; }
if (childVal === nativeWatch) { childVal = undefined; }
/* istanbul ignore if */
if (!childVal) { return Object.create(parentVal || null) }
{
assertObjectType(key, childVal, vm);
}
if (!parentVal) { return childVal }
var ret = {};
extend(ret, parentVal);
for (var key$1 in childVal) {
var parent = ret[key$1];
var child = childVal[key$1];
if (parent && !Array.isArray(parent)) {
parent = [parent];
}
ret[key$1] = parent
? parent.concat(child)
: Array.isArray(child) ? child : [child];
}
return ret
};
/**
* Other object hashes.
*/
strats.props =
strats.methods =
strats.inject =
strats.computed = function (
parentVal,
childVal,
vm,
key
) {
if (childVal && "development" !== 'production') {
assertObjectType(key, childVal, vm);
}
if (!parentVal) { return childVal }
var ret = Object.create(null);
extend(ret, parentVal);
if (childVal) { extend(ret, childVal); }
return ret
};
strats.provide = mergeDataOrFn;
/**
* Default strategy.
*/
var defaultStrat = function (parentVal, childVal) {
return childVal === undefined
? parentVal
: childVal
};
/**
* Validate component names
*/
function checkComponents (options) {
for (var key in options.components) {
validateComponentName(key);
}
}
function validateComponentName (name) {
if (!new RegExp(("^[a-zA-Z][\\-\\.0-9_" + (unicodeRegExp.source) + "]*$")).test(name)) {
warn(
'Invalid component name: "' + name + '". Component names ' +
'should conform to valid custom element name in html5 specification.'
);
}
if (isBuiltInTag(name) || config.isReservedTag(name)) {
warn(
'Do not use built-in or reserved HTML elements as component ' +
'id: ' + name
);
}
}
/**
* Ensure every props option syntax is normalized into the
* Object-based format.
*/
function normalizeProps (options, vm) {
var props = options.props;
if (!props) { return }
var res = {};
var i, val, name;
if (Array.isArray(props)) {
i = props.length;
while (i--) {
val = props[i];
if (typeof val === 'string') {
name = camelize(val);
res[name] = { type: null };
} else {
warn('props must be strings when using array syntax.');
}
}
} else if (isPlainObject(props)) {
for (var key in props) {
val = props[key];
name = camelize(key);
res[name] = isPlainObject(val)
? val
: { type: val };
}
} else {
warn(
"Invalid value for option \"props\": expected an Array or an Object, " +
"but got " + (toRawType(props)) + ".",
vm
);
}
options.props = res;
}
/**
* Normalize all injections into Object-based format
*/
function normalizeInject (options, vm) {
var inject = options.inject;
if (!inject) { return }
var normalized = options.inject = {};
if (Array.isArray(inject)) {
for (var i = 0; i < inject.length; i++) {
normalized[inject[i]] = { from: inject[i] };
}
} else if (isPlainObject(inject)) {
for (var key in inject) {
var val = inject[key];
normalized[key] = isPlainObject(val)
? extend({ from: key }, val)
: { from: val };
}
} else {
warn(
"Invalid value for option \"inject\": expected an Array or an Object, " +
"but got " + (toRawType(inject)) + ".",
vm
);
}
}
/**
* Normalize raw function directives into object format.
*/
function normalizeDirectives (options) {
var dirs = options.directives;
if (dirs) {
for (var key in dirs) {
var def$$1 = dirs[key];
if (typeof def$$1 === 'function') {
dirs[key] = { bind: def$$1, update: def$$1 };
}
}
}
}
function assertObjectType (name, value, vm) {
if (!isPlainObject(value)) {
warn(
"Invalid value for option \"" + name + "\": expected an Object, " +
"but got " + (toRawType(value)) + ".",
vm
);
}
}
/**
* Merge two option objects into a new one.
* Core utility used in both instantiation and inheritance.
*/
function mergeOptions (
parent,
child,
vm
) {
{
checkComponents(child);
}
if (typeof child === 'function') {
child = child.options;
}
normalizeProps(child, vm);
normalizeInject(child, vm);
normalizeDirectives(child);
// Apply extends and mixins on the child options,
// but only if it is a raw options object that isn't
// the result of another mergeOptions call.
// Only merged options have the _base property.
if (!child._base) {
if (child.extends) {
parent = mergeOptions(parent, child.extends, vm);
}
if (child.mixins) {
for (var i = 0, l = child.mixins.length; i < l; i++) {
parent = mergeOptions(parent, child.mixins[i], vm);
}
}
}
var options = {};
var key;
for (key in parent) {
mergeField(key);
}
for (key in child) {
if (!hasOwn(parent, key)) {
mergeField(key);
}
}
function mergeField (key) {
var strat = strats[key] || defaultStrat;
options[key] = strat(parent[key], child[key], vm, key);
}
return options
}
/**
* Resolve an asset.
* This function is used because child instances need access
* to assets defined in their ancestor chain.
*/
function resolveAsset (
options,
type,
id,
warnMissing
) {
/* istanbul ignore if */
if (typeof id !== 'string') {
return
}
var assets = options[type];
// check local registration variations first
if (hasOwn(assets, id)) { return assets[id] }
var camelizedId = camelize(id);
if (hasOwn(assets, camelizedId)) { return assets[camelizedId] }
var PascalCaseId = capitalize(camelizedId);
if (hasOwn(assets, PascalCaseId)) { return assets[PascalCaseId] }
// fallback to prototype chain
var res = assets[id] || assets[camelizedId] || assets[PascalCaseId];
if (warnMissing && !res) {
warn(
'Failed to resolve ' + type.slice(0, -1) + ': ' + id,
options
);
}
return res
}
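// Illustrative lookup order in resolveAsset (sketch; 'my-widget' is hypothetical):
//   resolveAsset(vm.$options, 'components', 'my-widget')
//   // tries 'my-widget', then 'myWidget', then 'MyWidget' as own keys,
//   // and finally the same three via the assets prototype chain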
/* */
function validateProp (
key,
propOptions,
propsData,
vm
) {
var prop = propOptions[key];
var absent = !hasOwn(propsData, key);
var value = propsData[key];
// boolean casting
var booleanIndex = getTypeIndex(Boolean, prop.type);
if (booleanIndex > -1) {
if (absent && !hasOwn(prop, 'default')) {
value = false;
} else if (value === '' || value === hyphenate(key)) {
// only cast empty string / same name to boolean if
// boolean has higher priority
var stringIndex = getTypeIndex(String, prop.type);
if (stringIndex < 0 || booleanIndex < stringIndex) {
value = true;
}
}
}
// check default value
if (value === undefined) {
value = getPropDefaultValue(vm, prop, key);
// since the default value is a fresh copy,
// make sure to observe it.
var prevShouldObserve = shouldObserve;
toggleObserving(true);
observe(value);
toggleObserving(prevShouldObserve);
}
{
assertProp(prop, key, value, vm, absent);
}
return value
}
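// Sketch of the boolean casting above for a { type: Boolean } prop:
//   attribute absent, no default     -> value becomes false
//   value '' or the hyphenated name  -> value becomes true
//   (unless String is listed before Boolean in the type array)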
/**
* Get the default value of a prop.
*/
function getPropDefaultValue (vm, prop, key) {
// no default, return undefined
if (!hasOwn(prop, 'default')) {
return undefined
}
var def = prop.default;
// warn against non-factory defaults for Object & Array
if (isObject(def)) {
warn(
'Invalid default value for prop "' + key + '": ' +
'Props with type Object/Array must use a factory function ' +
'to return the default value.',
vm
);
}
// if the raw prop value was also undefined in the previous render,
// return the previous default value to avoid an unnecessary watcher trigger
if (vm && vm.$options.propsData &&
vm.$options.propsData[key] === undefined &&
vm._props[key] !== undefined
) {
return vm._props[key]
}
// call factory function for non-Function types
// a value is a Function if its prototype is function, even across different execution contexts
return typeof def === 'function' && getType(prop.type) !== 'Function'
? def.call(vm)
: def
}
/**
* Assert whether a prop is valid.
*/
function assertProp (
prop,
name,
value,
vm,
absent
) {
if (prop.required && absent) {
warn(
'Missing required prop: "' + name + '"',
vm
);
return
}
if (value == null && !prop.required) {
return
}
var type = prop.type;
var valid = !type || type === true;
var expectedTypes = [];
if (type) {
if (!Array.isArray(type)) {
type = [type];
}
for (var i = 0; i < type.length && !valid; i++) {
var assertedType = assertType(value, type[i], vm);
expectedTypes.push(assertedType.expectedType || '');
valid = assertedType.valid;
}
}
var haveExpectedTypes = expectedTypes.some(function (t) { return t; });
if (!valid && haveExpectedTypes) {
warn(
getInvalidTypeMessage(name, value, expectedTypes),
vm
);
return
}
var validator = prop.validator;
if (validator) {
if (!validator(value)) {
warn(
'Invalid prop: custom validator check failed for prop "' + name + '".',
vm
);
}
}
}
var simpleCheckRE = /^(String|Number|Boolean|Function|Symbol|BigInt)$/;
function assertType (value, type, vm) {
var valid;
var expectedType = getType(type);
if (simpleCheckRE.test(expectedType)) {
var t = typeof value;
valid = t === expectedType.toLowerCase();
// for primitive wrapper objects
if (!valid && t === 'object') {
valid = value instanceof type;
}
} else if (expectedType === 'Object') {
valid = isPlainObject(value);
} else if (expectedType === 'Array') {
valid = Array.isArray(value);
} else {
try {
valid = value instanceof type;
} catch (e) {
warn('Invalid prop type: "' + String(type) + '" is not a constructor', vm);
valid = false;
}
}
return {
valid: valid,
expectedType: expectedType
}
}
var functionTypeCheckRE = /^\s*function (\w+)/;
/**
* Use function string name to check built-in types,
* because a simple equality check will fail when running
* across different vms / iframes.
*/
function getType (fn) {
var match = fn && fn.toString().match(functionTypeCheckRE);
return match ? match[1] : ''
}
function isSameType (a, b) {
return getType(a) === getType(b)
}
function getTypeIndex (type, expectedTypes) {
if (!Array.isArray(expectedTypes)) {
return isSameType(expectedTypes, type) ? 0 : -1
}
for (var i = 0, len = expectedTypes.length; i < len; i++) {
if (isSameType(expectedTypes[i], type)) {
return i
}
}
return -1
}
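// Illustrative type-name checks (sketch):
//   getType(String)                           // => 'String' (iframe-safe)
//   getTypeIndex(Boolean, [String, Boolean])  // => 1
//   getTypeIndex(Number, String)              // => -1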
function getInvalidTypeMessage (name, value, expectedTypes) {
var message = "Invalid prop: type check failed for prop \"" + name + "\"." +
" Expected " + (expectedTypes.map(capitalize).join(', '));
var expectedType = expectedTypes[0];
var receivedType = toRawType(value);
// check if we need to specify expected value
if (
expectedTypes.length === 1 &&
isExplicable(expectedType) &&
isExplicable(typeof value) &&
!isBoolean(expectedType, receivedType)
) {
message += " with value " + (styleValue(value, expectedType));
}
message += ", got " + receivedType + " ";
// check if we need to specify received value
if (isExplicable(receivedType)) {
message += "with value " + (styleValue(value, receivedType)) + ".";
}
return message
}
function styleValue (value, type) {
if (type === 'String') {
return ("\"" + value + "\"")
} else if (type === 'Number') {
return ("" + (Number(value)))
} else {
return ("" + value)
}
}
var EXPLICABLE_TYPES = ['string', 'number', 'boolean'];
function isExplicable (value) {
return EXPLICABLE_TYPES.some(function (elem) { return value.toLowerCase() === elem; })
}
function isBoolean () {
var args = [], len = arguments.length;
while ( len-- ) args[ len ] = arguments[ len ];
return args.some(function (elem) { return elem.toLowerCase() === 'boolean'; })
}
/* */
function handleError (err, vm, info) {
// Deactivate deps tracking while processing error handler to avoid possible infinite rendering.
// See: https://github.com/vuejs/vuex/issues/1505
pushTarget();
try {
if (vm) {
var cur = vm;
while ((cur = cur.$parent)) {
var hooks = cur.$options.errorCaptured;
if (hooks) {
for (var i = 0; i < hooks.length; i++) {
try {
var capture = hooks[i].call(cur, err, vm, info) === false;
if (capture) { return }
} catch (e) {
globalHandleError(e, cur, 'errorCaptured hook');
}
}
}
}
}
globalHandleError(err, vm, info);
} finally {
popTarget();
}
}
function invokeWithErrorHandling (
handler,
context,
args,
vm,
info
) {
var res;
try {
res = args ? handler.apply(context, args) : handler.call(context);
if (res && !res._isVue && isPromise(res) && !res._handled) {
res.catch(function (e) { return handleError(e, vm, info + " (Promise/async)"); });
// issue #9511
// avoid catch triggering multiple times in nested calls
res._handled = true;
}
} catch (e) {
handleError(e, vm, info);
}
return res
}
function globalHandleError (err, vm, info) {
if (config.errorHandler) {
try {
return config.errorHandler.call(null, err, vm, info)
} catch (e) {
// if the user intentionally throws the original error in the handler,
// do not log it twice
if (e !== err) {
logError(e, null, 'config.errorHandler');
}
}
}
logError(err, vm, info);
}
function logError (err, vm, info) {
{
warn(("Error in " + info + ": \"" + (err.toString()) + "\""), vm);
}
/* istanbul ignore else */
if ((inBrowser || inWeex) && typeof console !== 'undefined') {
console.error(err);
} else {
throw err
}
}
/* */
var isUsingMicroTask = false;
var callbacks = [];
var pending = false;
function flushCallbacks () {
pending = false;
var copies = callbacks.slice(0);
callbacks.length = 0;
for (var i = 0; i < copies.length; i++) {
copies[i]();
}
}
// Here we have async deferring wrappers using microtasks.
// In 2.5 we used (macro) tasks (in combination with microtasks).
// However, it has subtle problems when state is changed right before repaint
// (e.g. #6813, out-in transitions).
// Also, using (macro) tasks in event handler would cause some weird behaviors
// that cannot be circumvented (e.g. #7109, #7153, #7546, #7834, #8109).
// So we now use microtasks everywhere, again.
// A major drawback of this tradeoff is that there are some scenarios
// where microtasks have too high a priority and fire in between supposedly
// sequential events (e.g. #4521, #6690, which have workarounds)
// or even between bubbling of the same event (#6566).
var timerFunc;
// The nextTick behavior leverages the microtask queue, which can be accessed
// via either native Promise.then or MutationObserver.
// MutationObserver has wider support, however it is seriously bugged in
// UIWebView in iOS >= 9.3.3 when triggered in touch event handlers. It
// completely stops working after triggering a few times... so, if native
// Promise is available, we will use it:
/* istanbul ignore next, $flow-disable-line */
if (typeof Promise !== 'undefined' && isNative(Promise)) {
var p = Promise.resolve();
timerFunc = function () {
p.then(flushCallbacks);
// In problematic UIWebViews, Promise.then doesn't completely break, but
// it can get stuck in a weird state where callbacks are pushed into the
// microtask queue but the queue isn't being flushed, until the browser
// needs to do some other work, e.g. handle a timer. Therefore we can
// "force" the microtask queue to be flushed by adding an empty timer.
if (isIOS) { setTimeout(noop); }
};
isUsingMicroTask = true;
} else if (!isIE && typeof MutationObserver !== 'undefined' && (
isNative(MutationObserver) ||
// PhantomJS and iOS 7.x
MutationObserver.toString() === '[object MutationObserverConstructor]'
)) {
// Use MutationObserver where native Promise is not available,
// e.g. PhantomJS, iOS7, Android 4.4
// (#6466 MutationObserver is unreliable in IE11)
var counter = 1;
var observer = new MutationObserver(flushCallbacks);
var textNode = document.createTextNode(String(counter));
observer.observe(textNode, {
characterData: true
});
timerFunc = function () {
counter = (counter + 1) % 2;
textNode.data = String(counter);
};
isUsingMicroTask = true;
} else if (typeof setImmediate !== 'undefined' && isNative(setImmediate)) {
// Fallback to setImmediate.
// Technically it leverages the (macro) task queue,
// but it is still a better choice than setTimeout.
timerFunc = function () {
setImmediate(flushCallbacks);
};
} else {
// Fallback to setTimeout.
timerFunc = function () {
setTimeout(flushCallbacks, 0);
};
}
function nextTick (cb, ctx) {
var _resolve;
callbacks.push(function () {
if (cb) {
try {
cb.call(ctx);
} catch (e) {
handleError(e, ctx, 'nextTick');
}
} else if (_resolve) {
_resolve(ctx);
}
});
if (!pending) {
pending = true;
timerFunc();
}
// $flow-disable-line
if (!cb && typeof Promise !== 'undefined') {
return new Promise(function (resolve) {
_resolve = resolve;
})
}
}
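// Illustrative nextTick usage (sketch; `vm` is a hypothetical instance):
//   nextTick(function () { /* DOM has been patched */ }, vm);
//   nextTick().then(function () { /* promise form when no callback is given */ });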
/* */
var mark;
var measure;
{
var perf = inBrowser && window.performance;
/* istanbul ignore if */
if (
perf &&
perf.mark &&
perf.measure &&
perf.clearMarks &&
perf.clearMeasures
) {
mark = function (tag) { return perf.mark(tag); };
measure = function (name, startTag, endTag) {
perf.measure(name, startTag, endTag);
perf.clearMarks(startTag);
perf.clearMarks(endTag);
// perf.clearMeasures(name)
};
}
}
/* not type checking this file because flow doesn't play well with Proxy */
var initProxy;
{
var allowedGlobals = makeMap(
'Infinity,undefined,NaN,isFinite,isNaN,' +
'parseFloat,parseInt,decodeURI,decodeURIComponent,encodeURI,encodeURIComponent,' +
'Math,Number,Date,Array,Object,Boolean,String,RegExp,Map,Set,JSON,Intl,BigInt,' +
'require' // for Webpack/Browserify
);
var warnNonPresent = function (target, key) {
warn(
"Property or method \"" + key + "\" is not defined on the instance but " +
'referenced during render. Make sure that this property is reactive, ' +
'either in the data option, or for class-based components, by ' +
'initializing the property. ' +
'See: https://vuejs.org/v2/guide/reactivity.html#Declaring-Reactive-Properties.',
target
);
};
var warnReservedPrefix = function (target, key) {
warn(
"Property \"" + key + "\" must be accessed with \"$data." + key + "\" because " +
'properties starting with "$" or "_" are not proxied in the Vue instance to ' +
'prevent conflicts with Vue internals. ' +
'See: https://vuejs.org/v2/api/#data',
target
);
};
var hasProxy =
typeof Proxy !== 'undefined' && isNative(Proxy);
if (hasProxy) {
var isBuiltInModifier = makeMap('stop,prevent,self,ctrl,shift,alt,meta,exact');
config.keyCodes = new Proxy(config.keyCodes, {
set: function set (target, key, value) {
if (isBuiltInModifier(key)) {
warn(("Avoid overwriting built-in modifier in config.keyCodes: ." + key));
return false
} else {
target[key] = value;
return true
}
}
});
}
var hasHandler = {
has: function has (target, key) {
var has = key in target;
var isAllowed = allowedGlobals(key) ||
(typeof key === 'string' && key.charAt(0) === '_' && !(key in target.$data));
if (!has && !isAllowed) {
if (key in target.$data) { warnReservedPrefix(target, key); }
else { warnNonPresent(target, key); }
}
return has || !isAllowed
}
};
var getHandler = {
get: function get (target, key) {
if (typeof key === 'string' && !(key in target)) {
if (key in target.$data) { warnReservedPrefix(target, key); }
else { warnNonPresent(target, key); }
}
return target[key]
}
};
initProxy = function initProxy (vm) {
if (hasProxy) {
// determine which proxy handler to use
var options = vm.$options;
var handlers = options.render && options.render._withStripped
? getHandler
: hasHandler;
vm._renderProxy = new Proxy(vm, handlers);
} else {
vm._renderProxy = vm;
}
};
}
/* */
var seenObjects = new _Set();
/**
* Recursively traverse an object to invoke all converted
* getters, so that every nested property inside the object
* is collected as a "deep" dependency.
*/
function traverse (val) {
_traverse(val, seenObjects);
seenObjects.clear();
}
function _traverse (val, seen) {
var i, keys;
var isA = Array.isArray(val);
if ((!isA && !isObject(val)) || Object.isFrozen(val) || val instanceof VNode) {
return
}
if (val.__ob__) {
var depId = val.__ob__.dep.id;
if (seen.has(depId)) {
return
}
seen.add(depId);
}
if (isA) {
i = val.length;
while (i--) { _traverse(val[i], seen); }
} else {
keys = Object.keys(val);
i = keys.length;
while (i--) { _traverse(val[keys[i]], seen); }
}
}
/* */
var normalizeEvent = cached(function (name) {
var passive = name.charAt(0) === '&';
name = passive ? name.slice(1) : name;
var once$$1 = name.charAt(0) === '~'; // Prefixed last, checked first
name = once$$1 ? name.slice(1) : name;
var capture = name.charAt(0) === '!';
name = capture ? name.slice(1) : name;
return {
name: name,
once: once$$1,
capture: capture,
passive: passive
}
});
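// Illustrative normalizeEvent decoding (sketch):
//   normalizeEvent('~!click')
//   // => { name: 'click', once: true, capture: true, passive: false }
//   normalizeEvent('&scroll')
//   // => { name: 'scroll', once: false, capture: false, passive: true }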
function createFnInvoker (fns, vm) {
function invoker () {
var arguments$1 = arguments;
var fns = invoker.fns;
if (Array.isArray(fns)) {
var cloned = fns.slice();
for (var i = 0; i < cloned.length; i++) {
invokeWithErrorHandling(cloned[i], null, arguments$1, vm, "v-on handler");
}
} else {
// return handler return value for single handlers
return invokeWithErrorHandling(fns, null, arguments, vm, "v-on handler")
}
}
invoker.fns = fns;
return invoker
}
function updateListeners (
on,
oldOn,
add,
remove$$1,
createOnceHandler,
vm
) {
var name, def$$1, cur, old, event;
for (name in on) {
def$$1 = cur = on[name];
old = oldOn[name];
event = normalizeEvent(name);
if (isUndef(cur)) {
warn(
"Invalid handler for event \"" + (event.name) + "\": got " + String(cur),
vm
);
} else if (isUndef(old)) {
if (isUndef(cur.fns)) {
cur = on[name] = createFnInvoker(cur, vm);
}
if (isTrue(event.once)) {
cur = on[name] = createOnceHandler(event.name, cur, event.capture);
}
add(event.name, cur, event.capture, event.passive, event.params);
} else if (cur !== old) {
old.fns = cur;
on[name] = old;
}
}
for (name in oldOn) {
if (isUndef(on[name])) {
event = normalizeEvent(name);
remove$$1(event.name, oldOn[name], event.capture);
}
}
}
/* */
function mergeVNodeHook (def, hookKey, hook) {
if (def instanceof VNode) {
def = def.data.hook || (def.data.hook = {});
}
var invoker;
var oldHook = def[hookKey];
function wrappedHook () {
hook.apply(this, arguments);
// important: remove merged hook to ensure it's called only once
// and prevent memory leak
remove(invoker.fns, wrappedHook);
}
if (isUndef(oldHook)) {
// no existing hook
invoker = createFnInvoker([wrappedHook]);
} else {
/* istanbul ignore if */
if (isDef(oldHook.fns) && isTrue(oldHook.merged)) {
// already a merged invoker
invoker = oldHook;
invoker.fns.push(wrappedHook);
} else {
// existing plain hook
invoker = createFnInvoker([oldHook, wrappedHook]);
}
}
invoker.merged = true;
def[hookKey] = invoker;
}
/* */
function extractPropsFromVNodeData (
data,
Ctor,
tag
) {
// we are only extracting raw values here.
// validation and default values are handled in the child
// component itself.
var propOptions = Ctor.options.props;
if (isUndef(propOptions)) {
return
}
var res = {};
var attrs = data.attrs;
var props = data.props;
if (isDef(attrs) || isDef(props)) {
for (var key in propOptions) {
var altKey = hyphenate(key);
{
var keyInLowerCase = key.toLowerCase();
if (
key !== keyInLowerCase &&
attrs && hasOwn(attrs, keyInLowerCase)
) {
tip(
"Prop \"" + keyInLowerCase + "\" is passed to component " +
(formatComponentName(tag || Ctor)) + ", but the declared prop name is" +
" \"" + key + "\". " +
"Note that HTML attributes are case-insensitive and camelCased " +
"props need to use their kebab-case equivalents when using in-DOM " +
"templates. You should probably use \"" + altKey + "\" instead of \"" + key + "\"."
);
}
}
checkProp(res, props, key, altKey, true) ||
checkProp(res, attrs, key, altKey, false);
}
}
return res
}
function checkProp (
res,
hash,
key,
altKey,
preserve
) {
if (isDef(hash)) {
if (hasOwn(hash, key)) {
res[key] = hash[key];
if (!preserve) {
delete hash[key];
}
return true
} else if (hasOwn(hash, altKey)) {
res[key] = hash[altKey];
if (!preserve) {
delete hash[altKey];
}
return true
}
}
return false
}
/* */
// The template compiler attempts to minimize the need for normalization by
// statically analyzing the template at compile time.
//
// For plain HTML markup, normalization can be completely skipped because the
// generated render function is guaranteed to return Array<VNode>. There are
// two cases where extra normalization is needed:
// 1. When the children contain components - because a functional component
// may return an Array instead of a single root. In this case, just a simple
// normalization is needed - if any child is an Array, we flatten the whole
// thing with Array.prototype.concat. It is guaranteed to be only 1-level deep
// because functional components already normalize their own children.
function simpleNormalizeChildren (children) {
for (var i = 0; i < children.length; i++) {
if (Array.isArray(children[i])) {
return Array.prototype.concat.apply([], children)
}
}
return children
}
// 2. When the children contain constructs that always generate nested Arrays,
// e.g. <template>, <slot>, v-for, or when the children are provided by the user
// with hand-written render functions / JSX. In such cases a full normalization
// is needed to cater to all possible types of children values.
function normalizeChildren (children) {
return isPrimitive(children)
? [createTextVNode(children)]
: Array.isArray(children)
? normalizeArrayChildren(children)
: undefined
}
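// Illustrative normalization (sketch; a/b/c are hypothetical vnodes):
//   simpleNormalizeChildren([a, [b, c]]) // => [a, b, c] (one-level flatten)
//   normalizeChildren('hi')              // => [createTextVNode('hi')]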
function isTextNode (node) {
return isDef(node) && isDef(node.text) && isFalse(node.isComment)
}
function normalizeArrayChildren (children, nestedIndex) {
var res = [];
var i, c, lastIndex, last;
for (i = 0; i < children.length; i++) {
c = children[i];
if (isUndef(c) || typeof c === 'boolean') { continue }
lastIndex = res.length - 1;
last = res[lastIndex];
// nested
if (Array.isArray(c)) {
if (c.length > 0) {
c = normalizeArrayChildren(c, ((nestedIndex || '') + "_" + i));
// merge adjacent text nodes
if (isTextNode(c[0]) && isTextNode(last)) {
res[lastIndex] = createTextVNode(last.text + (c[0]).text);
c.shift();
}
res.push.apply(res, c);
}
} else if (isPrimitive(c)) {
if (isTextNode(last)) {
// merge adjacent text nodes
// this is necessary for SSR hydration because text nodes are
// essentially merged when rendered to HTML strings
res[lastIndex] = createTextVNode(last.text + c);
} else if (c !== '') {
// convert primitive to vnode
res.push(createTextVNode(c));
}
} else {
if (isTextNode(c) && isTextNode(last)) {
// merge adjacent text nodes
res[lastIndex] = createTextVNode(last.text + c.text);
} else {
// default key for nested array children (likely generated by v-for)
if (isTrue(children._isVList) &&
isDef(c.tag) &&
isUndef(c.key) &&
isDef(nestedIndex)) {
c.key = "__vlist" + nestedIndex + "_" + i + "__";
}
res.push(c);
}
}
}
return res
}
/* */
function initProvide (vm) {
var provide = vm.$options.provide;
if (provide) {
vm._provided = typeof provide === 'function'
? provide.call(vm)
: provide;
}
}
function initInjections (vm) {
var result = resolveInject(vm.$options.inject, vm);
if (result) {
toggleObserving(false);
Object.keys(result).forEach(function (key) {
/* istanbul ignore else */
{
defineReactive$$1(vm, key, result[key], function () {
warn(
"Avoid mutating an injected value directly since the changes will be " +
"overwritten whenever the provided component re-renders. " +
"injection being mutated: \"" + key + "\"",
vm
);
});
}
});
toggleObserving(true);
}
}
function resolveInject (inject, vm) {
if (inject) {
// inject is typed as :any because Flow is not smart enough to figure it out
var result = Object.create(null);
var keys = hasSymbol
? Reflect.ownKeys(inject)
: Object.keys(inject);
for (var i = 0; i < keys.length; i++) {
var key = keys[i];
// #6574 in case the inject object is observed...
if (key === '__ob__') { continue }
var provideKey = inject[key].from;
var source = vm;
while (source) {
if (source._provided && hasOwn(source._provided, provideKey)) {
result[key] = source._provided[provideKey];
break
}
source = source.$parent;
}
if (!source) {
if ('default' in inject[key]) {
var provideDefault = inject[key].default;
result[key] = typeof provideDefault === 'function'
? provideDefault.call(vm)
: provideDefault;
} else {
warn(("Injection \"" + key + "\" not found"), vm);
}
}
}
return result
}
}
/* */
/**
* Runtime helper for resolving raw children VNodes into a slot object.
*/
function resolveSlots (
children,
context
) {
if (!children || !children.length) {
return {}
}
var slots = {};
for (var i = 0, l = children.length; i < l; i++) {
var child = children[i];
var data = child.data;
// remove slot attribute if the node is resolved as a Vue slot node
if (data && data.attrs && data.attrs.slot) {
delete data.attrs.slot;
}
// named slots should only be respected if the vnode was rendered in the
// same context.
if ((child.context === context || child.fnContext === context) &&
data && data.slot != null
) {
var name = data.slot;
var slot = (slots[name] || (slots[name] = []));
if (child.tag === 'template') {
slot.push.apply(slot, child.children || []);
} else {
slot.push(child);
}
} else {
(slots.default || (slots.default = [])).push(child);
}
}
// ignore slots that contain only whitespace
for (var name$1 in slots) {
if (slots[name$1].every(isWhitespace)) {
delete slots[name$1];
}
}
return slots
}
function isWhitespace (node) {
return (node.isComment && !node.asyncFactory) || node.text === ' '
}
/* */
function isAsyncPlaceholder (node) {
return node.isComment && node.asyncFactory
}
/* */
function normalizeScopedSlots (
slots,
normalSlots,
prevSlots
) {
var res;
var hasNormalSlots = Object.keys(normalSlots).length > 0;
var isStable = slots ? !!slots.$stable : !hasNormalSlots;
var key = slots && slots.$key;
if (!slots) {
res = {};
} else if (slots._normalized) {
// fast path 1: child component re-render only, parent did not change
return slots._normalized
} else if (
isStable &&
prevSlots &&
prevSlots !== emptyObject &&
key === prevSlots.$key &&
!hasNormalSlots &&
!prevSlots.$hasNormal
) {
// fast path 2: stable scoped slots w/ no normal slots to proxy,
// only need to normalize once
return prevSlots
} else {
res = {};
for (var key$1 in slots) {
if (slots[key$1] && key$1[0] !== '$') {
res[key$1] = normalizeScopedSlot(normalSlots, key$1, slots[key$1]);
}
}
}
// expose normal slots on scopedSlots
for (var key$2 in normalSlots) {
if (!(key$2 in res)) {
res[key$2] = proxyNormalSlot(normalSlots, key$2);
}
}
// avoriaz mocks a non-extensible $scopedSlots object; writing _normalized
// onto it would throw, so guard with Object.isExtensible
if (slots && Object.isExtensible(slots)) {
(slots)._normalized = res;
}
def(res, '$stable', isStable);
def(res, '$key', key);
def(res, '$hasNormal', hasNormalSlots);
return res
}
function normalizeScopedSlot(normalSlots, key, fn) {
var normalized = function () {
var res = arguments.length ? fn.apply(null, arguments) : fn({});
res = res && typeof res === 'object' && !Array.isArray(res)
? [res] // single vnode
: normalizeChildren(res);
var vnode = res && res[0];
return res && (
!vnode ||
(res.length === 1 && vnode.isComment && !isAsyncPlaceholder(vnode)) // #9658, #10391
) ? undefined
: res
};
// this is a slot using the new v-slot syntax without scope. although it is
// compiled as a scoped slot, render fn users would expect it to be present
// on this.$slots because the usage is semantically a normal slot.
if (fn.proxy) {
Object.defineProperty(normalSlots, key, {
get: normalized,
enumerable: true,
configurable: true
});
}
return normalized
}
function proxyNormalSlot(slots, key) {
return function () { return slots[key]; }
}
/* */
/**
* Runtime helper for rendering v-for lists.
*/
function renderList (
val,
render
) {
var ret, i, l, keys, key;
if (Array.isArray(val) || typeof val === 'string') {
ret = new Array(val.length);
for (i = 0, l = val.length; i < l; i++) {
ret[i] = render(val[i], i);
}
} else if (typeof val === 'number') {
ret = new Array(val);
for (i = 0; i < val; i++) {
ret[i] = render(i + 1, i);
}
} else if (isObject(val)) {
if (hasSymbol && val[Symbol.iterator]) {
ret = [];
var iterator = val[Symbol.iterator]();
var result = iterator.next();
while (!result.done) {
ret.push(render(result.value, ret.length));
result = iterator.next();
}
} else {
keys = Object.keys(val);
ret = new Array(keys.length);
for (i = 0, l = keys.length; i < l; i++) {
key = keys[i];
ret[i] = render(val[key], key, i);
}
}
}
if (!isDef(ret)) {
ret = [];
}
(ret)._isVList = true;
return ret
}
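// Illustrative sketch: renderList accepts the v-for source types
// (array/string, number, iterable, plain object) and always returns a flat
// array marked with _isVList so default keys can be generated for children.
function exampleRenderList () {
  return renderList(3, function (n, i) { return createTextVNode(String(n)); });
  // -> three text vnodes "1", "2", "3"; the array has _isVList === true
}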
/* */
/**
* Runtime helper for rendering <slot>
*/
function renderSlot (
name,
fallbackRender,
props,
bindObject
) {
var scopedSlotFn = this.$scopedSlots[name];
var nodes;
if (scopedSlotFn) {
// scoped slot
props = props || {};
if (bindObject) {
if (!isObject(bindObject)) {
warn('slot v-bind without argument expects an Object', this);
}
props = extend(extend({}, bindObject), props);
}
nodes =
scopedSlotFn(props) ||
(typeof fallbackRender === 'function' ? fallbackRender() : fallbackRender);
} else {
nodes =
this.$slots[name] ||
(typeof fallbackRender === 'function' ? fallbackRender() : fallbackRender);
}
var target = props && props.slot;
if (target) {
return this.$createElement('template', { slot: target }, nodes)
} else {
return nodes
}
}
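// Illustrative sketch: renderSlot prefers a scoped slot function, then static
// $slots content, then the fallback. The fake component context below is
// hypothetical and only demonstrates the lookup order.
function exampleRenderSlot () {
  var fakeVm = { $scopedSlots: {}, $slots: {} };
  return renderSlot.call(fakeVm, 'default', function () { return ['fallback']; });
  // -> ['fallback'] (no scoped or static slot named "default" exists)
}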
/* */
/**
* Runtime helper for resolving filters
*/
function resolveFilter (id) {
return resolveAsset(this.$options, 'filters', id, true) || identity
}
/* */
function isKeyNotMatch (expect, actual) {
if (Array.isArray(expect)) {
return expect.indexOf(actual) === -1
} else {
return expect !== actual
}
}
/**
* Runtime helper for checking keyCodes from config.
* exposed as Vue.prototype._k
* passing in eventKeyName as last argument separately for backwards compat
*/
function checkKeyCodes (
eventKeyCode,
key,
builtInKeyCode,
eventKeyName,
builtInKeyName
) {
var mappedKeyCode = config.keyCodes[key] || builtInKeyCode;
if (builtInKeyName && eventKeyName && !config.keyCodes[key]) {
return isKeyNotMatch(builtInKeyName, eventKeyName)
} else if (mappedKeyCode) {
return isKeyNotMatch(mappedKeyCode, eventKeyCode)
} else if (eventKeyName) {
return hyphenate(eventKeyName) !== key
}
return eventKeyCode === undefined
}
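// Illustrative sketch: checkKeyCodes returns true when the event does NOT
// match, which is why compiled key modifiers guard with `if (_k(...)) return`.
function exampleCheckKeyCodes () {
  // pressing Enter while the template asked for @keyup.esc
  return checkKeyCodes(13, 'esc', 27, 'Enter', ['Esc', 'Escape']);
  // -> true, so the compiled handler bails out without running
}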
/* */
/**
* Runtime helper for merging v-bind="object" into a VNode's data.
*/
function bindObjectProps (
data,
tag,
value,
asProp,
isSync
) {
if (value) {
if (!isObject(value)) {
warn(
'v-bind without argument expects an Object or Array value',
this
);
} else {
if (Array.isArray(value)) {
value = toObject(value);
}
var hash;
var loop = function ( key ) {
if (
key === 'class' ||
key === 'style' ||
isReservedAttribute(key)
) {
hash = data;
} else {
var type = data.attrs && data.attrs.type;
hash = asProp || config.mustUseProp(tag, type, key)
? data.domProps || (data.domProps = {})
: data.attrs || (data.attrs = {});
}
var camelizedKey = camelize(key);
var hyphenatedKey = hyphenate(key);
if (!(camelizedKey in hash) && !(hyphenatedKey in hash)) {
hash[key] = value[key];
if (isSync) {
var on = data.on || (data.on = {});
on[("update:" + key)] = function ($event) {
value[key] = $event;
};
}
}
};
for (var key in value) loop( key );
}
}
return data
}
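// Illustrative sketch: v-bind="object" merges keys into data.attrs (or
// data.domProps, or data itself for class/style); explicit bindings that are
// already present win over the object spread.
function exampleBindObjectProps () {
  var data = { attrs: { id: 'explicit' } };
  bindObjectProps.call(null, data, 'div', { id: 'spread', title: 'hi' }, false);
  return data; // -> { attrs: { id: 'explicit', title: 'hi' } }
}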
/* */
/**
* Runtime helper for rendering static trees.
*/
function renderStatic (
index,
isInFor
) {
var cached = this._staticTrees || (this._staticTrees = []);
var tree = cached[index];
// if we already have a rendered static tree and are not inside v-for,
// we can reuse the same tree.
if (tree && !isInFor) {
return tree
}
// otherwise, render a fresh tree.
tree = cached[index] = this.$options.staticRenderFns[index].call(
this._renderProxy,
null,
this // for render fns generated for functional component templates
);
markStatic(tree, ("__static__" + index), false);
return tree
}
/**
* Runtime helper for v-once.
* Effectively it means marking the node as static with a unique key.
*/
function markOnce (
tree,
index,
key
) {
markStatic(tree, ("__once__" + index + (key ? ("_" + key) : "")), true);
return tree
}
function markStatic (
tree,
key,
isOnce
) {
if (Array.isArray(tree)) {
for (var i = 0; i < tree.length; i++) {
if (tree[i] && typeof tree[i] !== 'string') {
markStaticNode(tree[i], (key + "_" + i), isOnce);
}
}
} else {
markStaticNode(tree, key, isOnce);
}
}
function markStaticNode (node, key, isOnce) {
node.isStatic = true;
node.key = key;
node.isOnce = isOnce;
}
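// Illustrative sketch: markOnce stamps a tree with a "__once__" key so the
// patcher treats it as static and skips re-rendering. The plain object below
// is a stand-in for a vnode.
function exampleMarkOnce () {
  var vnode = {};
  markOnce(vnode, 0, 'row');
  return vnode; // -> { isStatic: true, key: '__once__0_row', isOnce: true }
}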
/* */
function bindObjectListeners (data, value) {
if (value) {
if (!isPlainObject(value)) {
warn(
'v-on without argument expects an Object value',
this
);
} else {
var on = data.on = data.on ? extend({}, data.on) : {};
for (var key in value) {
var existing = on[key];
var ours = value[key];
on[key] = existing ? [].concat(existing, ours) : ours;
}
}
}
return data
}
/* */
function resolveScopedSlots (
fns, // see flow/vnode
res,
// the following are added in 2.6
hasDynamicKeys,
contentHashKey
) {
res = res || { $stable: !hasDynamicKeys };
for (var i = 0; i < fns.length; i++) {
var slot = fns[i];
if (Array.isArray(slot)) {
resolveScopedSlots(slot, res, hasDynamicKeys);
} else if (slot) {
// marker for reverse proxying v-slot without scope on this.$slots
if (slot.proxy) {
slot.fn.proxy = true;
}
res[slot.key] = slot.fn;
}
}
if (contentHashKey) {
(res).$key = contentHashKey;
}
return res
}
/* */
function bindDynamicKeys (baseObj, values) {
for (var i = 0; i < values.length; i += 2) {
var key = values[i];
if (typeof key === 'string' && key) {
baseObj[values[i]] = values[i + 1];
} else if (key !== '' && key !== null) {
// null is a special value for explicitly removing a binding
warn(
("Invalid value for dynamic directive argument (expected string or null): " + key),
this
);
}
}
return baseObj
}
// helper to dynamically prepend modifier runtime markers to event names.
// only prepend when the value is already a string; otherwise it would be
// cast to a string and break the type check.
function prependModifier (value, symbol) {
return typeof value === 'string' ? symbol + value : value
}
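// Illustrative sketch: bindDynamicKeys folds [key, value, ...] pairs into the
// static binding object; a null key is the documented "remove this binding"
// signal and is skipped without a warning.
function exampleBindDynamicKeys () {
  return bindDynamicKeys.call(null, { staticKey: 1 }, ['dynKey', 2, null, 3]);
  // -> { staticKey: 1, dynKey: 2 }
}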
/* */
function installRenderHelpers (target) {
target._o = markOnce;
target._n = toNumber;
target._s = toString;
target._l = renderList;
target._t = renderSlot;
target._q = looseEqual;
target._i = looseIndexOf;
target._m = renderStatic;
target._f = resolveFilter;
target._k = checkKeyCodes;
target._b = bindObjectProps;
target._v = createTextVNode;
target._e = createEmptyVNode;
target._u = resolveScopedSlots;
target._g = bindObjectListeners;
target._d = bindDynamicKeys;
target._p = prependModifier;
}
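// For orientation (an approximation, not exact compiler output): a template
// like
//   <li v-for="n in 3" :key="n">{{ n }}</li>
// compiles to render code built from the helpers above (plus the
// instance-level _c from initRender), roughly:
//   _l(3, function (n) { return _c('li', { key: n }, [_v(_s(n))]) })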
/* */
function FunctionalRenderContext (
data,
props,
children,
parent,
Ctor
) {
var this$1 = this;
var options = Ctor.options;
// ensure the createElement function in functional components
// gets a unique context - this is necessary for correct named slot checks
var contextVm;
if (hasOwn(parent, '_uid')) {
contextVm = Object.create(parent);
// $flow-disable-line
contextVm._original = parent;
} else {
// the context vm passed in is a functional context as well.
// in this case we want to make sure we are able to get a hold to the
// real context instance.
contextVm = parent;
// $flow-disable-line
parent = parent._original;
}
var isCompiled = isTrue(options._compiled);
var needNormalization = !isCompiled;
this.data = data;
this.props = props;
this.children = children;
this.parent = parent;
this.listeners = data.on || emptyObject;
this.injections = resolveInject(options.inject, parent);
this.slots = function () {
if (!this$1.$slots) {
normalizeScopedSlots(
data.scopedSlots,
this$1.$slots = resolveSlots(children, parent)
);
}
return this$1.$slots
};
Object.defineProperty(this, 'scopedSlots', ({
enumerable: true,
get: function get () {
return normalizeScopedSlots(data.scopedSlots, this.slots())
}
}));
// support for compiled functional template
if (isCompiled) {
// exposing $options for renderStatic()
this.$options = options;
// pre-resolve slots for renderSlot()
this.$slots = this.slots();
this.$scopedSlots = normalizeScopedSlots(data.scopedSlots, this.$slots);
}
if (options._scopeId) {
this._c = function (a, b, c, d) {
var vnode = createElement(contextVm, a, b, c, d, needNormalization);
if (vnode && !Array.isArray(vnode)) {
vnode.fnScopeId = options._scopeId;
vnode.fnContext = parent;
}
return vnode
};
} else {
this._c = function (a, b, c, d) { return createElement(contextVm, a, b, c, d, needNormalization); };
}
}
installRenderHelpers(FunctionalRenderContext.prototype);
function createFunctionalComponent (
Ctor,
propsData,
data,
contextVm,
children
) {
var options = Ctor.options;
var props = {};
var propOptions = options.props;
if (isDef(propOptions)) {
for (var key in propOptions) {
props[key] = validateProp(key, propOptions, propsData || emptyObject);
}
} else {
if (isDef(data.attrs)) { mergeProps(props, data.attrs); }
if (isDef(data.props)) { mergeProps(props, data.props); }
}
var renderContext = new FunctionalRenderContext(
data,
props,
children,
contextVm,
Ctor
);
var vnode = options.render.call(null, renderContext._c, renderContext);
if (vnode instanceof VNode) {
return cloneAndMarkFunctionalResult(vnode, data, renderContext.parent, options, renderContext)
} else if (Array.isArray(vnode)) {
var vnodes = normalizeChildren(vnode) || [];
var res = new Array(vnodes.length);
for (var i = 0; i < vnodes.length; i++) {
res[i] = cloneAndMarkFunctionalResult(vnodes[i], data, renderContext.parent, options, renderContext);
}
return res
}
}
function cloneAndMarkFunctionalResult (vnode, data, contextVm, options, renderContext) {
// #7817 clone node before setting fnContext, otherwise if the node is reused
// (e.g. it was from a cached normal slot) the fnContext causes named slots
// that should not be matched to match.
var clone = cloneVNode(vnode);
clone.fnContext = contextVm;
clone.fnOptions = options;
{
(clone.devtoolsMeta = clone.devtoolsMeta || {}).renderContext = renderContext;
}
if (data.slot) {
(clone.data || (clone.data = {})).slot = data.slot;
}
return clone
}
function mergeProps (to, from) {
for (var key in from) {
to[camelize(key)] = from[key];
}
}
/* */
/* */
/* */
/* */
// inline hooks to be invoked on component VNodes during patch
var componentVNodeHooks = {
init: function init (vnode, hydrating) {
if (
vnode.componentInstance &&
!vnode.componentInstance._isDestroyed &&
vnode.data.keepAlive
) {
// kept-alive components, treat as a patch
var mountedNode = vnode; // work around flow
componentVNodeHooks.prepatch(mountedNode, mountedNode);
} else {
var child = vnode.componentInstance = createComponentInstanceForVnode(
vnode,
activeInstance
);
child.$mount(hydrating ? vnode.elm : undefined, hydrating);
}
},
prepatch: function prepatch (oldVnode, vnode) {
var options = vnode.componentOptions;
var child = vnode.componentInstance = oldVnode.componentInstance;
updateChildComponent(
child,
options.propsData, // updated props
options.listeners, // updated listeners
vnode, // new parent vnode
options.children // new children
);
},
insert: function insert (vnode) {
var context = vnode.context;
var componentInstance = vnode.componentInstance;
if (!componentInstance._isMounted) {
componentInstance._isMounted = true;
callHook(componentInstance, 'mounted');
}
if (vnode.data.keepAlive) {
if (context._isMounted) {
// vue-router#1212
// During updates, a kept-alive component's child components may
// change, so directly walking the tree here may call activated hooks
// on incorrect children. Instead we push them into a queue which will
// be processed after the whole patch process ended.
queueActivatedComponent(componentInstance);
} else {
activateChildComponent(componentInstance, true /* direct */);
}
}
},
destroy: function destroy (vnode) {
var componentInstance = vnode.componentInstance;
if (!componentInstance._isDestroyed) {
if (!vnode.data.keepAlive) {
componentInstance.$destroy();
} else {
deactivateChildComponent(componentInstance, true /* direct */);
}
}
}
};
var hooksToMerge = Object.keys(componentVNodeHooks);
function createComponent (
Ctor,
data,
context,
children,
tag
) {
if (isUndef(Ctor)) {
return
}
var baseCtor = context.$options._base;
// plain options object: turn it into a constructor
if (isObject(Ctor)) {
Ctor = baseCtor.extend(Ctor);
}
// if at this stage it's not a constructor or an async component factory,
// reject.
if (typeof Ctor !== 'function') {
{
warn(("Invalid Component definition: " + (String(Ctor))), context);
}
return
}
// async component
var asyncFactory;
if (isUndef(Ctor.cid)) {
asyncFactory = Ctor;
Ctor = resolveAsyncComponent(asyncFactory, baseCtor);
if (Ctor === undefined) {
// return a placeholder node for async component, which is rendered
// as a comment node but preserves all the raw information for the node.
// the information will be used for async server-rendering and hydration.
return createAsyncPlaceholder(
asyncFactory,
data,
context,
children,
tag
)
}
}
data = data || {};
// resolve constructor options in case global mixins are applied after
// component constructor creation
resolveConstructorOptions(Ctor);
// transform component v-model data into props & events
if (isDef(data.model)) {
transformModel(Ctor.options, data);
}
// extract props
var propsData = extractPropsFromVNodeData(data, Ctor, tag);
// functional component
if (isTrue(Ctor.options.functional)) {
return createFunctionalComponent(Ctor, propsData, data, context, children)
}
// extract listeners, since these need to be treated as
// child component listeners instead of DOM listeners
var listeners = data.on;
// replace them with listeners carrying the .native modifier
// so they get processed during the parent component patch.
data.on = data.nativeOn;
if (isTrue(Ctor.options.abstract)) {
// abstract components do not keep anything
// other than props & listeners & slot
// work around flow
var slot = data.slot;
data = {};
if (slot) {
data.slot = slot;
}
}
// install component management hooks onto the placeholder node
installComponentHooks(data);
// return a placeholder vnode
var name = Ctor.options.name || tag;
var vnode = new VNode(
("vue-component-" + (Ctor.cid) + (name ? ("-" + name) : '')),
data, undefined, undefined, undefined, context,
{ Ctor: Ctor, propsData: propsData, listeners: listeners, tag: tag, children: children },
asyncFactory
);
return vnode
}
function createComponentInstanceForVnode (
// we know it's MountedComponentVNode but flow doesn't
vnode,
// activeInstance in lifecycle state
parent
) {
var options = {
_isComponent: true,
_parentVnode: vnode,
parent: parent
};
// check inline-template render functions
var inlineTemplate = vnode.data.inlineTemplate;
if (isDef(inlineTemplate)) {
options.render = inlineTemplate.render;
options.staticRenderFns = inlineTemplate.staticRenderFns;
}
return new vnode.componentOptions.Ctor(options)
}
function installComponentHooks (data) {
var hooks = data.hook || (data.hook = {});
for (var i = 0; i < hooksToMerge.length; i++) {
var key = hooksToMerge[i];
var existing = hooks[key];
var toMerge = componentVNodeHooks[key];
if (existing !== toMerge && !(existing && existing._merged)) {
hooks[key] = existing ? mergeHook$1(toMerge, existing) : toMerge;
}
}
}
function mergeHook$1 (f1, f2) {
var merged = function (a, b) {
// flow complains about extra args which is why we use any
f1(a, b);
f2(a, b);
};
merged._merged = true;
return merged
}
// transform component v-model info (value and callback) into
// prop and event handler respectively.
function transformModel (options, data) {
var prop = (options.model && options.model.prop) || 'value';
var event = (options.model && options.model.event) || 'input'
;(data.attrs || (data.attrs = {}))[prop] = data.model.value;
var on = data.on || (data.on = {});
var existing = on[event];
var callback = data.model.callback;
if (isDef(existing)) {
if (
Array.isArray(existing)
? existing.indexOf(callback) === -1
: existing !== callback
) {
on[event] = [callback].concat(existing);
}
} else {
on[event] = callback;
}
}
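// Illustrative sketch: component v-model is sugar for a prop plus an event
// listener; with no custom `model` option it becomes value/input.
function exampleTransformModel () {
  var data = { model: { value: 'foo', callback: function () {} } };
  transformModel({}, data);
  return data; // -> data.attrs.value === 'foo', data.on.input === the callback
}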
/* */
var SIMPLE_NORMALIZE = 1;
var ALWAYS_NORMALIZE = 2;
// wrapper function for providing a more flexible interface
// without getting yelled at by flow
function createElement (
context,
tag,
data,
children,
normalizationType,
alwaysNormalize
) {
if (Array.isArray(data) || isPrimitive(data)) {
normalizationType = children;
children = data;
data = undefined;
}
if (isTrue(alwaysNormalize)) {
normalizationType = ALWAYS_NORMALIZE;
}
return _createElement(context, tag, data, children, normalizationType)
}
function _createElement (
context,
tag,
data,
children,
normalizationType
) {
if (isDef(data) && isDef((data).__ob__)) {
warn(
"Avoid using observed data object as vnode data: " + (JSON.stringify(data)) + "\n" +
'Always create fresh vnode data objects in each render!',
context
);
return createEmptyVNode()
}
// object syntax in v-bind
if (isDef(data) && isDef(data.is)) {
tag = data.is;
}
if (!tag) {
// in case of component :is set to falsy value
return createEmptyVNode()
}
// warn against non-primitive key
if (isDef(data) && isDef(data.key) && !isPrimitive(data.key)
) {
{
warn(
'Avoid using non-primitive value as key, ' +
'use string/number value instead.',
context
);
}
}
// support single function children as default scoped slot
if (Array.isArray(children) &&
typeof children[0] === 'function'
) {
data = data || {};
data.scopedSlots = { default: children[0] };
children.length = 0;
}
if (normalizationType === ALWAYS_NORMALIZE) {
children = normalizeChildren(children);
} else if (normalizationType === SIMPLE_NORMALIZE) {
children = simpleNormalizeChildren(children);
}
var vnode, ns;
if (typeof tag === 'string') {
var Ctor;
ns = (context.$vnode && context.$vnode.ns) || config.getTagNamespace(tag);
if (config.isReservedTag(tag)) {
// platform built-in elements
if (isDef(data) && isDef(data.nativeOn) && data.tag !== 'component') {
warn(
("The .native modifier for v-on is only valid on components but it was used on <" + tag + ">."),
context
);
}
vnode = new VNode(
config.parsePlatformTagName(tag), data, children,
undefined, undefined, context
);
} else if ((!data || !data.pre) && isDef(Ctor = resolveAsset(context.$options, 'components', tag))) {
// component
vnode = createComponent(Ctor, data, context, children, tag);
} else {
// unknown or unlisted namespaced elements
// check at runtime because it may get assigned a namespace when its
// parent normalizes children
vnode = new VNode(
tag, data, children,
undefined, undefined, context
);
}
} else {
// direct component options / constructor
vnode = createComponent(tag, data, context, children);
}
if (Array.isArray(vnode)) {
return vnode
} else if (isDef(vnode)) {
if (isDef(ns)) { applyNS(vnode, ns); }
if (isDef(data)) { registerDeepBindings(data); }
return vnode
} else {
return createEmptyVNode()
}
}
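// Illustrative sketch: createElement shifts its arguments when `data` is
// omitted, so both h('div', children) and h('div', data, children) work.
// `vm` is assumed to be a component instance; this helper is never invoked.
function exampleCreateElement (vm) {
  // the array lands in the `data` slot, is detected via Array.isArray, and
  // is moved into `children` before _createElement runs
  return createElement(vm, 'div', ['hello'], undefined, undefined, true);
}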
function applyNS (vnode, ns, force) {
vnode.ns = ns;
if (vnode.tag === 'foreignObject') {
// use default namespace inside foreignObject
ns = undefined;
force = true;
}
if (isDef(vnode.children)) {
for (var i = 0, l = vnode.children.length; i < l; i++) {
var child = vnode.children[i];
if (isDef(child.tag) && (
isUndef(child.ns) || (isTrue(force) && child.tag !== 'svg'))) {
applyNS(child, ns, force);
}
}
}
}
// ref #5318
// necessary to ensure parent re-render when deep bindings like :style and
// :class are used on slot nodes
function registerDeepBindings (data) {
if (isObject(data.style)) {
traverse(data.style);
}
if (isObject(data.class)) {
traverse(data.class);
}
}
/* */
function initRender (vm) {
vm._vnode = null; // the root of the child tree
vm._staticTrees = null; // v-once cached trees
var options = vm.$options;
var parentVnode = vm.$vnode = options._parentVnode; // the placeholder node in parent tree
var renderContext = parentVnode && parentVnode.context;
vm.$slots = resolveSlots(options._renderChildren, renderContext);
vm.$scopedSlots = emptyObject;
// bind the createElement fn to this instance
// so that we get proper render context inside it.
// args order: tag, data, children, normalizationType, alwaysNormalize
// internal version is used by render functions compiled from templates
vm._c = function (a, b, c, d) { return createElement(vm, a, b, c, d, false); };
// normalization is always applied for the public version, used in
// user-written render functions.
vm.$createElement = function (a, b, c, d) { return createElement(vm, a, b, c, d, true); };
// $attrs & $listeners are exposed for easier HOC creation.
// they need to be reactive so that HOCs using them are always updated
var parentData = parentVnode && parentVnode.data;
/* istanbul ignore else */
{
defineReactive$$1(vm, '$attrs', parentData && parentData.attrs || emptyObject, function () {
!isUpdatingChildComponent && warn("$attrs is readonly.", vm);
}, true);
defineReactive$$1(vm, '$listeners', options._parentListeners || emptyObject, function () {
!isUpdatingChildComponent && warn("$listeners is readonly.", vm);
}, true);
}
}
var currentRenderingInstance = null;
function renderMixin (Vue) {
// install runtime convenience helpers
installRenderHelpers(Vue.prototype);
Vue.prototype.$nextTick = function (fn) {
return nextTick(fn, this)
};
Vue.prototype._render = function () {
var vm = this;
var ref = vm.$options;
var render = ref.render;
var _parentVnode = ref._parentVnode;
if (_parentVnode) {
vm.$scopedSlots = normalizeScopedSlots(
_parentVnode.data.scopedSlots,
vm.$slots,
vm.$scopedSlots
);
}
// set parent vnode. this allows render functions to have access
// to the data on the placeholder node.
vm.$vnode = _parentVnode;
// render self
var vnode;
try {
// There's no need to maintain a stack because all render fns are called
// separately from one another. Nested components' render fns are called
// when the parent component is patched.
currentRenderingInstance = vm;
vnode = render.call(vm._renderProxy, vm.$createElement);
} catch (e) {
handleError(e, vm, "render");
// return error render result,
// or previous vnode to prevent render error causing blank component
/* istanbul ignore else */
if (vm.$options.renderError) {
try {
vnode = vm.$options.renderError.call(vm._renderProxy, vm.$createElement, e);
} catch (e) {
handleError(e, vm, "renderError");
vnode = vm._vnode;
}
} else {
vnode = vm._vnode;
}
} finally {
currentRenderingInstance = null;
}
// if the returned array contains only a single node, allow it
if (Array.isArray(vnode) && vnode.length === 1) {
vnode = vnode[0];
}
// return empty vnode in case the render function errored out
if (!(vnode instanceof VNode)) {
if (Array.isArray(vnode)) {
warn(
'Multiple root nodes returned from render function. Render function ' +
'should return a single root node.',
vm
);
}
vnode = createEmptyVNode();
}
// set parent
vnode.parent = _parentVnode;
return vnode
};
}
/* */
function ensureCtor (comp, base) {
if (
comp.__esModule ||
(hasSymbol && comp[Symbol.toStringTag] === 'Module')
) {
comp = comp.default;
}
return isObject(comp)
? base.extend(comp)
: comp
}
function createAsyncPlaceholder (
factory,
data,
context,
children,
tag
) {
var node = createEmptyVNode();
node.asyncFactory = factory;
node.asyncMeta = { data: data, context: context, children: children, tag: tag };
return node
}
function resolveAsyncComponent (
factory,
baseCtor
) {
if (isTrue(factory.error) && isDef(factory.errorComp)) {
return factory.errorComp
}
if (isDef(factory.resolved)) {
return factory.resolved
}
var owner = currentRenderingInstance;
if (owner && isDef(factory.owners) && factory.owners.indexOf(owner) === -1) {
// already pending
factory.owners.push(owner);
}
if (isTrue(factory.loading) && isDef(factory.loadingComp)) {
return factory.loadingComp
}
if (owner && !isDef(factory.owners)) {
var owners = factory.owners = [owner];
var sync = true;
var timerLoading = null;
var timerTimeout = null
;(owner).$on('hook:destroyed', function () { return remove(owners, owner); });
var forceRender = function (renderCompleted) {
for (var i = 0, l = owners.length; i < l; i++) {
(owners[i]).$forceUpdate();
}
if (renderCompleted) {
owners.length = 0;
if (timerLoading !== null) {
clearTimeout(timerLoading);
timerLoading = null;
}
if (timerTimeout !== null) {
clearTimeout(timerTimeout);
timerTimeout = null;
}
}
};
var resolve = once(function (res) {
// cache resolved
factory.resolved = ensureCtor(res, baseCtor);
// invoke callbacks only if this is not a synchronous resolve
// (async resolves are shimmed as synchronous during SSR)
if (!sync) {
forceRender(true);
} else {
owners.length = 0;
}
});
var reject = once(function (reason) {
warn(
"Failed to resolve async component: " + (String(factory)) +
(reason ? ("\nReason: " + reason) : '')
);
if (isDef(factory.errorComp)) {
factory.error = true;
forceRender(true);
}
});
var res = factory(resolve, reject);
if (isObject(res)) {
if (isPromise(res)) {
// () => Promise
if (isUndef(factory.resolved)) {
res.then(resolve, reject);
}
} else if (isPromise(res.component)) {
res.component.then(resolve, reject);
if (isDef(res.error)) {
factory.errorComp = ensureCtor(res.error, baseCtor);
}
if (isDef(res.loading)) {
factory.loadingComp = ensureCtor(res.loading, baseCtor);
if (res.delay === 0) {
factory.loading = true;
} else {
timerLoading = setTimeout(function () {
timerLoading = null;
if (isUndef(factory.resolved) && isUndef(factory.error)) {
factory.loading = true;
forceRender(false);
}
}, res.delay || 200);
}
}
if (isDef(res.timeout)) {
timerTimeout = setTimeout(function () {
timerTimeout = null;
if (isUndef(factory.resolved)) {
reject(
"timeout (" + (res.timeout) + "ms)"
);
}
}, res.timeout);
}
}
}
sync = false;
// return in case resolved synchronously
return factory.loading
? factory.loadingComp
: factory.resolved
}
}
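// Illustrative sketch: the "advanced" async component shape understood by
// resolveAsyncComponent; every field except `component` is optional.
function exampleAsyncFactory () {
  return {
    component: Promise.resolve({ render: function (h) { return h('div'); } }),
    loading: { render: function (h) { return h('span', 'loading'); } },
    error: { render: function (h) { return h('span', 'failed'); } },
    delay: 200,   // ms before showing the loading component
    timeout: 3000 // ms before rejecting with a timeout error
  };
}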
/* */
function getFirstComponentChild (children) {
if (Array.isArray(children)) {
for (var i = 0; i < children.length; i++) {
var c = children[i];
if (isDef(c) && (isDef(c.componentOptions) || isAsyncPlaceholder(c))) {
return c
}
}
}
}
/* */
/* */
function initEvents (vm) {
vm._events = Object.create(null);
vm._hasHookEvent = false;
// init parent attached events
var listeners = vm.$options._parentListeners;
if (listeners) {
updateComponentListeners(vm, listeners);
}
}
var target;
function add (event, fn) {
target.$on(event, fn);
}
function remove$1 (event, fn) {
target.$off(event, fn);
}
function createOnceHandler (event, fn) {
var _target = target;
return function onceHandler () {
var res = fn.apply(null, arguments);
if (res !== null) {
_target.$off(event, onceHandler);
}
}
}
function updateComponentListeners (
vm,
listeners,
oldListeners
) {
target = vm;
updateListeners(listeners, oldListeners || {}, add, remove$1, createOnceHandler, vm);
target = undefined;
}
function eventsMixin (Vue) {
var hookRE = /^hook:/;
Vue.prototype.$on = function (event, fn) {
var vm = this;
if (Array.isArray(event)) {
for (var i = 0, l = event.length; i < l; i++) {
vm.$on(event[i], fn);
}
} else {
(vm._events[event] || (vm._events[event] = [])).push(fn);
// optimize hook:event cost by using a boolean flag marked at registration
// instead of a hash lookup
if (hookRE.test(event)) {
vm._hasHookEvent = true;
}
}
return vm
};
Vue.prototype.$once = function (event, fn) {
var vm = this;
function on () {
vm.$off(event, on);
fn.apply(vm, arguments);
}
on.fn = fn;
vm.$on(event, on);
return vm
};
Vue.prototype.$off = function (event, fn) {
var vm = this;
// all
if (!arguments.length) {
vm._events = Object.create(null);
return vm
}
// array of events
if (Array.isArray(event)) {
for (var i$1 = 0, l = event.length; i$1 < l; i$1++) {
vm.$off(event[i$1], fn);
}
return vm
}
// specific event
var cbs = vm._events[event];
if (!cbs) {
return vm
}
if (!fn) {
vm._events[event] = null;
return vm
}
// specific handler
var cb;
var i = cbs.length;
while (i--) {
cb = cbs[i];
if (cb === fn || cb.fn === fn) {
cbs.splice(i, 1);
break
}
}
return vm
};
Vue.prototype.$emit = function (event) {
var vm = this;
{
var lowerCaseEvent = event.toLowerCase();
if (lowerCaseEvent !== event && vm._events[lowerCaseEvent]) {
tip(
"Event \"" + lowerCaseEvent + "\" is emitted in component " +
(formatComponentName(vm)) + " but the handler is registered for \"" + event + "\". " +
"Note that HTML attributes are case-insensitive and you cannot use " +
"v-on to listen to camelCase events when using in-DOM templates. " +
"You should probably use \"" + (hyphenate(event)) + "\" instead of \"" + event + "\"."
);
}
}
var cbs = vm._events[event];
if (cbs) {
cbs = cbs.length > 1 ? toArray(cbs) : cbs;
var args = toArray(arguments, 1);
var info = "event handler for \"" + event + "\"";
for (var i = 0, l = cbs.length; i < l; i++) {
invokeWithErrorHandling(cbs[i], vm, args, vm, info);
}
}
return vm
};
}
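// Illustrative sketch of the event API wired up above: $once wraps the
// handler so it unregisters itself after the first emit.
function exampleEvents () {
  var vm = new Vue();
  var hits = 0;
  vm.$once('ping', function () { hits++; });
  vm.$emit('ping');
  vm.$emit('ping');
  return hits; // -> 1
}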
/* */
var activeInstance = null;
var isUpdatingChildComponent = false;
function setActiveInstance(vm) {
var prevActiveInstance = activeInstance;
activeInstance = vm;
return function () {
activeInstance = prevActiveInstance;
}
}
function initLifecycle (vm) {
var options = vm.$options;
// locate first non-abstract parent
var parent = options.parent;
if (parent && !options.abstract) {
while (parent.$options.abstract && parent.$parent) {
parent = parent.$parent;
}
parent.$children.push(vm);
}
vm.$parent = parent;
vm.$root = parent ? parent.$root : vm;
vm.$children = [];
vm.$refs = {};
vm._watcher = null;
vm._inactive = null;
vm._directInactive = false;
vm._isMounted = false;
vm._isDestroyed = false;
vm._isBeingDestroyed = false;
}
function lifecycleMixin (Vue) {
Vue.prototype._update = function (vnode, hydrating) {
var vm = this;
var prevEl = vm.$el;
var prevVnode = vm._vnode;
var restoreActiveInstance = setActiveInstance(vm);
vm._vnode = vnode;
// Vue.prototype.__patch__ is injected in entry points
// based on the rendering backend used.
if (!prevVnode) {
// initial render
vm.$el = vm.__patch__(vm.$el, vnode, hydrating, false /* removeOnly */);
} else {
// updates
vm.$el = vm.__patch__(prevVnode, vnode);
}
restoreActiveInstance();
// update __vue__ reference
if (prevEl) {
prevEl.__vue__ = null;
}
if (vm.$el) {
vm.$el.__vue__ = vm;
}
// if parent is an HOC, update its $el as well
if (vm.$vnode && vm.$parent && vm.$vnode === vm.$parent._vnode) {
vm.$parent.$el = vm.$el;
}
// updated hook is called by the scheduler to ensure that children are
// updated in a parent's updated hook.
};
Vue.prototype.$forceUpdate = function () {
var vm = this;
if (vm._watcher) {
vm._watcher.update();
}
};
Vue.prototype.$destroy = function () {
var vm = this;
if (vm._isBeingDestroyed) {
return
}
callHook(vm, 'beforeDestroy');
vm._isBeingDestroyed = true;
// remove self from parent
var parent = vm.$parent;
if (parent && !parent._isBeingDestroyed && !vm.$options.abstract) {
remove(parent.$children, vm);
}
// teardown watchers
if (vm._watcher) {
vm._watcher.teardown();
}
var i = vm._watchers.length;
while (i--) {
vm._watchers[i].teardown();
}
// remove reference from data ob
// frozen object may not have observer.
if (vm._data.__ob__) {
vm._data.__ob__.vmCount--;
}
// call the last hook...
vm._isDestroyed = true;
// invoke destroy hooks on current rendered tree
vm.__patch__(vm._vnode, null);
// fire destroyed hook
callHook(vm, 'destroyed');
// turn off all instance listeners.
vm.$off();
// remove __vue__ reference
if (vm.$el) {
vm.$el.__vue__ = null;
}
// release circular reference (#6759)
if (vm.$vnode) {
vm.$vnode.parent = null;
}
};
}
function mountComponent (
vm,
el,
hydrating
) {
vm.$el = el;
if (!vm.$options.render) {
vm.$options.render = createEmptyVNode;
{
/* istanbul ignore if */
if ((vm.$options.template && vm.$options.template.charAt(0) !== '#') ||
vm.$options.el || el) {
warn(
'You are using the runtime-only build of Vue where the template ' +
'compiler is not available. Either pre-compile the templates into ' +
'render functions, or use the compiler-included build.',
vm
);
} else {
warn(
'Failed to mount component: template or render function not defined.',
vm
);
}
}
}
callHook(vm, 'beforeMount');
var updateComponent;
/* istanbul ignore if */
if (config.performance && mark) {
updateComponent = function () {
var name = vm._name;
var id = vm._uid;
var startTag = "vue-perf-start:" + id;
var endTag = "vue-perf-end:" + id;
mark(startTag);
var vnode = vm._render();
mark(endTag);
measure(("vue " + name + " render"), startTag, endTag);
mark(startTag);
vm._update(vnode, hydrating);
mark(endTag);
measure(("vue " + name + " patch"), startTag, endTag);
};
} else {
updateComponent = function () {
vm._update(vm._render(), hydrating);
};
}
// we set this to vm._watcher inside the watcher's constructor
// since the watcher's initial patch may call $forceUpdate (e.g. inside child
// component's mounted hook), which relies on vm._watcher being already defined
new Watcher(vm, updateComponent, noop, {
before: function before () {
if (vm._isMounted && !vm._isDestroyed) {
callHook(vm, 'beforeUpdate');
}
}
}, true /* isRenderWatcher */);
hydrating = false;
// manually mounted instance, call mounted on self
// mounted is called for render-created child components in its inserted hook
if (vm.$vnode == null) {
vm._isMounted = true;
callHook(vm, 'mounted');
}
return vm
}
function updateChildComponent (
vm,
propsData,
listeners,
parentVnode,
renderChildren
) {
{
isUpdatingChildComponent = true;
}
// determine whether component has slot children
// we need to do this before overwriting $options._renderChildren.
// check if there are dynamic scopedSlots (hand-written or compiled but with
// dynamic slot names). Static scoped slots compiled from templates have the
// "$stable" marker.
var newScopedSlots = parentVnode.data.scopedSlots;
var oldScopedSlots = vm.$scopedSlots;
var hasDynamicScopedSlot = !!(
(newScopedSlots && !newScopedSlots.$stable) ||
(oldScopedSlots !== emptyObject && !oldScopedSlots.$stable) ||
(newScopedSlots && vm.$scopedSlots.$key !== newScopedSlots.$key) ||
(!newScopedSlots && vm.$scopedSlots.$key)
);
// Any static slot children from the parent may have changed during parent's
// update. Dynamic scoped slots may also have changed. In such cases, a forced
// update is necessary to ensure correctness.
var needsForceUpdate = !!(
renderChildren || // has new static slots
vm.$options._renderChildren || // has old static slots
hasDynamicScopedSlot
);
vm.$options._parentVnode = parentVnode;
vm.$vnode = parentVnode; // update vm's placeholder node without re-render
if (vm._vnode) { // update child tree's parent
vm._vnode.parent = parentVnode;
}
vm.$options._renderChildren = renderChildren;
// update $attrs and $listeners hash
// these are also reactive so they may trigger child update if the child
// used them during render
vm.$attrs = parentVnode.data.attrs || emptyObject;
vm.$listeners = listeners || emptyObject;
// update props
if (propsData && vm.$options.props) {
toggleObserving(false);
var props = vm._props;
var propKeys = vm.$options._propKeys || [];
for (var i = 0; i < propKeys.length; i++) {
var key = propKeys[i];
var propOptions = vm.$options.props; // re-read locally so flow can narrow the type
props[key] = validateProp(key, propOptions, propsData, vm);
}
toggleObserving(true);
// keep a copy of raw propsData
vm.$options.propsData = propsData;
}
// update listeners
listeners = listeners || emptyObject;
var oldListeners = vm.$options._parentListeners;
vm.$options._parentListeners = listeners;
updateComponentListeners(vm, listeners, oldListeners);
// resolve slots + force update if has children
if (needsForceUpdate) {
vm.$slots = resolveSlots(renderChildren, parentVnode.context);
vm.$forceUpdate();
}
{
isUpdatingChildComponent = false;
}
}
function isInInactiveTree (vm) {
while (vm && (vm = vm.$parent)) {
if (vm._inactive) { return true }
}
return false
}
function activateChildComponent (vm, direct) {
if (direct) {
vm._directInactive = false;
if (isInInactiveTree(vm)) {
return
}
} else if (vm._directInactive) {
return
}
if (vm._inactive || vm._inactive === null) {
vm._inactive = false;
for (var i = 0; i < vm.$children.length; i++) {
activateChildComponent(vm.$children[i]);
}
callHook(vm, 'activated');
}
}
function deactivateChildComponent (vm, direct) {
if (direct) {
vm._directInactive = true;
if (isInInactiveTree(vm)) {
return
}
}
if (!vm._inactive) {
vm._inactive = true;
for (var i = 0; i < vm.$children.length; i++) {
deactivateChildComponent(vm.$children[i]);
}
callHook(vm, 'deactivated');
}
}
function callHook (vm, hook) {
// #7573 disable dep collection when invoking lifecycle hooks
pushTarget();
var handlers = vm.$options[hook];
var info = hook + " hook";
if (handlers) {
for (var i = 0, j = handlers.length; i < j; i++) {
invokeWithErrorHandling(handlers[i], vm, null, vm, info);
}
}
if (vm._hasHookEvent) {
vm.$emit('hook:' + hook);
}
popTarget();
}
/* */
var MAX_UPDATE_COUNT = 100;
var queue = [];
var activatedChildren = [];
var has = {};
var circular = {};
var waiting = false;
var flushing = false;
var index = 0;
/**
* Reset the scheduler's state.
*/
function resetSchedulerState () {
index = queue.length = activatedChildren.length = 0;
has = {};
{
circular = {};
}
waiting = flushing = false;
}
// Async edge case #6566 requires saving the timestamp when event listeners are
// attached. However, calling performance.now() has a perf overhead especially
// if the page has thousands of event listeners. Instead, we take a timestamp
// every time the scheduler flushes and use that for all event listeners
// attached during that flush.
var currentFlushTimestamp = 0;
// Async edge case fix requires storing an event listener's attach timestamp.
var getNow = Date.now;
// Determine what event timestamp the browser is using. Annoyingly, the
// timestamp can either be hi-res (relative to page load) or low-res
// (relative to UNIX epoch), so in order to compare time we have to use the
// same timestamp type when saving the flush timestamp.
// All IE versions use low-res event timestamps, and have problematic clock
// implementations (#9632)
if (inBrowser && !isIE) {
var performance = window.performance;
if (
performance &&
typeof performance.now === 'function' &&
getNow() > document.createEvent('Event').timeStamp
) {
// if the event timestamp, although evaluated AFTER the Date.now(), is
// smaller than it, it means the event is using a hi-res timestamp,
// and we need to use the hi-res version for event listener timestamps as
// well.
getNow = function () { return performance.now(); };
}
}
/**
* Flush both queues and run the watchers.
*/
function flushSchedulerQueue () {
currentFlushTimestamp = getNow();
flushing = true;
var watcher, id;
// Sort queue before flush.
// This ensures that:
// 1. Components are updated from parent to child. (because parent is always
// created before the child)
// 2. A component's user watchers are run before its render watcher (because
// user watchers are created before the render watcher)
// 3. If a component is destroyed during a parent component's watcher run,
// its watchers can be skipped.
queue.sort(function (a, b) { return a.id - b.id; });
// do not cache length because more watchers might be pushed
// as we run existing watchers
for (index = 0; index < queue.length; index++) {
watcher = queue[index];
if (watcher.before) {
watcher.before();
}
id = watcher.id;
has[id] = null;
watcher.run();
// in dev build, check and stop circular updates.
if (has[id] != null) {
circular[id] = (circular[id] || 0) + 1;
if (circular[id] > MAX_UPDATE_COUNT) {
warn(
'You may have an infinite update loop ' + (
watcher.user
? ("in watcher with expression \"" + (watcher.expression) + "\"")
: "in a component render function."
),
watcher.vm
);
break
}
}
}
// keep copies of post queues before resetting state
var activatedQueue = activatedChildren.slice();
var updatedQueue = queue.slice();
resetSchedulerState();
// call component updated and activated hooks
callActivatedHooks(activatedQueue);
callUpdatedHooks(updatedQueue);
// devtool hook
/* istanbul ignore if */
if (devtools && config.devtools) {
devtools.emit('flush');
}
}
function callUpdatedHooks (queue) {
var i = queue.length;
while (i--) {
var watcher = queue[i];
var vm = watcher.vm;
if (vm._watcher === watcher && vm._isMounted && !vm._isDestroyed) {
callHook(vm, 'updated');
}
}
}
/**
* Queue a kept-alive component that was activated during patch.
* The queue will be processed after the entire tree has been patched.
*/
function queueActivatedComponent (vm) {
// setting _inactive to false here so that a render function can
// rely on checking whether it's in an inactive tree (e.g. router-view)
vm._inactive = false;
activatedChildren.push(vm);
}
function callActivatedHooks (queue) {
for (var i = 0; i < queue.length; i++) {
queue[i]._inactive = true;
activateChildComponent(queue[i], true /* direct */);
}
}
/**
* Push a watcher into the watcher queue.
* Jobs with duplicate IDs will be skipped unless it's
* pushed when the queue is being flushed.
*/
function queueWatcher (watcher) {
var id = watcher.id;
if (has[id] == null) {
has[id] = true;
if (!flushing) {
queue.push(watcher);
} else {
// if already flushing, splice the watcher in based on its id;
// if its id has already been passed, it will run immediately next.
var i = queue.length - 1;
while (i > index && queue[i].id > watcher.id) {
i--;
}
queue.splice(i + 1, 0, watcher);
}
// queue the flush
if (!waiting) {
waiting = true;
if (!config.async) {
flushSchedulerQueue();
return
}
nextTick(flushSchedulerQueue);
}
}
}
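// Illustrative sketch: watchers are deduplicated by id within one flush, so
// notifying the same watcher repeatedly in a tick schedules a single run.
// This helper is never invoked; real watchers are queued via dep.notify().
function exampleQueueWatcher (watcher) {
  queueWatcher(watcher);
  queueWatcher(watcher); // no-op: has[watcher.id] is already set
}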
/* */
var uid$2 = 0;
/**
* A watcher parses an expression, collects dependencies,
* and fires callback when the expression value changes.
* This is used for both the $watch() api and directives.
*/
var Watcher = function Watcher (
vm,
expOrFn,
cb,
options,
isRenderWatcher
) {
this.vm = vm;
if (isRenderWatcher) {
vm._watcher = this;
}
vm._watchers.push(this);
// options
if (options) {
this.deep = !!options.deep;
this.user = !!options.user;
this.lazy = !!options.lazy;
this.sync = !!options.sync;
this.before = options.before;
} else {
this.deep = this.user = this.lazy = this.sync = false;
}
this.cb = cb;
this.id = ++uid$2; // uid for batching
this.active = true;
this.dirty = this.lazy; // for lazy watchers
this.deps = [];
this.newDeps = [];
this.depIds = new _Set();
this.newDepIds = new _Set();
this.expression = expOrFn.toString();
// parse expression for getter
if (typeof expOrFn === 'function') {
this.getter = expOrFn;
} else {
this.getter = parsePath(expOrFn);
if (!this.getter) {
this.getter = noop;
warn(
"Failed watching path: \"" + expOrFn + "\" " +
'Watcher only accepts simple dot-delimited paths. ' +
'For full control, use a function instead.',
vm
);
}
}
this.value = this.lazy
? undefined
: this.get();
};
/**
* Evaluate the getter, and re-collect dependencies.
*/
Watcher.prototype.get = function get () {
pushTarget(this);
var value;
var vm = this.vm;
try {
value = this.getter.call(vm, vm);
} catch (e) {
if (this.user) {
handleError(e, vm, ("getter for watcher \"" + (this.expression) + "\""));
} else {
throw e
}
} finally {
// "touch" every property so they are all tracked as
// dependencies for deep watching
if (this.deep) {
traverse(value);
}
popTarget();
this.cleanupDeps();
}
return value
};
/**
* Add a dependency to this directive.
*/
Watcher.prototype.addDep = function addDep (dep) {
var id = dep.id;
if (!this.newDepIds.has(id)) {
this.newDepIds.add(id);
this.newDeps.push(dep);
if (!this.depIds.has(id)) {
dep.addSub(this);
}
}
};
/**
* Clean up for dependency collection.
*/
Watcher.prototype.cleanupDeps = function cleanupDeps () {
var i = this.deps.length;
while (i--) {
var dep = this.deps[i];
if (!this.newDepIds.has(dep.id)) {
dep.removeSub(this);
}
}
var tmp = this.depIds;
this.depIds = this.newDepIds;
this.newDepIds = tmp;
this.newDepIds.clear();
tmp = this.deps;
this.deps = this.newDeps;
this.newDeps = tmp;
this.newDeps.length = 0;
};
/**
* Subscriber interface.
* Will be called when a dependency changes.
*/
Watcher.prototype.update = function update () {
/* istanbul ignore else */
if (this.lazy) {
this.dirty = true;
} else if (this.sync) {
this.run();
} else {
queueWatcher(this);
}
};
/**
* Scheduler job interface.
* Will be called by the scheduler.
*/
Watcher.prototype.run = function run () {
if (this.active) {
var value = this.get();
if (
value !== this.value ||
// Deep watchers and watchers on Object/Arrays should fire even
// when the value is the same, because the value may
// have mutated.
isObject(value) ||
this.deep
) {
// set new value
var oldValue = this.value;
this.value = value;
if (this.user) {
var info = "callback for watcher \"" + (this.expression) + "\"";
invokeWithErrorHandling(this.cb, this.vm, [value, oldValue], this.vm, info);
} else {
this.cb.call(this.vm, value, oldValue);
}
}
}
};
/**
* Evaluate the value of the watcher.
* This only gets called for lazy watchers.
*/
Watcher.prototype.evaluate = function evaluate () {
this.value = this.get();
this.dirty = false;
};
/**
* Depend on all deps collected by this watcher.
*/
Watcher.prototype.depend = function depend () {
var i = this.deps.length;
while (i--) {
this.deps[i].depend();
}
};
/**
* Remove self from all dependencies' subscriber list.
*/
Watcher.prototype.teardown = function teardown () {
if (this.active) {
// remove self from vm's watcher list
// this is a somewhat expensive operation so we skip it
// if the vm is being destroyed.
if (!this.vm._isBeingDestroyed) {
remove(this.vm._watchers, this);
}
var i = this.deps.length;
while (i--) {
this.deps[i].removeSub(this);
}
this.active = false;
}
};
/* */
var sharedPropertyDefinition = {
enumerable: true,
configurable: true,
get: noop,
set: noop
};
function proxy (target, sourceKey, key) {
sharedPropertyDefinition.get = function proxyGetter () {
return this[sourceKey][key]
};
sharedPropertyDefinition.set = function proxySetter (val) {
this[sourceKey][key] = val;
};
Object.defineProperty(target, key, sharedPropertyDefinition);
}
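// Illustrative sketch: proxy() is why `this.msg` works in templates and
// methods: it forwards reads and writes to `this._data.msg`.
function exampleProxy () {
  var target = { _data: { msg: 'hi' } };
  proxy(target, '_data', 'msg');
  return target.msg; // -> 'hi'
}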
function initState (vm) {
vm._watchers = [];
var opts = vm.$options;
if (opts.props) { initProps(vm, opts.props); }
if (opts.methods) { initMethods(vm, opts.methods); }
if (opts.data) {
initData(vm);
} else {
observe(vm._data = {}, true /* asRootData */);
}
if (opts.computed) { initComputed(vm, opts.computed); }
if (opts.watch && opts.watch !== nativeWatch) {
initWatch(vm, opts.watch);
}
}
function initProps (vm, propsOptions) {
var propsData = vm.$options.propsData || {};
var props = vm._props = {};
// cache prop keys so that future props updates can iterate using Array
// instead of dynamic object key enumeration.
var keys = vm.$options._propKeys = [];
var isRoot = !vm.$parent;
// root instance props should be converted
if (!isRoot) {
toggleObserving(false);
}
var loop = function ( key ) {
keys.push(key);
var value = validateProp(key, propsOptions, propsData, vm);
/* istanbul ignore else */
{
var hyphenatedKey = hyphenate(key);
if (isReservedAttribute(hyphenatedKey) ||
config.isReservedAttr(hyphenatedKey)) {
warn(
("\"" + hyphenatedKey + "\" is a reserved attribute and cannot be used as component prop."),
vm
);
}
defineReactive$$1(props, key, value, function () {
if (!isRoot && !isUpdatingChildComponent) {
warn(
"Avoid mutating a prop directly since the value will be " +
"overwritten whenever the parent component re-renders. " +
"Instead, use a data or computed property based on the prop's " +
"value. Prop being mutated: \"" + key + "\"",
vm
);
}
});
}
// static props are already proxied on the component's prototype
// during Vue.extend(). We only need to proxy props defined at
// instantiation here.
if (!(key in vm)) {
proxy(vm, "_props", key);
}
};
for (var key in propsOptions) loop( key );
toggleObserving(true);
}
function initData (vm) {
var data = vm.$options.data;
data = vm._data = typeof data === 'function'
? getData(data, vm)
: data || {};
if (!isPlainObject(data)) {
data = {};
warn(
'data functions should return an object:\n' +
'https://vuejs.org/v2/guide/components.html#data-Must-Be-a-Function',
vm
);
}
// proxy data on instance
var keys = Object.keys(data);
var props = vm.$options.props;
var methods = vm.$options.methods;
var i = keys.length;
while (i--) {
var key = keys[i];
{
if (methods && hasOwn(methods, key)) {
warn(
("Method \"" + key + "\" has already been defined as a data property."),
vm
);
}
}
if (props && hasOwn(props, key)) {
warn(
"The data property \"" + key + "\" is already declared as a prop. " +
"Use prop default value instead.",
vm
);
} else if (!isReserved(key)) {
proxy(vm, "_data", key);
}
}
// observe data
observe(data, true /* asRootData */);
}
function getData (data, vm) {
// #7573 disable dep collection when invoking data getters
pushTarget();
try {
return data.call(vm, vm)
} catch (e) {
handleError(e, vm, "data()");
return {}
} finally {
popTarget();
}
}
var computedWatcherOptions = { lazy: true };
function initComputed (vm, computed) {
// $flow-disable-line
var watchers = vm._computedWatchers = Object.create(null);
// computed properties are just getters during SSR
var isSSR = isServerRendering();
for (var key in computed) {
var userDef = computed[key];
var getter = typeof userDef === 'function' ? userDef : userDef.get;
if (getter == null) {
warn(
("Getter is missing for computed property \"" + key + "\"."),
vm
);
}
if (!isSSR) {
// create internal watcher for the computed property.
watchers[key] = new Watcher(
vm,
getter || noop,
noop,
computedWatcherOptions
);
}
// component-defined computed properties are already defined on the
// component prototype. We only need to define computed properties defined
// at instantiation here.
if (!(key in vm)) {
defineComputed(vm, key, userDef);
} else {
if (key in vm.$data) {
warn(("The computed property \"" + key + "\" is already defined in data."), vm);
} else if (vm.$options.props && key in vm.$options.props) {
warn(("The computed property \"" + key + "\" is already defined as a prop."), vm);
} else if (vm.$options.methods && key in vm.$options.methods) {
warn(("The computed property \"" + key + "\" is already defined as a method."), vm);
}
}
}
}
function defineComputed (
target,
key,
userDef
) {
var shouldCache = !isServerRendering();
if (typeof userDef === 'function') {
sharedPropertyDefinition.get = shouldCache
? createComputedGetter(key)
: createGetterInvoker(userDef);
sharedPropertyDefinition.set = noop;
} else {
sharedPropertyDefinition.get = userDef.get
? shouldCache && userDef.cache !== false
? createComputedGetter(key)
: createGetterInvoker(userDef.get)
: noop;
sharedPropertyDefinition.set = userDef.set || noop;
}
if (sharedPropertyDefinition.set === noop) {
sharedPropertyDefinition.set = function () {
warn(
("Computed property \"" + key + "\" was assigned to but it has no setter."),
this
);
};
}
Object.defineProperty(target, key, sharedPropertyDefinition);
}
function createComputedGetter (key) {
return function computedGetter () {
var watcher = this._computedWatchers && this._computedWatchers[key];
if (watcher) {
if (watcher.dirty) {
watcher.evaluate();
}
if (Dep.target) {
watcher.depend();
}
return watcher.value
}
}
}
function createGetterInvoker(fn) {
return function computedGetter () {
return fn.call(this, this)
}
}
function initMethods (vm, methods) {
var props = vm.$options.props;
for (var key in methods) {
{
if (typeof methods[key] !== 'function') {
warn(
"Method \"" + key + "\" has type \"" + (typeof methods[key]) + "\" in the component definition. " +
"Did you reference the function correctly?",
vm
);
}
if (props && hasOwn(props, key)) {
warn(
("Method \"" + key + "\" has already been defined as a prop."),
vm
);
}
if ((key in vm) && isReserved(key)) {
warn(
"Method \"" + key + "\" conflicts with an existing Vue instance method. " +
"Avoid defining component methods that start with _ or $."
);
}
}
vm[key] = typeof methods[key] !== 'function' ? noop : bind(methods[key], vm);
}
}
function initWatch (vm, watch) {
for (var key in watch) {
var handler = watch[key];
if (Array.isArray(handler)) {
for (var i = 0; i < handler.length; i++) {
createWatcher(vm, key, handler[i]);
}
} else {
createWatcher(vm, key, handler);
}
}
}
function createWatcher (
vm,
expOrFn,
handler,
options
) {
if (isPlainObject(handler)) {
options = handler;
handler = handler.handler;
}
if (typeof handler === 'string') {
handler = vm[handler];
}
return vm.$watch(expOrFn, handler, options)
}
function stateMixin (Vue) {
// flow somehow has problems with directly declared definition object
// when using Object.defineProperty, so we have to procedurally build up
// the object here.
var dataDef = {};
dataDef.get = function () { return this._data };
var propsDef = {};
propsDef.get = function () { return this._props };
{
dataDef.set = function () {
warn(
'Avoid replacing instance root $data. ' +
'Use nested data properties instead.',
this
);
};
propsDef.set = function () {
warn("$props is readonly.", this);
};
}
Object.defineProperty(Vue.prototype, '$data', dataDef);
Object.defineProperty(Vue.prototype, '$props', propsDef);
Vue.prototype.$set = set;
Vue.prototype.$delete = del;
Vue.prototype.$watch = function (
expOrFn,
cb,
options
) {
var vm = this;
if (isPlainObject(cb)) {
return createWatcher(vm, expOrFn, cb, options)
}
options = options || {};
options.user = true;
var watcher = new Watcher(vm, expOrFn, cb, options);
if (options.immediate) {
var info = "callback for immediate watcher \"" + (watcher.expression) + "\"";
pushTarget();
invokeWithErrorHandling(cb, vm, [watcher.value], vm, info);
popTarget();
}
return function unwatchFn () {
watcher.teardown();
}
};
}
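// Illustrative sketch: $watch returns an unwatch function; `immediate: true`
// fires the callback synchronously with the current value. `vm` is assumed
// to be an instance with a `count` data property.
function exampleWatch (vm) {
  var unwatch = vm.$watch('count', function (val, oldVal) {
    // react to changes here
  }, { immediate: true });
  unwatch(); // tears the watcher down
}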
/* */
var uid$3 = 0;
function initMixin (Vue) {
Vue.prototype._init = function (options) {
var vm = this;
// a uid
vm._uid = uid$3++;
var startTag, endTag;
/* istanbul ignore if */
if (config.performance && mark) {
startTag = "vue-perf-start:" + (vm._uid);
endTag = "vue-perf-end:" + (vm._uid);
mark(startTag);
}
// a flag to avoid this being observed
vm._isVue = true;
// merge options
if (options && options._isComponent) {
// optimize internal component instantiation
// since dynamic options merging is pretty slow, and none of the
// internal component options needs special treatment.
initInternalComponent(vm, options);
} else {
vm.$options = mergeOptions(
resolveConstructorOptions(vm.constructor),
options || {},
vm
);
}
/* istanbul ignore else */
{
initProxy(vm);
}
// expose real self
vm._self = vm;
initLifecycle(vm);
initEvents(vm);
initRender(vm);
callHook(vm, 'beforeCreate');
initInjections(vm); // resolve injections before data/props
initState(vm);
initProvide(vm); // resolve provide after data/props
callHook(vm, 'created');
/* istanbul ignore if */
if (config.performance && mark) {
vm._name = formatComponentName(vm, false);
mark(endTag);
measure(("vue " + (vm._name) + " init"), startTag, endTag);
}
if (vm.$options.el) {
vm.$mount(vm.$options.el);
}
};
}
function initInternalComponent (vm, options) {
var opts = vm.$options = Object.create(vm.constructor.options);
// doing this because it's faster than dynamic enumeration.
var parentVnode = options._parentVnode;
opts.parent = options.parent;
opts._parentVnode = parentVnode;
var vnodeComponentOptions = parentVnode.componentOptions;
opts.propsData = vnodeComponentOptions.propsData;
opts._parentListeners = vnodeComponentOptions.listeners;
opts._renderChildren = vnodeComponentOptions.children;
opts._componentTag = vnodeComponentOptions.tag;
if (options.render) {
opts.render = options.render;
opts.staticRenderFns = options.staticRenderFns;
}
}
function resolveConstructorOptions (Ctor) {
var options = Ctor.options;
if (Ctor.super) {
var superOptions = resolveConstructorOptions(Ctor.super);
var cachedSuperOptions = Ctor.superOptions;
if (superOptions !== cachedSuperOptions) {
// super option changed,
// need to resolve new options.
Ctor.superOptions = superOptions;
// check if there are any late-modified/attached options (#4976)
var modifiedOptions = resolveModifiedOptions(Ctor);
// update base extend options
if (modifiedOptions) {
extend(Ctor.extendOptions, modifiedOptions);
}
options = Ctor.options = mergeOptions(superOptions, Ctor.extendOptions);
if (options.name) {
options.components[options.name] = Ctor;
}
}
}
return options
}
function resolveModifiedOptions (Ctor) {
var modified;
var latest = Ctor.options;
var sealed = Ctor.sealedOptions;
for (var key in latest) {
if (latest[key] !== sealed[key]) {
if (!modified) { modified = {}; }
modified[key] = latest[key];
}
}
return modified
}
function Vue (options) {
if (!(this instanceof Vue)
) {
warn('Vue is a constructor and should be called with the `new` keyword');
}
this._init(options);
}
initMixin(Vue);
stateMixin(Vue);
eventsMixin(Vue);
lifecycleMixin(Vue);
renderMixin(Vue);
/* */
function initUse (Vue) {
Vue.use = function (plugin) {
var installedPlugins = (this._installedPlugins || (this._installedPlugins = []));
if (installedPlugins.indexOf(plugin) > -1) {
return this
}
// additional parameters
var args = toArray(arguments, 1);
args.unshift(this);
if (typeof plugin.install === 'function') {
plugin.install.apply(plugin, args);
} else if (typeof plugin === 'function') {
plugin.apply(null, args);
}
installedPlugins.push(plugin);
return this
};
}
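// For illustration, with a hypothetical plugin object: Vue.use deduplicates by
// identity, so installing the same plugin twice is a no-op the second time.
//
//   var MyPlugin = { install: function (Vue, options) { /* augment Vue */ } };
//   Vue.use(MyPlugin, { someOption: true }); // calls MyPlugin.install(Vue, { someOption: true })
//   Vue.use(MyPlugin);                       // already installed: returns Vue immediately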
/* */
function initMixin$1 (Vue) {
Vue.mixin = function (mixin) {
this.options = mergeOptions(this.options, mixin);
return this
};
}
/* */
function initExtend (Vue) {
/**
* Each instance constructor, including Vue, has a unique
* cid. This enables us to create wrapped "child
* constructors" for prototypal inheritance and cache them.
*/
Vue.cid = 0;
var cid = 1;
/**
* Class inheritance
*/
Vue.extend = function (extendOptions) {
extendOptions = extendOptions || {};
var Super = this;
var SuperId = Super.cid;
var cachedCtors = extendOptions._Ctor || (extendOptions._Ctor = {});
if (cachedCtors[SuperId]) {
return cachedCtors[SuperId]
}
var name = extendOptions.name || Super.options.name;
if (name) {
validateComponentName(name);
}
var Sub = function VueComponent (options) {
this._init(options);
};
Sub.prototype = Object.create(Super.prototype);
Sub.prototype.constructor = Sub;
Sub.cid = cid++;
Sub.options = mergeOptions(
Super.options,
extendOptions
);
Sub['super'] = Super;
// For props and computed properties, we define the proxy getters on
// the Vue instances at extension time, on the extended prototype. This
// avoids Object.defineProperty calls for each instance created.
if (Sub.options.props) {
initProps$1(Sub);
}
if (Sub.options.computed) {
initComputed$1(Sub);
}
// allow further extension/mixin/plugin usage
Sub.extend = Super.extend;
Sub.mixin = Super.mixin;
Sub.use = Super.use;
// create asset registers, so extended classes
// can have their private assets too.
ASSET_TYPES.forEach(function (type) {
Sub[type] = Super[type];
});
// enable recursive self-lookup
if (name) {
Sub.options.components[name] = Sub;
}
// keep a reference to the super options at extension time.
// later at instantiation we can check if Super's options have
// been updated.
Sub.superOptions = Super.options;
Sub.extendOptions = extendOptions;
Sub.sealedOptions = extend({}, Sub.options);
// cache constructor
cachedCtors[SuperId] = Sub;
return Sub
};
}
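// For illustration: the constructor cache is keyed by the super class cid and
// stored on the options object itself, so extending the same options object
// twice from the same base yields the identical subclass.
//
//   var options = { name: 'my-comp' };
//   var A = Vue.extend(options);
//   var B = Vue.extend(options);
//   // A === B, because options._Ctor[Vue.cid] was populated on the first call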
function initProps$1 (Comp) {
var props = Comp.options.props;
for (var key in props) {
proxy(Comp.prototype, "_props", key);
}
}
function initComputed$1 (Comp) {
var computed = Comp.options.computed;
for (var key in computed) {
defineComputed(Comp.prototype, key, computed[key]);
}
}
/* */
function initAssetRegisters (Vue) {
/**
* Create asset registration methods.
*/
ASSET_TYPES.forEach(function (type) {
Vue[type] = function (
id,
definition
) {
if (!definition) {
return this.options[type + 's'][id]
} else {
/* istanbul ignore if */
if (type === 'component') {
validateComponentName(id);
}
if (type === 'component' && isPlainObject(definition)) {
definition.name = definition.name || id;
definition = this.options._base.extend(definition);
}
if (type === 'directive' && typeof definition === 'function') {
definition = { bind: definition, update: definition };
}
this.options[type + 's'][id] = definition;
return definition
}
};
});
}
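// For illustration: each asset register doubles as getter and setter.
//
//   Vue.component('my-comp', { template: '<div/>' }); // object is run through Vue.extend
//   var Ctor = Vue.component('my-comp');              // no definition: returns the constructor
//   Vue.directive('focus', function (el) { /* ... */ });
//   // function shorthand is normalized to { bind: fn, update: fn }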
/* */
function getComponentName (opts) {
return opts && (opts.Ctor.options.name || opts.tag)
}
function matches (pattern, name) {
if (Array.isArray(pattern)) {
return pattern.indexOf(name) > -1
} else if (typeof pattern === 'string') {
return pattern.split(',').indexOf(name) > -1
} else if (isRegExp(pattern)) {
return pattern.test(name)
}
/* istanbul ignore next */
return false
}
function pruneCache (keepAliveInstance, filter) {
var cache = keepAliveInstance.cache;
var keys = keepAliveInstance.keys;
var _vnode = keepAliveInstance._vnode;
for (var key in cache) {
var entry = cache[key];
if (entry) {
var name = entry.name;
if (name && !filter(name)) {
pruneCacheEntry(cache, key, keys, _vnode);
}
}
}
}
function pruneCacheEntry (
cache,
key,
keys,
current
) {
var entry = cache[key];
if (entry && (!current || entry.tag !== current.tag)) {
entry.componentInstance.$destroy();
}
cache[key] = null;
remove(keys, key);
}
var patternTypes = [String, RegExp, Array];
var KeepAlive = {
name: 'keep-alive',
abstract: true,
props: {
include: patternTypes,
exclude: patternTypes,
max: [String, Number]
},
methods: {
cacheVNode: function cacheVNode() {
var ref = this;
var cache = ref.cache;
var keys = ref.keys;
var vnodeToCache = ref.vnodeToCache;
var keyToCache = ref.keyToCache;
if (vnodeToCache) {
var tag = vnodeToCache.tag;
var componentInstance = vnodeToCache.componentInstance;
var componentOptions = vnodeToCache.componentOptions;
cache[keyToCache] = {
name: getComponentName(componentOptions),
tag: tag,
componentInstance: componentInstance,
};
keys.push(keyToCache);
// prune oldest entry
if (this.max && keys.length > parseInt(this.max)) {
pruneCacheEntry(cache, keys[0], keys, this._vnode);
}
this.vnodeToCache = null;
}
}
},
created: function created () {
this.cache = Object.create(null);
this.keys = [];
},
destroyed: function destroyed () {
for (var key in this.cache) {
pruneCacheEntry(this.cache, key, this.keys);
}
},
mounted: function mounted () {
var this$1 = this;
this.cacheVNode();
this.$watch('include', function (val) {
pruneCache(this$1, function (name) { return matches(val, name); });
});
this.$watch('exclude', function (val) {
pruneCache(this$1, function (name) { return !matches(val, name); });
});
},
updated: function updated () {
this.cacheVNode();
},
render: function render () {
var slot = this.$slots.default;
var vnode = getFirstComponentChild(slot);
var componentOptions = vnode && vnode.componentOptions;
if (componentOptions) {
// check pattern
var name = getComponentName(componentOptions);
var ref = this;
var include = ref.include;
var exclude = ref.exclude;
if (
// not included
(include && (!name || !matches(include, name))) ||
// excluded
(exclude && name && matches(exclude, name))
) {
return vnode
}
var ref$1 = this;
var cache = ref$1.cache;
var keys = ref$1.keys;
var key = vnode.key == null
// same constructor may get registered as different local components
// so cid alone is not enough (#3269)
? componentOptions.Ctor.cid + (componentOptions.tag ? ("::" + (componentOptions.tag)) : '')
: vnode.key;
if (cache[key]) {
vnode.componentInstance = cache[key].componentInstance;
// make current key freshest
remove(keys, key);
keys.push(key);
} else {
// delay setting the cache until update
this.vnodeToCache = vnode;
this.keyToCache = key;
}
vnode.data.keepAlive = true;
}
return vnode || (slot && slot[0])
}
};
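// For illustration (template sketch): instances cached by <keep-alive> retain
// their state and receive activated/deactivated hooks instead of being
// re-created on each switch.
//
//   <keep-alive include="a,b" :max="10">
//     <component :is="currentView"></component>
//   </keep-alive>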
var builtInComponents = {
KeepAlive: KeepAlive
};
/* */
function initGlobalAPI (Vue) {
// config
var configDef = {};
configDef.get = function () { return config; };
{
configDef.set = function () {
warn(
'Do not replace the Vue.config object, set individual fields instead.'
);
};
}
Object.defineProperty(Vue, 'config', configDef);
// exposed util methods.
// NOTE: these are not considered part of the public API - avoid relying on
// them unless you are aware of the risk.
Vue.util = {
warn: warn,
extend: extend,
mergeOptions: mergeOptions,
defineReactive: defineReactive$$1
};
Vue.set = set;
Vue.delete = del;
Vue.nextTick = nextTick;
// 2.6 explicit observable API
Vue.observable = function (obj) {
observe(obj);
return obj
};
Vue.options = Object.create(null);
ASSET_TYPES.forEach(function (type) {
Vue.options[type + 's'] = Object.create(null);
});
// this is used to identify the "base" constructor to extend all plain-object
// components with in Weex's multi-instance scenarios.
Vue.options._base = Vue;
extend(Vue.options.components, builtInComponents);
initUse(Vue);
initMixin$1(Vue);
initExtend(Vue);
initAssetRegisters(Vue);
}
initGlobalAPI(Vue);
Object.defineProperty(Vue.prototype, '$isServer', {
get: isServerRendering
});
Object.defineProperty(Vue.prototype, '$ssrContext', {
get: function get () {
/* istanbul ignore next */
return this.$vnode && this.$vnode.ssrContext
}
});
// expose FunctionalRenderContext for ssr runtime helper installation
Object.defineProperty(Vue, 'FunctionalRenderContext', {
value: FunctionalRenderContext
});
Vue.version = '2.6.14';
/* */
// these are reserved for web because they are directly compiled away
// during template compilation
var isReservedAttr = makeMap('style,class');
// attributes that should be using props for binding
var acceptValue = makeMap('input,textarea,option,select,progress');
var mustUseProp = function (tag, type, attr) {
return (
(attr === 'value' && acceptValue(tag)) && type !== 'button' ||
(attr === 'selected' && tag === 'option') ||
(attr === 'checked' && tag === 'input') ||
(attr === 'muted' && tag === 'video')
)
};
var isEnumeratedAttr = makeMap('contenteditable,draggable,spellcheck');
var isValidContentEditableValue = makeMap('events,caret,typing,plaintext-only');
var convertEnumeratedValue = function (key, value) {
return isFalsyAttrValue(value) || value === 'false'
? 'false'
// allow arbitrary string value for contenteditable
: key === 'contenteditable' && isValidContentEditableValue(value)
? value
: 'true'
};
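// For illustration, how enumerated attributes are coerced:
//
//   convertEnumeratedValue('draggable', true)                    // -> 'true'
//   convertEnumeratedValue('draggable', undefined)               // -> 'false' (falsy attr value)
//   convertEnumeratedValue('contenteditable', 'plaintext-only')  // -> 'plaintext-only'
//   convertEnumeratedValue('spellcheck', 'anything')             // -> 'true'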
var isBooleanAttr = makeMap(
'allowfullscreen,async,autofocus,autoplay,checked,compact,controls,declare,' +
'default,defaultchecked,defaultmuted,defaultselected,defer,disabled,' +
'enabled,formnovalidate,hidden,indeterminate,inert,ismap,itemscope,loop,multiple,' +
'muted,nohref,noresize,noshade,novalidate,nowrap,open,pauseonexit,readonly,' +
'required,reversed,scoped,seamless,selected,sortable,' +
'truespeed,typemustmatch,visible'
);
var xlinkNS = 'http://www.w3.org/1999/xlink';
var isXlink = function (name) {
return name.charAt(5) === ':' && name.slice(0, 5) === 'xlink'
};
var getXlinkProp = function (name) {
return isXlink(name) ? name.slice(6, name.length) : ''
};
var isFalsyAttrValue = function (val) {
return val == null || val === false
};
/* */
function genClassForVnode (vnode) {
var data = vnode.data;
var parentNode = vnode;
var childNode = vnode;
while (isDef(childNode.componentInstance)) {
childNode = childNode.componentInstance._vnode;
if (childNode && childNode.data) {
data = mergeClassData(childNode.data, data);
}
}
while (isDef(parentNode = parentNode.parent)) {
if (parentNode && parentNode.data) {
data = mergeClassData(data, parentNode.data);
}
}
return renderClass(data.staticClass, data.class)
}
function mergeClassData (child, parent) {
return {
staticClass: concat(child.staticClass, parent.staticClass),
class: isDef(child.class)
? [child.class, parent.class]
: parent.class
}
}
function renderClass (
staticClass,
dynamicClass
) {
if (isDef(staticClass) || isDef(dynamicClass)) {
return concat(staticClass, stringifyClass(dynamicClass))
}
/* istanbul ignore next */
return ''
}
function concat (a, b) {
return a ? b ? (a + ' ' + b) : a : (b || '')
}
function stringifyClass (value) {
if (Array.isArray(value)) {
return stringifyArray(value)
}
if (isObject(value)) {
return stringifyObject(value)
}
if (typeof value === 'string') {
return value
}
/* istanbul ignore next */
return ''
}
function stringifyArray (value) {
var res = '';
var stringified;
for (var i = 0, l = value.length; i < l; i++) {
if (isDef(stringified = stringifyClass(value[i])) && stringified !== '') {
if (res) { res += ' '; }
res += stringified;
}
}
return res
}
function stringifyObject (value) {
var res = '';
for (var key in value) {
if (value[key]) {
if (res) { res += ' '; }
res += key;
}
}
return res
}
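// For illustration: stringifyClass flattens every supported class shape into a
// single space-separated string.
//
//   stringifyClass('foo')                               // -> 'foo'
//   stringifyClass(['foo', { bar: true, baz: false }])  // -> 'foo bar'
//   stringifyClass({ a: 1, b: 0 })                      // -> 'a'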
/* */
var namespaceMap = {
svg: 'http://www.w3.org/2000/svg',
math: 'http://www.w3.org/1998/Math/MathML'
};
var isHTMLTag = makeMap(
'html,body,base,head,link,meta,style,title,' +
'address,article,aside,footer,header,h1,h2,h3,h4,h5,h6,hgroup,nav,section,' +
'div,dd,dl,dt,figcaption,figure,picture,hr,img,li,main,ol,p,pre,ul,' +
'a,b,abbr,bdi,bdo,br,cite,code,data,dfn,em,i,kbd,mark,q,rp,rt,rtc,ruby,' +
's,samp,small,span,strong,sub,sup,time,u,var,wbr,area,audio,map,track,video,' +
'embed,object,param,source,canvas,script,noscript,del,ins,' +
'caption,col,colgroup,table,thead,tbody,td,th,tr,' +
'button,datalist,fieldset,form,input,label,legend,meter,optgroup,option,' +
'output,progress,select,textarea,' +
'details,dialog,menu,menuitem,summary,' +
'content,element,shadow,template,blockquote,iframe,tfoot'
);
// this map is intentionally selective, only covering SVG elements that may
// contain child elements.
var isSVG = makeMap(
'svg,animate,circle,clippath,cursor,defs,desc,ellipse,filter,font-face,' +
'foreignobject,g,glyph,image,line,marker,mask,missing-glyph,path,pattern,' +
'polygon,polyline,rect,switch,symbol,text,textpath,tspan,use,view',
true
);
var isPreTag = function (tag) { return tag === 'pre'; };
var isReservedTag = function (tag) {
return isHTMLTag(tag) || isSVG(tag)
};
function getTagNamespace (tag) {
if (isSVG(tag)) {
return 'svg'
}
// basic support for MathML
// note it doesn't support other MathML elements being component roots
if (tag === 'math') {
return 'math'
}
}
var unknownElementCache = Object.create(null);
function isUnknownElement (tag) {
/* istanbul ignore if */
if (!inBrowser) {
return true
}
if (isReservedTag(tag)) {
return false
}
tag = tag.toLowerCase();
/* istanbul ignore if */
if (unknownElementCache[tag] != null) {
return unknownElementCache[tag]
}
var el = document.createElement(tag);
if (tag.indexOf('-') > -1) {
// http://stackoverflow.com/a/28210364/1070244
return (unknownElementCache[tag] = (
el.constructor === window.HTMLUnknownElement ||
el.constructor === window.HTMLElement
))
} else {
return (unknownElementCache[tag] = /HTMLUnknownElement/.test(el.toString()))
}
}
var isTextInputType = makeMap('text,number,password,search,email,tel,url');
/* */
/**
* Query an element selector if it's not an element already.
*/
function query (el) {
if (typeof el === 'string') {
var selected = document.querySelector(el);
if (!selected) {
warn(
'Cannot find element: ' + el
);
return document.createElement('div')
}
return selected
} else {
return el
}
}
/* */
function createElement$1 (tagName, vnode) {
var elm = document.createElement(tagName);
if (tagName !== 'select') {
return elm
}
// false or null will remove the attribute but undefined will not
if (vnode.data && vnode.data.attrs && vnode.data.attrs.multiple !== undefined) {
elm.setAttribute('multiple', 'multiple');
}
return elm
}
function createElementNS (namespace, tagName) {
return document.createElementNS(namespaceMap[namespace], tagName)
}
function createTextNode (text) {
return document.createTextNode(text)
}
function createComment (text) {
return document.createComment(text)
}
function insertBefore (parentNode, newNode, referenceNode) {
parentNode.insertBefore(newNode, referenceNode);
}
function removeChild (node, child) {
node.removeChild(child);
}
function appendChild (node, child) {
node.appendChild(child);
}
function parentNode (node) {
return node.parentNode
}
function nextSibling (node) {
return node.nextSibling
}
function tagName (node) {
return node.tagName
}
function setTextContent (node, text) {
node.textContent = text;
}
function setStyleScope (node, scopeId) {
node.setAttribute(scopeId, '');
}
var nodeOps = /*#__PURE__*/Object.freeze({
createElement: createElement$1,
createElementNS: createElementNS,
createTextNode: createTextNode,
createComment: createComment,
insertBefore: insertBefore,
removeChild: removeChild,
appendChild: appendChild,
parentNode: parentNode,
nextSibling: nextSibling,
tagName: tagName,
setTextContent: setTextContent,
setStyleScope: setStyleScope
});
/* */
var ref = {
create: function create (_, vnode) {
registerRef(vnode);
},
update: function update (oldVnode, vnode) {
if (oldVnode.data.ref !== vnode.data.ref) {
registerRef(oldVnode, true);
registerRef(vnode);
}
},
destroy: function destroy (vnode) {
registerRef(vnode, true);
}
};
function registerRef (vnode, isRemoval) {
var key = vnode.data.ref;
if (!isDef(key)) { return }
var vm = vnode.context;
var ref = vnode.componentInstance || vnode.elm;
var refs = vm.$refs;
if (isRemoval) {
if (Array.isArray(refs[key])) {
remove(refs[key], ref);
} else if (refs[key] === ref) {
refs[key] = undefined;
}
} else {
if (vnode.data.refInFor) {
if (!Array.isArray(refs[key])) {
refs[key] = [ref];
} else if (refs[key].indexOf(ref) < 0) {
// $flow-disable-line
refs[key].push(ref);
}
} else {
refs[key] = ref;
}
}
}
/**
* Virtual DOM patching algorithm based on Snabbdom by
* Simon Friis Vindum (@paldepind)
* Licensed under the MIT License
* https://github.com/paldepind/snabbdom/blob/master/LICENSE
*
* modified by Evan You (@yyx990803)
*
* Not type-checking this because this file is perf-critical and the cost
* of making flow understand it is not worth it.
*/
var emptyNode = new VNode('', {}, []);
var hooks = ['create', 'activate', 'update', 'remove', 'destroy'];
function sameVnode (a, b) {
return (
a.key === b.key &&
a.asyncFactory === b.asyncFactory && (
(
a.tag === b.tag &&
a.isComment === b.isComment &&
isDef(a.data) === isDef(b.data) &&
sameInputType(a, b)
) || (
isTrue(a.isAsyncPlaceholder) &&
isUndef(b.asyncFactory.error)
)
)
)
}
function sameInputType (a, b) {
if (a.tag !== 'input') { return true }
var i;
var typeA = isDef(i = a.data) && isDef(i = i.attrs) && i.type;
var typeB = isDef(i = b.data) && isDef(i = i.attrs) && i.type;
return typeA === typeB || isTextInputType(typeA) && isTextInputType(typeB)
}
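// For illustration: sameVnode is deliberately shallow. Two keyless <input>
// vnodes with types 'text' and 'password' still count as "same" (both types are
// in isTextInputType), so the element is reused; 'text' vs 'checkbox' would not
// match, forcing a replacement.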
function createKeyToOldIdx (children, beginIdx, endIdx) {
var i, key;
var map = {};
for (i = beginIdx; i <= endIdx; ++i) {
key = children[i].key;
if (isDef(key)) { map[key] = i; }
}
return map
}
function createPatchFunction (backend) {
var i, j;
var cbs = {};
var modules = backend.modules;
var nodeOps = backend.nodeOps;
for (i = 0; i < hooks.length; ++i) {
cbs[hooks[i]] = [];
for (j = 0; j < modules.length; ++j) {
if (isDef(modules[j][hooks[i]])) {
cbs[hooks[i]].push(modules[j][hooks[i]]);
}
}
}
function emptyNodeAt (elm) {
return new VNode(nodeOps.tagName(elm).toLowerCase(), {}, [], undefined, elm)
}
function createRmCb (childElm, listeners) {
function remove$$1 () {
if (--remove$$1.listeners === 0) {
removeNode(childElm);
}
}
remove$$1.listeners = listeners;
return remove$$1
}
function removeNode (el) {
var parent = nodeOps.parentNode(el);
// element may have already been removed due to v-html / v-text
if (isDef(parent)) {
nodeOps.removeChild(parent, el);
}
}
function isUnknownElement$$1 (vnode, inVPre) {
return (
!inVPre &&
!vnode.ns &&
!(
config.ignoredElements.length &&
config.ignoredElements.some(function (ignore) {
return isRegExp(ignore)
? ignore.test(vnode.tag)
: ignore === vnode.tag
})
) &&
config.isUnknownElement(vnode.tag)
)
}
var creatingElmInVPre = 0;
function createElm (
vnode,
insertedVnodeQueue,
parentElm,
refElm,
nested,
ownerArray,
index
) {
if (isDef(vnode.elm) && isDef(ownerArray)) {
// This vnode was used in a previous render!
// now it's used as a new node, overwriting its elm would cause
// potential patch errors down the road when it's used as an insertion
      // reference node. Instead, we clone the node on-demand before creating
      // the associated DOM element for it.
vnode = ownerArray[index] = cloneVNode(vnode);
}
vnode.isRootInsert = !nested; // for transition enter check
if (createComponent(vnode, insertedVnodeQueue, parentElm, refElm)) {
return
}
var data = vnode.data;
var children = vnode.children;
var tag = vnode.tag;
if (isDef(tag)) {
{
if (data && data.pre) {
creatingElmInVPre++;
}
if (isUnknownElement$$1(vnode, creatingElmInVPre)) {
warn(
'Unknown custom element: <' + tag + '> - did you ' +
'register the component correctly? For recursive components, ' +
'make sure to provide the "name" option.',
vnode.context
);
}
}
vnode.elm = vnode.ns
? nodeOps.createElementNS(vnode.ns, tag)
: nodeOps.createElement(tag, vnode);
setScope(vnode);
/* istanbul ignore if */
{
createChildren(vnode, children, insertedVnodeQueue);
if (isDef(data)) {
invokeCreateHooks(vnode, insertedVnodeQueue);
}
insert(parentElm, vnode.elm, refElm);
}
if (data && data.pre) {
creatingElmInVPre--;
}
} else if (isTrue(vnode.isComment)) {
vnode.elm = nodeOps.createComment(vnode.text);
insert(parentElm, vnode.elm, refElm);
} else {
vnode.elm = nodeOps.createTextNode(vnode.text);
insert(parentElm, vnode.elm, refElm);
}
}
function createComponent (vnode, insertedVnodeQueue, parentElm, refElm) {
var i = vnode.data;
if (isDef(i)) {
var isReactivated = isDef(vnode.componentInstance) && i.keepAlive;
if (isDef(i = i.hook) && isDef(i = i.init)) {
i(vnode, false /* hydrating */);
}
// after calling the init hook, if the vnode is a child component
// it should've created a child instance and mounted it. the child
      // component has also set the placeholder vnode's elm.
// in that case we can just return the element and be done.
if (isDef(vnode.componentInstance)) {
initComponent(vnode, insertedVnodeQueue);
insert(parentElm, vnode.elm, refElm);
if (isTrue(isReactivated)) {
reactivateComponent(vnode, insertedVnodeQueue, parentElm, refElm);
}
return true
}
}
}
function initComponent (vnode, insertedVnodeQueue) {
if (isDef(vnode.data.pendingInsert)) {
insertedVnodeQueue.push.apply(insertedVnodeQueue, vnode.data.pendingInsert);
vnode.data.pendingInsert = null;
}
vnode.elm = vnode.componentInstance.$el;
if (isPatchable(vnode)) {
invokeCreateHooks(vnode, insertedVnodeQueue);
setScope(vnode);
} else {
// empty component root.
// skip all element-related modules except for ref (#3455)
registerRef(vnode);
// make sure to invoke the insert hook
insertedVnodeQueue.push(vnode);
}
}
function reactivateComponent (vnode, insertedVnodeQueue, parentElm, refElm) {
var i;
    // hack for #4339: a reactivated component with an inner transition
    // does not trigger the transition because the inner node's created hooks are not called
// again. It's not ideal to involve module-specific logic in here but
// there doesn't seem to be a better way to do it.
var innerNode = vnode;
while (innerNode.componentInstance) {
innerNode = innerNode.componentInstance._vnode;
if (isDef(i = innerNode.data) && isDef(i = i.transition)) {
for (i = 0; i < cbs.activate.length; ++i) {
cbs.activate[i](emptyNode, innerNode);
}
insertedVnodeQueue.push(innerNode);
break
}
}
// unlike a newly created component,
// a reactivated keep-alive component doesn't insert itself
insert(parentElm, vnode.elm, refElm);
}
function insert (parent, elm, ref$$1) {
if (isDef(parent)) {
if (isDef(ref$$1)) {
if (nodeOps.parentNode(ref$$1) === parent) {
nodeOps.insertBefore(parent, elm, ref$$1);
}
} else {
nodeOps.appendChild(parent, elm);
}
}
}
function createChildren (vnode, children, insertedVnodeQueue) {
if (Array.isArray(children)) {
{
checkDuplicateKeys(children);
}
for (var i = 0; i < children.length; ++i) {
createElm(children[i], insertedVnodeQueue, vnode.elm, null, true, children, i);
}
} else if (isPrimitive(vnode.text)) {
nodeOps.appendChild(vnode.elm, nodeOps.createTextNode(String(vnode.text)));
}
}
function isPatchable (vnode) {
while (vnode.componentInstance) {
vnode = vnode.componentInstance._vnode;
}
return isDef(vnode.tag)
}
function invokeCreateHooks (vnode, insertedVnodeQueue) {
for (var i$1 = 0; i$1 < cbs.create.length; ++i$1) {
cbs.create[i$1](emptyNode, vnode);
}
i = vnode.data.hook; // Reuse variable
if (isDef(i)) {
if (isDef(i.create)) { i.create(emptyNode, vnode); }
if (isDef(i.insert)) { insertedVnodeQueue.push(vnode); }
}
}
// set scope id attribute for scoped CSS.
// this is implemented as a special case to avoid the overhead
// of going through the normal attribute patching process.
function setScope (vnode) {
var i;
if (isDef(i = vnode.fnScopeId)) {
nodeOps.setStyleScope(vnode.elm, i);
} else {
var ancestor = vnode;
while (ancestor) {
if (isDef(i = ancestor.context) && isDef(i = i.$options._scopeId)) {
nodeOps.setStyleScope(vnode.elm, i);
}
ancestor = ancestor.parent;
}
}
    // slot content should also get the scopeId from the host instance.
if (isDef(i = activeInstance) &&
i !== vnode.context &&
i !== vnode.fnContext &&
isDef(i = i.$options._scopeId)
) {
nodeOps.setStyleScope(vnode.elm, i);
}
}
function addVnodes (parentElm, refElm, vnodes, startIdx, endIdx, insertedVnodeQueue) {
for (; startIdx <= endIdx; ++startIdx) {
createElm(vnodes[startIdx], insertedVnodeQueue, parentElm, refElm, false, vnodes, startIdx);
}
}
function invokeDestroyHook (vnode) {
var i, j;
var data = vnode.data;
if (isDef(data)) {
if (isDef(i = data.hook) && isDef(i = i.destroy)) { i(vnode); }
for (i = 0; i < cbs.destroy.length; ++i) { cbs.destroy[i](vnode); }
}
if (isDef(i = vnode.children)) {
for (j = 0; j < vnode.children.length; ++j) {
invokeDestroyHook(vnode.children[j]);
}
}
}
function removeVnodes (vnodes, startIdx, endIdx) {
for (; startIdx <= endIdx; ++startIdx) {
var ch = vnodes[startIdx];
if (isDef(ch)) {
if (isDef(ch.tag)) {
removeAndInvokeRemoveHook(ch);
invokeDestroyHook(ch);
} else { // Text node
removeNode(ch.elm);
}
}
}
}
function removeAndInvokeRemoveHook (vnode, rm) {
if (isDef(rm) || isDef(vnode.data)) {
var i;
var listeners = cbs.remove.length + 1;
if (isDef(rm)) {
// we have a recursively passed down rm callback
// increase the listeners count
rm.listeners += listeners;
} else {
// directly removing
rm = createRmCb(vnode.elm, listeners);
}
// recursively invoke hooks on child component root node
if (isDef(i = vnode.componentInstance) && isDef(i = i._vnode) && isDef(i.data)) {
removeAndInvokeRemoveHook(i, rm);
}
for (i = 0; i < cbs.remove.length; ++i) {
cbs.remove[i](vnode, rm);
}
if (isDef(i = vnode.data.hook) && isDef(i = i.remove)) {
i(vnode, rm);
} else {
rm();
}
} else {
removeNode(vnode.elm);
}
}
function updateChildren (parentElm, oldCh, newCh, insertedVnodeQueue, removeOnly) {
var oldStartIdx = 0;
var newStartIdx = 0;
var oldEndIdx = oldCh.length - 1;
var oldStartVnode = oldCh[0];
var oldEndVnode = oldCh[oldEndIdx];
var newEndIdx = newCh.length - 1;
var newStartVnode = newCh[0];
var newEndVnode = newCh[newEndIdx];
var oldKeyToIdx, idxInOld, vnodeToMove, refElm;
// removeOnly is a special flag used only by <transition-group>
// to ensure removed elements stay in correct relative positions
// during leaving transitions
var canMove = !removeOnly;
{
checkDuplicateKeys(newCh);
}
while (oldStartIdx <= oldEndIdx && newStartIdx <= newEndIdx) {
if (isUndef(oldStartVnode)) {
oldStartVnode = oldCh[++oldStartIdx]; // Vnode has been moved left
} else if (isUndef(oldEndVnode)) {
oldEndVnode = oldCh[--oldEndIdx];
} else if (sameVnode(oldStartVnode, newStartVnode)) {
patchVnode(oldStartVnode, newStartVnode, insertedVnodeQueue, newCh, newStartIdx);
oldStartVnode = oldCh[++oldStartIdx];
newStartVnode = newCh[++newStartIdx];
} else if (sameVnode(oldEndVnode, newEndVnode)) {
patchVnode(oldEndVnode, newEndVnode, insertedVnodeQueue, newCh, newEndIdx);
oldEndVnode = oldCh[--oldEndIdx];
newEndVnode = newCh[--newEndIdx];
} else if (sameVnode(oldStartVnode, newEndVnode)) { // Vnode moved right
patchVnode(oldStartVnode, newEndVnode, insertedVnodeQueue, newCh, newEndIdx);
canMove && nodeOps.insertBefore(parentElm, oldStartVnode.elm, nodeOps.nextSibling(oldEndVnode.elm));
oldStartVnode = oldCh[++oldStartIdx];
newEndVnode = newCh[--newEndIdx];
} else if (sameVnode(oldEndVnode, newStartVnode)) { // Vnode moved left
patchVnode(oldEndVnode, newStartVnode, insertedVnodeQueue, newCh, newStartIdx);
canMove && nodeOps.insertBefore(parentElm, oldEndVnode.elm, oldStartVnode.elm);
oldEndVnode = oldCh[--oldEndIdx];
newStartVnode = newCh[++newStartIdx];
} else {
if (isUndef(oldKeyToIdx)) { oldKeyToIdx = createKeyToOldIdx(oldCh, oldStartIdx, oldEndIdx); }
idxInOld = isDef(newStartVnode.key)
? oldKeyToIdx[newStartVnode.key]
: findIdxInOld(newStartVnode, oldCh, oldStartIdx, oldEndIdx);
if (isUndef(idxInOld)) { // New element
createElm(newStartVnode, insertedVnodeQueue, parentElm, oldStartVnode.elm, false, newCh, newStartIdx);
} else {
vnodeToMove = oldCh[idxInOld];
if (sameVnode(vnodeToMove, newStartVnode)) {
patchVnode(vnodeToMove, newStartVnode, insertedVnodeQueue, newCh, newStartIdx);
oldCh[idxInOld] = undefined;
canMove && nodeOps.insertBefore(parentElm, vnodeToMove.elm, oldStartVnode.elm);
} else {
// same key but different element. treat as new element
createElm(newStartVnode, insertedVnodeQueue, parentElm, oldStartVnode.elm, false, newCh, newStartIdx);
}
}
newStartVnode = newCh[++newStartIdx];
}
}
if (oldStartIdx > oldEndIdx) {
refElm = isUndef(newCh[newEndIdx + 1]) ? null : newCh[newEndIdx + 1].elm;
addVnodes(parentElm, refElm, newCh, newStartIdx, newEndIdx, insertedVnodeQueue);
} else if (newStartIdx > newEndIdx) {
removeVnodes(oldCh, oldStartIdx, oldEndIdx);
}
}
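// For illustration, one pass of the double-ended diff above with keyed
// children: patching old [A, B, C] against new [C, A, B] first fails the
// head/head and tail/tail comparisons, then matches oldEnd C against newStart C
// ("moved left"), so C's element is moved in front of A with a single
// insertBefore; the remaining A and B then match head-to-head and are patched
// in place without any further DOM moves.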
function checkDuplicateKeys (children) {
var seenKeys = {};
for (var i = 0; i < children.length; i++) {
var vnode = children[i];
var key = vnode.key;
if (isDef(key)) {
if (seenKeys[key]) {
warn(
("Duplicate keys detected: '" + key + "'. This may cause an update error."),
vnode.context
);
} else {
seenKeys[key] = true;
}
}
}
}
function findIdxInOld (node, oldCh, start, end) {
for (var i = start; i < end; i++) {
var c = oldCh[i];
if (isDef(c) && sameVnode(node, c)) { return i }
}
}
function patchVnode (
oldVnode,
vnode,
insertedVnodeQueue,
ownerArray,
index,
removeOnly
) {
if (oldVnode === vnode) {
return
}
if (isDef(vnode.elm) && isDef(ownerArray)) {
// clone reused vnode
vnode = ownerArray[index] = cloneVNode(vnode);
}
var elm = vnode.elm = oldVnode.elm;
if (isTrue(oldVnode.isAsyncPlaceholder)) {
if (isDef(vnode.asyncFactory.resolved)) {
hydrate(oldVnode.elm, vnode, insertedVnodeQueue);
} else {
vnode.isAsyncPlaceholder = true;
}
return
}
// reuse element for static trees.
// note we only do this if the vnode is cloned -
// if the new node is not cloned it means the render functions have been
// reset by the hot-reload-api and we need to do a proper re-render.
if (isTrue(vnode.isStatic) &&
isTrue(oldVnode.isStatic) &&
vnode.key === oldVnode.key &&
(isTrue(vnode.isCloned) || isTrue(vnode.isOnce))
) {
vnode.componentInstance = oldVnode.componentInstance;
return
}
var i;
var data = vnode.data;
if (isDef(data) && isDef(i = data.hook) && isDef(i = i.prepatch)) {
i(oldVnode, vnode);
}
var oldCh = oldVnode.children;
var ch = vnode.children;
if (isDef(data) && isPatchable(vnode)) {
for (i = 0; i < cbs.update.length; ++i) { cbs.update[i](oldVnode, vnode); }
if (isDef(i = data.hook) && isDef(i = i.update)) { i(oldVnode, vnode); }
}
if (isUndef(vnode.text)) {
if (isDef(oldCh) && isDef(ch)) {
if (oldCh !== ch) { updateChildren(elm, oldCh, ch, insertedVnodeQueue, removeOnly); }
} else if (isDef(ch)) {
{
checkDuplicateKeys(ch);
}
if (isDef(oldVnode.text)) { nodeOps.setTextContent(elm, ''); }
addVnodes(elm, null, ch, 0, ch.length - 1, insertedVnodeQueue);
} else if (isDef(oldCh)) {
removeVnodes(oldCh, 0, oldCh.length - 1);
} else if (isDef(oldVnode.text)) {
nodeOps.setTextContent(elm, '');
}
} else if (oldVnode.text !== vnode.text) {
nodeOps.setTextContent(elm, vnode.text);
}
if (isDef(data)) {
if (isDef(i = data.hook) && isDef(i = i.postpatch)) { i(oldVnode, vnode); }
}
}
function invokeInsertHook (vnode, queue, initial) {
// delay insert hooks for component root nodes, invoke them after the
// element is really inserted
if (isTrue(initial) && isDef(vnode.parent)) {
vnode.parent.data.pendingInsert = queue;
} else {
for (var i = 0; i < queue.length; ++i) {
queue[i].data.hook.insert(queue[i]);
}
}
}
var hydrationBailed = false;
// list of modules that can skip create hook during hydration because they
  // are already rendered on the client or have no need for initialization
// Note: style is excluded because it relies on initial clone for future
// deep updates (#7063).
var isRenderedModule = makeMap('attrs,class,staticClass,staticStyle,key');
// Note: this is a browser-only function so we can assume elms are DOM nodes.
function hydrate (elm, vnode, insertedVnodeQueue, inVPre) {
var i;
var tag = vnode.tag;
var data = vnode.data;
var children = vnode.children;
inVPre = inVPre || (data && data.pre);
vnode.elm = elm;
if (isTrue(vnode.isComment) && isDef(vnode.asyncFactory)) {
vnode.isAsyncPlaceholder = true;
return true
}
// assert node match
{
if (!assertNodeMatch(elm, vnode, inVPre)) {
return false
}
}
if (isDef(data)) {
if (isDef(i = data.hook) && isDef(i = i.init)) { i(vnode, true /* hydrating */); }
if (isDef(i = vnode.componentInstance)) {
// child component. it should have hydrated its own tree.
initComponent(vnode, insertedVnodeQueue);
return true
}
}
if (isDef(tag)) {
if (isDef(children)) {
// empty element, allow client to pick up and populate children
if (!elm.hasChildNodes()) {
createChildren(vnode, children, insertedVnodeQueue);
} else {
// v-html and domProps: innerHTML
if (isDef(i = data) && isDef(i = i.domProps) && isDef(i = i.innerHTML)) {
if (i !== elm.innerHTML) {
/* istanbul ignore if */
if (typeof console !== 'undefined' &&
!hydrationBailed
) {
hydrationBailed = true;
console.warn('Parent: ', elm);
console.warn('server innerHTML: ', i);
console.warn('client innerHTML: ', elm.innerHTML);
}
return false
}
} else {
// iterate and compare children lists
var childrenMatch = true;
var childNode = elm.firstChild;
for (var i$1 = 0; i$1 < children.length; i$1++) {
if (!childNode || !hydrate(childNode, children[i$1], insertedVnodeQueue, inVPre)) {
childrenMatch = false;
break
}
childNode = childNode.nextSibling;
}
// if childNode is not null, it means the actual childNodes list is
// longer than the virtual children list.
if (!childrenMatch || childNode) {
/* istanbul ignore if */
if (typeof console !== 'undefined' &&
!hydrationBailed
) {
hydrationBailed = true;
console.warn('Parent: ', elm);
console.warn('Mismatching childNodes vs. VNodes: ', elm.childNodes, children);
}
return false
}
}
}
}
if (isDef(data)) {
var fullInvoke = false;
for (var key in data) {
if (!isRenderedModule(key)) {
fullInvoke = true;
invokeCreateHooks(vnode, insertedVnodeQueue);
break
}
}
if (!fullInvoke && data['class']) {
// ensure collecting deps for deep class bindings for future updates
traverse(data['class']);
}
}
} else if (elm.data !== vnode.text) {
elm.data = vnode.text;
}
return true
}
function assertNodeMatch (node, vnode, inVPre) {
if (isDef(vnode.tag)) {
return vnode.tag.indexOf('vue-component') === 0 || (
!isUnknownElement$$1(vnode, inVPre) &&
vnode.tag.toLowerCase() === (node.tagName && node.tagName.toLowerCase())
)
} else {
return node.nodeType === (vnode.isComment ? 8 : 3)
}
}
return function patch (oldVnode, vnode, hydrating, removeOnly) {
if (isUndef(vnode)) {
if (isDef(oldVnode)) { invokeDestroyHook(oldVnode); }
return
}
var isInitialPatch = false;
var insertedVnodeQueue = [];
if (isUndef(oldVnode)) {
// empty mount (likely as component), create new root element
isInitialPatch = true;
createElm(vnode, insertedVnodeQueue);
} else {
var isRealElement = isDef(oldVnode.nodeType);
if (!isRealElement && sameVnode(oldVnode, vnode)) {
// patch existing root node
patchVnode(oldVnode, vnode, insertedVnodeQueue, null, null, removeOnly);
} else {
if (isRealElement) {
// mounting to a real element
// check if this is server-rendered content and if we can perform
// a successful hydration.
if (oldVnode.nodeType === 1 && oldVnode.hasAttribute(SSR_ATTR)) {
oldVnode.removeAttribute(SSR_ATTR);
hydrating = true;
}
if (isTrue(hydrating)) {
if (hydrate(oldVnode, vnode, insertedVnodeQueue)) {
invokeInsertHook(vnode, insertedVnodeQueue, true);
return oldVnode
} else {
warn(
'The client-side rendered virtual DOM tree is not matching ' +
'server-rendered content. This is likely caused by incorrect ' +
'HTML markup, for example nesting block-level elements inside ' +
'<p>, or missing <tbody>. Bailing hydration and performing ' +
'full client-side render.'
);
}
}
// either not server-rendered, or hydration failed.
// create an empty node and replace it
oldVnode = emptyNodeAt(oldVnode);
}
// replacing existing element
var oldElm = oldVnode.elm;
var parentElm = nodeOps.parentNode(oldElm);
// create new node
createElm(
vnode,
insertedVnodeQueue,
// extremely rare edge case: do not insert if old element is in a
// leaving transition. Only happens when combining transition +
// keep-alive + HOCs. (#4590)
oldElm._leaveCb ? null : parentElm,
nodeOps.nextSibling(oldElm)
);
// update parent placeholder node element, recursively
if (isDef(vnode.parent)) {
var ancestor = vnode.parent;
var patchable = isPatchable(vnode);
while (ancestor) {
for (var i = 0; i < cbs.destroy.length; ++i) {
cbs.destroy[i](ancestor);
}
ancestor.elm = vnode.elm;
if (patchable) {
for (var i$1 = 0; i$1 < cbs.create.length; ++i$1) {
cbs.create[i$1](emptyNode, ancestor);
}
// #6513
// invoke insert hooks that may have been merged by create hooks.
              // e.g. for directives that use the "inserted" hook.
var insert = ancestor.data.hook.insert;
if (insert.merged) {
// start at index 1 to avoid re-invoking component mounted hook
for (var i$2 = 1; i$2 < insert.fns.length; i$2++) {
insert.fns[i$2]();
}
}
} else {
registerRef(ancestor);
}
ancestor = ancestor.parent;
}
}
// destroy old node
if (isDef(parentElm)) {
removeVnodes([oldVnode], 0, 0);
} else if (isDef(oldVnode.tag)) {
invokeDestroyHook(oldVnode);
}
}
}
invokeInsertHook(vnode, insertedVnodeQueue, isInitialPatch);
return vnode.elm
}
}
/* */
var directives = {
create: updateDirectives,
update: updateDirectives,
destroy: function unbindDirectives (vnode) {
updateDirectives(vnode, emptyNode);
}
};
function updateDirectives (oldVnode, vnode) {
if (oldVnode.data.directives || vnode.data.directives) {
_update(oldVnode, vnode);
}
}
function _update (oldVnode, vnode) {
var isCreate = oldVnode === emptyNode;
var isDestroy = vnode === emptyNode;
var oldDirs = normalizeDirectives$1(oldVnode.data.directives, oldVnode.context);
var newDirs = normalizeDirectives$1(vnode.data.directives, vnode.context);
var dirsWithInsert = [];
var dirsWithPostpatch = [];
var key, oldDir, dir;
for (key in newDirs) {
oldDir = oldDirs[key];
dir = newDirs[key];
if (!oldDir) {
// new directive, bind
callHook$1(dir, 'bind', vnode, oldVnode);
if (dir.def && dir.def.inserted) {
dirsWithInsert.push(dir);
}
} else {
// existing directive, update
dir.oldValue = oldDir.value;
dir.oldArg = oldDir.arg;
callHook$1(dir, 'update', vnode, oldVnode);
if (dir.def && dir.def.componentUpdated) {
dirsWithPostpatch.push(dir);
}
}
}
if (dirsWithInsert.length) {
var callInsert = function () {
for (var i = 0; i < dirsWithInsert.length; i++) {
callHook$1(dirsWithInsert[i], 'inserted', vnode, oldVnode);
}
};
if (isCreate) {
mergeVNodeHook(vnode, 'insert', callInsert);
} else {
callInsert();
}
}
if (dirsWithPostpatch.length) {
mergeVNodeHook(vnode, 'postpatch', function () {
for (var i = 0; i < dirsWithPostpatch.length; i++) {
callHook$1(dirsWithPostpatch[i], 'componentUpdated', vnode, oldVnode);
}
});
}
if (!isCreate) {
for (key in oldDirs) {
if (!newDirs[key]) {
// no longer present, unbind
callHook$1(oldDirs[key], 'unbind', oldVnode, oldVnode, isDestroy);
}
}
}
}
var emptyModifiers = Object.create(null);
function normalizeDirectives$1 (
dirs,
vm
) {
var res = Object.create(null);
if (!dirs) {
// $flow-disable-line
return res
}
var i, dir;
for (i = 0; i < dirs.length; i++) {
dir = dirs[i];
if (!dir.modifiers) {
// $flow-disable-line
dir.modifiers = emptyModifiers;
}
res[getRawDirName(dir)] = dir;
dir.def = resolveAsset(vm.$options, 'directives', dir.name, true);
}
// $flow-disable-line
return res
}
function getRawDirName (dir) {
return dir.rawName || ((dir.name) + "." + (Object.keys(dir.modifiers || {}).join('.')))
}
function callHook$1 (dir, hook, vnode, oldVnode, isDestroy) {
var fn = dir.def && dir.def[hook];
if (fn) {
try {
fn(vnode.elm, dir, vnode, oldVnode, isDestroy);
} catch (e) {
handleError(e, vnode.context, ("directive " + (dir.name) + " " + hook + " hook"));
}
}
}
var baseModules = [
ref,
directives
];
/* */
function updateAttrs (oldVnode, vnode) {
var opts = vnode.componentOptions;
if (isDef(opts) && opts.Ctor.options.inheritAttrs === false) {
return
}
if (isUndef(oldVnode.data.attrs) && isUndef(vnode.data.attrs)) {
return
}
var key, cur, old;
var elm = vnode.elm;
var oldAttrs = oldVnode.data.attrs || {};
var attrs = vnode.data.attrs || {};
// clone observed objects, as the user probably wants to mutate it
if (isDef(attrs.__ob__)) {
attrs = vnode.data.attrs = extend({}, attrs);
}
for (key in attrs) {
cur = attrs[key];
old = oldAttrs[key];
if (old !== cur) {
setAttr(elm, key, cur, vnode.data.pre);
}
}
// #4391: in IE9, setting type can reset value for input[type=radio]
// #6666: IE/Edge forces progress value down to 1 before setting a max
/* istanbul ignore if */
if ((isIE || isEdge) && attrs.value !== oldAttrs.value) {
setAttr(elm, 'value', attrs.value);
}
for (key in oldAttrs) {
if (isUndef(attrs[key])) {
if (isXlink(key)) {
elm.removeAttributeNS(xlinkNS, getXlinkProp(key));
} else if (!isEnumeratedAttr(key)) {
elm.removeAttribute(key);
}
}
}
}
function setAttr (el, key, value, isInPre) {
if (isInPre || el.tagName.indexOf('-') > -1) {
baseSetAttr(el, key, value);
} else if (isBooleanAttr(key)) {
// set attribute for blank value
// e.g. <option disabled>Select one</option>
if (isFalsyAttrValue(value)) {
el.removeAttribute(key);
} else {
      // technically allowfullscreen is a boolean attribute for <iframe>,
      // but Flash expects a value of "true" when used on an <embed> tag
value = key === 'allowfullscreen' && el.tagName === 'EMBED'
? 'true'
: key;
el.setAttribute(key, value);
}
} else if (isEnumeratedAttr(key)) {
el.setAttribute(key, convertEnumeratedValue(key, value));
} else if (isXlink(key)) {
if (isFalsyAttrValue(value)) {
el.removeAttributeNS(xlinkNS, getXlinkProp(key));
} else {
el.setAttributeNS(xlinkNS, key, value);
}
} else {
baseSetAttr(el, key, value);
}
}
function baseSetAttr (el, key, value) {
if (isFalsyAttrValue(value)) {
el.removeAttribute(key);
} else {
// #7138: IE10 & 11 fires input event when setting placeholder on
// <textarea>... block the first input event and remove the blocker
// immediately.
/* istanbul ignore if */
if (
isIE && !isIE9 &&
el.tagName === 'TEXTAREA' &&
key === 'placeholder' && value !== '' && !el.__ieph
) {
var blocker = function (e) {
e.stopImmediatePropagation();
el.removeEventListener('input', blocker);
};
el.addEventListener('input', blocker);
// $flow-disable-line
el.__ieph = true; /* IE placeholder patched */
}
el.setAttribute(key, value);
}
}
var attrs = {
create: updateAttrs,
update: updateAttrs
};
/* */
function updateClass (oldVnode, vnode) {
var el = vnode.elm;
var data = vnode.data;
var oldData = oldVnode.data;
if (
isUndef(data.staticClass) &&
isUndef(data.class) && (
isUndef(oldData) || (
isUndef(oldData.staticClass) &&
isUndef(oldData.class)
)
)
) {
return
}
var cls = genClassForVnode(vnode);
// handle transition classes
var transitionClass = el._transitionClasses;
if (isDef(transitionClass)) {
cls = concat(cls, stringifyClass(transitionClass));
}
// set the class
if (cls !== el._prevClass) {
el.setAttribute('class', cls);
el._prevClass = cls;
}
}
var klass = {
create: updateClass,
update: updateClass
};
/* */
var validDivisionCharRE = /[\w).+\-_$\]]/;
function parseFilters (exp) {
var inSingle = false;
var inDouble = false;
var inTemplateString = false;
var inRegex = false;
var curly = 0;
var square = 0;
var paren = 0;
var lastFilterIndex = 0;
var c, prev, i, expression, filters;
for (i = 0; i < exp.length; i++) {
prev = c;
c = exp.charCodeAt(i);
if (inSingle) {
if (c === 0x27 && prev !== 0x5C) { inSingle = false; }
} else if (inDouble) {
if (c === 0x22 && prev !== 0x5C) { inDouble = false; }
} else if (inTemplateString) {
if (c === 0x60 && prev !== 0x5C) { inTemplateString = false; }
} else if (inRegex) {
if (c === 0x2f && prev !== 0x5C) { inRegex = false; }
} else if (
c === 0x7C && // pipe
exp.charCodeAt(i + 1) !== 0x7C &&
exp.charCodeAt(i - 1) !== 0x7C &&
!curly && !square && !paren
) {
if (expression === undefined) {
// first filter, end of expression
lastFilterIndex = i + 1;
expression = exp.slice(0, i).trim();
} else {
pushFilter();
}
} else {
switch (c) {
case 0x22: inDouble = true; break // "
case 0x27: inSingle = true; break // '
case 0x60: inTemplateString = true; break // `
case 0x28: paren++; break // (
case 0x29: paren--; break // )
case 0x5B: square++; break // [
case 0x5D: square--; break // ]
case 0x7B: curly++; break // {
case 0x7D: curly--; break // }
}
if (c === 0x2f) { // /
var j = i - 1;
var p = (void 0);
// find first non-whitespace prev char
for (; j >= 0; j--) {
p = exp.charAt(j);
if (p !== ' ') { break }
}
if (!p || !validDivisionCharRE.test(p)) {
inRegex = true;
}
}
}
}
if (expression === undefined) {
expression = exp.slice(0, i).trim();
} else if (lastFilterIndex !== 0) {
pushFilter();
}
function pushFilter () {
(filters || (filters = [])).push(exp.slice(lastFilterIndex, i).trim());
lastFilterIndex = i + 1;
}
if (filters) {
for (i = 0; i < filters.length; i++) {
expression = wrapFilter(expression, filters[i]);
}
}
return expression
}
function wrapFilter (exp, filter) {
var i = filter.indexOf('(');
if (i < 0) {
// _f: resolveFilter
return ("_f(\"" + filter + "\")(" + exp + ")")
} else {
var name = filter.slice(0, i);
var args = filter.slice(i + 1);
return ("_f(\"" + name + "\")(" + exp + (args !== ')' ? ',' + args : args))
}
}
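// For illustration, how filter expressions compile (pipes inside strings,
// regexes or brackets are ignored; a double pipe is not a filter separator):
//
//   parseFilters("message | capitalize")       // -> '_f("capitalize")(message)'
//   parseFilters("amount | currency('$', 2)")  // -> '_f("currency")(amount,'$', 2)'
//   parseFilters("a || b")                     // -> 'a || b'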
/* */
/* eslint-disable no-unused-vars */
function baseWarn (msg, range) {
console.error(("[Vue compiler]: " + msg));
}
/* eslint-enable no-unused-vars */
function pluckModuleFunction (
modules,
key
) {
return modules
? modules.map(function (m) { return m[key]; }).filter(function (_) { return _; })
: []
}
function addProp (el, name, value, range, dynamic) {
(el.props || (el.props = [])).push(rangeSetItem({ name: name, value: value, dynamic: dynamic }, range));
el.plain = false;
}
function addAttr (el, name, value, range, dynamic) {
var attrs = dynamic
? (el.dynamicAttrs || (el.dynamicAttrs = []))
: (el.attrs || (el.attrs = []));
attrs.push(rangeSetItem({ name: name, value: value, dynamic: dynamic }, range));
el.plain = false;
}
// add a raw attr (use this in preTransforms)
function addRawAttr (el, name, value, range) {
el.attrsMap[name] = value;
el.attrsList.push(rangeSetItem({ name: name, value: value }, range));
}
function addDirective (
el,
name,
rawName,
value,
arg,
isDynamicArg,
modifiers,
range
) {
(el.directives || (el.directives = [])).push(rangeSetItem({
name: name,
rawName: rawName,
value: value,
arg: arg,
isDynamicArg: isDynamicArg,
modifiers: modifiers
}, range));
el.plain = false;
}
function prependModifierMarker (symbol, name, dynamic) {
return dynamic
? ("_p(" + name + ",\"" + symbol + "\")")
: symbol + name // mark the event as captured
}
function addHandler (
el,
name,
value,
modifiers,
important,
warn,
range,
dynamic
) {
modifiers = modifiers || emptyObject;
  // warn when the prevent and passive modifiers are used together
/* istanbul ignore if */
if (
warn &&
modifiers.prevent && modifiers.passive
) {
warn(
'passive and prevent can\'t be used together. ' +
'Passive handler can\'t prevent default event.',
range
);
}
// normalize click.right and click.middle since they don't actually fire
// this is technically browser-specific, but at least for now browsers are
// the only target envs that have right/middle clicks.
if (modifiers.right) {
if (dynamic) {
name = "(" + name + ")==='click'?'contextmenu':(" + name + ")";
} else if (name === 'click') {
name = 'contextmenu';
delete modifiers.right;
}
} else if (modifiers.middle) {
if (dynamic) {
name = "(" + name + ")==='click'?'mouseup':(" + name + ")";
} else if (name === 'click') {
name = 'mouseup';
}
}
// check capture modifier
if (modifiers.capture) {
delete modifiers.capture;
name = prependModifierMarker('!', name, dynamic);
}
if (modifiers.once) {
delete modifiers.once;
name = prependModifierMarker('~', name, dynamic);
}
/* istanbul ignore if */
if (modifiers.passive) {
delete modifiers.passive;
name = prependModifierMarker('&', name, dynamic);
}
var events;
if (modifiers.native) {
delete modifiers.native;
events = el.nativeEvents || (el.nativeEvents = {});
} else {
events = el.events || (el.events = {});
}
var newHandler = rangeSetItem({ value: value.trim(), dynamic: dynamic }, range);
if (modifiers !== emptyObject) {
newHandler.modifiers = modifiers;
}
var handlers = events[name];
/* istanbul ignore if */
if (Array.isArray(handlers)) {
important ? handlers.unshift(newHandler) : handlers.push(newHandler);
} else if (handlers) {
events[name] = important ? [newHandler, handlers] : [handlers, newHandler];
} else {
events[name] = newHandler;
}
el.plain = false;
}
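// For illustration: modifiers are encoded as single-character markers on the
// event name, which the runtime later decodes again. A hypothetical call
//
//   addHandler(el, 'click', 'onClick', { capture: true, once: true })
//
// stores the handler under the name '~!click' (once + capture).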
function getRawBindingAttr (
el,
name
) {
return el.rawAttrsMap[':' + name] ||
el.rawAttrsMap['v-bind:' + name] ||
el.rawAttrsMap[name]
}
function getBindingAttr (
el,
name,
getStatic
) {
var dynamicValue =
getAndRemoveAttr(el, ':' + name) ||
getAndRemoveAttr(el, 'v-bind:' + name);
if (dynamicValue != null) {
return parseFilters(dynamicValue)
} else if (getStatic !== false) {
var staticValue = getAndRemoveAttr(el, name);
if (staticValue != null) {
return JSON.stringify(staticValue)
}
}
}
// note: this only removes the attr from the Array (attrsList) so that it
// doesn't get processed by processAttrs.
// By default it does NOT remove it from the map (attrsMap) because the map is
// needed during codegen.
function getAndRemoveAttr (
el,
name,
removeFromMap
) {
var val;
if ((val = el.attrsMap[name]) != null) {
var list = el.attrsList;
for (var i = 0, l = list.length; i < l; i++) {
if (list[i].name === name) {
list.splice(i, 1);
break
}
}
}
if (removeFromMap) {
delete el.attrsMap[name];
}
return val
}
function getAndRemoveAttrByRegex (
el,
name
) {
var list = el.attrsList;
for (var i = 0, l = list.length; i < l; i++) {
var attr = list[i];
if (name.test(attr.name)) {
list.splice(i, 1);
return attr
}
}
}
function rangeSetItem (
item,
range
) {
if (range) {
if (range.start != null) {
item.start = range.start;
}
if (range.end != null) {
item.end = range.end;
}
}
return item
}
/* */
/**
* Cross-platform code generation for component v-model
*/
function genComponentModel (
el,
value,
modifiers
) {
var ref = modifiers || {};
var number = ref.number;
var trim = ref.trim;
var baseValueExpression = '$$v';
var valueExpression = baseValueExpression;
if (trim) {
valueExpression =
"(typeof " + baseValueExpression + " === 'string'" +
"? " + baseValueExpression + ".trim()" +
": " + baseValueExpression + ")";
}
if (number) {
valueExpression = "_n(" + valueExpression + ")";
}
var assignment = genAssignmentCode(value, valueExpression);
el.model = {
value: ("(" + value + ")"),
expression: JSON.stringify(value),
callback: ("function (" + baseValueExpression + ") {" + assignment + "}")
};
}
/**
* Cross-platform codegen helper for generating v-model value assignment code.
*/
function genAssignmentCode (
value,
assignment
) {
var res = parseModel(value);
if (res.key === null) {
return (value + "=" + assignment)
} else {
return ("$set(" + (res.exp) + ", " + (res.key) + ", " + assignment + ")")
}
}
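// For illustration:
//
//   genAssignmentCode('msg', '$$v')        // -> 'msg=$$v'
//   genAssignmentCode('obj.msg', '$$v')    // -> '$set(obj, "msg", $$v)'
//   genAssignmentCode('list[idx]', '$$v')  // -> '$set(list, idx, $$v)'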
/**
* Parse a v-model expression into a base path and a final key segment.
* Handles both dot-path and possible square brackets.
*
* Possible cases:
*
* - test
* - test[key]
* - test[test1[key]]
* - test["a"][key]
* - xxx.test[a[a].test1[key]]
* - test.xxx.a["asa"][test1[key]]
*
*/
var len, str, chr, index$1, expressionPos, expressionEndPos;
function parseModel (val) {
// Fix https://github.com/vuejs/vue/pull/7730
// allow v-model="obj.val " (trailing whitespace)
val = val.trim();
len = val.length;
if (val.indexOf('[') < 0 || val.lastIndexOf(']') < len - 1) {
index$1 = val.lastIndexOf('.');
if (index$1 > -1) {
return {
exp: val.slice(0, index$1),
key: '"' + val.slice(index$1 + 1) + '"'
}
} else {
return {
exp: val,
key: null
}
}
}
str = val;
index$1 = expressionPos = expressionEndPos = 0;
while (!eof()) {
chr = next();
/* istanbul ignore if */
if (isStringStart(chr)) {
parseString(chr);
} else if (chr === 0x5B) {
parseBracket(chr);
}
}
return {
exp: val.slice(0, expressionPos),
key: val.slice(expressionPos + 1, expressionEndPos)
}
}
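// For illustration, parseModel on two of the cases listed above:
//
//   parseModel('test[test1[key]]')  // -> { exp: 'test', key: 'test1[key]' }
//   parseModel('obj.val ')          // -> { exp: 'obj', key: '"val"' } (trailing space trimmed)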
function next () {
return str.charCodeAt(++index$1)
}
function eof () {
return index$1 >= len
}
function isStringStart (chr) {
return chr === 0x22 || chr === 0x27
}
function parseBracket (chr) {
var inBracket = 1;
expressionPos = index$1;
while (!eof()) {
chr = next();
if (isStringStart(chr)) {
parseString(chr);
continue
}
if (chr === 0x5B) { inBracket++; }
if (chr === 0x5D) { inBracket--; }
if (inBracket === 0) {
expressionEndPos = index$1;
break
}
}
}
function parseString (chr) {
var stringQuote = chr;
while (!eof()) {
chr = next();
if (chr === stringQuote) {
break
}
}
}
/* */
var warn$1;
// in some cases, the event used has to be determined at runtime
// so we use some reserved tokens during compile.
var RANGE_TOKEN = '__r';
var CHECKBOX_RADIO_TOKEN = '__c';
function model (
el,
dir,
_warn
) {
warn$1 = _warn;
var value = dir.value;
var modifiers = dir.modifiers;
var tag = el.tag;
var type = el.attrsMap.type;
{
// inputs with type="file" are read only and setting the input's
// value will throw an error.
if (tag === 'input' && type === 'file') {
warn$1(
"<" + (el.tag) + " v-model=\"" + value + "\" type=\"file\">:\n" +
"File inputs are read only. Use a v-on:change listener instead.",
el.rawAttrsMap['v-model']
);
}
}
if (el.component) {
genComponentModel(el, value, modifiers);
// component v-model doesn't need extra runtime
return false
} else if (tag === 'select') {
genSelect(el, value, modifiers);
} else if (tag === 'input' && type === 'checkbox') {
genCheckboxModel(el, value, modifiers);
} else if (tag === 'input' && type === 'radio') {
genRadioModel(el, value, modifiers);
} else if (tag === 'input' || tag === 'textarea') {
genDefaultModel(el, value, modifiers);
} else if (!config.isReservedTag(tag)) {
genComponentModel(el, value, modifiers);
// component v-model doesn't need extra runtime
return false
} else {
warn$1(
"<" + (el.tag) + " v-model=\"" + value + "\">: " +
"v-model is not supported on this element type. " +
'If you are working with contenteditable, it\'s recommended to ' +
'wrap a library dedicated for that purpose inside a custom component.',
el.rawAttrsMap['v-model']
);
}
// ensure runtime directive metadata
return true
}
function genCheckboxModel (
el,
value,
modifiers
) {
var number = modifiers && modifiers.number;
var valueBinding = getBindingAttr(el, 'value') || 'null';
var trueValueBinding = getBindingAttr(el, 'true-value') || 'true';
var falseValueBinding = getBindingAttr(el, 'false-value') || 'false';
addProp(el, 'checked',
"Array.isArray(" + value + ")" +
"?_i(" + value + "," + valueBinding + ")>-1" + (
trueValueBinding === 'true'
? (":(" + value + ")")
: (":_q(" + value + "," + trueValueBinding + ")")
)
);
addHandler(el, 'change',
"var $$a=" + value + "," +
'$$el=$event.target,' +
"$$c=$$el.checked?(" + trueValueBinding + "):(" + falseValueBinding + ");" +
'if(Array.isArray($$a)){' +
"var $$v=" + (number ? '_n(' + valueBinding + ')' : valueBinding) + "," +
'$$i=_i($$a,$$v);' +
"if($$el.checked){$$i<0&&(" + (genAssignmentCode(value, '$$a.concat([$$v])')) + ")}" +
"else{$$i>-1&&(" + (genAssignmentCode(value, '$$a.slice(0,$$i).concat($$a.slice($$i+1))')) + ")}" +
"}else{" + (genAssignmentCode(value, '$$c')) + "}",
null, true
);
}
function genRadioModel (
el,
value,
modifiers
) {
var number = modifiers && modifiers.number;
var valueBinding = getBindingAttr(el, 'value') || 'null';
valueBinding = number ? ("_n(" + valueBinding + ")") : valueBinding;
addProp(el, 'checked', ("_q(" + value + "," + valueBinding + ")"));
addHandler(el, 'change', genAssignmentCode(value, valueBinding), null, true);
}
function genSelect (
el,
value,
modifiers
) {
var number = modifiers && modifiers.number;
var selectedVal = "Array.prototype.filter" +
".call($event.target.options,function(o){return o.selected})" +
".map(function(o){var val = \"_value\" in o ? o._value : o.value;" +
"return " + (number ? '_n(val)' : 'val') + "})";
var assignment = '$event.target.multiple ? $$selectedVal : $$selectedVal[0]';
var code = "var $$selectedVal = " + selectedVal + ";";
code = code + " " + (genAssignmentCode(value, assignment));
addHandler(el, 'change', code, null, true);
}
function genDefaultModel (
el,
value,
modifiers
) {
var type = el.attrsMap.type;
// warn if v-bind:value conflicts with v-model
// except for inputs with v-bind:type
{
var value$1 = el.attrsMap['v-bind:value'] || el.attrsMap[':value'];
var typeBinding = el.attrsMap['v-bind:type'] || el.attrsMap[':type'];
if (value$1 && !typeBinding) {
var binding = el.attrsMap['v-bind:value'] ? 'v-bind:value' : ':value';
warn$1(
binding + "=\"" + value$1 + "\" conflicts with v-model on the same element " +
'because the latter already expands to a value binding internally',
el.rawAttrsMap[binding]
);
}
}
var ref = modifiers || {};
var lazy = ref.lazy;
var number = ref.number;
var trim = ref.trim;
var needCompositionGuard = !lazy && type !== 'range';
var event = lazy
? 'change'
: type === 'range'
? RANGE_TOKEN
: 'input';
var valueExpression = '$event.target.value';
if (trim) {
valueExpression = "$event.target.value.trim()";
}
if (number) {
valueExpression = "_n(" + valueExpression + ")";
}
var code = genAssignmentCode(value, valueExpression);
if (needCompositionGuard) {
code = "if($event.target.composing)return;" + code;
}
addProp(el, 'value', ("(" + value + ")"));
addHandler(el, event, code, null, true);
if (trim || number) {
addHandler(el, 'blur', '$forceUpdate()');
}
}
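// Illustrative sketch (not part of the upstream source): for
// <input v-model.trim="msg"> genDefaultModel emits a value prop `(msg)` and
// an input handler roughly equal to:
//   if($event.target.composing)return;msg=$event.target.value.trim()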
/* */
// normalize v-model event tokens that can only be determined at runtime.
// it's important to place the event as the first in the array because
// the whole point is ensuring the v-model callback gets called before
// user-attached handlers.
function normalizeEvents (on) {
/* istanbul ignore if */
if (isDef(on[RANGE_TOKEN])) {
// IE input[type=range] only supports `change` event
var event = isIE ? 'change' : 'input';
on[event] = [].concat(on[RANGE_TOKEN], on[event] || []);
delete on[RANGE_TOKEN];
}
// This was originally intended to fix #4521 but no longer necessary
// after 2.5. Keeping it for backwards compat with generated code from < 2.4
/* istanbul ignore if */
if (isDef(on[CHECKBOX_RADIO_TOKEN])) {
on.change = [].concat(on[CHECKBOX_RADIO_TOKEN], on.change || []);
delete on[CHECKBOX_RADIO_TOKEN];
}
}
var target$1;
function createOnceHandler$1 (event, handler, capture) {
var _target = target$1; // save current target element in closure
return function onceHandler () {
var res = handler.apply(null, arguments);
if (res !== null) {
remove$2(event, onceHandler, capture, _target);
}
}
}
// #9446: Firefox <= 53 (in particular, ESR 52) has incorrect Event.timeStamp
// implementation and does not fire microtasks in between event propagation, so
// safe to exclude.
var useMicrotaskFix = isUsingMicroTask && !(isFF && Number(isFF[1]) <= 53);
function add$1 (
name,
handler,
capture,
passive
) {
// async edge case #6566: inner click event triggers patch, event handler
// attached to outer element during patch, and triggered again. This
// happens because browsers fire microtask ticks between event propagation.
// the solution is simple: we save the timestamp when a handler is attached,
// and the handler would only fire if the event passed to it was fired
// AFTER it was attached.
if (useMicrotaskFix) {
var attachedTimestamp = currentFlushTimestamp;
var original = handler;
handler = original._wrapper = function (e) {
if (
// no bubbling, should always fire.
// this is just a safety net in case event.timeStamp is unreliable in
// certain weird environments...
e.target === e.currentTarget ||
// event is fired after handler attachment
e.timeStamp >= attachedTimestamp ||
// bail for environments that have buggy event.timeStamp implementations
// #9462 iOS 9 bug: event.timeStamp is 0 after history.pushState
// #9681 QtWebEngine event.timeStamp is negative value
e.timeStamp <= 0 ||
// #9448 bail if event is fired in another document in a multi-page
// electron/nw.js app, since event.timeStamp will be using a different
// starting reference
e.target.ownerDocument !== document
) {
return original.apply(this, arguments)
}
};
}
target$1.addEventListener(
name,
handler,
supportsPassive
? { capture: capture, passive: passive }
: capture
);
}
function remove$2 (
name,
handler,
capture,
_target
) {
(_target || target$1).removeEventListener(
name,
handler._wrapper || handler,
capture
);
}
function updateDOMListeners (oldVnode, vnode) {
if (isUndef(oldVnode.data.on) && isUndef(vnode.data.on)) {
return
}
var on = vnode.data.on || {};
var oldOn = oldVnode.data.on || {};
target$1 = vnode.elm;
normalizeEvents(on);
updateListeners(on, oldOn, add$1, remove$2, createOnceHandler$1, vnode.context);
target$1 = undefined;
}
var events = {
create: updateDOMListeners,
update: updateDOMListeners
};
/* */
var svgContainer;
function updateDOMProps (oldVnode, vnode) {
if (isUndef(oldVnode.data.domProps) && isUndef(vnode.data.domProps)) {
return
}
var key, cur;
var elm = vnode.elm;
var oldProps = oldVnode.data.domProps || {};
var props = vnode.data.domProps || {};
// clone observed objects, as the user probably wants to mutate them
if (isDef(props.__ob__)) {
props = vnode.data.domProps = extend({}, props);
}
for (key in oldProps) {
if (!(key in props)) {
elm[key] = '';
}
}
for (key in props) {
cur = props[key];
// ignore children if the node has textContent or innerHTML,
// as these will throw away existing DOM nodes and cause removal errors
// on subsequent patches (#3360)
if (key === 'textContent' || key === 'innerHTML') {
if (vnode.children) { vnode.children.length = 0; }
if (cur === oldProps[key]) { continue }
// #6601 work around Chrome version <= 55 bug where single textNode
// replaced by innerHTML/textContent retains its parentNode property
if (elm.childNodes.length === 1) {
elm.removeChild(elm.childNodes[0]);
}
}
if (key === 'value' && elm.tagName !== 'PROGRESS') {
// store value as _value as well since
// non-string values will be stringified
elm._value = cur;
// avoid resetting cursor position when value is the same
var strCur = isUndef(cur) ? '' : String(cur);
if (shouldUpdateValue(elm, strCur)) {
elm.value = strCur;
}
} else if (key === 'innerHTML' && isSVG(elm.tagName) && isUndef(elm.innerHTML)) {
// IE doesn't support innerHTML for SVG elements
svgContainer = svgContainer || document.createElement('div');
svgContainer.innerHTML = "<svg>" + cur + "</svg>";
var svg = svgContainer.firstChild;
while (elm.firstChild) {
elm.removeChild(elm.firstChild);
}
while (svg.firstChild) {
elm.appendChild(svg.firstChild);
}
} else if (
// skip the update if old and new VDOM state is the same.
// `value` is handled separately because the DOM value may be temporarily
// out of sync with VDOM state due to focus, composition and modifiers.
// This also fixes #4521 by skipping the unnecessary `checked` update.
cur !== oldProps[key]
) {
// some property updates can throw
// e.g. `value` on <progress> w/ non-finite value
try {
elm[key] = cur;
} catch (e) {}
}
}
}
// check platforms/web/util/attrs.js acceptValue
function shouldUpdateValue (elm, checkVal) {
return (!elm.composing && (
elm.tagName === 'OPTION' ||
isNotInFocusAndDirty(elm, checkVal) ||
isDirtyWithModifiers(elm, checkVal)
))
}
function isNotInFocusAndDirty (elm, checkVal) {
// return true when textbox (.number and .trim) loses focus and its value is
// not equal to the updated value
var notInFocus = true;
// #6157
// work around IE bug when accessing document.activeElement in an iframe
try { notInFocus = document.activeElement !== elm; } catch (e) {}
return notInFocus && elm.value !== checkVal
}
function isDirtyWithModifiers (elm, newVal) {
var value = elm.value;
var modifiers = elm._vModifiers; // injected by v-model runtime
if (isDef(modifiers)) {
if (modifiers.number) {
return toNumber(value) !== toNumber(newVal)
}
if (modifiers.trim) {
return value.trim() !== newVal.trim()
}
}
return value !== newVal
}
var domProps = {
create: updateDOMProps,
update: updateDOMProps
};
/* */
var parseStyleText = cached(function (cssText) {
var res = {};
var listDelimiter = /;(?![^(]*\))/g;
var propertyDelimiter = /:(.+)/;
cssText.split(listDelimiter).forEach(function (item) {
if (item) {
var tmp = item.split(propertyDelimiter);
tmp.length > 1 && (res[tmp[0].trim()] = tmp[1].trim());
}
});
return res
});
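// Illustrative example: parseStyleText('color: red; background: url(a;b)')
// -> { color: 'red', background: 'url(a;b)' }. The lookahead in listDelimiter
// keeps semicolons inside parentheses from splitting a value.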
// merge static and dynamic style data on the same vnode
function normalizeStyleData (data) {
var style = normalizeStyleBinding(data.style);
// static style is pre-processed into an object during compilation
// and is always a fresh object, so it's safe to merge into it
return data.staticStyle
? extend(data.staticStyle, style)
: style
}
// normalize possible array / string values into Object
function normalizeStyleBinding (bindingStyle) {
if (Array.isArray(bindingStyle)) {
return toObject(bindingStyle)
}
if (typeof bindingStyle === 'string') {
return parseStyleText(bindingStyle)
}
return bindingStyle
}
/**
* parent component style should be after child's
* so that parent component's style could override it
*/
function getStyle (vnode, checkChild) {
var res = {};
var styleData;
if (checkChild) {
var childNode = vnode;
while (childNode.componentInstance) {
childNode = childNode.componentInstance._vnode;
if (
childNode && childNode.data &&
(styleData = normalizeStyleData(childNode.data))
) {
extend(res, styleData);
}
}
}
if ((styleData = normalizeStyleData(vnode.data))) {
extend(res, styleData);
}
var parentNode = vnode;
while ((parentNode = parentNode.parent)) {
if (parentNode.data && (styleData = normalizeStyleData(parentNode.data))) {
extend(res, styleData);
}
}
return res
}
/* */
var cssVarRE = /^--/;
var importantRE = /\s*!important$/;
var setProp = function (el, name, val) {
/* istanbul ignore if */
if (cssVarRE.test(name)) {
el.style.setProperty(name, val);
} else if (importantRE.test(val)) {
el.style.setProperty(hyphenate(name), val.replace(importantRE, ''), 'important');
} else {
var normalizedName = normalize(name);
if (Array.isArray(val)) {
// Support values array created by autoprefixer, e.g.
// {display: ["-webkit-box", "-ms-flexbox", "flex"]}
// Set them one by one, and the browser will only set those it can recognize
for (var i = 0, len = val.length; i < len; i++) {
el.style[normalizedName] = val[i];
}
} else {
el.style[normalizedName] = val;
}
}
};
var vendorNames = ['Webkit', 'Moz', 'ms'];
var emptyStyle;
var normalize = cached(function (prop) {
emptyStyle = emptyStyle || document.createElement('div').style;
prop = camelize(prop);
if (prop !== 'filter' && (prop in emptyStyle)) {
return prop
}
var capName = prop.charAt(0).toUpperCase() + prop.slice(1);
for (var i = 0; i < vendorNames.length; i++) {
var name = vendorNames[i] + capName;
if (name in emptyStyle) {
return name
}
}
});
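// e.g. normalize('transform') probes a detached <div>'s style object and
// returns 'transform' where the standard property exists, falling back to a
// vendor-prefixed name such as 'WebkitTransform' on older engines.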
function updateStyle (oldVnode, vnode) {
var data = vnode.data;
var oldData = oldVnode.data;
if (isUndef(data.staticStyle) && isUndef(data.style) &&
isUndef(oldData.staticStyle) && isUndef(oldData.style)
) {
return
}
var cur, name;
var el = vnode.elm;
var oldStaticStyle = oldData.staticStyle;
var oldStyleBinding = oldData.normalizedStyle || oldData.style || {};
// if static style exists, the style binding has already been merged into it by normalizeStyleData
var oldStyle = oldStaticStyle || oldStyleBinding;
var style = normalizeStyleBinding(vnode.data.style) || {};
// store normalized style under a different key for next diff
// make sure to clone it if it's reactive, since the user likely wants
// to mutate it.
vnode.data.normalizedStyle = isDef(style.__ob__)
? extend({}, style)
: style;
var newStyle = getStyle(vnode, true);
for (name in oldStyle) {
if (isUndef(newStyle[name])) {
setProp(el, name, '');
}
}
for (name in newStyle) {
cur = newStyle[name];
if (cur !== oldStyle[name]) {
// ie9 setting to null has no effect, must use empty string
setProp(el, name, cur == null ? '' : cur);
}
}
}
var style = {
create: updateStyle,
update: updateStyle
};
/* */
var whitespaceRE = /\s+/;
/**
* Add class with compatibility for SVG since classList is not supported on
* SVG elements in IE
*/
function addClass (el, cls) {
/* istanbul ignore if */
if (!cls || !(cls = cls.trim())) {
return
}
/* istanbul ignore else */
if (el.classList) {
if (cls.indexOf(' ') > -1) {
cls.split(whitespaceRE).forEach(function (c) { return el.classList.add(c); });
} else {
el.classList.add(cls);
}
} else {
var cur = " " + (el.getAttribute('class') || '') + " ";
if (cur.indexOf(' ' + cls + ' ') < 0) {
el.setAttribute('class', (cur + cls).trim());
}
}
}
/**
* Remove class with compatibility for SVG since classList is not supported on
* SVG elements in IE
*/
function removeClass (el, cls) {
/* istanbul ignore if */
if (!cls || !(cls = cls.trim())) {
return
}
/* istanbul ignore else */
if (el.classList) {
if (cls.indexOf(' ') > -1) {
cls.split(whitespaceRE).forEach(function (c) { return el.classList.remove(c); });
} else {
el.classList.remove(cls);
}
if (!el.classList.length) {
el.removeAttribute('class');
}
} else {
var cur = " " + (el.getAttribute('class') || '') + " ";
var tar = ' ' + cls + ' ';
while (cur.indexOf(tar) >= 0) {
cur = cur.replace(tar, ' ');
}
cur = cur.trim();
if (cur) {
el.setAttribute('class', cur);
} else {
el.removeAttribute('class');
}
}
}
/* */
function resolveTransition (def$$1) {
if (!def$$1) {
return
}
/* istanbul ignore else */
if (typeof def$$1 === 'object') {
var res = {};
if (def$$1.css !== false) {
extend(res, autoCssTransition(def$$1.name || 'v'));
}
extend(res, def$$1);
return res
} else if (typeof def$$1 === 'string') {
return autoCssTransition(def$$1)
}
}
var autoCssTransition = cached(function (name) {
return {
enterClass: (name + "-enter"),
enterToClass: (name + "-enter-to"),
enterActiveClass: (name + "-enter-active"),
leaveClass: (name + "-leave"),
leaveToClass: (name + "-leave-to"),
leaveActiveClass: (name + "-leave-active")
}
});
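// Illustrative: autoCssTransition('fade') yields { enterClass: 'fade-enter',
// enterToClass: 'fade-enter-to', enterActiveClass: 'fade-enter-active', ... },
// i.e. the six class names toggled around enter/leave below.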
var hasTransition = inBrowser && !isIE9;
var TRANSITION = 'transition';
var ANIMATION = 'animation';
// Transition property/event sniffing
var transitionProp = 'transition';
var transitionEndEvent = 'transitionend';
var animationProp = 'animation';
var animationEndEvent = 'animationend';
if (hasTransition) {
/* istanbul ignore if */
if (window.ontransitionend === undefined &&
window.onwebkittransitionend !== undefined
) {
transitionProp = 'WebkitTransition';
transitionEndEvent = 'webkitTransitionEnd';
}
if (window.onanimationend === undefined &&
window.onwebkitanimationend !== undefined
) {
animationProp = 'WebkitAnimation';
animationEndEvent = 'webkitAnimationEnd';
}
}
// binding to window is necessary to make hot reload work in IE in strict mode
var raf = inBrowser
? window.requestAnimationFrame
? window.requestAnimationFrame.bind(window)
: setTimeout
: /* istanbul ignore next */ function (fn) { return fn(); };
function nextFrame (fn) {
raf(function () {
raf(fn);
});
}
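// Double rAF: the first frame lets the just-added start class be painted, so
// the class swap performed on the second frame reliably triggers a CSS
// transition instead of being coalesced into the same style flush.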
function addTransitionClass (el, cls) {
var transitionClasses = el._transitionClasses || (el._transitionClasses = []);
if (transitionClasses.indexOf(cls) < 0) {
transitionClasses.push(cls);
addClass(el, cls);
}
}
function removeTransitionClass (el, cls) {
if (el._transitionClasses) {
remove(el._transitionClasses, cls);
}
removeClass(el, cls);
}
function whenTransitionEnds (
el,
expectedType,
cb
) {
var ref = getTransitionInfo(el, expectedType);
var type = ref.type;
var timeout = ref.timeout;
var propCount = ref.propCount;
if (!type) { return cb() }
var event = type === TRANSITION ? transitionEndEvent : animationEndEvent;
var ended = 0;
var end = function () {
el.removeEventListener(event, onEnd);
cb();
};
var onEnd = function (e) {
if (e.target === el) {
if (++ended >= propCount) {
end();
}
}
};
setTimeout(function () {
if (ended < propCount) {
end();
}
}, timeout + 1);
el.addEventListener(event, onEnd);
}
var transformRE = /\b(transform|all)(,|$)/;
function getTransitionInfo (el, expectedType) {
var styles = window.getComputedStyle(el);
// JSDOM may return undefined for transition properties
var transitionDelays = (styles[transitionProp + 'Delay'] || '').split(', ');
var transitionDurations = (styles[transitionProp + 'Duration'] || '').split(', ');
var transitionTimeout = getTimeout(transitionDelays, transitionDurations);
var animationDelays = (styles[animationProp + 'Delay'] || '').split(', ');
var animationDurations = (styles[animationProp + 'Duration'] || '').split(', ');
var animationTimeout = getTimeout(animationDelays, animationDurations);
var type;
var timeout = 0;
var propCount = 0;
/* istanbul ignore if */
if (expectedType === TRANSITION) {
if (transitionTimeout > 0) {
type = TRANSITION;
timeout = transitionTimeout;
propCount = transitionDurations.length;
}
} else if (expectedType === ANIMATION) {
if (animationTimeout > 0) {
type = ANIMATION;
timeout = animationTimeout;
propCount = animationDurations.length;
}
} else {
timeout = Math.max(transitionTimeout, animationTimeout);
type = timeout > 0
? transitionTimeout > animationTimeout
? TRANSITION
: ANIMATION
: null;
propCount = type
? type === TRANSITION
? transitionDurations.length
: animationDurations.length
: 0;
}
var hasTransform =
type === TRANSITION &&
transformRE.test(styles[transitionProp + 'Property']);
return {
type: type,
timeout: timeout,
propCount: propCount,
hasTransform: hasTransform
}
}
function getTimeout (delays, durations) {
/* istanbul ignore next */
while (delays.length < durations.length) {
delays = delays.concat(delays);
}
return Math.max.apply(null, durations.map(function (d, i) {
return toMs(d) + toMs(delays[i])
}))
}
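// Illustrative: getTimeout(['1s'], ['1s', '2s']) -> 3000 (ms). Delays are
// cycled to match the number of durations, then the largest delay + duration
// pair wins.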
// Old versions of Chromium (below 61.0.3163.100) format floating point numbers
// in a locale-dependent way, using a comma instead of a dot.
// If the comma is not replaced with a dot, the input will be rounded down
// (i.e. acting as a floor function), causing unexpected behaviors.
function toMs (s) {
return Number(s.slice(0, -1).replace(',', '.')) * 1000
}
/* */
function enter (vnode, toggleDisplay) {
var el = vnode.elm;
// call leave callback now
if (isDef(el._leaveCb)) {
el._leaveCb.cancelled = true;
el._leaveCb();
}
var data = resolveTransition(vnode.data.transition);
if (isUndef(data)) {
return
}
/* istanbul ignore if */
if (isDef(el._enterCb) || el.nodeType !== 1) {
return
}
var css = data.css;
var type = data.type;
var enterClass = data.enterClass;
var enterToClass = data.enterToClass;
var enterActiveClass = data.enterActiveClass;
var appearClass = data.appearClass;
var appearToClass = data.appearToClass;
var appearActiveClass = data.appearActiveClass;
var beforeEnter = data.beforeEnter;
var enter = data.enter;
var afterEnter = data.afterEnter;
var enterCancelled = data.enterCancelled;
var beforeAppear = data.beforeAppear;
var appear = data.appear;
var afterAppear = data.afterAppear;
var appearCancelled = data.appearCancelled;
var duration = data.duration;
// activeInstance will always be the <transition> component managing this
// transition. One edge case to check is when the <transition> is placed
// as the root node of a child component. In that case we need to check
// <transition>'s parent for appear check.
var context = activeInstance;
var transitionNode = activeInstance.$vnode;
while (transitionNode && transitionNode.parent) {
context = transitionNode.context;
transitionNode = transitionNode.parent;
}
var isAppear = !context._isMounted || !vnode.isRootInsert;
if (isAppear && !appear && appear !== '') {
return
}
var startClass = isAppear && appearClass
? appearClass
: enterClass;
var activeClass = isAppear && appearActiveClass
? appearActiveClass
: enterActiveClass;
var toClass = isAppear && appearToClass
? appearToClass
: enterToClass;
var beforeEnterHook = isAppear
? (beforeAppear || beforeEnter)
: beforeEnter;
var enterHook = isAppear
? (typeof appear === 'function' ? appear : enter)
: enter;
var afterEnterHook = isAppear
? (afterAppear || afterEnter)
: afterEnter;
var enterCancelledHook = isAppear
? (appearCancelled || enterCancelled)
: enterCancelled;
var explicitEnterDuration = toNumber(
isObject(duration)
? duration.enter
: duration
);
if (explicitEnterDuration != null) {
checkDuration(explicitEnterDuration, 'enter', vnode);
}
var expectsCSS = css !== false && !isIE9;
var userWantsControl = getHookArgumentsLength(enterHook);
var cb = el._enterCb = once(function () {
if (expectsCSS) {
removeTransitionClass(el, toClass);
removeTransitionClass(el, activeClass);
}
if (cb.cancelled) {
if (expectsCSS) {
removeTransitionClass(el, startClass);
}
enterCancelledHook && enterCancelledHook(el);
} else {
afterEnterHook && afterEnterHook(el);
}
el._enterCb = null;
});
if (!vnode.data.show) {
// remove pending leave element on enter by injecting an insert hook
mergeVNodeHook(vnode, 'insert', function () {
var parent = el.parentNode;
var pendingNode = parent && parent._pending && parent._pending[vnode.key];
if (pendingNode &&
pendingNode.tag === vnode.tag &&
pendingNode.elm._leaveCb
) {
pendingNode.elm._leaveCb();
}
enterHook && enterHook(el, cb);
});
}
// start enter transition
beforeEnterHook && beforeEnterHook(el);
if (expectsCSS) {
addTransitionClass(el, startClass);
addTransitionClass(el, activeClass);
nextFrame(function () {
removeTransitionClass(el, startClass);
if (!cb.cancelled) {
addTransitionClass(el, toClass);
if (!userWantsControl) {
if (isValidDuration(explicitEnterDuration)) {
setTimeout(cb, explicitEnterDuration);
} else {
whenTransitionEnds(el, type, cb);
}
}
}
});
}
if (vnode.data.show) {
toggleDisplay && toggleDisplay();
enterHook && enterHook(el, cb);
}
if (!expectsCSS && !userWantsControl) {
cb();
}
}
function leave (vnode, rm) {
var el = vnode.elm;
// call enter callback now
if (isDef(el._enterCb)) {
el._enterCb.cancelled = true;
el._enterCb();
}
var data = resolveTransition(vnode.data.transition);
if (isUndef(data) || el.nodeType !== 1) {
return rm()
}
/* istanbul ignore if */
if (isDef(el._leaveCb)) {
return
}
var css = data.css;
var type = data.type;
var leaveClass = data.leaveClass;
var leaveToClass = data.leaveToClass;
var leaveActiveClass = data.leaveActiveClass;
var beforeLeave = data.beforeLeave;
var leave = data.leave;
var afterLeave = data.afterLeave;
var leaveCancelled = data.leaveCancelled;
var delayLeave = data.delayLeave;
var duration = data.duration;
var expectsCSS = css !== false && !isIE9;
var userWantsControl = getHookArgumentsLength(leave);
var explicitLeaveDuration = toNumber(
isObject(duration)
? duration.leave
: duration
);
if (isDef(explicitLeaveDuration)) {
checkDuration(explicitLeaveDuration, 'leave', vnode);
}
var cb = el._leaveCb = once(function () {
if (el.parentNode && el.parentNode._pending) {
el.parentNode._pending[vnode.key] = null;
}
if (expectsCSS) {
removeTransitionClass(el, leaveToClass);
removeTransitionClass(el, leaveActiveClass);
}
if (cb.cancelled) {
if (expectsCSS) {
removeTransitionClass(el, leaveClass);
}
leaveCancelled && leaveCancelled(el);
} else {
rm();
afterLeave && afterLeave(el);
}
el._leaveCb = null;
});
if (delayLeave) {
delayLeave(performLeave);
} else {
performLeave();
}
function performLeave () {
// the delayed leave may have already been cancelled
if (cb.cancelled) {
return
}
// record leaving element
if (!vnode.data.show && el.parentNode) {
(el.parentNode._pending || (el.parentNode._pending = {}))[(vnode.key)] = vnode;
}
beforeLeave && beforeLeave(el);
if (expectsCSS) {
addTransitionClass(el, leaveClass);
addTransitionClass(el, leaveActiveClass);
nextFrame(function () {
removeTransitionClass(el, leaveClass);
if (!cb.cancelled) {
addTransitionClass(el, leaveToClass);
if (!userWantsControl) {
if (isValidDuration(explicitLeaveDuration)) {
setTimeout(cb, explicitLeaveDuration);
} else {
whenTransitionEnds(el, type, cb);
}
}
}
});
}
leave && leave(el, cb);
if (!expectsCSS && !userWantsControl) {
cb();
}
}
}
// only used in dev mode
function checkDuration (val, name, vnode) {
if (typeof val !== 'number') {
warn(
"<transition> explicit " + name + " duration is not a valid number - " +
"got " + (JSON.stringify(val)) + ".",
vnode.context
);
} else if (isNaN(val)) {
warn(
"<transition> explicit " + name + " duration is NaN - " +
'the duration expression might be incorrect.',
vnode.context
);
}
}
function isValidDuration (val) {
return typeof val === 'number' && !isNaN(val)
}
/**
* Normalize a transition hook's argument length. The hook may be:
* - a merged hook (invoker) with the original in .fns
* - a wrapped component method (check ._length)
* - a plain function (.length)
*/
function getHookArgumentsLength (fn) {
if (isUndef(fn)) {
return false
}
var invokerFns = fn.fns;
if (isDef(invokerFns)) {
// invoker
return getHookArgumentsLength(
Array.isArray(invokerFns)
? invokerFns[0]
: invokerFns
)
} else {
return (fn._length || fn.length) > 1
}
}
function _enter (_, vnode) {
if (vnode.data.show !== true) {
enter(vnode);
}
}
var transition = inBrowser ? {
create: _enter,
activate: _enter,
remove: function remove$$1 (vnode, rm) {
/* istanbul ignore else */
if (vnode.data.show !== true) {
leave(vnode, rm);
} else {
rm();
}
}
} : {};
var platformModules = [
attrs,
klass,
events,
domProps,
style,
transition
];
/* */
// the directive module should be applied last, after all
// built-in modules have been applied.
var modules = platformModules.concat(baseModules);
var patch = createPatchFunction({ nodeOps: nodeOps, modules: modules });
/**
* Not type checking this file because flow doesn't like attaching
* properties to Elements.
*/
/* istanbul ignore if */
if (isIE9) {
// http://www.matts411.com/post/internet-explorer-9-oninput/
document.addEventListener('selectionchange', function () {
var el = document.activeElement;
if (el && el.vmodel) {
trigger(el, 'input');
}
});
}
var directive = {
inserted: function inserted (el, binding, vnode, oldVnode) {
if (vnode.tag === 'select') {
// #6903
if (oldVnode.elm && !oldVnode.elm._vOptions) {
mergeVNodeHook(vnode, 'postpatch', function () {
directive.componentUpdated(el, binding, vnode);
});
} else {
setSelected(el, binding, vnode.context);
}
el._vOptions = [].map.call(el.options, getValue);
} else if (vnode.tag === 'textarea' || isTextInputType(el.type)) {
el._vModifiers = binding.modifiers;
if (!binding.modifiers.lazy) {
el.addEventListener('compositionstart', onCompositionStart);
el.addEventListener('compositionend', onCompositionEnd);
// Safari < 10.2 & UIWebView doesn't fire compositionend when
// switching focus before confirming composition choice
// this also fixes the issue where some browsers e.g. iOS Chrome
// fires "change" instead of "input" on autocomplete.
el.addEventListener('change', onCompositionEnd);
/* istanbul ignore if */
if (isIE9) {
el.vmodel = true;
}
}
}
},
componentUpdated: function componentUpdated (el, binding, vnode) {
if (vnode.tag === 'select') {
setSelected(el, binding, vnode.context);
// in case the options rendered by v-for have changed,
// it's possible that the value is out-of-sync with the rendered options.
// detect such cases and filter out values that no longer have a matching
// option in the DOM.
var prevOptions = el._vOptions;
var curOptions = el._vOptions = [].map.call(el.options, getValue);
if (curOptions.some(function (o, i) { return !looseEqual(o, prevOptions[i]); })) {
// trigger change event if
// no matching option found for at least one value
var needReset = el.multiple
? binding.value.some(function (v) { return hasNoMatchingOption(v, curOptions); })
: binding.value !== binding.oldValue && hasNoMatchingOption(binding.value, curOptions);
if (needReset) {
trigger(el, 'change');
}
}
}
}
};
function setSelected (el, binding, vm) {
actuallySetSelected(el, binding, vm);
/* istanbul ignore if */
if (isIE || isEdge) {
setTimeout(function () {
actuallySetSelected(el, binding, vm);
}, 0);
}
}
function actuallySetSelected (el, binding, vm) {
var value = binding.value;
var isMultiple = el.multiple;
if (isMultiple && !Array.isArray(value)) {
warn(
"<select multiple v-model=\"" + (binding.expression) + "\"> " +
"expects an Array value for its binding, but got " + (Object.prototype.toString.call(value).slice(8, -1)),
vm
);
return
}
var selected, option;
for (var i = 0, l = el.options.length; i < l; i++) {
option = el.options[i];
if (isMultiple) {
selected = looseIndexOf(value, getValue(option)) > -1;
if (option.selected !== selected) {
option.selected = selected;
}
} else {
if (looseEqual(getValue(option), value)) {
if (el.selectedIndex !== i) {
el.selectedIndex = i;
}
return
}
}
}
if (!isMultiple) {
el.selectedIndex = -1;
}
}
function hasNoMatchingOption (value, options) {
return options.every(function (o) { return !looseEqual(o, value); })
}
function getValue (option) {
return '_value' in option
? option._value
: option.value
}
function onCompositionStart (e) {
e.target.composing = true;
}
function onCompositionEnd (e) {
// prevent triggering an input event for no reason
if (!e.target.composing) { return }
e.target.composing = false;
trigger(e.target, 'input');
}
function trigger (el, type) {
var e = document.createEvent('HTMLEvents');
e.initEvent(type, true, true);
el.dispatchEvent(e);
}
/* */
// recursively search for possible transition defined inside the component root
function locateNode (vnode) {
return vnode.componentInstance && (!vnode.data || !vnode.data.transition)
? locateNode(vnode.componentInstance._vnode)
: vnode
}
var show = {
bind: function bind (el, ref, vnode) {
var value = ref.value;
vnode = locateNode(vnode);
var transition$$1 = vnode.data && vnode.data.transition;
var originalDisplay = el.__vOriginalDisplay =
el.style.display === 'none' ? '' : el.style.display;
if (value && transition$$1) {
vnode.data.show = true;
enter(vnode, function () {
el.style.display = originalDisplay;
});
} else {
el.style.display = value ? originalDisplay : 'none';
}
},
update: function update (el, ref, vnode) {
var value = ref.value;
var oldValue = ref.oldValue;
/* istanbul ignore if */
if (!value === !oldValue) { return }
vnode = locateNode(vnode);
var transition$$1 = vnode.data && vnode.data.transition;
if (transition$$1) {
vnode.data.show = true;
if (value) {
enter(vnode, function () {
el.style.display = el.__vOriginalDisplay;
});
} else {
leave(vnode, function () {
el.style.display = 'none';
});
}
} else {
el.style.display = value ? el.__vOriginalDisplay : 'none';
}
},
unbind: function unbind (
el,
binding,
vnode,
oldVnode,
isDestroy
) {
if (!isDestroy) {
el.style.display = el.__vOriginalDisplay;
}
}
};
var platformDirectives = {
model: directive,
show: show
};
/* */
var transitionProps = {
name: String,
appear: Boolean,
css: Boolean,
mode: String,
type: String,
enterClass: String,
leaveClass: String,
enterToClass: String,
leaveToClass: String,
enterActiveClass: String,
leaveActiveClass: String,
appearClass: String,
appearActiveClass: String,
appearToClass: String,
duration: [Number, String, Object]
};
// in case the child is also an abstract component, e.g. <keep-alive>
// we want to recursively retrieve the real component to be rendered
function getRealChild (vnode) {
var compOptions = vnode && vnode.componentOptions;
if (compOptions && compOptions.Ctor.options.abstract) {
return getRealChild(getFirstComponentChild(compOptions.children))
} else {
return vnode
}
}
function extractTransitionData (comp) {
var data = {};
var options = comp.$options;
// props
for (var key in options.propsData) {
data[key] = comp[key];
}
// events.
// extract listeners and pass them directly to the transition methods
var listeners = options._parentListeners;
for (var key$1 in listeners) {
data[camelize(key$1)] = listeners[key$1];
}
return data
}
function placeholder (h, rawChild) {
if (/\d-keep-alive$/.test(rawChild.tag)) {
return h('keep-alive', {
props: rawChild.componentOptions.propsData
})
}
}
function hasParentTransition (vnode) {
while ((vnode = vnode.parent)) {
if (vnode.data.transition) {
return true
}
}
}
function isSameChild (child, oldChild) {
return oldChild.key === child.key && oldChild.tag === child.tag
}
var isNotTextNode = function (c) { return c.tag || isAsyncPlaceholder(c); };
var isVShowDirective = function (d) { return d.name === 'show'; };
var Transition = {
name: 'transition',
props: transitionProps,
abstract: true,
render: function render (h) {
var this$1 = this;
var children = this.$slots.default;
if (!children) {
return
}
// filter out text nodes (possible whitespaces)
children = children.filter(isNotTextNode);
/* istanbul ignore if */
if (!children.length) {
return
}
// warn multiple elements
if (children.length > 1) {
warn(
'<transition> can only be used on a single element. Use ' +
'<transition-group> for lists.',
this.$parent
);
}
var mode = this.mode;
// warn invalid mode
if (mode && mode !== 'in-out' && mode !== 'out-in'
) {
warn(
'invalid <transition> mode: ' + mode,
this.$parent
);
}
var rawChild = children[0];
// if this is a component root node and the component's
// parent container node also has transition, skip.
if (hasParentTransition(this.$vnode)) {
return rawChild
}
// apply transition data to child
// use getRealChild() to ignore abstract components e.g. keep-alive
var child = getRealChild(rawChild);
/* istanbul ignore if */
if (!child) {
return rawChild
}
if (this._leaving) {
return placeholder(h, rawChild)
}
// ensure a key that is unique to the vnode type and to this transition
// component instance. This key will be used to remove pending leaving nodes
// during entering.
var id = "__transition-" + (this._uid) + "-";
child.key = child.key == null
? child.isComment
? id + 'comment'
: id + child.tag
: isPrimitive(child.key)
? (String(child.key).indexOf(id) === 0 ? child.key : id + child.key)
: child.key;
var data = (child.data || (child.data = {})).transition = extractTransitionData(this);
var oldRawChild = this._vnode;
var oldChild = getRealChild(oldRawChild);
// mark v-show
// so that the transition module can hand over the control to the directive
if (child.data.directives && child.data.directives.some(isVShowDirective)) {
child.data.show = true;
}
if (
oldChild &&
oldChild.data &&
!isSameChild(child, oldChild) &&
!isAsyncPlaceholder(oldChild) &&
// #6687 component root is a comment node
!(oldChild.componentInstance && oldChild.componentInstance._vnode.isComment)
) {
// replace old child transition data with fresh one
// important for dynamic transitions!
var oldData = oldChild.data.transition = extend({}, data);
// handle transition mode
if (mode === 'out-in') {
// return placeholder node and queue update when leave finishes
this._leaving = true;
mergeVNodeHook(oldData, 'afterLeave', function () {
this$1._leaving = false;
this$1.$forceUpdate();
});
return placeholder(h, rawChild)
} else if (mode === 'in-out') {
if (isAsyncPlaceholder(child)) {
return oldRawChild
}
var delayedLeave;
var performLeave = function () { delayedLeave(); };
mergeVNodeHook(data, 'afterEnter', performLeave);
mergeVNodeHook(data, 'enterCancelled', performLeave);
mergeVNodeHook(oldData, 'delayLeave', function (leave) { delayedLeave = leave; });
}
}
return rawChild
}
};
/* */
var props = extend({
tag: String,
moveClass: String
}, transitionProps);
delete props.mode;
var TransitionGroup = {
props: props,
beforeMount: function beforeMount () {
var this$1 = this;
var update = this._update;
this._update = function (vnode, hydrating) {
var restoreActiveInstance = setActiveInstance(this$1);
// force removing pass
this$1.__patch__(
this$1._vnode,
this$1.kept,
false, // hydrating
true // removeOnly (!important, avoids unnecessary moves)
);
this$1._vnode = this$1.kept;
restoreActiveInstance();
update.call(this$1, vnode, hydrating);
};
},
render: function render (h) {
var tag = this.tag || this.$vnode.data.tag || 'span';
var map = Object.create(null);
var prevChildren = this.prevChildren = this.children;
var rawChildren = this.$slots.default || [];
var children = this.children = [];
var transitionData = extractTransitionData(this);
for (var i = 0; i < rawChildren.length; i++) {
var c = rawChildren[i];
if (c.tag) {
if (c.key != null && String(c.key).indexOf('__vlist') !== 0) {
children.push(c);
map[c.key] = c
;(c.data || (c.data = {})).transition = transitionData;
} else {
var opts = c.componentOptions;
var name = opts ? (opts.Ctor.options.name || opts.tag || '') : c.tag;
warn(("<transition-group> children must be keyed: <" + name + ">"));
}
}
}
if (prevChildren) {
var kept = [];
var removed = [];
for (var i$1 = 0; i$1 < prevChildren.length; i$1++) {
var c$1 = prevChildren[i$1];
c$1.data.transition = transitionData;
c$1.data.pos = c$1.elm.getBoundingClientRect();
if (map[c$1.key]) {
kept.push(c$1);
} else {
removed.push(c$1);
}
}
this.kept = h(tag, null, kept);
this.removed = removed;
}
return h(tag, null, children)
},
updated: function updated () {
var children = this.prevChildren;
var moveClass = this.moveClass || ((this.name || 'v') + '-move');
if (!children.length || !this.hasMove(children[0].elm, moveClass)) {
return
}
// we divide the work into three loops to avoid mixing DOM reads and writes
// in each iteration - which helps prevent layout thrashing.
children.forEach(callPendingCbs);
children.forEach(recordPosition);
children.forEach(applyTranslation);
// force reflow to put everything in position
// assign to this to avoid being removed in tree-shaking
// $flow-disable-line
this._reflow = document.body.offsetHeight;
children.forEach(function (c) {
if (c.data.moved) {
var el = c.elm;
var s = el.style;
addTransitionClass(el, moveClass);
s.transform = s.WebkitTransform = s.transitionDuration = '';
el.addEventListener(transitionEndEvent, el._moveCb = function cb (e) {
if (e && e.target !== el) {
return
}
if (!e || /transform$/.test(e.propertyName)) {
el.removeEventListener(transitionEndEvent, cb);
el._moveCb = null;
removeTransitionClass(el, moveClass);
}
});
}
});
},
methods: {
hasMove: function hasMove (el, moveClass) {
/* istanbul ignore if */
if (!hasTransition) {
return false
}
/* istanbul ignore if */
if (this._hasMove) {
return this._hasMove
}
// Detect whether an element with the move class applied has
// CSS transitions. Since the element may be inside an entering
// transition at this very moment, we make a clone of it and remove
// all other transition classes applied to ensure only the move class
// is applied.
var clone = el.cloneNode();
if (el._transitionClasses) {
el._transitionClasses.forEach(function (cls) { removeClass(clone, cls); });
}
addClass(clone, moveClass);
clone.style.display = 'none';
this.$el.appendChild(clone);
var info = getTransitionInfo(clone);
this.$el.removeChild(clone);
return (this._hasMove = info.hasTransform)
}
}
};
function callPendingCbs (c) {
/* istanbul ignore if */
if (c.elm._moveCb) {
c.elm._moveCb();
}
/* istanbul ignore if */
if (c.elm._enterCb) {
c.elm._enterCb();
}
}
function recordPosition (c) {
c.data.newPos = c.elm.getBoundingClientRect();
}
function applyTranslation (c) {
var oldPos = c.data.pos;
var newPos = c.data.newPos;
var dx = oldPos.left - newPos.left;
var dy = oldPos.top - newPos.top;
if (dx || dy) {
c.data.moved = true;
var s = c.elm.style;
s.transform = s.WebkitTransform = "translate(" + dx + "px," + dy + "px)";
s.transitionDuration = '0s';
}
}
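// FLIP technique: each moved node is inverted back to its previous position
// with a transform here, then `updated` above clears the transform under a
// *-move class so the browser animates the node to its new spot.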
var platformComponents = {
Transition: Transition,
TransitionGroup: TransitionGroup
};
/* */
// install platform specific utils
Vue.config.mustUseProp = mustUseProp;
Vue.config.isReservedTag = isReservedTag;
Vue.config.isReservedAttr = isReservedAttr;
Vue.config.getTagNamespace = getTagNamespace;
Vue.config.isUnknownElement = isUnknownElement;
// install platform runtime directives & components
extend(Vue.options.directives, platformDirectives);
extend(Vue.options.components, platformComponents);
// install platform patch function
Vue.prototype.__patch__ = inBrowser ? patch : noop;
// public mount method
Vue.prototype.$mount = function (
el,
hydrating
) {
el = el && inBrowser ? query(el) : undefined;
return mountComponent(this, el, hydrating)
};
// devtools global hook
/* istanbul ignore next */
if (inBrowser) {
setTimeout(function () {
if (config.devtools) {
if (devtools) {
devtools.emit('init', Vue);
} else {
console[console.info ? 'info' : 'log'](
'Download the Vue Devtools extension for a better development experience:\n' +
'https://github.com/vuejs/vue-devtools'
);
}
}
if (config.productionTip !== false &&
typeof console !== 'undefined'
) {
console[console.info ? 'info' : 'log'](
"You are running Vue in development mode.\n" +
"Make sure to turn on production mode when deploying for production.\n" +
"See more tips at https://vuejs.org/guide/deployment.html"
);
}
}, 0);
}
/* */
var defaultTagRE = /\{\{((?:.|\r?\n)+?)\}\}/g;
var regexEscapeRE = /[-.*+?^${}()|[\]\/\\]/g;
var buildRegex = cached(function (delimiters) {
var open = delimiters[0].replace(regexEscapeRE, '\\$&');
var close = delimiters[1].replace(regexEscapeRE, '\\$&');
return new RegExp(open + '((?:.|\\n)+?)' + close, 'g')
});
function parseText (
text,
delimiters
) {
var tagRE = delimiters ? buildRegex(delimiters) : defaultTagRE;
if (!tagRE.test(text)) {
return
}
var tokens = [];
var rawTokens = [];
var lastIndex = tagRE.lastIndex = 0;
var match, index, tokenValue;
while ((match = tagRE.exec(text))) {
index = match.index;
// push text token
if (index > lastIndex) {
rawTokens.push(tokenValue = text.slice(lastIndex, index));
tokens.push(JSON.stringify(tokenValue));
}
// tag token
var exp = parseFilters(match[1].trim());
tokens.push(("_s(" + exp + ")"));
rawTokens.push({ '@binding': exp });
lastIndex = index + match[0].length;
}
if (lastIndex < text.length) {
rawTokens.push(tokenValue = text.slice(lastIndex));
tokens.push(JSON.stringify(tokenValue));
}
return {
expression: tokens.join('+'),
tokens: rawTokens
}
}
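// Illustrative: parseText('hi {{ name }}!') returns
// { expression: '"hi "+_s(name)+"!"',
//   tokens: ['hi ', { '@binding': 'name' }, '!'] }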
/* */
function transformNode (el, options) {
var warn = options.warn || baseWarn;
var staticClass = getAndRemoveAttr(el, 'class');
if (staticClass) {
var res = parseText(staticClass, options.delimiters);
if (res) {
warn(
"class=\"" + staticClass + "\": " +
'Interpolation inside attributes has been removed. ' +
'Use v-bind or the colon shorthand instead. For example, ' +
'instead of <div class="{{ val }}">, use <div :class="val">.',
el.rawAttrsMap['class']
);
}
}
if (staticClass) {
el.staticClass = JSON.stringify(staticClass);
}
var classBinding = getBindingAttr(el, 'class', false /* getStatic */);
if (classBinding) {
el.classBinding = classBinding;
}
}
function genData (el) {
var data = '';
if (el.staticClass) {
data += "staticClass:" + (el.staticClass) + ",";
}
if (el.classBinding) {
data += "class:" + (el.classBinding) + ",";
}
return data
}
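// Illustrative: for <div class="foo" :class="bar"> the transform above stores
// staticClass '"foo"' and classBinding 'bar', so genData contributes
// 'staticClass:"foo",class:bar,' to the render-time data object.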
var klass$1 = {
staticKeys: ['staticClass'],
transformNode: transformNode,
genData: genData
};
/* */
function transformNode$1 (el, options) {
var warn = options.warn || baseWarn;
var staticStyle = getAndRemoveAttr(el, 'style');
if (staticStyle) {
/* istanbul ignore if */
{
var res = parseText(staticStyle, options.delimiters);
if (res) {
warn(
"style=\"" + staticStyle + "\": " +
'Interpolation inside attributes has been removed. ' +
'Use v-bind or the colon shorthand instead. For example, ' +
'instead of <div style="{{ val }}">, use <div :style="val">.',
el.rawAttrsMap['style']
);
}
}
el.staticStyle = JSON.stringify(parseStyleText(staticStyle));
}
var styleBinding = getBindingAttr(el, 'style', false /* getStatic */);
if (styleBinding) {
el.styleBinding = styleBinding;
}
}
function genData$1 (el) {
var data = '';
if (el.staticStyle) {
data += "staticStyle:" + (el.staticStyle) + ",";
}
if (el.styleBinding) {
data += "style:(" + (el.styleBinding) + "),";
}
return data
}
var style$1 = {
staticKeys: ['staticStyle'],
transformNode: transformNode$1,
genData: genData$1
};
/* */
var decoder;
var he = {
decode: function decode (html) {
decoder = decoder || document.createElement('div');
decoder.innerHTML = html;
return decoder.textContent
}
};
/* */
var isUnaryTag = makeMap(
'area,base,br,col,embed,frame,hr,img,input,isindex,keygen,' +
'link,meta,param,source,track,wbr'
);
// Elements that you can, intentionally, leave open
// (and which close themselves)
var canBeLeftOpenTag = makeMap(
'colgroup,dd,dt,li,options,p,td,tfoot,th,thead,tr,source'
);
// HTML5 tags https://html.spec.whatwg.org/multipage/indices.html#elements-3
// Phrasing Content https://html.spec.whatwg.org/multipage/dom.html#phrasing-content
var isNonPhrasingTag = makeMap(
'address,article,aside,base,blockquote,body,caption,col,colgroup,dd,' +
'details,dialog,div,dl,dt,fieldset,figcaption,figure,footer,form,' +
'h1,h2,h3,h4,h5,h6,head,header,hgroup,hr,html,legend,li,menuitem,meta,' +
'optgroup,option,param,rp,rt,source,style,summary,tbody,td,tfoot,th,thead,' +
'title,tr,track'
);
/**
* Not type-checking this file because it's mostly vendor code.
*/
// Regular Expressions for parsing tags and attributes
var attribute = /^\s*([^\s"'<>\/=]+)(?:\s*(=)\s*(?:"([^"]*)"+|'([^']*)'+|([^\s"'=<>`]+)))?/;
var dynamicArgAttribute = /^\s*((?:v-[\w-]+:|@|:|#)\[[^=]+?\][^\s"'<>\/=]*)(?:\s*(=)\s*(?:"([^"]*)"+|'([^']*)'+|([^\s"'=<>`]+)))?/;
var ncname = "[a-zA-Z_][\\-\\.0-9_a-zA-Z" + (unicodeRegExp.source) + "]*";
var qnameCapture = "((?:" + ncname + "\\:)?" + ncname + ")";
var startTagOpen = new RegExp(("^<" + qnameCapture));
var startTagClose = /^\s*(\/?)>/;
var endTag = new RegExp(("^<\\/" + qnameCapture + "[^>]*>"));
var doctype = /^<!DOCTYPE [^>]+>/i;
// #7298: escape - to avoid being passed as HTML comment when inlined in page
var comment = /^<!\--/;
var conditionalComment = /^<!\[/;
// Special Elements (can contain anything)
var isPlainTextElement = makeMap('script,style,textarea', true);
var reCache = {};
var decodingMap = {
'&lt;': '<',
'&gt;': '>',
'&quot;': '"',
'&amp;': '&',
'&#10;': '\n',
'&#9;': '\t',
'&#39;': "'"
};
var encodedAttr = /&(?:lt|gt|quot|amp|#39);/g;
var encodedAttrWithNewLines = /&(?:lt|gt|quot|amp|#39|#10|#9);/g;
// #5992
var isIgnoreNewlineTag = makeMap('pre,textarea', true);
var shouldIgnoreFirstNewline = function (tag, html) { return tag && isIgnoreNewlineTag(tag) && html[0] === '\n'; };
function decodeAttr (value, shouldDecodeNewlines) {
var re = shouldDecodeNewlines ? encodedAttrWithNewLines : encodedAttr;
return value.replace(re, function (match) { return decodingMap[match]; })
}
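// e.g. decodeAttr('a &amp;&amp; b', false) -> 'a && b'; the newline/tab
// entities are only decoded when shouldDecodeNewlines is set for the platform.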
function parseHTML (html, options) {
var stack = [];
var expectHTML = options.expectHTML;
var isUnaryTag$$1 = options.isUnaryTag || no;
var canBeLeftOpenTag$$1 = options.canBeLeftOpenTag || no;
var index = 0;
var last, lastTag;
while (html) {
last = html;
// Make sure we're not in a plaintext content element like script/style
if (!lastTag || !isPlainTextElement(lastTag)) {
var textEnd = html.indexOf('<');
if (textEnd === 0) {
// Comment:
if (comment.test(html)) {
var commentEnd = html.indexOf('-->');
if (commentEnd >= 0) {
if (options.shouldKeepComment) {
options.comment(html.substring(4, commentEnd), index, index + commentEnd + 3);
}
advance(commentEnd + 3);
continue
}
}
// http://en.wikipedia.org/wiki/Conditional_comment#Downlevel-revealed_conditional_comment
if (conditionalComment.test(html)) {
var conditionalEnd = html.indexOf(']>');
if (conditionalEnd >= 0) {
advance(conditionalEnd + 2);
continue
}
}
// Doctype:
var doctypeMatch = html.match(doctype);
if (doctypeMatch) {
advance(doctypeMatch[0].length);
continue
}
// End tag:
var endTagMatch = html.match(endTag);
if (endTagMatch) {
var curIndex = index;
advance(endTagMatch[0].length);
parseEndTag(endTagMatch[1], curIndex, index);
continue
}
// Start tag:
var startTagMatch = parseStartTag();
if (startTagMatch) {
handleStartTag(startTagMatch);
if (shouldIgnoreFirstNewline(startTagMatch.tagName, html)) {
advance(1);
}
continue
}
}
var text = (void 0), rest = (void 0), next = (void 0);
if (textEnd >= 0) {
rest = html.slice(textEnd);
while (
!endTag.test(rest) &&
!startTagOpen.test(rest) &&
!comment.test(rest) &&
!conditionalComment.test(rest)
) {
// < in plain text, be forgiving and treat it as text
next = rest.indexOf('<', 1);
if (next < 0) { break }
textEnd += next;
rest = html.slice(textEnd);
}
text = html.substring(0, textEnd);
}
if (textEnd < 0) {
text = html;
}
if (text) {
advance(text.length);
}
if (options.chars && text) {
options.chars(text, index - text.length, index);
}
} else {
var endTagLength = 0;
var stackedTag = lastTag.toLowerCase();
var reStackedTag = reCache[stackedTag] || (reCache[stackedTag] = new RegExp('([\\s\\S]*?)(</' + stackedTag + '[^>]*>)', 'i'));
var rest$1 = html.replace(reStackedTag, function (all, text, endTag) {
endTagLength = endTag.length;
if (!isPlainTextElement(stackedTag) && stackedTag !== 'noscript') {
text = text
.replace(/<!\--([\s\S]*?)-->/g, '$1') // #7298
.replace(/<!\[CDATA\[([\s\S]*?)]]>/g, '$1');
}
if (shouldIgnoreFirstNewline(stackedTag, text)) {
text = text.slice(1);
}
if (options.chars) {
options.chars(text);
}
return ''
});
index += html.length - rest$1.length;
html = rest$1;
parseEndTag(stackedTag, index - endTagLength, index);
}
if (html === last) {
options.chars && options.chars(html);
if (!stack.length && options.warn) {
options.warn(("Mal-formatted tag at end of template: \"" + html + "\""), { start: index + html.length });
}
break
}
}
// Clean up any remaining tags
parseEndTag();
function advance (n) {
index += n;
html = html.substring(n);
}
function parseStartTag () {
var start = html.match(startTagOpen);
if (start) {
var match = {
tagName: start[1],
attrs: [],
start: index
};
advance(start[0].length);
var end, attr;
while (!(end = html.match(startTagClose)) && (attr = html.match(dynamicArgAttribute) || html.match(attribute))) {
attr.start = index;
advance(attr[0].length);
attr.end = index;
match.attrs.push(attr);
}
if (end) {
match.unarySlash = end[1];
advance(end[0].length);
match.end = index;
return match
}
}
}
function handleStartTag (match) {
var tagName = match.tagName;
var unarySlash = match.unarySlash;
if (expectHTML) {
if (lastTag === 'p' && isNonPhrasingTag(tagName)) {
parseEndTag(lastTag);
}
if (canBeLeftOpenTag$$1(tagName) && lastTag === tagName) {
parseEndTag(tagName);
}
}
var unary = isUnaryTag$$1(tagName) || !!unarySlash;
var l = match.attrs.length;
var attrs = new Array(l);
for (var i = 0; i < l; i++) {
var args = match.attrs[i];
var value = args[3] || args[4] || args[5] || '';
var shouldDecodeNewlines = tagName === 'a' && args[1] === 'href'
? options.shouldDecodeNewlinesForHref
: options.shouldDecodeNewlines;
attrs[i] = {
name: args[1],
value: decodeAttr(value, shouldDecodeNewlines)
};
if (options.outputSourceRange) {
attrs[i].start = args.start + args[0].match(/^\s*/).length;
attrs[i].end = args.end;
}
}
if (!unary) {
stack.push({ tag: tagName, lowerCasedTag: tagName.toLowerCase(), attrs: attrs, start: match.start, end: match.end });
lastTag = tagName;
}
if (options.start) {
options.start(tagName, attrs, unary, match.start, match.end);
}
}
function parseEndTag (tagName, start, end) {
var pos, lowerCasedTagName;
if (start == null) { start = index; }
if (end == null) { end = index; }
// Find the closest opened tag of the same type
if (tagName) {
lowerCasedTagName = tagName.toLowerCase();
for (pos = stack.length - 1; pos >= 0; pos--) {
if (stack[pos].lowerCasedTag === lowerCasedTagName) {
break
}
}
} else {
// If no tag name is provided, clean shop
pos = 0;
}
if (pos >= 0) {
// Close all the open elements, up the stack
for (var i = stack.length - 1; i >= pos; i--) {
if (i > pos || !tagName &&
options.warn
) {
options.warn(
("tag <" + (stack[i].tag) + "> has no matching end tag."),
{ start: stack[i].start, end: stack[i].end }
);
}
if (options.end) {
options.end(stack[i].tag, start, end);
}
}
// Remove the open elements from the stack
stack.length = pos;
lastTag = pos && stack[pos - 1].tag;
} else if (lowerCasedTagName === 'br') {
if (options.start) {
options.start(tagName, [], true, start, end);
}
} else if (lowerCasedTagName === 'p') {
if (options.start) {
options.start(tagName, [], false, start, end);
}
if (options.end) {
options.end(tagName, start, end);
}
}
}
}
/* */
var onRE = /^@|^v-on:/;
var dirRE = /^v-|^@|^:|^#/;
var forAliasRE = /([\s\S]*?)\s+(?:in|of)\s+([\s\S]*)/;
var forIteratorRE = /,([^,\}\]]*)(?:,([^,\}\]]*))?$/;
var stripParensRE = /^\(|\)$/g;
var dynamicArgRE = /^\[.*\]$/;
var argRE = /:(.*)$/;
var bindRE = /^:|^\.|^v-bind:/;
var modifierRE = /\.[^.\]]+(?=[^\]]*$)/g;
var slotRE = /^v-slot(:|$)|^#/;
var lineBreakRE = /[\r\n]/;
var whitespaceRE$1 = /[ \f\t\r\n]+/g;
var invalidAttributeRE = /[\s"'<>\/=]/;
var decodeHTMLCached = cached(he.decode);
var emptySlotScopeToken = "_empty_";
// configurable state
var warn$2;
var delimiters;
var transforms;
var preTransforms;
var postTransforms;
var platformIsPreTag;
var platformMustUseProp;
var platformGetTagNamespace;
var maybeComponent;
function createASTElement (
tag,
attrs,
parent
) {
return {
type: 1,
tag: tag,
attrsList: attrs,
attrsMap: makeAttrsMap(attrs),
rawAttrsMap: {},
parent: parent,
children: []
}
}
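// Illustrative: createASTElement('div', [{ name: 'id', value: 'app' }], undefined)
// produces a type-1 element node with attrsMap { id: 'app' }, an empty
// rawAttrsMap and children array, ready to be filled in by parse() below.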
/**
* Convert HTML string to AST.
*/
function parse (
template,
options
) {
warn$2 = options.warn || baseWarn;
platformIsPreTag = options.isPreTag || no;
platformMustUseProp = options.mustUseProp || no;
platformGetTagNamespace = options.getTagNamespace || no;
var isReservedTag = options.isReservedTag || no;
maybeComponent = function (el) { return !!(
el.component ||
el.attrsMap[':is'] ||
el.attrsMap['v-bind:is'] ||
!(el.attrsMap.is ? isReservedTag(el.attrsMap.is) : isReservedTag(el.tag))
); };
transforms = pluckModuleFunction(options.modules, 'transformNode');
preTransforms = pluckModuleFunction(options.modules, 'preTransformNode');
postTransforms = pluckModuleFunction(options.modules, 'postTransformNode');
delimiters = options.delimiters;
var stack = [];
var preserveWhitespace = options.preserveWhitespace !== false;
var whitespaceOption = options.whitespace;
var root;
var currentParent;
var inVPre = false;
var inPre = false;
var warned = false;
function warnOnce (msg, range) {
if (!warned) {
warned = true;
warn$2(msg, range);
}
}
function closeElement (element) {
trimEndingWhitespace(element);
if (!inVPre && !element.processed) {
element = processElement(element, options);
}
// tree management
if (!stack.length && element !== root) {
// allow root elements with v-if, v-else-if and v-else
if (root.if && (element.elseif || element.else)) {
{
checkRootConstraints(element);
}
addIfCondition(root, {
exp: element.elseif,
block: element
});
} else {
warnOnce(
"Component template should contain exactly one root element. " +
"If you are using v-if on multiple elements, " +
"use v-else-if to chain them instead.",
{ start: element.start }
);
}
}
if (currentParent && !element.forbidden) {
if (element.elseif || element.else) {
processIfConditions(element, currentParent);
} else {
if (element.slotScope) {
// scoped slot
// keep it in the children list so that v-else(-if) conditions can
// find it as the prev node.
var name = element.slotTarget || '"default"'
;(currentParent.scopedSlots || (currentParent.scopedSlots = {}))[name] = element;
}
currentParent.children.push(element);
element.parent = currentParent;
}
}
// final children cleanup
// filter out scoped slots
element.children = element.children.filter(function (c) { return !(c).slotScope; });
// remove trailing whitespace node again
trimEndingWhitespace(element);
// check pre state
if (element.pre) {
inVPre = false;
}
if (platformIsPreTag(element.tag)) {
inPre = false;
}
// apply post-transforms
for (var i = 0; i < postTransforms.length; i++) {
postTransforms[i](element, options);
}
}
function trimEndingWhitespace (el) {
// remove trailing whitespace node
if (!inPre) {
var lastNode;
while (
(lastNode = el.children[el.children.length - 1]) &&
lastNode.type === 3 &&
lastNode.text === ' '
) {
el.children.pop();
}
}
}
function checkRootConstraints (el) {
if (el.tag === 'slot' || el.tag === 'template') {
warnOnce(
"Cannot use <" + (el.tag) + "> as component root element because it may " +
'contain multiple nodes.',
{ start: el.start }
);
}
if (el.attrsMap.hasOwnProperty('v-for')) {
warnOnce(
'Cannot use v-for on stateful component root element because ' +
'it renders multiple elements.',
el.rawAttrsMap['v-for']
);
}
}
parseHTML(template, {
warn: warn$2,
expectHTML: options.expectHTML,
isUnaryTag: options.isUnaryTag,
canBeLeftOpenTag: options.canBeLeftOpenTag,
shouldDecodeNewlines: options.shouldDecodeNewlines,
shouldDecodeNewlinesForHref: options.shouldDecodeNewlinesForHref,
shouldKeepComment: options.comments,
outputSourceRange: options.outputSourceRange,
start: function start (tag, attrs, unary, start$1, end) {
// check namespace.
// inherit parent ns if there is one
var ns = (currentParent && currentParent.ns) || platformGetTagNamespace(tag);
// handle IE svg bug
/* istanbul ignore if */
if (isIE && ns === 'svg') {
attrs = guardIESVGBug(attrs);
}
var element = createASTElement(tag, attrs, currentParent);
if (ns) {
element.ns = ns;
}
{
if (options.outputSourceRange) {
element.start = start$1;
element.end = end;
element.rawAttrsMap = element.attrsList.reduce(function (cumulated, attr) {
cumulated[attr.name] = attr;
return cumulated
}, {});
}
attrs.forEach(function (attr) {
if (invalidAttributeRE.test(attr.name)) {
warn$2(
"Invalid dynamic argument expression: attribute names cannot contain " +
"spaces, quotes, <, >, / or =.",
{
start: attr.start + attr.name.indexOf("["),
end: attr.start + attr.name.length
}
);
}
});
}
if (isForbiddenTag(element) && !isServerRendering()) {
element.forbidden = true;
warn$2(
'Templates should only be responsible for mapping the state to the ' +
'UI. Avoid placing tags with side-effects in your templates, such as ' +
"<" + tag + ">" + ', as they will not be parsed.',
{ start: element.start }
);
}
// apply pre-transforms
for (var i = 0; i < preTransforms.length; i++) {
element = preTransforms[i](element, options) || element;
}
if (!inVPre) {
processPre(element);
if (element.pre) {
inVPre = true;
}
}
if (platformIsPreTag(element.tag)) {
inPre = true;
}
if (inVPre) {
processRawAttrs(element);
} else if (!element.processed) {
// structural directives
processFor(element);
processIf(element);
processOnce(element);
}
if (!root) {
root = element;
{
checkRootConstraints(root);
}
}
if (!unary) {
currentParent = element;
stack.push(element);
} else {
closeElement(element);
}
},
end: function end (tag, start, end$1) {
var element = stack[stack.length - 1];
// pop stack
stack.length -= 1;
currentParent = stack[stack.length - 1];
if (options.outputSourceRange) {
element.end = end$1;
}
closeElement(element);
},
chars: function chars (text, start, end) {
if (!currentParent) {
{
if (text === template) {
warnOnce(
'Component template requires a root element, rather than just text.',
{ start: start }
);
} else if ((text = text.trim())) {
warnOnce(
("text \"" + text + "\" outside root element will be ignored."),
{ start: start }
);
}
}
return
}
// IE textarea placeholder bug
/* istanbul ignore if */
if (isIE &&
currentParent.tag === 'textarea' &&
currentParent.attrsMap.placeholder === text
) {
return
}
var children = currentParent.children;
if (inPre || text.trim()) {
text = isTextTag(currentParent) ? text : decodeHTMLCached(text);
} else if (!children.length) {
// remove the whitespace-only node right after an opening tag
text = '';
} else if (whitespaceOption) {
if (whitespaceOption === 'condense') {
// in condense mode, remove the whitespace node if it contains
// line break, otherwise condense to a single space
text = lineBreakRE.test(text) ? '' : ' ';
} else {
text = ' ';
}
} else {
text = preserveWhitespace ? ' ' : '';
}
if (text) {
if (!inPre && whitespaceOption === 'condense') {
// condense consecutive whitespaces into single space
text = text.replace(whitespaceRE$1, ' ');
}
var res;
var child;
if (!inVPre && text !== ' ' && (res = parseText(text, delimiters))) {
child = {
type: 2,
expression: res.expression,
tokens: res.tokens,
text: text
};
} else if (text !== ' ' || !children.length || children[children.length - 1].text !== ' ') {
child = {
type: 3,
text: text
};
}
if (child) {
if (options.outputSourceRange) {
child.start = start;
child.end = end;
}
children.push(child);
}
}
},
comment: function comment (text, start, end) {
// adding anything as a sibling to the root node is forbidden
// comments should still be allowed, but ignored
if (currentParent) {
var child = {
type: 3,
text: text,
isComment: true
};
if (options.outputSourceRange) {
child.start = start;
child.end = end;
}
currentParent.children.push(child);
}
}
});
return root
}
function processPre (el) {
if (getAndRemoveAttr(el, 'v-pre') != null) {
el.pre = true;
}
}
function processRawAttrs (el) {
var list = el.attrsList;
var len = list.length;
if (len) {
var attrs = el.attrs = new Array(len);
for (var i = 0; i < len; i++) {
attrs[i] = {
name: list[i].name,
value: JSON.stringify(list[i].value)
};
if (list[i].start != null) {
attrs[i].start = list[i].start;
attrs[i].end = list[i].end;
}
}
} else if (!el.pre) {
// non-root node in a pre block with no attributes
el.plain = true;
}
}
function processElement (
element,
options
) {
processKey(element);
// determine whether this is a plain element after
// removing structural attributes
element.plain = (
!element.key &&
!element.scopedSlots &&
!element.attrsList.length
);
processRef(element);
processSlotContent(element);
processSlotOutlet(element);
processComponent(element);
for (var i = 0; i < transforms.length; i++) {
element = transforms[i](element, options) || element;
}
processAttrs(element);
return element
}
function processKey (el) {
var exp = getBindingAttr(el, 'key');
if (exp) {
{
if (el.tag === 'template') {
warn$2(
"<template> cannot be keyed. Place the key on real elements instead.",
getRawBindingAttr(el, 'key')
);
}
if (el.for) {
var iterator = el.iterator2 || el.iterator1;
var parent = el.parent;
if (iterator && iterator === exp && parent && parent.tag === 'transition-group') {
warn$2(
"Do not use v-for index as key on <transition-group> children, " +
"this is the same as not using keys.",
getRawBindingAttr(el, 'key'),
true /* tip */
);
}
}
}
el.key = exp;
}
}
function processRef (el) {
var ref = getBindingAttr(el, 'ref');
if (ref) {
el.ref = ref;
el.refInFor = checkInFor(el);
}
}
function processFor (el) {
var exp;
if ((exp = getAndRemoveAttr(el, 'v-for'))) {
var res = parseFor(exp);
if (res) {
extend(el, res);
} else {
warn$2(
("Invalid v-for expression: " + exp),
el.rawAttrsMap['v-for']
);
}
}
}
function parseFor (exp) {
var inMatch = exp.match(forAliasRE);
if (!inMatch) { return }
var res = {};
res.for = inMatch[2].trim();
var alias = inMatch[1].trim().replace(stripParensRE, '');
var iteratorMatch = alias.match(forIteratorRE);
if (iteratorMatch) {
res.alias = alias.replace(forIteratorRE, '').trim();
res.iterator1 = iteratorMatch[1].trim();
if (iteratorMatch[2]) {
res.iterator2 = iteratorMatch[2].trim();
}
} else {
res.alias = alias;
}
return res
}
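// e.g. parseFor('(item, index) in items') yields
// { for: 'items', alias: 'item', iterator1: 'index' },
// and parseFor('(value, key, index) in object') also fills iterator2.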
function processIf (el) {
var exp = getAndRemoveAttr(el, 'v-if');
if (exp) {
el.if = exp;
addIfCondition(el, {
exp: exp,
block: el
});
} else {
if (getAndRemoveAttr(el, 'v-else') != null) {
el.else = true;
}
var elseif = getAndRemoveAttr(el, 'v-else-if');
if (elseif) {
el.elseif = elseif;
}
}
}
function processIfConditions (el, parent) {
var prev = findPrevElement(parent.children);
if (prev && prev.if) {
addIfCondition(prev, {
exp: el.elseif,
block: el
});
} else {
warn$2(
"v-" + (el.elseif ? ('else-if="' + el.elseif + '"') : 'else') + " " +
"used on element <" + (el.tag) + "> without corresponding v-if.",
el.rawAttrsMap[el.elseif ? 'v-else-if' : 'v-else']
);
}
}
function findPrevElement (children) {
var i = children.length;
while (i--) {
if (children[i].type === 1) {
return children[i]
} else {
if (children[i].text !== ' ') {
warn$2(
"text \"" + (children[i].text.trim()) + "\" between v-if and v-else(-if) " +
"will be ignored.",
children[i]
);
}
children.pop();
}
}
}
function addIfCondition (el, condition) {
if (!el.ifConditions) {
el.ifConditions = [];
}
el.ifConditions.push(condition);
}
function processOnce (el) {
var once$$1 = getAndRemoveAttr(el, 'v-once');
if (once$$1 != null) {
el.once = true;
}
}
// handle content being passed to a component as slot,
// e.g. <template slot="xxx">, <div slot-scope="xxx">
function processSlotContent (el) {
var slotScope;
if (el.tag === 'template') {
slotScope = getAndRemoveAttr(el, 'scope');
/* istanbul ignore if */
if (slotScope) {
warn$2(
"the \"scope\" attribute for scoped slots have been deprecated and " +
"replaced by \"slot-scope\" since 2.5. The new \"slot-scope\" attribute " +
"can also be used on plain elements in addition to <template> to " +
"denote scoped slots.",
el.rawAttrsMap['scope'],
true
);
}
el.slotScope = slotScope || getAndRemoveAttr(el, 'slot-scope');
} else if ((slotScope = getAndRemoveAttr(el, 'slot-scope'))) {
/* istanbul ignore if */
if (el.attrsMap['v-for']) {
warn$2(
"Ambiguous combined usage of slot-scope and v-for on <" + (el.tag) + "> " +
"(v-for takes higher priority). Use a wrapper <template> for the " +
"scoped slot to make it clearer.",
el.rawAttrsMap['slot-scope'],
true
);
}
el.slotScope = slotScope;
}
// slot="xxx"
var slotTarget = getBindingAttr(el, 'slot');
if (slotTarget) {
el.slotTarget = slotTarget === '""' ? '"default"' : slotTarget;
el.slotTargetDynamic = !!(el.attrsMap[':slot'] || el.attrsMap['v-bind:slot']);
// preserve slot as an attribute for native shadow DOM compat
// only for non-scoped slots.
if (el.tag !== 'template' && !el.slotScope) {
addAttr(el, 'slot', slotTarget, getRawBindingAttr(el, 'slot'));
}
}
// 2.6 v-slot syntax
{
if (el.tag === 'template') {
// v-slot on <template>
var slotBinding = getAndRemoveAttrByRegex(el, slotRE);
if (slotBinding) {
{
if (el.slotTarget || el.slotScope) {
warn$2(
"Unexpected mixed usage of different slot syntaxes.",
el
);
}
if (el.parent && !maybeComponent(el.parent)) {
warn$2(
"<template v-slot> can only appear at the root level inside " +
"the receiving component",
el
);
}
}
var ref = getSlotName(slotBinding);
var name = ref.name;
var dynamic = ref.dynamic;
el.slotTarget = name;
el.slotTargetDynamic = dynamic;
el.slotScope = slotBinding.value || emptySlotScopeToken; // force it into a scoped slot for perf
}
} else {
// v-slot on component, denotes default slot
var slotBinding$1 = getAndRemoveAttrByRegex(el, slotRE);
if (slotBinding$1) {
{
if (!maybeComponent(el)) {
warn$2(
"v-slot can only be used on components or <template>.",
slotBinding$1
);
}
if (el.slotScope || el.slotTarget) {
warn$2(
"Unexpected mixed usage of different slot syntaxes.",
el
);
}
if (el.scopedSlots) {
warn$2(
"To avoid scope ambiguity, the default slot should also use " +
"<template> syntax when there are other named slots.",
slotBinding$1
);
}
}
// add the component's children to its default slot
var slots = el.scopedSlots || (el.scopedSlots = {});
var ref$1 = getSlotName(slotBinding$1);
var name$1 = ref$1.name;
var dynamic$1 = ref$1.dynamic;
var slotContainer = slots[name$1] = createASTElement('template', [], el);
slotContainer.slotTarget = name$1;
slotContainer.slotTargetDynamic = dynamic$1;
slotContainer.children = el.children.filter(function (c) {
if (!c.slotScope) {
c.parent = slotContainer;
return true
}
});
slotContainer.slotScope = slotBinding$1.value || emptySlotScopeToken;
// remove children as they are returned from scopedSlots now
el.children = [];
// mark el non-plain so data gets generated
el.plain = false;
}
}
}
}
function getSlotName (binding) {
var name = binding.name.replace(slotRE, '');
if (!name) {
if (binding.name[0] !== '#') {
name = 'default';
} else {
warn$2(
"v-slot shorthand syntax requires a slot name.",
binding
);
}
}
return dynamicArgRE.test(name)
// dynamic [name]
? { name: name.slice(1, -1), dynamic: true }
// static name
: { name: ("\"" + name + "\""), dynamic: false }
}
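// e.g. (given the usual slotRE of /^v-slot(:|$)|^#/) a binding named
// 'v-slot:header' yields { name: '"header"', dynamic: false }, while
// 'v-slot:[dynamicName]' yields { name: 'dynamicName', dynamic: true }.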
// handle <slot/> outlets
function processSlotOutlet (el) {
if (el.tag === 'slot') {
el.slotName = getBindingAttr(el, 'name');
if (el.key) {
warn$2(
"`key` does not work on <slot> because slots are abstract outlets " +
"and can possibly expand into multiple elements. " +
"Use the key on a wrapping element instead.",
getRawBindingAttr(el, 'key')
);
}
}
}
function processComponent (el) {
var binding;
if ((binding = getBindingAttr(el, 'is'))) {
el.component = binding;
}
if (getAndRemoveAttr(el, 'inline-template') != null) {
el.inlineTemplate = true;
}
}
function processAttrs (el) {
var list = el.attrsList;
var i, l, name, rawName, value, modifiers, syncGen, isDynamic;
for (i = 0, l = list.length; i < l; i++) {
name = rawName = list[i].name;
value = list[i].value;
if (dirRE.test(name)) {
// mark element as dynamic
el.hasBindings = true;
// modifiers
modifiers = parseModifiers(name.replace(dirRE, ''));
// support .foo shorthand syntax for the .prop modifier
if (modifiers) {
name = name.replace(modifierRE, '');
}
if (bindRE.test(name)) { // v-bind
name = name.replace(bindRE, '');
value = parseFilters(value);
isDynamic = dynamicArgRE.test(name);
if (isDynamic) {
name = name.slice(1, -1);
}
if (
value.trim().length === 0
) {
warn$2(
("The value for a v-bind expression cannot be empty. Found in \"v-bind:" + name + "\"")
);
}
if (modifiers) {
if (modifiers.prop && !isDynamic) {
name = camelize(name);
if (name === 'innerHtml') { name = 'innerHTML'; }
}
if (modifiers.camel && !isDynamic) {
name = camelize(name);
}
if (modifiers.sync) {
syncGen = genAssignmentCode(value, "$event");
if (!isDynamic) {
addHandler(
el,
("update:" + (camelize(name))),
syncGen,
null,
false,
warn$2,
list[i]
);
if (hyphenate(name) !== camelize(name)) {
addHandler(
el,
("update:" + (hyphenate(name))),
syncGen,
null,
false,
warn$2,
list[i]
);
}
} else {
// handler w/ dynamic event name
addHandler(
el,
("\"update:\"+(" + name + ")"),
syncGen,
null,
false,
warn$2,
list[i],
true // dynamic
);
}
}
}
if ((modifiers && modifiers.prop) || (
!el.component && platformMustUseProp(el.tag, el.attrsMap.type, name)
)) {
addProp(el, name, value, list[i], isDynamic);
} else {
addAttr(el, name, value, list[i], isDynamic);
}
} else if (onRE.test(name)) { // v-on
name = name.replace(onRE, '');
isDynamic = dynamicArgRE.test(name);
if (isDynamic) {
name = name.slice(1, -1);
}
addHandler(el, name, value, modifiers, false, warn$2, list[i], isDynamic);
} else { // normal directives
name = name.replace(dirRE, '');
// parse arg
var argMatch = name.match(argRE);
var arg = argMatch && argMatch[1];
isDynamic = false;
if (arg) {
name = name.slice(0, -(arg.length + 1));
if (dynamicArgRE.test(arg)) {
arg = arg.slice(1, -1);
isDynamic = true;
}
}
addDirective(el, name, rawName, value, arg, isDynamic, modifiers, list[i]);
if (name === 'model') {
checkForAliasModel(el, value);
}
}
} else {
// literal attribute
{
var res = parseText(value, delimiters);
if (res) {
warn$2(
name + "=\"" + value + "\": " +
'Interpolation inside attributes has been removed. ' +
'Use v-bind or the colon shorthand instead. For example, ' +
'instead of <div id="{{ val }}">, use <div :id="val">.',
list[i]
);
}
}
addAttr(el, name, JSON.stringify(value), list[i]);
// #6887 firefox doesn't update muted state if set via attribute
// even immediately after element creation
if (!el.component &&
name === 'muted' &&
platformMustUseProp(el.tag, el.attrsMap.type, name)) {
addProp(el, name, 'true', list[i]);
}
}
}
}
function checkInFor (el) {
var parent = el;
while (parent) {
if (parent.for !== undefined) {
return true
}
parent = parent.parent;
}
return false
}
function parseModifiers (name) {
var match = name.match(modifierRE);
if (match) {
var ret = {};
match.forEach(function (m) { ret[m.slice(1)] = true; });
return ret
}
}
function makeAttrsMap (attrs) {
var map = {};
for (var i = 0, l = attrs.length; i < l; i++) {
if (
map[attrs[i].name] && !isIE && !isEdge
) {
warn$2('duplicate attribute: ' + attrs[i].name, attrs[i]);
}
map[attrs[i].name] = attrs[i].value;
}
return map
}
// for script (e.g. type="x/template") or style, do not decode content
function isTextTag (el) {
return el.tag === 'script' || el.tag === 'style'
}
function isForbiddenTag (el) {
return (
el.tag === 'style' ||
(el.tag === 'script' && (
!el.attrsMap.type ||
el.attrsMap.type === 'text/javascript'
))
)
}
var ieNSBug = /^xmlns:NS\d+/;
var ieNSPrefix = /^NS\d+:/;
/* istanbul ignore next */
function guardIESVGBug (attrs) {
var res = [];
for (var i = 0; i < attrs.length; i++) {
var attr = attrs[i];
if (!ieNSBug.test(attr.name)) {
attr.name = attr.name.replace(ieNSPrefix, '');
res.push(attr);
}
}
return res
}
function checkForAliasModel (el, value) {
var _el = el;
while (_el) {
if (_el.for && _el.alias === value) {
warn$2(
"<" + (el.tag) + " v-model=\"" + value + "\">: " +
"You are binding v-model directly to a v-for iteration alias. " +
"This will not be able to modify the v-for source array because " +
"writing to the alias is like modifying a function local variable. " +
"Consider using an array of objects and use v-model on an object property instead.",
el.rawAttrsMap['v-model']
);
}
_el = _el.parent;
}
}
/* */
function preTransformNode (el, options) {
if (el.tag === 'input') {
var map = el.attrsMap;
if (!map['v-model']) {
return
}
var typeBinding;
if (map[':type'] || map['v-bind:type']) {
typeBinding = getBindingAttr(el, 'type');
}
if (!map.type && !typeBinding && map['v-bind']) {
typeBinding = "(" + (map['v-bind']) + ").type";
}
if (typeBinding) {
var ifCondition = getAndRemoveAttr(el, 'v-if', true);
var ifConditionExtra = ifCondition ? ("&&(" + ifCondition + ")") : "";
var hasElse = getAndRemoveAttr(el, 'v-else', true) != null;
var elseIfCondition = getAndRemoveAttr(el, 'v-else-if', true);
// 1. checkbox
var branch0 = cloneASTElement(el);
// process for on the main node
processFor(branch0);
addRawAttr(branch0, 'type', 'checkbox');
processElement(branch0, options);
branch0.processed = true; // prevent it from being processed twice
branch0.if = "(" + typeBinding + ")==='checkbox'" + ifConditionExtra;
addIfCondition(branch0, {
exp: branch0.if,
block: branch0
});
// 2. add radio else-if condition
var branch1 = cloneASTElement(el);
getAndRemoveAttr(branch1, 'v-for', true);
addRawAttr(branch1, 'type', 'radio');
processElement(branch1, options);
addIfCondition(branch0, {
exp: "(" + typeBinding + ")==='radio'" + ifConditionExtra,
block: branch1
});
// 3. other
var branch2 = cloneASTElement(el);
getAndRemoveAttr(branch2, 'v-for', true);
addRawAttr(branch2, ':type', typeBinding);
processElement(branch2, options);
addIfCondition(branch0, {
exp: ifCondition,
block: branch2
});
if (hasElse) {
branch0.else = true;
} else if (elseIfCondition) {
branch0.elseif = elseIfCondition;
}
return branch0
}
}
}
function cloneASTElement (el) {
return createASTElement(el.tag, el.attrsList.slice(), el.parent)
}
var model$1 = {
preTransformNode: preTransformNode
};
var modules$1 = [
klass$1,
style$1,
model$1
];
/* */
function text (el, dir) {
if (dir.value) {
addProp(el, 'textContent', ("_s(" + (dir.value) + ")"), dir);
}
}
/* */
function html (el, dir) {
if (dir.value) {
addProp(el, 'innerHTML', ("_s(" + (dir.value) + ")"), dir);
}
}
var directives$1 = {
model: model,
text: text,
html: html
};
/* */
var baseOptions = {
expectHTML: true,
modules: modules$1,
directives: directives$1,
isPreTag: isPreTag,
isUnaryTag: isUnaryTag,
mustUseProp: mustUseProp,
canBeLeftOpenTag: canBeLeftOpenTag,
isReservedTag: isReservedTag,
getTagNamespace: getTagNamespace,
staticKeys: genStaticKeys(modules$1)
};
/* */
var isStaticKey;
var isPlatformReservedTag;
var genStaticKeysCached = cached(genStaticKeys$1);
/**
* Goal of the optimizer: walk the generated template AST tree
* and detect sub-trees that are purely static, i.e. parts of
* the DOM that never needs to change.
*
* Once we detect these sub-trees, we can:
*
* 1. Hoist them into constants, so that we no longer need to
* create fresh nodes for them on each re-render;
* 2. Completely skip them in the patching process.
*/
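// For example, in <div><p>static</p><p>{{ msg }}</p></div> the first <p> is
// marked static, but it does not qualify as a static root because its only
// child is static text (hoisting it would cost more than re-creating it);
// the second <p> contains an expression node and therefore stays dynamic.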
function optimize (root, options) {
if (!root) { return }
isStaticKey = genStaticKeysCached(options.staticKeys || '');
isPlatformReservedTag = options.isReservedTag || no;
// first pass: mark all non-static nodes.
markStatic$1(root);
// second pass: mark static roots.
markStaticRoots(root, false);
}
function genStaticKeys$1 (keys) {
return makeMap(
'type,tag,attrsList,attrsMap,plain,parent,children,attrs,start,end,rawAttrsMap' +
(keys ? ',' + keys : '')
)
}
function markStatic$1 (node) {
node.static = isStatic(node);
if (node.type === 1) {
// do not make component slot content static. this avoids
// 1. components not able to mutate slot nodes
// 2. static slot content fails for hot-reloading
if (
!isPlatformReservedTag(node.tag) &&
node.tag !== 'slot' &&
node.attrsMap['inline-template'] == null
) {
return
}
for (var i = 0, l = node.children.length; i < l; i++) {
var child = node.children[i];
markStatic$1(child);
if (!child.static) {
node.static = false;
}
}
if (node.ifConditions) {
for (var i$1 = 1, l$1 = node.ifConditions.length; i$1 < l$1; i$1++) {
var block = node.ifConditions[i$1].block;
markStatic$1(block);
if (!block.static) {
node.static = false;
}
}
}
}
}
function markStaticRoots (node, isInFor) {
if (node.type === 1) {
if (node.static || node.once) {
node.staticInFor = isInFor;
}
// For a node to qualify as a static root, it should have children that
// are not just static text. Otherwise the cost of hoisting out will
    // outweigh the benefits and it's better to just always render it fresh.
if (node.static && node.children.length && !(
node.children.length === 1 &&
node.children[0].type === 3
)) {
node.staticRoot = true;
return
} else {
node.staticRoot = false;
}
if (node.children) {
for (var i = 0, l = node.children.length; i < l; i++) {
markStaticRoots(node.children[i], isInFor || !!node.for);
}
}
if (node.ifConditions) {
for (var i$1 = 1, l$1 = node.ifConditions.length; i$1 < l$1; i$1++) {
markStaticRoots(node.ifConditions[i$1].block, isInFor);
}
}
}
}
function isStatic (node) {
if (node.type === 2) { // expression
return false
}
if (node.type === 3) { // text
return true
}
return !!(node.pre || (
!node.hasBindings && // no dynamic bindings
!node.if && !node.for && // not v-if or v-for or v-else
!isBuiltInTag(node.tag) && // not a built-in
isPlatformReservedTag(node.tag) && // not a component
!isDirectChildOfTemplateFor(node) &&
Object.keys(node).every(isStaticKey)
))
}
function isDirectChildOfTemplateFor (node) {
while (node.parent) {
node = node.parent;
if (node.tag !== 'template') {
return false
}
if (node.for) {
return true
}
}
return false
}
/* */
var fnExpRE = /^([\w$_]+|\([^)]*?\))\s*=>|^function(?:\s+[\w$]+)?\s*\(/;
var fnInvokeRE = /\([^)]*?\);*$/;
var simplePathRE = /^[A-Za-z_$][\w$]*(?:\.[A-Za-z_$][\w$]*|\['[^']*?']|\["[^"]*?"]|\[\d+]|\[[A-Za-z_$][\w$]*])*$/;
// KeyboardEvent.keyCode aliases
var keyCodes = {
esc: 27,
tab: 9,
enter: 13,
space: 32,
up: 38,
left: 37,
right: 39,
down: 40,
'delete': [8, 46]
};
// KeyboardEvent.key aliases
var keyNames = {
// #7880: IE11 and Edge use `Esc` for Escape key name.
esc: ['Esc', 'Escape'],
tab: 'Tab',
enter: 'Enter',
// #9112: IE11 uses `Spacebar` for Space key name.
space: [' ', 'Spacebar'],
// #7806: IE11 uses key names without `Arrow` prefix for arrow keys.
up: ['Up', 'ArrowUp'],
left: ['Left', 'ArrowLeft'],
right: ['Right', 'ArrowRight'],
down: ['Down', 'ArrowDown'],
// #9112: IE11 uses `Del` for Delete key name.
'delete': ['Backspace', 'Delete', 'Del']
};
// #4868: modifiers that prevent the execution of the listener
// need to explicitly return null so that we can determine whether to remove
// the listener for .once
var genGuard = function (condition) { return ("if(" + condition + ")return null;"); };
var modifierCode = {
stop: '$event.stopPropagation();',
prevent: '$event.preventDefault();',
self: genGuard("$event.target !== $event.currentTarget"),
ctrl: genGuard("!$event.ctrlKey"),
shift: genGuard("!$event.shiftKey"),
alt: genGuard("!$event.altKey"),
meta: genGuard("!$event.metaKey"),
left: genGuard("'button' in $event && $event.button !== 0"),
middle: genGuard("'button' in $event && $event.button !== 1"),
right: genGuard("'button' in $event && $event.button !== 2")
};
function genHandlers (
events,
isNative
) {
var prefix = isNative ? 'nativeOn:' : 'on:';
var staticHandlers = "";
var dynamicHandlers = "";
for (var name in events) {
var handlerCode = genHandler(events[name]);
if (events[name] && events[name].dynamic) {
dynamicHandlers += name + "," + handlerCode + ",";
} else {
staticHandlers += "\"" + name + "\":" + handlerCode + ",";
}
}
staticHandlers = "{" + (staticHandlers.slice(0, -1)) + "}";
if (dynamicHandlers) {
return prefix + "_d(" + staticHandlers + ",[" + (dynamicHandlers.slice(0, -1)) + "])"
} else {
return prefix + staticHandlers
}
}
function genHandler (handler) {
if (!handler) {
return 'function(){}'
}
if (Array.isArray(handler)) {
return ("[" + (handler.map(function (handler) { return genHandler(handler); }).join(',')) + "]")
}
var isMethodPath = simplePathRE.test(handler.value);
var isFunctionExpression = fnExpRE.test(handler.value);
var isFunctionInvocation = simplePathRE.test(handler.value.replace(fnInvokeRE, ''));
if (!handler.modifiers) {
if (isMethodPath || isFunctionExpression) {
return handler.value
}
return ("function($event){" + (isFunctionInvocation ? ("return " + (handler.value)) : handler.value) + "}") // inline statement
} else {
var code = '';
var genModifierCode = '';
var keys = [];
for (var key in handler.modifiers) {
if (modifierCode[key]) {
genModifierCode += modifierCode[key];
// left/right
if (keyCodes[key]) {
keys.push(key);
}
} else if (key === 'exact') {
var modifiers = (handler.modifiers);
genModifierCode += genGuard(
['ctrl', 'shift', 'alt', 'meta']
.filter(function (keyModifier) { return !modifiers[keyModifier]; })
.map(function (keyModifier) { return ("$event." + keyModifier + "Key"); })
.join('||')
);
} else {
keys.push(key);
}
}
if (keys.length) {
code += genKeyFilter(keys);
}
// Make sure modifiers like prevent and stop get executed after key filtering
if (genModifierCode) {
code += genModifierCode;
}
var handlerCode = isMethodPath
? ("return " + (handler.value) + ".apply(null, arguments)")
: isFunctionExpression
? ("return (" + (handler.value) + ").apply(null, arguments)")
: isFunctionInvocation
? ("return " + (handler.value))
: handler.value;
return ("function($event){" + code + handlerCode + "}")
}
}
function genKeyFilter (keys) {
return (
// make sure the key filters only apply to KeyboardEvents
// #9441: can't use 'keyCode' in $event because Chrome autofill fires fake
// key events that do not have keyCode property...
"if(!$event.type.indexOf('key')&&" +
(keys.map(genFilterCode).join('&&')) + ")return null;"
)
}
function genFilterCode (key) {
var keyVal = parseInt(key, 10);
if (keyVal) {
return ("$event.keyCode!==" + keyVal)
}
var keyCode = keyCodes[key];
var keyName = keyNames[key];
return (
"_k($event.keyCode," +
(JSON.stringify(key)) + "," +
(JSON.stringify(keyCode)) + "," +
"$event.key," +
"" + (JSON.stringify(keyName)) +
")"
)
}
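// e.g. a handler for @keyup.enter="submit" compiles (via genHandler and
// genKeyFilter above) to:
// function($event){if(!$event.type.indexOf('key')&&_k($event.keyCode,"enter",13,$event.key,"Enter"))return null;return submit.apply(null, arguments)}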
/* */
function on (el, dir) {
if (dir.modifiers) {
warn("v-on without argument does not support modifiers.");
}
el.wrapListeners = function (code) { return ("_g(" + code + "," + (dir.value) + ")"); };
}
/* */
function bind$1 (el, dir) {
el.wrapData = function (code) {
return ("_b(" + code + ",'" + (el.tag) + "'," + (dir.value) + "," + (dir.modifiers && dir.modifiers.prop ? 'true' : 'false') + (dir.modifiers && dir.modifiers.sync ? ',true' : '') + ")")
};
}
/* */
var baseDirectives = {
on: on,
bind: bind$1,
cloak: noop
};
/* */
var CodegenState = function CodegenState (options) {
this.options = options;
this.warn = options.warn || baseWarn;
this.transforms = pluckModuleFunction(options.modules, 'transformCode');
this.dataGenFns = pluckModuleFunction(options.modules, 'genData');
this.directives = extend(extend({}, baseDirectives), options.directives);
var isReservedTag = options.isReservedTag || no;
this.maybeComponent = function (el) { return !!el.component || !isReservedTag(el.tag); };
this.onceId = 0;
this.staticRenderFns = [];
this.pre = false;
};
function generate (
ast,
options
) {
var state = new CodegenState(options);
  // fix #11483: root-level <script> tags should not be rendered.
var code = ast ? (ast.tag === 'script' ? 'null' : genElement(ast, state)) : '_c("div")';
return {
render: ("with(this){return " + code + "}"),
staticRenderFns: state.staticRenderFns
}
}
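// e.g. the template <div id="app">{{ msg }}</div> generates
// render: with(this){return _c('div',{attrs:{"id":"app"}},[_v(_s(msg))])}
// and an empty staticRenderFns array.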
function genElement (el, state) {
if (el.parent) {
el.pre = el.pre || el.parent.pre;
}
if (el.staticRoot && !el.staticProcessed) {
return genStatic(el, state)
} else if (el.once && !el.onceProcessed) {
return genOnce(el, state)
} else if (el.for && !el.forProcessed) {
return genFor(el, state)
} else if (el.if && !el.ifProcessed) {
return genIf(el, state)
} else if (el.tag === 'template' && !el.slotTarget && !state.pre) {
return genChildren(el, state) || 'void 0'
} else if (el.tag === 'slot') {
return genSlot(el, state)
} else {
// component or element
var code;
if (el.component) {
code = genComponent(el.component, el, state);
} else {
var data;
if (!el.plain || (el.pre && state.maybeComponent(el))) {
data = genData$2(el, state);
}
var children = el.inlineTemplate ? null : genChildren(el, state, true);
code = "_c('" + (el.tag) + "'" + (data ? ("," + data) : '') + (children ? ("," + children) : '') + ")";
}
// module transforms
for (var i = 0; i < state.transforms.length; i++) {
code = state.transforms[i](el, code);
}
return code
}
}
// hoist static sub-trees out
function genStatic (el, state) {
el.staticProcessed = true;
// Some elements (templates) need to behave differently inside of a v-pre
// node. All pre nodes are static roots, so we can use this as a location to
// wrap a state change and reset it upon exiting the pre node.
var originalPreState = state.pre;
if (el.pre) {
state.pre = el.pre;
}
state.staticRenderFns.push(("with(this){return " + (genElement(el, state)) + "}"));
state.pre = originalPreState;
return ("_m(" + (state.staticRenderFns.length - 1) + (el.staticInFor ? ',true' : '') + ")")
}
// v-once
function genOnce (el, state) {
el.onceProcessed = true;
if (el.if && !el.ifProcessed) {
return genIf(el, state)
} else if (el.staticInFor) {
var key = '';
var parent = el.parent;
while (parent) {
if (parent.for) {
key = parent.key;
break
}
parent = parent.parent;
}
if (!key) {
state.warn(
"v-once can only be used inside v-for that is keyed. ",
el.rawAttrsMap['v-once']
);
return genElement(el, state)
}
return ("_o(" + (genElement(el, state)) + "," + (state.onceId++) + "," + key + ")")
} else {
return genStatic(el, state)
}
}
function genIf (
el,
state,
altGen,
altEmpty
) {
el.ifProcessed = true; // avoid recursion
return genIfConditions(el.ifConditions.slice(), state, altGen, altEmpty)
}
function genIfConditions (
conditions,
state,
altGen,
altEmpty
) {
if (!conditions.length) {
return altEmpty || '_e()'
}
var condition = conditions.shift();
if (condition.exp) {
return ("(" + (condition.exp) + ")?" + (genTernaryExp(condition.block)) + ":" + (genIfConditions(conditions, state, altGen, altEmpty)))
} else {
return ("" + (genTernaryExp(condition.block)))
}
// v-if with v-once should generate code like (a)?_m(0):_m(1)
function genTernaryExp (el) {
return altGen
? altGen(el, state)
: el.once
? genOnce(el, state)
: genElement(el, state)
}
}
function genFor (
el,
state,
altGen,
altHelper
) {
var exp = el.for;
var alias = el.alias;
var iterator1 = el.iterator1 ? ("," + (el.iterator1)) : '';
var iterator2 = el.iterator2 ? ("," + (el.iterator2)) : '';
if (state.maybeComponent(el) &&
el.tag !== 'slot' &&
el.tag !== 'template' &&
!el.key
) {
state.warn(
"<" + (el.tag) + " v-for=\"" + alias + " in " + exp + "\">: component lists rendered with " +
"v-for should have explicit keys. " +
"See https://vuejs.org/guide/list.html#key for more info.",
el.rawAttrsMap['v-for'],
true /* tip */
);
}
el.forProcessed = true; // avoid recursion
return (altHelper || '_l') + "((" + exp + ")," +
"function(" + alias + iterator1 + iterator2 + "){" +
"return " + ((altGen || genElement)(el, state)) +
'})'
}
function genData$2 (el, state) {
var data = '{';
// directives first.
// directives may mutate the el's other properties before they are generated.
var dirs = genDirectives(el, state);
if (dirs) { data += dirs + ','; }
// key
if (el.key) {
data += "key:" + (el.key) + ",";
}
// ref
if (el.ref) {
data += "ref:" + (el.ref) + ",";
}
if (el.refInFor) {
data += "refInFor:true,";
}
// pre
if (el.pre) {
data += "pre:true,";
}
// record original tag name for components using "is" attribute
if (el.component) {
data += "tag:\"" + (el.tag) + "\",";
}
// module data generation functions
for (var i = 0; i < state.dataGenFns.length; i++) {
data += state.dataGenFns[i](el);
}
// attributes
if (el.attrs) {
data += "attrs:" + (genProps(el.attrs)) + ",";
}
// DOM props
if (el.props) {
data += "domProps:" + (genProps(el.props)) + ",";
}
// event handlers
if (el.events) {
data += (genHandlers(el.events, false)) + ",";
}
if (el.nativeEvents) {
data += (genHandlers(el.nativeEvents, true)) + ",";
}
// slot target
// only for non-scoped slots
if (el.slotTarget && !el.slotScope) {
data += "slot:" + (el.slotTarget) + ",";
}
// scoped slots
if (el.scopedSlots) {
data += (genScopedSlots(el, el.scopedSlots, state)) + ",";
}
// component v-model
if (el.model) {
data += "model:{value:" + (el.model.value) + ",callback:" + (el.model.callback) + ",expression:" + (el.model.expression) + "},";
}
// inline-template
if (el.inlineTemplate) {
var inlineTemplate = genInlineTemplate(el, state);
if (inlineTemplate) {
data += inlineTemplate + ",";
}
}
data = data.replace(/,$/, '') + '}';
// v-bind dynamic argument wrap
// v-bind with dynamic arguments must be applied using the same v-bind object
// merge helper so that class/style/mustUseProp attrs are handled correctly.
if (el.dynamicAttrs) {
data = "_b(" + data + ",\"" + (el.tag) + "\"," + (genProps(el.dynamicAttrs)) + ")";
}
// v-bind data wrap
if (el.wrapData) {
data = el.wrapData(data);
}
// v-on data wrap
if (el.wrapListeners) {
data = el.wrapListeners(data);
}
return data
}
function genDirectives (el, state) {
var dirs = el.directives;
if (!dirs) { return }
var res = 'directives:[';
var hasRuntime = false;
var i, l, dir, needRuntime;
for (i = 0, l = dirs.length; i < l; i++) {
dir = dirs[i];
needRuntime = true;
var gen = state.directives[dir.name];
if (gen) {
// compile-time directive that manipulates AST.
// returns true if it also needs a runtime counterpart.
needRuntime = !!gen(el, dir, state.warn);
}
if (needRuntime) {
hasRuntime = true;
res += "{name:\"" + (dir.name) + "\",rawName:\"" + (dir.rawName) + "\"" + (dir.value ? (",value:(" + (dir.value) + "),expression:" + (JSON.stringify(dir.value))) : '') + (dir.arg ? (",arg:" + (dir.isDynamicArg ? dir.arg : ("\"" + (dir.arg) + "\""))) : '') + (dir.modifiers ? (",modifiers:" + (JSON.stringify(dir.modifiers))) : '') + "},";
}
}
if (hasRuntime) {
return res.slice(0, -1) + ']'
}
}
function genInlineTemplate (el, state) {
var ast = el.children[0];
if (el.children.length !== 1 || ast.type !== 1) {
state.warn(
'Inline-template components must have exactly one child element.',
{ start: el.start }
);
}
if (ast && ast.type === 1) {
var inlineRenderFns = generate(ast, state.options);
return ("inlineTemplate:{render:function(){" + (inlineRenderFns.render) + "},staticRenderFns:[" + (inlineRenderFns.staticRenderFns.map(function (code) { return ("function(){" + code + "}"); }).join(',')) + "]}")
}
}
function genScopedSlots (
el,
slots,
state
) {
  // by default scoped slots are considered "stable"; this allows child
  // components with only scoped slots to skip forced updates from the parent.
  // but in some cases we have to bail out of this optimization,
  // for example if a slot has a dynamic name, or has v-if or v-for on it...
var needsForceUpdate = el.for || Object.keys(slots).some(function (key) {
var slot = slots[key];
return (
slot.slotTargetDynamic ||
slot.if ||
slot.for ||
containsSlotChild(slot) // is passing down slot from parent which may be dynamic
)
});
// #9534: if a component with scoped slots is inside a conditional branch,
// it's possible for the same component to be reused but with different
// compiled slot content. To avoid that, we generate a unique key based on
// the generated code of all the slot contents.
var needsKey = !!el.if;
// OR when it is inside another scoped slot or v-for (the reactivity may be
// disconnected due to the intermediate scope variable)
// #9438, #9506
// TODO: this can be further optimized by properly analyzing in-scope bindings
// and skip force updating ones that do not actually use scope variables.
if (!needsForceUpdate) {
var parent = el.parent;
while (parent) {
if (
(parent.slotScope && parent.slotScope !== emptySlotScopeToken) ||
parent.for
) {
needsForceUpdate = true;
break
}
if (parent.if) {
needsKey = true;
}
parent = parent.parent;
}
}
var generatedSlots = Object.keys(slots)
.map(function (key) { return genScopedSlot(slots[key], state); })
.join(',');
return ("scopedSlots:_u([" + generatedSlots + "]" + (needsForceUpdate ? ",null,true" : "") + (!needsForceUpdate && needsKey ? (",null,false," + (hash(generatedSlots))) : "") + ")")
}
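// djb2 XOR-variant string hash (hash = hash * 33 ^ char), used above to
// derive a stable numeric key from the generated slot code.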
function hash(str) {
var hash = 5381;
var i = str.length;
while(i) {
hash = (hash * 33) ^ str.charCodeAt(--i);
}
return hash >>> 0
}
function containsSlotChild (el) {
if (el.type === 1) {
if (el.tag === 'slot') {
return true
}
return el.children.some(containsSlotChild)
}
return false
}
function genScopedSlot (
el,
state
) {
var isLegacySyntax = el.attrsMap['slot-scope'];
if (el.if && !el.ifProcessed && !isLegacySyntax) {
return genIf(el, state, genScopedSlot, "null")
}
if (el.for && !el.forProcessed) {
return genFor(el, state, genScopedSlot)
}
var slotScope = el.slotScope === emptySlotScopeToken
? ""
: String(el.slotScope);
var fn = "function(" + slotScope + "){" +
"return " + (el.tag === 'template'
? el.if && isLegacySyntax
? ("(" + (el.if) + ")?" + (genChildren(el, state) || 'undefined') + ":undefined")
: genChildren(el, state) || 'undefined'
: genElement(el, state)) + "}";
// reverse proxy v-slot without scope on this.$slots
var reverseProxy = slotScope ? "" : ",proxy:true";
return ("{key:" + (el.slotTarget || "\"default\"") + ",fn:" + fn + reverseProxy + "}")
}
function genChildren (
el,
state,
checkSkip,
altGenElement,
altGenNode
) {
var children = el.children;
if (children.length) {
var el$1 = children[0];
// optimize single v-for
if (children.length === 1 &&
el$1.for &&
el$1.tag !== 'template' &&
el$1.tag !== 'slot'
) {
var normalizationType = checkSkip
? state.maybeComponent(el$1) ? ",1" : ",0"
: "";
return ("" + ((altGenElement || genElement)(el$1, state)) + normalizationType)
}
var normalizationType$1 = checkSkip
? getNormalizationType(children, state.maybeComponent)
: 0;
var gen = altGenNode || genNode;
return ("[" + (children.map(function (c) { return gen(c, state); }).join(',')) + "]" + (normalizationType$1 ? ("," + normalizationType$1) : ''))
}
}
// determine the normalization needed for the children array.
// 0: no normalization needed
// 1: simple normalization needed (possible 1-level deep nested array)
// 2: full normalization needed
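// e.g. a child with v-for, or a <template>/<slot> child, may expand into a
// nested array and forces full normalization (2); a child that may be a
// component only requires simple normalization (1).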
function getNormalizationType (
children,
maybeComponent
) {
var res = 0;
for (var i = 0; i < children.length; i++) {
var el = children[i];
if (el.type !== 1) {
continue
}
if (needsNormalization(el) ||
(el.ifConditions && el.ifConditions.some(function (c) { return needsNormalization(c.block); }))) {
res = 2;
break
}
if (maybeComponent(el) ||
(el.ifConditions && el.ifConditions.some(function (c) { return maybeComponent(c.block); }))) {
res = 1;
}
}
return res
}
function needsNormalization (el) {
return el.for !== undefined || el.tag === 'template' || el.tag === 'slot'
}
function genNode (node, state) {
if (node.type === 1) {
return genElement(node, state)
} else if (node.type === 3 && node.isComment) {
return genComment(node)
} else {
return genText(node)
}
}
function genText (text) {
return ("_v(" + (text.type === 2
? text.expression // no need for () because already wrapped in _s()
: transformSpecialNewlines(JSON.stringify(text.text))) + ")")
}
function genComment (comment) {
return ("_e(" + (JSON.stringify(comment.text)) + ")")
}
function genSlot (el, state) {
var slotName = el.slotName || '"default"';
var children = genChildren(el, state);
var res = "_t(" + slotName + (children ? (",function(){return " + children + "}") : '');
var attrs = el.attrs || el.dynamicAttrs
? genProps((el.attrs || []).concat(el.dynamicAttrs || []).map(function (attr) { return ({
// slot props are camelized
name: camelize(attr.name),
value: attr.value,
dynamic: attr.dynamic
}); }))
: null;
var bind$$1 = el.attrsMap['v-bind'];
if ((attrs || bind$$1) && !children) {
res += ",null";
}
if (attrs) {
res += "," + attrs;
}
if (bind$$1) {
res += (attrs ? '' : ',null') + "," + bind$$1;
}
return res + ')'
}
// componentName is el.component, taken as an argument to avoid Flow's pessimistic refinement
function genComponent (
componentName,
el,
state
) {
var children = el.inlineTemplate ? null : genChildren(el, state, true);
return ("_c(" + componentName + "," + (genData$2(el, state)) + (children ? ("," + children) : '') + ")")
}
function genProps (props) {
var staticProps = "";
var dynamicProps = "";
for (var i = 0; i < props.length; i++) {
var prop = props[i];
var value = transformSpecialNewlines(prop.value);
if (prop.dynamic) {
dynamicProps += (prop.name) + "," + value + ",";
} else {
staticProps += "\"" + (prop.name) + "\":" + value + ",";
}
}
staticProps = "{" + (staticProps.slice(0, -1)) + "}";
if (dynamicProps) {
return ("_d(" + staticProps + ",[" + (dynamicProps.slice(0, -1)) + "])")
} else {
return staticProps
}
}
// #3895, #4268
function transformSpecialNewlines (text) {
return text
.replace(/\u2028/g, '\\u2028')
.replace(/\u2029/g, '\\u2029')
}
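// U+2028 (line separator) and U+2029 (paragraph separator) are legal inside
// JSON strings but terminate lines in JavaScript source, so they must be
// escaped for the generated render code to parse.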
/* */
// these keywords should not appear inside expressions, but operators like
// typeof, instanceof and in are allowed
var prohibitedKeywordRE = new RegExp('\\b' + (
'do,if,for,let,new,try,var,case,else,with,await,break,catch,class,const,' +
'super,throw,while,yield,delete,export,import,return,switch,default,' +
'extends,finally,continue,debugger,function,arguments'
).split(',').join('\\b|\\b') + '\\b');
// these unary operators should not be used as property/method names
var unaryOperatorsRE = new RegExp('\\b' + (
'delete,typeof,void'
).split(',').join('\\s*\\([^\\)]*\\)|\\b') + '\\s*\\([^\\)]*\\)');
// strip strings in expressions
var stripStringRE = /'(?:[^'\\]|\\.)*'|"(?:[^"\\]|\\.)*"|`(?:[^`\\]|\\.)*\$\{|\}(?:[^`\\]|\\.)*`|`(?:[^`\\]|\\.)*`/g;
// detect problematic expressions in a template
function detectErrors (ast, warn) {
if (ast) {
checkNode(ast, warn);
}
}
function checkNode (node, warn) {
if (node.type === 1) {
for (var name in node.attrsMap) {
if (dirRE.test(name)) {
var value = node.attrsMap[name];
if (value) {
var range = node.rawAttrsMap[name];
if (name === 'v-for') {
checkFor(node, ("v-for=\"" + value + "\""), warn, range);
} else if (name === 'v-slot' || name[0] === '#') {
checkFunctionParameterExpression(value, (name + "=\"" + value + "\""), warn, range);
} else if (onRE.test(name)) {
checkEvent(value, (name + "=\"" + value + "\""), warn, range);
} else {
checkExpression(value, (name + "=\"" + value + "\""), warn, range);
}
}
}
}
if (node.children) {
for (var i = 0; i < node.children.length; i++) {
checkNode(node.children[i], warn);
}
}
} else if (node.type === 2) {
checkExpression(node.expression, node.text, warn, node);
}
}
function checkEvent (exp, text, warn, range) {
var stripped = exp.replace(stripStringRE, '');
var keywordMatch = stripped.match(unaryOperatorsRE);
if (keywordMatch && stripped.charAt(keywordMatch.index - 1) !== '$') {
warn(
"avoid using JavaScript unary operator as property name: " +
"\"" + (keywordMatch[0]) + "\" in expression " + (text.trim()),
range
);
}
checkExpression(exp, text, warn, range);
}
function checkFor (node, text, warn, range) {
checkExpression(node.for || '', text, warn, range);
checkIdentifier(node.alias, 'v-for alias', text, warn, range);
checkIdentifier(node.iterator1, 'v-for iterator', text, warn, range);
checkIdentifier(node.iterator2, 'v-for iterator', text, warn, range);
}
function checkIdentifier (
ident,
type,
text,
warn,
range
) {
if (typeof ident === 'string') {
try {
new Function(("var " + ident + "=_"));
} catch (e) {
warn(("invalid " + type + " \"" + ident + "\" in expression: " + (text.trim())), range);
}
}
}
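// e.g. checkIdentifier('item', 'v-for alias', text, warn, range) compiles
// "var item=_", which throws (and warns) for values that are not valid
// identifiers, such as "item)".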
function checkExpression (exp, text, warn, range) {
try {
new Function(("return " + exp));
} catch (e) {
var keywordMatch = exp.replace(stripStringRE, '').match(prohibitedKeywordRE);
if (keywordMatch) {
warn(
"avoid using JavaScript keyword as property name: " +
"\"" + (keywordMatch[0]) + "\"\n Raw expression: " + (text.trim()),
range
);
} else {
warn(
"invalid expression: " + (e.message) + " in\n\n" +
" " + exp + "\n\n" +
" Raw expression: " + (text.trim()) + "\n",
range
);
}
}
}
function checkFunctionParameterExpression (exp, text, warn, range) {
try {
new Function(exp, '');
} catch (e) {
warn(
"invalid function parameter expression: " + (e.message) + " in\n\n" +
" " + exp + "\n\n" +
" Raw expression: " + (text.trim()) + "\n",
range
);
}
}
/* */
var range = 2;
function generateCodeFrame (
source,
start,
end
) {
if ( start === void 0 ) start = 0;
if ( end === void 0 ) end = source.length;
var lines = source.split(/\r?\n/);
var count = 0;
var res = [];
for (var i = 0; i < lines.length; i++) {
count += lines[i].length + 1;
if (count >= start) {
for (var j = i - range; j <= i + range || end > count; j++) {
if (j < 0 || j >= lines.length) { continue }
res.push(("" + (j + 1) + (repeat$1(" ", 3 - String(j + 1).length)) + "| " + (lines[j])));
var lineLength = lines[j].length;
if (j === i) {
// push underline
var pad = start - (count - lineLength) + 1;
var length = end > count ? lineLength - pad : end - start;
res.push(" | " + repeat$1(" ", pad) + repeat$1("^", length));
} else if (j > i) {
if (end > count) {
var length$1 = Math.min(end - count, lineLength);
res.push(" | " + repeat$1("^", length$1));
}
count += lineLength + 1;
}
}
break
}
}
return res.join('\n')
}
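// String.prototype.repeat ponyfill: builds the result by binary doubling,
// using O(log n) concatenations instead of n.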
function repeat$1 (str, n) {
var result = '';
if (n > 0) {
while (true) { // eslint-disable-line
if (n & 1) { result += str; }
n >>>= 1;
if (n <= 0) { break }
str += str;
}
}
return result
}
/* */
function createFunction (code, errors) {
try {
return new Function(code)
} catch (err) {
errors.push({ err: err, code: code });
return noop
}
}
function createCompileToFunctionFn (compile) {
var cache = Object.create(null);
return function compileToFunctions (
template,
options,
vm
) {
options = extend({}, options);
var warn$$1 = options.warn || warn;
delete options.warn;
/* istanbul ignore if */
{
// detect possible CSP restriction
try {
new Function('return 1');
} catch (e) {
if (e.toString().match(/unsafe-eval|CSP/)) {
warn$$1(
'It seems you are using the standalone build of Vue.js in an ' +
'environment with Content Security Policy that prohibits unsafe-eval. ' +
'The template compiler cannot work in this environment. Consider ' +
'relaxing the policy to allow unsafe-eval or pre-compiling your ' +
'templates into render functions.'
);
}
}
}
// check cache
var key = options.delimiters
? String(options.delimiters) + template
: template;
if (cache[key]) {
return cache[key]
}
// compile
var compiled = compile(template, options);
// check compilation errors/tips
{
if (compiled.errors && compiled.errors.length) {
if (options.outputSourceRange) {
compiled.errors.forEach(function (e) {
warn$$1(
"Error compiling template:\n\n" + (e.msg) + "\n\n" +
generateCodeFrame(template, e.start, e.end),
vm
);
});
} else {
warn$$1(
"Error compiling template:\n\n" + template + "\n\n" +
compiled.errors.map(function (e) { return ("- " + e); }).join('\n') + '\n',
vm
);
}
}
if (compiled.tips && compiled.tips.length) {
if (options.outputSourceRange) {
compiled.tips.forEach(function (e) { return tip(e.msg, vm); });
} else {
compiled.tips.forEach(function (msg) { return tip(msg, vm); });
}
}
}
// turn code into functions
var res = {};
var fnGenErrors = [];
res.render = createFunction(compiled.render, fnGenErrors);
res.staticRenderFns = compiled.staticRenderFns.map(function (code) {
return createFunction(code, fnGenErrors)
});
// check function generation errors.
// this should only happen if there is a bug in the compiler itself.
// mostly for codegen development use
/* istanbul ignore if */
{
if ((!compiled.errors || !compiled.errors.length) && fnGenErrors.length) {
warn$$1(
"Failed to generate render function:\n\n" +
fnGenErrors.map(function (ref) {
var err = ref.err;
var code = ref.code;
return ((err.toString()) + " in\n\n" + code + "\n");
}).join('\n'),
vm
);
}
}
return (cache[key] = res)
}
}
/* */
function createCompilerCreator (baseCompile) {
return function createCompiler (baseOptions) {
function compile (
template,
options
) {
var finalOptions = Object.create(baseOptions);
var errors = [];
var tips = [];
var warn = function (msg, range, tip) {
(tip ? tips : errors).push(msg);
};
if (options) {
if (options.outputSourceRange) {
// $flow-disable-line
var leadingSpaceLength = template.match(/^\s*/)[0].length;
warn = function (msg, range, tip) {
var data = { msg: msg };
if (range) {
if (range.start != null) {
data.start = range.start + leadingSpaceLength;
}
if (range.end != null) {
data.end = range.end + leadingSpaceLength;
}
}
(tip ? tips : errors).push(data);
};
}
// merge custom modules
if (options.modules) {
finalOptions.modules =
(baseOptions.modules || []).concat(options.modules);
}
// merge custom directives
if (options.directives) {
finalOptions.directives = extend(
Object.create(baseOptions.directives || null),
options.directives
);
}
// copy other options
for (var key in options) {
if (key !== 'modules' && key !== 'directives') {
finalOptions[key] = options[key];
}
}
}
finalOptions.warn = warn;
var compiled = baseCompile(template.trim(), finalOptions);
{
detectErrors(compiled.ast, warn);
}
compiled.errors = errors;
compiled.tips = tips;
return compiled
}
return {
compile: compile,
compileToFunctions: createCompileToFunctionFn(compile)
}
}
}
/* */
// `createCompilerCreator` allows creating compilers that use alternative
// parser/optimizer/codegen, e.g. the SSR optimizing compiler.
// Here we just export a default compiler using the default parts.
var createCompiler = createCompilerCreator(function baseCompile (
template,
options
) {
var ast = parse(template.trim(), options);
if (options.optimize !== false) {
optimize(ast, options);
}
var code = generate(ast, options);
return {
ast: ast,
render: code.render,
staticRenderFns: code.staticRenderFns
}
});
/* */
var ref$1 = createCompiler(baseOptions);
var compile = ref$1.compile;
var compileToFunctions = ref$1.compileToFunctions;
/* */
// check whether current browser encodes a char inside attribute values
var div;
function getShouldDecode (href) {
div = div || document.createElement('div');
div.innerHTML = href ? "<a href=\"\n\"/>" : "<div a=\"\n\"/>";
return div.innerHTML.indexOf(' ') > 0
}
// #3663: IE encodes newlines inside attribute values while other browsers don't
var shouldDecodeNewlines = inBrowser ? getShouldDecode(false) : false;
// #6828: chrome encodes content in a[href]
var shouldDecodeNewlinesForHref = inBrowser ? getShouldDecode(true) : false;
/* */
var idToTemplate = cached(function (id) {
var el = query(id);
return el && el.innerHTML
});
var mount = Vue.prototype.$mount;
Vue.prototype.$mount = function (
el,
hydrating
) {
el = el && query(el);
/* istanbul ignore if */
if (el === document.body || el === document.documentElement) {
warn(
"Do not mount Vue to <html> or <body> - mount to normal elements instead."
);
return this
}
var options = this.$options;
// resolve template/el and convert to render function
if (!options.render) {
var template = options.template;
if (template) {
if (typeof template === 'string') {
if (template.charAt(0) === '#') {
template = idToTemplate(template);
/* istanbul ignore if */
if (!template) {
warn(
("Template element not found or is empty: " + (options.template)),
this
);
}
}
} else if (template.nodeType) {
template = template.innerHTML;
} else {
{
warn('invalid template option:' + template, this);
}
return this
}
} else if (el) {
template = getOuterHTML(el);
}
if (template) {
/* istanbul ignore if */
if (config.performance && mark) {
mark('compile');
}
var ref = compileToFunctions(template, {
outputSourceRange: "development" !== 'production',
shouldDecodeNewlines: shouldDecodeNewlines,
shouldDecodeNewlinesForHref: shouldDecodeNewlinesForHref,
delimiters: options.delimiters,
comments: options.comments
}, this);
var render = ref.render;
var staticRenderFns = ref.staticRenderFns;
options.render = render;
options.staticRenderFns = staticRenderFns;
/* istanbul ignore if */
if (config.performance && mark) {
mark('compile end');
measure(("vue " + (this._name) + " compile"), 'compile', 'compile end');
}
}
}
return mount.call(this, el, hydrating)
};
/**
* Get outerHTML of elements, taking care
* of SVG elements in IE as well.
*/
function getOuterHTML (el) {
if (el.outerHTML) {
return el.outerHTML
} else {
var container = document.createElement('div');
container.appendChild(el.cloneNode(true));
return container.innerHTML
}
}
Vue.compile = compileToFunctions;
return Vue;
}));
/***/ })
/******/ });
/************************************************************************/
/******/ // The module cache
/******/ var __webpack_module_cache__ = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/ // Check if module is in cache
/******/ var cachedModule = __webpack_module_cache__[moduleId];
/******/ if (cachedModule !== undefined) {
/******/ return cachedModule.exports;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = __webpack_module_cache__[moduleId] = {
/******/ // no module.id needed
/******/ // no module.loaded needed
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ __webpack_modules__[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/************************************************************************/
/******/ /* webpack/runtime/compat get default export */
/******/ (() => {
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = (module) => {
/******/ var getter = module && module.__esModule ?
/******/ () => (module['default']) :
/******/ () => (module);
/******/ __webpack_require__.d(getter, { a: getter });
/******/ return getter;
/******/ };
/******/ })();
/******/
/******/ /* webpack/runtime/define property getters */
/******/ (() => {
/******/ // define getter functions for harmony exports
/******/ __webpack_require__.d = (exports, definition) => {
/******/ for(var key in definition) {
/******/ if(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {
/******/ Object.defineProperty(exports, key, { enumerable: true, get: definition[key] });
/******/ }
/******/ }
/******/ };
/******/ })();
/******/
/******/ /* webpack/runtime/global */
/******/ (() => {
/******/ __webpack_require__.g = (function() {
/******/ if (typeof globalThis === 'object') return globalThis;
/******/ try {
/******/ return this || new Function('return this')();
/******/ } catch (e) {
/******/ if (typeof window === 'object') return window;
/******/ }
/******/ })();
/******/ })();
/******/
/******/ /* webpack/runtime/hasOwnProperty shorthand */
/******/ (() => {
/******/ __webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))
/******/ })();
/******/
/******/ /* webpack/runtime/make namespace object */
/******/ (() => {
/******/ // define __esModule on exports
/******/ __webpack_require__.r = (exports) => {
/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) {
/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
/******/ }
/******/ Object.defineProperty(exports, '__esModule', { value: true });
/******/ };
/******/ })();
/******/
/************************************************************************/
var __webpack_exports__ = {};
// This entry needs to be wrapped in an IIFE because it needs to be in strict mode.
(() => {
"use strict";
/*!***************************************************!*\
!*** ./resources/js/admin/cash-register/index.js ***!
\***************************************************/
__webpack_require__.r(__webpack_exports__);
/* harmony import */ var _babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! @babel/runtime/regenerator */ "./node_modules/@babel/runtime/regenerator/index.js");
/* harmony import */ var _babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(_babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_0__);
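// Babel async/await helpers: asyncGeneratorStep advances the regenerator
// state machine one step, and _asyncToGenerator wraps a generator function
// so it runs to completion and returns a Promise.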
function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } }
function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; }
window.Vue = __webpack_require__(/*! vue/dist/vue.js */ "./node_modules/vue/dist/vue.js");
var vm = new Vue({
el: "#content",
data: {
loading: true,
subtotal: 0.0.toFixed(2),
discount: 0.0.toFixed(2),
total: 0.0.toFixed(2),
coupon: {
code: "",
discount: 0,
discount_value: "",
type: ""
},
product: {
id: "",
name: "",
price: "",
real_price: "",
qty: "",
stock: "",
total: ""
},
product_list: [],
search_list: [],
submit_loading: false
},
computed: {},
methods: {
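// Looks up the coupon code typed into input#coupon via POST
// /api/get-coupon-data; on a 400 response the error is shown and the coupon
// is reset, otherwise the returned coupon is applied. Prices are
// recalculated either way.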
get_coupon: function get_coupon() {
var _this = this;
return _asyncToGenerator( /*#__PURE__*/_babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_0___default().mark(function _callee() {
var url, coupon_input, response, _yield$response$json, error, _yield$response$json2, coupon;
return _babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_0___default().wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
url = "".concat(window.location.origin, "/api/get-coupon-data");
coupon_input = document.querySelector('input[id="coupon"]');
if (!(coupon_input.value !== "")) {
_context.next = 25;
break;
}
_context.next = 5;
return fetch(url, {
method: "POST",
cache: "no-cache",
headers: {
"Content-Type": "application/json"
},
body: JSON.stringify({
code: coupon_input.value
})
});
case 5:
response = _context.sent;
if (!(response.status === 400)) {
_context.next = 16;
break;
}
_context.next = 9;
return response.json();
case 9:
_yield$response$json = _context.sent;
error = _yield$response$json.error;
coupon_input.classList.add("border-danger");
coupon_input.nextSibling.nextSibling.textContent = error;
_this.coupon = {
code: "",
discount: 0.0.toFixed(2),
discount_value: "",
type: ""
};
_context.next = 23;
break;
case 16:
_context.next = 18;
return response.json();
case 18:
_yield$response$json2 = _context.sent;
coupon = _yield$response$json2.coupon;
_this.coupon = coupon;
coupon_input.classList.remove("border-danger");
coupon_input.nextSibling.nextSibling.textContent = "";
case 23:
_context.next = 28;
break;
case 25:
_this.coupon = {
code: "",
discount: 0.0.toFixed(2),
discount_value: "",
type: ""
};
coupon_input.classList.remove("border-danger");
coupon_input.nextSibling.nextSibling.textContent = "";
case 28:
_this.calculate_price();
case 29:
case "end":
return _context.stop();
}
}
}, _callee);
}))();
},
add_product: function add_product(product) {
var _this2 = this;
return _asyncToGenerator( /*#__PURE__*/_babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_0___default().mark(function _callee2() {
return _babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_0___default().wrap(function _callee2$(_context2) {
while (1) {
switch (_context2.prev = _context2.next) {
case 0:
if (!_this2.product_list.some(function (item) {
return item.id === product.id;
})) {
_context2.next = 2;
break;
}
return _context2.abrupt("return");
case 2:
_this2.product = {
id: product.id,
name: product.name,
price: product.price,
real_price: product.real_price,
qty: 1,
stock: product.temp_stock,
total: (1 * product.real_price).toFixed(2)
};
_this2.product_list.push(_this2.product);
_this2.search_list = _this2.search_list.filter(function (item) {
return item.id !== product.id;
});
_this2.calculate_price();
case 6:
case "end":
return _context2.stop();
}
}
}, _callee2);
}))();
},
update_product: function update_product(e) {
var _this3 = this;
return _asyncToGenerator( /*#__PURE__*/_babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_0___default().mark(function _callee3() {
var input, qty, url, response, _yield$response$json3, product;
return _babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_0___default().wrap(function _callee3$(_context3) {
while (1) {
switch (_context3.prev = _context3.next) {
case 0:
input = e.target;
qty = parseFloat(input.value);
url = "".concat(window.location.origin, "/api/find-product-by-id");
_context3.next = 5;
return fetch(url, {
method: "POST",
cache: "no-cache",
headers: {
"Content-Type": "application/json"
},
body: JSON.stringify({
id: _this3.product.id
})
});
case 5:
response = _context3.sent;
if (!(response.status !== 400)) {
_context3.next = 19;
break;
}
_context3.next = 9;
return response.json();
case 9:
_yield$response$json3 = _context3.sent;
product = _yield$response$json3.product;
if (!(qty <= 0 || qty > parseFloat(product.stock) || qty % 1 != 0)) {
_context3.next = 16;
break;
}
input.classList.add("border-danger");
return _context3.abrupt("return");
case 16:
input.classList.remove("border-danger");
_this3.product_list = _this3.product_list.map(function (item) {
item.total = (item.real_price * item.qty).toFixed(2);
return item;
});
_this3.calculate_price();
case 19:
case "end":
return _context3.stop();
}
}
}, _callee3);
}))();
},
delete_product: function delete_product(product) {
this.product_list = this.product_list.filter(function (item) {
return item.id !== product.id;
});
this.list_products();
this.calculate_price();
},
list_products: function list_products() {
var _this4 = this;
return _asyncToGenerator( /*#__PURE__*/_babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_0___default().mark(function _callee4() {
var url, search, response, _yield$response$json4, products;
return _babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_0___default().wrap(function _callee4$(_context4) {
while (1) {
switch (_context4.prev = _context4.next) {
case 0:
url = "".concat(window.location.origin, "/api/search-products");
search = document.querySelector('input[id="search"]').value;
_context4.next = 4;
return fetch(url, {
method: "POST",
cache: "no-cache",
headers: {
"Content-Type": "application/json"
},
body: JSON.stringify({
search: search,
product_list: _this4.product_list
})
});
case 4:
response = _context4.sent;
if (!(response.status !== 400)) {
_context4.next = 11;
break;
}
_context4.next = 8;
return response.json();
case 8:
_yield$response$json4 = _context4.sent;
products = _yield$response$json4.products;
_this4.search_list = products;
case 11:
case "end":
return _context4.stop();
}
}
}, _callee4);
}))();
},
calculate_price: function calculate_price() {
var total = 0;
this.product_list.forEach(function (prod) {
total += parseFloat(prod.total);
});
this.subtotal = total.toFixed(2);
if (this.coupon.code) {
if (this.coupon.type === "Porcentaje") {
this.discount = (this.subtotal * this.coupon.discount / 100).toFixed(2);
} else {
this.discount = parseFloat(this.subtotal) <= parseFloat(this.coupon.discount) ? this.subtotal : this.coupon.discount;
this.discount = parseFloat(this.discount).toFixed(2);
}
}
this.total = (this.subtotal - this.discount).toFixed(2);
},
submit: function submit() {
var _this5 = this;
return _asyncToGenerator( /*#__PURE__*/_babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_0___default().mark(function _callee5() {
var url, formData, response, _yield$response$json5, error;
return _babel_runtime_regenerator__WEBPACK_IMPORTED_MODULE_0___default().wrap(function _callee5$(_context5) {
while (1) {
switch (_context5.prev = _context5.next) {
case 0:
_context5.prev = 0;
_this5.submit_loading = true;
if (_this5.product_list.length) {
_context5.next = 4;
break;
}
throw "Debe agregar productos";
case 4:
if (!_this5.product_list.some(function (item) {
return parseFloat(item.qty) <= 0 || parseFloat(item.qty) > parseFloat(item.stock) || parseFloat(item.qty) % 1 != 0;
})) {
_context5.next = 6;
break;
}
throw "La cantidad de un producto no es correcto";
case 6:
if (document.getElementById("customer").value) {
_context5.next = 8;
break;
}
throw "Falta seleccionar al cliente";
case 8:
url = "".concat(window.location.origin, "/api/store-cash-register");
formData = new FormData(document.querySelector('form[id="form"]'));
formData.append("product_list", JSON.stringify(_this5.product_list));
formData.append("discount", _this5.discount);
formData.append("subtotal", _this5.subtotal);
formData.append("total", _this5.total);
_context5.next = 16;
return fetch(url, {
method: "POST",
cache: "no-cache",
body: formData
});
case 16:
response = _context5.sent;
if (!(response.status !== 400)) {
_context5.next = 22;
break;
}
toastr.success("Orden registrada");
window.location.href = "".concat(window.location.origin, "/admin/order");
_context5.next = 27;
break;
case 22:
_context5.next = 24;
return response.json();
case 24:
_yield$response$json5 = _context5.sent;
error = _yield$response$json5.error;
throw error;
case 27:
_context5.next = 33;
break;
case 29:
_context5.prev = 29;
_context5.t0 = _context5["catch"](0);
toastr.error(_context5.t0);
_this5.submit_loading = false;
case 33:
case "end":
return _context5.stop();
}
}
}, _callee5, null, [[0, 29]]);
}))();
}
},
created: function created() {
this.loading = false;
if (!this.loading) document.querySelector('tr[id="product_list"]').classList.remove("d-none");
this.list_products();
}
});
})();
/******/ })()
; | parseFilters |
main.go | package main
import (
"context"
"fmt"
"log"
"os"
kafka "github.com/apoorvag-mav/kafka-go"
"github.com/mongodb/mongo-go-driver/mongo"
)
func getMongoCollection(mongoURL, dbName, collectionName string) *mongo.Collection {
client, err := mongo.Connect(context.Background(), mongoURL)
if err != nil {
log.Fatal(err)
}
// Check the connection
err = client.Ping(context.Background(), nil)
if err != nil {
log.Fatal(err)
}
fmt.Println("Connected to MongoDB ... !!")
db := client.Database(dbName)
collection := db.Collection(collectionName)
return collection
}
func getKafkaReader(kafkaURL, topic, groupID string) *kafka.Reader |
func main() {
// get Mongo db Collection using environment variables.
mongoURL := os.Getenv("mongoURL")
dbName := os.Getenv("dbName")
collectionName := os.Getenv("collectionName")
collection := getMongoCollection(mongoURL, dbName, collectionName)
// get kafka reader using environment variables.
kafkaURL := os.Getenv("kafkaURL")
topic := os.Getenv("topic")
groupID := os.Getenv("groupID")
reader := getKafkaReader(kafkaURL, topic, groupID)
defer reader.Close()
fmt.Println("start consuming ... !!")
for {
msg, err := reader.ReadMessage(context.Background())
if err != nil {
log.Fatal(err)
}
insertResult, err := collection.InsertOne(context.Background(), msg)
if err != nil {
log.Fatal(err)
}
fmt.Println("Inserted a single document: ", insertResult.InsertedID)
}
}
| {
return kafka.NewReader(kafka.ReaderConfig{
Brokers: []string{kafkaURL},
GroupID: groupID,
Topic: topic,
MinBytes: 10e3, // 10KB
MaxBytes: 10e6, // 10MB
})
} |
abstract-view-port.js | "use strict";
var __extends = (this && this.__extends) || (function () {
var extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
return function (d, b) {
extendStatics(d, b);
function __() { this.constructor = d; } | var abstract_component_1 = require("./abstract-component");
var AbstractViewPort = (function (_super) {
__extends(AbstractViewPort, _super);
function AbstractViewPort() {
return _super !== null && _super.apply(this, arguments) || this;
}
AbstractViewPort.prototype.canActivate = function (params, routeConfig, navigationInstruction) {
return Promise.resolve(true);
};
AbstractViewPort.prototype.activate = function (params, routeConfig, navigationInstruction) {
return Promise.resolve();
};
AbstractViewPort.prototype.canDeactivate = function () {
return Promise.resolve(true);
};
AbstractViewPort.prototype.deactivate = function () {
return Promise.resolve();
};
return AbstractViewPort;
}(abstract_component_1.AbstractComponent));
exports.AbstractViewPort = AbstractViewPort;
//# sourceMappingURL=abstract-view-port.js.map | d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
Object.defineProperty(exports, "__esModule", { value: true }); |
text_test.go | package pgtype_test
import (
"bytes"
"reflect"
"testing"
"github.com/jackc/pgtype"
"github.com/jackc/pgtype/testutil"
)
func | (t *testing.T) {
for _, pgTypeName := range []string{"text", "varchar"} {
testutil.TestSuccessfulTranscode(t, pgTypeName, []interface{}{
&pgtype.Text{String: "", Status: pgtype.Present},
&pgtype.Text{String: "foo", Status: pgtype.Present},
&pgtype.Text{Status: pgtype.Null},
})
}
}
func TestTextSet(t *testing.T) {
successfulTests := []struct {
source interface{}
result pgtype.Text
}{
{source: "foo", result: pgtype.Text{String: "foo", Status: pgtype.Present}},
{source: _string("bar"), result: pgtype.Text{String: "bar", Status: pgtype.Present}},
{source: (*string)(nil), result: pgtype.Text{Status: pgtype.Null}},
}
for i, tt := range successfulTests {
var d pgtype.Text
err := d.Set(tt.source)
if err != nil {
t.Errorf("%d: %v", i, err)
}
if d != tt.result {
t.Errorf("%d: expected %v to convert to %v, but it was %v", i, tt.source, tt.result, d)
}
}
}
func TestTextAssignTo(t *testing.T) {
var s string
var ps *string
stringTests := []struct {
src pgtype.Text
dst interface{}
expected interface{}
}{
{src: pgtype.Text{String: "foo", Status: pgtype.Present}, dst: &s, expected: "foo"},
{src: pgtype.Text{Status: pgtype.Null}, dst: &ps, expected: ((*string)(nil))},
}
for i, tt := range stringTests {
err := tt.src.AssignTo(tt.dst)
if err != nil {
t.Errorf("%d: %v", i, err)
}
if dst := reflect.ValueOf(tt.dst).Elem().Interface(); dst != tt.expected {
t.Errorf("%d: expected %v to assign %v, but result was %v", i, tt.src, tt.expected, dst)
}
}
var buf []byte
bytesTests := []struct {
src pgtype.Text
dst *[]byte
expected []byte
}{
{src: pgtype.Text{String: "foo", Status: pgtype.Present}, dst: &buf, expected: []byte("foo")},
{src: pgtype.Text{Status: pgtype.Null}, dst: &buf, expected: nil},
}
for i, tt := range bytesTests {
err := tt.src.AssignTo(tt.dst)
if err != nil {
t.Errorf("%d: %v", i, err)
}
		if !bytes.Equal(*tt.dst, tt.expected) {
			t.Errorf("%d: expected %v to assign %v, but result was %v", i, tt.src, tt.expected, *tt.dst)
}
}
pointerAllocTests := []struct {
src pgtype.Text
dst interface{}
expected interface{}
}{
{src: pgtype.Text{String: "foo", Status: pgtype.Present}, dst: &ps, expected: "foo"},
}
for i, tt := range pointerAllocTests {
err := tt.src.AssignTo(tt.dst)
if err != nil {
t.Errorf("%d: %v", i, err)
}
if dst := reflect.ValueOf(tt.dst).Elem().Elem().Interface(); dst != tt.expected {
t.Errorf("%d: expected %v to assign %v, but result was %v", i, tt.src, tt.expected, dst)
}
}
errorTests := []struct {
src pgtype.Text
dst interface{}
}{
{src: pgtype.Text{Status: pgtype.Null}, dst: &s},
}
for i, tt := range errorTests {
err := tt.src.AssignTo(tt.dst)
if err == nil {
t.Errorf("%d: expected error but none was returned (%v -> %v)", i, tt.src, tt.dst)
}
}
}
| TestTextTranscode |
backend_address_pool.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class BackendAddressPool(SubResource):
"""Pool of backend IP addresses.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar backend_ip_configurations: Gets collection of references to IP
addresses defined in network interfaces.
:vartype backend_ip_configurations:
list[~azure.mgmt.network.v2018_11_01.models.NetworkInterfaceIPConfiguration]
:ivar load_balancing_rules: Gets load balancing rules that use this
backend address pool.
:vartype load_balancing_rules:
list[~azure.mgmt.network.v2018_11_01.models.SubResource]
:ivar outbound_rule: Gets outbound rules that use this backend address
pool.
:vartype outbound_rule: ~azure.mgmt.network.v2018_11_01.models.SubResource
:param provisioning_state: Get provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Gets name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'backend_ip_configurations': {'readonly': True},
'load_balancing_rules': {'readonly': True},
'outbound_rule': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'backend_ip_configurations': {'key': 'properties.backendIPConfigurations', 'type': '[NetworkInterfaceIPConfiguration]'},
'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[SubResource]'},
'outbound_rule': {'key': 'properties.outboundRule', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
| super(BackendAddressPool, self).__init__(**kwargs)
self.backend_ip_configurations = None
self.load_balancing_rules = None
self.outbound_rule = None
self.provisioning_state = kwargs.get('provisioning_state', None)
self.name = kwargs.get('name', None)
self.etag = kwargs.get('etag', None) |
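
# Usage sketch (hypothetical values; the generated __init__ above accepts the
# writable fields as keyword arguments, per the attribute map):
#
#     pool = BackendAddressPool(name='backendPool1', provisioning_state='Updating')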
|
webpack.config.js | var webpack = require('webpack');
var CommonsChunkPlugin = require("webpack/lib/optimize/CommonsChunkPlugin");
module.exports = {
entry: {
index:'./public/html/js/index.js',
main:'./public/html/js/main.js'
},
output: {
filename: '[name].js'
},
module: {
loaders: [{
test: /\.js$/,
exclude: /node_modules/,
loader: 'babel-loader',
}]
},
plugins: [
//new webpack.optimize.UglifyJsPlugin({
// compress: {
// warnings: false,
// },
// output: {
// comments: false,
// },
//}),// minify and uglify
new webpack.ProvidePlugin({
$: 'jquery'
}),// make third-party libraries available directly
new CommonsChunkPlugin({
name: "commons",
// (the commons chunk name)
filename: "commons.js",
// (the filename of the commons chunk)
|
chunks: ["index", "main"]
// (Only use these entries)
})// define the commons chunk
]
};
/*module.exports = {
entry: ['./src/index'], // file extension after index is optional for .js files
output: {
path: path.join(__dirname, 'dist'),
filename: 'bundle.js'
},
devServer: {
contentBase: "./src",//本地服务器所加载的页面所在的目录
colors: true,// colored output in the terminal
historyApiFallback: true,// don't redirect; fall back via the history API
inline: true// live reload
},
module: {
//加载器配置
loaders: [
{ test: /\.css$/, loader: 'style-loader!css-loader' }
]
},
plugins: [
new webpack.optimize.UglifyJsPlugin({
compressor: {
warnings: false,
},
}),
new webpack.optimize.OccurenceOrderPlugin()
]
}*/ | minChunks: 2,
// (Modules must be shared between 2 entries)
|
IXMLAttributeOptions.d.ts | export interface IXMLAttributeOptions {
name?: string;
required?: boolean;
namespace?: string;
} | ||
mergeArr.js | var restArgs = require('./restArgs'); |
for (var i = 0, len = arrays.length; i < len; i++) {
var arr = arrays[i];
for (var j = 0, _len = arr.length; j < _len; j++) {
first[end++] = arr[j];
}
}
first.length = end;
return first;
});
module.exports = exports; |
exports = restArgs(function(first, arrays) {
var end = first.length; |
mb13_8b_id.rs | #[doc = "Reader of register MB13_8B_ID"]
pub type R = crate::R<u32, super::MB13_8B_ID>;
#[doc = "Writer for register MB13_8B_ID"]
pub type W = crate::W<u32, super::MB13_8B_ID>;
#[doc = "Register MB13_8B_ID `reset()`'s with value 0"]
impl crate::ResetValue for super::MB13_8B_ID {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `EXT`"]
pub type EXT_R = crate::R<u32, u32>;
#[doc = "Write proxy for field `EXT`"]
pub struct EXT_W<'a> {
w: &'a mut W,
}
impl<'a> EXT_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u32) -> &'a mut W {
self.w.bits = (self.w.bits & !0x0003_ffff) | ((value as u32) & 0x0003_ffff);
self.w
}
}
#[doc = "Reader of field `STD`"]
pub type STD_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `STD`"]
pub struct STD_W<'a> {
w: &'a mut W,
}
impl<'a> STD_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07ff << 18)) | (((value as u32) & 0x07ff) << 18);
self.w
}
}
#[doc = "Reader of field `PRIO`"]
pub type PRIO_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `PRIO`"]
pub struct PRIO_W<'a> {
w: &'a mut W,
}
impl<'a> PRIO_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 29)) | (((value as u32) & 0x07) << 29);
self.w
}
}
impl R {
#[doc = "Bits 0:17 - Contains extended (LOW word) identifier of message buffer."]
#[inline(always)]
pub fn | (&self) -> EXT_R {
EXT_R::new((self.bits & 0x0003_ffff) as u32)
}
#[doc = "Bits 18:28 - Contains standard/extended (HIGH word) identifier of message buffer."]
#[inline(always)]
pub fn std(&self) -> STD_R {
STD_R::new(((self.bits >> 18) & 0x07ff) as u16)
}
#[doc = "Bits 29:31 - Local priority. This 3-bit fieldis only used when LPRIO_EN bit is set in MCR and it only makes sense for Tx buffers. These bits are not transmitted. They are appended to the regular ID to define the transmission priority."]
#[inline(always)]
pub fn prio(&self) -> PRIO_R {
PRIO_R::new(((self.bits >> 29) & 0x07) as u8)
}
}
impl W {
#[doc = "Bits 0:17 - Contains extended (LOW word) identifier of message buffer."]
#[inline(always)]
pub fn ext(&mut self) -> EXT_W {
EXT_W { w: self }
}
#[doc = "Bits 18:28 - Contains standard/extended (HIGH word) identifier of message buffer."]
#[inline(always)]
pub fn std(&mut self) -> STD_W {
STD_W { w: self }
}
#[doc = "Bits 29:31 - Local priority. This 3-bit fieldis only used when LPRIO_EN bit is set in MCR and it only makes sense for Tx buffers. These bits are not transmitted. They are appended to the regular ID to define the transmission priority."]
#[inline(always)]
pub fn prio(&mut self) -> PRIO_W {
PRIO_W { w: self }
}
}
| ext |
main.go | package main
import (
"image"
"image/color"
"image/png"
"io"
"math/cmplx"
"os"
)
func main() {
mandelbrot(os.Stdout)
}
func mandelbrot(out io.Writer) {
const (
xmin, ymin, xmax, ymax = -2, -2, +2, +2
width, height = 1024, 1024
)
img := image.NewRGBA(image.Rect(0, 0, width, height))
for py := 0; py < height; py++ {
y := float64(py)/height*(ymax-ymin) + ymin
for px := 0; px < width; px++ {
x := float64(px)/width*(xmax-xmin) + xmin
z := complex(x, y)
// Image point (px, py) represents complex value z.
img.Set(px, py, fractalize(z))
}
}
png.Encode(out, img)
}
func fractalize(z complex128) color.Color | {
const iterations, contrast = 200, 15
var v complex128
for n := uint8(0); n < iterations; n++ {
v = v*v + z
if cmplx.Abs(v) > 2 {
return color.Gray{255 - contrast*n}
}
}
return color.Black
} |
|
flood.py | #Contains all the functions required for assessing flood risk
#Exercise 2B - assessing flood risk by level:
def stations_level_over_threshold(stations, tol):
"""returns a list of tuples of stations with relative water level over tol."""
stations_over_threshold = []
for station in stations:
try: # containing the if statement within a try/except block automatically discards stations with no typical range data
if station.relative_water_level()>tol:
stations_over_threshold.append((station.name, station.relative_water_level()))
if type(station.relative_water_level()) == None:
stations_over_threshold.remove(station)
except:
if station.relative_water_level() == None:
pass
stations_over_threshold.sort(key=lambda x: x[1], reverse=True) # sorts the stations tuples by their second values, then reverses tuples to their original internal orders ([0],[1])
return stations_over_threshold
#Exercise 2C - identifying the most at-risk stations:
"""Returns a list of the N stations at which the water level (relative to the typical water level) is highest:"""
def stations_highest_rel_level(stations, N):
| filteredList = []
    for station in stations:
        # keep only stations that actually have relative water level data
        if station.relative_water_level() is not None:
            filteredList.append(station)
    # sort by relative level, highest first, and keep the top N stations
    filteredList.sort(key=lambda x: x.relative_water_level(), reverse=True)
    return filteredList[:N]
|
main.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use anyhow::{format_err, Result};
use diem_types::{account_address::AccountAddress, transaction::Transaction};
use diem_writeset_generator::{
encode_custom_script, encode_halt_network_transaction, encode_remove_validators_transaction,
};
use std::path::PathBuf;
use structopt::StructOpt;
#[derive(Debug, StructOpt)]
struct Opt {
    /// Path to the output file for the serialized transaction
#[structopt(long, short, parse(from_os_str))]
output: PathBuf,
#[structopt(subcommand)] // Note that we mark a field as a subcommand
cmd: Command,
}
#[derive(Debug, StructOpt)]
enum Command {
/// List of addresses to remove from validator set
#[structopt(name = "remove-validators")]
RemoveValidators { addresses: Vec<AccountAddress> },
/// Block the execution of any transaction in the network
#[structopt(name = "halt-network")]
HaltNetwork,
/// Build a custom file in templates into admin script
#[structopt(name = "build-custom-script")]
BuildCustomScript { script_name: String, args: String },
}
fn | (txn: Transaction, path: PathBuf) -> Result<()> {
let bytes = lcs::to_bytes(&txn).map_err(|_| format_err!("Transaction Serialize Error"))?;
std::fs::write(path.as_path(), bytes.as_slice())
.map_err(|_| format_err!("Unable to write to path"))
}
fn main() -> Result<()> {
let opt = Opt::from_args();
let transaction = match opt.cmd {
Command::RemoveValidators { addresses } => encode_remove_validators_transaction(addresses),
Command::HaltNetwork => encode_halt_network_transaction(),
Command::BuildCustomScript { script_name, args } => {
encode_custom_script(&script_name, &serde_json::from_str(args.as_str())?)
}
};
save_transaction(transaction, opt.output)
}
| save_transaction |
subject_public_key_info.rs | use crate::{oids, AlgorithmIdentifier};
use picky_asn1::wrapper::{BitStringAsn1, BitStringAsn1Container, IntegerAsn1, OctetStringAsn1};
use serde::{de, ser, Deserialize, Serialize};
use std::fmt;
#[derive(Debug, PartialEq, Clone)]
pub enum PublicKey {
Rsa(EncapsulatedRsaPublicKey),
Ec(EncapsulatedEcPoint),
Ed(EncapsulatedEcPoint),
}
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub struct RsaPublicKey {
pub modulus: IntegerAsn1, // n
pub public_exponent: IntegerAsn1, // e
}
pub type EncapsulatedRsaPublicKey = BitStringAsn1Container<RsaPublicKey>;
pub type EcPoint = OctetStringAsn1;
pub type EncapsulatedEcPoint = BitStringAsn1;
#[derive(Debug, PartialEq, Clone)]
pub struct SubjectPublicKeyInfo {
pub algorithm: AlgorithmIdentifier,
pub subject_public_key: PublicKey,
}
impl SubjectPublicKeyInfo {
pub fn new_rsa_key(modulus: IntegerAsn1, public_exponent: IntegerAsn1) -> Self {
Self {
algorithm: AlgorithmIdentifier::new_rsa_encryption(),
subject_public_key: PublicKey::Rsa(
RsaPublicKey {
modulus,
public_exponent,
}
.into(),
),
}
}
}
impl ser::Serialize for SubjectPublicKeyInfo {
fn serialize<S>(&self, serializer: S) -> Result<<S as ser::Serializer>::Ok, <S as ser::Serializer>::Error>
where
S: ser::Serializer,
{
use ser::SerializeSeq;
let mut seq = serializer.serialize_seq(Some(2))?;
seq.serialize_element(&self.algorithm)?;
match &self.subject_public_key {
PublicKey::Rsa(key) => seq.serialize_element(key)?,
PublicKey::Ec(key) => seq.serialize_element(key)?,
PublicKey::Ed(key) => seq.serialize_element(key)?,
}
seq.end()
}
}
impl<'de> de::Deserialize<'de> for SubjectPublicKeyInfo {
fn | <D>(deserializer: D) -> Result<Self, <D as de::Deserializer<'de>>::Error>
where
D: de::Deserializer<'de>,
{
struct Visitor;
impl<'de> de::Visitor<'de> for Visitor {
type Value = SubjectPublicKeyInfo;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a valid DER-encoded subject public key info")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: de::SeqAccess<'de>,
{
let algorithm: AlgorithmIdentifier = seq_next_element!(seq, AlgorithmIdentifier, "algorithm oid");
let subject_public_key = match Into::<String>::into(algorithm.oid()).as_str() {
oids::RSA_ENCRYPTION => PublicKey::Rsa(seq_next_element!(seq, SubjectPublicKeyInfo, "rsa key")),
oids::EC_PUBLIC_KEY => {
PublicKey::Ec(seq_next_element!(seq, SubjectPublicKeyInfo, "elliptic curves key"))
}
oids::ED25519 => PublicKey::Ed(seq_next_element!(seq, SubjectPublicKeyInfo, "curve25519 key")),
_ => {
return Err(serde_invalid_value!(
SubjectPublicKeyInfo,
"unsupported algorithm (unknown oid)",
"a supported algorithm"
));
}
};
Ok(SubjectPublicKeyInfo {
algorithm,
subject_public_key,
})
}
}
deserializer.deserialize_seq(Visitor)
}
}
#[cfg(test)]
mod tests {
use super::*;
use num_bigint_dig::BigInt;
#[test]
fn rsa_subject_public_key_info() {
let encoded = base64::decode(
"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsiLoIx\
mXaZAFRBKtHYZhiF8m+pYR+xGIpupvsdDEvKO92D6fIccgVLIW6p6sSNk\
oXx5J6KDSMbA/chy5M6pRvJkaCXCI4zlCPMYvPhI8OxN3RYPfdQTLpgPy\
wrlfdn2CAum7o4D8nR4NJacB3NfPnS9tsJ2L3p5iHviuTB4xm03IKmPPq\
saJy+nXUFC1XS9E/PseVHRuNvKa7WmlwSZngQzKAVSIwqpgCc+oP1pKEe\
J0M3LHFo8ao5SuzhfXUIGrPnkUKEE3m7B0b8xXZfP1N6ELoonWDK+RMgY\
IBaZdgBhPfHxF8KfTHvSzcUzWZojuR+ynaFL9AJK+8RiXnB4CJwIDAQAB",
)
.expect("invalid base64");
// RSA algorithm identifier
let algorithm = AlgorithmIdentifier::new_rsa_encryption();
check_serde!(algorithm: AlgorithmIdentifier in encoded[4..19]);
// RSA modulus and public exponent
let modulus = IntegerAsn1::from_bytes_be_signed(vec![
0x00, 0xb2, 0x22, 0xe8, 0x23, 0x19, 0x97, 0x69, 0x90, 0x5, 0x44, 0x12, 0xad, 0x1d, 0x86, 0x61, 0x88, 0x5f,
0x26, 0xfa, 0x96, 0x11, 0xfb, 0x11, 0x88, 0xa6, 0xea, 0x6f, 0xb1, 0xd0, 0xc4, 0xbc, 0xa3, 0xbd, 0xd8, 0x3e,
0x9f, 0x21, 0xc7, 0x20, 0x54, 0xb2, 0x16, 0xea, 0x9e, 0xac, 0x48, 0xd9, 0x28, 0x5f, 0x1e, 0x49, 0xe8, 0xa0,
0xd2, 0x31, 0xb0, 0x3f, 0x72, 0x1c, 0xb9, 0x33, 0xaa, 0x51, 0xbc, 0x99, 0x1a, 0x9, 0x70, 0x88, 0xe3, 0x39,
0x42, 0x3c, 0xc6, 0x2f, 0x3e, 0x12, 0x3c, 0x3b, 0x13, 0x77, 0x45, 0x83, 0xdf, 0x75, 0x4, 0xcb, 0xa6, 0x3,
0xf2, 0xc2, 0xb9, 0x5f, 0x76, 0x7d, 0x82, 0x2, 0xe9, 0xbb, 0xa3, 0x80, 0xfc, 0x9d, 0x1e, 0xd, 0x25, 0xa7,
0x1, 0xdc, 0xd7, 0xcf, 0x9d, 0x2f, 0x6d, 0xb0, 0x9d, 0x8b, 0xde, 0x9e, 0x62, 0x1e, 0xf8, 0xae, 0x4c, 0x1e,
0x31, 0x9b, 0x4d, 0xc8, 0x2a, 0x63, 0xcf, 0xaa, 0xc6, 0x89, 0xcb, 0xe9, 0xd7, 0x50, 0x50, 0xb5, 0x5d, 0x2f,
0x44, 0xfc, 0xfb, 0x1e, 0x54, 0x74, 0x6e, 0x36, 0xf2, 0x9a, 0xed, 0x69, 0xa5, 0xc1, 0x26, 0x67, 0x81, 0xc,
0xca, 0x1, 0x54, 0x88, 0xc2, 0xaa, 0x60, 0x9, 0xcf, 0xa8, 0x3f, 0x5a, 0x4a, 0x11, 0xe2, 0x74, 0x33, 0x72,
0xc7, 0x16, 0x8f, 0x1a, 0xa3, 0x94, 0xae, 0xce, 0x17, 0xd7, 0x50, 0x81, 0xab, 0x3e, 0x79, 0x14, 0x28, 0x41,
0x37, 0x9b, 0xb0, 0x74, 0x6f, 0xcc, 0x57, 0x65, 0xf3, 0xf5, 0x37, 0xa1, 0xb, 0xa2, 0x89, 0xd6, 0xc, 0xaf,
0x91, 0x32, 0x6, 0x8, 0x5, 0xa6, 0x5d, 0x80, 0x18, 0x4f, 0x7c, 0x7c, 0x45, 0xf0, 0xa7, 0xd3, 0x1e, 0xf4,
0xb3, 0x71, 0x4c, 0xd6, 0x66, 0x88, 0xee, 0x47, 0xec, 0xa7, 0x68, 0x52, 0xfd, 0x0, 0x92, 0xbe, 0xf1, 0x18,
0x97, 0x9c, 0x1e, 0x2, 0x27,
]);
check_serde!(modulus: IntegerAsn1 in encoded[28..289]);
let public_exponent: IntegerAsn1 = BigInt::from(65537).to_signed_bytes_be().into();
check_serde!(public_exponent: IntegerAsn1 in encoded[289..294]);
// RSA public key
let subject_public_key: EncapsulatedRsaPublicKey = RsaPublicKey {
modulus,
public_exponent,
}
.into();
check_serde!(subject_public_key: EncapsulatedRsaPublicKey in encoded[19..294]);
// full encode / decode
let info = SubjectPublicKeyInfo {
algorithm,
subject_public_key: PublicKey::Rsa(subject_public_key),
};
check_serde!(info: SubjectPublicKeyInfo in encoded);
}
}
| deserialize |
day_one.py |
# coding: utf-8
# # Day One
# ## Table of Contents
# 1. [Data Model](#Data-Model)
# 2. [Data Structures](#Data-Structures)
# 3. [Control Flow](#Control-Flow)
# 4. [Input and Output](#Input-and-Output)
# 5. [`os`](#os)
# 6. [`glob`](#glob)
# 7. [`subprocess`](#subprocess)
#
# Links to documentation will be provided at the beginning and end of each section. Look for: **DOCS**.
# In today's workshop, we'll learn how to combine data types into structures and how to use them for specific purposes. We will also cover looping and interacting with operating systems. Let's get started.
# ## Data Model
# [**DOCS**](https://docs.python.org/3/reference/datamodel.html)
#
# >Objects are Python’s abstraction for data. All data in a Python program is represented by objects or by relations between objects.
#
# Every object in Python has a **type**, a **value**, and an **identity**. We've already seen several data types, such as `int`, `float`, and `str`. An object's type determines its supported operations as well as the possible values it can take.
#
# In some cases, an object's value can change. We call these type of objects *mutable*. Objects whose values cannot be changed are known as *immutable*. The object type determines its mutability. Numbers and strings, for example, are immutable; lists and dictionaries, which we'll cover shortly, are mutable.
#
# To make this concrete, let's describe what an object's identity is. This can be thought of as an object's address in memory. Specifically, it's the memory address for the *value* of the object. Once an object has been created, its identity never changes.
# In[1]:
x = 'hello'
# In[2]:
hex(id(x))
# The variable `x`'s identity or memory address is `___________` (represented as a hexadecimal string). Note that the memory addresses will be different each time this code is run.
#
# What happens if we create a new variable, `y`, and set it equal to `x`?
# In[3]:
y = x
# In[4]:
hex(id(y))
# In[5]:
hex(id(x))
# The address in memory is the same because both variables *point* to (or reference) the same *value*.
#
# Now, let's make `x` take on some other value.
# In[6]:
x = 'goodbye'
# In[7]:
hex(id(x))
# Now, the address *is* different.
#
# Let's see what happens if we set `x` to equal `'hello'` once more.
# In[8]:
x = 'hello'
# In[9]:
hex(id(x))
# `x` is once again pointing to the memory address associated with `'hello'`.
#
# What does this have to do with mutability? It seems as though we were actually able to change `x`'s value. To answer this, we'll show an example using a mutable object—a list in this case.
# In[10]:
a = [1, 2, 3]
# In[11]:
hex(id(a))
# In[12]:
a.append(4)
a
# In[13]:
hex(id(a))
# Notice what happened. We added `4` to the list, but the memory address *did not* change. This is what it means to be mutable. The value in memory address `0x107f26608` was originally `[1, 2, 3]`, but is now `[1, 2, 3, 4]`. The address in memory for this object's value will never change.
# In[14]:
a.append('#python')
a
# In[15]:
hex(id(a))
# Now let's see what happens when we assign our list `a` to a new variable `b`.
# In[16]:
b = a
# In[17]:
b
# In[18]:
hex(id(b))
# That makes sense. `a` and `b` both reference the same object—`[1, 2, 3, 4, '#python']`.
#
# >Assignment statements in Python do not copy objects, they create bindings between a target and an object.
#
# If we modify `b`, what will happen to `a`?
# In[19]:
b[-1] = 'Python'
# In[20]:
b
# In[21]:
a
# In[22]:
hex(id(a)) == hex(id(b))
# The changes made to `b` have affected `a` because they both point to the same data. It's possible that this behavior is unwanted. As a solution, we can make a copy of the object so that modifying one does not affect the other. To do so, we can use the built-in `copy` module.
# In[23]:
import copy
# In[24]:
c = copy.copy(a)
# This is referred to as making a *shallow* copy. While the values in `a` and `c` are the same, their respective memory addresses are different.
# In[25]:
hex(id(a)) == hex(id(c))
# A shallow copy creates a new container (a list in this case)—which is why the addresses in memory are different—with *references* to the *contents* of the original object.
# In[26]:
hex(id(a[-1]))
# In[27]:
hex(id(c[-1]))
# The addresses in memory for the individual elements are the same for both lists. Because we've made a copy, though, we can now modify one list without affecting the other.
# In[28]:
c[-1] = 'PYTHON'
# In[29]:
c
# In[30]:
a
# What if we were dealing with nested mutable objects? For this, we'll use a dictionary.
# In[31]:
d0 = {'key' : {'nested' : 'thing'}}
d1 = copy.copy(d0)
# In[32]:
d1
# In[33]:
d1['key']['nested'] = 'dict'
# In[34]:
d0 == d1
# In[35]:
d0
# Our intention was to change `d1`, but `d0` was also changed. This is because shallow copies reference contents—they don't copy them. For this, the `copy` module provides the `deepcopy()` function. Let's try that again.
# In[36]:
d0 = {'key' : {'nested' : 'thing'}}
d1 = copy.deepcopy(d0)
d1['key']['nested'] = 'dict'
# In[37]:
d0 == d1
# In[38]:
d0
# In[39]:
d1
# Now that we've learned about mutability and copying objects, let's dive into data structures.
#
# Data model [**DOCS**](https://docs.python.org/3/reference/datamodel.html)
# ## Data Structures
# [**DOCS**](https://docs.python.org/3.1/tutorial/datastructures.html)
#
# A data structure can be thought of as a "container" for storing data that includes functions, called "methods," that are used to access and manipulate that data. Python has several built-in data structures.
# ### Basics
# #### Lists
# A list is a sequence of values. The values are called elements (or items) and can be of any type—integer, float, string, boolean, etc.
#
# As a simple example, consider the following list.
# In[40]:
[1, 2, 3]
# Notice how the list was constructed. We used square brackets around the list elements.
#
# Let's look at a few more examples.
# In[41]:
[1.0, 8.0, 6.8]
# In[42]:
['this', 'is', 'also', 'a', 'valid', 'list']
# In[43]:
[True, False, True]
# It's also fine to have a list with different element types.
# In[44]:
[1, 2.0, 'three']
# Lists can even be nested—which means you can have lists within lists.
# In[45]:
[350, 'barrows', 'hall', ['berkeley', 'CA']]
# This nesting can be arbitrarily deep, but it's not usually a good idea as it can get confusing. For example, it may be difficult to access specific items for an object like:
#
# ```python
# [[[1, 2], [3, 4, [5, 6]]], [7, 8, 9]]
# ```
#
# Speaking of accessing elements, let's describe how to do that. We'll first create a new list and assign it to a variable called `first_list`.
# In[46]:
first_list = [9, 8, 7.0, 6, 5.4]
# To access list elements, we use the square bracket notation. For example, if we're interested in the middle element—the "two-eth" element—we use the following.
# In[47]:
first_list[2]
# This is called indexing and the value inside of the brackets must be an integer. (Recall that indices in Python start at `0`.) A list can be thought of as a mapping (or correspondence) between indices and elements.
#
# Let's say you're interested in the *last* element of this list. How could you do that? If you know the length of the list, you could access it using something like:
#
# ```python
# first_list[len(first_list) - 1]
# ```
#
# Why is the `-1` needed?
#
# There is an easier way. Python provides negative indices that let you access elements from "back-to-front."
# In[48]:
first_list[-1]
# With this notation, the last element is accessed with `-1` (because `-0 == 0`). Use `-2` to access the second-to-last item, `-3` to access the third-to-last element, and so on.
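#
# For instance, indexing the same list from the back:
#
# ```python
# >>> first_list[-2]
# 6
# ```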
#
# We can also use the slice operator on lists to access multiple elements. The operator takes the following form: `[n:m]`. The first value before the colon (`:`) specifies the start position and the second value specifies the end position. The former is inclusive and the latter is exclusive. Let's take a look at what we mean.
#
# To motivate this, let's label the indices of our list.
#
# ```
# list: [9, 8, 7.0, 6, 5.4]
# index: [0, 1, 2, 3, 4]
# ```
#
# The code we'll submit is: `first_list[0:2]`. This tells Python to include the values at positions 0 and 1, but **not** at position 2.
# In[49]:
first_list[0:2]
# This is how Python has decided to make this operator work. This isn't intuitive, but thinking about it in the following way might help. If we consider the indices to be to the *left* of each item, we can think of the slice operator as accessing elements *between* those indices.
#
# If you try to access an item at an index that doesn't exist, Python will throw an `IndexError`:
# In[50]:
first_list[10]
# _from Raymond Hettinger_
#
# If, however, I try to access the same item with a slicing operation, e.g. `first_list[10:11]`, there is no error. Why?
# In[51]:
first_list[10:11]
# With lists, because they are mutable, we can modify elements.
# In[52]:
first_list[-1] = 5.43
# In[53]:
first_list
# #### Dictionaries
# A dictionary is a mapping from *keys* to *values*, where the keys, which must be unique, can be (almost) any type. A key and its associated value is referred to as a *key-value pair* or item. Dictionaries can be thought of as *unordered* key-value pairs.
#
# There are several ways to construct a dictionary. We can use braces (`{}`) or the built-in `dict()` function.
# In[54]:
{}
# In[55]:
dict()
# Of course, these are empty. Let's add comma separated key-value pairs to the first and use the assignment operator (`=`) for the second.
# In[56]:
{'one' : 1, 'two' : 2}
# In[57]:
dict(one=1, two=2)
# Keys and values are themselves separated by colons.
#
# Dictionaries are typically used for accessing values associated with keys. In the example above, we started to create a mapping between number words and their integer representations. Let's expand on this.
# In[58]:
nums = {'one' : 1, 'two' : 2, 'three' : 3, 'four' : 4, 'five' : 5, 'six' : 6}
# In[59]:
nums
# Notice that the key-value pairs are *not* in the order we specified when creating the dictionary. This isn't a problem, though, because we use the keys to look up the corresponding values. We do this using bracket notation, like we did with strings and lists.
# In[60]:
nums['five']
# If the key does not exist, you'll get an error.
# In[61]:
nums['seven']
# We can add the value for 'seven' by doing the following:
# In[62]:
nums['seven'] = 7
# In[63]:
nums
# We mentioned earlier that keys can be of almost any type. Values *can* be of any type and we can also mix types.
# In[64]:
mixed = {'one' : 1.0, 'UC Berkeley' : 'Cal', 350 : ['Barrows', 'Hall']}
# In[65]:
mixed
# In this example, we used string and integer keys. We could have actually used any *immutable* objects.
#
# Notice that we used a list as a value, which is valid. What if we tried using a list, which is mutable, as a key?
# In[66]:
{['this'] : 'will not work'}
# We get a `TypeError` saying that we can't use an unhashable type. What does this mean? In Python, dictionaries are implemented using hash tables. Hash tables use hash functions, which return integers given particular values (keys), to store and look up key-value pairs. For this to work, though, the keys have to be immutable, which means they can't be changed.
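#
# We can see the distinction directly: immutable objects are hashable, while mutable ones are not.
#
# ```python
# >>> hash('this') is not None   # strings are immutable and hashable
# True
# >>> hash(['this'])             # lists are mutable, hence unhashable
# Traceback (most recent call last):
#   ...
# TypeError: unhashable type: 'list'
# ```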
# #### Tuples
# A tuple is a sequence of values. The values, which are indexed by integers, can be of any type. This sounds a lot like lists, right?
#
# >Though tuples may seem similar to lists, they are often used in different situations and for different purposes. Tuples are immutable, and usually contain a heterogeneous sequence of elements.... Lists are mutable, and their elements are usually homogeneous....
#
# By convention, a tuple's comma-separated values are surrounded by parentheses.
# In[67]:
(1, 2, 3)
# Parentheses aren't necessary, though.
# In[68]:
t = 1, 2, 3
# In[69]:
type(t)
# The commas are what define the tuple. In fact, any set of multiple comma-separated objects *without* identifying symbols, such as brackets for lists, default to tuples.
#
# We can't create a tuple with a single element using the following syntax.
# In[70]:
type((1))
# We need to include a comma following the value.
# In[71]:
type((1,))
# The construction of `t`, above, is an example of *tuple packing*, where the values `1, 2, 3` are "packed" into a tuple.
#
# We can also perform the opposite operation, called *sequence unpacking*.
# In[72]:
a, b, c = t
# In[73]:
print(a, b, c)
# For this, the number of variables on the left must equal the number of elements in the sequence.
#
# This can be used with functions. In Python, functions can only return a single value. However, that value can be a tuple. In this case, you are effectively returning multiple values.
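#
# A minimal sketch (the function name `min_max` is ours, purely for illustration):
#
# ```python
# >>> def min_max(values):
# ...     return min(values), max(values)   # packed into a tuple
# ...
# >>> low, high = min_max([3, 1, 4, 1, 5])  # sequence unpacking
# >>> print(low, high)
# 1 5
# ```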
#
# Most list operators work on tuples. To access tuple elements, for example, we can use the bracket operator.
# In[74]:
t = ('a', 'b', 'c', 'd')
# In[75]:
t[0]
# We can also use the slice operator.
# In[76]:
t[1:3]
# Because tuples are immutable, we cannot modify tuple elements.
# In[77]:
t[0] = 'A'
# However, we can create a new tuple using existing tuples.
# In[78]:
t0 = 'A',
t1 = t[1:]
# In[79]:
t0 + t1
# #### Sets
# A set is an unordered collection of unique elements. Because sets are unordered, they do not keep track of element position or order of insertion. As a result, sets do not support indexing or slicing.
#
# >Basic uses include membership testing and eliminating duplicate entries. Set objects also support mathematical operations like union, intersection, difference, and symmetric difference.
#
# To construct a set, we can use braces (`{}`) or the built-in `set()` function.
# In[80]:
{3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3}
# This returns the *unique* values passed in. In this case, the digits between 1-9, inclusive.
#
# Let's say we had the following list of fruits.
# In[81]:
basket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana']
# We can find the unique fruits by using the `set()` function.
# In[82]:
set(basket)
# Unlike other built-in Python data structures, sets support differencing.
# In[83]:
{1, 2, 3} - {2}
# In[84]:
{1, 2, 3} - {1, 2, 3}
# Sets are useful for finding unique values and for performing mathematical operations like the ones previously mentioned.
#
# Python also provides "specialized" container types in its `collections` module. These are alternatives or, rather, complements, to Python's general-purpose, built-in containers that we've just covered: lists, dictionaries, tuples, and sets. For more information on these other data structures, see [the documentation](https://docs.python.org/3.5/library/collections.html#module-collections).
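#
# As a taste, `collections.Counter` tallies hashable items (a quick sketch reusing the fruit basket from earlier):
#
# ```python
# >>> from collections import Counter
# >>> Counter(['apple', 'orange', 'apple', 'pear', 'orange', 'banana'])
# Counter({'apple': 2, 'orange': 2, 'pear': 1, 'banana': 1})
# ```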
#
# In the following section, we'll explore several operators that the data structures covered above respond to.
# ### Operators
# There are several operators supported in Python. They are:
#
# * arithmetic
# * comparison (relational)
# * assignment
# * logical
# * bitwise
# * membership
# * identity
#
# We've already covered some of these either directly or in passing. We'll discuss how some of these operate on the data structures we've learned about thus far.
# #### Arithmetic
# The arithmetic operators are the ones you're probably most familiar with. These include `+`, `-`, `*`, `/`, and `**` to name a few. Of course, not all of these work on all Python data types.
#
# Previously, we saw how the `+` and `*` operators, which correspond to concatenation and repetition, operate on strings. It turns out that lists and tuples respond in similar ways.
# In[85]:
[1, 2, 3] + [4, 5, 6]
# In[86]:
(1, 2, 3) + (4, 5, 6)
# In[87]:
['Cal'] * 3
# In[88]:
('D-Lab',) * 3
# #### Comparison
# These types of operators "compare the values on either sides of them and decide the relation among them."
# In[89]:
[1, 2, 3] == [1, 2, 3]
# In[90]:
[0, 2, 3] == [1, 2, 3]
# >The comparison uses *lexicographical* ordering: first the first two items are compared, and **if they differ this determines the outcome of the comparison**; if they are equal, the next two items are compared, and so on, until either sequence is exhausted.
# In[91]:
[0, 2, 3] < [1, 2, 3]
# In the comparison above, because the `0` is less than the `1`, the result is `True`. Once this is determined, subsequent values are *not* compared. In the example below, the return value is `True` even though `20` is greater than `2`.
# In[92]:
[0, 20, 30] < [1, 2, 3]
# The behavior is the same with tuples.
# In[93]:
(0, 20, 30) < (1, 2, 3)
# In[94]:
(0, 1, 2) == (0, 1, 3)
# Interestingly, the behavior is slightly different with sets. Consider the list and set comparisons below.
# In[95]:
[0, 3, 4] < [1, 2, 9]
# In[96]:
set([0, 3, 4]) < set([1, 2, 9])
# With sets, `<` is not a lexicographical comparison at all; it tests whether the left-hand set is a *proper subset* of the right-hand one, which is why the result differs here.
#
# Comparisons can be made with dictionaries, too.
# In[97]:
{'one' : 1} == {'one' : 1}
# But we can only check for equality.
# In[98]:
{'one' : 1} < {'one' : 1}
# #### Membership
# These operators test for membership—that is, whether the particular item exists—in a sequence.
# In[99]:
'D-Lab' in ['D-Lab', 'UC Berkeley']
# In[100]:
1 in (0, 1, 2)
# In[101]:
99 in {1868, 350, 102}
# For dictionaries, membership is tested against the keys.
# In[102]:
cities = {'Berkeley' : 'California',
'Miami' : 'Florida',
'New York' : 'New York',
'Seattle' : 'Washington'}
# In[103]:
'Berkeley' in cities
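
# To test against the *values* instead, use the dictionary's `.values()` view. A quick sketch:
#
# ```python
# >>> 'California' in cities.values()
# True
# >>> 'California' in cities   # only the keys are checked here
# False
# ```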
# The other membership operator is `not in`.
# In[104]:
99 not in {1868, 350, 102}
# #### Identity
# To compare the memory locations of objects, use identity operators.
# In[105]:
a = 'this'
b = 'this'
# In[106]:
a is b
# In this case, the memory address for both `a` and `b` is the same because they are pointing to the same value.
#
# Is this behavior consistent across data types?
# In[107]:
a = 1868
b = 1868
# In[108]:
a is b
# Apparently not. In CPython, small integers (and some short strings) are cached and reused, so whether two equal objects share an identity depends on the value. This is why `is` should be reserved for identity checks and `==` used for value comparisons.
# In[109]:
hex(id(a)), hex(id(b))
# What if we set `b` to equal `a`?
# In[110]:
b = a
# In[111]:
a is b
# Like with the membership operator, the complement to `is` is `is not`.
# In[112]:
a = 'this'
b = 'that'
# In[113]:
a is not b
# ### Functions and Methods
# We're familiar with functions, but what are methods?
#
# >A method is a function that "belongs to" an object.
#
# We have already seen string methods. For example, from day zero:
#
# ```python
# >>> my_string = 'Dav Clark wears a beret'
# >>> my_string = my_string.replace('beret', 'speedo')
# >>> print(my_string)
# Dav Clark wears a speedo
# ```
#
# Here, `.replace()` is the method.
#
# Python data structures have methods, too.
# #### Lists
# Let's use `first_list`, which we created above, to demonstrate some list functions and methods.
#
# Let's say we wanted to know how many elements are in `first_list`. For this, we would use the `len()` function.
# In[114]:
len(first_list)
# What about the largest and smallest values?
# In[115]:
max(first_list), min(first_list)
# Let's say we wanted to add an element to `first_list`. For this, we can use the `.append()` method.
# In[116]:
first_list.append(2)
# Notice that methods are called using dot notation on the object we'd like to modify.
#
# By default, `.append()` adds an element to the *end* of a given list.
# In[117]:
first_list
# Notice how we invoked this method. We did not use an assignment operator (e.g., `x = x.append(y)`). This is because—and this is important—list methods are all void, which means that they *modify* lists and return `None`.
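#
# A quick sketch of why this matters: the assignment below captures `None`, not the list.
#
# ```python
# >>> items = [1, 2]
# >>> result = items.append(3)
# >>> print(result)
# None
# >>> items
# [1, 2, 3]
# ```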
#
# Sometimes when we're adding elements to a list, we may wish to insert one at a given position. For this, we can use the `.insert()` method. It takes two arguments—the first is the *position* and the second is the *value*. Let's say we wanted to add an item to the front of the list. We could do it using:
# In[118]:
first_list.insert(0, 10)
# In[119]:
first_list
# Let's append another value to the list.
# In[120]:
first_list.append(2)
# Now, let's count how many times the value `2` appears.
# In[121]:
first_list.count(2)
# In[122]:
first_list
# Let's say we wanted to remove one of the `2`s.
# In[123]:
first_list.remove(2)
# In[124]:
first_list
# The remove method removes the *first* item in the list that matches the value in the parentheses.
#
# In some cases, we might want to know the index value for a certain list element. We can use `.index()` for this.
# In[125]:
first_list.index(5.43)
# The value `5.43` can be found at index `5`.
#
# More information on list methods can be found [here](https://docs.python.org/3.5/tutorial/datastructures.html#more-on-lists).
# #### Dictionaries
# Let's use our `nums` dictionary to demonstrate some `dict` methods.
# In[126]:
nums
# The `len()` function we saw above also works on dictionaries. It returns the number of items in the object.
# In[127]:
len(nums)
# We might be interested in getting a list of the keys in `nums`. The `.keys()` method returns a list with this information.
# In[128]:
nums.keys()
# We can do the same for values.
# In[129]:
nums.values()
# To add to the dictionary, we can use the `.update()` method.
# In[130]:
nums.update(eight=8)
# In[131]:
nums
# Notice that we don't use quotation marks around the key name `eight`.
#
# If we'd like to remove an item, we can use the `.pop()` method. This removes the item—the key-value pair—*and* returns the value.
# In[132]:
nums.pop('one')
# In[133]:
nums
# We've successfully removed `{'one' : 1}` from `nums`.
# #### Tuples
# Tuples have very few methods (just `.count()` and `.index()`), a consequence of their immutability; there is nothing like `.append()` or `.remove()` that would modify them in place.
# #### Sets
# There are several set methods. They can be used for updating set objects or for performing mathematical operations. For example, we can add an element to set `s`.
# In[134]:
s = {1, 8, 6, 8}
# In[135]:
s
# In[136]:
s.add(0)
# In[137]:
s
# We can also remove set elements.
# In[138]:
s.remove(1)
# In[139]:
s
# Python supports several mathematical operations on sets. We can check the intersection—or overlap—of two sets, for example.
# In[140]:
{1, 2, 3} & {3, 4, 5} # or {1, 2, 3}.intersection({3, 4, 5})
# Another common set operation is the union, which basically combines sets.
# In[141]:
{0, 1} | {1, 2} # or {0, 1}.union({1, 2})
# Above, we saw that `{1, 2, 3} - {2}` resulted in `{1, 3}`. However, if the second set had more values in it, those values would not be represented in the final set. Python sets allow you to calculate the symmetric difference:
# In[142]:
{1, 2, 3} ^ {3, 4, 5}
# Along with testing for supersets and subsets
# In[143]:
{1, 2, 3} > {2, }
# Data structures [**DOCS**](https://docs.python.org/3.1/tutorial/datastructures.html)
# ## Control Flow
# [**DOCS**](https://docs.python.org/3/tutorial/controlflow.html)
# ### `for`
# In Python, a `for` statement iterates over items in a sequence—such as strings, lists, and tuples—in the order that they appear. `for` loops have the following syntax.
#
# ```Python
# for item in sequence:
# do_something_with(item)
# ```
#
# > side note - for whatever reason, some students have a really hard time with for loop syntax. Emphasize that in `for x in sequence`, `x` is an arbitrary name so that you can refer to the object returned by the iterator while you are inside of the loop. You could also use `for dinosaur in sequence`, but this reduces readability in your code
#
# The `sequence` object should be iterable. The body statements are executed once for each item in the sequence. This is referred to as traversing the sequence. The loop ends when there are no more elements in the sequence.
#
# Let's look at some examples.
# In[144]:
text_var = 'berkeley'
# In[145]:
for c in text_var:
print(c)
# With strings, the `for` statement iterates over each character. With lists (or tuples), on the other hand, each list element is iterated over.
# In[146]:
list_var = [350, 'Barrows', 'Hall']
# In[147]:
for e in list_var:
print(e)
# With dictionaries, `for` loops iterate over keys.
# In[148]:
for k in {'one' : 1, 'two' : 2, 'three' : 3}:
print(k, end=" ")
# If we'd like a loop that iterates a given number of times or over a sequence of numbers, we can use the `range` object.
# In[149]:
for v in range(4):
print(v, end=" ")
# ### `while`
# Another way to achieve this—to iterate a given number of times—is to use the `while` loop.
# In[150]:
n = 0
while n < 4:
print(n, end=" ")
n += 1
print('\ndone')
# In this example, we have to increment `n` with each iteration of the loop. The body statements in `while` loops repeatedly execute as long as the header condition evaluates to `True`. Once the loop ends, program control passes to the line immediately following the loop.
#
# With `while` loops, there are two possibilities to be aware of. First, it's possible that some `while` loops never execute. Using the code above, if the value of `n` is initially `4` or greater, only `'done'` will be printed.
# In[151]:
n = 4
while n < 4:
print(n, end=" ")
n += 1
print('\ndone')
# Above, because the condition evaluates to `False`, the loop body is skipped and the first statement after the `while` loop is executed.
#
# Second, some `while` loops may run indefinitely. This is referred to as an infinite loop and happens when the condition *never* evaluates to `False`. Here is an example.
#
# ```Python
# n = 4
# while n >= 4:
# print(n, end=" ")
# n += 1
# print('\ndone')
# ```
# ### `if`
# In many cases, it's useful to control the order in which statements or function calls are executed or evaluated. A control flow statement determines which path or paths in a program should be followed. Control flow statements, for example, can:
#
# * execute a set of statements if a condition or certain conditions are met
# * execute a set of statements `n` times until a condition or certain conditions are met
# * stop the execution of a program
#
# How can we achieve this? The most well-known statement type is the `if` statement.
# In[152]:
x = 0
# In[153]:
if x == 0:
print('x is zero')
# `if` statements make use of boolean expressions. If the expression (or set of expressions) evaluates to `True`, the indented statement gets executed. Otherwise, nothing happens.
# In[154]:
x = 1
# In[155]:
if x == 0:
print('x is zero')
# The code above is referred to as a clause. Clauses contain "headers" and "bodies." Clause headers begin with identifying keywords—in this case, `if`—include boolean expressions, and end with colons. The body is a group of indented statements controlled by the clause. This is also known as a "block."
#
# Compound statements are made up of one or more clauses. For example, there might be two possibilities in which case we use the `else` keyword. We can combine the above as follows.
# In[156]:
if x == 0:
print('x is zero')
else:
print('x is not zero')
# Notice that clause headers are at the same indentation level.
#
# When there are more than two possibilities, we can use what are called chained conditionals. For this, we use the `elif` keyword.
# In[157]:
if x == 0:
print('x is zero')
elif x < 0:
print('x is negative')
elif x > 0:
print('x is positive')
# Of course, the code above only works if `x` is numeric. Assuming this is the case, all possible values of `x` are listed. Because of this, we can change the last clause (`elif x > 0`) to `else`.
#
# There isn't a "right" way to do this. A good approach is to write it such that it's easily readable for yourself and others.
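#
# For instance, with `else` as the catch-all, the chain above could be written as follows (a sketch, not an executed cell):
#
# ```Python
# if x == 0:
#     print('x is zero')
# elif x < 0:
#     print('x is negative')
# else:
#     print('x is positive')
# ```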
#
# What if `x` is *not* numeric? With the code as is, we'll get a `TypeError`. So, let's generalize what we have and wrap it in a function.
# In[158]:
def x_ | ):
if type(x) is str:
print('x is str')
elif type(x) in [int, float]:
if x == 0:
print('x is zero')
elif x < 0:
print('x is negative')
elif x > 0:
print('x is positive')
else:
print('invalid x value')
# Before we call our function, let's explain what's going on. Our function, as defined, is an example of a "nested conditional." We first perform a type check and, if `x` is numeric, another set of conditions is checked.
# In[159]:
x_is('ucb')
# In[160]:
x_is(1)
# In[161]:
x_is(0)
# In[162]:
x_is([1, 2, 3])
# In[163]:
x_is(None)
# Control flow [**DOCS**](https://docs.python.org/3/tutorial/controlflow.html)
# ## Input and Output
# [**DOCS**](https://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files)
#
# Interacting with data in files is a common task in Python. These can be plain text files, comma-delimited (CSV) files, or any other number of file formats.
#
# To open files, we can use the built-in `open()` function. There is a file named `lorem-ipsum.txt` in the `data/` directory that we'll use to learn about file input and output.
#
# The `open()` function is typically used with two arguments—the filename and the "mode." The mode describes how the file will be used. The default is `'r'`, which stands for "read only."
# In[164]:
f = open('../data/01_lorem-ipsum.txt', 'r')
# `f` is a file object. There are several methods we can use to interact with the file's contents.
#
# The `.read(size)` method reads the contents of the file object. The optional numeric argument, *`size`*, corresponds to the number of bytes that should be read. This is useful if the data file is large. If we omit `size`, the entire contents of the file will be read and returned.
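#
# For instance, `f.read(20)` would return at most the first 20 bytes of the file as a string (a sketch, not an executed cell):
#
# ```Python
# f.read(20)
# ```
#
# Below, we omit `size` and read the entire file.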
# In[165]:
f.read()
# What happens if we try to call `f.read()` again? "If the end of the file has been reached, `f.read()` will return an empty string (`''`)." In this situation, the "cursor" is at the end of the file and has nothing more to read.
#
# Because we'd like to show a few other methods, we can return to the beginning of the file using the `.seek()` method, passing in `0` as the argument.
# In[166]:
f.seek(0)
# Let's say we wanted to read the file, line-by-line. We can accomplish this using the `.readline()` method. The end of a "line" is identified by the presence of a new line character, `\n`. You can see some in the text output above.
# In[167]:
f.readline()
# In[168]:
f.readline()
# And so on.
#
# If you ever need to know the file object's current position, use the `.tell()` method.
# In[169]:
f.tell()
# This represents the number of *bytes* from the beginning of the file.
#
# We can also loop over the file object. Let's return to the start of the file first.
# In[170]:
f.seek(0)
# In[171]:
for line in f:
print(line)
# When we're done interacting with a file, that file should always be closed.
# In[172]:
f.close()
# We can always check whether a file is closed by using the following.
# In[173]:
f.closed
# The `with` keyword in Python ensures that files are properly closed after its associated code is executed. This is true even if an exception is raised. Using the `with` keyword is recommended.
#
# Let's print each line on our document using this syntax.
# In[174]:
with open('../data/01_lorem-ipsum.txt', 'r') as f:
for line in f:
print(line)
# We can also check that the file was, indeed, closed.
# In[175]:
f.closed
# What about writing to a file? There are two primary modes we can use for this: `'w'` for writing only and `'a'` for appending to a file. If a file opened in `'w'` mode already exists, it will be overwritten. Opening a file in `'a'` mode simply allows lines to be added to the end of an existing file.
#
# Let's start by creating a new file.
# In[176]:
with open('first-write.txt', 'w') as f:
f.write('this is our first line\n')
f.write('this is our last line')
# Now, let's check the contents of the file.
# In[177]:
with open('first-write.txt', 'r') as f:
for line in f:
print(line)
# Note that while we've been using `f` to identify our file object, we can use any valid variable name.
#
# Now, let's append to our file.
# In[178]:
with open('first-write.txt', 'a') as append_file:
append_file.write('\nthis is the real last line')
# Notice that we add a new line character to the beginning of this third line.
# In[179]:
with open('first-write.txt') as infile:
for row in infile:
print(row)
# In the code above, we use `row` where we had previously used `line`. We did that to serve as a reminder that the variable names used are not special in any way. It is, however, always a good idea to use descriptive variable names that make reading the code more understandable. This is part of making code "readable." For a bit more on this, see [here](http://docs.python-guide.org/en/latest/writing/style/), [here](https://www.python.org/dev/peps/pep-0008/), and [here](https://github.com/amontalenti/elements-of-python-style).
#
# The `open()` function can take a variety of file types. We've seen examples of how to use this with a `.txt` file.
#
# The CSV (comma separated values) format is "the most common import and export format for spreadsheets and databases."
#
# >[A] comma-separated values (CSV) file stores tabular data (numbers and text) in plain text. Each line of the file is a data record. Each record consists of one or more fields, separated by commas.
#
# We can open comma-delimited CSV files with `open()`, too. Let's open an example CSV file in `data/` called `roster.csv`.
# In[180]:
with open('../data/01_roster.csv', 'r') as roster:
for student_data in roster:
print(student_data)
# This file includes some made-up student information—a four-digit ID number, academic status, and demographic data.
#
# In some cases—say, if we need to calculate the average age of these students—we don't actually want to iterate over the first row, which is often called the "header."
# In[181]:
with open('../data/01_roster.csv', 'r') as roster:
next(roster)
for student_data in roster:
print(student_data)
# We do this using the `next()` function, which just goes to the next line. In this case, since we're starting at the top of the file, it goes to the second line.
#
# Now, let's say we wanted to create a list of the six student ages. How might we go about doing that? One approach might be to split each line on commas to extract the age. This would work except for the fact that student `2109`'s department *includes* a comma in the value.
#
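# To see why, consider a hypothetical record whose department value contains a comma (the values below are made up for illustration):
#
# ```Python
# record = '2109,"History, Art",24'
# record.split(',')
# # ['2109', '"History', ' Art"', '24'] -- the quoted field is split in two,
# # shifting every later field by one position
# ```
#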
# To help with situations like these, Python has a built-in `csv` module which includes lots of functionality for working with these types of files. Let's show how we could use this to calculate the average age of the students.
# In[182]:
import csv
# In[183]:
ages = []
with open('../data/01_roster.csv', 'r') as f:
next(f)
roster = csv.reader(f, delimiter=',', quotechar='"')
for student_data in roster:
ages.append(int(student_data[3]))
# The `reader()` function allows us to specify the delimiter and the quote character. The quote character, in this case, is the quotation mark (`"`). CSV files often wrap string values in quotes (or other characters) if they include the delimiter within them. The `reader()` function parses each line as a list of strings, taking into consideration the delimiter and quote character. This is why we can select the element at index `3` in `student_data` and why we change (or cast) its type to `int`. As we iterate over each line, we add the age value to `ages`.
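#
# As an aside, the `csv` module also provides `csv.DictReader`, which uses the file's header row to map each record to a dictionary. This can make column access more readable (a sketch; the column name `'age'` is an assumption about the file's header):
#
# ```Python
# with open('../data/01_roster.csv', 'r') as f:
#     roster = csv.DictReader(f)
#     ages = [int(row['age']) for row in roster]
# ```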
#
# Now, we can create a new variable that holds the ages and calculate the average.
# In[184]:
ages_mean = sum(ages) / len(ages)
# In[185]:
print('The average age of students in the roster is: %.2f' % ages_mean)
# The `%.2f` format specifier simply instructs Python to print the value in `ages_mean` to two decimal places.
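#
# An equivalent using an f-string (available in Python 3.6 and later) would be:
#
# ```Python
# print(f'The average age of students in the roster is: {ages_mean:.2f}')
# ```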
# Input output [**DOCS**](https://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files)
# ## `os`
# [**DOCS**](https://docs.python.org/3/library/os.html)
#
# It is often useful and sometimes necessary to interact with the operating system. For example, we might be interested in modifying file paths or getting a list of files in a given directory. Python's built-in `os` module provides "operating system dependent functionality."
#
# To start, let's import `os`.
# In[186]:
import os
# Let's begin by listing our current working directory.
# In[187]:
os.getcwd()
# We know we have a `data/` directory in our repository, but we might not know its contents. We can get that information by using the following.
# In[188]:
os.listdir('../data/')
# This results in a list of the entries in the directory (excluding `.` and `..`). Notice that we're able to specify a *relative* path with `listdir()`.
#
# If we were writing a Python script that used one of these files, we might want to include checks for whether or not the files exist. We can also accomplish this with `os`. First, we can check if a directory exists.
# In[189]:
os.path.isdir('../data/')
# We can also check to see if a file exists.
# In[190]:
os.path.isfile('../data/01_roster.csv')
# Both of these return a Boolean value. One way these could be used is in conjunction with `if` statements. An alternative, the `os.path.exists()` function, checks for either files or directories.
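#
# For example, a script might guard file access like this (an illustrative sketch):
#
# ```Python
# if os.path.exists('../data/01_roster.csv'):
#     print('found the roster file')
# else:
#     print('roster file is missing')
# ```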
#
# If a directory doesn't exist, we can create it from within Python. This is accomplished using the `mkdir()` function, which takes a file path as an argument.
# In[191]:
os.mkdir('newdir')
# Let's check the contents of the current directory.
# In[192]:
os.listdir()
# We can use the `rmdir()` function to remove `newdir/`.
# In[193]:
os.rmdir('newdir')
# For more information on the available functions, see [the documentation](https://docs.python.org/3/library/os.html#process-parameters).
#
# `os` [**DOCS**](https://docs.python.org/3/library/os.html)
# ## `glob`
# [**DOCS**](https://docs.python.org/3/library/glob.html)
#
# It's sometimes necessary to find file or pathnames matching a particular pattern. Python's built-in `glob` module uses Unix shell-style wildcards for pattern matching. Note that these are different from regular expressions.
#
# There is no shell variable (e.g., `$PATH`) or tilde (`~`, which typically refers to the "home" directory) expansion in `glob`. In addition, `glob` does not show hidden files—those that start with dots (`.`).
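#
# If you do need tilde expansion, one option is to expand the pattern yourself with `os.path.expanduser()` before passing it to `glob` (a sketch; the path shown is hypothetical):
#
# ```Python
# import os
# glob.glob(os.path.expanduser('~/notebooks/*.ipynb'))
# ```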
#
# Below we describe the behavior of the shell-style wildcards.
#
# Pattern | Meaning
# ------- | -------
# `*` | matches everything
# `?` | matches any single character
# `[seq]` | matches any character in seq
# `[!seq]` | matches any character not in seq
#
# Above, when we used `os.listdir()` in our current directory, the returned list included the Jupyter notebook files as well as a directory and the `.ipynb_checkpoints` file. Let's see what `glob` returns.
# In[194]:
import glob
# In[195]:
glob.glob('*')
# Notice that the list does not include `.ipynb_checkpoints`.
#
# Let's use `glob` to show only the `.ipynb` files.
# In[196]:
glob.glob('*.ipynb')
# If we want directories only.
# In[197]:
glob.glob('*/')
# The `*` matches zero or more characters.
#
# Let's create a few directories (and a file) to make this concrete.
# In[198]:
get_ipython().system('mkdir test')
get_ipython().system('mkdir test1')
get_ipython().system('mkdir test10')
get_ipython().system('mkdir test100')
get_ipython().system('touch test.txt')
# Note that the `!` before each line above allows us to run shell commands from within the notebook.
# In[199]:
glob.glob('test*')
# This returns any file or directory whose name begins with `'test'`, followed by zero or more additional characters.
#
# We can also match directories only.
# In[200]:
glob.glob('test*/')
# To match a single character, we can use the `?` wildcard character. This matches any character in the specified position of the name.
# In[201]:
glob.glob('test?')
# In this case, the only match is `test1`, which we know is a directory.
#
# Next, let's show what the character range (`[]`) wildcard can do. We'll create a few more directories (we'll clean this up when we're done).
# In[202]:
get_ipython().system('mkdir tset0')
get_ipython().system('mkdir tset1')
get_ipython().system('mkdir tset5')
get_ipython().system('mkdir tset10')
get_ipython().system('mkdir tset50')
# The character range wildcard matches a single character in the specified range.
# In[203]:
glob.glob('tset[0-1]')
# The code above matches files or directories that start with `tset` and that end with either `0` or `1`. If we were to have used `0-9` in the brackets, it would have also returned `'tset5'`.
#
# If we want the directories that end with *two* digits, we can do the following.
# In[204]:
glob.glob('tset[0-9][0-9]')
# The character range wildcard also works on letters.
# In[205]:
glob.glob('t[a-z][a-z]t?')
# This matches files or directories that begin with a `'t'` and are followed by two letters, a `'t'`, and a single character.
#
# An alternative way of getting the same result is as follows.
# In[206]:
glob.glob('t??t?')
# This is because we don't have any files or directories with numbers in the second and third positions.
#
# Let's clean up our directory.
# In[207]:
get_ipython().system('rm -rf test*')
get_ipython().system('rm -rf tset*')
# `glob` [**DOCS**](https://docs.python.org/3/library/glob.html)
# ## `subprocess`
# [**DOCS**](https://docs.python.org/3/library/subprocess.html)
#
# >A running program is called a **process**.
#
# It contains code and its associated activity (or state). For example, this includes memory, lists of open files, etc.
#
# Programs, which are processes, can also create new processes. These are known as **subprocesses** and run independently of the processes which created (or spawned) them. This means the new process can run at the same time as the original.
#
# Python's `subprocess` module provides an interface for creating and working with additional processes.
#
# When might we want to spawn new processes? One example is executing a Python script—much like you would from the command line—within Python. Although we know that we can use the `!` to run shell commands, this only works from within the notebook. So, let's use `subprocess` to execute a Python script in `scripts/` named `simple.py`.
# In[208]:
import subprocess
# In[209]:
subprocess.check_output(['python', '../scripts/simple.py'])
# This file prints `IOKN2K!` (and a new line character, `\n`), which is an abbreviation for, "it's okay not to know!"
#
# With `check_output()`, the command to be executed must be passed in as a list. Each argument of the command should be a separate list element. `check_output()` lets us execute an external command and collect its output.
#
# The `b''` prefix indicates that the returned value is a bytes type as opposed to a `str` type. If needed, we can convert this using the following.
# In[210]:
subprocess.check_output(['python', '../scripts/simple.py']).decode('utf-8')
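# As an aside, more recent code often uses `subprocess.run()`, which can capture and decode output in one step via `capture_output=True` and `text=True` (both available in Python 3.7 and later; a sketch):
#
# ```Python
# result = subprocess.run(['python', '../scripts/simple.py'],
#                         capture_output=True, text=True)
# result.stdout  # 'IOKN2K!\n'
# ```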
# `subprocess` [**DOCS**](https://docs.python.org/3/library/subprocess.html)
| is(x |
transport.go | package ssh
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"github.com/pkg/errors"
"github.com/mutagen-io/mutagen/pkg/agent"
"github.com/mutagen-io/mutagen/pkg/agent/transport"
"github.com/mutagen-io/mutagen/pkg/process"
"github.com/mutagen-io/mutagen/pkg/ssh"
)
const (
// connectTimeoutSeconds is the number of seconds to use for OpenSSH's
// ConnectTimeout configuration option.
connectTimeoutSeconds = 5
// serverAliveIntervalSeconds is the number of seconds to use for OpenSSH's
// ServerAliveInterval configuration option. Multiplied by
// serverAliveCountMax, it effectively limits the maximum allowed latency.
serverAliveIntervalSeconds = 10
// serverAliveCountMax is the count to use for OpenSSH's ServerAliveCountMax
// configuration option.
serverAliveCountMax = 1
)
// sshTransport implements the agent.Transport interface using SSH.
type sshTransport struct {
// user is the SSH user under which agents should be invoked.
user string
// host is the target host.
host string
// port is the target port.
port uint16
// prompter is the prompter identifier to use for prompting.
prompter string
}
// NewTransport creates a new SSH transport using the specified parameters.
func NewTransport(user, host string, port uint16, prompter string) (agent.Transport, error) |
// Copy implements the Copy method of agent.Transport.
func (t *sshTransport) Copy(localPath, remoteName string) error {
// HACK: On Windows, we attempt to use SCP executables that might not
// understand Windows paths because they're designed to run inside a POSIX-
// style environment (e.g. MSYS or Cygwin). To work around this, we run them
// in the same directory as the source file and just pass them the source
// base name. In order to compute the working directory, we need the local
// path to be absolute, but fortunately this is the case anyway for paths
// supplied to agent.Transport.Copy. This works fine on non-Windows-POSIX
// systems as well. We probably don't need this IsAbs sanity check, since
// path behavior is guaranteed by the Transport interface, but it's better
// to have it as an invariant check.
if !filepath.IsAbs(localPath) {
return errors.New("scp source path must be absolute")
}
workingDirectory, sourceBase := filepath.Split(localPath)
// Compute the destination URL.
// HACK: Since the remote name is supposed to be relative to the user's home
// directory, we'd ideally want to specify a URL of the form
// [user@]host:~/remoteName, but the ~/ paradigm isn't understood by
// Windows. Consequently, we assume that the default destination for SCP
// copies without a path prefix is the user's home directory, i.e. that the
// default working directory for the SCP receiving process is the user's
// home directory. Since we already make the assumption that the home
// directory is the default working directory for SSH commands, this is a
// reasonable additional assumption.
destinationURL := fmt.Sprintf("%s:%s", t.host, remoteName)
if t.user != "" {
destinationURL = fmt.Sprintf("%s@%s", t.user, destinationURL)
}
// Set up arguments.
var scpArguments []string
scpArguments = append(scpArguments, ssh.CompressionFlag())
scpArguments = append(scpArguments, ssh.ConnectTimeoutFlag(connectTimeoutSeconds))
scpArguments = append(scpArguments, ssh.ServerAliveFlags(serverAliveIntervalSeconds, serverAliveCountMax)...)
if t.port != 0 {
scpArguments = append(scpArguments, "-P", fmt.Sprintf("%d", t.port))
}
scpArguments = append(scpArguments, sourceBase, destinationURL)
// Create the process.
scpCommand, err := ssh.SCPCommand(context.Background(), scpArguments...)
if err != nil {
return errors.Wrap(err, "unable to set up SCP invocation")
}
// Set the working directory.
scpCommand.Dir = workingDirectory
// Set the process attributes.
scpCommand.SysProcAttr = transport.ProcessAttributes()
// Create a copy of the current environment.
environment := os.Environ()
// Add locale environment variables.
environment = addLocaleVariables(environment)
// Set prompting environment variables.
environment, err = SetPrompterVariables(environment, t.prompter)
if err != nil {
return errors.Wrap(err, "unable to create prompter environment")
}
// Set the environment.
scpCommand.Env = environment
// Run the operation.
if err = scpCommand.Run(); err != nil {
return errors.Wrap(err, "unable to run SCP process")
}
// Success.
return nil
}
// Command implements the Command method of agent.Transport.
func (t *sshTransport) Command(command string) (*exec.Cmd, error) {
// Compute the target.
target := t.host
if t.user != "" {
target = fmt.Sprintf("%s@%s", t.user, t.host)
}
// Set up arguments. We intentionally don't use compression on SSH commands
// since the agent stream uses the FLATE algorithm internally and it's much
// more efficient to compress at that layer, even with the slower Go
// implementation.
var sshArguments []string
sshArguments = append(sshArguments, ssh.ConnectTimeoutFlag(connectTimeoutSeconds))
sshArguments = append(sshArguments, ssh.ServerAliveFlags(serverAliveIntervalSeconds, serverAliveCountMax)...)
if t.port != 0 {
sshArguments = append(sshArguments, "-p", fmt.Sprintf("%d", t.port))
}
sshArguments = append(sshArguments, target, command)
// Create the process.
sshCommand, err := ssh.SSHCommand(context.Background(), sshArguments...)
if err != nil {
return nil, errors.Wrap(err, "unable to set up SSH invocation")
}
// Force it to run detached.
sshCommand.SysProcAttr = transport.ProcessAttributes()
// Create a copy of the current environment.
environment := os.Environ()
// Add locale environment variables.
environment = addLocaleVariables(environment)
// Set prompting environment variables.
environment, err = SetPrompterVariables(environment, t.prompter)
if err != nil {
return nil, errors.Wrap(err, "unable to create prompter environment")
}
// Set the environment.
sshCommand.Env = environment
// Done.
return sshCommand, nil
}
// ClassifyError implements the ClassifyError method of agent.Transport.
func (t *sshTransport) ClassifyError(processState *os.ProcessState, errorOutput string) (bool, bool, error) {
// SSH faithfully returns exit codes and error output, so we can use direct
// methods for testing and classification. Note that we may get POSIX-like
// error codes back even from Windows remotes, but that indicates a POSIX
// shell on the remote and thus we should continue connecting under that
// hypothesis (instead of the cmd.exe hypothesis).
if process.IsPOSIXShellInvalidCommand(processState) {
return true, false, nil
} else if process.IsPOSIXShellCommandNotFound(processState) {
return true, false, nil
} else if process.OutputIsWindowsInvalidCommand(errorOutput) {
// A Windows invalid command error doesn't necessarily indicate that
// the agent isn't installed, but instead usually indicates that we were
// trying to invoke the agent using the POSIX shell syntax in a Windows
// cmd.exe environment. Thus we return false here for re-installation,
// but we still indicate that this is a Windows platform to potentially
// change the dialer's platform hypothesis and force it to reconnect
// under the Windows hypothesis.
// HACK: We're relying on the fact that the agent dialing logic will
// attempt a reconnect under the cmd.exe hypothesis, which it will, but
// this is potentially a bit fragile. We've sort of codified this
// behavior in the transport interface definition, but it's hard to make
// super explicit.
return false, true, nil
} else if process.OutputIsWindowsCommandNotFound(errorOutput) {
return true, true, nil
}
// Just bail if we weren't able to determine the nature of the error.
return false, false, errors.New("unknown error condition encountered")
}
| {
return &sshTransport{
user: user,
host: host,
port: port,
prompter: prompter,
}, nil
} |
tx_builder.rs | // Bitcoin Dev Kit
// Written in 2020 by Alekos Filini <[email protected]>
//
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! Transaction builder
//!
//! ## Example
//!
//! ```
//! # use std::str::FromStr;
//! # use bitcoin::*;
//! # use bdk::*;
//! # use bdk::wallet::tx_builder::CreateTx;
//! # let to_address = Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt").unwrap();
//! # let wallet = doctest_wallet!();
//! // create a TxBuilder from a wallet
//! let mut tx_builder = wallet.build_tx();
//!
//! tx_builder
//! // Create a transaction with one output to `to_address` of 50_000 satoshi
//! .add_recipient(to_address.script_pubkey(), 50_000)
//! // With a custom fee rate of 5.0 satoshi/vbyte
//! .fee_rate(FeeRate::from_sat_per_vb(5.0))
//! // Only spend non-change outputs
//! .do_not_spend_change()
//! // Turn on RBF signaling
//! .enable_rbf();
//! let (psbt, tx_details) = tx_builder.finish()?;
//! # Ok::<(), bdk::Error>(())
//! ```
use std::collections::BTreeMap;
use std::collections::HashSet;
use std::default::Default;
use std::marker::PhantomData;
use bitcoin::util::psbt::{self, PartiallySignedTransaction as Psbt};
use bitcoin::{OutPoint, Script, SigHashType, Transaction};
use miniscript::descriptor::DescriptorTrait;
use super::coin_selection::{CoinSelectionAlgorithm, DefaultCoinSelectionAlgorithm};
use crate::{database::BatchDatabase, Error, Utxo, Wallet};
use crate::{
types::{FeeRate, KeychainKind, LocalUtxo, WeightedUtxo},
TransactionDetails,
};
/// Context in which the [`TxBuilder`] is valid
pub trait TxBuilderContext: std::fmt::Debug + Default + Clone {}
/// Marker type to indicate the [`TxBuilder`] is being used to create a new transaction (as opposed
/// to bumping the fee of an existing one).
#[derive(Debug, Default, Clone)]
pub struct CreateTx;
impl TxBuilderContext for CreateTx {}
/// Marker type to indicate the [`TxBuilder`] is being used to bump the fee of an existing transaction.
#[derive(Debug, Default, Clone)]
pub struct BumpFee;
impl TxBuilderContext for BumpFee {}
/// A transaction builder
///
/// A `TxBuilder` is created by calling [`build_tx`] or [`build_fee_bump`] on a wallet. After
/// assigning it, you set options on it until finally calling [`finish`] to consume the builder and
/// generate the transaction.
///
/// Each option setting method on `TxBuilder` takes and returns `&mut self` so you can chain calls
/// as in the following example:
///
/// ```
/// # use bdk::*;
/// # use bdk::wallet::tx_builder::*;
/// # use bitcoin::*;
/// # use core::str::FromStr;
/// # let wallet = doctest_wallet!();
/// # let addr1 = Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt").unwrap();
/// # let addr2 = addr1.clone();
/// // chaining
/// let (psbt1, details) = {
/// let mut builder = wallet.build_tx();
/// builder
/// .ordering(TxOrdering::Untouched)
/// .add_recipient(addr1.script_pubkey(), 50_000)
/// .add_recipient(addr2.script_pubkey(), 50_000);
/// builder.finish()?
/// };
///
/// // non-chaining
/// let (psbt2, details) = {
/// let mut builder = wallet.build_tx();
/// builder.ordering(TxOrdering::Untouched);
/// for addr in &[addr1, addr2] {
/// builder.add_recipient(addr.script_pubkey(), 50_000);
/// }
/// builder.finish()?
/// };
///
/// assert_eq!(psbt1.global.unsigned_tx.output[..2], psbt2.global.unsigned_tx.output[..2]);
/// # Ok::<(), bdk::Error>(())
/// ```
///
/// At the moment [`coin_selection`] is an exception to the rule as it consumes `self`.
/// This means it is usually best to call [`coin_selection`] on the return value of `build_tx` before assigning it.
///
/// For further examples see [this module](super::tx_builder)'s documentation.
///
/// [`build_tx`]: Wallet::build_tx
/// [`build_fee_bump`]: Wallet::build_fee_bump
/// [`finish`]: Self::finish
/// [`coin_selection`]: Self::coin_selection
#[derive(Debug)]
pub struct TxBuilder<'a, B, D, Cs, Ctx> {
pub(crate) wallet: &'a Wallet<B, D>,
pub(crate) params: TxParams,
pub(crate) coin_selection: Cs,
pub(crate) phantom: PhantomData<Ctx>,
}
/// The parameters for transaction creation sans coin selection algorithm.
//TODO: TxParams should eventually be exposed publicly.
#[derive(Default, Debug, Clone)]
pub(crate) struct TxParams {
pub(crate) recipients: Vec<(Script, u64)>,
pub(crate) drain_wallet: bool,
pub(crate) single_recipient: Option<Script>,
pub(crate) fee_policy: Option<FeePolicy>,
pub(crate) internal_policy_path: Option<BTreeMap<String, Vec<usize>>>,
pub(crate) external_policy_path: Option<BTreeMap<String, Vec<usize>>>,
pub(crate) utxos: Vec<WeightedUtxo>,
pub(crate) unspendable: HashSet<OutPoint>,
pub(crate) manually_selected_only: bool,
pub(crate) sighash: Option<SigHashType>,
pub(crate) ordering: TxOrdering,
pub(crate) locktime: Option<u32>,
pub(crate) rbf: Option<RbfValue>,
pub(crate) version: Option<Version>,
pub(crate) change_policy: ChangeSpendPolicy,
pub(crate) only_witness_utxo: bool,
pub(crate) add_global_xpubs: bool,
pub(crate) include_output_redeem_witness_script: bool,
pub(crate) bumping_fee: Option<PreviousFee>,
}
#[derive(Clone, Copy, Debug)]
pub(crate) struct PreviousFee {
pub absolute: u64,
pub rate: f32,
}
#[derive(Debug, Clone, Copy)]
pub(crate) enum FeePolicy {
FeeRate(FeeRate),
FeeAmount(u64),
}
impl std::default::Default for FeePolicy {
fn default() -> Self {
FeePolicy::FeeRate(FeeRate::default_min_relay_fee())
}
}
impl<'a, Cs: Clone, Ctx, B, D> Clone for TxBuilder<'a, B, D, Cs, Ctx> {
fn clone(&self) -> Self {
TxBuilder {
wallet: self.wallet,
params: self.params.clone(),
coin_selection: self.coin_selection.clone(),
phantom: PhantomData,
}
}
}
// methods supported by both contexts, for any CoinSelectionAlgorithm
impl<'a, B, D: BatchDatabase, Cs: CoinSelectionAlgorithm<D>, Ctx: TxBuilderContext>
TxBuilder<'a, B, D, Cs, Ctx>
{
/// Set a custom fee rate
pub fn fee_rate(&mut self, fee_rate: FeeRate) -> &mut Self {
self.params.fee_policy = Some(FeePolicy::FeeRate(fee_rate));
self
}
/// Set an absolute fee
pub fn fee_absolute(&mut self, fee_amount: u64) -> &mut Self {
self.params.fee_policy = Some(FeePolicy::FeeAmount(fee_amount));
self
}
/// Set the policy path to use while creating the transaction for a given keychain.
///
/// This method accepts a map where the key is the policy node id (see
/// [`Policy::id`](crate::descriptor::Policy::id)) and the value is the list of the indexes of
/// the items that are intended to be satisfied from the policy node (see
/// [`SatisfiableItem::Thresh::items`](crate::descriptor::policy::SatisfiableItem::Thresh::items)).
///
/// ## Example
///
/// An example of when the policy path is needed is the following descriptor:
/// `wsh(thresh(2,pk(A),sj:and_v(v:pk(B),n:older(6)),snj:and_v(v:pk(C),after(630000))))`,
/// derived from the miniscript policy `thresh(2,pk(A),and(pk(B),older(6)),and(pk(C),after(630000)))`.
/// It declares three descriptor fragments, and at the top level it uses `thresh()` to
/// ensure that at least two of them are satisfied. The individual fragments are:
///
/// 1. `pk(A)`
/// 2. `and(pk(B),older(6))`
/// 3. `and(pk(C),after(630000))`
///
/// When those conditions are combined in pairs, it's clear that the transaction needs to be created
/// differently depending on how the user intends to satisfy the policy afterwards:
///
/// * If fragments `1` and `2` are used, the transaction will need to use a specific
/// `n_sequence` in order to spend an `OP_CSV` branch.
/// * If fragments `1` and `3` are used, the transaction will need to use a specific `locktime`
/// in order to spend an `OP_CLTV` branch.
/// * If fragments `2` and `3` are used, the transaction will need both.
///
/// When the spending policy is represented as a tree (see
/// [`Wallet::policies`](super::Wallet::policies)), every node
/// is assigned a unique identifier that can be used in the policy path to specify which of
/// the node's children the user intends to satisfy: for instance, assuming the `thresh()`
/// root node of this example has an id of `aabbccdd`, the policy path map would look like:
///
/// `{ "aabbccdd" => [0, 1] }`
///
/// where the key is the node's id, and the value is a list of the children that should be
/// used, in no particular order.
///
/// If a particularly complex descriptor has multiple ambiguous thresholds in its structure,
/// multiple entries can be added to the map, one for each node that requires an explicit path.
///
/// ```
/// # use std::str::FromStr;
/// # use std::collections::BTreeMap;
/// # use bitcoin::*;
/// # use bdk::*;
/// # let to_address = Address::from_str("2N4eQYCbKUHCCTUjBJeHcJp9ok6J2GZsTDt").unwrap();
/// # let wallet = doctest_wallet!();
/// let mut path = BTreeMap::new();
/// path.insert("aabbccdd".to_string(), vec![0, 1]);
///
/// let builder = wallet.build_tx()
/// .add_recipient(to_address.script_pubkey(), 50_000)
/// .policy_path(path, KeychainKind::External);
///
/// # Ok::<(), bdk::Error>(())
/// ```
pub fn policy_path(
&mut self,
policy_path: BTreeMap<String, Vec<usize>>,
keychain: KeychainKind,
) -> &mut Self {
let to_update = match keychain {
KeychainKind::Internal => &mut self.params.internal_policy_path,
KeychainKind::External => &mut self.params.external_policy_path,
};
*to_update = Some(policy_path);
self
}
/// Add the list of outpoints to the internal list of UTXOs that **must** be spent.
///
/// If an error occurs while adding any of the UTXOs then none of them are added and the error is returned.
///
/// These have priority over the "unspendable" utxos, meaning that if a utxo is present both in
/// the "utxos" and the "unspendable" list, it will be spent.
pub fn add_utxos(&mut self, outpoints: &[OutPoint]) -> Result<&mut Self, Error> {
let utxos = outpoints
.iter()
.map(|outpoint| self.wallet.get_utxo(*outpoint)?.ok_or(Error::UnknownUtxo))
.collect::<Result<Vec<_>, _>>()?;
for utxo in utxos {
let descriptor = self.wallet.get_descriptor_for_keychain(utxo.keychain);
let satisfaction_weight = descriptor.max_satisfaction_weight().unwrap();
self.params.utxos.push(WeightedUtxo {
satisfaction_weight,
utxo: Utxo::Local(utxo),
});
}
Ok(self)
}
/// Add a utxo to the internal list of utxos that **must** be spent
///
/// These have priority over the "unspendable" utxos, meaning that if a utxo is present both in
/// the "utxos" and the "unspendable" list, it will be spent.
pub fn add_utxo(&mut self, outpoint: OutPoint) -> Result<&mut Self, Error> {
self.add_utxos(&[outpoint])
}
/// Add a foreign UTXO i.e. a UTXO not owned by this wallet.
///
/// At a minimum to add a foreign UTXO we need:
///
/// 1. `outpoint`: To add it to the raw transaction.
/// 2. `psbt_input`: To know the value.
/// 3. `satisfaction_weight`: To know how much weight/vbytes the input will add to the transaction for fee calculation.
///
/// There are several security concerns about adding foreign UTXOs that application
/// developers should consider. First, how do you know the value of the input is correct? If a
/// `non_witness_utxo` is provided in the `psbt_input` then this method implicitly verifies the
/// value by checking it against the transaction. If only a `witness_utxo` is provided then this
/// method doesn't verify the value but just takes it as a given -- it is up to you to check
/// that whoever sent you the `input_psbt` was not lying!
///
/// Secondly, you must somehow provide `satisfaction_weight` of the input. Depending on your
/// application it may be important that this be known precisely. If not, a malicious
/// counterparty may fool you into putting in a value that is too low, giving the transaction a
/// lower than expected feerate. They could also fool you into putting a value that is too high
/// causing you to pay a fee that is too high. The party who is broadcasting the transaction can
/// of course check the real input weight matches the expected weight prior to broadcasting.
///
/// To guarantee the `satisfaction_weight` is correct, you can require the party providing the
/// `psbt_input` provide a miniscript descriptor for the input so you can check it against the
/// `script_pubkey` and then ask it for the [`max_satisfaction_weight`].
///
/// This is an **EXPERIMENTAL** feature, API and other major changes are expected.
///
/// # Errors
///
/// This method returns errors in the following circumstances:
///
/// 1. The `psbt_input` does not contain a `witness_utxo` or `non_witness_utxo`.
/// 2. The data in `non_witness_utxo` does not match what is in `outpoint`.
///
/// Note that unless you set [`only_witness_utxo`], any `psbt_input` you pass to this method must
/// have `non_witness_utxo` set, otherwise you will get an error when [`finish`] is called.
///
/// [`only_witness_utxo`]: Self::only_witness_utxo
/// [`finish`]: Self::finish
/// [`max_satisfaction_weight`]: miniscript::Descriptor::max_satisfaction_weight
pub fn add_foreign_utxo(
&mut self,
outpoint: OutPoint,
psbt_input: psbt::Input,
satisfaction_weight: usize,
) -> Result<&mut Self, Error> {
if psbt_input.witness_utxo.is_none() {
match psbt_input.non_witness_utxo.as_ref() {
Some(tx) => {
if tx.txid() != outpoint.txid {
return Err(Error::Generic(
"Foreign utxo outpoint does not match PSBT input".into(),
));
}
if tx.output.len() <= outpoint.vout as usize {
return Err(Error::InvalidOutpoint(outpoint));
}
}
None => {
return Err(Error::Generic(
"Foreign utxo missing witness_utxo or non_witness_utxo".into(),
))
}
}
}
self.params.utxos.push(WeightedUtxo {
satisfaction_weight,
utxo: Utxo::Foreign {
outpoint,
psbt_input: Box::new(psbt_input),
},
});
Ok(self)
}
/// Only spend utxos added by [`add_utxo`].
///
/// The wallet will **not** add additional utxos to the transaction even if they are needed to
/// make the transaction valid.
///
/// [`add_utxo`]: Self::add_utxo
pub fn manually_selected_only(&mut self) -> &mut Self {
self.params.manually_selected_only = true;
self
}
/// Replace the internal list of unspendable utxos with a new list
///
/// It's important to note that the "must-be-spent" utxos added with [`TxBuilder::add_utxo`]
/// have priority over these. See the docs of the two linked methods for more details.
pub fn unspendable(&mut self, unspendable: Vec<OutPoint>) -> &mut Self {
self.params.unspendable = unspendable.into_iter().collect();
self
}
/// Add a utxo to the internal list of unspendable utxos
///
/// It's important to note that the "must-be-spent" utxos added with [`TxBuilder::add_utxo`]
/// have priority over this. See the docs of the two linked methods for more details.
pub fn add_unspendable(&mut self, unspendable: OutPoint) -> &mut Self {
self.params.unspendable.insert(unspendable);
self
}
/// Sign with a specific sig hash
///
/// **Use this option very carefully**
pub fn sighash(&mut self, sighash: SigHashType) -> &mut Self {
self.params.sighash = Some(sighash);
self
}
/// Choose the ordering for inputs and outputs of the transaction
pub fn ordering(&mut self, ordering: TxOrdering) -> &mut Self {
self.params.ordering = ordering;
self
}
/// Use a specific nLockTime while creating the transaction
///
/// This can cause conflicts if the wallet's descriptors contain an "after" (OP_CLTV) operator.
pub fn nlocktime(&mut self, locktime: u32) -> &mut Self {
self.params.locktime = Some(locktime);
self
}
/// Build a transaction with a specific version
///
/// The `version` should always be greater than `0` and greater than `1` if the wallet's
/// descriptors contain an "older" (OP_CSV) operator.
pub fn version(&mut self, version: i32) -> &mut Self {
self.params.version = Some(Version(version));
self
}
/// Do not spend change outputs
///
/// This effectively adds all the change outputs to the "unspendable" list. See
/// [`TxBuilder::unspendable`].
pub fn do_not_spend_change(&mut self) -> &mut Self {
self.params.change_policy = ChangeSpendPolicy::ChangeForbidden;
self
}
/// Only spend change outputs
///
/// This effectively adds all the non-change outputs to the "unspendable" list. See
/// [`TxBuilder::unspendable`].
pub fn only_spend_change(&mut self) -> &mut Self {
self.params.change_policy = ChangeSpendPolicy::OnlyChange;
self
}
/// Set a specific [`ChangeSpendPolicy`]. See [`TxBuilder::do_not_spend_change`] and
/// [`TxBuilder::only_spend_change`] for some shortcuts.
pub fn change_policy(&mut self, change_policy: ChangeSpendPolicy) -> &mut Self {
self.params.change_policy = change_policy;
self
}
/// Only Fill-in the [`psbt::Input::witness_utxo`](bitcoin::util::psbt::Input::witness_utxo) field when spending from
/// SegWit descriptors.
///
/// This reduces the size of the PSBT, but some signers might reject them due to the lack of
/// the `non_witness_utxo`.
pub fn only_witness_utxo(&mut self) -> &mut Self {
self.params.only_witness_utxo = true;
self
}
/// Fill-in the [`psbt::Output::redeem_script`](bitcoin::util::psbt::Output::redeem_script) and
/// [`psbt::Output::witness_script`](bitcoin::util::psbt::Output::witness_script) fields.
///
/// This is useful for signers which always require it, like ColdCard hardware wallets.
pub fn include_output_redeem_witness_script(&mut self) -> &mut Self {
self.params.include_output_redeem_witness_script = true;
self
}
/// Fill-in the `PSBT_GLOBAL_XPUB` field with the extended keys contained in both the external
/// and internal descriptors
///
/// This is useful for offline signers that take part in a multisig. Some hardware wallets like
/// BitBox and ColdCard are known to require this.
pub fn add_global_xpubs(&mut self) -> &mut Self {
self.params.add_global_xpubs = true;
self
}
/// Spend all the available inputs. This respects filters like [`TxBuilder::unspendable`] and the change policy.
pub fn drain_wallet(&mut self) -> &mut Self {
self.params.drain_wallet = true;
self
}
/// Choose the coin selection algorithm
///
/// Overrides the [`DefaultCoinSelectionAlgorithm`](super::coin_selection::DefaultCoinSelectionAlgorithm).
///
/// Note that this function consumes the builder and returns it so it is usually best to put this as the first call on the builder.
pub fn coin_selection<P: CoinSelectionAlgorithm<D>>(
self,
coin_selection: P,
) -> TxBuilder<'a, B, D, P, Ctx> {
TxBuilder {
wallet: self.wallet,
params: self.params,
coin_selection,
phantom: PhantomData,
}
}
/// Finish building the transaction.
///
/// Returns the [`BIP174`] "PSBT" and summary details about the transaction.
///
/// [`BIP174`]: https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki
pub fn finish(self) -> Result<(Psbt, TransactionDetails), Error> |
/// Enable signaling RBF
///
/// This will use the default nSequence value of `0xFFFFFFFD`.
pub fn enable_rbf(&mut self) -> &mut Self {
self.params.rbf = Some(RbfValue::Default);
self
}
/// Enable signaling RBF with a specific nSequence value
///
/// This can cause conflicts if the wallet's descriptors contain an "older" (OP_CSV) operator
/// and the given `nsequence` is lower than the CSV value.
///
/// If the `nsequence` is higher than `0xFFFFFFFD` an error will be thrown, since it would not
/// be a valid nSequence to signal RBF.
pub fn enable_rbf_with_sequence(&mut self, nsequence: u32) -> &mut Self {
self.params.rbf = Some(RbfValue::Value(nsequence));
self
}
}
impl<'a, B, D: BatchDatabase, Cs: CoinSelectionAlgorithm<D>> TxBuilder<'a, B, D, Cs, CreateTx> {
/// Replace the recipients already added with a new list
pub fn set_recipients(&mut self, recipients: Vec<(Script, u64)>) -> &mut Self {
self.params.recipients = recipients;
self
}
/// Add a recipient to the internal list
pub fn add_recipient(&mut self, script_pubkey: Script, amount: u64) -> &mut Self {
self.params.recipients.push((script_pubkey, amount));
self
}
/// Set a single recipient that will get all the selected funds minus the fee. No change will
/// be created
///
/// This method overrides any recipient set with [`set_recipients`](Self::set_recipients) or
/// [`add_recipient`](Self::add_recipient).
///
/// It can only be used in conjunction with [`drain_wallet`](Self::drain_wallet) to send the
/// entire content of the wallet (minus filters) to a single recipient or with a
/// list of manually selected UTXOs by enabling [`manually_selected_only`](Self::manually_selected_only)
/// and selecting them with [`add_utxo`](Self::add_utxo).
///
/// When bumping the fees of a transaction made with this option, the user should remember to
/// add [`maintain_single_recipient`](Self::maintain_single_recipient) to correctly update the
/// single output instead of adding one more for the change.
pub fn set_single_recipient(&mut self, recipient: Script) -> &mut Self {
self.params.single_recipient = Some(recipient);
self.params.recipients.clear();
self
}
}
// methods supported only by bump_fee
impl<'a, B, D: BatchDatabase> TxBuilder<'a, B, D, DefaultCoinSelectionAlgorithm, BumpFee> {
/// Bump the fees of a transaction made with [`set_single_recipient`](Self::set_single_recipient)
///
/// Unless extra inputs are specified with [`add_utxo`], this flag will make
/// `bump_fee` reduce the value of the existing output, or fail if it would be consumed
/// entirely given the higher new fee rate.
///
/// If extra inputs are added and they are not entirely consumed in fees, a change output will not
/// be added; the existing output will simply grow in value.
///
/// Fails if the transaction has more than one output.
///
/// [`add_utxo`]: Self::add_utxo
pub fn maintain_single_recipient(&mut self) -> Result<&mut Self, Error> {
let mut recipients = self.params.recipients.drain(..).collect::<Vec<_>>();
if recipients.len() != 1 {
return Err(Error::SingleRecipientMultipleOutputs);
}
self.params.single_recipient = Some(recipients.pop().unwrap().0);
Ok(self)
}
}
/// Ordering of the transaction's inputs and outputs
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)]
pub enum TxOrdering {
/// Randomized (default)
Shuffle,
/// Unchanged
Untouched,
/// BIP69 / Lexicographic
Bip69Lexicographic,
}
impl Default for TxOrdering {
fn default() -> Self {
TxOrdering::Shuffle
}
}
impl TxOrdering {
/// Sort transaction inputs and outputs by [`TxOrdering`] variant
pub fn sort_tx(&self, tx: &mut Transaction) {
match self {
TxOrdering::Untouched => {}
TxOrdering::Shuffle => {
use rand::seq::SliceRandom;
#[cfg(test)]
use rand::SeedableRng;
#[cfg(not(test))]
let mut rng = rand::thread_rng();
#[cfg(test)]
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
tx.output.shuffle(&mut rng);
}
TxOrdering::Bip69Lexicographic => {
tx.input.sort_unstable_by_key(|txin| {
(txin.previous_output.txid, txin.previous_output.vout)
});
tx.output
.sort_unstable_by_key(|txout| (txout.value, txout.script_pubkey.clone()));
}
}
}
}
/// Transaction version
///
/// Has a default value of `1`
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)]
pub(crate) struct Version(pub(crate) i32);
impl Default for Version {
fn default() -> Self {
Version(1)
}
}
/// RBF nSequence value
///
/// Has a default value of `0xFFFFFFFD`
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)]
pub(crate) enum RbfValue {
Default,
Value(u32),
}
impl RbfValue {
pub(crate) fn get_value(&self) -> u32 {
match self {
RbfValue::Default => 0xFFFFFFFD,
RbfValue::Value(v) => *v,
}
}
}
/// Policy regarding the use of change outputs when creating a transaction
#[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Hash, Clone, Copy)]
pub enum ChangeSpendPolicy {
/// Use both change and non-change outputs (default)
ChangeAllowed,
/// Only use change outputs (see [`TxBuilder::only_spend_change`])
OnlyChange,
/// Only use non-change outputs (see [`TxBuilder::do_not_spend_change`])
ChangeForbidden,
}
impl Default for ChangeSpendPolicy {
fn default() -> Self {
ChangeSpendPolicy::ChangeAllowed
}
}
impl ChangeSpendPolicy {
pub(crate) fn is_satisfied_by(&self, utxo: &LocalUtxo) -> bool {
match self {
ChangeSpendPolicy::ChangeAllowed => true,
ChangeSpendPolicy::OnlyChange => utxo.keychain == KeychainKind::Internal,
ChangeSpendPolicy::ChangeForbidden => utxo.keychain == KeychainKind::External,
}
}
}
#[cfg(test)]
mod test {
const ORDERING_TEST_TX: &str = "0200000003c26f3eb7932f7acddc5ddd26602b77e7516079b03090a16e2c2f54\
85d1fd600f0100000000ffffffffc26f3eb7932f7acddc5ddd26602b77e75160\
79b03090a16e2c2f5485d1fd600f0000000000ffffffff571fb3e02278217852\
dd5d299947e2b7354a639adc32ec1fa7b82cfb5dec530e0500000000ffffffff\
03e80300000000000002aaeee80300000000000001aa200300000000000001ff\
00000000";
macro_rules! ordering_test_tx {
() => {
deserialize::<bitcoin::Transaction>(&Vec::<u8>::from_hex(ORDERING_TEST_TX).unwrap())
.unwrap()
};
}
use bitcoin::consensus::deserialize;
use bitcoin::hashes::hex::FromHex;
use super::*;
#[test]
fn test_output_ordering_default_shuffle() {
assert_eq!(TxOrdering::default(), TxOrdering::Shuffle);
}
#[test]
fn test_output_ordering_untouched() {
let original_tx = ordering_test_tx!();
let mut tx = original_tx.clone();
TxOrdering::Untouched.sort_tx(&mut tx);
assert_eq!(original_tx, tx);
}
#[test]
fn test_output_ordering_shuffle() {
let original_tx = ordering_test_tx!();
let mut tx = original_tx.clone();
TxOrdering::Shuffle.sort_tx(&mut tx);
assert_eq!(original_tx.input, tx.input);
assert_ne!(original_tx.output, tx.output);
}
#[test]
fn test_output_ordering_bip69() {
use std::str::FromStr;
let original_tx = ordering_test_tx!();
let mut tx = original_tx;
TxOrdering::Bip69Lexicographic.sort_tx(&mut tx);
assert_eq!(
tx.input[0].previous_output,
bitcoin::OutPoint::from_str(
"0e53ec5dfb2cb8a71fec32dc9a634a35b7e24799295ddd5278217822e0b31f57:5"
)
.unwrap()
);
assert_eq!(
tx.input[1].previous_output,
bitcoin::OutPoint::from_str(
"0f60fdd185542f2c6ea19030b0796051e7772b6026dd5ddccd7a2f93b73e6fc2:0"
)
.unwrap()
);
assert_eq!(
tx.input[2].previous_output,
bitcoin::OutPoint::from_str(
"0f60fdd185542f2c6ea19030b0796051e7772b6026dd5ddccd7a2f93b73e6fc2:1"
)
.unwrap()
);
assert_eq!(tx.output[0].value, 800);
assert_eq!(tx.output[1].script_pubkey, From::from(vec![0xAA]));
assert_eq!(tx.output[2].script_pubkey, From::from(vec![0xAA, 0xEE]));
}
fn get_test_utxos() -> Vec<LocalUtxo> {
vec![
LocalUtxo {
outpoint: OutPoint {
txid: Default::default(),
vout: 0,
},
txout: Default::default(),
keychain: KeychainKind::External,
},
LocalUtxo {
outpoint: OutPoint {
txid: Default::default(),
vout: 1,
},
txout: Default::default(),
keychain: KeychainKind::Internal,
},
]
}
#[test]
fn test_change_spend_policy_default() {
let change_spend_policy = ChangeSpendPolicy::default();
let filtered = get_test_utxos()
.into_iter()
.filter(|u| change_spend_policy.is_satisfied_by(u))
.count();
assert_eq!(filtered, 2);
}
#[test]
fn test_change_spend_policy_no_internal() {
let change_spend_policy = ChangeSpendPolicy::ChangeForbidden;
let filtered = get_test_utxos()
.into_iter()
.filter(|u| change_spend_policy.is_satisfied_by(u))
.collect::<Vec<_>>();
assert_eq!(filtered.len(), 1);
assert_eq!(filtered[0].keychain, KeychainKind::External);
}
#[test]
fn test_change_spend_policy_only_internal() {
let change_spend_policy = ChangeSpendPolicy::OnlyChange;
let filtered = get_test_utxos()
.into_iter()
.filter(|u| change_spend_policy.is_satisfied_by(u))
.collect::<Vec<_>>();
assert_eq!(filtered.len(), 1);
assert_eq!(filtered[0].keychain, KeychainKind::Internal);
}
#[test]
fn test_default_tx_version_1() {
let version = Version::default();
assert_eq!(version.0, 1);
}
}
| {
self.wallet.create_tx(self.coin_selection, self.params)
} |
woql.js | ////@ts-check
//I HAVE TO REVIEW THE Inheritance and the prototype chain
const typedef = require('./typedef')
const WOQLQuery = require('./query/woqlBuilder')
/**
* @license Apache Version 2
* @module WOQL
* @constructor WOQL
 * @description The WOQL object is a wrapper around the WOQLQuery object.
* Syntactic sugar to allow writing WOQL.triple()... instead of new WOQLQuery().triple()
* Every function matches one of the public api functions of the woql query object
*/
let WOQL = {}
/**
* WOQL primitives are WOQL.js functions which directly map onto words in
* the underlying JSON-LD language. All other WOQL.js functions are compound functions
* which translate into multiple WOQL primitives, or are helper functions which reduce
* the need to write verbose JSON-LD directly.
*/
/**
* WOQL Literals, Prefixes & IRI Constant
*/
/*
* We expose all the real woql predicates via the WOQL object,
* for ease of typing all return a WOQL query object
*/
/**
* Query running against any specific commit Id
* @param {string} refPath - path to specific reference Id or commit Id
* @param {WOQLQuery} subquery - subquery for the specific commit point
* @returns {WOQLQuery}
* @example
* WOQL.using("userName/dbName/local/commit|branch/commitID").triple("v:A", "v:B", "v:C")
*/
WOQL.using = function(refPath, subquery) {
return new WOQLQuery().using(refPath, subquery)
}
/**
* Adds a text comment to a query - can also be used to wrap any part of a query to turn it off
* @param {string} comment - text comment
* @param {WOQLQuery} subquery - query that is "commented out"
* @returns {WOQLQuery}
*/
WOQL.comment = function(comment, subquery) {
return new WOQLQuery().comment(comment, subquery)
}
/**
 * Filters the query so that only the variables included in [V1...Vn] are returned in the bindings
 * @example WOQL.select("v:a",triple("v:a","v:b","v:c"))
* @param {...string} varNames - only these variables are returned
* @returns {WOQLQuery}
*/
WOQL.select = function(...varNames) {
return new WOQLQuery().select(...varNames)
}
/**
* Filter the query to return only results that are distinct in the given variables
* @param {...string} varNames - these variables are guaranteed to be unique as a tuple
* @returns {WOQLQuery}
*/
WOQL.distinct = function(...varNames) {
return new WOQLQuery().distinct(...varNames)
}
/**
* Logical conjunction of the contained queries - all queries must match or the entire clause fails
* @param {...WOQLQuery} subqueries - A list of one or more woql queries to execute as a conjunction
* @returns {WOQLQuery} - A WOQLQuery object containing the conjunction of queries
* @example
* //find triples that are of type scm:Journey, and have
* //a start_station v:Start, and that start_station is labeled
* //v:Start_Label
*
* WOQL.and(
* WOQL.triple("v:Journey", "type", "scm:Journey"),
* WOQL.triple("v:Journey", "start_station", "v:Start"),
* WOQL.triple("v:Start", "label", "v:Start_Label"))
*
*/
WOQL.and = function(...subqueries) {
return new WOQLQuery().and(...subqueries)
}
/**
* Read a node identified by an IRI as a JSON-LD document
* @param {string} IRI - The document id or a variable
* @param {string} output - variable name
* @param {object} formatObj
* @return {object} WOQLQuery
*/
WOQL.read_object = function(IRI, output, formatObj) {
return new WOQLQuery().read_object(IRI, output, formatObj)
}
/**
* Creates a logical OR of the arguments
* @param {...WOQLQuery} subqueries - A list of one or more woql queries to execute as alternatives
* @returns {WOQLQuery} - A WOQLQuery object containing the logical Or of the subqueries
* @example
* or(
* triple("v:Subject", 'label', "A"),
* triple("v:Subject", "label", "a")
* )
*/
WOQL.or = function(...subqueries) {
return new WOQLQuery().or(...subqueries)
}
/**
* Specifies the database URL that will be the default database for the enclosed query
 * @param {typedef.GraphRef} graphRef - A valid graph resource identifier string
* @param {WOQLQuery} [query] - The query
* @returns {WOQLQuery} A WOQLQuery object containing the from expression
*/
WOQL.from = function(graphRef, query) {
return new WOQLQuery().from(graphRef, query)
}
/**
* Specifies the graph resource to write the contained query into
 * @param {typedef.GraphRef} graphRef - A valid graph resource identifier string
* @param {WOQLQuery} subquery - The query which will be written into the graph
* @returns {WOQLQuery} A WOQLQuery which will be written into the graph in question
* @example
* //Subq is an argument or a chained query
* using("admin/minecraft").into("instance/main").add_triple("a", "type", "scm:X")
* //writes a single tripe (doc:a, rdf:type, scm:X) into the main instance graph
*
*/
WOQL.into = function(graphRef, subquery) {
return new WOQLQuery().into(graphRef, subquery)
}
/**
* Creates a triple pattern matching rule for the triple [S, P, O] (Subject, Predicate, Object)
* @param {string} subject - The IRI of a triple’s subject or a variable
* @param {string} predicate - The IRI of a property or a variable
* @param {string} object - The IRI of a node or a variable, or a literal
* @returns {WOQLQuery}
*/
WOQL.triple = function(subject, predicate, object) {
return new WOQLQuery().triple(subject, predicate, object)
}
/**
* Creates a triple pattern matching rule for the triple [S, P, O] (Subject, Predicate, Object) added in the current layer
* @param {string} subject - The IRI of a triple’s subject or a variable
* @param {string} predicate - The IRI of a property or a variable
* @param {string} object - The IRI of a node or a variable, or a literal
* @returns {WOQLQuery}
*/
WOQL.added_triple = function(subject, predicate, object) {
return new WOQLQuery().added_triple(subject, predicate, object)
}
/**
 * Creates a triple pattern matching rule for the triple [S, P, O] (Subject, Predicate, Object) removed from the current commit
* @param {string} subject - The IRI of a triple’s subject or a variable
* @param {string} predicate - The IRI of a property or a variable
* @param {string} object - The IRI of a node or a variable, or a literal
* @returns {WOQLQuery}
*/
WOQL.removed_triple = function(subject, predicate, object) {
return new WOQLQuery().removed_triple(subject, predicate, object)
}
/**
* Creates a pattern matching rule for the quad [S, P, O, G] (Subject, Predicate, Object, Graph)
* @param {string} subject - The IRI of a triple’s subject or a variable
* @param {string} predicate - The IRI of a property or a variable
* @param {string} object - The IRI of a node or a variable, or a literal
* @param {typedef.GraphRef} graphRef - A valid graph resource identifier string
* @returns {WOQLQuery}
*/
WOQL.quad = function(subject, predicate, object, graphRef) {
return new WOQLQuery().quad(subject, predicate, object, graphRef)
}
/**
 * Creates a pattern matching rule for the quad [S, P, O, G] (Subject, Predicate, Object, Graph) added in the current commit
* @param {string} subject - The IRI of a triple’s subject or a variable
* @param {string} predicate - The IRI of a property or a variable
* @param {string} object - The IRI of a node or a variable, or a literal
 * @param {typedef.GraphRef} graphRef - A valid graph resource identifier string
* @returns {WOQLQuery}
*/
WOQL.added_quad = function(subject, predicate, object, graphRef) {
return new WOQLQuery().added_quad(subject, predicate, object, graphRef)
}
/**
* Creates a pattern matching rule for the quad [S, P, O, G] (Subject, Predicate, Object, Graph) removed from the current commit
* @param {string} subject - The IRI of a triple’s subject or a variable
* @param {string} predicate - The IRI of a property or a variable
* @param {string} object - The IRI of a node or a variable, or a literal
 * @param {typedef.GraphRef} graphRef - A valid graph resource identifier string
* @returns {WOQLQuery}
*/
WOQL.removed_quad = function(subject, predicate, object, graphRef) {
return new WOQLQuery().removed_quad(subject, predicate, object, graphRef)
}
/**
* Returns true if ClassA subsumes ClassB, according to the current DB schema
* @param {string} classA - ClassA
* @param {string} classB - ClassB
 * @returns {WOQLQuery} A WOQLQuery object containing the subsumption test
*/
WOQL.sub = function(classA, classB) {
return new WOQLQuery().sub(classA, classB)
}
WOQL.subsumption = WOQL.sub
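// Usage sketch (illustrative class names): succeeds when the first class
// subsumes the second in the current schema.
// WOQL.sub("scm:Animal", "scm:Dog")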
/**
* Matches if a is equal to b
* @param {string} varName - literal, variable or id
* @param {string} varValue - literal, variable or id
* @returns {WOQLQuery}
*
*
*/
WOQL.eq = function(varName, varValue) {
return new WOQLQuery().eq(varName, varValue)
}
WOQL.equals = WOQL.eq
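// Usage sketch: unify a variable with a literal (binds v:name if unbound,
// otherwise tests equality).
// WOQL.eq("v:name", "John")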
/**
* Substring
* @param {string} string - String or variable
 * @param {number} before - integer or variable (number of characters from the start of the string to the start of the substring)
* @param {number} [length] - integer or variable (length of substring)
* @param {number} [after] - integer or variable (number of characters after substring)
* @param {string} [substring] - String or variable
* @returns {WOQLQuery}
*/
WOQL.substr = function(string, before, length, after, substring) {
return new WOQLQuery().substr(string, before, length, after, substring)
}
WOQL.substring = WOQL.substr
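// Usage sketch (assuming Prolog-style sub_string semantics, where before,
// length, and after count the characters before, inside, and after the
// substring respectively):
// WOQL.substr("hello", 1, 3, "v:after", "v:substring")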
/**
 * Retrieves the external resource defined by QueryResource and copies values from it into variables defined in AsVars
* @param {WOQLQuery} asvars - an array of AsVar variable mappings (see as for format below)
* @param {WOQLQuery} queryResource - an external resource (remote, file, post) to query
* @returns {WOQLQuery} A WOQLQuery which contains the get expression
* @example
* let [a, b] = vars("a", "b")
* get(as("a", a).as("b", b)).remote("http://my.url.com/x.csv")
* //copies the values from column headed "a" into a variable a and from column
* //"b" into a variable b from remote CSV
*/
WOQL.get = function(asvars, queryResource) {
return new WOQLQuery().get(asvars, queryResource)
}
/**
 * Outputs the results of a query to a file
 * @param {WOQLQuery} varsToExp - an array of AsVar variable mappings (see as for format below)
 * @param {WOQLQuery} query - The query which will be executed to produce the results
 * @param {string} fileResource - a file resource local to the server
* @returns {WOQLQuery} A WOQLQuery which contains the put expression
* @example
* let [s, p, o] = vars("Subject", "Predicate", "Object")
* WOQL.put(WOQL.as("s", s).as("p", p).as("o", o), WOQL.all())
* .file({file:"/app/local_files/dump.csv"})
*/
WOQL.put = function(varsToExp, query, fileResource) {
return new WOQLQuery().put(varsToExp, query, fileResource)
}
/**
* Imports the value identified by Source to a Target variable
* @param {string | number} source - Source
* @param {string} target - Target
* @param {string} [type] - type to cast value to string|number etc...
* @returns {WOQLQuery}
* @example
* WOQL.as("first var", "v:First_Var",{} "string").as("second var", "v:Second_Var")
* WOQL.as(["first var", "v:First_Var", "string"], ["second var", "v:Second_Var"])
*/
WOQL.as = function(source, target, type) {
return new WOQLQuery().as(source, target, type)
}
/**
* Identifies a remote resource by URL and specifies the format of the resource through the options
* @param {object} remoteObj - The URL at which the remote resource can be accessed
 * @param {typedef.DataFormatObj} [formatObj] - The format of the resource data
* @returns {WOQLQuery} A WOQLQuery which contains the remote resource identifier
* @example
* remote({url:"http://url.of.resource"}, {type: "csv"})
*/
WOQL.remote = function(remoteObj, formatObj) {
return new WOQLQuery().remote(remoteObj, formatObj)
}
/**
* Identifies a file resource as a path on the server and specifies the format through the options
 * @param {string} url - The path on the server at which the file resource can be accessed
 * @param {typedef.DataFormatObj} [formatObj] - input options
* @returns {WOQLQuery} A WOQLQuery which contains the file resource identifier
* @example
* file("/path/to/file", {type: 'turtle'} )
*/
WOQL.file = function(url, formatObj) {
return new WOQLQuery().file(url, formatObj)
}
/**
* Identifies a resource as a local path on the client, to be sent to the server through a
* HTTP POST request, with the format defined through the options
 * @param {string} url - The path on the client at which the file resource can be accessed
 * @param {typedef.DataFormatObj} [formatObj] - input options, optional
* @returns {WOQLQuery} A WOQLQuery which contains the Post resource identifier
* @example
* post("/.../.../", {type:'csv'})
*/
WOQL.post = function(url, formatObj) {
return new WOQLQuery().post(url, formatObj)
}
/**
* Deletes a single triple from the default graph of the database
* @param {string} subject - The IRI of a triple’s subject or a variable
* @param {string} predicate - The IRI of a property or a variable
* @param {string} object - The IRI of a node or a variable, or a literal
* @returns {WOQLQuery} - A WOQLQuery which contains the Triple Deletion statement
* @example
* delete_triple("john", "age", 42)
*/
WOQL.delete_triple = function(subject, predicate, object) {
return new WOQLQuery().delete_triple(subject, predicate, object)
}
/**
* Deletes a single triple from the graph [Subject, Predicate, Object, Graph]
* @param {string} subject - The IRI of a triple’s subject or a variable
* @param {string} predicate - The IRI of a property or a variable
* @param {string} object - The IRI of a node or a variable, or a literal
* @param {typedef.GraphRef} graphRef - A valid graph resource identifier string
* @returns {WOQLQuery} - A WOQLQuery which contains the Delete Quad Statement
 * @example
 * //remove the class Person from the schema/main graph
* WOQL.delete_quad("Person", "type", "owl:Class", "schema/main")
*/
WOQL.delete_quad = function(subject, predicate, object, graphRef) {
return new WOQLQuery().delete_quad(subject, predicate, object, graphRef)
}
/**
* Adds triples according to the the pattern [subject,predicate,object]
* @param {string} subject - The IRI of a triple’s subject or a variable
* @param {string} predicate - The IRI of a property or a variable
* @param {string} object - The IRI of a node or a variable, or a literal
* @returns {object} WOQLQuery
*/
WOQL.add_triple = function(subject, predicate, object) {
return new WOQLQuery().add_triple(subject, predicate, object)
}
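// Usage sketch: write one triple into the default write graph when the
// query is executed.
// WOQL.add_triple("doc:john", "label", "John")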
/**
* Adds quads according to the pattern [S,P,O,G]
* @param {string} subject - The IRI of a triple’s subject or a variable
* @param {string} predicate - The IRI of a property or a variable
* @param {string} object - The IRI of a node or a variable, or a literal
 * @param {typedef.GraphRef} graphRef - A valid graph resource identifier string
* @returns {object} WOQLQuery
*/
WOQL.add_quad = function(subject, predicate, object, graphRef) {
return new WOQLQuery().add_quad(subject, predicate, object, graphRef)
}
/**
*
* When the subquery is met, the update query is executed
* @param {WOQLQuery} subquery - the condition query
* @param {WOQLQuery} [updateQuery]
* @returns {WOQLQuery} - A WOQLQuery which contains the when expression
* @example
* when(true()).triple("a", "b", "c")
*/
//Functions which take a query as an argument advance the cursor to make the chaining of queries fall into the correct place in the encompassing json.
/*WOQL.when = function(subquery, updateQuery) {
return new WOQLQuery().when(subquery, updateQuery)
}*/
/**
*
* Remove whitespace from both sides of a string:
* @param {string} inputStr - A string or variable containing the untrimmed version of the string
* @param {string} resultVarName - A string or variable containing the trimmed version of the string
* @returns {WOQLQuery} A WOQLQuery which contains the Trim pattern matching expression
* @example
* trim("hello ","v:trimmed")
* //trimmed contains "hello"
*/
WOQL.trim = function(inputStr, resultVarName) {
return new WOQLQuery().trim(inputStr, resultVarName)
}
/**
*
* Evaluates the passed arithmetic expression and generates or matches the result value
* @param {object| WOQLQuery | string} arithExp - A WOQL query containing a valid WOQL Arithmetic Expression, which is evaluated by the function
* @param {string | number} resultVarName - Either a variable, in which the result of the expression will be stored, or a numeric literal which will be used as a test of result of the evaluated expression
* @returns {WOQLQuery} A WOQLQuery which contains the Arithmetic function
* @example
* evaluate(plus(2, minus(3, 1)), "v:result")
*/
WOQL.evaluate = function(arithExp, resultVarName) {
return new WOQLQuery().eval(arithExp, resultVarName)
}
/**
* Evaluates the passed arithmetic expression and generates or matches the result value
* @param {object| WOQLQuery | string} arithExp - query or JSON-LD representing the query
* @param {string} resultVarName - output variable
* @returns {WOQLQuery} WOQLQuery
*/
WOQL.eval = WOQL.evaluate
/**
*
* Adds the numbers together
* @param {...(string |number)} args - a variable or numeric containing the values to add
* @returns {WOQLQuery} A WOQLQuery which contains the addition expression
* @example
* evaluate(plus(2, plus(3, 1)), "v:result")
*/
WOQL.plus = function(...args) {
return new WOQLQuery().plus(...args)
}
/**
*
* Subtracts Numbers N1..Nn
* @param {...(string |number)} args - variable or numeric containing the value that will be subtracted from
* @returns {WOQLQuery} A WOQLQuery which contains the subtraction expression
* @example
* evaluate(minus(2.1, plus(0.2, 1)), "v:result")
*/
WOQL.minus = function(...args) {
return new WOQLQuery().minus(...args)
}
/**
*
* Multiplies numbers N1...Nn together
* @param {...(string |number)} args - a variable or numeric containing the value
* @returns {WOQLQuery} A WOQLQuery which contains the multiplication expression
* @example
* evaluate(times(10, minus(2.1, plus(0.2, 1))), "v:result")
 * //result contains 9.000000000000002
*/
WOQL.times = function(...args) {
return new WOQLQuery().times(...args)
}
/**
*
* Divides numbers N1...Nn by each other left, to right precedence
 * @param {...(string |number)} args - numbers to be divided
 * @returns {WOQLQuery} A WOQLQuery which contains the division expression
 * @example
 * evaluate(divide(times(10, minus(2.1, plus(0.2, 1))), 10), "v:result")
* //result contains 0.9000000000000001
*/
WOQL.divide = function(...args) {
return new WOQLQuery().divide(...args)
}
/**
*
* Division - integer division - args are divided left to right
* @param {...(string |number)} args - numbers for division
* @returns {WOQLQuery} A WOQLQuery which contains the division expression
* @example
* let [result] = vars("result")
* evaluate(div(10, 3), result)
* //result contains 3
*/
WOQL.div = function(...args) {
return new WOQLQuery().div(...args)
}
/**
*
 * Exponent - raises varNum to the power of expNum
* @param {string | number} varNum - a variable or numeric containing the number to be raised to the power of the second number
* @param {number} expNum - a variable or numeric containing the exponent
* @returns {WOQLQuery} A WOQLQuery which contains the exponent expression
* @example
* evaluate(exp(3, 2), "v:result")
* //result contains 9
*/
WOQL.exp = function(varNum, expNum) {
return new WOQLQuery().exp(varNum, expNum)
}
/**
*
* Generates the nearest lower integer to the passed number
* @param {string | number} varNum - Variable or numeric containing the number to be floored
* @returns {WOQLQuery} A WOQLQuery which contains the floor expression
* @example
* let [result] = vars("result")
* evaluate(divide(floor(times(10, minus(2.1, plus(0.2, 1)))), 10), result)
* //result contains 0.9 - floating point error removed
*/
WOQL.floor = function(varNum) {
return new WOQLQuery().floor(varNum)
}
/**
*
* Tests whether a given instance IRI has type Class, according to the current state of the DB
* @param {string} instanceIRI - A string IRI or a variable that identify the class instance
* @param {string} classId - A Class IRI or a variable
* @returns {WOQLQuery} A WOQLQuery object containing the type test
* @example
* let [subject] = vars("subject")
* isa(subject, "Person")
*/
WOQL.isa = function(instanceIRI, classId) {
return new WOQLQuery().isa(instanceIRI, classId)
}
/**
*
 * Generates a Levenshtein distance measure between stringA and stringB
* @param {string} stringA - string literal or variable representing a string to be compared
* @param {string} stringB - string literal or variable representing the other string to be compared
* @param {number | string} distance - variable representing the distance between the variables
* @returns {WOQLQuery} A WOQLQuery which contains the Like pattern matching expression
* @example
* let [dist] = vars('dist')
* like("hello", "hallo", dist)
* //dist contains 0.7265420560747664
*/
WOQL.like = function(stringA, stringB, distance) {
return new WOQLQuery().like(stringA, stringB, distance)
}
/**
*
* Compares the value of v1 against v2 and returns true if v1 is less than v2
* @param {string | number} varNum01 - a variable or numeric containing the number to be compared
 * @param {string | number} varNum02 - a variable or numeric containing the second comparator
* @returns {WOQLQuery} A WOQLQuery which contains the comparison expression
* @example
* less(1, 1.1).eq("v:result", literal(true, "boolean"))
* //result contains true
*/
WOQL.less = function(varNum01, varNum02) {
return new WOQLQuery().less(varNum01, varNum02)
}
/**
*
* Compares the value of v1 against v2 and returns true if v1 is greater than v2
* @param {string | number} varNum01 - a variable or numeric containing the number to be compared
 * @param {string | number} varNum02 - a variable or numeric containing the second comparator
* @returns {WOQLQuery} A WOQLQuery which contains the comparison expression
* @example
* greater(1.2, 1.1).eq("v:result", literal(true, "boolean"))
* //result contains true
*/
WOQL.greater = function(varNum01, varNum02) {
return new WOQLQuery().greater(varNum01, varNum02)
}
/**
*
* Specifies that the Subquery is optional - if it does not match the query will not fail
* @param {WOQLQuery} [subquery] - A subquery which will be optionally matched
* @returns {WOQLQuery} A WOQLQuery object containing the optional sub Query
* @example
* let [subject] = vars("subject")
* opt(triple(subject, 'label', "A"))
* //Subq is an argument or a chained query
* opt().triple(subject, 'label', "A")
*/
WOQL.opt = function(subquery) {
return new WOQLQuery().opt(subquery)
}
WOQL.optional = WOQL.opt
/**
*
* Generate a new IRI from the prefix and a hash of the variables which will be unique for any given combination of variables
* @param {string} prefix - A prefix for the IRI - typically formed of the doc prefix and the classtype of the entity (“doc:Person”)
* @param {array | string} inputVarList - An array of variables and / or strings from which the unique hash will be generated
* @param {string} resultVarName - Variable in which the unique ID is stored
* @returns {WOQLQuery} A WOQLQuery object containing the unique ID generating function
* @example
* unique("doc:Person", ["John", "Smith"], "v:newid")
*/
WOQL.unique = function(prefix, inputVarList, resultVarName) {
return new WOQLQuery().unique(prefix, inputVarList, resultVarName)
}
/**
*
 * Generate a new IRI from the prefix and concatenation of the variables
* @param {string} prefix - A prefix for the IRI - typically formed of the doc prefix and the classtype of the entity (“doc:Person”)
* @param {array | string} inputVarList - An array of variables and / or strings from which the unique hash will be generated
* @param {string} resultVarName - Variable in which the unique ID is stored
* @returns {WOQLQuery} A WOQLQuery object containing the ID generating function
* @example
* let [newid] = vars("newid")
* idgen("doc:Person", ["John", "Smith"], newid)
*/
WOQL.idgen = function(prefix, inputVarList, resultVarName) {
return new WOQLQuery().idgen(prefix, inputVarList, resultVarName)
}
WOQL.idgenerator = WOQL.idgen
/**
*
* Changes a string to upper-case
* @param {string} inputVarName - string or variable representing the uncapitalized string
* @param {string} resultVarName - variable that stores the capitalized string output
* @returns {WOQLQuery} A WOQLQuery which contains the Upper case pattern matching expression
* @example
* upper("aBCe", "v:allcaps")
* //upper contains "ABCE"
*/
WOQL.upper = function(inputVarName, resultVarName) {
return new WOQLQuery().upper(inputVarName, resultVarName)
}
/**
*
* Changes a string to lower-case
* @param {string} inputVarName - string or variable representing the non-lowercased string
* @param {string} resultVarName - variable that stores the lowercased string output
* @returns {WOQLQuery} A WOQLQuery which contains the Lower case pattern matching expression
* @example
 * let [lowered] = vars("lowered")
 * lower("aBCe", lowered)
 * //lowered contains "abce"
*/
WOQL.lower = function(inputVarName, resultVarName) {
return new WOQLQuery().lower(inputVarName, resultVarName)
}
/**
*
* Pads out the string input to be exactly len long by appending the pad character pad to form output
* @param {string} inputVarName - The input string or variable in unpadded state
* @param {string} pad - The characters to use to pad the string or a variable representing them
* @param {number | string} len - The variable or integer value representing the length of the output string
* @param {string} resultVarName - stores output
* @returns {WOQLQuery} A WOQLQuery which contains the Pad pattern matching expression
* @example
* let [fixed] = vars("fixed length")
* pad("joe", " ", 8, fixed)
* //fixed contains "joe "
*/
WOQL.pad = function(inputVarName, pad, len, resultVarName) {
return new WOQLQuery().pad(inputVarName, pad, len, resultVarName)
}
/**
* Splits a string (Input) into a list strings (Output) by removing separator
* @param {string} inputVarName - A string or variable representing the unsplit string
 * @param {string} separator - A string or variable containing a sequence of characters to use as a separator
* @param {string} resultVarName - variable that stores output list
* @returns {WOQLQuery} A WOQLQuery which contains the Split pattern matching expression
* @example
* split("joe has a hat", " ", "v:words")
*/
WOQL.split = function(inputVarName, separator, resultVarName) {
return new WOQLQuery().split(inputVarName, separator, resultVarName)
}
/**
* Matches if List includes Element
* @param {string | object} element - Either a variable, IRI or any simple datatype
 * @param {string | array} list - Either a variable representing a list, or a list of variables or literals
* @returns {WOQLQuery} A WOQLQuery which contains the List inclusion pattern matching expression
* @example
* let [name] = vars("name")
* member("name", ["john", "joe", "frank"])
*/
WOQL.member = function(element, list) {
return new WOQLQuery().member(element, list)
}
/**
*
* takes a variable number of string arguments and concatenates them into a single string
* @param {array | string} varList - a variable representing a list or a list of variables or strings - variables can be embedded in the string if they do not contain spaces
* @param {string} resultVarName - A variable or string containing the output string
* @returns {WOQLQuery} A WOQLQuery which contains the Concatenation pattern matching expression
* @example
* concat(["v:first_name", " ", "v:last_name"], "v:full_name")
* WOQL.concat(["first_name", " ", "last_name"], "full_name")
* //both versions work
*/
WOQL.concat = function(varList, resultVarName) {
return new WOQLQuery().concat(varList, resultVarName)
}
/**
*
* Joins a list variable together (Input) into a string variable (Output) by glueing the strings together with Glue
* @param {string | array} varList - a variable representing a list or a list of strings and / or variables
* @param {string} glue - A variable (v:glue) or (glue) string representing the characters to put in between the joined strings in input
* @param {string} resultVarName - A variable or string containing the output string
* @returns {WOQLQuery} A WOQLQuery which contains the Join pattern matching expression
* @example
* join(["joe", "has", "a", "hat", " ", "v:sentence")
*/
WOQL.join = function(varList, glue, resultVarName) {
return new WOQLQuery().join(varList, glue, resultVarName)
}
/**
* computes the sum of the List of values passed. In contrast to other arithmetic functions, sum self-evaluates - it does not have to be passed to evaluate()
 * @param {WOQLQuery | array} subquery - a subquery, a list variable, or a list of variables or numeric literals
 * @param {string} total - the variable name with the sum result of the values in List
* @returns {WOQLQuery} - A WOQLQuery which contains the Sum expression
* @example
* sum([2, 3, 4, 5], "v:total")
*/
WOQL.sum = function(subquery, total) {
return new WOQLQuery().sum(subquery, total)
}
/**
*
* Specifies an offset position in the results to start listing results from
 * @param {number | string} start - A variable that refers to an integer or an integer literal
* @param {WOQLQuery} [subquery] - WOQL Query object, you can pass a subquery as an argument or a chained query
* @returns {WOQLQuery} A WOQLQuery whose results will be returned starting from the specified offset
* @example
* let [a, b, c] = vars("a", "b", "c")
* start(100).triple(a, b, c)
*/
WOQL.start = function(start, subquery) {
return new WOQLQuery().start(start, subquery)
}
/**
*
* Specifies a maximum number of results that will be returned from the subquery
 * @param {number | string} limit - A variable that refers to a non-negative integer, or a non-negative integer literal
* @param {WOQLQuery} [subquery] - A subquery whose results will be limited
* @returns {WOQLQuery} A WOQLQuery whose results will be returned starting from the specified offset
* @example
* let [a, b, c] = vars("a", "b", "c")
* limit(100).triple(a, b, c)
* //subquery is an argument or a chained query
* limit(100,triple(a, b, c))
*/
WOQL.limit = function(limit, subquery) {
return new WOQLQuery().limit(limit, subquery)
}
/**
*
 * Matches the regular expression defined in Pattern against the Test string, to produce the matched patterns in Matches
* @param {string} pattern - string or variable using normal PCRE regular expression syntax with the exception that special characters have to be escaped twice (to enable transport in JSONLD)
* @param {string} inputVarName - string or variable containing the string to be tested for patterns with the regex
* @param {string | array | object} resultVarList - variable representing the list of matches or a list of strings or variables
* @returns {WOQLQuery} A WOQLQuery which contains the Regular Expression pattern matching expression
* @example
* WOQL.re("h(.).*", "hello", ["v:All", "v:Sub"])
* //e contains 'e', llo contains 'llo'
* //p is a regex pattern (.*) using normal regular expression syntax, the only unusual thing is that special characters have to be escaped twice, s is the string to be matched and m is a list of matches:
*/
WOQL.re = function(pattern, inputVarName, resultVarList) {
return new WOQLQuery().re(pattern, inputVarName, resultVarList)
}
WOQL.regexp = WOQL.re
/**
*
* Calculates the length of the list in va and stores it in vb
* @param {string | array} inputVarList - Either a variable representing a list or a list of variables or literals
* @param {string} resultVarName - A variable in which the length of the list is stored or the length of the list as a non-negative integer
* @returns {WOQLQuery} A WOQLQuery which contains the Length pattern matching expression
* @example
* let [count] = vars("count")
* length(["john", "joe", "frank"], count)
*/
WOQL.length = function(inputVarList, resultVarName) {
return new WOQLQuery().length(inputVarList, resultVarName)
}
/**
*
* Logical negation of the contained subquery - if the subquery matches, the query will fail to match
* @param {string | WOQLQuery} [subquery] - A subquery which will be negated
* @returns {WOQLQuery} A WOQLQuery object containing the negated sub Query
* @example
* let [subject, label] = vars("subject", "label")
* not().triple(subject, 'label', label)
*/
WOQL.not = function(subquery) {
return new WOQLQuery().not(subquery)
}
/**
* Results in one solution of the subqueries
* @param {string| WOQLQuery } [subquery] - WOQL Query objects
* @returns {WOQLQuery} A WOQLQuery object containing the once sub Query
*/
WOQL.once = function(subquery) {
return new WOQLQuery().once(subquery)
}
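// Usage sketch: return at most one solution of the wrapped pattern.
// WOQL.once(WOQL.triple("v:a", "v:b", "v:c"))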
/**
* Runs the query without backtracking on side-effects
* @param {string| WOQLQuery } [subquery] - WOQL Query objects
* @returns {WOQLQuery} A WOQLQuery object containing the immediately sub Query
*/
WOQL.immediately = function(subquery) {
return new WOQLQuery().immediately(subquery)
}
/**
* Creates a count of the results of the query
* @param {string | number} countVarName - variable or integer count
* @param {WOQLQuery} [subquery]
* @returns {WOQLQuery} A WOQLQuery object containing the count sub Query
* @example
* WOQL.count("v:count").triple("v:Person","type","scm:Person")
*/
WOQL.count = function(countVarName, subquery) {
return new WOQLQuery().count(countVarName, subquery)
}
/**
*
* Casts the value of Input to a new value of type Type and stores the result in CastVar
* @param {string | number | object} varName - Either a single variable or a literal of any basic type
* @param {string} varType - Either a variable or a basic datatype (xsd / xdd)
* @param {string} resultVarName - save the return variable
* @returns {WOQLQuery} A WOQLQuery which contains the casting expression
* @example
* cast("22/3/98", "xsd:dateTime", "v:time")
*/
WOQL.typecast = function(varName, varType, resultVarName) {
return new WOQLQuery().typecast(varName, varType, resultVarName)
}
WOQL.cast = WOQL.typecast
/**
* Orders the results of the contained subquery by a precedence list of variables
* @param {...string} varNames - A sequence of variables, by which to order the results, each optionally followed by either “asc” or “desc” to represent order
* @returns {WOQLQuery} A WOQLQuery which contains the ordering expression
* @example
* WOQL.order_by("v:A", "v:B asc", "v:C desc").triple("v:A", "v:B", "v:C");
*/
WOQL.order_by = function(...varNames) {
return new WOQLQuery().order_by(...varNames)
}
/**
*
* Groups the results of the contained subquery on the basis of identical values for Groupvars, extracts the patterns defined in PatternVars and stores the results in GroupedVar
* @param {array | string} varList - Either a single variable or an array of variables
* @param {array | string} patternVars - Either a single variable or an array of variables
* @param {string} resultVarName - output variable name
* @param {WOQLQuery} [subquery] - The query whose results will be grouped
* @returns {WOQLQuery} A WOQLQuery which contains the grouping expression
* @example
* //subquery is an argument or a chained query
* let [age, last_name, first_name, age_group, person] = vars("age", "last name", "first name", "age group", "person")
* group_by(age, [last_name, first_name], age_group)
* .triple(person, "first_name", first_name)
* .triple(person, "last_name", last_name)
* .triple(person, "age", age)
*/
WOQL.group_by = function(varList, patternVars, resultVarName, subquery) {
return new WOQLQuery().group_by(varList, patternVars, resultVarName, subquery)
}
/**
*
* A function that always matches, always returns true
* @returns {WOQLQuery} A WOQLQuery object containing the true value that will match any pattern
* @example
* when(true()).triple("a", "b", "c")
*/
WOQL.true = function() {
return new WOQLQuery().true()
}
/**
*
* Performs a path regular expression match on the graph
* @param {string} subject - An IRI or variable that refers to an IRI representing the subject, i.e. the starting point of the path
 * @param {string} pattern - A path regular expression describing a pattern through multiple edges of the graph
* Path regular expressions consist of a sequence of predicates and / or a set of alternatives, with quantification operators
* The characters that are interpreted specially are the following:
 * | - representing alternative choices
 * , - representing a sequence of predicates
 * + - representing a quantification of 1 or more of the preceding pattern in a sequence
* {min, max} - Representing at least min examples and at most max examples of the preceding pattern
* - Representing any predicate
* () - Parentheses, interpreted in the normal way to group clauses
* @param {string} object - An IRI or variable that refers to an IRI representing the object, i.e. ending point of the path
* @param {string} resultVarName - A variable in which the actual paths traversed will be stored
* @returns {WOQLQuery} - A WOQLQuery which contains the path regular expression matching expression
* @example
* let [person, grand_uncle, lineage] = vars("person", "grand uncle", "lineage")
 * path(person, "((father|mother){2,2}),brother", grand_uncle, lineage)
*/
WOQL.path = function(subject, pattern, object, resultVarName) {
return new WOQLQuery().path(subject, pattern, object, resultVarName)
}
/**
*
* Calculates the size in bytes of the contents of the resource identified in ResourceID
* @param {string} resourceId - A valid resource identifier string (can refer to any graph / branch / commit / db)
 * @param {string} resultVarName - The variable name in which the size is stored
 * @returns {WOQLQuery} A WOQLQuery which contains the size expression
* @example
* size("admin/minecraft/local/branch/main/instance/main", "v:varSize")
* //returns the number of bytes in the main instance graph on the main branch
*/
WOQL.size = function(resourceId, resultVarName) {
return new WOQLQuery().size(resourceId, resultVarName)
}
/**
*
* Calculates the number of triples of the contents of the resource identified in ResourceID
* @param {string} resourceId - A valid resource identifier string (can refer to any graph / branch / commit / db)
 * @param {string | number} tripleCount - An integer literal with the number of triples or a variable in which that count is stored
* @returns {WOQLQuery} A WOQLQuery which contains the size expression
* @example
* triple_count("admin/minecraft/local/_commits", "v:count")
 * //returns the number of triples in the local commit graph
*/
WOQL.triple_count = function(resourceId, tripleCount) {
return new WOQLQuery().triple_count(resourceId, tripleCount)
}
/**
*
* Returns true if 'elementId' is of type 'elementType', according to the current DB schema
* @param {string} elementId - the id of a schema graph element
* @param {string} elementType - the element type
* @returns {WOQLQuery} A WOQLQuery object containing the type_of pattern matching rule
*
*/
WOQL.type_of = function(elementId, elementType) {
return new WOQLQuery().type_of(elementId, elementType)
}
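// Usage sketch: bind v:type to the type of a schema element, or test a
// given pairing when both arguments are bound.
// WOQL.type_of("v:element", "v:type")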
/**
*
 * Generates a query that by default matches all triples in the graph identified by "graph", or in all graphs of the current TerminusDB database
 * @param {string | boolean} [graph] - false or the resource identifier of a graph; possible values are schema/{main - myschema - *} | instance/{main - myschema - *} | inference/{main - myschema - *}
* @param {string} [subject] - The IRI of a triple’s subject or a variable, default value "v:Subject"
* @param {string} [predicate] - The IRI of a property or a variable, default value "v:Predicate"
* @param {string} [object] - The IRI of a node or a variable, or a literal, default value "v:Object"
* @returns {WOQLQuery} A WOQLQuery which contains the pattern matching expression
* @example
* star("schema/main")
* //will return every triple in schema/main graph
*/
WOQL.star = function(graph, subject, predicate, object) {
return new WOQLQuery().star(graph, subject, predicate, object)
}
/**
*
* Generates a query that by default matches all triples in a graph - identical to star() except for order of arguments
* @param {string} [subject] - The IRI of a triple’s subject or a variable
* @param {string} [predicate] - The IRI of a property or a variable
* @param {string} [object] - The IRI of a node or a variable, or a literal
 * @param {typedef.GraphRef} [graphRef] - the resource identifier of a graph; possible values are schema/{main - myschema - *} | instance/{main - myschema - *} | inference/{main - myschema - *}
 * @returns {WOQLQuery} - A WOQLQuery which contains the pattern matching expression
 * @example
 * all("mydoc")
* //will return every triple in the instance/main graph that has "doc:mydoc" as its subject
*/
WOQL.all = function(subject, predicate, object, graphRef) {
return new WOQLQuery().all(subject, predicate, object, graphRef)
}
/**
*
* Specifies the identity of a node that can then be used in subsequent builder functions. Note that node() requires subsequent chained functions to complete the triples / quads that it produces - by itself it only generates the subject.
* @param {string} nodeid - The IRI of a node or a variable containing an IRI which will be the subject of the builder functions
* @param {typedef.FuntionType} [chainType] - Optional type of builder function to build (default is triple)
* @returns {WOQLQuery} - A WOQLQuery which contains the partial Node pattern matching expression
* @example
* node("mydoc").label("my label")
* //equivalent to triple("mydoc", "label", "my label")
*/
WOQL.node = function(nodeid, chainType) {
return new WOQLQuery().node(nodeid, chainType)
}
//These ones are special ones for dealing with the schema only...
/**
* Inserts a single triple into the database declaring the Node to have type Type, optionally into the specified graph
* @param {string} classId - IRI string or variable containing the IRI of the node to be inserted
* @param {string} classType - IRI string or variable containing the IRI of the type of the node (class/document name)
* @param {typedef.GraphRef} [graphRef] - Optional Graph resource identifier
* @returns {WOQLQuery} A WOQLQuery which contains the insert expression
* @example
* insert("mydoc", "MyType")
* //equivalent to add_triple("mydoc", "type", "MyType")
*/
WOQL.insert = function(classId, classType, graphRef) {
return new WOQLQuery().insert(classId, classType, graphRef)
}
/**
* Sets the graph resource ID that will be used for subsequent chained function calls
* @param {typedef.GraphRef} [graphRef] Resource String identifying the graph which will be used for subsequent chained schema calls
* @returns {WOQLQuery} A WOQLQuery which contains the partial Graph pattern matching expression
* @example
* WOQL.graph("schema")
* //equivalent to add_quad("MyClass", "label", "My Class Label", "schema/main")
*/
WOQL.graph = function(graphRef) {
return new WOQLQuery().graph(graphRef)
}
//to be reviewed
/**
* Deletes all triples in the passed graph (defaults to instance/main)
* @param {typedef.GraphRef} [graphRef] - Resource String identifying the graph from which all triples will be removed
* @returns {WOQLQuery} - A WOQLQuery which contains the deletion expression
* @example
* nuke("schema/main")
* //will delete everything from the schema/main graph
*/
WOQL.nuke = function(graphRef) {
return new WOQLQuery().nuke(graphRef)
}
/**
* Generates an empty WOQLQuery object
* @returns {WOQLQuery}
* @example
* let q = query()
 * //then q.triple("v:a", "v:b", "v:c") ...
*/
WOQL.query = function() {
return new WOQLQuery()
}
/**
* Generates a WOQLQuery object from the passed WOQL JSON - if an argument is passed, the query object is created from it, if none is passed, the current state is returned as a JSON-LD
* @param {object} [JSON_LD] - JSON-LD woql document encoding a query
 * @returns {WOQLQuery | object} either a WOQLQuery object or the JSON-LD version of the query for passing to the API
 */
WOQL.json = function(JSON_LD) {
return new WOQLQuery().json(JSON_LD)
}
/**
* get the predefined library query [WOQLLibrary](/api/woqlLibrary.js?id=WOQLLibrary)
* @returns {WOQLQuery} WOQLQuery object
* @example
* //get commits older than the specified commit id
* const query = WOQL.lib().previousCommits('m8vpxewh2aovfauebfkbzwmj4qwr5lb')
*
* //return the commits of a specific branch starting from the head
 * //you can add the limit (how many results to return) and the start point
* //if a timestamp is given, gets the commits before the specified timestamp
* //WOQL.lib().commits(branch='main',limit=0,start=0,timestamp=0)
*
* const query = WOQL.lib().commits('main',10,2,1630683082.9278786)
*
* //return the branches list with the timestamp and commits id
* const query = WOQL.lib().branches()
*/
WOQL.lib = function() {
return new WOQLQuery().lib()
}
/**
* Generates explicitly a JSON-LD string literal from the input
* @param {string | boolean | number} val - any primitive literal type
* @returns {object} - A JSON-LD string literal
* @example
* string(1)
* //returns { "@type": "xsd:string", "@value": "1" }
*/
WOQL.string = function(val) {
return new WOQLQuery().string(val)
}
/**
 * Generates explicitly a JSON-LD literal of the given type from the input
* @param {string} val - any literal type
* @param {string} type - an xsd or xdd type
* @returns {object} - A JSON-LD literal
* @example
* literal(1, "nonNegativeInteger")
* //returns { "@type": "xsd:nonNegativeInteger", "@value": 1 }
*/
WOQL.literal = function(val, type) {
return new WOQLQuery().literal(val, type)
}
/**
* Explicitly sets a value to be an IRI - avoiding automatic type marshalling
* @param {string} val string which will be treated as an IRI
* @returns {object} - A JSON-LD IRI value
*/
WOQL.iri = function(val) {
return new WOQLQuery().iri(val)
}
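// Usage sketch: force "doc:john" to be treated as an IRI rather than a
// string literal when used as a triple object.
// WOQL.triple("v:person", "friend", WOQL.iri("doc:john"))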
/**
* Generates javascript variables for use as WOQL variables within a query
* @param {...string} varNames
* @returns {array} an array of javascript variables which can be dereferenced using the array destructuring operation
* @example
* const [a, b, c] = WOQL.vars("a", "b", "c")
* //a, b, c are javascript variables which can be used as WOQL variables in subsequent queries
*/
WOQL.vars = function(...varNames) {
return varNames.map(item => 'v:' + item)
}
/**
* Gets/Sets woqlClient
 * @param {WOQLClient} [client] - a WOQLClient instance to set
* @returns {WOQLClient}
*/
WOQL.client = function(client) {
if (client) this._client = client
return this._client
}
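// Usage sketch (assumes an existing WOQLClient instance named myClient):
// WOQL.client(myClient) // set
// const c = WOQL.client() // get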
/**
*
 * query module
 * allows you to use WOQL words as top level functions
 * @param {boolean} [auto_eval] - if truthy, the generated function definitions are evaluated via eval
*/
WOQL.emerge = function(auto_eval) {
let unemerged = ['emerge', 'true', 'eval']
function _emerge_str(k) {
let str = `func | ..args){
return WOQL.${k}(...args)
}`
return str
}
let funcs = []
for (var k in this) {
if (typeof this[k] == 'function') {
if (unemerged.indexOf(k) == -1) {
funcs.push(_emerge_str(k))
}
}
}
let str = funcs.join(';\n')
if (auto_eval) eval(str)
return str
}
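// Usage sketch: eval the returned definitions in the calling scope so WOQL
// words become top-level functions (note this relies on eval):
// eval(WOQL.emerge())
// triple("v:a", "v:b", "v:c") // now shorthand for WOQL.triple(...)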
/**
* Update a pattern matching rule for the triple (Subject, Predicate, oldObjValue) with the new one (Subject, Predicate, newObjValue)
* @param {string} subject - The IRI of a triple’s subject or a variable
* @param {string} predicate - The IRI of a property or a variable
* @param {string} newObjValue - The value to update or a literal
* @param {string} oldObjValue - The old value of the object
 * @returns {WOQLQuery} A WOQLQuery which contains the Update Triple Statement
*/
WOQL.update_triple = function(subject, predicate, newObjValue, oldObjValue) {
return new WOQLQuery().update_triple(subject, predicate, newObjValue, oldObjValue)
}
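// Usage sketch: swap the old object value for the new one in a single update.
// WOQL.update_triple("doc:john", "label", "Johnny", "v:old_label")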
/**
* Update a pattern matching rule for the quad [S, P, O, G] (Subject, Predicate, Object, Graph)
* @param {string} subject - The IRI of a triple’s subject or a variable
* @param {string} predicate - The IRI of a property or a variable
* @param {string} newObject - The value to update or a literal
* @param {typedef.GraphRef} graphRef - A valid graph resource identifier string
 * @returns {WOQLQuery} A WOQLQuery which contains the Update Quad Statement
*/
WOQL.update_quad = function(subject, predicate, newObject, graphRef) {
return new WOQLQuery().update_quad(subject, predicate, newObject, graphRef)
}
/**
* Creates a pattern matching rule for a quad [Subject, Predicate, Object, Graph] or for a triple [Subject, Predicate, Object]
* add extra information about the type of the value object
* @param {string} subject - The IRI of a triple’s subject or a variable
* @param {string} predicate - The IRI of a property or a variable
 * @param {string | number | boolean} objValue - a specific value
 * @param {typedef.GraphRef} [graphRef] - specify a graph type (schema|instance), default is instance
 * @returns {WOQLQuery} A WOQLQuery which contains a quad or a triple Statement
*/
WOQL.value = function(subject, predicate, objValue, graphRef) {
return new WOQLQuery().value(subject, predicate, objValue, graphRef)
}
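// Usage sketch: match or write a concrete datatype value directly; the
// library records the type of the value (a number, 42, here).
// WOQL.value("doc:john", "age", 42)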
/**
* Creates a pattern matching rule for a quad [Subject, Predicate, Object, Graph] or for a triple [Subject, Predicate, Object]
* @param {string} subject - The IRI of a triple’s subject or a variable
* @param {string} predicate - The IRI of a property or a variable
* @param {string} object - The IRI of a node or a variable, or a literal
 * @param {typedef.GraphRef} [graphRef] - specify a graph type (schema|instance), default is instance
 * @returns {WOQLQuery} A WOQLQuery which contains a quad or a triple Statement
*/
WOQL.link = function(subject, predicate, object, graphRef) {
return new WOQLQuery().link(subject, predicate, object, graphRef)
}
module.exports = WOQL
| tion ${k}(. |
plugin_interface_type.rs | /*
* Ory APIs
*
* Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers.
*
* The version of the OpenAPI document: v0.0.1-alpha.17
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
/// PluginInterfaceType : PluginInterfaceType plugin interface type
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PluginInterfaceType {
/// capability
#[serde(rename = "Capability")]
pub capability: String,
/// prefix
#[serde(rename = "Prefix")]
pub prefix: String,
/// version
#[serde(rename = "Version")]
pub version: String,
}
impl PluginInterfaceType {
/// PluginInterfaceType plugin interface type
pub fn new(capability: String, prefix: String, version: String) -> PluginInterfaceType |
}
| {
PluginInterfaceType {
capability,
prefix,
version,
}
} |
version.py | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2020 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Version information for Invenio-I18N.
This file is imported by ``invenio_i18n.__init__``,
and parsed by ``setup.py``.
""" |
from __future__ import absolute_import, print_function
__version__ = '1.2.0' | |
stepper-mover-1-curses.py | #!/usr/bin/python
# from raspberrypi-spy.co.uk
import sys
import time
import RPi.GPIO as GPIO
import curses
# set up curses
stdscr = curses.initscr()
#curses.noecho()
curses.cbreak()
stdscr.keypad(1)
# use BCM GPIO refs
GPIO.setmode(GPIO.BCM)
# define pins
#StepPins = [17, 22, 23, 24]
#StepPins = [35, 36, 37, 38] # order on board
#StepPins = [19, 16, 26, 20]
# motor 2
##StepPins = [19, 6, 16, 12]
# motor 1
#StepPins = [17, 18, 22, 23] # GPIO numbering
#StepPins = [18, 17, 22, 23] # GPIO numbering
# lined up on left of header
StepPins = [19, 13, 6, 5]
# and on right down to last, skipping ground on phys 34
StepPins2 = [12, 16, 20, 21]
# set all pins as output
for pin in StepPins:
#print "Setup pins"
stdscr.addstr(2,2, "Setup pins " + str(pin))
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, False)
#define sequence for halfstepping
HalfSeq = [[1,0,0,1],
[1,0,0,0],
[1,1,0,0],
[0,1,0,0], | [0,0,1,1],
[0,0,0,1]]
# full stepping
FullSeq = [[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1]]
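# Half-stepping uses 8 coil states per cycle for double the angular
# resolution (at reduced torque); full stepping uses 4. The 'h'/'f'
# keys in the main loop switch between the two tables at runtime.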
Seq = FullSeq
StepCount = len(Seq)
StepDir = 1 # positive clockwise, negative counterclockwise
# read wait time from command line
if len(sys.argv) > 1:
WaitTime = int(sys.argv[1])/float(1000)
else:
WaitTime = 10/float(1000)
# init
StepCounter = 0
# main loop
#while True:
key = ''
while key != ord('q'):
key = stdscr.getch()
stdscr.addch(2, 25, key)
stdscr.refresh()
if key == curses.KEY_UP:
StepDir = 1
elif key == curses.KEY_DOWN:
StepDir = -1
if key == ord('h'):
# go to half steps
Seq = HalfSeq
#print len(HalfSeq)
StepCount = len(Seq)
elif key == ord('f'):
Seq = FullSeq
#print("full is " + str(len(Seq)))
StepCount = len(Seq)
# fewer steps in full than half, so chop down
if (StepCounter >= StepCount):
StepCounter = 0
#print "counting ",
#print StepCounter,
#print Seq[StepCounter]
stdscr.addstr(3, 3, "counting " + str(StepCounter) + " " + str(Seq[StepCounter]))
# 4 pins
for pin in range(0, 4):
xpin = StepPins[pin]
if Seq[StepCounter][pin] != 0:
#print " enable GPIO %i" % (xpin)
stdscr.addstr(4 + pin, 4, "enable GPIO %i" % (xpin))
GPIO.output(xpin, True)
else:
GPIO.output(xpin, False)
StepCounter += StepDir
# if end, start again
if (StepCounter >= StepCount):
StepCounter = 0
if (StepCounter < 0):
StepCounter = StepCount + StepDir
time.sleep(WaitTime)
GPIO.cleanup()
# curses cleanup
curses.nocbreak(); stdscr.keypad(0)
#curses.echo()
curses.endwin() | [0,1,1,0],
[0,0,1,0], |
transport.pb.validate.go | // Code generated by protoc-gen-validate. DO NOT EDIT.
// source: envoy/data/tap/v2alpha/transport.proto
package envoy_data_tap_v2alpha
import (
"bytes"
"errors"
"fmt"
"net"
"net/mail"
"net/url"
"regexp"
"strings"
"time"
"unicode/utf8"
"github.com/gogo/protobuf/types"
)
// ensure the imports are used
var (
_ = bytes.MinRead
_ = errors.New("")
_ = fmt.Print
_ = utf8.UTFMax
_ = (*regexp.Regexp)(nil)
_ = (*strings.Reader)(nil)
_ = net.IPv4len
_ = time.Duration(0)
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = types.DynamicAny{}
)
// define the regex for a UUID once up-front
var _transport_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
// Validate checks the field values on Connection with the rules defined in the
// proto definition for this message. If any rules are violated, an error is returned.
func (m *Connection) Validate() error {
if m == nil {
return nil
}
{
tmp := m.GetLocalAddress()
if v, ok := interface{}(tmp).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return ConnectionValidationError{
field: "LocalAddress",
reason: "embedded message failed validation",
cause: err,
}
}
}
}
{
tmp := m.GetRemoteAddress()
if v, ok := interface{}(tmp).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return ConnectionValidationError{
field: "RemoteAddress",
reason: "embedded message failed validation",
cause: err,
}
}
}
}
return nil
}
// ConnectionValidationError is the validation error returned by
// Connection.Validate if the designated constraints aren't met.
type ConnectionValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e ConnectionValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e ConnectionValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e ConnectionValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e ConnectionValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e ConnectionValidationError) ErrorName() string { return "ConnectionValidationError" }
// Error satisfies the builtin error interface
func (e ConnectionValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sConnection.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = ConnectionValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = ConnectionValidationError{}
// Validate checks the field values on SocketEvent with the rules defined in
// the proto definition for this message. If any rules are violated, an error
// is returned.
func (m *SocketEvent) Validate() error {
if m == nil {
return nil
}
{
tmp := m.GetTimestamp()
if v, ok := interface{}(tmp).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return SocketEventValidationError{
field: "Timestamp",
reason: "embedded message failed validation",
cause: err,
}
}
}
}
switch m.EventSelector.(type) {
case *SocketEvent_Read_:
{
tmp := m.GetRead()
if v, ok := interface{}(tmp).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return SocketEventValidationError{
field: "Read",
reason: "embedded message failed validation",
cause: err,
}
}
}
}
case *SocketEvent_Write_:
{
tmp := m.GetWrite()
if v, ok := interface{}(tmp).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return SocketEventValidationError{
field: "Write",
reason: "embedded message failed validation",
cause: err,
}
}
}
}
case *SocketEvent_Closed_:
{
tmp := m.GetClosed()
if v, ok := interface{}(tmp).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return SocketEventValidationError{
field: "Closed",
reason: "embedded message failed validation",
cause: err,
}
}
}
}
}
return nil
}
// SocketEventValidationError is the validation error returned by
// SocketEvent.Validate if the designated constraints aren't met.
type SocketEventValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e SocketEventValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e SocketEventValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e SocketEventValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e SocketEventValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e SocketEventValidationError) ErrorName() string { return "SocketEventValidationError" }
// Error satisfies the builtin error interface
func (e SocketEventValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sSocketEvent.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = SocketEventValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = SocketEventValidationError{}
// Validate checks the field values on SocketBufferedTrace with the rules
// defined in the proto definition for this message. If any rules are
// violated, an error is returned.
func (m *SocketBufferedTrace) Validate() error {
if m == nil {
return nil
}
// no validation rules for TraceId
{
tmp := m.GetConnection()
if v, ok := interface{}(tmp).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return SocketBufferedTraceValidationError{
field: "Connection",
reason: "embedded message failed validation",
cause: err,
}
}
}
}
for idx, item := range m.GetEvents() {
_, _ = idx, item
{
tmp := item
if v, ok := interface{}(tmp).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return SocketBufferedTraceValidationError{
field: fmt.Sprintf("Events[%v]", idx),
reason: "embedded message failed validation",
cause: err,
}
}
}
}
}
// no validation rules for ReadTruncated
// no validation rules for WriteTruncated
return nil
}
// SocketBufferedTraceValidationError is the validation error returned by
// SocketBufferedTrace.Validate if the designated constraints aren't met.
type SocketBufferedTraceValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e SocketBufferedTraceValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e SocketBufferedTraceValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e SocketBufferedTraceValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e SocketBufferedTraceValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e SocketBufferedTraceValidationError) ErrorName() string {
return "SocketBufferedTraceValidationError"
}
// Error satisfies the builtin error interface
func (e SocketBufferedTraceValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sSocketBufferedTrace.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = SocketBufferedTraceValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = SocketBufferedTraceValidationError{}
// Validate checks the field values on SocketStreamedTraceSegment with the
// rules defined in the proto definition for this message. If any rules are
// violated, an error is returned.
func (m *SocketStreamedTraceSegment) Validate() error {
if m == nil {
return nil
}
// no validation rules for TraceId
switch m.MessagePiece.(type) {
case *SocketStreamedTraceSegment_Connection:
{
tmp := m.GetConnection()
if v, ok := interface{}(tmp).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return SocketStreamedTraceSegmentValidationError{
field: "Connection",
reason: "embedded message failed validation",
cause: err,
}
}
}
}
case *SocketStreamedTraceSegment_Event:
{
tmp := m.GetEvent()
if v, ok := interface{}(tmp).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return SocketStreamedTraceSegmentValidationError{
field: "Event",
reason: "embedded message failed validation",
cause: err,
}
}
}
}
}
return nil
}
// SocketStreamedTraceSegmentValidationError is the validation error returned
// by SocketStreamedTraceSegment.Validate if the designated constraints aren't met.
type SocketStreamedTraceSegmentValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e SocketStreamedTraceSegmentValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e SocketStreamedTraceSegmentValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e SocketStreamedTraceSegmentValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e SocketStreamedTraceSegmentValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e SocketStreamedTraceSegmentValidationError) ErrorName() string {
return "SocketStreamedTraceSegmentValidationError"
}
// Error satisfies the builtin error interface
func (e SocketStreamedTraceSegmentValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sSocketStreamedTraceSegment.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = SocketStreamedTraceSegmentValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = SocketStreamedTraceSegmentValidationError{}
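// SocketStreamedTraceSegment carries a protobuf oneof (message_piece), so
// Validate above dispatches on the concrete wrapper type and validates at
// most one embedded message per segment. An illustrative construction,
// assuming the standard generated wrapper field names:
//
//	seg := &SocketStreamedTraceSegment{
//		MessagePiece: &SocketStreamedTraceSegment_Event{Event: &SocketEvent{}},
//	}
//	err := seg.Validate() // only the Event branch is checked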
// Validate checks the field values on SocketEvent_Read with the rules defined
// in the proto definition for this message. If any rules are violated, an
// error is returned.
func (m *SocketEvent_Read) Validate() error {
if m == nil {
return nil
}
{
tmp := m.GetData()
if v, ok := interface{}(tmp).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return SocketEvent_ReadValidationError{
field: "Data",
reason: "embedded message failed validation",
cause: err,
}
}
}
}
return nil
}
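// The pattern above recurs throughout this file: an embedded message is
// type-asserted against the duck-typed interface{ Validate() error } rather
// than a concrete type, so validation descends only into messages that have
// their own generated Validate method, and any failure is wrapped in a
// *ValidationError carrying the enclosing field name for context.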
// SocketEvent_ReadValidationError is the validation error returned by
// SocketEvent_Read.Validate if the designated constraints aren't met.
type SocketEvent_ReadValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e SocketEvent_ReadValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e SocketEvent_ReadValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e SocketEvent_ReadValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e SocketEvent_ReadValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e SocketEvent_ReadValidationError) ErrorName() string { return "SocketEvent_ReadValidationError" }
// Error satisfies the builtin error interface
func (e SocketEvent_ReadValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sSocketEvent_Read.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = SocketEvent_ReadValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = SocketEvent_ReadValidationError{}
// Validate checks the field values on SocketEvent_Write with the rules defined
// in the proto definition for this message. If any rules are violated, an
// error is returned.
func (m *SocketEvent_Write) Validate() error {
if m == nil {
return nil
}
{
tmp := m.GetData()
if v, ok := interface{}(tmp).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return SocketEvent_WriteValidationError{
field: "Data",
reason: "embedded message failed validation",
cause: err,
}
}
}
}
// no validation rules for EndStream
return nil
}
// SocketEvent_WriteValidationError is the validation error returned by
// SocketEvent_Write.Validate if the designated constraints aren't met.
type SocketEvent_WriteValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e SocketEvent_WriteValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e SocketEvent_WriteValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e SocketEvent_WriteValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e SocketEvent_WriteValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e SocketEvent_WriteValidationError) ErrorName() string {
return "SocketEvent_WriteValidationError"
}
// Error satisfies the builtin error interface
func (e SocketEvent_WriteValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sSocketEvent_Write.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = SocketEvent_WriteValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = SocketEvent_WriteValidationError{}
// Validate checks the field values on SocketEvent_Closed with the rules
// defined in the proto definition for this message. If any rules are
// violated, an error is returned.
func (m *SocketEvent_Closed) Validate() error {
if m == nil {
return nil
}
return nil
}
// SocketEvent_ClosedValidationError is the validation error returned by
// SocketEvent_Closed.Validate if the designated constraints aren't met.
type SocketEvent_ClosedValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e SocketEvent_ClosedValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e SocketEvent_ClosedValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e SocketEvent_ClosedValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e SocketEvent_ClosedValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e SocketEvent_ClosedValidationError) ErrorName() string {
return "SocketEvent_ClosedValidationError"
}
// Error satisfies the builtin error interface
func (e SocketEvent_ClosedValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sSocketEvent_Closed.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = SocketEvent_ClosedValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = SocketEvent_ClosedValidationError{}
| {
if err := v.Validate(); err != nil {
return SocketEvent_WriteValidationError{
field: "Data",
reason: "embedded message failed validation",
cause: err,
}
}
} |