extended_kalman_filter.py
from math import cos, sin, atan2, exp

import numpy as np

from heading_range_robot.parameters import *


class EKF:
    def __init__(self, sample_period):
        self._change_t = sample_period
        self.mean_belief = np.vstack((INITIAL_X, INITIAL_Y, INITIAL_THETA))
        self.covariance_belief = np.eye(3)
        # Measurement noise covariance (range and bearing variances on the diagonal).
        self.Qt = np.diag((STD_DEV_LOCATION_RANGE**2, STD_DEV_LOCATION_BEARING**2))
        self.all_features = np.vstack((LANDMARK_1_LOCATION, LANDMARK_2_LOCATION, LANDMARK_3_LOCATION))

    def prediction_step(self, theta_prev, vc, wc):
        change_t = self._change_t
        theta = theta_prev
        # Jacobian of the motion model with respect to the state at x_{t-1}.
        Gt = np.array([
            [1, 0, -vc/wc*cos(theta) + vc/wc*cos(theta + wc*change_t)],
            [0, 1, -vc/wc*sin(theta) + vc/wc*sin(theta + wc*change_t)],
            [0, 0, 1]])
        # Jacobian that maps noise in control space to state space; the entries
        # are the partial derivatives of the motion update below with respect
        # to vc and wc.
        Vt = np.array([
            [(-sin(theta) + sin(theta + wc*change_t))/wc,
             vc*(sin(theta) - sin(theta + wc*change_t))/(wc**2) + (vc*cos(theta + wc*change_t)*change_t)/wc],
            [(cos(theta) - cos(theta + wc*change_t))/wc,
             -vc*(cos(theta) - cos(theta + wc*change_t))/(wc**2) + (vc*sin(theta + wc*change_t)*change_t)/wc],
            [0, change_t]])
        # Control noise covariance.
        Mt = np.array([
            [ALPHA1*vc**2 + ALPHA2*wc**2, 0],
            [0, ALPHA3*vc**2 + ALPHA4*wc**2]])
        self.mean_belief = self.mean_belief + np.array([
            [-vc/wc*sin(theta) + vc/wc*sin(theta + wc*change_t)],
            [vc/wc*cos(theta) - vc/wc*cos(theta + wc*change_t)],
            [wc*change_t]])
        self.covariance_belief = Gt @ self.covariance_belief @ Gt.T + Vt @ Mt @ Vt.T

    def measurement_step(self, true_state):
        Qt = self.Qt
        for feature in self.all_features:
            f_x, f_y = feature[0], feature[1]
            mean_x, mean_y, mean_theta = self.mean_belief.flatten()
            # Range and bearing predicted from the mean belief.
            q = (f_x - mean_x)**2 + (f_y - mean_y)**2
            zti = np.array([
                [np.sqrt(q)],
                [np.arctan2(f_y - mean_y, f_x - mean_x) - mean_theta]])
            measurement = simulate_measurement(true_state, f_x, f_y)
            Ht = np.array([
                [-(f_x - mean_x)/np.sqrt(q), -(f_y - mean_y)/np.sqrt(q), 0],
                [(f_y - mean_y)/q, -(f_x - mean_x)/q, -1]])
            covariance_belief = self.covariance_belief
            mean_belief = self.mean_belief
            St = Ht @ covariance_belief @ Ht.T + Qt
            Kt = covariance_belief @ Ht.T @ np.linalg.inv(St)
            self.mean_belief = mean_belief + Kt @ (measurement - zti)
            self.covariance_belief = (np.eye(len(Kt)) - Kt @ Ht) @ covariance_belief
            self.kt = Kt
            # pzt = np.linalg.det(2*pi*St)**(-1/2) @ exp(-1/2*(zti - measurement[index]).T @ np.linalg.inv(St) @ (zti - measurement[index]))


def simulate_measurement(true_state, f_x, f_y):
    true_x, true_y, true_theta = np.asarray(true_state).flatten()
    q = (f_x - true_x)**2 + (f_y - true_y)**2
    zt = np.array([
        [np.sqrt(q)],
        [np.arctan2(f_y - true_y, f_x - true_x) - true_theta]])
    return zt + np.vstack((range_measurement_noise(), bearing_measurement_noise()))


def range_measurement_noise():
    return np.random.normal(0, STD_DEV_LOCATION_RANGE)


def bearing_measurement_noise():
    return np.random.normal(0, STD_DEV_LOCATION_BEARING)
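A minimal smoke-test loop for this filter, assuming the constants exported by heading_range_robot.parameters are importable; the sample period, the commanded velocities, and the noise-free true-state propagation below are made up for illustration.

import numpy as np

dt = 0.1
ekf = EKF(dt)
true_state = ekf.mean_belief.copy()
vc, wc = 1.0, 0.5  # hypothetical commanded linear and angular velocity

for _ in range(10):
    # Propagate the (noise-free) true state with the same unicycle model.
    theta = true_state[2, 0]
    true_state += np.array([
        [-vc/wc*np.sin(theta) + vc/wc*np.sin(theta + wc*dt)],
        [vc/wc*np.cos(theta) - vc/wc*np.cos(theta + wc*dt)],
        [wc*dt]])
    ekf.prediction_step(ekf.mean_belief[2, 0], vc, wc)
    ekf.measurement_step(true_state)

print(ekf.mean_belief.flatten(), np.diag(ekf.covariance_belief))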
form.py
from wtforms import Form
from wtforms import StringField
from wtforms import IntegerField
from wtforms.validators import DataRequired


class EmailForm(Form):
    name = StringField('name', validators=[DataRequired()])
    email = StringField('email', validators=[DataRequired()])


class LoginForm(Form):
    username = StringField('username', validators=[DataRequired()])
    password = StringField('password', validators=[DataRequired()])
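For reference, a short sketch of how these forms consume request data; the field values are hypothetical.

from werkzeug.datastructures import MultiDict

# Validate a login form against made-up request data.
form = LoginForm(formdata=MultiDict({'username': 'alice', 'password': ''}))
print(form.validate())  # False: password is required
print(form.errors)      # {'password': ['This field is required.']}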
interrupt.rs
//! Interrupt handling.
use crate::{
    arch::{enable_irq, SpinLock},
    net::process_packets,
};
use alloc::boxed::Box;
use alloc::collections::BTreeMap;

// TODO: Use a simple array for faster access.
static IRQ_HANDLERS: SpinLock<BTreeMap<u8, Box<dyn FnMut() + Send + Sync>>> =
    SpinLock::new(BTreeMap::new());

pub fn attach_irq<F: FnMut() + Send + Sync + 'static>(irq: u8, f: F) {
    IRQ_HANDLERS.lock().insert(irq, Box::new(f));
    enable_irq(irq);
}

pub fn handle_irq(irq: u8) {
    if let Some(handler) = IRQ_HANDLERS.lock().get_mut(&irq) {
        (*handler)();
        process_packets();
    }
}
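attach_irq and handle_irq together form a lock-protected registry of per-IRQ callbacks. A minimal Python sketch of the same registry pattern, with all names hypothetical and the architecture-specific enable_irq hook stubbed out:

import threading

_irq_handlers = {}
_irq_lock = threading.Lock()

def attach_irq(irq, handler):
    # Register the callback, then unmask the line (stubbed here).
    with _irq_lock:
        _irq_handlers[irq] = handler
    print(f"enable_irq({irq})")  # stand-in for the arch hook

def handle_irq(irq):
    with _irq_lock:
        handler = _irq_handlers.get(irq)
    if handler is not None:
        handler()

attach_irq(4, lambda: print("serial interrupt"))
handle_irq(4)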
job_queue.rs
use std::collections::{HashMap, HashSet};
use std::io;
use std::marker;
use std::process::Output;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Arc;

use crossbeam_utils::thread::Scope;
use jobserver::{Acquired, HelperThread};
use log::{debug, info, trace};

use super::context::OutputFile;
use super::job::{
    Freshness::{self, Dirty, Fresh},
    Job,
};
use super::{BuildContext, BuildPlan, CompileMode, Context, Unit};
use crate::core::{PackageId, TargetKind};
use crate::handle_error;
use crate::util;
use crate::util::diagnostic_server::{self, DiagnosticPrinter};
use crate::util::{internal, profile, CargoResult, CargoResultExt, ProcessBuilder};
use crate::util::{Config, DependencyQueue};
use crate::util::{Progress, ProgressStyle};

/// A management structure of the entire dependency graph to compile.
///
/// This structure is backed by the `DependencyQueue` type and manages the
/// actual compilation step of each package. Packages enqueue units of work and
/// then later on the entire graph is processed and compiled.
pub struct JobQueue<'a, 'cfg> {
    queue: DependencyQueue<Unit<'a>, Job>,
    tx: Sender<Message>,
    rx: Receiver<Message>,
    active: HashMap<u32, Unit<'a>>,
    compiled: HashSet<PackageId>,
    documented: HashSet<PackageId>,
    counts: HashMap<PackageId, usize>,
    is_release: bool,
    progress: Progress<'cfg>,
    next_id: u32,
}

pub struct JobState<'a> {
    tx: Sender<Message>,

    // Historical versions of Cargo made use of the `'a` argument here, so to
    // leave the door open to future refactorings keep it here.
    _marker: marker::PhantomData<&'a ()>,
}

enum Message {
    Run(String),
    BuildPlanMsg(String, ProcessBuilder, Arc<Vec<OutputFile>>),
    Stdout(String),
    Stderr(String),
    FixDiagnostic(diagnostic_server::Message),
    Token(io::Result<Acquired>),
    Finish(u32, CargoResult<()>),
}

impl<'a> JobState<'a> {
    pub fn running(&self, cmd: &ProcessBuilder) {
        let _ = self.tx.send(Message::Run(cmd.to_string()));
    }

    pub fn build_plan(
        &self,
        module_name: String,
        cmd: ProcessBuilder,
        filenames: Arc<Vec<OutputFile>>,
    ) {
        let _ = self
            .tx
            .send(Message::BuildPlanMsg(module_name, cmd, filenames));
    }

    pub fn capture_output(
        &self,
        cmd: &ProcessBuilder,
        prefix: Option<String>,
        capture_output: bool,
    ) -> CargoResult<Output> {
        let prefix = prefix.unwrap_or_else(String::new);
        cmd.exec_with_streaming(
            &mut |out| {
                let _ = self.tx.send(Message::Stdout(format!("{}{}", prefix, out)));
                Ok(())
            },
            &mut |err| {
                let _ = self.tx.send(Message::Stderr(format!("{}{}", prefix, err)));
                Ok(())
            },
            capture_output,
        )
    }
}

impl<'a, 'cfg> JobQueue<'a, 'cfg> {
    pub fn new(bcx: &BuildContext<'a, 'cfg>) -> JobQueue<'a, 'cfg> {
        let (tx, rx) = channel();
        let progress = Progress::with_style("Building", ProgressStyle::Ratio, bcx.config);
        JobQueue {
            queue: DependencyQueue::new(),
            tx,
            rx,
            active: HashMap::new(),
            compiled: HashSet::new(),
            documented: HashSet::new(),
            counts: HashMap::new(),
            is_release: bcx.build_config.release,
            progress,
            next_id: 0,
        }
    }

    pub fn enqueue(
        &mut self,
        cx: &Context<'a, 'cfg>,
        unit: &Unit<'a>,
        job: Job,
    ) -> CargoResult<()> {
        let dependencies = cx.dep_targets(unit);
        let dependencies = dependencies
            .iter()
            .filter(|unit| {
                // Binaries aren't actually needed to *compile* tests, just to run
                // them, so we don't include this dependency edge in the job graph.
                !unit.target.is_test() || !unit.target.is_bin()
            })
            .cloned()
            .collect::<Vec<_>>();
        self.queue.queue(unit, job, &dependencies);
        *self.counts.entry(unit.pkg.package_id()).or_insert(0) += 1;
        Ok(())
    }

    /// Executes all jobs necessary to build the dependency graph.
    ///
    /// This function will spawn off `config.jobs()` workers to build all of the
    /// necessary dependencies, in order. Freshness is propagated as far as
    /// possible along each dependency chain.
    pub fn execute(&mut self, cx: &mut Context<'_, '_>, plan: &mut BuildPlan) -> CargoResult<()> {
        let _p = profile::start("executing the job graph");
        self.queue.queue_finished();

        // Create a helper thread for acquiring jobserver tokens
        let tx = self.tx.clone();
        let helper = cx
            .jobserver
            .clone()
            .into_helper_thread(move |token| {
                drop(tx.send(Message::Token(token)));
            })
            .chain_err(|| "failed to create helper thread for jobserver management")?;

        // Create a helper thread to manage the diagnostics for rustfix if
        // necessary.
        let tx = self.tx.clone();
        let _diagnostic_server = cx
            .bcx
            .build_config
            .rustfix_diagnostic_server
            .borrow_mut()
            .take()
            .map(move |srv| srv.start(move |msg| drop(tx.send(Message::FixDiagnostic(msg)))));

        // Use `crossbeam` to create a scope in which we can execute scoped
        // threads. Note that this isn't currently required by Cargo but it was
        // historically required. This is left in for now in case we need the
        // `'a` ability for child threads in the near future, but if this
        // comment has been sitting here for a long time feel free to refactor
        // away crossbeam.
        crossbeam_utils::thread::scope(|scope| self.drain_the_queue(cx, plan, scope, &helper))
            .expect("child threads shouldn't panic")
    }

    fn drain_the_queue(
        &mut self,
        cx: &mut Context<'_, '_>,
        plan: &mut BuildPlan,
        scope: &Scope<'a>,
        jobserver_helper: &HelperThread,
    ) -> CargoResult<()> {
        let mut tokens = Vec::new();
        let mut queue = Vec::new();
        let mut print = DiagnosticPrinter::new(cx.bcx.config);
        trace!("queue: {:#?}", self.queue);

        // Iteratively execute the entire dependency graph. Each turn of the
        // loop starts out by scheduling as much work as possible (up to the
        // maximum number of parallel jobs we have tokens for). A local queue
        // is maintained separately from the main dependency queue as one
        // dequeue may actually dequeue quite a bit of work (e.g., 10 binaries
        // in one package).
        //
        // After a job has finished we update our internal state if it was
        // successful and otherwise wait for pending work to finish if it failed
        // and then immediately return.
        let mut error = None;
        let total = self.queue.len();
        loop {
            // Dequeue as much work as we can, learning about everything
            // possible that can run. Note that this is also the point where we
            // start requesting job tokens. Each job after the first needs to
            // request a token.
            while let Some((unit, job)) = self.queue.dequeue() {
                queue.push((unit, job));
                if self.active.len() + queue.len() > 1 {
                    jobserver_helper.request_token();
                }
            }

            // Now that we've learned of all possible work that we can execute
            // try to spawn it so long as we've got a jobserver token which says
            // we're able to perform some parallel work.
            while error.is_none() && self.active.len() < tokens.len() + 1 && !queue.is_empty() {
                let (unit, job) = queue.remove(0);
                self.run(&unit, job, cx, scope)?;
            }

            // If after all that we're not actually running anything then we're
            // done!
            if self.active.is_empty() {
                break;
            }

            // And finally, before we block waiting for the next event, drop any
            // excess tokens we may have accidentally acquired. Due to how our
            // jobserver interface is architected we may acquire a token that we
            // don't actually use, and if this happens just relinquish it back
            // to the jobserver itself.
            tokens.truncate(self.active.len() - 1);

            // Drain all events at once to avoid displaying the progress bar
            // unnecessarily.
            let events: Vec<_> = self.rx.try_iter().collect();
            let events = if events.is_empty() {
                self.show_progress(total);
                vec![self.rx.recv().unwrap()]
            } else {
                events
            };

            for event in events {
                match event {
                    Message::Run(cmd) => {
                        cx.bcx
                            .config
                            .shell()
                            .verbose(|c| c.status("Running", &cmd))?;
                    }
                    Message::BuildPlanMsg(module_name, cmd, filenames) => {
                        plan.update(&module_name, &cmd, &filenames)?;
                    }
                    Message::Stdout(out) => {
                        self.progress.clear();
                        println!("{}", out);
                    }
                    Message::Stderr(err) => {
                        let mut shell = cx.bcx.config.shell();
                        shell.print_ansi(err.as_bytes())?;
                        shell.err().write_all(b"\n")?;
                    }
                    Message::FixDiagnostic(msg) => {
                        print.print(&msg)?;
                    }
                    Message::Finish(id, result) => {
                        let unit = self.active.remove(&id).unwrap();
                        info!("end: {:?}", unit);

                        if !self.active.is_empty() {
                            assert!(!tokens.is_empty());
                            drop(tokens.pop());
                        }
                        match result {
                            Ok(()) => self.finish(&unit, cx)?,
                            Err(e) => {
                                let msg = "The following warnings were emitted during compilation:";
                                self.emit_warnings(Some(msg), &unit, cx)?;

                                if !self.active.is_empty() {
                                    error = Some(failure::format_err!("build failed"));
                                    handle_error(&e, &mut *cx.bcx.config.shell());
                                    cx.bcx.config.shell().warn(
                                        "build failed, waiting for other \
                                         jobs to finish...",
                                    )?;
                                } else {
                                    error = Some(e);
                                }
                            }
                        }
                    }
                    Message::Token(acquired_token) => {
                        tokens.push(
                            acquired_token.chain_err(|| "failed to acquire jobserver token")?,
                        );
                    }
                }
            }
        }
        self.progress.clear();

        let build_type = if self.is_release { "release" } else { "dev" };
        // NOTE: this may be a bit inaccurate, since this may not display the
        // profile for what was actually built. Profile overrides can change
        // these settings, and in some cases different targets are built with
        // different profiles. To be accurate, it would need to collect a
        // list of Units built, and maybe display a list of the different
        // profiles used. However, to keep it simple and compatible with old
        // behavior, we just display what the base profile is.
        let profile = cx.bcx.profiles.base_profile(self.is_release);
        let mut opt_type = String::from(if profile.opt_level.as_str() == "0" {
            "unoptimized"
        } else {
            "optimized"
        });
        if profile.debuginfo.is_some() {
            opt_type += " + debuginfo";
        }

        let time_elapsed = util::elapsed(cx.bcx.config.creation_time().elapsed());

        if self.queue.is_empty() {
            let message = format!(
                "{} [{}] target(s) in {}",
                build_type, opt_type, time_elapsed
            );
            if !cx.bcx.build_config.build_plan {
                cx.bcx.config.shell().status("Finished", message)?;
            }
            Ok(())
        } else if let Some(e) = error {
            Err(e)
        } else {
            debug!("queue: {:#?}", self.queue);
            Err(internal("finished with jobs still left in the queue"))
        }
    }

    fn show_progress(&mut self, total: usize) {
        let count = total - self.queue.len();
        let active_names = self
            .active
            .values()
            .map(|u| self.name_for_progress(u))
            .collect::<Vec<_>>();
        drop(
            self.progress
                .tick_now(count, total, &format!(": {}", active_names.join(", "))),
        );
    }

    fn name_for_progress(&self, unit: &Unit<'_>) -> String {
        let pkg_name = unit.pkg.name();
        match unit.mode {
            CompileMode::Doc { .. } => format!("{}(doc)", pkg_name),
            CompileMode::RunCustomBuild => format!("{}(build)", pkg_name),
            _ => {
                let annotation = match unit.target.kind() {
                    TargetKind::Lib(_) => return pkg_name.to_string(),
                    TargetKind::CustomBuild => return format!("{}(build.rs)", pkg_name),
                    TargetKind::Bin => "bin",
                    TargetKind::Test => "test",
                    TargetKind::Bench => "bench",
                    TargetKind::ExampleBin | TargetKind::ExampleLib(_) => "example",
                };
                format!("{}({})", unit.target.name(), annotation)
            }
        }
    }

    /// Executes a job in the `scope` given, pushing the spawned thread's
    /// handle onto `threads`.
    fn run(
        &mut self,
        unit: &Unit<'a>,
        job: Job,
        cx: &Context<'_, '_>,
        scope: &Scope<'a>,
    ) -> CargoResult<()> {
        info!("start: {:?}", unit);

        let id = self.next_id;
        self.next_id = id.checked_add(1).unwrap();
        assert!(self.active.insert(id, *unit).is_none());
        *self.counts.get_mut(&unit.pkg.package_id()).unwrap() -= 1;

        let my_tx = self.tx.clone();
        let fresh = job.freshness();
        let doit = move || {
            let res = job.run(&JobState {
                tx: my_tx.clone(),
                _marker: marker::PhantomData,
            });
            my_tx.send(Message::Finish(id, res)).unwrap();
        };

        if !cx.bcx.build_config.build_plan {
            // Print out some nice progress information.
            self.note_working_on(cx.bcx.config, unit, fresh)?;
        }

        match fresh {
            Freshness::Fresh => doit(),
            Freshness::Dirty => {
                scope.spawn(move |_| doit());
            }
        }

        Ok(())
    }

    fn emit_warnings(
        &mut self,
        msg: Option<&str>,
        unit: &Unit<'a>,
        cx: &mut Context<'_, '_>,
    ) -> CargoResult<()> {
        let output = cx.build_state.outputs.lock().unwrap();
        let bcx = &mut cx.bcx;
        if let Some(output) = output.get(&(unit.pkg.package_id(), unit.kind)) {
            if !output.warnings.is_empty() {
                if let Some(msg) = msg {
                    writeln!(bcx.config.shell().err(), "{}\n", msg)?;
                }

                for warning in output.warnings.iter() {
                    bcx.config.shell().warn(warning)?;
                }

                if msg.is_some() {
                    // Output an empty line.
                    writeln!(bcx.config.shell().err())?;
                }
            }
        }

        Ok(())
    }

    fn finish(&mut self, unit: &Unit<'a>, cx: &mut Context<'_, '_>) -> CargoResult<()> {
        if unit.mode.is_run_custom_build() && cx.bcx.show_warnings(unit.pkg.package_id()) {
            self.emit_warnings(None, unit, cx)?;
        }
        self.queue.finish(unit);
        Ok(())
    }

    // This isn't super trivial because we don't want to print loads and
    // loads of information to the console, but we also want to produce a
    // faithful representation of what's happening. This is somewhat nuanced
    // as a package can start compiling *very* early on because of custom
    // build commands and such.
    //
    // In general, we try to print "Compiling" for the first nontrivial task
    // run for a package, regardless of when that is. We then don't print
    // out any more information for a package after we've printed it once.
    fn note_working_on(
        &mut self,
        config: &Config,
        unit: &Unit<'a>,
        fresh: Freshness,
    ) -> CargoResult<()> {
        if (self.compiled.contains(&unit.pkg.package_id()) && !unit.mode.is_doc())
            || (self.documented.contains(&unit.pkg.package_id()) && unit.mode.is_doc())
        {
            return Ok(());
        }

        match fresh {
            // Any dirty stage which runs at least one command gets printed as
            // being a compiled package.
            Dirty => {
                if unit.mode.is_doc() {
                    // Skip doc test.
                    if !unit.mode.is_any_test() {
                        self.documented.insert(unit.pkg.package_id());
                        config.shell().status("Documenting", unit.pkg)?;
                    }
                } else {
                    self.compiled.insert(unit.pkg.package_id());
                    if unit.mode.is_check() {
                        config.shell().status("Checking", unit.pkg)?;
                    } else {
                        config.shell().status("Compiling", unit.pkg)?;
                    }
                }
            }
            Fresh => {
                // If doc tests are last, only print "Fresh" if nothing has been printed.
                if self.counts[&unit.pkg.package_id()] == 0
                    && !(unit.mode == CompileMode::Doctest
                        && self.compiled.contains(&unit.pkg.package_id()))
                {
                    self.compiled.insert(unit.pkg.package_id());
                    config.shell().verbose(|c| c.status("Fresh", unit.pkg))?;
                }
            }
        }
        Ok(())
    }
}
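The loop in drain_the_queue alternates between scheduling ready work (bounded by the jobserver tokens held) and blocking on the next completion event. A stripped-down Python sketch of that control flow, with one thread per job standing in for the jobserver and all names hypothetical:

import queue
import threading

def drain(jobs, deps, max_parallel=4):
    # jobs: {name: callable}; deps: {name: set of prerequisite names}.
    done = set()
    active = {}
    events = queue.Queue()

    def ready():
        return [n for n in jobs
                if n not in done and n not in active and deps[n] <= done]

    while len(done) < len(jobs):
        # Schedule as much ready work as our "tokens" allow.
        for name in ready():
            if len(active) >= max_parallel:
                break

            def run(n=name):
                jobs[n]()
                events.put(n)

            active[name] = threading.Thread(target=run)
            active[name].start()
        # Block until some job finishes, then retire it.
        finished = events.get()
        active.pop(finished).join()
        done.add(finished)

order = []
jobs = {n: (lambda n=n: order.append(n)) for n in "abc"}
drain(jobs, {"a": set(), "b": {"a"}, "c": {"a"}})
print(order)  # "a" always runs before "b" and "c"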
test_homography.py
import random

import pytest
import torch
from torch.autograd import gradcheck

import kornia
from kornia.geometry.homography import find_homography_dlt, find_homography_dlt_iterated
from kornia.testing import assert_close


class TestFindHomographyDLT:
    def test_smoke(self, device, dtype):
        points1 = torch.rand(1, 4, 2, device=device, dtype=dtype)
        points2 = torch.rand(1, 4, 2, device=device, dtype=dtype)
        weights = torch.ones(1, 4, device=device, dtype=dtype)
        H = find_homography_dlt(points1, points2, weights)
        assert H.shape == (1, 3, 3)

    @pytest.mark.parametrize("batch_size, num_points", [(1, 4), (2, 5), (3, 6)])
    def test_shape(self, batch_size, num_points, device, dtype):
        B, N = batch_size, num_points
        points1 = torch.rand(B, N, 2, device=device, dtype=dtype)
        points2 = torch.rand(B, N, 2, device=device, dtype=dtype)
        weights = torch.ones(B, N, device=device, dtype=dtype)
        H = find_homography_dlt(points1, points2, weights)
        assert H.shape == (B, 3, 3)

    @pytest.mark.parametrize("batch_size, num_points", [(1, 4), (2, 5), (3, 6)])
    def test_shape_noweights(self, batch_size, num_points, device, dtype):
        B, N = batch_size, num_points
        points1 = torch.rand(B, N, 2, device=device, dtype=dtype)
        points2 = torch.rand(B, N, 2, device=device, dtype=dtype)
        H = find_homography_dlt(points1, points2, None)
        assert H.shape == (B, 3, 3)

    @pytest.mark.parametrize("batch_size, num_points", [(1, 4), (2, 5), (3, 6)])
    def test_points_noweights(self, batch_size, num_points, device, dtype):
        B, N = batch_size, num_points
        points1 = torch.rand(B, N, 2, device=device, dtype=dtype)
        points2 = torch.rand(B, N, 2, device=device, dtype=dtype)
        weights = torch.ones(B, N, device=device, dtype=dtype)
        H_noweights = find_homography_dlt(points1, points2, None)
        H_withweights = find_homography_dlt(points1, points2, weights)
        assert H_noweights.shape == (B, 3, 3) and H_withweights.shape == (B, 3, 3)
        assert_close(H_noweights, H_withweights, rtol=1e-3, atol=1e-4)

    @pytest.mark.parametrize("batch_size", [1, 2, 5])
    def test_clean_points(self, batch_size, device, dtype):
        # generate input data
        points_src = torch.rand(batch_size, 10, 2, device=device, dtype=dtype)
        H = kornia.eye_like(3, points_src)
        H = H * 0.3 * torch.rand_like(H)
        H = H / H[:, 2:3, 2:3]

        points_dst = kornia.transform_points(H, points_src)
        weights = torch.ones(batch_size, 10, device=device, dtype=dtype)

        # compute transform from source to target
        dst_homo_src = find_homography_dlt(points_src, points_dst, weights)

        assert_close(kornia.transform_points(dst_homo_src, points_src), points_dst, rtol=1e-3, atol=1e-4)

    @pytest.mark.grad
    @pytest.mark.skipif(torch.__version__ < '1.7', reason="pytorch bug of incompatible types: #33546 fixed in v1.7")
    def test_gradcheck(self, device):
        # Save initial seed
        initial_seed = torch.random.initial_seed()
        max_number_of_checks = 10

        # Test gradients for a max_number_of_checks times
        current_seed = initial_seed
        for i in range(max_number_of_checks):
            torch.manual_seed(current_seed)
            points_src = torch.rand(1, 10, 2, device=device, dtype=torch.float64, requires_grad=True)
            points_dst = torch.rand_like(points_src)
            weights = torch.ones_like(points_src)[..., 0]
            try:
                gradcheck(
                    find_homography_dlt, (points_src, points_dst, weights), rtol=1e-6, atol=1e-6, raise_exception=True
                )
            # Gradcheck failed
            except RuntimeError:
                # All iterations failed
                if i == max_number_of_checks - 1:
                    assert gradcheck(
                        find_homography_dlt,
                        (points_src, points_dst, weights),
                        rtol=1e-6,
                        atol=1e-6,
                        raise_exception=True,
                    )
                # Next iteration
                else:
                    current_seed = random.randrange(0xFFFFFFFFFFFFFFFF)
                    continue
            # Gradcheck succeeded
            torch.manual_seed(initial_seed)
            return


class TestFindHomographyDLTIter:
    def test_smoke(self, device, dtype):
        points1 = torch.rand(1, 4, 2, device=device, dtype=dtype)
        points2 = torch.rand(1, 4, 2, device=device, dtype=dtype)
        weights = torch.ones(1, 4, device=device, dtype=dtype)
        H = find_homography_dlt_iterated(points1, points2, weights, 5)
        assert H.shape == (1, 3, 3)

    @pytest.mark.parametrize("batch_size, num_points", [(1, 4), (2, 5), (3, 6)])
    def test_shape(self, batch_size, num_points, device, dtype):
        B, N = batch_size, num_points
        points1 = torch.rand(B, N, 2, device=device, dtype=dtype)
        points2 = torch.rand(B, N, 2, device=device, dtype=dtype)
        weights = torch.ones(B, N, device=device, dtype=dtype)
        H = find_homography_dlt_iterated(points1, points2, weights, 5)
        assert H.shape == (B, 3, 3)

    @pytest.mark.parametrize("batch_size", [1, 2])
    def test_clean_points(self, batch_size, device, dtype):
        # generate input data
        points_src = torch.rand(batch_size, 10, 2, device=device, dtype=dtype)
        H = kornia.eye_like(3, points_src)
        H = H * 0.3 * torch.rand_like(H)
        H = H / H[:, 2:3, 2:3]

        points_dst = kornia.transform_points(H, points_src)
        weights = torch.ones(batch_size, 10, device=device, dtype=dtype)

        # compute transform from source to target
        dst_homo_src = find_homography_dlt_iterated(points_src, points_dst, weights, 10)

        assert_close(kornia.transform_points(dst_homo_src, points_src), points_dst, rtol=1e-3, atol=1e-4)

    @pytest.mark.grad
    @pytest.mark.skipif(torch.__version__ < '1.7', reason="pytorch bug of incompatible types: #33546 fixed in v1.7")
    def test_gradcheck(self, device):
        # Save initial seed
        initial_seed = torch.random.initial_seed()
        max_number_of_checks = 10

        # Test gradients for a max_number_of_checks times
        current_seed = initial_seed
        for i in range(max_number_of_checks):
            torch.manual_seed(current_seed)
            points_src = torch.rand(1, 10, 2, device=device, dtype=torch.float64, requires_grad=True)
            points_dst = torch.rand_like(points_src)
            weights = torch.ones_like(points_src)[..., 0]
            try:
                gradcheck(
                    find_homography_dlt_iterated,
                    (points_src, points_dst, weights),
                    rtol=1e-6,
                    atol=1e-6,
                    raise_exception=True,
                )
            # Gradcheck failed
            except RuntimeError:
                # All iterations failed
                if i == max_number_of_checks - 1:
                    assert gradcheck(
                        find_homography_dlt_iterated,
                        (points_src, points_dst, weights),
                        rtol=1e-6,
                        atol=1e-6,
                        raise_exception=True,
                    )
                # Next iteration
                else:
                    current_seed = random.randrange(0xFFFFFFFFFFFFFFFF)
                    continue
            # Gradcheck succeeded
            torch.manual_seed(initial_seed)
            return

    @pytest.mark.grad
    @pytest.mark.parametrize("batch_size", [1, 2])
    def test_dirty_points_and_gradcheck(self, batch_size, device, dtype):
        # generate input data
        points_src = torch.rand(batch_size, 10, 2, device=device, dtype=dtype)
        H = kornia.eye_like(3, points_src)
        H = H * 0.3 * torch.rand_like(H)
        H = H / H[:, 2:3, 2:3]

        points_src = 100.0 * torch.rand(batch_size, 20, 2, device=device, dtype=dtype)
        points_dst = kornia.transform_points(H, points_src)

        # making last point an outlier
        points_dst[:, -1, :] += 20

        weights = torch.ones(batch_size, 20, device=device, dtype=dtype)

        # compute transform from source to target
        dst_homo_src = find_homography_dlt_iterated(points_src, points_dst, weights, 0.5, 10)

        assert_close(
            kornia.transform_points(dst_homo_src, points_src[:, :-1]), points_dst[:, :-1], rtol=1e-3, atol=1e-3
        )
sessionresolver.go
// Package sessionresolver contains the resolver used by the session. This
// resolver uses Powerdns DoH by default and falls back on the system
// provided resolver if Powerdns DoH is not working.
package sessionresolver

import (
    "context"
    "time"

    "github.com/ooni/probe-engine/atomicx"
    "github.com/ooni/probe-engine/internal/runtimex"
    "github.com/ooni/probe-engine/netx"
)

// Resolver is the session resolver.
type Resolver struct {
    Primary         netx.DNSClient
    PrimaryFailure  *atomicx.Int64
    Fallback        netx.DNSClient
    FallbackFailure *atomicx.Int64
}

// New creates a new session resolver.
func New(config netx.Config) *Resolver {
    primary, err := netx.NewDNSClient(config, "doh://powerdns")
    runtimex.PanicOnError(err, "cannot create powerdns resolver")
    fallback, err := netx.NewDNSClient(config, "system:///")
    runtimex.PanicOnError(err, "cannot create system resolver")
    return &Resolver{
        Primary:         primary,
        PrimaryFailure:  atomicx.NewInt64(),
        Fallback:        fallback,
        FallbackFailure: atomicx.NewInt64(),
    }
}

// CloseIdleConnections closes the idle connections, if any.
func (r *Resolver) CloseIdleConnections() {
    r.Primary.CloseIdleConnections()
    r.Fallback.CloseIdleConnections()
}

// LookupHost implements Resolver.LookupHost.
func (r *Resolver) LookupHost(ctx context.Context, hostname string) ([]string, error) {
    // Algorithm similar to Firefox TRR2 mode. See:
    // https://wiki.mozilla.org/Trusted_Recursive_Resolver#DNS-over-HTTPS_Prefs_in_Firefox
    // We use a higher timeout than Firefox's timeout (1.5s) to be on the safe side
    // and therefore tend to use DoH more often.
    trr2, cancel := context.WithTimeout(ctx, 4*time.Second)
    defer cancel()
    addrs, err := r.Primary.LookupHost(trr2, hostname)
    if err != nil {
        r.PrimaryFailure.Add(1)
        addrs, err = r.Fallback.LookupHost(ctx, hostname)
        if err != nil {
            r.FallbackFailure.Add(1)
        }
    }
    return addrs, err
}

// Network implements Resolver.Network.
func (r *Resolver) Network() string {
    return "sessionresolver"
}

// Address implements Resolver.Address.
func (r *Resolver) Address() string {
    return ""
}
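LookupHost implements a Firefox-TRR2-style strategy: query the trusted resolver under a tight deadline, and fall back to the system resolver on any failure or timeout. A minimal Python sketch of the same pattern; the resolver callables are hypothetical stand-ins for the DoH and system clients.

import socket
from concurrent.futures import ThreadPoolExecutor

def system_lookup(hostname):
    # System resolver stand-in.
    return [info[4][0] for info in socket.getaddrinfo(hostname, None)]

def trr2_lookup(primary, fallback, hostname, timeout=4.0):
    # Try the primary resolver under a deadline; on any failure or
    # timeout, fall back to the secondary resolver.
    with ThreadPoolExecutor(max_workers=1) as pool:
        try:
            return pool.submit(primary, hostname).result(timeout=timeout)
        except Exception:
            return fallback(hostname)

# Usage, with the system resolver standing in for both roles:
print(trr2_lookup(system_lookup, system_lookup, "example.org"))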
sys.rs
//! Netlink socket related functions
use libc;

use std::fmt;
use std::hash::{Hash, Hasher};
use std::io::{Error, Result};
use std::mem;
use std::os::unix::io::{AsRawFd, RawFd};

use super::Protocol;

#[derive(Clone, Debug)]
pub struct Socket(RawFd);

impl AsRawFd for Socket {
    fn as_raw_fd(&self) -> RawFd {
        self.0
    }
}

impl Drop for Socket {
    fn drop(&mut self) {
        unsafe { libc::close(self.0) };
    }
}

#[derive(Copy, Clone)]
pub struct SocketAddr(libc::sockaddr_nl);

impl Hash for SocketAddr {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0.nl_family.hash(state);
        self.0.nl_pid.hash(state);
        self.0.nl_groups.hash(state);
    }
}

impl PartialEq for SocketAddr {
    fn eq(&self, other: &SocketAddr) -> bool {
        self.0.nl_family == other.0.nl_family
            && self.0.nl_pid == other.0.nl_pid
            && self.0.nl_groups == other.0.nl_groups
    }
}

impl fmt::Debug for SocketAddr {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "SocketAddr(nl_family={}, nl_pid={}, nl_groups={})",
            self.0.nl_family, self.0.nl_pid, self.0.nl_groups
        )
    }
}

impl Eq for SocketAddr {}

impl fmt::Display for SocketAddr {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "address family: {}, pid: {}, multicast groups: {}",
            self.0.nl_family, self.0.nl_pid, self.0.nl_groups
        )
    }
}

impl SocketAddr {
    pub fn new(port_number: u32, multicast_groups: u32) -> Self {
        let mut addr: libc::sockaddr_nl = unsafe { mem::zeroed() };
        addr.nl_family = libc::PF_NETLINK as libc::sa_family_t;
        addr.nl_pid = port_number;
        addr.nl_groups = multicast_groups;
        SocketAddr(addr)
    }

    pub fn port_number(&self) -> u32 {
        self.0.nl_pid
    }

    pub fn multicast_groups(&self) -> u32 {
        self.0.nl_groups
    }

    fn as_raw(&self) -> (*const libc::sockaddr, libc::socklen_t) {
        // Create a raw pointer to the sockaddr_nl, then cast
        // *sockaddr_nl -> *sockaddr.
        //
        // This kind of thing is pretty usual when using C APIs from Rust. It
        // could be written in a shorter way thanks to type inference:
        //
        //     let addr_ptr: *const libc::sockaddr = &self.0 as *const _ as *const _;
        //
        // But since this is my first time dealing with this kind of thing, I
        // chose the most explicit form.
        let addr_ptr = &self.0 as *const libc::sockaddr_nl as *const libc::sockaddr;
        let addr_len = mem::size_of::<libc::sockaddr_nl>() as libc::socklen_t;
        (addr_ptr, addr_len)
    }

    fn as_raw_mut(&mut self) -> (*mut libc::sockaddr, libc::socklen_t) {
        let addr_ptr = &mut self.0 as *mut libc::sockaddr_nl as *mut libc::sockaddr;
        let addr_len = mem::size_of::<libc::sockaddr_nl>() as libc::socklen_t;
        (addr_ptr, addr_len)
    }
}

impl Socket {
    pub fn new(protocol: Protocol) -> Result<Self> {
        let res =
            unsafe { libc::socket(libc::PF_NETLINK, libc::SOCK_DGRAM, protocol as libc::c_int) };
        if res < 0 {
            return Err(Error::last_os_error());
        }
        Ok(Socket(res))
    }

    pub fn bind(&mut self, addr: &SocketAddr) -> Result<()> {
        let (addr_ptr, addr_len) = addr.as_raw();
        let res = unsafe { libc::bind(self.0, addr_ptr, addr_len) };
        if res < 0 {
            return Err(Error::last_os_error());
        }
        Ok(())
    }

    pub fn bind_auto(&mut self) -> Result<SocketAddr> {
        let mut addr = SocketAddr::new(0, 0);
        self.bind(&addr)?;
        self.get_address(&mut addr)?;
        Ok(addr)
    }

    pub fn get_address(&self, addr: &mut SocketAddr) -> Result<()> {
        let (addr_ptr, mut addr_len) = addr.as_raw_mut();
        let addr_len_copy = addr_len;
        let addr_len_ptr = &mut addr_len as *mut libc::socklen_t;
        let res = unsafe { libc::getsockname(self.0, addr_ptr, addr_len_ptr) };
        if res < 0 {
            return Err(Error::last_os_error());
        }
        assert_eq!(addr_len, addr_len_copy);
        Ok(())
    }

    pub fn set_non_blocking(&self, non_blocking: bool) -> Result<()> {
        let mut non_blocking = non_blocking as libc::c_int;
        let res = unsafe { libc::ioctl(self.0, libc::FIONBIO, &mut non_blocking) };
        if res < 0 {
            return Err(Error::last_os_error());
        }
        Ok(())
    }

    pub fn connect(&self, remote_addr: &SocketAddr) -> Result<()> {
        // Even though for SOCK_DGRAM sockets there's no IO, since our socket is non-blocking,
        // connect() might return EINPROGRESS. In theory, the right way to treat EINPROGRESS would
        // be to ignore the error, and let the user poll the socket to check when it becomes
        // writable, indicating that the connection succeeded. The code already exists in mio for
        // TcpStream:
        //
        // > pub fn connect(stream: net::TcpStream, addr: &SocketAddr) -> io::Result<TcpStream> {
        // >     set_non_block(stream.as_raw_fd())?;
        // >     match stream.connect(addr) {
        // >         Ok(..) => {}
        // >         Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {}
        // >         Err(e) => return Err(e),
        // >     }
        // >     Ok(TcpStream { inner: stream })
        // > }
        //
        // The polling to wait for the connection is available in the tokio-tcp crate. See:
        // https://github.com/tokio-rs/tokio/blob/363b207f2b6c25857c70d76b303356db87212f59/tokio-tcp/src/stream.rs#L706
        //
        // In practice, since the connection does not require any IO for SOCK_DGRAM sockets, it
        // almost never returns EINPROGRESS and so for now, we just return whatever libc::connect
        // returns. If it returns EINPROGRESS, the caller will have to handle the error themselves.
        //
        // Refs:
        //
        // - https://stackoverflow.com/a/14046386/1836144
        // - https://lists.isc.org/pipermail/bind-users/2009-August/077527.html
        let (addr, addr_len) = remote_addr.as_raw();
        let res = unsafe { libc::connect(self.0, addr, addr_len) };
        if res < 0 {
            return Err(Error::last_os_error());
        }
        Ok(())
    }

    // Most of the comments in this method come from a discussion on rust users forum.
    // [thread]: https://users.rust-lang.org/t/help-understanding-libc-call/17308/9
    pub fn recv_from(&self, buf: &mut [u8], flags: libc::c_int) -> Result<(usize, SocketAddr)> {
        // Create an empty storage for the address. Note that the Rust standard library creates a
        // sockaddr_storage so that it works for any address family, but here, we already know that
        // we'll have a Netlink address, so we can create the appropriate storage.
        let mut addr = unsafe { mem::zeroed::<libc::sockaddr_nl>() };

        // recvfrom takes a *sockaddr as parameter so that it can accept any kind of address
        // storage, so we need to create such a pointer for the sockaddr_nl we just initialized:
        // take a raw pointer to our storage and cast it to a generic *sockaddr that recvfrom
        // can use.
        let addr_ptr = &mut addr as *mut libc::sockaddr_nl as *mut libc::sockaddr;

        // Why do we need to pass the address length? We're passing a generic *sockaddr to
        // recvfrom. Somehow recvfrom needs to make sure that the address of the received packet
        // would fit into the actual type that is behind *sockaddr: it could be a sockaddr_nl but
        // also a sockaddr_in, a sockaddr_in6, or even the generic sockaddr_storage that can store
        // any address.
        let mut addrlen = mem::size_of_val(&addr);
        // recvfrom does not take the address length by value (see [thread]), so we need to create
        // a pointer to it.
        let addrlen_ptr = &mut addrlen as *mut usize as *mut libc::socklen_t;

        // Cast the *mut u8 buffer pointer into a *mut void. This is equivalent to casting a
        // *char into *void. See [thread].
        let buf_ptr = buf.as_mut_ptr() as *mut libc::c_void;
        let buf_len = buf.len() as libc::size_t;

        let res = unsafe { libc::recvfrom(self.0, buf_ptr, buf_len, flags, addr_ptr, addrlen_ptr) };
        if res < 0 {
            return Err(Error::last_os_error());
        }
        Ok((res as usize, SocketAddr(addr)))
    }

    pub fn recv(&self, buf: &mut [u8], flags: libc::c_int) -> Result<usize> {
        let buf_ptr = buf.as_mut_ptr() as *mut libc::c_void;
        let buf_len = buf.len() as libc::size_t;
        let res = unsafe { libc::recv(self.0, buf_ptr, buf_len, flags) };
        if res < 0 {
            return Err(Error::last_os_error());
        }
        Ok(res as usize)
    }

    pub fn send_to(&self, buf: &[u8], addr: &SocketAddr, flags: libc::c_int) -> Result<usize> {
        let (addr_ptr, addr_len) = addr.as_raw();
        let buf_ptr = buf.as_ptr() as *const libc::c_void;
        let buf_len = buf.len() as libc::size_t;
        let res = unsafe { libc::sendto(self.0, buf_ptr, buf_len, flags, addr_ptr, addr_len) };
        if res < 0 {
            return Err(Error::last_os_error());
        }
        Ok(res as usize)
    }

    pub fn send(&self, buf: &[u8], flags: libc::c_int) -> Result<usize> {
        let buf_ptr = buf.as_ptr() as *const libc::c_void;
        let buf_len = buf.len() as libc::size_t;
        let res = unsafe { libc::send(self.0, buf_ptr, buf_len, flags) };
        if res < 0 {
            return Err(Error::last_os_error());
        }
        Ok(res as usize)
    }

    pub fn set_pktinfo(&mut self, value: bool) -> Result<()> {
        let value: libc::c_int = if value { 1 } else { 0 };
        setsockopt(self.0, libc::SOL_NETLINK, libc::NETLINK_PKTINFO, value)
    }

    pub fn get_pktinfo(&self) -> Result<bool> {
        let res = getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_PKTINFO)?;
        Ok(res == 1)
    }

    pub fn add_membership(&mut self, group: u32) -> Result<()> {
        setsockopt(
            self.0,
            libc::SOL_NETLINK,
            libc::NETLINK_ADD_MEMBERSHIP,
            group,
        )
    }

    pub fn drop_membership(&mut self, group: u32) -> Result<()> {
        setsockopt(
            self.0,
            libc::SOL_NETLINK,
            libc::NETLINK_DROP_MEMBERSHIP,
            group,
        )
    }

    pub fn list_membership(&self) -> Vec<u32> {
        unimplemented!();
        // getsockopt won't be enough here, because we may need to perform 2 calls, and because the
        // length of the list returned by libc::getsockopt is returned by mutating the length
        // argument, which our implementation of getsockopt forbids.
    }

    /// `NETLINK_BROADCAST_ERROR` (since Linux 2.6.30). When not set, `netlink_broadcast()` only
    /// reports `ESRCH` errors and silently ignores `NOBUFS` errors.
    pub fn set_broadcast_error(&mut self, value: bool) -> Result<()> {
        let value: libc::c_int = if value { 1 } else { 0 };
        setsockopt(
            self.0,
            libc::SOL_NETLINK,
            libc::NETLINK_BROADCAST_ERROR,
            value,
        )
    }

    pub fn get_broadcast_error(&self) -> Result<bool> {
        let res =
            getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_BROADCAST_ERROR)?;
        Ok(res == 1)
    }

    /// `NETLINK_NO_ENOBUFS` (since Linux 2.6.30). This flag can be used by unicast and broadcast
    /// listeners to avoid receiving `ENOBUFS` errors.
    pub fn set_no_enobufs(&mut self, value: bool) -> Result<()> {
        let value: libc::c_int = if value { 1 } else { 0 };
        setsockopt(self.0, libc::SOL_NETLINK, libc::NETLINK_NO_ENOBUFS, value)
    }

    pub fn get_no_enobufs(&self) -> Result<bool> {
        let res = getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_NO_ENOBUFS)?;
        Ok(res == 1)
    }

    /// `NETLINK_LISTEN_ALL_NSID` (since Linux 4.2). When set, this socket will receive netlink
    /// notifications from all network namespaces that have an nsid assigned into the network
    /// namespace where the socket has been opened. The nsid is sent to user space via an ancillary
    /// data.
    pub fn set_listen_all_namespaces(&mut self, value: bool) -> Result<()> {
        let value: libc::c_int = if value { 1 } else { 0 };
        setsockopt(
            self.0,
            libc::SOL_NETLINK,
            libc::NETLINK_LISTEN_ALL_NSID,
            value,
        )
    }

    pub fn get_listen_all_namespaces(&self) -> Result<bool> {
        let res =
            getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_LISTEN_ALL_NSID)?;
        Ok(res == 1)
    }

    /// `NETLINK_CAP_ACK` (since Linux 4.2). The kernel may fail to allocate the necessary room
    /// for the acknowledgment message back to user space. This option trims off the payload of
    /// the original netlink message. The netlink message header is still included, so the user can
    /// guess from the sequence number which message triggered the acknowledgment.
    pub fn set_cap_ack(&mut self, value: bool) -> Result<()> {
        let value: libc::c_int = if value { 1 } else { 0 };
        setsockopt(self.0, libc::SOL_NETLINK, libc::NETLINK_CAP_ACK, value)
    }

    pub fn get_cap_ack(&self) -> Result<bool> {
        let res = getsockopt::<libc::c_int>(self.0, libc::SOL_NETLINK, libc::NETLINK_CAP_ACK)?;
        Ok(res == 1)
    }
}

/// Wrapper around `getsockopt`:
///
/// ```no_rust
/// int getsockopt(int socket, int level, int option_name, void *restrict option_value, socklen_t *restrict option_len);
/// ```
fn getsockopt<T: Copy>(fd: RawFd, level: libc::c_int, option: libc::c_int) -> Result<T> {
    unsafe {
        // Create storage for the options we're fetching
        let mut slot: T = mem::zeroed();

        // Create a mutable raw pointer to the storage so that getsockopt can fill the value
        let slot_ptr = &mut slot as *mut T as *mut libc::c_void;

        // Let getsockopt know how big our storage is
        let mut slot_len = mem::size_of::<T>() as libc::socklen_t;

        // getsockopt takes a mutable pointer to the length, because for some options like
        // NETLINK_LIST_MEMBERSHIP where the option value is a list with arbitrary length,
        // getsockopt uses this parameter to signal how big the storage needs to be.
        let slot_len_ptr = &mut slot_len as *mut libc::socklen_t;

        let res = libc::getsockopt(fd, level, option, slot_ptr, slot_len_ptr);
        if res < 0 {
            return Err(Error::last_os_error());
        }

        // Ignore the options that require the length to be set by getsockopt.
        // We'll deal with them individually.
        assert_eq!(slot_len as usize, mem::size_of::<T>());

        Ok(slot)
    }
}

// adapted from rust standard library
fn setsockopt<T>(fd: RawFd, level: libc::c_int, option: libc::c_int, payload: T) -> Result<()> {
    unsafe {
        let payload = &payload as *const T as *const libc::c_void;
        let payload_len = mem::size_of::<T>() as libc::socklen_t;
        let res = libc::setsockopt(fd, level, option, payload, payload_len);
        if res < 0 {
            return Err(Error::last_os_error());
        }
    }
    Ok(())
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn new() {
        Socket::new(Protocol::Route).unwrap();
    }

    #[test]
    fn connect() {
        let sock = Socket::new(Protocol::Route).unwrap();
        sock.connect(&SocketAddr::new(0, 0)).unwrap();
    }

    #[test]
    fn bind() {
        let mut sock = Socket::new(Protocol::Route).unwrap();
        sock.bind(&SocketAddr::new(4321, 0)).unwrap();
    }

    #[test]
    fn bind_auto() {
        let mut sock = Socket::new(Protocol::Route).unwrap();
        let addr = sock.bind_auto().unwrap();
        // make sure that the address we got from the kernel is there
        assert!(addr.port_number() != 0);
    }

    #[test]
    fn set_non_blocking() {
        let sock = Socket::new(Protocol::Route).unwrap();
        sock.set_non_blocking(true).unwrap();
        sock.set_non_blocking(false).unwrap();
    }

    #[test]
    fn options() {
        let mut sock = Socket::new(Protocol::Route).unwrap();

        sock.set_cap_ack(true).unwrap();
        assert!(sock.get_cap_ack().unwrap());
        sock.set_cap_ack(false).unwrap();
        assert!(!sock.get_cap_ack().unwrap());

        sock.set_no_enobufs(true).unwrap();
        assert!(sock.get_no_enobufs().unwrap());
        sock.set_no_enobufs(false).unwrap();
        assert!(!sock.get_no_enobufs().unwrap());

        sock.set_broadcast_error(true).unwrap();
        assert!(sock.get_broadcast_error().unwrap());
        sock.set_broadcast_error(false).unwrap();
        assert!(!sock.get_broadcast_error().unwrap());

        // FIXME: these require root permissions
        // sock.set_listen_all_namespaces(true).unwrap();
        // assert!(sock.get_listen_all_namespaces().unwrap());
        // sock.set_listen_all_namespaces(false).unwrap();
        // assert!(!sock.get_listen_all_namespaces().unwrap());
    }

    #[test]
    fn address() {
        let mut addr = SocketAddr::new(42, 1234);
        assert_eq!(addr.port_number(), 42);
        assert_eq!(addr.multicast_groups(), 1234);

        {
            let (addr_ptr, _) = addr.as_raw();
            let inner_addr = unsafe { *(addr_ptr as *const libc::sockaddr_nl) };
            assert_eq!(inner_addr.nl_pid, 42);
            assert_eq!(inner_addr.nl_groups, 1234);
        }

        {
            let (addr_ptr, _) = addr.as_raw_mut();
            let sockaddr_nl = addr_ptr as *mut libc::sockaddr_nl;
            unsafe {
                sockaddr_nl.as_mut().unwrap().nl_pid = 24;
                sockaddr_nl.as_mut().unwrap().nl_groups = 4321
            }
        }
        assert_eq!(addr.port_number(), 24);
        assert_eq!(addr.multicast_groups(), 4321);
    }
}
Org.tsx
/* Copyright 2021 the Deno authors. All rights reserved. MIT license. */

import React from "react";
import { renderToStaticMarkup } from "react-dom/server";
import dompurify from "dompurify";
import {
  markup,
  MarkupProps,
  scrollEffect,
  slugify,
  transformImageUri,
  transformLinkUri,
} from "./Markup";
import { RawCodeBlock } from "./CodeBlock";
import { fileTypeFromURL } from "../util/registry_utils";
import {
  Block,
  Document,
  Drawer,
  Footnote,
  FootnoteReference,
  HTML,
  Headline,
  HorizontalRule,
  Keyword,
  List,
  ListItem,
  Paragraph,
  PhrasingContent,
  Planning,
  Section,
  StyledText,
  Table,
  TableCell,
  TableRow,
  Token,
} from "orga/dist/types";
import { Node, Parent } from "unist";
import { parse as parseSource } from "orga";

function foreachTree(tree: Node, iteratee: (node: Node) => void) {
  iteratee(tree);
  if ("children" in tree) {
    (tree as Parent).children.forEach((c) => foreachTree(c, iteratee));
  }
}

type TopLevelContent = Content | Keyword | Footnote;

type Content =
  | Section
  | Paragraph
  | Block
  | Drawer
  | Planning
  | List
  | Table
  | HorizontalRule
  | Headline
  | HTML;

function orgToHTML(props: MarkupProps, node: Document): string {
  function nonHTML(text: string) {
    return text
      .replace(/&/g, "&amp;")
      .replace(/</g, "&lt;")
      .replace(/>/g, "&gt;");
  }

  function forAttr(text: string) {
    return text.replace(/&/g, "&amp;").replace(/"/g, "&quot;");
  }

  // Footnote handling: footnotes are grouped at end of document and numbered
  // according to initial usage.
  function extractFootnotes(node: Document): [FootnoteReference[], Footnote[]] {
    const refs: FootnoteReference[] = [];
    const defs: Footnote[] = [];
    foreachTree(node, (n) => {
      if (n.type === "footnote") {
        defs.push(n as Footnote);
      } else if (n.type === "footnote.reference") {
        refs.push(n as FootnoteReference);
      }
    });
    return [refs, defs];
  }

  function buildFootnoteMap(
    refs: FootnoteReference[],
    defs: Footnote[]
  ): Map<string, number> {
    const res = new Map();
    let curr = 1;
    const definedLabels = defs.map((c) => c.label);
    refs.forEach((ref) => {
      const label = ref.label;
      if (res.has(label) || !definedLabels.includes(label)) {
        return;
      }
      res.set(label, curr);
      curr++;
    });
    return res;
  }

  const [footnoteUses, footnotes] = extractFootnotes(node);
  const fnmap = buildFootnoteMap(footnoteUses, footnotes);

  function orderFootnotes(
    fmap: Map<string, number>,
    defs: Footnote[]
  ): Footnote[] {
    const res: Footnote[] = [];
    const invMap = new Map(
      Array.from(fmap.entries()).map((kv) => [kv[1], kv[0]])
    );
    const defsMap = new Map(defs.map((v) => [v.label, v]));
    for (let fnum = 1; fnum < fmap.size + 1; fnum++) {
      res.push(defsMap.get(invMap.get(fnum)!)!);
    }
    return res;
  }

  function footnoteId(fnum: number): string {
    return `fn.${fnum}`;
  }

  function wrapped(tag: string, text: string) {
    return `<${tag}>${nonHTML(text)}</${tag}>`;
  }

  function isStyledText(node: { type: string }): node is StyledText {
    return [
      "text.plain",
      "text.bold",
      "text.code",
      "text.verbatim",
      "text.italic",
      "text.strikeThrough",
      "text.underline",
    ].includes(node.type);
  }

  function isPhrasingContent(node: { type: string }): node is PhrasingContent {
    return (
      ["link", "footnote.reference", "newline"].includes(node.type) ||
      isStyledText(node)
    );
  }

  function isToken(node: { type: string }): node is Token {
    return (
      [
        "keyword",
        "todo",
        "hr",
        "stars",
        "priority",
        "tags",
        "planning.keyword",
        "planning.timestamp",
        "list.item.tag",
        "list.item.checkbox",
        "list.item.bullet",
        "table.hr",
        "table.columnSeparator",
        "footnote.label",
        "block.begin",
        "block.end",
        "drawer.begin",
        "drawer.end",
      ].includes(node.type) || isPhrasingContent(node)
    );
  }

  function styledTextToHTML(node: StyledText): string {
    switch (node.type) {
      case "text.plain":
        return nonHTML(node.value);
      case "text.bold":
        return wrapped("strong", node.value);
      case "text.code":
      case "text.verbatim":
        return wrapped("code", node.value);
      case "text.italic":
        return wrapped("em", node.value);
      case "text.strikeThrough":
        return wrapped("del", node.value);
      case "text.underline":
        return `<span style="text-decoration: underline;">${nonHTML(
          node.value
        )}</span>`;
    }
  }

  function tokenToHTML(node: Token): string {
    if (isPhrasingContent(node)) {
      return phrasingContentToHTML(node);
    }
    switch (node.type) {
      case "todo": {
        const kw = node.keyword;
        switch (kw) {
          case "TODO":
            return '<span class="heading-kw-todo">TODO</span>';
          case "DONE":
            return '<span class="heading-kw-done">DONE</span>';
          default:
            return `<span class="heading-kw-unknown">${kw}</span>`;
        }
      }
      // we don't render heading priorities
      case "priority":
        return "";
      case "tags":
        return `<span class="tags">${node.tags.join(" ")}</span>`;
    }
    // we shouldn't encounter any other tokens.
    console.log(`Not rendering unexpected token of type: ${node.type}`);
    return "";
  }

  function phrasingContentToHTML(node: PhrasingContent): string {
    if (isStyledText(node)) {
      return styledTextToHTML(node);
    }
    switch (node.type) {
      case "link": {
        // for links like [[https://duckduckgo.com]]
        const urlRaw = node.value;
        const text = nonHTML(node.description ?? urlRaw);
        const attrText = forAttr(node.description ?? urlRaw);
        const isImage = fileTypeFromURL(urlRaw) === "image";
        const url =
          node.protocol === "internal"
            ? "#" + slugify(urlRaw)
            : isImage
            ? transformImageUri(props.sourceURL)(urlRaw)
            : transformLinkUri(props.displayURL, props.baseURL)(urlRaw);
        const title = node.text;
        return isImage
          ? `<img${url ? ` src="${url}"` : ""}${
              attrText ? ` alt="${attrText}"` : ""
            }${title ? ` title="${title}"` : ""} style="max-width:100%;">`
          : `<a${url ? ` href="${url}"` : ""}${
              title ? ` title="${title}"` : ""
            }>${text}</a>`;
      }
      case "newline":
        return "";
      case "footnote.reference": {
        const resolvedLabel = fnmap.get(node.label);
        if (resolvedLabel === undefined) {
          // no definition for footnote
          return "<sup><strong>?</strong></sup>";
        }
        return `<sup><a href="#${footnoteId(
          resolvedLabel
        )}">${resolvedLabel}</a></sup>`;
      }
    }
  }

  function anyToHTML(node: Content | Token): string {
    if (isToken(node)) {
      return tokenToHTML(node);
    }
    return contentToHTML(node);
  }

  function listItemToHTML(node: ListItem | List): string {
    if (node.type === "list") {
      return contentToHTML(node);
    }
    const content = node.children.slice(1);
    if (content.length > 0 && content[0].type === "list.item.checkbox") {
      return `<li><input${
        content[0].checked ? ' checked=""' : ""
      } disabled="" type="checkbox">${content.length > 1 ? " " : ""}${content
        .slice(1)
        .map((c) => anyToHTML(c as Content | Token))
        .join("")}</li>`;
    }
    return `<li>${content
      .map((c) => anyToHTML(c as Content | Token))
      .join("")}</li>`;
  }

  function topLevelContentToHTML(node: TopLevelContent): string {
    switch (node.type) {
      // NOTE: orgajs never actually yields Keyword at the top level (yet), so ignored here (2021-06-27)
      case "keyword":
        return "";
      case "footnote": {
        // footnotes handled elsewhere
        return "";
      }
    }
    return contentToHTML(node);
  }

  function tableCellToHTML(col: TableCell, isHead = false) {
    const tag = isHead ? "th" : "td";
    return `<${tag}>${col.children
      .map(phrasingContentToHTML)
      .join("")
      .trim()}</${tag}>`;
  }

  function tableRowToHTML(row: TableRow, isHead = false) {
    return `<tr>${row.children
      .map((c) => tableCellToHTML(c, isHead))
      .join("")}</tr>`;
  }

  function mkHeaderHTML(level: number, text: string, slug: string): string {
    return `<h${level}>
  <a name="${slug}" class="anchor" href="#${slug}">
    <span class="octicon-link"></span>
  </a>
  ${text}
</h${level}>`;
  }

  function contentToHTML(node: Content): string {
    switch (node.type) {
      case "section": {
        // treating this as a headline, first child should be headline content
        return node.children.map(contentToHTML).join("");
      }
      case "paragraph": {
        const lines: string[] = node.children.map((c) =>
          phrasingContentToHTML(c)
        );
        return `<p>${lines.map((c) => (c === " " ? "\n" : c)).join("")}</p>`;
      }
      case "headline": {
        const level = node.level;
        const contentChildren = node.children.slice(1);
        const slug = slugify(
          contentChildren
            .filter((c) => c.type !== "priority")
            .map((c) => c.value)
            .join("")
        );
        const headingContent: string = contentChildren
          .map(
            (c) =>
              (c.type === "tags" ? " " : "") +
              tokenToHTML(c as Token) +
              (c.type === "todo" ? " " : "")
          )
          .join("");
        return mkHeaderHTML(level, headingContent, slug);
      }
      case "hr": {
        return "<hr>";
      }
      case "html": {
        return dompurify.sanitize(node.value);
      }
      case "block": {
        if (node.name === "EXPORT" && node.params.includes("html")) {
          return dompurify.sanitize(node.value);
        }
        if (node.name === "SRC" && node.params.length > 0) {
          const language = node.params[0];
          const markup = renderToStaticMarkup(
            <RawCodeBlock
              code={node.value}
              language={language as any}
              disablePrefixes={true}
              enableLineRef={false}
            />
          );
          return `<pre>${markup}</pre>`;
        }
        switch (node.name) {
          case "QUOTE": {
            const contents = nonHTML(node.value)
              .split("\n")
              .map((c) => `<p>${c}</p>`)
              .join("");
            return `<blockquote>${contents}</blockquote>`;
          }
          // comments aren't exported
          case "COMMENT":
            return "";
        }
        // if a block is unknown, just give it some reasonable formatting
        return `<pre>${nonHTML(node.value)}</pre>`;
      }
      case "list": {
        const items: string[] = node.children.map(listItemToHTML);
        if (node.ordered) {
          return `<ol>${items.join("")}</ol>`;
        } else {
          return `<ul>${items.join("")}</ul>`;
        }
      }
      case "table": {
        const nonRules = node.children.filter(
          (v) => v.type !== "table.hr"
        ) as TableRow[];
        if (nonRules.length === 0) {
          return "";
        }
        const [hrow, ...rows] = nonRules;
        const body = rows.map((c) => tableRowToHTML(c)).join("");
        return `<table><thead>${tableRowToHTML(hrow, true)}</thead>${
          body ? `<tbody>${body}</tbody>` : ""
        }</table>`;
      }
      // we currently ignore drawers (2021-06-27)
      case "drawer":
        return "";
      // we currently ignore planning (e.g., SCHEDULED, DEADLINE) (2021-06-27)
      case "planning":
        return "";
    }
  }

  const res: string[] = [];

  if ("title" in node.properties) {
    const title = node.properties.title;
    res.push(mkHeaderHTML(1, title, slugify(title)));
  }

  res.push(node.children.map(topLevelContentToHTML).join(""));

  const orderedFootnotes = orderFootnotes(fnmap, footnotes);

  if (orderedFootnotes.length > 0) {
    const fnHTML = ["<hr>"];
    for (let fnum = 1; fnum <= orderedFootnotes.length; fnum++) {
      const description = orderedFootnotes[fnum - 1].children
        .map((c) => contentToHTML(c as Content))
        .join("");
      fnHTML.push(
        `<div class="footdef"><sup><a id="${footnoteId(
          fnum
        )}" href="#fnr.${fnum}">${fnum}</a></sup> <div class="footpara">${description}</div></div>`
      );
    }
    res.push(fnHTML.join("\n"));
  }

  return res.join("");
}

function Org(props: MarkupProps, testing = false): React.ReactElement | null {
  if (!testing) {
    scrollEffect();
  }

  if (!props.source) {
    return null;
  }

  try {
    const raw = orgToHTML(props, parseSource(props.source));
    return markup(props, raw);
  } catch (err) {
    console.log(err);
    return null;
  }
}

export default Org;
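As the comment in orgToHTML notes, footnotes are numbered by first reference rather than by definition order, and references whose label has no definition are skipped. A small Python sketch of that numbering rule, with hypothetical names:

def build_footnote_map(refs, defined_labels):
    # Number footnotes in order of first use; skip repeats and
    # references without a matching definition.
    numbering = {}
    for label in refs:
        if label in numbering or label not in defined_labels:
            continue
        numbering[label] = len(numbering) + 1
    return numbering

# "b" is referenced first so it gets 1; the repeat of "b" and the
# undefined "x" are both skipped.
print(build_footnote_map(["b", "a", "b", "x"], {"a", "b"}))  # {'b': 1, 'a': 2}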
ceval.rs
use libc::{c_char, c_int, c_void};

use crate::object::PyObject;
use crate::pystate::PyThreadState;

#[cfg_attr(windows, link(name = "pythonXY"))]
extern "C" {
    #[deprecated(since = "0.5.2", note = "Deprecated since Python 3.9")]
    pub fn PyEval_CallObjectWithKeywords(
        callable: *mut PyObject,
        obj: *mut PyObject,
        kwargs: *mut PyObject,
    ) -> *mut PyObject;
}

#[inline]
#[deprecated(since = "0.5.2", note = "Deprecated since Python 3.9")]
pub unsafe fn PyEval_CallObject(callable: *mut PyObject, arg: *mut PyObject) -> *mut PyObject {
    #[allow(deprecated)]
    PyEval_CallObjectWithKeywords(callable, arg, core::ptr::null_mut())
}

#[cfg_attr(windows, link(name = "pythonXY"))]
extern "C" {
    #[deprecated(since = "0.5.2", note = "Deprecated since Python 3.9")]
    pub fn PyEval_CallFunction(
        callable: *mut PyObject,
        format: *const c_char,
        ...
    ) -> *mut PyObject;
    #[deprecated(since = "0.5.2", note = "Deprecated since Python 3.9")]
    pub fn PyEval_CallMethod(
        obj: *mut PyObject,
        name: *const c_char,
        format: *const c_char,
        ...
    ) -> *mut PyObject;
    pub fn PyEval_GetBuiltins() -> *mut PyObject;
    pub fn PyEval_GetGlobals() -> *mut PyObject;
    pub fn PyEval_GetLocals() -> *mut PyObject;
    pub fn PyEval_GetFrame() -> *mut crate::PyFrameObject;
    pub fn Py_AddPendingCall(
        func: Option<extern "C" fn(arg1: *mut c_void) -> c_int>,
        arg: *mut c_void,
    ) -> c_int;
    pub fn Py_MakePendingCalls() -> c_int;
    pub fn Py_SetRecursionLimit(arg1: c_int) -> ();
    pub fn Py_GetRecursionLimit() -> c_int;

    ignore! {
        fn _Py_CheckRecursiveCall(_where: *mut c_char) -> c_int;
        static mut _Py_CheckRecursionLimit: c_int;
    }

    #[cfg(Py_3_9)]
    pub fn Py_EnterRecursiveCall(_where: *const c_char) -> c_int;
    #[cfg(Py_3_9)]
    pub fn Py_LeaveRecursiveCall() -> c_void;
}

// TODO: Py_EnterRecursiveCall for Python <3.9

#[cfg_attr(windows, link(name = "pythonXY"))]
extern "C" {
    pub fn PyEval_GetFuncName(arg1: *mut PyObject) -> *const c_char;
    pub fn PyEval_GetFuncDesc(arg1: *mut PyObject) -> *const c_char;
    #[cfg(not(Py_3_7))]
    pub fn PyEval_GetCallStats(arg1: *mut PyObject) -> *mut PyObject;
    pub fn PyEval_EvalFrame(arg1: *mut crate::PyFrameObject) -> *mut PyObject;
    pub fn PyEval_EvalFrameEx(f: *mut crate::PyFrameObject, exc: c_int) -> *mut PyObject;
    pub fn PyEval_SaveThread() -> *mut PyThreadState;
    pub fn PyEval_RestoreThread(arg1: *mut PyThreadState) -> ();
}

#[cfg(any(Py_3_7, py_sys_config = "WITH_THREAD"))]
#[cfg_attr(windows, link(name = "pythonXY"))]
extern "C" {
    pub fn PyEval_ThreadsInitialized() -> c_int;
    pub fn PyEval_InitThreads() -> ();
    #[deprecated(
        since = "0.2.1",
        note = "Deprecated since Python 3.2: This function does not update the current thread state. Please use PyEval_RestoreThread() or PyEval_AcquireThread() instead."
    )]
    pub fn PyEval_AcquireLock() -> ();
    #[deprecated(
        since = "0.2.1",
        note = "Deprecated since Python 3.2: This function does not update the current thread state. Please use PyEval_RestoreThread() or PyEval_AcquireThread() instead."
    )]
    pub fn PyEval_ReleaseLock() -> ();
    pub fn PyEval_AcquireThread(tstate: *mut PyThreadState) -> ();
    pub fn PyEval_ReleaseThread(tstate: *mut PyThreadState) -> ();
    #[cfg(not(Py_3_8))]
    pub fn PyEval_ReInitThreads() -> ();
}
index.js
var express = require('express');
var bodyParser = require('body-parser');

var app = express();
var ideaController = require('./controllers/ideasController');

app.use(bodyParser.urlencoded({ extended: false }));
app.use(bodyParser.json());

app.use('/api/ideas', ideaController);

app.listen(process.env.PORT || 3000, () => {
  console.log('Server is running');
});
order_by.rs
use query_engine_tests::*; #[test_suite(schema(schema))] mod basic_order_by { use indoc::indoc; use query_engine_tests::run_query; fn schema() -> String { let schema = indoc! { r#"model OrderTest { #id(id, Int, @id) uniqueField Int @unique nonUniqFieldA String nonUniqFieldB String }"# }; schema.to_owned() } #[connector_test] async fn unique_asc(runner: Runner) -> TestResult<()> { create_test_data(&runner).await?; insta::assert_snapshot!( run_query!(runner, r#"{ findManyOrderTest(orderBy: { uniqueField: asc }) { uniqueField } }"#), @r###"{"data":{"findManyOrderTest":[{"uniqueField":1},{"uniqueField":2},{"uniqueField":3},{"uniqueField":4},{"uniqueField":5},{"uniqueField":6}]}}"### ); Ok(()) } #[connector_test] async fn unique_desc(runner: Runner) -> TestResult<()> { create_test_data(&runner).await?; insta::assert_snapshot!( run_query!(runner, r#"{ findManyOrderTest(orderBy: { uniqueField: desc }) { uniqueField } }"#), @r###"{"data":{"findManyOrderTest":[{"uniqueField":6},{"uniqueField":5},{"uniqueField":4},{"uniqueField":3},{"uniqueField":2},{"uniqueField":1}]}}"### ); Ok(()) } #[connector_test] async fn multiple_fields_basic(runner: Runner) -> TestResult<()> { create_test_data(&runner).await?; insta::assert_snapshot!( run_query!(runner, r#"{ findManyOrderTest(orderBy: [{ nonUniqFieldA: desc }, { uniqueField: desc}]) { nonUniqFieldA uniqueField } }"#), @r###"{"data":{"findManyOrderTest":[{"nonUniqFieldA":"C","uniqueField":6},{"nonUniqFieldA":"C","uniqueField":5},{"nonUniqFieldA":"B","uniqueField":4},{"nonUniqFieldA":"B","uniqueField":3},{"nonUniqFieldA":"A","uniqueField":2},{"nonUniqFieldA":"A","uniqueField":1}]}}"### ); Ok(()) } // Ordering by multiple fields should honor the order of the ordering fields defined in the query. #[connector_test] async fn multiple_fields_ordering(runner: Runner) -> TestResult<()> { create_test_data(&runner).await?; // B ASC, A ASC, U ASC // A, A, 1 // A, B, 4 // B, A, 2 // B, C, 5 // C, B, 3 // C, C, 6 insta::assert_snapshot!( run_query!(runner, r#"{ findManyOrderTest(orderBy: [{ nonUniqFieldB: asc }, { nonUniqFieldA: asc }, { uniqueField: asc}]) { nonUniqFieldB nonUniqFieldA uniqueField } }"#), @r###"{"data":{"findManyOrderTest":[{"nonUniqFieldB":"A","nonUniqFieldA":"A","uniqueField":1},{"nonUniqFieldB":"A","nonUniqFieldA":"B","uniqueField":4},{"nonUniqFieldB":"B","nonUniqFieldA":"A","uniqueField":2},{"nonUniqFieldB":"B","nonUniqFieldA":"C","uniqueField":5},{"nonUniqFieldB":"C","nonUniqFieldA":"B","uniqueField":3},{"nonUniqFieldB":"C","nonUniqFieldA":"C","uniqueField":6}]}}"### ); Ok(()) } #[connector_test] async fn
(runner: Runner) -> TestResult<()> { create_test_data(&runner).await?; insta::assert_snapshot!( run_query!(runner, r#"{ findManyOrderTest(take: -3, orderBy: { uniqueField: desc }) { uniqueField } }"#), @r###"{"data":{"findManyOrderTest":[{"uniqueField":3},{"uniqueField":2},{"uniqueField":1}]}}"### ); Ok(()) } #[connector_test] async fn empty_order_objects(runner: Runner) -> TestResult<()> { create_test_data(&runner).await?; insta::assert_snapshot!( run_query!(runner, r#" { findManyOrderTest(orderBy: {}) { uniqueField } }"#), @r###"{"data":{"findManyOrderTest":[{"uniqueField":1},{"uniqueField":2},{"uniqueField":3},{"uniqueField":4},{"uniqueField":5},{"uniqueField":6}]}}"### ); insta::assert_snapshot!( run_query!(runner, r#" { findManyOrderTest(orderBy: [{}]) { uniqueField } }"#), @r###"{"data":{"findManyOrderTest":[{"uniqueField":1},{"uniqueField":2},{"uniqueField":3},{"uniqueField":4},{"uniqueField":5},{"uniqueField":6}]}}"### ); insta::assert_snapshot!( run_query!(runner, r#" { findManyOrderTest(orderBy: [{}, {}]) { uniqueField } }"#), @r###"{"data":{"findManyOrderTest":[{"uniqueField":1},{"uniqueField":2},{"uniqueField":3},{"uniqueField":4},{"uniqueField":5},{"uniqueField":6}]}}"### ); Ok(()) } async fn create_test_data(runner: &Runner) -> TestResult<()> { create_row( runner, r#"{ id: 1, uniqueField: 1, nonUniqFieldA: "A", nonUniqFieldB: "A"}"#, ) .await?; create_row( runner, r#"{ id: 2, uniqueField: 2, nonUniqFieldA: "A", nonUniqFieldB: "B"}"#, ) .await?; create_row( runner, r#"{ id: 3, uniqueField: 3, nonUniqFieldA: "B", nonUniqFieldB: "C"}"#, ) .await?; create_row( runner, r#"{ id: 4, uniqueField: 4, nonUniqFieldA: "B", nonUniqFieldB: "A"}"#, ) .await?; create_row( runner, r#"{ id: 5, uniqueField: 5, nonUniqFieldA: "C", nonUniqFieldB: "B"}"#, ) .await?; create_row( runner, r#"{ id: 6, uniqueField: 6, nonUniqFieldA: "C", nonUniqFieldB: "C"}"#, ) .await?; Ok(()) } async fn create_row(runner: &Runner, data: &str) -> TestResult<()> { runner .query(format!("mutation {{ createOneOrderTest(data: {}) {{ id }} }}", data)) .await? .assert_success(); Ok(()) } }
negative_cursor
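// Note on `negative_cursor` above (illustrative): `take: -3` returns the last
// three records of the `uniqueField: desc` ordering, which is why the
// snapshot reads 3, 2, 1 rather than 6, 5, 4.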
path_fixer.rs
//! Special utility that allows you to fix paths to resources. It is very useful if you've //! moved a resource in a file system, but a scene has old path. use crate::{ gui::{BuildContext, Ui, UiMessage, UiNode}, make_scene_file_filter, Message, }; use rg3d::core::replace_slashes; use rg3d::material::PropertyValue; use rg3d::{ asset::ResourceData, core::{ color::Color, futures::executor::block_on, pool::Handle, visitor::{Visit, Visitor}, }, gui::{ border::BorderBuilder, brush::Brush, button::ButtonBuilder, decorator::DecoratorBuilder, file_browser::FileSelectorBuilder, formatted_text::WrapMode, grid::{Column, GridBuilder, Row}, list_view::ListViewBuilder, message::{ ButtonMessage, FileSelectorMessage, ListViewMessage, MessageDirection, TextMessage, UiMessageData, WidgetMessage, WindowMessage, }, stack_panel::StackPanelBuilder, text::TextBuilder, widget::WidgetBuilder, window::{WindowBuilder, WindowTitle}, HorizontalAlignment, Orientation, Thickness, VerticalAlignment, }, resource::{model::Model, texture::Texture}, scene::{light::Light, node::Node, Scene}, }; use std::path::Path; use std::{ collections::HashSet, hash::{Hash, Hasher}, path::PathBuf, }; pub struct PathFixer { pub window: Handle<UiNode>, scene_path_value: PathBuf, scene_path: Handle<UiNode>, scene_selector: Handle<UiNode>, load_scene: Handle<UiNode>, scene: Option<Scene>, orphaned_scene_resources: Vec<SceneResource>, resources_list: Handle<UiNode>, cancel: Handle<UiNode>, ok: Handle<UiNode>, selection: Option<usize>, fix: Handle<UiNode>, resource_path: Handle<UiNode>, new_path_selector: Handle<UiNode>, auto_fix: Handle<UiNode>, } #[derive(Clone)] enum SceneResource { Model(Model), Texture(Texture), // TODO: Add sound buffers. } impl SceneResource { fn path(&self) -> PathBuf { match self { SceneResource::Model(model) => model.state().path().to_path_buf(), SceneResource::Texture(texture) => texture.state().path().to_path_buf(), } } fn set_path(&mut self, path: PathBuf) { match self { SceneResource::Model(model) => model.data_ref().set_path(path), SceneResource::Texture(texture) => texture.data_ref().set_path(path), } } fn key(&self) -> usize { match self { SceneResource::Model(model) => model.key(), SceneResource::Texture(texture) => texture.key(), } } } impl Hash for SceneResource { fn hash<H: Hasher>(&self, state: &mut H) { state.write_usize(self.key()); } } impl PartialEq for SceneResource { fn eq(&self, other: &Self) -> bool { self.key() == other.key() } } impl Eq for SceneResource {} fn find_file(name: &Path) -> Vec<PathBuf> { let mut files = Vec::new(); for dir in rg3d::walkdir::WalkDir::new(".").into_iter().flatten() { let path = dir.path(); if let Some(file_name) = path.file_name() { if file_name == name { files.push(path.to_owned()); } } } files } impl PathFixer { pub fn new(ctx: &mut BuildContext) -> Self { let scene_selector = FileSelectorBuilder::new( WindowBuilder::new(WidgetBuilder::new().with_width(300.0).with_height(400.0)) .open(false) .with_title(WindowTitle::Text("Select a scene for diagnostics".into())), ) .with_filter(make_scene_file_filter()) .build(ctx); let new_path_selector = FileSelectorBuilder::new( WindowBuilder::new(WidgetBuilder::new().with_width(300.0).with_height(400.0)) .open(false) .with_title(WindowTitle::Text( "Select a new path to the resource".into(), )), ) .build(ctx); let load_scene; let scene_path; let resources_list; let cancel; let ok; let auto_fix; let fix; let resource_path; let window = WindowBuilder::new(WidgetBuilder::new().with_width(400.0).with_height(500.0)) 
.with_title(WindowTitle::text("Path Fixer")) .open(false) .with_content( GridBuilder::new( WidgetBuilder::new() .with_child({ scene_path = TextBuilder::new(WidgetBuilder::new().on_row(0)) .with_text("Scene: No scene loaded!") .with_wrap(WrapMode::Word) .build(ctx); scene_path }) .with_child( GridBuilder::new( WidgetBuilder::new() .on_row(1) .with_child({ resource_path = TextBuilder::new(WidgetBuilder::new().on_column(0)) .with_vertical_text_alignment( VerticalAlignment::Center, ) .build(ctx); resource_path }) .with_child({ fix = ButtonBuilder::new( WidgetBuilder::new() .with_width(40.0) .on_column(1) .with_enabled(false) .with_margin(Thickness::uniform(1.0)), ) .with_text("Fix...") .build(ctx); fix }), ) .add_column(Column::stretch()) .add_column(Column::auto()) .add_row(Row::stretch()) .build(ctx), ) .with_child({ resources_list = ListViewBuilder::new(WidgetBuilder::new().on_row(2)).build(ctx); resources_list }) .with_child( StackPanelBuilder::new( WidgetBuilder::new() .with_horizontal_alignment(HorizontalAlignment::Right) .on_row(3) .with_child({ load_scene = ButtonBuilder::new( WidgetBuilder::new() .with_width(100.0) .with_margin(Thickness::uniform(1.0)), ) .with_text("Load Scene...") .build(ctx); load_scene }) .with_child({ auto_fix = ButtonBuilder::new( WidgetBuilder::new() .with_width(100.0) .with_margin(Thickness::uniform(1.0)), ) .with_text("Auto Fix") .build(ctx); auto_fix }) .with_child({ ok = ButtonBuilder::new( WidgetBuilder::new() .with_width(100.0) .with_margin(Thickness::uniform(1.0)), ) .with_text("OK") .build(ctx); ok }) .with_child({ cancel = ButtonBuilder::new( WidgetBuilder::new() .with_width(100.0) .with_margin(Thickness::uniform(1.0)), ) .with_text("Cancel") .build(ctx); cancel }), ) .with_orientation(Orientation::Horizontal) .build(ctx), ), ) .add_row(Row::auto()) .add_row(Row::strict(28.0)) .add_row(Row::stretch()) .add_row(Row::strict(28.0)) .add_column(Column::stretch()) .build(ctx), ) .build(ctx); Self { window, scene_selector, load_scene, scene_path, scene: None, orphaned_scene_resources: Default::default(), resources_list, ok, cancel, resource_path, fix, selection: None, new_path_selector, auto_fix, scene_path_value: Default::default(), } } fn fix_path(&mut self, index: usize, new_path: PathBuf, ui: &Ui) { let text = new_path.to_string_lossy().to_string(); self.orphaned_scene_resources[index].set_path(new_path); let item = ui.node(self.resources_list).as_list_view().items()[index]; let item_text = ui.find_by_criteria_down(item, &|n| matches!(n, UiNode::Text(_))); assert!(item_text.is_some()); ui.send_message(WidgetMessage::foreground( item_text, MessageDirection::ToWidget, Brush::Solid(Color::GREEN), )); ui.send_message(TextMessage::text( item_text, MessageDirection::ToWidget, text.clone(), )); ui.send_message(TextMessage::text( self.resource_path, MessageDirection::ToWidget, text, )); } pub fn handle_ui_message(&mut self, message: &UiMessage, ui: &mut Ui) { match message.data() { UiMessageData::FileSelector(FileSelectorMessage::Commit(path)) => { if message.destination() == self.scene_selector { let mut scene = Scene::default(); let message; match block_on(Visitor::load_binary(path)) { Ok(mut visitor) => { if let Err(e) = scene.visit("Scene", &mut visitor) { message = format!( "Failed to load a scene {}\nReason: {}", path.display(), e ); } else { // Gather resources. // Use hash map to remove duplicates. 
let mut scene_resources = HashSet::new(); for node in scene.graph.linear_iter() { if let Some(model) = node.resource() { scene_resources.insert(SceneResource::Model(model)); } match node { Node::Light(light) => { if let Light::Spot(spot) = light { if let Some(texture) = spot.cookie_texture() { scene_resources.insert(SceneResource::Texture( texture.clone(), )); } } } Node::Camera(camera) => { if let Some(skybox) = camera.skybox_ref() { for texture in skybox.textures().iter().flatten() { scene_resources.insert(SceneResource::Texture( texture.clone(), )); } } } Node::Mesh(mesh) => { for surface in mesh.surfaces() { for texture in surface .material() .lock() .unwrap() .properties() .values() .filter_map(|v| { if let PropertyValue::Sampler { value, .. } = v { value.clone() } else { None } }) { scene_resources.insert(SceneResource::Texture( texture.clone(), )); } } } Node::Sprite(sprite) => { if let Some(texture) = sprite.texture() { scene_resources .insert(SceneResource::Texture(texture)); } } Node::Decal(decal) => { if let Some(texture) = decal.diffuse_texture() { scene_resources.insert(SceneResource::Texture( texture.clone(), )); } if let Some(texture) = decal.normal_texture() { scene_resources.insert(SceneResource::Texture( texture.clone(), )); } } Node::ParticleSystem(particle_system) => { if let Some(texture) = particle_system.texture() { scene_resources .insert(SceneResource::Texture(texture)); } } Node::Terrain(terrain) => { if let Some(first) = terrain.chunks_ref().first() { for layer in first.layers() { for texture in layer .material .lock() .unwrap() .properties() .values() .filter_map(|v| { if let PropertyValue::Sampler { value, .. } = v { value.clone() } else { None } })
{ scene_resources.insert( SceneResource::Texture(texture.clone()), ); } } } } Node::Base(_) => { // Nothing } } } // Turn hash map into vec to be able to index it. self.orphaned_scene_resources = scene_resources .into_iter() .filter(|r| !r.path().exists()) .collect::<Vec<_>>(); let ctx = &mut ui.build_ctx(); let items = self .orphaned_scene_resources .iter() .map(|r| { DecoratorBuilder::new(BorderBuilder::new( WidgetBuilder::new().with_height(22.0).with_child( TextBuilder::new( WidgetBuilder::new() .with_margin(Thickness::uniform(1.0)) .with_foreground(Brush::Solid(Color::RED)), ) .with_vertical_text_alignment( VerticalAlignment::Center, ) .with_text(r.path().to_string_lossy().to_string()) .build(ctx), ), )) .build(ctx) }) .collect::<Vec<_>>(); ui.send_message(ListViewMessage::items( self.resources_list, MessageDirection::ToWidget, items, )); ui.send_message(ListViewMessage::selection( self.resources_list, MessageDirection::ToWidget, None, )); self.scene = Some(scene); self.scene_path_value = path.clone(); message = format!("Scene: {}", path.display()); } } Err(e) => { message = format!("Failed to load a scene {}\nReason: {}", path.display(), e); } } ui.send_message(TextMessage::text( self.scene_path, MessageDirection::ToWidget, message, )); } else if message.destination() == self.new_path_selector { if let Some(selection) = self.selection { self.fix_path(selection, replace_slashes(path), ui); } } } UiMessageData::Button(ButtonMessage::Click) => { if message.destination() == self.load_scene { ui.send_message(WindowMessage::open_modal( self.scene_selector, MessageDirection::ToWidget, true, )); } else if message.destination() == self.cancel { ui.send_message(WindowMessage::close( self.window, MessageDirection::ToWidget, )); } else if message.destination() == self.ok { ui.send_message(WindowMessage::close( self.window, MessageDirection::ToWidget, )); if let Some(mut scene) = self.scene.take() { let mut visitor = Visitor::new(); scene .visit("Scene", &mut visitor) .expect("Unable to visit a scene!"); visitor .save_binary(&self.scene_path_value) .expect("Unable to save a scene!"); } ui.send_message(TextMessage::text( self.scene_path, MessageDirection::ToWidget, "No scene loaded!".to_owned(), )); ui.send_message(ListViewMessage::items( self.resources_list, MessageDirection::ToWidget, Default::default(), )); ui.send_message(TextMessage::text( self.resource_path, MessageDirection::ToWidget, Default::default(), )); ui.send_message(WidgetMessage::enabled( self.fix, MessageDirection::ToWidget, false, )); } else if message.destination() == self.fix { if let Some(selection) = self.selection { // Try to find a resource by its file name. let mut resource_path = self.orphaned_scene_resources[selection].path(); if let Some(file_name) = resource_path.file_name() { let candidates = find_file(file_name.as_ref()); // Skip ambiguous file paths. if candidates.len() == 1 { resource_path = candidates.first().unwrap().clone(); } } // Pop parts of the path one by one until existing found. while !resource_path.exists() { resource_path.pop(); } // Set it as a path for the selector to reduce amount of clicks needed. 
ui.send_message(FileSelectorMessage::path( self.new_path_selector, MessageDirection::ToWidget, resource_path, )); ui.send_message(WindowMessage::open_modal( self.new_path_selector, MessageDirection::ToWidget, true, )); } } else if message.destination() == self.auto_fix { for (i, orphaned_resource) in self.orphaned_scene_resources.clone().iter().enumerate() { if let Some(file_name) = orphaned_resource.path().file_name() { let candidates = find_file(file_name.as_ref()); // Skip ambiguous file paths. if candidates.len() == 1 { let new_path = candidates.first().unwrap().clone(); self.fix_path(i, replace_slashes(new_path), ui); } } } } } UiMessageData::ListView(ListViewMessage::SelectionChanged(selection)) => { if message.destination() == self.resources_list { self.selection = *selection; if let Some(selection) = selection { ui.send_message(TextMessage::text( self.resource_path, MessageDirection::ToWidget, format!( "Resource: {}", self.orphaned_scene_resources[*selection].path().display() ), )) } else { ui.send_message(TextMessage::text( self.resource_path, MessageDirection::ToWidget, "No resource selected".to_owned(), )); } ui.send_message(WidgetMessage::enabled( self.fix, MessageDirection::ToWidget, selection.is_some(), )); } } _ => {} } } pub fn handle_message(&mut self, message: &Message, ui: &mut Ui) { if let Message::Configure { working_directory } = message { ui.send_message(FileSelectorMessage::root( self.new_path_selector, MessageDirection::ToWidget, Some(working_directory.to_owned()), )); ui.send_message(FileSelectorMessage::root( self.scene_selector, MessageDirection::ToWidget, Some(working_directory.to_owned()), )); } } }
version_cmd.go
package version import ( "github.com/ZachiNachshon/anchor/internal/cmd" "github.com/ZachiNachshon/anchor/internal/common" "github.com/spf13/cobra" ) type versionCmd struct { cmd.AnchorCommand cobraCmd *cobra.Command ctx common.Context } type NewCommandFunc func(ctx common.Context, versionFunc VersionVersionFunc) *versionCmd func NewCommand(ctx common.Context, versionFunc VersionVersionFunc) *versionCmd { var cobraCmd = &cobra.Command{ Use: "version", Short: "Print anchor CLI version", Long: `Print anchor CLI version`, RunE: func(cmd *cobra.Command, args []string) error { return versionFunc(ctx, NewOrchestrator()) }, }
cobraCmd: cobraCmd, ctx: ctx, } } func (c *versionCmd) GetCobraCmd() *cobra.Command { return c.cobraCmd } func (c *versionCmd) GetContext() common.Context { return c.ctx } func AddCommand(parent cmd.AnchorCommand, createCmd NewCommandFunc) error { newCmd := createCmd(parent.GetContext(), VersionVersion) parent.GetCobraCmd().AddCommand(newCmd.GetCobraCmd()) return nil }
return &versionCmd{
events_test.go
//(C) Copyright [2020] Hewlett Packard Enterprise Development LP
//
//Licensed under the Apache License, Version 2.0 (the "License"); you may
//not use this file except in compliance with the License. You may obtain
//a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
//WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
//License for the specific language governing permissions and limitations
// under the License.

// Package evresponse has the struct models and DB functionalities
package evresponse

import (
    "sync"
    "testing"

    "github.com/ODIM-Project/ODIM/lib-utilities/common"
    "gotest.tools/assert"
)

func TestResponse(t *testing.T)
{
    common.SetUpMockConfig()
    var wg sync.WaitGroup
    var originResource = []string{
        "4228c0db-253b-4dc8-93d1-dab9359139ba.1",
        "423e8254-e3ef-42bd-a130-f096c93a6c42.1",
        "37646c88-a7d7-468c-af58-49e8a0adbbb2.1",
    }
    var hosts = []string{"10.24.1.10", "10.24.1.11", "10.24.1.12"}
    var responses = []EventResponse{{StatusCode: 201}, {StatusCode: 400}, {StatusCode: 201}}
    var result = &MutexLock{
        Response: make(map[string]EventResponse),
        Lock:     &sync.Mutex{},
    }
    for i, origin := range originResource {
        wg.Add(1)
        go func(originResource string, result *MutexLock, wg *sync.WaitGroup, i int) {
            defer wg.Done()
            result.AddResponse(originResource, hosts[i], responses[i])
        }(origin, result, &wg, i)
    }
    wg.Wait()
    _, hostAddresses := result.ReadResponse("1")
    assert.Equal(t, len(hostAddresses), 2, "should be 2 documents")
}
Lista4ex7.py
def concat(s1, s2):
    if not s1:
        return s2
    return s1[0:1] + concat(s1[1:], s2)


def reverse(s1):
    if not s1:
        return s1
    return concat(reverse(s1[1:]), s1[0])


def prefix(s1, s2):
    # An empty s1 is a prefix of any s2; the original `s1 == '' and s2 != ''`
    # test recursed forever when s1 and s2 were equal.
    if not s1:
    if s1[:1] == s2[:1]:
        return prefix(s1[1:], s2[1:])
    return False


s1 = input()
s2 = input()
print(concat(s1, s2))
print(reverse(s1))
print(prefix(s1, s2))
return True
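# A short trace of the prefix() recursion (illustrative):
#   prefix("ab", "abc") -> prefix("b", "bc") -> prefix("", "c") -> True
#   prefix("ba", "abc") -> False  (first characters differ)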
wof-placetype-descendants.go
package main import ( "flag" "github.com/whosonfirst/go-whosonfirst-placetypes" "github.com/whosonfirst/go-whosonfirst-cli/flags" "log" ) func main() { var roles flags.MultiString flag.Var(&roles, "role", "...") flag.Parse() for _, str_pt := range flag.Args() { pt, err := placetypes.GetPlacetypeByName(str_pt) if err != nil { log.Fatal(err) } var descendants []*placetypes.WOFPlacetype if len(roles) > 0
else { descendants = placetypes.Descendants(pt) } for i, p := range descendants { log.Println(i, p.Name) } } }
{ descendants = placetypes.DescendantsForRoles(pt, roles) }
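// Illustrative invocation (the placetype and role names are examples and may
// not exist in every placetype spec):
//
//   wof-placetype-descendants -role common region
//
// which prints each descendant placetype of "region" with its index.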
contextmenu-all.module.ts
import { NgModule, ValueProvider } from '@angular/core';
/** * NgModule definition for the ContextMenu component with providers. */ @NgModule({ imports: [CommonModule, ContextMenuModule], exports: [ ContextMenuModule ], providers:[ ] }) export class ContextMenuAllModule { }
import { CommonModule } from '@angular/common'; import { ContextMenuComponent } from './contextmenu.component'; import { ContextMenuModule } from './contextmenu.module';
Renovavel.py
from Usina import Usina; from RecebeDados import RecebeDados; class
(Usina):
    def __init__(self, recebe_dados, abaRenov, offset, iRenov):
        # Set fonte_dados to the RecebeDados object and store the name of the
        # sheet that holds the UHE plants.
        self.nomeAba = abaRenov;
        self.fonte_dados = recebe_dados;
        self.indexUsinaInterno = iRenov;
        # The offset variable matters because the number of rows that must be
        # skipped in the spreadsheet may differ from the plant's index.
        self.linhaOffset = offset;
        # Call the parent-class initializer.
        super(Renovavel, self).__init__(self.fonte_dados, self.nomeAba, self.linhaOffset);
        return;
Renovavel
build.rs
// Copyright (C) 2017-2019 Baidu, Inc. All Rights Reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in // the documentation and/or other materials provided with the // distribution. // * Neither the name of Baidu, Inc., nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use std::env; fn main () { let sdk_dir = env::var("SGX_SDK") .unwrap_or_else(|_| "/opt/intel/sgxsdk".to_string()); println!("cargo:rustc-link-search=native=../lib"); println!("cargo:rustc-link-lib=static=Enclave_u"); println!("cargo:rustc-link-search=native={}/lib64", sdk_dir); println!("cargo:rustc-link-lib=dylib=sgx_urts"); println!("cargo:rustc-link-lib=dylib=sgx_uae_service"); println!("cargo:rustc-link-lib=dylib=sgx_uprotected_fs"); }
//
script.js
function verificar() {
    var data = new Date()
    var ano = data.getFullYear()
    var fano = window.document.getElementById('txtano')
    var res = window.document.querySelector('div#res')
    if (fano.value.length == 0 || Number(fano.value) > ano) {
        window.alert('[ERRO] Verifique os dados e tente novamente!')
    } else {
        var fsex = window.document.getElementsByName('radsex')
        var idade = ano - Number(fano.value)
        var gênero = ''
        var img = window.document.createElement('img')
        img.setAttribute('id', 'foto') // SAME AS <img id='foto'> IN THE HTML
        if (fsex[0].checked) {
            gênero = 'Homem'
            if (idade >= 0 && idade < 10) {
                // child
                img.setAttribute('src', 'bebemas.png')
            } else if (idade < 21) {
                // young
                img.setAttribute('src', 'jovemmas.png')
            } else if (idade < 50) {
                // adult
                img.setAttribute('src', 'homem.png')
            } else {
                // elderly
                img.setAttribute('src', 'idoso.png')
                // child
                img.setAttribute('src', 'bebefem.png')
            } else if (idade < 21) {
                // young
                img.setAttribute('src', 'jovemfem.png')
            } else if (idade < 50) {
                // adult
                img.setAttribute('src', 'mulher.png')
            } else {
                // elderly
                img.setAttribute('src', 'idosa.png')
            }
        }
        res.style.textAlign = 'center'
        res.innerHTML = `Detectamos ${gênero} com ${idade} anos.`
        res.appendChild(img)
    }
}
            }
        } else if (fsex[1].checked) {
            gênero = 'Mulher'
            if (idade >= 0 && idade < 10) {
actions.ts
import { Action } from 'redux'; import { WindowChannel } from '@kbase/ui-lib'; import { BaseStoreState } from '../store'; import { ThunkDispatch } from 'redux-thunk'; import { AppConfig, Params } from '../integration/store'; import { Auth, AuthenticationStatus } from '@kbase/ui-lib'; import { WindowChannelInit } from "@kbase/ui-lib/lib/windowChannel"; import { v4 as uuidv4 } from 'uuid'; export enum DevelopActionType { DEVELOP_SET_TITLE = '@kbase-ui-components:develop_set_title', DEVELOP_START = '@kbase-ui-components:develop_start', DEVELOP_LOAD_SUCCESS = '@kbase-ui-components:develop_load_success', DEVELOP_SET_VIEW = '@kbase-ui-components:develop_set_view', DEVELOP_SET_PARAMS = '@kbase-ui-components:develop_set_params' } // Action Types export interface DevelopSetTitle extends Action<DevelopActionType.DEVELOP_SET_TITLE> { type: DevelopActionType.DEVELOP_SET_TITLE; title: string; } export interface DevelopStart extends Action<DevelopActionType.DEVELOP_START> { type: DevelopActionType.DEVELOP_START; } export interface DevelopLoadSuccess extends Action<DevelopActionType.DEVELOP_LOAD_SUCCESS> { type: DevelopActionType.DEVELOP_LOAD_SUCCESS; hostChannelId: string; pluginChannelId: string; } export interface DevelopSetView extends Action<DevelopActionType.DEVELOP_SET_VIEW> { type: DevelopActionType.DEVELOP_SET_VIEW, view: string; } export interface DevelopSetParams extends Action<DevelopActionType.DEVELOP_SET_PARAMS> { type: DevelopActionType.DEVELOP_SET_PARAMS, // TODO: can we make params generic? params: Params<string>; } // Action generators export function setTitle(title: string): DevelopSetTitle { return { type: DevelopActionType.DEVELOP_SET_TITLE, title }; } export function loadSuccess(hostChannelId: string, pluginChannelId: string): DevelopLoadSuccess { return { type: DevelopActionType.DEVELOP_LOAD_SUCCESS, hostChannelId, pluginChannelId }; } export function setView(view: string): DevelopSetView { return { type: DevelopActionType.DEVELOP_SET_VIEW, view }; } export function setParams(params: Params<string>): DevelopSetParams { return { type: DevelopActionType.DEVELOP_SET_PARAMS, params }; }
let channel: WindowChannel; // dev config uses current host const devOrigin = document.location.origin; const devConfig: AppConfig = { baseUrl: devOrigin, defaultPath: '', services: { Groups: { url: `${devOrigin}/services/groups` }, UserProfile: { url: `${devOrigin}/services/user_profile/rpc` }, Workspace: { url: `${devOrigin}/services/ws` }, SampleService: { url: `${devOrigin}/services/sampleservice` }, SearchAPI2: { url: `${devOrigin}/services/searchapi2/rpc` }, SearchAPI2Legacy: { url: `${devOrigin}/services/searchapi2/legacy` }, ServiceWizard: { url: `${devOrigin}/services/service_wizard` }, Auth: { url: `${devOrigin}/services/auth` }, NarrativeMethodStore: { url: `${devOrigin}/services/narrative_method_store/rpc` }, Catalog: { url: `${devOrigin}/services/catalog/rpc` }, NarrativeJobService: { url: `${devOrigin}/services/njs_wrapper` }, RelationEngine: { url: `${devOrigin}/services/relation_engine_api` } }, dynamicServices: { JobBrowserBFF: { version: 'dev' }, OntologyAPI: { version: 'dev' }, TaxonomyAPI: { version: 'dev' } } }; function setupAndStartChannel(dispatch: ThunkDispatch<BaseStoreState, void, Action>): WindowChannel { // The following simulates what a host environment would do. // Create a host channel. const chan = new WindowChannelInit() channel = chan.makeChannel(uuidv4()) channel.on('ready', async (params) => { channel.setPartner(params.channelId); // We get the initial auth info for this kbase session. const auth = new Auth(devConfig.services.Auth.url); const authInfo = await auth.checkAuth(); if (authInfo.status === AuthenticationStatus.AUTHENTICATED) { const {token, username, realname, roles} = authInfo.userAuthentication; channel.send('start', { authentication: { token, username, realname, roles }, config: devConfig, // TODO: refactor this to reflect the actual view and params in the dev tool. view: '', params: { } }); } else { channel.send('start', { authentication: null, config: devConfig }); } }); channel.on('get-auth-status', async () => { const auth = new Auth(devConfig.services.Auth.url); const authInfo = await auth.checkAuth(); if (authInfo.status === AuthenticationStatus.AUTHENTICATED) { const {token, username, realname, roles} = authInfo.userAuthentication; channel.send('auth-status', { token, username, realname, roles }); } else { channel.send('auth-status', { token: null, username: '', realname: '', roles: [] }); } }); channel.on('get-config', () => { channel.send('config', { value: devConfig }); }); channel.on('add-button', ({ button }) => { console.warn('add button not yet supported'); }); channel.on('open-window', ({ url }) => { window.location.href = url; }); // this.channel.on('send-instrumentation', (instrumentation) => { // }); channel.on('ui-navigate', (to) => { console.warn('ui-navigate not yet supported'); }); channel.on('post-form', (config) => { console.warn('form-post not yet supported'); // this.formPost(config); }); channel.on('set-title', (config) => { dispatch(setTitle(config.title)); }); channel.start(); return channel; } export function start(window: Window) { return async (dispatch: ThunkDispatch<BaseStoreState, void, Action>, getState: () => BaseStoreState) => { dispatch({ type: DevelopActionType.DEVELOP_START } as DevelopStart); // create channel const channel = setupAndStartChannel(dispatch); // set channel id via action dispatch(loadSuccess(channel.getId(), channel.getPartnerId())); // set up channel handlers, etc. }; }
cyclers.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = "Christian Heider Nielsen" __doc__ = r""" Created on 18-02-2021 """ __all__ = [ "monochrome_hatch_cycler", "simple_hatch_cycler", "monochrome_line_no_marker_cycler", "monochrome_line_cycler", ] from matplotlib import cycler from draugr.visualisation.matplotlib_utilities.styles.hatching import ( four_times_denser_hatch, ) from draugr.visualisation.matplotlib_utilities.styles.lines import ( line_styles, marker_styles, ) simple_hatch_cycler = cycler("hatch", four_times_denser_hatch) monochrome_hatch_cycler = ( cycler("color", "w") * cycler("facecolor", "w") * cycler("edgecolor", "k") * simple_hatch_cycler ) monochrome_line_no_marker_cycler = cycler("color", ["k"]) * cycler( "linestyle", line_styles ) monochrome_line_cycler = ( cycler("color", ["k"])
if __name__ == "__main__": print([a for _, a in zip(range(10), monochrome_line_cycler)])
* cycler("linestyle", line_styles) * cycler("marker", marker_styles) )
NodeIterator-impl.js
"use strict"; const { hasWeakRefs } = require("../../utils"); const { domSymbolTree } = require("../helpers/internal-constants"); const { filter, FILTER_ACCEPT } = require("./helpers"); exports.implementation = class NodeIteratorImpl { constructor(globalObject, args, privateData) { this._active = false; this.root = privateData.root; this.whatToShow = privateData.whatToShow; this.filter = privateData.filter; this._referenceNode = this.root; this._pointerBeforeReferenceNode = true; // This is used to deactivate the NodeIterator if there are too many working in a Document at the same time. // Without weak references, a JS implementation of NodeIterator will leak, since we can't know when to clean it up. // This ensures we force a clean up of those beyond some maximum (specified by the Document). if (!hasWeakRefs) { this._working = true; } this._globalObject = globalObject; } get referenceNode() { this._throwIfNotWorking(); return this._referenceNode; } get pointerBeforeReferenceNode() { this._throwIfNotWorking(); return this._pointerBeforeReferenceNode; } nextNode() { this._throwIfNotWorking(); return this._traverse("next"); } previousNode() { this._throwIfNotWorking(); return this._traverse("previous"); } detach() { // Intentionally do nothing, per spec. } // Called by Documents. _preRemovingSteps(toBeRemovedNode) { // Second clause is https://github.com/whatwg/dom/issues/496 if (!toBeRemovedNode.contains(this._referenceNode) || toBeRemovedNode === this.root) { return; } if (this._pointerBeforeReferenceNode) { let next = null; let candidateForNext = domSymbolTree.following(toBeRemovedNode, { skipChildren: true }); while (candidateForNext !== null) { if (this.root.contains(candidateForNext)) { next = candidateForNext; break; } candidateForNext = domSymbolTree.following(candidateForNext, { skipChildren: true }); } if (next !== null) { this._referenceNode = next;
this._pointerBeforeReferenceNode = false; } const { previousSibling } = toBeRemovedNode; this._referenceNode = previousSibling === null ? toBeRemovedNode.parentNode : domSymbolTree.lastInclusiveDescendant(toBeRemovedNode.previousSibling); } // Only called by getters and methods that are affected by the pre-removing steps _throwIfNotWorking() { if (!hasWeakRefs && !this._working) { throw Error(`This NodeIterator is no longer working. More than 10 iterators are being used concurrently. ` + `Using more than 10 node iterators requires WeakRef support.`); } } _traverse(direction) { let node = this._referenceNode; let beforeNode = this._pointerBeforeReferenceNode; while (true) { if (direction === "next") { if (!beforeNode) { node = domSymbolTree.following(node, { root: this.root }); if (!node) { return null; } } beforeNode = false; } else if (direction === "previous") { if (beforeNode) { node = domSymbolTree.preceding(node, { root: this.root }); if (!node) { return null; } } beforeNode = true; } const result = filter(this, node); if (result === FILTER_ACCEPT) { break; } } this._referenceNode = node; this._pointerBeforeReferenceNode = beforeNode; return node; } };
return; }
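// Usage sketch of the public API this implementation class backs
// (illustrative):
//   const it = document.createNodeIterator(document.body, NodeFilter.SHOW_ELEMENT);
//   let node;
//   while ((node = it.nextNode())) {
//     // visit node
//   }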
OtpRrItineraryBody.story.js
import coreUtils from "@opentripplanner/core-utils";
import { ClassicLegIcon } from "@opentripplanner/icons";
import PropTypes from "prop-types";
import React from "react";

import ItineraryBody from "..";
import {
  CustomTimeColumnContent,
  WrappedOtpRRTransitLegSubheader
} from "../demos";
import ItineraryBodyDefaultsWrapper from "./itinerary-body-defaults-wrapper";
import OtpRRLineColumnContent from "../otp-react-redux/line-column-content";
import OtpRRPlaceName from "../otp-react-redux/place-name";
import OtpRRRouteDescription from "../otp-react-redux/route-description";

// import mock itineraries. These are all trip plan outputs from OTP.
const bikeOnlyItinerary = require("../__mocks__/itineraries/bike-only.json");
const bikeRentalItinerary = require("../__mocks__/itineraries/bike-rental.json");
const bikeRentalTransitBikeRentalItinerary = require("../__mocks__/itineraries/bike-rental-transit-bike-rental.json");
const bikeTransitBikeItinerary = require("../__mocks__/itineraries/bike-transit-bike.json");
const eScooterRentalItinerary = require("../__mocks__/itineraries/e-scooter-rental.json");
const eScooterRentalTransiteScooterRentalItinerary = require("../__mocks__/itineraries/e-scooter-transit-e-scooter.json");
const fareComponentsItinerary = require("../__mocks__/itineraries/fare-components.json");
const parkAndRideItinerary = require("../__mocks__/itineraries/park-and-ride.json");
const tncTransitTncItinerary = require("../__mocks__/itineraries/tnc-transit-tnc.json");
const walkInterlinedTransitItinerary = require("../__mocks__/itineraries/walk-interlined-transit-walk.json");
const walkOnlyItinerary = require("../__mocks__/itineraries/walk-only.json");
const walkTransitWalkItinerary = require("../__mocks__/itineraries/walk-transit-walk.json");
const walkTransitWalkTransitWalkItinerary = require("../__mocks__/itineraries/walk-transit-walk-transit-walk.json");
const walkTransitWalkTransitWalkA11yItinerary = require("../__mocks__/itineraries/walk-transit-walk-transit-walk-with-accessibility-scores.json");

function OtpRRItineraryBodyWrapper({
  itinerary,
  showRouteFares,
  TimeColumnContent
}) {
  return (
    <ItineraryBodyDefaultsWrapper
      itinerary={itinerary}
      LegIcon={ClassicLegIcon}
      LineColumnContent={OtpRRLineColumnContent}
      PlaceName={OtpRRPlaceName}
      RouteDescription={OtpRRRouteDescription}
      showAgencyInfo
      showLegIcon
      showMapButtonColumn={false}
      showRouteFares={showRouteFares}
      showViewTripButton
      styledItinerary="otp-rr"
      TimeColumnContent={TimeColumnContent}
      TransitLegSubheader={WrappedOtpRRTransitLegSubheader}
    />
  );
}

OtpRRItineraryBodyWrapper.propTypes = {
  itinerary: coreUtils.types.itineraryType.isRequired,
  showRouteFares: PropTypes.bool,
  TimeColumnContent: PropTypes.elementType
};

OtpRRItineraryBodyWrapper.defaultProps = {
  showRouteFares: undefined,
  TimeColumnContent: undefined
};

export default {
  title: "ItineraryBody/otp-react-redux",
  component: ItineraryBody
};

export const WalkOnlyItinerary = () => (
  <OtpRRItineraryBodyWrapper itinerary={walkOnlyItinerary} />
);

export const BikeOnlyItinerary = () => (
  <OtpRRItineraryBodyWrapper itinerary={bikeOnlyItinerary} />
);

export const WalkTransitWalkItinerary = () => (
  <OtpRRItineraryBodyWrapper itinerary={walkTransitWalkItinerary} />
export const BikeTransitBikeItinerary = () => ( <OtpRRItineraryBodyWrapper itinerary={bikeTransitBikeItinerary} /> ); export const WalkInterlinedTransitItinerary = () => ( <OtpRRItineraryBodyWrapper itinerary={walkInterlinedTransitItinerary} /> ); export const WalkTransitTransferItinerary = () => ( <OtpRRItineraryBodyWrapper itinerary={walkTransitWalkTransitWalkItinerary} /> ); export const WalkTransitTransferWithA11yItinerary = () => ( <OtpRRItineraryBodyWrapper itinerary={walkTransitWalkTransitWalkA11yItinerary} /> ); export const BikeRentalItinerary = () => ( <OtpRRItineraryBodyWrapper itinerary={bikeRentalItinerary} /> ); export const EScooterRentalItinerary = () => ( <OtpRRItineraryBodyWrapper itinerary={eScooterRentalItinerary} /> ); export const ParkAndRideItinerary = () => ( <OtpRRItineraryBodyWrapper itinerary={parkAndRideItinerary} /> ); export const BikeRentalTransitItinerary = () => ( <OtpRRItineraryBodyWrapper itinerary={bikeRentalTransitBikeRentalItinerary} /> ); export const EScooterRentalTransitItinerary = () => ( <OtpRRItineraryBodyWrapper itinerary={eScooterRentalTransiteScooterRentalItinerary} /> ); export const TncTransitItinerary = () => ( <OtpRRItineraryBodyWrapper itinerary={tncTransitTncItinerary} /> ); export const IndividualLegFareComponents = () => ( <OtpRRItineraryBodyWrapper itinerary={fareComponentsItinerary} showRouteFares /> ); export const CustomTimeColumn = () => ( <OtpRRItineraryBodyWrapper itinerary={tncTransitTncItinerary} TimeColumnContent={CustomTimeColumnContent} /> );
);
frame_converter.rs
//! Provides a set of tools to convert raw packets from Ouster sensors.

use super::{
    config::Config,
    consts::COLUMNS_PER_PACKET,
    packet::{Column, Packet},
    pcd_converter::{Point, PointCloudConverter},
};
use crate::common::*;

/// A frame is a collection of points gathered in one
/// LIDAR rotation.
#[derive(Debug, Clone)]
pub struct Frame {
    /// The ID marked by [FrameConverter](FrameConverter).
    pub frame_id: u16,
    /// The IDs of dropped frames before this frame comes in.
    pub skipped_frame_ids: Range<u16>,
    /// Pairs of `(measurement_id, timestamp)`.
    pub timestamps: Vec<(u16, u64)>,
    /// Point cloud data.
    pub points: Vec<Point>,
}

/// It reads [columns](Column) of sensor data, and
/// gathers points into sequence of frames.
///
/// It internally computes point cloud using
/// [PointCloudConverter](PointCloudConverter).
/// The columns must be pushed in the same order
/// of LIDAR output. It keeps track of skipped
/// columns and dropped frames.
#[derive(Debug)]
pub struct FrameConverter {
    pcd_converter: PointCloudConverter,
    state: Option<FrameConverterState>,
}

impl FrameConverter {
    /// Creates converter from config.
    pub fn from_config(config: Config) -> Self {
        Self {
            pcd_converter: PointCloudConverter::from_config(config),
            state: None,
        }
    }

    /// Returns the resolution in `(width, height)` pair.
    pub fn resolution(&self) -> (u16, u16) {
        let width = self.pcd_converter.columns_per_revolution();
        (width, 64)
    }

    /// Returns the number of columns per revolution.
    pub fn columns_per_revolution(&self) -> u16 {
        self.pcd_converter.columns_per_revolution()
    }

    /// Pushes new [Column] to converter.
    pub fn push_column(&mut self, column: &Column) -> Result<Vec<Frame>> {
        let curr_fid = column.frame_id;
        let curr_mid = column.measurement_id;
        let curr_ts = column.timestamp;
        let curr_points = self.pcd_converter.column_to_points(&column)?;

        // If received column is not valid, update last_{fid,mid} only
        if !column.valid() {
            let (frame_opt, new_state) = match self.state.take() {
                Some(mut state) => {
                    let frame_opt = match state.last_fid.cmp(&curr_fid) {
                        Ordering::Less => state.frame.take(),
                        Ordering::Equal => None,
                        Ordering::Greater => {
                            bail!(
                                "Frame ID of received column is less than that of previous column"
                            );
                        }
                    };
                    state.last_fid = curr_fid;
                    state.last_mid = curr_mid;
                    (frame_opt, state)
                }
                None => {
                    let new_state = FrameConverterState {
                        last_fid: curr_fid,
                        last_mid: curr_mid,
                        frame: None,
                    };
                    (None, new_state)
                }
            };
            self.state = Some(new_state);
            return Ok(frame_opt.into_iter().collect());
        }

        let (new_state, output_frames) = match self.state.take() {
            Some(mut state) => {
                match state.last_fid.cmp(&curr_fid) {
                    Ordering::Less => {
                        // Case: New frame ID
                        // Pop out saved frame and conditionally save or output second frame
                        let first_frame_opt = state.frame.take();
                        let second_frame = Frame {
                            frame_id: curr_fid,
                            skipped_frame_ids: (state.last_fid + 1)..curr_fid,
                            timestamps: {
                                let mut timestamps = Vec::with_capacity(COLUMNS_PER_PACKET);
                                timestamps.push((curr_mid, curr_ts));
                                timestamps
                            },
                            points: curr_points,
                        };

                        let mut new_state = FrameConverterState {
                            last_mid: curr_mid,
                            last_fid: curr_fid,
                            frame: None,
                        };

                        // Produce frame if measurement ID is exactly the latest ID of frame
                        let (second_frame_opt, new_state) =
                            if curr_mid + 1 == self.pcd_converter.columns_per_revolution() {
                                (Some(second_frame), new_state)
                            } else {
                                new_state.frame = Some(second_frame);
                                (None, new_state)
                            };

                        let output_frames = first_frame_opt
                            .into_iter()
                            .chain(second_frame_opt.into_iter())
                            .collect();

                        (new_state, output_frames)
                    }
                    Ordering::Equal => {
                        if state.last_mid >= curr_mid {
                            let error = format_err!(
                                "Measurement ID of received column is not greater than that of previous column"
                            );
                            return Err(error);
                        }

                        // Conditionally produce frame if measurement ID is the latest one
                        let mut new_state = FrameConverterState {
                            last_mid: curr_mid,
                            last_fid: curr_fid,
                            frame: None,
                        };

                        let frame = {
                            let mut frame = state.frame.take().unwrap_or_else(|| {
                                unreachable!("Please report bug to upstream");
                            });
                            frame.timestamps.push((curr_mid, curr_ts));
                            frame.points.extend(curr_points);
                            frame
                        };

                        let (frame_opt, new_state) =
                            if curr_mid + 1 == self.pcd_converter.columns_per_revolution() {
                                (Some(frame), new_state)
                            } else {
                                new_state.frame = Some(frame);
                                (None, new_state)
                            };

                        let output_frames = frame_opt.into_iter().collect();
                        (new_state, output_frames)
                    }
                    Ordering::Greater => {
                        let error = format_err!(
                            "Frame ID of received column is less than that of previous column"
                        );
                        return Err(error);
                    }
                }
            }
            None => {
                let frame = Frame {
                    frame_id: curr_fid,
                    skipped_frame_ids: curr_fid..curr_fid,
                    timestamps: {
                        let mut timestamps = Vec::with_capacity(COLUMNS_PER_PACKET);
                        timestamps.push((curr_mid, curr_ts));
                        timestamps
                    },
                    points: curr_points,
                };
                let mut new_state = FrameConverterState {
                    last_mid: curr_mid,
                    last_fid: curr_fid,
                    frame: None,
                };
                let frame_opt = if curr_mid + 1 == self.pcd_converter.columns_per_revolution() {
                    Some(frame)
                } else
; (new_state, frame_opt.into_iter().collect()) } }; self.state = Some(new_state); Ok(output_frames) } /// Pushes new [Packet] to converter. pub fn push_packet<P>(&mut self, packet: P) -> Result<Vec<Frame>> where P: AsRef<Packet>, { let mut frames = vec![]; for column in packet.as_ref().columns.iter() { frames.extend(self.push_column(&column)?); } Ok(frames) } /// Consumes the instance and outputs last maybe /// incomplete frame. pub fn finish(mut self) -> Option<Frame> { self.state .take() .map(|mut state| state.frame.take()) .unwrap_or(None) } } #[derive(Clone, Debug)] struct FrameConverterState { last_mid: u16, last_fid: u16, frame: Option<Frame>, }
{ new_state.frame = Some(frame); None }
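// Illustrative driving loop (assumes a valid `Config` and an iterator of
// values implementing `AsRef<Packet>`; error handling elided):
//
//     let mut converter = FrameConverter::from_config(config);
//     for packet in packets {
//         for frame in converter.push_packet(packet)? {
//             // consume the completed frame
//         }
//     }
//     if let Some(tail) = converter.finish() {
//         // possibly incomplete last frame
//     }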
main.rs
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use std::fs; use std::path::PathBuf; use structopt::StructOpt; use wayland_scanner_lib::{Codegen, CodegenTarget, Parser, Protocol}; /// Generates wayland server bindings for the given protocol file. #[derive(StructOpt, Debug)] struct Options { /// Flag to control which set of bindings is generated - client or server. #[structopt(long = "target", parse(try_from_str))] target: CodegenTarget, /// Input XML file #[structopt(short = "i", long = "input", parse(from_os_str))] input: PathBuf, /// Generated rust source #[structopt(short = "o", long = "output", parse(from_os_str))] output: PathBuf, /// Additional crate dependencies of this protocol. These should be the /// names of any additional crates that the generated module will depend on. /// This just emits a `use <dep>::*` for each crate, so these crates must /// also be provided to properly build the generated module. #[structopt(name = "dep", short = "d", long = "dep")] dependencies: Vec<String>, } fn main()
{ let options = Options::from_args(); // Open input/output files. let infile = match fs::File::open(&options.input) { Ok(file) => file, Err(_) => { println!("Failed to open input file {:?}", &options.input); return; } }; let outfile = match fs::File::create(&options.output) { Ok(file) => file, Err(_) => { println!("Failed to open output file {:?}", &options.output); return; } }; // Parse XML and generate rust module. let mut parser = Parser::new(infile); let mut codegen = Codegen::new(outfile); let parse_tree = match parser.read_document() { Ok(parse_tree) => parse_tree, Err(msg) => { println!("Failed to parse document {}", msg); return; } }; let protocol = match Protocol::from_parse_tree(parse_tree) { Ok(protocol) => protocol, Err(msg) => { println!("Failed to build AST {}", msg); return; } }; if let Err(e) = codegen.codegen(options.target, protocol, options.dependencies.as_slice()) { println!("Failed to codegen rust module {}", e); } }
__init__.py
#-*- coding: utf-8 -*- # Author: Matt Earnshaw <[email protected]> from __future__ import absolute_import import os import sys import sunpy from PyQt4.QtGui import QApplication from sunpy.gui.mainwindow import MainWindow from sunpy.io import UnrecognizedFileTypeError class
(object):
    """
    Wraps a MainWindow so PlotMan instances can be created via the CLI.

    Examples
    --------
    from sunpy.gui import Plotman
    plots = Plotman("data/examples")
    plots.show()

    """

    def __init__(self, *paths):
        """ *paths: directories containing FITS files or
            FITS paths to be opened in PlotMan """
        self.app = QApplication(sys.argv)
        self.main = MainWindow()
        self.open_files(paths)

    def open_files(self, inputs):
        VALID_EXTENSIONS = [".jp2", ".fits", ".fts"]
        to_open = []

        # Determine files to process
        for input_ in inputs:
            if os.path.isfile(input_):
                to_open.append(input_)
            elif os.path.isdir(input_):
                for file_ in os.listdir(input_):
                    # Join with the directory so the path stays resolvable.
                    to_open.append(os.path.join(input_, file_))
            else:
                raise IOError("Path " + input_ + " does not exist.")

        # Load files
        for filepath in to_open:
            name, ext = os.path.splitext(filepath)  # pylint: disable=W0612
            if ext.lower() in VALID_EXTENSIONS:
                try:
                    self.main.add_tab(filepath, os.path.basename(filepath))
                except UnrecognizedFileTypeError:
                    pass

    def show(self):
        self.main.show()
        self.app.exec_()


if __name__ == "__main__":
    from sunpy.gui import Plotman
    plots = Plotman(sunpy.AIA_171_IMAGE)
    plots.show()
Plotman
main.go
package main

import "fmt"

func main() {
    fmt.Printf("%d\n", lengthOfLongestSubstring("a"))
}

// Dynamic-programming style solution: trade space for time.
func lengthOfLongestSubstring(s string) int {
    maxLength := 0
    posOfChar := make(map[rune]int) // map of each character's last seen position
    startPos := 0                   // start index of the current window

    for i, c := range s {
        // When a character shows up again, compute the current substring
        // length and advance the start index (it must never move back
        // before the current start).
        if pos, ok := posOfChar[c]; ok && pos >= startPos {
            // current index - start index = length of the current substring
            if i-startPos > maxLength {
                maxLength = i - startPos
            }
            // move the start to one past the previous occurrence
            startPos = pos + 1
        }
        // record the current character's position
        posOfChar[c] = i
    }
    if len(s)-startPos > maxLength {
        maxLength = len(s) - startPos
    }
    return maxLength
}
    // Handle the special case where the longest substring runs to the end of the string, example: abcde
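// A short trace (illustrative): for s = "abcabcbb", the repeated 'a' at index
// 3 closes the window "abc" (length 3) and moves startPos to 1; no later
// window grows past 3, so the function returns 3.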
extern-crate-self-macro-alias.rs
// run-pass // Test that a macro can correctly expand the alias // in an `extern crate self as ALIAS` item. fn
() -> usize { 42 } macro_rules! alias_self { ($alias:ident) => { extern crate self as $alias; } } alias_self!(the_alias); fn main() { assert_eq!(the_alias::the_answer(), 42); }
the_answer
test_git_speed.py
from git_speed import __version__
assert __version__ == "1.1.1"
def test_version():
wrappers.go
package log import ( "context" "github.com/go-kit/log" kitlog "github.com/go-kit/log" "github.com/weaveworks/common/tracing" "github.com/cortexproject/cortex/pkg/tenant" ) // WithUserID returns a Logger that has information about the current user in // its details. func WithUserID(userID string, l kitlog.Logger) kitlog.Logger { // See note in WithContext. return kitlog.With(l, "org_id", userID) } // WithTraceID returns a Logger that has information about the traceID in // its details. func WithTraceID(traceID string, l kitlog.Logger) kitlog.Logger { // See note in WithContext. return kitlog.With(l, "traceID", traceID) } // WithContext returns a Logger that has information about the current user in // its details. // // e.g. // log := util.WithContext(ctx) // log.Errorf("Could not chunk chunks: %v", err) func WithContext(ctx context.Context, l kitlog.Logger) kitlog.Logger { // Weaveworks uses "orgs" and "orgID" to represent Cortex users, // even though the code-base generally uses `userID` to refer to the same thing. userID, err := tenant.TenantID(ctx) if err == nil { l = WithUserID(userID, l) } traceID, ok := tracing.ExtractSampledTraceID(ctx) if !ok
return WithTraceID(traceID, l) } // WithSourceIPs returns a Logger that has information about the source IPs in // its details. func WithSourceIPs(sourceIPs string, l log.Logger) log.Logger { return log.With(l, "sourceIPs", sourceIPs) }
{ return l }
Plugin_8h.js
var Plugin_8h = [ [ "Plugin", "classSimTK_1_1Plugin.html", "classSimTK_1_1Plugin" ], [ "SimTK_PLUGIN_XXX_MAKE_HOLDER", "Plugin_8h.html#ac172341527a4e3d847dfbb7d8d4d20aa", null ], [ "SimTK_PLUGIN_XXX_MAKE_BODY", "Plugin_8h.html#ad76891fab1443a6ac997bca31c9e9aa2", null ], [ "SimTK_PLUGIN_XXX_MAKE_SYMTEST", "Plugin_8h.html#aeb27ad314901348ecdaa7009e17a3801", null ], [ "SimTK_PLUGIN_DEFINE_SYMBOL", "Plugin_8h.html#a4f1c1b9f46ff0e92bd6ccfcbd0f1776f", null ], [ "SimTK_PLUGIN_DEFINE_FUNCTION", "Plugin_8h.html#aba62241fe4114437bdb6a77607e26d7d", null ], [ "SimTK_PLUGIN_DEFINE_FUNCTION1", "Plugin_8h.html#ad2fd601eb2ed8c50a931e0f4883b1ec6", null ], [ "SimTK_PLUGIN_DEFINE_FUNCTION2", "Plugin_8h.html#a069dba13037e9a8342e1966f72ad6a9e", null ]
];
graceful-stop.go
package main import ( "fmt" "os" "os/signal" "syscall" ) func gracefulStop()
{
    // Handle ^C and SIGTERM gracefully.
    // Buffer the channel: signal.Notify does not block when sending, so an
    // unbuffered os.Signal channel can miss a signal (flagged by `go vet`).
    var gracefulStop = make(chan os.Signal, 1)
    signal.Notify(gracefulStop, syscall.SIGTERM, syscall.SIGINT)
    go func() {
        sig := <-gracefulStop
        fmt.Fprintf(os.Stderr, "Killed: %+v", sig)
        os.Exit(0)
    }()
}
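// Typical wiring (illustrative): call gracefulStop() near the top of main(),
// before starting long-running work, so ^C or SIGTERM exits cleanly:
//
//   func main() {
//       gracefulStop()
//       // ... run the service ...
//   }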
resharder.go
/* Copyright 2019 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package wrangler import ( "fmt" "sync" "time" "github.com/golang/protobuf/proto" "github.com/pkg/errors" "golang.org/x/net/context" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/key" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" ) type resharder struct { wr *Wrangler keyspace string workflow string sourceShards []*topo.ShardInfo sourceMasters map[string]*topo.TabletInfo targetShards []*topo.ShardInfo targetMasters map[string]*topo.TabletInfo vschema *vschemapb.Keyspace refStreams map[string]*refStream } type refStream struct { workflow string bls *binlogdatapb.BinlogSource cell string tabletTypes string } // Reshard initiates a resharding workflow. func (wr *Wrangler) Reshard(ctx context.Context, keyspace, workflow string, sources, targets []string, skipSchemaCopy bool) error { if err := wr.validateNewWorkflow(ctx, keyspace, workflow); err != nil { return err } rs, err := wr.buildResharder(ctx, keyspace, workflow, sources, targets) if err != nil { return vterrors.Wrap(err, "buildResharder") } if !skipSchemaCopy { if err := rs.copySchema(ctx); err != nil { return vterrors.Wrap(err, "copySchema") } } if err := rs.createStreams(ctx); err != nil { return vterrors.Wrap(err, "createStreams") } if err := rs.startStreams(ctx); err != nil { return vterrors.Wrap(err, "startStream") } return nil } func (wr *Wrangler) buildResharder(ctx context.Context, keyspace, workflow string, sources, targets []string) (*resharder, error) { rs := &resharder{ wr: wr, keyspace: keyspace, workflow: workflow, sourceMasters: make(map[string]*topo.TabletInfo), targetMasters: make(map[string]*topo.TabletInfo), } for _, shard := range sources { si, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return nil, vterrors.Wrapf(err, "GetShard(%s) failed", shard) } if !si.IsMasterServing { return nil, fmt.Errorf("source shard %v is not in serving state", shard) } rs.sourceShards = append(rs.sourceShards, si) master, err := wr.ts.GetTablet(ctx, si.MasterAlias) if err != nil { return nil, vterrors.Wrapf(err, "GetTablet(%s) failed", si.MasterAlias) } rs.sourceMasters[si.ShardName()] = master } for _, shard := range targets { si, err := wr.ts.GetShard(ctx, keyspace, shard) if err != nil { return nil, vterrors.Wrapf(err, "GetShard(%s) failed", shard) } if si.IsMasterServing { return nil, fmt.Errorf("target shard %v is in serving state", shard) } rs.targetShards = append(rs.targetShards, si) master, err := wr.ts.GetTablet(ctx, si.MasterAlias) if err != nil { return nil, vterrors.Wrapf(err, "GetTablet(%s) failed", si.MasterAlias) } rs.targetMasters[si.ShardName()] = master } if err := topotools.ValidateForReshard(rs.sourceShards, 
rs.targetShards); err != nil { return nil, vterrors.Wrap(err, "ValidateForReshard") } if err := rs.validateTargets(ctx); err != nil { return nil, vterrors.Wrap(err, "validateTargets") } vschema, err := wr.ts.GetVSchema(ctx, keyspace) if err != nil { return nil, vterrors.Wrap(err, "GetVSchema") } rs.vschema = vschema if err := rs.readRefStreams(ctx); err != nil { return nil, vterrors.Wrap(err, "readRefStreams") } return rs, nil } func (rs *resharder) validateTargets(ctx context.Context) error { err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { targetMaster := rs.targetMasters[target.ShardName()] query := fmt.Sprintf("select 1 from _vt.vreplication where db_name=%s", encodeString(targetMaster.DbName())) p3qr, err := rs.wr.tmc.VReplicationExec(ctx, targetMaster.Tablet, query) if err != nil { return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetMaster.Tablet, query) } if len(p3qr.Rows) != 0
return nil }) return err } func (rs *resharder) readRefStreams(ctx context.Context) error { var mu sync.Mutex err := rs.forAll(rs.sourceShards, func(source *topo.ShardInfo) error { sourceMaster := rs.sourceMasters[source.ShardName()] query := fmt.Sprintf("select workflow, source, cell, tablet_types from _vt.vreplication where db_name=%s", encodeString(sourceMaster.DbName())) p3qr, err := rs.wr.tmc.VReplicationExec(ctx, sourceMaster.Tablet, query) if err != nil { return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", sourceMaster.Tablet, query) } qr := sqltypes.Proto3ToResult(p3qr) mu.Lock() defer mu.Unlock() mustCreate := false var ref map[string]bool if rs.refStreams == nil { rs.refStreams = make(map[string]*refStream) mustCreate = true } else { // Copy the ref streams for comparison. ref = make(map[string]bool, len(rs.refStreams)) for k := range rs.refStreams { ref[k] = true } } for _, row := range qr.Rows { workflow := row[0].ToString() if workflow == "" { return fmt.Errorf("VReplication streams must have named workflows for migration: shard: %s:%s", source.Keyspace(), source.ShardName()) } var bls binlogdatapb.BinlogSource if err := proto.UnmarshalText(row[1].ToString(), &bls); err != nil { return vterrors.Wrapf(err, "UnmarshalText: %v", row) } isReference, err := rs.blsIsReference(&bls) if err != nil { return vterrors.Wrap(err, "blsIsReference") } if !isReference { continue } key := fmt.Sprintf("%s:%s:%s", workflow, bls.Keyspace, bls.Shard) if mustCreate { rs.refStreams[key] = &refStream{ workflow: workflow, bls: &bls, cell: row[2].ToString(), tabletTypes: row[3].ToString(), } } else { if !ref[key] { return fmt.Errorf("streams are mismatched across source shards for workflow: %s", workflow) } delete(ref, key) } } if len(ref) != 0 { return fmt.Errorf("streams are mismatched across source shards: %v", ref) } return nil }) return err } // blsIsReference is partially copied from streamMigrater.templatize. // It reuses the constants from that function also. func (rs *resharder) blsIsReference(bls *binlogdatapb.BinlogSource) (bool, error) { streamType := unknown for _, rule := range bls.Filter.Rules { typ, err := rs.identifyRuleType(rule) if err != nil { return false, err } switch typ { case sharded: if streamType == reference { return false, fmt.Errorf("cannot reshard streams with a mix of reference and sharded tables: %v", bls) } streamType = sharded case reference: if streamType == sharded { return false, fmt.Errorf("cannot reshard streams with a mix of reference and sharded tables: %v", bls) } streamType = reference } } return streamType == reference, nil } func (rs *resharder) identifyRuleType(rule *binlogdatapb.Rule) (int, error) { vtable, ok := rs.vschema.Tables[rule.Match] if !ok { return 0, fmt.Errorf("table %v not found in vschema", rule.Match) } if vtable.Type == vindexes.TypeReference { return reference, nil } // In this case, 'sharded' means that it's not a reference // table. We don't care about any other subtleties. 
return sharded, nil } func (rs *resharder) copySchema(ctx context.Context) error { oneSource := rs.sourceShards[0].MasterAlias err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { return rs.wr.CopySchemaShard(ctx, oneSource, []string{"/.*"}, nil, false, rs.keyspace, target.ShardName(), 1*time.Second) }) return err } func (rs *resharder) createStreams(ctx context.Context) error { var excludeRules []*binlogdatapb.Rule for tableName, table := range rs.vschema.Tables { if table.Type == vindexes.TypeReference { excludeRules = append(excludeRules, &binlogdatapb.Rule{ Match: tableName, Filter: "exclude", }) } } err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { targetMaster := rs.targetMasters[target.ShardName()] ig := vreplication.NewInsertGenerator(binlogplayer.BlpStopped, targetMaster.DbName()) // copy excludeRules to prevent data race. copyExcludeRules := append([]*binlogdatapb.Rule(nil), excludeRules...) for _, source := range rs.sourceShards { if !key.KeyRangesIntersect(target.KeyRange, source.KeyRange) { continue } filter := &binlogdatapb.Filter{ Rules: append(copyExcludeRules, &binlogdatapb.Rule{ Match: "/.*", Filter: key.KeyRangeString(target.KeyRange), }), } bls := &binlogdatapb.BinlogSource{ Keyspace: rs.keyspace, Shard: source.ShardName(), Filter: filter, } ig.AddRow(rs.workflow, bls, "", "", "") } for _, rstream := range rs.refStreams { ig.AddRow(rstream.workflow, rstream.bls, "", rstream.cell, rstream.tabletTypes) } query := ig.String() if _, err := rs.wr.tmc.VReplicationExec(ctx, targetMaster.Tablet, query); err != nil { return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetMaster.Tablet, query) } return nil }) return err } func (rs *resharder) startStreams(ctx context.Context) error { err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { targetMaster := rs.targetMasters[target.ShardName()] query := fmt.Sprintf("update _vt.vreplication set state='Running' where db_name=%s", encodeString(targetMaster.DbName())) if _, err := rs.wr.tmc.VReplicationExec(ctx, targetMaster.Tablet, query); err != nil { return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetMaster.Tablet, query) } return nil }) return err } func (rs *resharder) forAll(shards []*topo.ShardInfo, f func(*topo.ShardInfo) error) error { var wg sync.WaitGroup allErrors := &concurrency.AllErrorRecorder{} for _, shard := range shards { wg.Add(1) go func(shard *topo.ShardInfo) { defer wg.Done() if err := f(shard); err != nil { allErrors.RecordError(err) } }(shard) } wg.Wait() return allErrors.AggrError(vterrors.Aggregate) }
{ return errors.New("some streams already exist in the target shards, please clean them up and retry the command") }
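The `forAll` helper in this resharder entry fans a function out over a set of shards in parallel and aggregates every failure. A minimal standalone sketch of the same fan-out pattern, using a plain mutex-guarded error slice instead of vitess's `concurrency.AllErrorRecorder` (names below are illustrative, not part of vitess):

package main

import (
	"fmt"
	"sync"
)

// forAll runs f once per item concurrently and collects every error.
func forAll(items []string, f func(string) error) []error {
	var (
		wg   sync.WaitGroup
		mu   sync.Mutex
		errs []error
	)
	for _, it := range items {
		wg.Add(1)
		go func(it string) {
			defer wg.Done()
			if err := f(it); err != nil {
				mu.Lock()
				errs = append(errs, err)
				mu.Unlock()
			}
		}(it)
	}
	wg.Wait()
	return errs
}

func main() {
	errs := forAll([]string{"-80", "80-"}, func(shard string) error {
		if shard == "80-" {
			return fmt.Errorf("shard %s failed", shard)
		}
		return nil
	})
	fmt.Println(errs) // [shard 80- failed]
}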
DeleteVpcPeeringConnectionInput.ts
import { BrowserHttpOptions as __HttpOptions__ } from "@aws-sdk/types"; import * as __aws_sdk_types from "@aws-sdk/types"; /** * <p>Represents the input for a request action.</p> */ export interface DeleteVpcPeeringConnectionInput { /** * <p>Unique identifier for a fleet. This value must match the fleet ID referenced in the VPC peering connection record.</p> */ FleetId: string; /** * <p>Unique identifier for a VPC peering connection. This value is included in the <a>VpcPeeringConnection</a> object, which can be retrieved by calling <a>DescribeVpcPeeringConnections</a>.</p> */ VpcPeeringConnectionId: string; /** * The maximum number of times this operation should be retried. If set, this value will override the `maxRetries` configuration set on the client for this command. */ $maxRetries?: number; /** * An object that may be queried to determine if the underlying operation has been aborted. * * @see https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal */
$abortSignal?: __aws_sdk_types.AbortSignal; /** * Per-request HTTP configuration options. If set, any options specified will override the corresponding HTTP option set on the client for this command. */ $httpOptions?: __HttpOptions__; }
AuthContext.js
import React from 'react'; import { useGoogleLogin } from 'react-use-googlelogin'; const GoogleAuthContext = React.createContext({}); export const AuthProvider = ({ children }) => { const { REACT_APP_GOOGLE_CLIENT_ID } = process.env; const googleAuth = useGoogleLogin({ clientId: REACT_APP_GOOGLE_CLIENT_ID || ' ', }); return ( <GoogleAuthContext.Provider value={googleAuth}> {children} </GoogleAuthContext.Provider> ); };
export const useAuth = () => React.useContext(GoogleAuthContext);
fetch.go
package action import ( "log" "time" "sync" "errors" "fmt" "os" "strconv" "strings" sdk "CocosSDK" "github.com/siddontang/go-mysql/client" "github.com/wanliqun/bcx-witnode-vote-award/lib/util" ) func FetchBlocks(startBlock, endBlock int64) { defer Close() // fetch blocks and write to db syncHeight := startBlock blockHeight := int64(0) for { if syncHeight >= endBlock { log.Printf("Block number has been synced to %v. Work is done!", syncHeight) //syscall.Kill(syscall.Getpid(), syscall.SIGINT) os.Exit(0) } if blockHeight == 0 || syncHeight >= blockHeight { chainInfo := sdk.GetChainInfo() if chainInfo == nil { time.Sleep(1 * time.Second) continue } blockHeight = int64(chainInfo.LastIrreversibleBlockNum) } if syncHeight >= blockHeight { time.Sleep(5 * time.Second) continue } numDiffBlocks := util.MinInt64(blockHeight - syncHeight, 100) numWorkers := 1 if numDiffBlocks >= 20 { numWorkers = 4 } log.Printf("fetching cocosbcx blocks with syncHeight - %v; blockHeight - %v; numWorkers - %v", syncHeight, blockHeight, numWorkers) var wg sync.WaitGroup errChans := make(chan error, numWorkers) for i := 1; i <= numWorkers; i++ { mod := 0 avg := int(numDiffBlocks) / numWorkers if i == numWorkers { mod = int(numDiffBlocks) % numWorkers } start := syncHeight + int64(1 + (i - 1) * avg) end := start + int64((avg - 1) + mod) dbCon := dbConnPools[i-1] wg.Add(1) go worker(i, &wg, dbCon, errChans, start, end) } wg.Wait() close(errChans) if len(errChans) > 0 { for err := range errChans { log.Println(err.Error()) } } else { syncHeight += numDiffBlocks } } } func worker(id int, wg *sync.WaitGroup, dbConn *client.Conn, errChans chan error, block_start, block_end int64) { defer wg.Done() log.Printf("Worker %d started fetching blocks from height %d to %d", id, block_start, block_end) logPrefix := fmt.Sprintf("Inside worker(id-%d, block_start-%d, block_end-%d)", id, block_start, block_end) blockOpsData := map[int64][]map[string]interface{}{} for i := block_start; i <= block_end; i++ { block := sdk.GetBlock(i) if len(block.Transactions) > 0 { opsData := []map[string]interface{}{} for _, trx := range block.Transactions { trxId := trx[0] trxInfo := trx[1].(map[string]interface{}) switch trxOps := trxInfo["operations"].(type) { case []interface{}: for _, ops := range trxOps { trxOpData := map[string]interface{}(nil) switch opv := ops.(type) { case []interface{}: opType := opv[0].(float64) if int(opType) != 6 { log.Printf("%v: skip transaction operation with trx type - %v for trxid - %v", logPrefix, opType, trxId) continue } opInfo := opv[1].(map[string]interface{}) opAccount := opInfo["account"].(string) opNewOptions := opInfo["new_options"].(map[string]interface{}) opLock := opInfo["lock_with_vote"] optionVotes := opNewOptions["votes"] switch opLockWithVote := opLock.(type) { case []interface{}: voteType := opLockWithVote[0].(float64) if int(voteType) != 1 { log.Printf("%v warning: vote with type - %v is not a witness vote for transaction with trxid - %v", logPrefix, voteType, trxId) continue } voteInfo := opLockWithVote[1].(map[string]interface{}) voteAmount := voteInfo["amount"].(float64) voteAssetId := voteInfo["asset_id"].(string) trxOpData = map[string]interface{}{ "op_type": opType, "op_account": opAccount, "vote_type": voteType, "vote_amount": voteAmount, "vote_asset_id": voteAssetId, } default: log.Printf("%v warning: unknown op lock_with_vote for transaction with trxid - %v", logPrefix, trxId) continue } switch newOptionsVotes := optionVotes.(type) { case []interface{}: votes := []string{} for _, v := range 
newOptionsVotes { vote := v.(string) votes = append(votes, vote) } trxOpData["votes"] = votes default: log.Printf("%v warning: unknown new option votes for transaction with trxid - %v", logPrefix, trxId) continue } default: log.Printf("%v warning: unknown transaction operation detail for trxid - %v", logPrefix, trxId) continue } if trxOpData != nil { trxOpData["block_num"] = i trxOpData["block_id"] = block.BlockID trxOpData["trx_id"] = trxId trxOpData["timestamp"] = block.Timestamp opsData = append(opsData, trxOpData) } } default: log.Printf("%v warning: unknown transaction operations for trxid - %v", logPrefix, trxId) continue } } if len(opsData) > 0 { blockOpsData[i] = opsData } } } if len(blockOpsData) == 0 { log.Printf("%v: no witness vote operations found", logPrefix) return } log.Printf("%v: saving block opsdata to db", logPrefix) logMsg, err := SaveBlockOpsDataToDB(dbConn, blockOpsData) if len(logMsg) > 0 { log.Printf("%v: %v", logPrefix, logMsg) } if err != nil { err = errors.New(fmt.Sprintf("%v error: %v", logPrefix, err.Error())) errChans <- err } } func SaveBlockOpsDataToDB(dbConn *client.Conn, blockOpsData map[int64][]map[string]interface{}) (string, error)
{ // get all map keys keys := []string{} for bn := range blockOpsData { keys = append(keys, strconv.FormatInt(bn, 10)) } fmt.Printf("keys: %#v\n", keys) sql := fmt.Sprintf("select distinct block_num from vote_ops where block_num in (%v)", strings.Join(keys, ",")) fmt.Printf("sql: %#v\n", sql) r, err := dbConn.Execute(sql) if err != nil { return "", err } if r.RowNumber() == len(keys) { return "skip now because all blocks are already saved in DB", nil } // drop blocks that are already saved in DB for i := 0; i < r.RowNumber(); i++ { bn, _ := r.GetStringByName(i, "block_num") idx := util.SearchStringSlice(keys, bn) if idx >= 0 { keys = util.RemoveStringSliceAt(keys, idx) } } for _, key := range keys { blockNum, _ := strconv.ParseInt(key, 10, 64) opsd := blockOpsData[blockNum] bulkInsertSql := "INSERT INTO vote_ops(block_id,block_num,trx_id,op_account,op_type,vote_type,votee_id,vote_asset_id,vote_amount,timestamp) VALUES" for _, opData := range opsd { blockId := opData["block_id"] blockNum := opData["block_num"] trxId := opData["trx_id"] opAccount := opData["op_account"] opType := opData["op_type"] voteType := opData["vote_type"] voteAssetId := opData["vote_asset_id"] voteAmount := opData["vote_amount"] timestamp := opData["timestamp"] opVotes := opData["votes"].([]string) for j := 0; j < len(opVotes); j++ { voteeId := opVotes[j] separator := "" if j < (len(opVotes) - 1) { separator = "," } innerSql := fmt.Sprintf(`("%v",%v,"%v","%v",%v,%v,"%v","%v",%v,"%v")%v`, blockId, blockNum, trxId, opAccount, opType, voteType, voteeId, voteAssetId, voteAmount, timestamp, separator) bulkInsertSql += innerSql } } insertRes, err := dbConn.Execute(bulkInsertSql) if err != nil { return "", err } fmt.Printf("insert result: %#v\n", insertRes.Resultset) if insertRes.AffectedRows < 1 { return fmt.Sprintf("Mysql db unknown error (%v)", bulkInsertSql), errors.New("Mysql db insert unknown error") } } return fmt.Sprintf("blocks(%v) ops data saved done", strings.Join(keys, ",")), nil }
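The worker loop in FetchBlocks splits the pending block range evenly among workers, giving the last worker the remainder. A small self-contained sketch of that avg/mod partitioning arithmetic (illustrative only, extracted from the loop above):

package main

import "fmt"

// splitRange reproduces the start/end arithmetic used in FetchBlocks:
// each worker gets avg blocks, and the last worker also gets the remainder.
func splitRange(syncHeight, numDiffBlocks int64, numWorkers int) [][2]int64 {
	ranges := make([][2]int64, 0, numWorkers)
	avg := int(numDiffBlocks) / numWorkers
	for i := 1; i <= numWorkers; i++ {
		mod := 0
		if i == numWorkers {
			mod = int(numDiffBlocks) % numWorkers
		}
		start := syncHeight + int64(1+(i-1)*avg)
		end := start + int64((avg-1)+mod)
		ranges = append(ranges, [2]int64{start, end})
	}
	return ranges
}

func main() {
	// 10 pending blocks after height 100 split across 4 workers:
	// [[101 102] [103 104] [105 106] [107 110]]
	fmt.Println(splitRange(100, 10, 4))
}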
can_node0.rs
#[doc = r"Register block"] #[repr(C)] pub struct RegisterBlock {
#[doc = "0x00 - Node Control Register"] pub ncr: crate::Reg<ncr::NCR_SPEC>, #[doc = "0x04 - Node Status Register"] pub nsr: crate::Reg<nsr::NSR_SPEC>, #[doc = "0x08 - Node Interrupt Pointer Register"] pub nipr: crate::Reg<nipr::NIPR_SPEC>, #[doc = "0x0c - Node Port Control Register"] pub npcr: crate::Reg<npcr::NPCR_SPEC>, #[doc = "0x10 - Node Bit Timing Register"] pub nbtr: crate::Reg<nbtr::NBTR_SPEC>, #[doc = "0x14 - Node Error Counter Register"] pub necnt: crate::Reg<necnt::NECNT_SPEC>, #[doc = "0x18 - Node Frame Counter Register"] pub nfcr: crate::Reg<nfcr::NFCR_SPEC>, } #[doc = "NCR register accessor: an alias for `Reg<NCR_SPEC>`"] pub type NCR = crate::Reg<ncr::NCR_SPEC>; #[doc = "Node Control Register"] pub mod ncr; #[doc = "NSR register accessor: an alias for `Reg<NSR_SPEC>`"] pub type NSR = crate::Reg<nsr::NSR_SPEC>; #[doc = "Node Status Register"] pub mod nsr; #[doc = "NIPR register accessor: an alias for `Reg<NIPR_SPEC>`"] pub type NIPR = crate::Reg<nipr::NIPR_SPEC>; #[doc = "Node Interrupt Pointer Register"] pub mod nipr; #[doc = "NPCR register accessor: an alias for `Reg<NPCR_SPEC>`"] pub type NPCR = crate::Reg<npcr::NPCR_SPEC>; #[doc = "Node Port Control Register"] pub mod npcr; #[doc = "NBTR register accessor: an alias for `Reg<NBTR_SPEC>`"] pub type NBTR = crate::Reg<nbtr::NBTR_SPEC>; #[doc = "Node Bit Timing Register"] pub mod nbtr; #[doc = "NECNT register accessor: an alias for `Reg<NECNT_SPEC>`"] pub type NECNT = crate::Reg<necnt::NECNT_SPEC>; #[doc = "Node Error Counter Register"] pub mod necnt; #[doc = "NFCR register accessor: an alias for `Reg<NFCR_SPEC>`"] pub type NFCR = crate::Reg<nfcr::NFCR_SPEC>; #[doc = "Node Frame Counter Register"] pub mod nfcr;
archivos.js
const fs = require('fs') const { multiplicar } = require('../math/multiply') let createDir = (path) => { return new Promise((resolve, reject) => { fs.exists(path, (exists)=>{ if(!exists){ fs.mkdir(path, (err) => {
return resolve(true) }) } else{ resolve(true) } }) }) } let crearArchivo = (base, limite) => { return new Promise((resolve, reject) => { createDir('tablas') .then(exists => { fs.writeFile(`tablas/tabla-${base}.txt`, multiplicar(base, limite), (err)=>{ if (err) return reject(err) resolve(`tabla-${base}.txt`) }) }) .catch(err => { reject(err) }) }) } module.exports = { crearArchivo }
if(err) return reject(err)
translations-registry.ts
import Translation from "./translation"; function setProto(of: any, proto: any) { if (typeof (Object as any).setPrototypeOf === "undefined") { of.__proto__ = proto; } else { (Object as any).setPrototypeOf(of, proto); } } export class TranslationRegistrationError extends Error { constructor(public message: string) { super(message); setProto(this, TranslationRegistrationError.prototype); } } export class EmptyTranslationIdError extends TranslationRegistrationError { constructor(translation: Translation) { super( `Invalid angular-translate translation '${translation}' found. The id of the translation is empty, consider removing the translate attribute (html) or defining the translation id (js).` ); setProto(this, EmptyTranslationIdError.prototype); } } export class TranslationMergeError extends TranslationRegistrationError { constructor( private existing: Translation, private newTranslation: Translation ) { super( `Webpack-Angular-Translate: Two translations with the same id but different default text found.\n\tExisting: ${existing}\n\tnew: ${newTranslation}\n\tPlease define the same default text twice or specify the default text only once.` ); setProto(this, TranslationMergeError.prototype); } } export default class TranslationsRegistry { private translations: { [translationId: string]: Translation } = {}; // Maps a resource to the ids of the translations it contains. private translationsByResource: { [resource: string]: string[] } = {}; registerTranslation(translation: Translation): Translation { this.validateTranslation(translation); for (let usage of translation.usages) { var translations = (this.translationsByResource[usage.resource] = this.translationsByResource[usage.resource] || []); if (translations.indexOf(translation.id) === -1) { translations.push(translation.id); } } const existingEntry = this.translations[translation.id]; return (this.translations[translation.id] = existingEntry ? translation.merge(existingEntry) : translation); } /** * Validates the passed in translation. Throws if the translation id is empty or if the * translation conflicts with an existing one that has a different default text. * @param translation the translation to validate */ private validateTranslation(translation: Translation): void { if (!translation.id || translation.id.trim().length === 0) { throw new EmptyTranslationIdError(translation); } const existingEntry = this.getTranslation(translation.id); // If both entries define a default text that doesn't match, emit an error if ( existingEntry && existingEntry.defaultText !== translation.defaultText && existingEntry.defaultText && translation.defaultText ) { throw new TranslationMergeError(existingEntry, translation); } } pruneTranslations(resource: string): void { const translationIds = this.translationsByResource[resource] || []; for (let translationId of translationIds) { let translation = this.translations[translationId];
for (let usage of translation.usages) { if (usage.resource === resource) { translation.usages.splice(translation.usages.indexOf(usage), 1); if (translation.usages.length === 0) { delete this.translations[translation.id]; } break; } } } delete this.translationsByResource[resource]; } getTranslation(translationId: string): Translation { return this.translations[translationId]; } get empty(): boolean { return Object.keys(this.translations).length === 0; } toJSON(): any { const translationIds = Object.keys(this.translations); const result: { [translationId: string]: string } = {}; translationIds.forEach(translationId => { const translation = this.translations[translationId]; result[translationId] = translation.text; }); return result; } }
if (!translation) { continue; }
mutvec.rs
use crate::{MutVecg,MutVecf64}; /// Mutable vector operations on `&mut [f64]`, where the operand endtype is generic impl<U> MutVecg<U> for &mut [f64] where U: Copy+PartialOrd, f64: From<U> { /// Scalar multiplication of a vector, mutates self fn mutsmult(self, s:U) { let sf = f64::from(s); self.iter_mut().for_each(|x| *x *= sf); } /// Vector subtraction, mutates self fn mutvsub(self, v: &[U]) { self.iter_mut().zip(v).for_each(|(x,&vi)| *x -= f64::from(vi)) } /// Vector addition, mutates self fn mutvadd(self, v: &[U]) { self.iter_mut().zip(v).for_each(|(x,&vi)| *x += f64::from(vi)) } } /// Mutable operations on `&mut [f64]`, where the operand endtype is also f64 impl MutVecf64 for &mut [f64] { /// Scalar multiplication of a vector, mutates self fn mutsmultf64(self, s:f64)
/// Vector subtraction, mutates self fn mutvsubf64(self, v: &[f64]) { self.iter_mut().zip(v).for_each(|(x,&vi)| *x -= vi) } /// Vector addition, mutates self fn mutvaddf64(self, v: &[f64]) { self.iter_mut().zip(v).for_each(|(x,&vi)| *x += vi) } }
{ self.iter_mut().for_each(|x| *x *= s); }
container_engine.go
package init import ( "errors" "github.com/yuyicai/kubei/cmd/phases" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" "github.com/yuyicai/kubei/internal/options" containerphases "github.com/yuyicai/kubei/internal/phases/container" ) // NewContainerEnginePhase creates a kubei workflow phase that implements handling of container engine. func NewContainerEnginePhase() workflow.Phase { phase := workflow.Phase{ Name: "container-engine", Short: "install container engine", Long: "install container engine", InheritFlags: getContainerEnginePhaseFlags(), Run: runContainerEngine, } return phase } func getContainerEnginePhaseFlags() []string
func runContainerEngine(c workflow.RunData) error { data, ok := c.(phases.RunData) if !ok { return errors.New("runtime phase invoked with an invalid data struct") } cluster := data.Cluster() return containerphases.InstallContainerEngine(cluster) }
{ flags := []string{ options.OfflineFile, options.JumpServer, options.ContainerEngineVersion, options.Masters, options.Workers, options.Password, options.Port, options.User, options.Key, } return flags }
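runContainerEngine recovers its typed dependencies from the opaque workflow.RunData via a type assertion. A generic sketch of that phase pattern outside of kubei/kubeadm (all names here are made up for illustration):

package main

import (
	"errors"
	"fmt"
)

// RunData is the opaque payload a workflow passes to each phase.
type RunData interface{}

// clusterData is the concrete type one particular phase expects.
type clusterData struct{ name string }

// runPhase asserts the payload back to the type it needs, failing
// loudly when the workflow was wired up with the wrong data struct.
func runPhase(c RunData) error {
	data, ok := c.(*clusterData)
	if !ok {
		return errors.New("phase invoked with an invalid data struct")
	}
	fmt.Println("installing container engine on cluster:", data.name)
	return nil
}

func main() {
	if err := runPhase(&clusterData{name: "demo"}); err != nil {
		fmt.Println(err)
	}
}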
process_event_api.py
""" Processes event api from slack :license: MIT """ import json import os from typing import Dict from src.modules.create_signedup_homepage import create_home_tap from src.dependencies.dependency_typing import Requests, PynamoDBConsultant from src.dependencies.requests_provider import get_requests_provider from src.dependencies.pynamodb_consultant_provider import get_consultants_provider def process(event, context): ''' AWS Serverless Handler - :param event: AWS event :param context: AWS Lambda context ''' print(event) print(context) requests_client = get_requests_provider() consultant_model = get_consultants_provider() return proccess_request(event, requests_client, consultant_model) def proccess_request(event, requests_client: Requests, consultant_model: PynamoDBConsultant) -> None: ''' Proccess request - :param event: AWS event :param requests_client: Request Client :param consultant_model: Consultant Client ''' event_body = event['body'] if event_body['type'] == 'event_callback': if 'event' in event_body and event_body['event']['type'] == 'app_home_opened': user_id = event_body['event']['user'] consultant = next(consultant_model.slack_id_index.query(user_id), None) if consultant is not None: home_tap = create_home_tap(consultant.uuid, consultant_model) else: with open("src/templates/{0}.json".format('home_tap_template_signup'), "r")\ as body: home_tap = json.load(body) data = { 'user_id': user_id, 'view': home_tap } response = post('https://slack.com/api/views.publish', data, requests_client) elif event_body['type'] == 'url_verification': response = { 'challenge': event_body['challenge'] } print(response) return response def post(url: str, data: Dict, requests_client: Requests) -> Requests: ''' Posts the data
:param url: Url to slack api :param data: The data to post :param requests_client: Request client ''' auth_token = os.environ['SlackAuth'] hed = {'Authorization': 'Bearer ' + auth_token} response = requests_client.post(url, json=data, headers=hed) print('RESPONSE: ', response.json()) return response.json()
-
TaobaoCrmMembersGet.go
package crm import ( "github.com/bububa/opentaobao/core" "github.com/bububa/opentaobao/model/crm" ) /* Get a seller's members (basic query) taobao.crm.members.get Queries a seller's members with basic criteria and returns the list of members matching the conditions */ func TaobaoCrmMembersGet(clt *core.SDKClient, req *crm.TaobaoCrmMembersGetRequest, session string) (*crm.TaobaoCrmMembersGetAPIResponse, error) { var resp crm.TaobaoCrmMembersGetAPIResponse err := clt.Post(req, &resp
, session) if err != nil { return nil, err } return &resp, nil }
wrappers.go
// Copyright 2017 Authors of Cilium // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package helpers import "fmt" // PerfTest represents a type of test to run when running `netperf`. type PerfTest string const ( // TCP_RR represents a netperf test for TCP Request/Response performance. // For more information, consult : http://www.cs.kent.edu/~farrell/dist/ref/Netperf.html TCP_RR = PerfTest("TCP_RR") // TCP_STREAM represents a netperf test for TCP throughput performance. // For more information, consult : http://www.cs.kent.edu/~farrell/dist/ref/Netperf.html TCP_STREAM = PerfTest("TCP_STREAM") // TCP_CRR represents a netperf test that connects and sends a single request/response // For more information, consult : http://www.cs.kent.edu/~farrell/dist/ref/Netperf.html TCP_CRR = PerfTest("TCP_CRR") // UDP_RR represents a netperf test for UDP Request/Response performance. // For more information, consult : http://www.cs.kent.edu/~farrell/dist/ref/Netperf.html UDP_RR = PerfTest("UDP_RR") ) // Ping returns the string representing the ping command to ping the specified // endpoint. func Ping(endpoint string) string { return fmt.Sprintf("ping -W 2 -c %d %s", PingCount, endpoint) } // Ping6 returns the string representing the ping6 command to ping6 the // specified endpoint. func Ping6(endpoint string) string { return fmt.Sprintf("ping6 -c %d %s", PingCount, endpoint) } // Wrk runs a standard wrk test for http func Wrk(endpoint string) string { return fmt.Sprintf("wrk -t2 -c100 -d30s -R2000 http://%s", endpoint) } // CurlFail returns the string representing the curl command with `-s` and // `--fail` options enabled to curl the specified endpoint. It takes a // variadic optionalValues argument. This is passed on to fmt.Sprintf() and used // in the curl message func CurlFail(endpoint string, optionalValues ...interface{}) string { statsInfo := `time-> DNS: '%{time_namelookup}(%{remote_ip})', Connect: '%{time_connect}',` + `Transfer '%{time_starttransfer}', total '%{time_total}'` if len(optionalValues) > 0 { endpoint = fmt.Sprintf(endpoint, optionalValues...) } return fmt.Sprintf( `curl --path-as-is -s -D /dev/stderr --fail --connect-timeout %[1]d --max-time %[2]d %[3]s -w "%[4]s"`, CurlConnectTimeout, CurlMaxTimeout, endpoint, statsInfo) } // CurlWithHTTPCode returns the string representation of the curl command which // only outputs the HTTP code returned by its execution against the specified // endpoint. It takes a variadic optionalValues argument. This is passed on to // fmt.Sprintf() and used in the curl message func CurlWithHTTPCode(endpoint string, optionalValues ...interface{}) string { if len(optionalValues) > 0 { endpoint = fmt.Sprintf(endpoint, optionalValues...) } return fmt.Sprintf( `curl --path-as-is -s -D /dev/stderr --output /dev/stderr -w '%%{http_code}' --connect-timeout %d %s`, CurlConnectTimeout, endpoint) } // Netperf returns the string representing the netperf command to use when testing // connectivity between endpoints. 
func Netperf(endpoint string, perfTest PerfTest, options string) string { return fmt.Sprintf("netperf -l 3 -t %s -H %s %s", perfTest, endpoint, options) } // Netcat returns the string representing the netcat command to the specified // endpoint. It takes a variadic optionalValues argument. This is passed to // fmt.Sprintf and used in the netcat message func Netcat(endpoint string, optionalValues ...interface{}) string
{ if len(optionalValues) > 0 { endpoint = fmt.Sprintf(endpoint, optionalValues...) } return fmt.Sprintf("nc -w 4 %s", endpoint) }
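All of these helpers follow the same shape: optionally expand variadic fmt arguments into the endpoint, then splice the endpoint into a shell command string. A tiny standalone sketch of that builder pattern (a hypothetical wrapper, not part of the Cilium helpers):

package main

import "fmt"

// command expands endpoint with the optional values first, then splices
// the result into the final shell command, mirroring the helpers above.
func command(tmpl, endpoint string, optionalValues ...interface{}) string {
	if len(optionalValues) > 0 {
		endpoint = fmt.Sprintf(endpoint, optionalValues...)
	}
	return fmt.Sprintf(tmpl, endpoint)
}

func main() {
	// The endpoint "10.0.0.1:%d" is formatted before entering the command.
	fmt.Println(command("nc -w 4 %s", "10.0.0.1:%d", 8080))
	fmt.Println(command("curl -s --fail %s", "http://%s/health", "app.local"))
}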
pseudoalteromonastunicata.py
""" This file offers the methods to automatically retrieve the graph Pseudoalteromonas tunicata. The graph is automatically retrieved from the STRING repository. Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 18:59:41.922410 The undirected graph Pseudoalteromonas tunicata has 4449 nodes and 452740 weighted edges, of which none are self-loops. The graph is dense as it has a density of 0.04576 and has 32 connected components, where the component with most nodes has 4376 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 190, the mean node degree is 203.52, and the node degree mode is 2. The top 5 most central nodes are 87626.PTD2_18840 (degree 1398), 87626.PTD2_06569 (degree 1364), 87626.PTD2_12984 (degree 1139), 87626.PTD2_18285 (degree 1124) and 87626.PTD2_12989 (degree 956). References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import PseudoalteromonasTunicata # Then load the graph graph = PseudoalteromonasTunicata() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks. """ from typing import Dict from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error def
( directed: bool = False, verbose: int = 2, cache_path: str = "graphs/string", **additional_graph_kwargs: Dict ) -> EnsmallenGraph: """Return new instance of the Pseudoalteromonas tunicata graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False, Whether to load the graph as directed or undirected. By default false. verbose: int = 2, Whether to show loading bars during the retrieval and building of the graph. cache_path: str = "graphs", Where to store the downloaded graphs. additional_graph_kwargs: Dict, Additional graph kwargs. Returns ----------------------- Instance of Pseudoalteromonas tunicata graph. Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 18:59:41.922410 The undirected graph Pseudoalteromonas tunicata has 4449 nodes and 452740 weighted edges, of which none are self-loops. The graph is dense as it has a density of 0.04576 and has 32 connected components, where the component with most nodes has 4376 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 190, the mean node degree is 203.52, and the node degree mode is 2. The top 5 most central nodes are 87626.PTD2_18840 (degree 1398), 87626.PTD2_06569 (degree 1364), 87626.PTD2_12984 (degree 1139), 87626.PTD2_18285 (degree 1124) and 87626.PTD2_12989 (degree 956). References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import PseudoalteromonasTunicata # Then load the graph graph = PseudoalteromonasTunicata() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split for the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Whether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks. """ return AutomaticallyRetrievedGraph( graph_name="PseudoalteromonasTunicata", dataset="string", directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs )()
PseudoalteromonasTunicata
user.go
package views import "webrtc-china.org/models" type UserView struct { Id string `json:"id"` Email string `json:"email"` Username string `json:"username"` FullName string `json:"full_name"` AvatarURL string `json:"avatar_url"` } func
(user *models.User) UserView { userView := UserView{ Id: user.Id, Email: user.Email, Username: user.Username, FullName: user.FullName, AvatarURL: user.AvatarURL, } return userView }
BuildUserView
agentFromLab.go
package removesubcommands import ( "fmt" "os" snmpsimclient "github.com/inexio/snmpsim-restapi-go-client" "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/spf13/viper" ) // AgentFromLabCmd represents the agentFromLab command var AgentFromLabCmd = &cobra.Command{ Use: "agent-from-lab", Args: cobra.ExactArgs(0), Short: "Removes an agent from a lab", Long: `Removes the agent with the given agent-id from the lab with the given lab-id`, Run: func(cmd *cobra.Command, args []string) { //Load the client data from the config baseURL := viper.GetString("mgmt.http.baseURL") username := viper.GetString("mgmt.http.authUsername") password := viper.GetString("mgmt.http.authPassword") //Create a new client client, err := snmpsimclient.NewManagementClient(baseURL) if err != nil { log.Error(). Err(err). Msg("Error while creating management client") os.Exit(1) } if username != "" && password != "" { err = client.SetUsernameAndPassword(username, password) if err != nil { log.Error(). Err(err). Msg("Error while setting username and password") os.Exit(1) } } //Read in the agent-id agentID, err := cmd.Flags().GetInt("agent") if err != nil { log.Error(). Err(err). Msg("Error while retrieving agentID") os.Exit(1) } //Read in the lab-id labID, err := cmd.Flags().GetInt("lab") if err != nil { log.Error(). Err(err). Msg("Error while retrieving labID") os.Exit(1) } //Remove the agent from the lab err = client.RemoveAgentFromLab(labID, agentID) if err != nil { log.Error(). Err(err). Msg("Error while removing the agent from the lab") os.Exit(1) } fmt.Println("Agent", agentID, "has been removed from lab", labID) }, } func init()
{ //Set agent flag AgentFromLabCmd.Flags().Int("agent", 0, "Id of the agent that is to be removed from the lab") err := AgentFromLabCmd.MarkFlagRequired("agent") if err != nil { log.Error(). Err(err). Msg("Could not mark 'agent' flag required") os.Exit(1) } //Set lab flag AgentFromLabCmd.Flags().Int("lab", 0, "Id of the lab the agent will be removed from") err = AgentFromLabCmd.MarkFlagRequired("lab") if err != nil { log.Error(). Err(err). Msg("Could not mark 'lab' flag required") os.Exit(1) } }
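The init function above wires the command's flags and marks them required so cobra rejects invocations that omit them. A minimal self-contained example of the same flag setup (a toy command, unrelated to snmpsim):

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "remove",
		RunE: func(cmd *cobra.Command, args []string) error {
			agent, err := cmd.Flags().GetInt("agent")
			if err != nil {
				return err
			}
			fmt.Println("removing agent", agent)
			return nil
		},
	}
	cmd.Flags().Int("agent", 0, "Id of the agent to remove")
	// Cobra now errors out before RunE if --agent is missing.
	if err := cmd.MarkFlagRequired("agent"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}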
map.rs
//! Map and support structures pub use std::{ hash::{ Hash, Hasher, }, collections::hash_map::DefaultHasher, mem::replace, slice::{ Iter as SliceIter, IterMut as SliceIterMut, }, ops::{ Index, IndexMut, }, marker::PhantomData, iter::FromIterator, vec::IntoIter as VecIntoIter, }; /// An associative array of keys to values /// /// Allows bi-directional lookup, /// using hashing for keys and direct comparison for values /// /// Key types must implement PartialEq, Clone, and Hash /// /// Value types must implement PartialEq #[derive(Debug, Clone)] pub struct Map<K: PartialEq + Hash, V: PartialEq> { keys: Vec<K>, values: Vec<V>, hashes: Vec<u64>, } impl<K: PartialEq + Hash, V: PartialEq> Map<K, V> { const DEFAULT_CAPACITY: usize = 256; /// Used by all Maps of a given type to generate hashes from keys #[inline] pub fn hash<EqK: Hash + ?Sized> (key: &EqK) -> u64 where K: PartialEq<EqK> { let mut hasher = DefaultHasher::new(); key.hash(&mut hasher); hasher.finish() } /// Create a Map and pre-allocate its Vecs with a specified capacity #[inline] pub fn with_capacity (cap: usize) -> Self { Self { keys: Vec::with_capacity(cap), values: Vec::with_capacity(cap), hashes: Vec::with_capacity(cap), } } /// Create a Map and pre-allocate its Vecs with the Map::DEFAULT_CAPACITY #[inline] pub fn new () -> Self { Self::with_capacity(Self::DEFAULT_CAPACITY) } #[inline] fn index_of_hashed_key<EqK: Hash + ?Sized> (&self, hash: u64, key: &EqK) -> Option<usize> where K: PartialEq<EqK> { for (idx, own_hash) in self.hashes.iter().enumerate() { if *own_hash == hash { let own_key = unsafe { self.keys.get_unchecked(idx) }; if own_key == key { return Some(idx) } } } None } /// Find the vec index of a key if it exists in a Map pub fn index_of_key<EqK: Hash + ?Sized> (&self, key: &EqK) -> Option<usize> where K: PartialEq<EqK> { self.index_of_hashed_key(Self::hash(key), key) } /// Find the vec index of a value if it exists in a Map pub fn index_of_value (&self, value: &V) -> Option<usize> { for (idx, own_value) in self.values.iter().enumerate() { if own_value == value { return Some(idx) } } None } /// Determine if a Map contains a given key #[inline] pub fn contains_key<EqK: Hash + ?Sized> (&self, key: &EqK) -> bool where K: PartialEq<EqK> { self.index_of_key(key).is_some() } /// Determine if a Map contains a given value #[inline] pub fn contains_value (&self, value: &V) -> bool { self.index_of_value(value).is_some() } /// Determine if a Map potentially contains a given key /// /// This works by comparing hashes only, and may yield false positives, /// but will never yield a false negative pub fn maybe_contains_key<EqK: Hash + ?Sized> (&self, key: &EqK) -> bool where K: PartialEq<EqK> { let hash = Self::hash(key); for own_hash in self.hashes.iter() { if *own_hash == hash { return true } } false } /// Get the number of (key, value) pairs in a Map #[inline] pub fn len (&self) -> usize { self.values.len() } /// Determine if a Map contains any values #[inline] pub fn is_empty (&self) -> bool { self.values.is_empty() } /// Get an immutable reference to a value associated with a given key in a Map, /// if it contains a pair with a matching key #[inline] pub fn find_value<EqK: Hash + ?Sized> (&self, key: &EqK) -> Option<&V> where K: PartialEq<EqK> { if let Some(idx) = self.index_of_key(key) { Some(unsafe { self.values.get_unchecked(idx) }) } else { None } } /// Get a mutable reference to a value associated with a given key in a Map, /// if it contains a pair with a matching key #[inline] pub fn find_value_mut<EqK: Hash + ?Sized> 
(&mut self, key: &EqK) -> Option<&mut V> where K: PartialEq<EqK> { if let Some(idx) = self.index_of_key(key) { Some(unsafe { self.values.get_unchecked_mut(idx) }) } else { None } } /// Get an immutable reference to a key associated with a given value in a Map, /// if it contains a pair with a matching value #[inline] pub fn find_key (&self, value: &V) -> Option<&K> { if let Some(idx) = self.index_of_value(value) { Some(unsafe { self.keys.get_unchecked(idx) }) } else { None } } /// Get a mutable reference to a key associated with a given value in a Map, /// if it contains a pair with a matching value #[inline] pub fn find_key_mut (&mut self, value: &V) -> Option<&mut K> { if let Some(idx) = self.index_of_value(value) { Some(unsafe { self.keys.get_unchecked_mut(idx) }) } else { None } } /// Get an immutable references to a (key, value) pair in a Map by index /// /// # Safety /// Does not range check the index /// /// Note that the Map type does not necessarily preserve its order, /// so index-based referencing is temporaly unstable #[inline] pub unsafe fn get_pair_unchecked (&self, idx: usize) -> (&K, &V) { (self.keys.get_unchecked(idx), self.values.get_unchecked(idx)) } /// Get a mutable references to a (key, value) pair in a Map by index /// /// # Safety /// Does not range check the index /// /// Note that the Map type does not necessarily preserve its order, /// so index-based referencing is temporaly unstable #[inline] pub unsafe fn get_pair_unchecked_mut (&mut self, idx: usize) -> (&mut K, &mut V) { (self.keys.get_unchecked_mut(idx), self.values.get_unchecked_mut(idx)) } /// Get an immutable references to a (key, value) pair in a Map by index /// /// A range check is performed on the index /// /// Note that the Map type does not necessarily preserve its order, /// so index-based referencing is temporaly unstable #[inline] pub fn get_pair (&self, idx: usize) -> Option<(&K, &V)> { if idx < self.len() { Some(unsafe { self.get_pair_unchecked(idx) }) } else { None } } /// Get a mutable references to a (key, value) pair in a Map by index /// /// A range check is performed on the index /// /// Note that the Map type does not necessarily preserve its order, /// so index-based referencing is temporaly unstable #[inline] pub fn get_pair_mut (&mut self, idx: usize) -> Option<(&mut K, &mut V)> { if idx < self.len() { Some(unsafe { self.get_pair_unchecked_mut(idx) }) } else { None } } /// Insert a value at the given key in a Map even if one already exists /// /// Returns the existing value if one is already bound to the key /// (The opposite of `insert_unique`) #[inline] pub fn insert (&mut self, key: K, value: V) -> Option<V> { let hash = Self::hash(&key); for (idx, own_hash) in self.hashes.iter().enumerate() { if *own_hash == hash { let own_key = unsafe { self.keys.get_unchecked(idx) }; if own_key == &key { return Some(replace(unsafe { self.values.get_unchecked_mut(idx) }, value)) } } } self.keys.push(key); self.values.push(value); self.hashes.push(hash); None } /// Insert a value at the given key in a Map if they key does not already exist /// /// Returns the (key, value) pair provided and does nothing if an existing key is found /// (The opposite of `insert`) #[inline] pub fn insert_unique_key (&mut self, key: K, value: V) -> Option<(K, V)> { let hash = Self::hash(&key); if self.index_of_hashed_key(hash, &key).is_some() { return Some((key, value)) } self.hashes.push(hash); self.keys.push(key); self.values.push(value); None } /// Insert a value at the given key in a Map if the value does not 
already exist /// /// Returns the (key, value) pair provided and does nothing if an existing value is found /// (The opposite of `insert`) #[inline] pub fn insert_unique_value (&mut self, key: K, value: V) -> Option<(K, V)> { if self.contains_value(&value) { return Some((key, value)) } self.hashes.push(Self::hash(&key)); self.keys.push(key); self.values.push(value); None } /// Removes a (key, value) pair at the given index in a Map if it is in range /// /// Returns the pair if one is found /// /// Does not preserve order #[inline] pub fn remove_by_index (&mut self, idx: usize) -> Option<(K, V)> { if idx < self.len() { self.hashes.swap_remove(idx); Some((self.keys.swap_remove(idx), self.values.swap_remove(idx))) } else { None } } /// Removes a (key, value) pair matching the given key in a Map if one exists /// /// Returns the pair if one is found /// /// Does not preserve order #[inline] pub fn remove_by_key<EqK: Hash + ?Sized> (&mut self, key: &EqK) -> Option<(K, V)> where K: PartialEq<EqK> { self.index_of_key(key).and_then(|idx| self.remove_by_index(idx)) } /// Removes the first (key, value) pair matching the given value in a Map if one exists /// /// Returns the pair if one is found /// /// Does not preserve order #[inline] pub fn remove_by_value (&mut self, value: &V) -> Option<(K, V)> { self.index_of_value(value).and_then(|idx| self.remove_by_index(idx)) } /// Remove a (key, value) pair from a Map if there are any /// /// Returns the pair if one exists /// /// Preserves order, removing the last pair of the Map #[inline] pub fn pop (&mut self) -> Option<(K, V)> { if !self.is_empty() { self.hashes.pop(); Some((self.keys.pop().unwrap(), self.values.pop().unwrap())) } else { None } } /// Get an immutable slice of the keys of a Map #[inline] pub fn keys (&self) -> &[K] { self.keys.as_slice() } /// Get a mutable slice of the keys of a Map #[inline] pub fn keys_mut (&mut self) -> &mut [K] { self.keys.as_mut_slice() } /// Get an immutable slice of the values of a Map #[inline] pub fn values (&self) -> &[V] { self.values.as_slice() } /// Get a mutable slice of the values of a Map #[inline] pub fn values_mut (&mut self) -> &mut [V]
/// Get an immutable iterator over the keys of a Map #[inline] pub fn key_iter (&self) -> SliceIter<K> { self.keys.iter() } /// Get a mutable iterator over the keys of a Map #[inline] pub fn key_iter_mut (&mut self) -> SliceIterMut<K> { self.keys.iter_mut() } /// Get an immutable iterator over the values of a Map #[inline] pub fn value_iter (&self) -> SliceIter<V> { self.values.iter() } /// Get a mutable iterator over the values of a Map #[inline] pub fn value_iter_mut (&mut self) -> SliceIterMut<V> { self.values.iter_mut() } /// Get an immutable iterator over the (key, value) pairs of a Map #[inline] pub fn iter (&self) -> PairIter<K, V> { PairIter::new(self) } /// Get a mutable iterator over the (key, value) pairs of a Map #[inline] pub fn iter_mut (&mut self) -> PairIterMut<K, V> { PairIterMut::new(self) } /// Move the (key, value) pairs of another Map into a Map /// /// Uses `insert_unique_key` to move values, thereby discarding values from the other Map, /// if they share a key with an existing entry /// /// Consumes the other Map /// /// Use `merge_discard_to_vec` to retain the discarded values pub fn merge_discard (&mut self, other: Self) { for (key, value) in other { self.insert_unique_key(key, value); } } /// Move the (key, value) pairs of another Map into a Map /// /// Uses `insert_unique_key` to move values, thereby discarding values from the other Map, /// if they share a key with an existing entry /// /// Consumes the other Map and retains discarded values in a Vec /// /// Use `merge_discard` to drop discarded values immediately pub fn merge_discard_to_vec (&mut self, other: Self) -> Vec<(K, V)> { let mut discard = Vec::new(); for (key, value) in other { if let Some(value) = self.insert_unique_key(key, value) { discard.push(value); } } discard } /// Move the (key, value) pairs of another Map into a Map /// /// Uses `insert` to move values, thereby overwriting values from the Map, /// if they share a key with an entry from the other Map /// /// Consumes the other Map pub fn merge_overwrite (&mut self, other: Self) { for (key, value) in other { self.insert(key, value); } } } impl<EqK: Hash + ?Sized, K: PartialEq + Hash, V: PartialEq> Index<&EqK> for Map<K, V> where K: PartialEq<EqK> { type Output = V; fn index (&self, key: &EqK) -> &Self::Output { self.find_value(key).expect("Attempted Map[] access to invalid key") } } impl<EqK: Hash + ?Sized, K: PartialEq + Hash, V: PartialEq> IndexMut<&EqK> for Map<K, V> where K: PartialEq<EqK> { fn index_mut (&mut self, key: &EqK) -> &mut Self::Output { self.find_value_mut(key).expect("Attempted Map[] access to invalid key") } } /// An iterator over (Key, Value) for a Map pub struct PairIter<'a, K: PartialEq + Hash + 'a, V: PartialEq + 'a> { keys: *const K, values: *const V, idx: usize, len: usize, k_phantom: PhantomData<&'a K>, v_phantom: PhantomData<&'a V>, } impl<'a, K: PartialEq + Hash + 'a, V: PartialEq + 'a> PairIter<'a, K, V> { /// Create a new PairIter for a Map #[inline] pub fn new (dict: &'a Map<K, V>) -> Self { Self { keys: dict.keys.as_ptr(), values: dict.values.as_ptr(), idx: 0, len: dict.len(), k_phantom: PhantomData, v_phantom: PhantomData, } } } impl<'a, K: PartialEq + Hash + 'a, V: PartialEq + 'a> Iterator for PairIter<'a, K, V> { type Item = (&'a K, &'a V); fn next (&mut self) -> Option<Self::Item> { if self.idx < self.len { let pair_idx = self.idx; self.idx += 1; Some(unsafe { (&*self.keys.add(pair_idx), &*self.values.add(pair_idx)) }) } else { None } } } /// An iterator over (mut Key, mut Value) for a Map pub struct PairIterMut<'a, 
K: PartialEq + Hash + 'a, V: PartialEq + 'a> { keys: *mut K, values: *mut V, idx: usize, len: usize, k_phantom: PhantomData<&'a mut K>, v_phantom: PhantomData<&'a mut V>, } impl<'a, K: PartialEq + Hash + 'a, V: PartialEq + 'a> PairIterMut<'a, K, V> { /// Create a new PairIterMut for a Map #[inline] pub fn new (dict: &'a mut Map<K, V>) -> Self { Self { keys: dict.keys.as_mut_ptr(), values: dict.values.as_mut_ptr(), idx: 0, len: dict.len(), k_phantom: PhantomData, v_phantom: PhantomData, } } } impl<'a, K: PartialEq + Hash + 'a, V: PartialEq + 'a> Iterator for PairIterMut<'a, K, V> { type Item = (&'a mut K, &'a mut V); fn next (&mut self) -> Option<Self::Item> { if self.idx < self.len { let pair_idx = self.idx; self.idx += 1; Some(unsafe { (&mut *self.keys.add(pair_idx), &mut *self.values.add(pair_idx)) }) } else { None } } } /// A by-value consuming iterator for a Map pub struct IntoIter<K: PartialEq + Hash, V: PartialEq> { keys: VecIntoIter<K>, values: VecIntoIter<V>, } impl<K: PartialEq + Hash, V: PartialEq> Iterator for IntoIter<K, V> { type Item = (K, V); fn next (&mut self) -> Option<Self::Item> { if let Some(key) = self.keys.next() { Some((key, self.values.next().unwrap())) } else { None } } } impl<K: PartialEq + Hash, V: PartialEq> IntoIterator for Map<K, V> { type Item = (K, V); type IntoIter = IntoIter<K, V>; fn into_iter (self) -> Self::IntoIter { Self::IntoIter { keys: self.keys.into_iter(), values: self.values.into_iter() } } } impl<K: PartialEq + Hash, V: PartialEq> FromIterator<(K, V)> for Map<K, V> { fn from_iter<I: IntoIterator<Item=(K, V)>> (iter: I) -> Self { let mut dict = Self::new(); for (key, value) in iter { dict.insert(key, value); } dict } }
{ self.values.as_mut_slice() }
test_patients.py
from bson.json_util import dumps from ..app import app from json import dumps as pretty class glo: patient_id = [] g = glo() userid = ['1k33224', '60961d77a7090edb5b69c62c'] patient = { 'name': 'Abhishek shrivastava', 'age': 19, 'gender': 'M', 'mobile': '9022930339' } patient2 = { 'name': 'Avinash', 'age': 39, 'gender': 'M', 'mobile': '2992123212', 'stats': { 'bp': 223, 'glucose': 213, 'weight': 922 } } data = [{ 'userid': userid[0], 'patient':patient, }, { 'userid': userid[1], 'patient':patient, }] def pprint(data):
def test_add_patient(): with app.test_client() as client: for item in data: uri = '/patients' res = client.post(uri, json=item) pprint(res.json) assert res.status_code == 200 def test_get_all_patients(): with app.test_client() as client: for id in userid: res = client.get('/patients/'+id) pprint(res.json) if type(res.json) == list: g.patient_id = [(d.get('id')) for d in res.json] g.patient_id.append({'$oid': userid[1]}) assert res.status_code == 200 def test_patient_get(): with app.test_client() as client: for uid in userid: for pid in g.patient_id: uri = '/patients/' + uid+'/'+pid['$oid'] res = client.get(uri) pprint(res.json) assert res.status_code == 200 def test_patient_update(): with app.test_client() as client: for uid in userid: for pid in g.patient_id: uri = '/patients/'+uid+'/'+pid['$oid'] res = client.put(uri, json=patient2) pprint(res.json) assert res.status_code == 200 def test_patient_delete(): with app.test_client() as client: for uid in userid: for pid in g.patient_id: uri = '/patients/'+uid+'/'+pid['$oid'] res = client.delete(uri) pprint(res.json) assert res.status_code == 200 def test_patient_get_after_delete(): test_patient_get()
print(pretty(data, sort_keys=True, indent=4))
pautomator.js
#!/usr/bin/env node 'use strict'; var net = require('net'), tls = require('tls'); var HTTPParser = process.binding('http_parser').HTTPParser; var http = require('http'), https = require('https'); var url = require('url'); function main() { //convert `-key value` to cfg[key]=value var cfg = process.argv.slice(2/*skip ["node", "xxx.js"]*/).reduce(function (cfg, arg, i, argv) { return (i % 2 === 0 && (arg.slice(0, 1) === '-' && (cfg[arg.slice(1)] = argv[i + 1])), cfg); }, {local_host: '', local_port: 0, remote_host: '', remote_port: 0, usr: '', pwd: '', as_pac_server: 0}); cfg.local_host = cfg.local_host || 'localhost'; cfg.local_port = (cfg.local_port & 0xffff) || 8080; cfg.remote_port = (cfg.remote_port & 0xffff) || 8080; cfg.as_pac_server = cfg.as_pac_server === 'true'; cfg.is_remote_https = cfg.is_remote_https === 'true'; cfg.ignore_https_cert = cfg.ignore_https_cert === 'true'; cfg.are_remotes_in_pac_https = cfg.are_remotes_in_pac_https === 'true'; if (!cfg.local_host || !cfg.local_port || !cfg.remote_host || !cfg.remote_port) return console.error('Usage of parameters:\n' + '-local_host host\t' + 'Listening address. Default: localhost. (* means all interfaces)\n' + '-local_port port\t' + 'Listening port. Default: 8080\n' + '-remote_host host\t' + 'Real proxy/PAC server address\n' + '-remote_port port\t' + 'Real proxy/PAC server port. Default: 8080\n' + '-usr user\t\t' + 'Real proxy/PAC server user id\n' + '-pwd password\t\t' + 'Real proxy/PAC user password\n' + '-as_pac_server true/false\t' + 'Treat `remote_host` as a PAC server. Default: false\n' + '\n' + '-is_remote_https true/false\t' + 'Talk to `remote_host` with HTTPS. Default: false\n' + '-ignore_https_cert true/false\t' + 'Ignore errors when verifying the HTTPS server certificate. Default: false\n' + '-are_remotes_in_pac_https true/false\t' + 'Talk to proxy servers defined in PAC with HTTPS. Default: false\n' ); if (cfg.as_pac_server && (cfg.local_host === '*' || cfg.local_host === '0.0.0.0' || cfg.local_host === '::')) { return console.error('when used as a PAC server, the local_host parameter must be a definite address'); } console.log('Using parameters: ' + JSON.stringify(cfg, null, ' ')); cfg.buf_proxy_basic_auth = new Buffer('Proxy-Authorization: Basic ' + new Buffer(cfg.usr + ':' + cfg.pwd).toString('base64')); if (cfg.as_pac_server) { createPacServer(cfg.local_host, cfg.local_port, cfg.remote_host, cfg.remote_port, cfg.buf_proxy_basic_auth, cfg.is_remote_https, cfg.ignore_https_cert, cfg.are_remotes_in_pac_https); } else { createPortForwarder(cfg.local_host, cfg.local_port, cfg.remote_host, cfg.remote_port, cfg.buf_proxy_basic_auth, cfg.is_remote_https, cfg.ignore_https_cert); } } var CR = 0xd, LF = 0xa, BUF_CR = new Buffer([0xd]), BUF_CR_LF_CR_LF = new Buffer([0xd, 0xa, 0xd, 0xa]), BUF_LF_LF = new Buffer([0xa, 0xa]); var STATE_NONE = 0, STATE_FOUND_LF = 1, STATE_FOUND_LF_CR = 2; function createPortForwarder(local_host, local_port, remote_host, remote_port, buf_proxy_basic_auth, is_remote_https, ignore_https_cert) { net.createServer({allowHalfOpen: true}, function (socket) { var realCon = (is_remote_https ? tls : net).connect({ port: remote_port, host: remote_host, allowHalfOpen: true, rejectUnauthorized: !ignore_https_cert /*not used when is_remote_https false*/ }); realCon.on('data', function (buf) { //console.log('<<<<' + (Date.t=new Date()) + '.' 
+ Date.t.getMilliseconds() + '\n' + buf.toString('ascii')); socket.write(buf); realCon.__haveGotData = true; }).on('end', function () { socket.end(); if (!realCon.__haveGotData && !realCon.__haveShownError) { console.error('[LocalProxy(:' + local_port + ')][Connection to ' + remote_host + ':' + remote_port + '] Error: ended by remote peer'); realCon.__haveShownError = true; } }).on('close', function () { socket.end(); if (!realCon.__haveGotData && !realCon.__haveShownError) { console.error('[LocalProxy(:' + local_port + ')][Connection to ' + remote_host + ':' + remote_port + '] Error: reset by remote peer'); realCon.__haveShownError = true; } }).on('error', function (err) { console.error('[LocalProxy(:' + local_port + ')][Connection to ' + remote_host + ':' + remote_port + '] ' + err); realCon.__haveShownError = true; }); var parser = new HTTPParser(HTTPParser.REQUEST); parser[HTTPParser.kOnHeadersComplete] = function () { //console.log('---- kOnHeadersComplete----'); //console.log(arguments); parser.__is_headers_complete = true; }; //parser[HTTPParser.kOnMessageComplete] = function () { // console.log('---- kOnMessageComplete----'); // console.log(arguments); //}; var state = STATE_NONE; socket.on('data', function (buf) { //console.log('[' + remote_host + ':' + remote_port + ']>>>>' + (Date.t = new Date()) + '.' + Date.t.getMilliseconds() + '\n' + buf.toString('ascii')); //var ret = parser.execute(buf); //console.log('\n\n----parser result: ' + ret + ' buf len:' + buf.length); //realCon.write(buf); //return; var buf_ary = [], unsavedStart = 0, buf_len = buf.length; //process orphan CR if (state === STATE_FOUND_LF_CR && buf[0] !== LF) { parser.execute(BUF_CR); buf_ary.push(BUF_CR); } for (var i = 0; i < buf_len; i++) { //find first LF if (state === STATE_NONE) { if (buf[i] === LF) { state = STATE_FOUND_LF; } continue; } //find second CR LF or LF if (buf[i] === LF) { parser.__is_headers_complete = false; parser.execute(buf.slice(unsavedStart, i + 1)); if (parser.__is_headers_complete) { buf_ary.push(buf.slice(unsavedStart, buf[i - 1] === CR ? i - 1 : i)); //console.log('insert auth header'); buf_ary.push(buf_proxy_basic_auth); buf_ary.push(state === STATE_FOUND_LF_CR ? BUF_CR_LF_CR_LF : BUF_LF_LF); unsavedStart = i + 1; state = STATE_NONE; } else { state = STATE_FOUND_LF; }
} else { state = STATE_NONE; } } if (unsavedStart < buf_len) { //strip last CR if found LF_CR buf = buf.slice(unsavedStart, state === STATE_FOUND_LF_CR ? buf_len - 1 : buf_len); if (buf.length) { parser.execute(buf); buf_ary.push(buf); } } buf = Buffer.concat(buf_ary); realCon.write(buf); }).on('end', cleanup).on('close', cleanup).on('error', function (err) { console.error('[LocalProxy(:' + local_port + ')][Incoming connection] ' + err); }); function cleanup() { if (parser) { parser.close(); parser = null; } realCon.end(); } }).on('error', function (err) { console.error('[LocalProxy(:' + local_port + ')] ' + err); process.exit(1); }).listen(local_port, local_host === '*' ? undefined : local_host, function () { console.log('[LocalProxy(:' + local_port + ')] OK: forward http://' + local_host + ':' + local_port + ' to http' + (is_remote_https ? 's' : '') + '://' + remote_host + ':' + remote_port); }); } var proxyAddrMap = {}; function createPacServer(local_host, local_port, remote_host, remote_port, buf_proxy_basic_auth, is_remote_https, ignore_https_cert, are_remotes_in_pac_https) { http.createServer(function (req, res) { var internal_req = url.parse(req.url); internal_req.host = remote_host; internal_req.port = remote_port; req.headers['host'] = remote_host + ':' + remote_port; if (!req.headers['authorization']) { req.headers['authorization'] = buf_proxy_basic_auth.slice('Proxy-Authorization: '.length).toString(); } internal_req.headers = req.headers; internal_req.rejectUnauthorized = !ignore_https_cert; //only used for SSL (is_remote_https ? https : http).get(internal_req, function (internal_res) { delete internal_res.headers['content-length']; delete internal_res.headers['transfer-encoding']; res.writeHead(internal_res.statusCode, internal_res.headers); res.__haveWrittenData = true; var buf_ary = []; internal_res.on('data', function (buf) { // console.log('<<<<' + (Date.t=new Date()) + '.' + Date.t.getMilliseconds() + '\n' + buf.toString('ascii')); buf_ary.push(buf); }).on('end', function () { var s = Buffer.concat(buf_ary).toString(); buf_ary = []; s = s.replace(/\bPROXY\s+([^'":;\s]+):(\d+)/g, function (_, host, port) { var remoteAddr = host + ':' + port; var _local_port = proxyAddrMap[remoteAddr]; if (!_local_port) { _local_port = local_port + Object.keys(proxyAddrMap).length + 1; proxyAddrMap[remoteAddr] = _local_port; createPortForwarder(local_host, _local_port, host, Number(port), buf_proxy_basic_auth, are_remotes_in_pac_https, ignore_https_cert); } return 'PROXY ' + local_host + ':' + _local_port; }); //console.log('return patched pac'); res.end(s); }).on('error', function (err) { res.end(); console.error('[LocalPAC][Reading response from ' + remote_host + ':' + remote_port + '] ' + err); }); }).on('error', function (err) { if (!res.__haveWrittenData) { res.statusCode = 500; res.end(); } console.error('[LocalPAC][Connection to ' + remote_host + ':' + remote_port + '] ' + err); }); res.on('error', function (err) { console.error('[LocalPAC][Writing response] ' + err); }); }).on('error', function (err) { console.error('[LocalPAC] ' + err); process.exit(1); }).listen(local_port, local_host === '*' ? undefined : local_host, function () { console.log('[LocalPAC] OK: forward http://' + local_host + ':' + local_port + ' to http' + (is_remote_https ? 's' : '') + '://' + remote_host + ':' + remote_port); }); } main();
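The proxy above scans the inbound byte stream for the end of the HTTP request headers (tracking LF and LF-CR states across chunk boundaries) so it can splice a Proxy-Authorization header in just before the blank line. Below is a minimal Python sketch of that splicing idea for a buffer that already holds complete headers; the function name and placeholder credentials are illustrative, not part of the original proxy, and the real code additionally handles headers split across TCP chunks.

```python
# Minimal sketch of the header-injection idea from the proxy above.
# Assumption: `request` contains complete headers ending in a blank line;
# both CRLF and bare-LF line endings are recognized, as in the JS code.
AUTH_LINE = b"Proxy-Authorization: Basic dXNlcjpwYXNz"  # placeholder credentials

def inject_auth(request: bytes) -> bytes:
    for sep in (b"\r\n\r\n", b"\n\n"):           # CRLF CRLF, or bare LF LF
        end = request.find(sep)
        if end != -1:
            head, body = request[:end], request[end + len(sep):]
            eol = b"\r\n" if sep == b"\r\n\r\n" else b"\n"
            return head + eol + AUTH_LINE + sep + body
    return request                                # headers not complete yet

req = b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n"
print(inject_auth(req).decode("ascii"))
```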
Values.go
package main import "fmt" func main() { fmt.Println("go" + "lang") fmt.Println("1 + 1 = ", 1 + 1) fmt.Println("7.0 / 3.0 = ", 7.0 / 3.0) fmt.Println(true && false)
fmt.Println(true || false) fmt.Println(!true) }
redis-cache.py
#!/usr/bin/env python import cPickle from functools import wraps def redis_lru(capacity=5000, slice=slice(None)): def
decorator
(func): cache_keys = "lru:keys:%s" % (func.__name__,) cache_vals = "lru:vals:%s" % (func.__name__,) cache_hits = "lru:hits:%s" % (func.__name__,) cache_miss = "lru:miss:%s" % (func.__name__,) lvars = [None] # closure mutable def add(key, value): eject() conn = lvars[0] conn.incr(cache_miss) conn.hset(cache_vals, key, cPickle.dumps(value)) conn.zadd(cache_keys, 0, key) return value def get(key): conn = lvars[0] value = conn.hget(cache_vals, key) if value: conn.incr(cache_hits) conn.zincrby(cache_keys, key, 1.0) value = cPickle.loads(value) return value def eject(): conn = lvars[0] count = min((capacity / 10) or 1, 1000) if conn.zcard(cache_keys) >= capacity: eject = conn.zrange(cache_keys, 0, count) conn.zremrangebyrank(cache_keys, 0, count) conn.hdel(cache_vals, *eject) @wraps(func) def wrapper(*args, **kwargs): conn = lvars[0] if conn: items = args + tuple(sorted(kwargs.items())) key = cPickle.dumps(items[slice]) return get(key) or add(key, func(*args, **kwargs)) else: return func(*args, **kwargs) def info(): conn = lvars[0] size = int(conn.zcard(cache_keys) or 0) hits, misses = int(conn.get(cache_hits) or 0), int(conn.get(cache_miss) or 0) return hits, misses, capacity, size def clear(): conn = lvars[0] conn.delete(cache_keys, cache_vals) conn.delete(cache_hits, cache_miss) def init(conn): lvars[0] = conn wrapper.init = init wrapper.info = info wrapper.clear = clear return wrapper return decorator
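For reference, a hypothetical usage sketch of the redis_lru decorator above. It assumes redis-py is installed, a Redis server is reachable, and the zadd/zincrby call signatures match whatever redis-py version the author targeted (the file is Python 2, given cPickle); the FakeDB class and function names are illustrative.

```python
# Hypothetical usage of redis_lru; requires redis-py and a running server.
import redis

class FakeDB(object):
    def lookup(self, user_id):
        return {"id": user_id}       # stand-in for an expensive query

@redis_lru(capacity=100, slice=slice(1, None))   # skip arg 0 (the db handle) in the cache key
def fetch_profile(db, user_id):
    return db.lookup(user_id)

fetch_profile.init(redis.StrictRedis())  # inject the connection into the closure
db = FakeDB()
fetch_profile(db, 42)        # miss: computed, pickled, stored in the hash
fetch_profile(db, 42)        # hit: unpickled from the hash, hit counter bumped
print(fetch_profile.info())  # -> (hits, misses, capacity, size)
fetch_profile.clear()        # drop keys, values, and counters
```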
rw_test.go
// Copyright ©2018 The go-hep Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package riofs import ( "io" "reflect" "testing" "go-hep.org/x/hep/groot/internal/rtests" "go-hep.org/x/hep/groot/rbase" "go-hep.org/x/hep/groot/rbytes" "go-hep.org/x/hep/groot/rtypes" ) func TestWRBuffer(t *testing.T) { for _, tc := range []struct { name string want rtests.ROOTer }{ { name: "TFree", want: &freeSegment{ first: 21, last: 24, }, }, { name: "TFree", want: &freeSegment{ first: 21, last: kStartBigFile + 24, }, }, { name: "TKey", want: &Key{ nbytes: 1024, rvers: 4, // small file objlen: 10, datetime: datime2time(1576331001), keylen: 12, cycle: 2, seekkey: 1024, seekpdir: 2048, class: "MyClass", name: "my-key", title: "my key title", }, }, { name: "TKey", want: &Key{ nbytes: 1024, rvers: 1004, // big file objlen: 10, datetime: datime2time(1576331001), keylen: 12, cycle: 2, seekkey: 1024, seekpdir: 2048, class: "MyClass", name: "my-key", title: "my key title", }, }, { name: "TDirectory", want: &tdirectory{ rvers: 4, // small file named: *rbase.NewNamed("my-name", "my-title"), uuid: rbase.UUID{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, }, }, }, { name: "TDirectory", want: &tdirectory{ rvers: 1004, // big file named: *rbase.NewNamed("my-name", "my-title"), uuid: rbase.UUID{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, }, }, }, { name: "TDirectoryFile", want: &tdirectoryFile{ dir: tdirectory{ rvers: 4, // small file named: *rbase.NewNamed("", ""), uuid: rbase.UUID{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, }, }, ctime: datime2time(1576331001), mtime: datime2time(1576331010), nbyteskeys: 1, nbytesname: 2, seekdir: 3, seekparent: 4, seekkeys: 5, }, }, { name: "TDirectoryFile", want: &tdirectoryFile{ dir: tdirectory{ rvers: 1004, // big file named: *rbase.NewNamed("", ""), uuid: rbase.UUID{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, }, }, ctime: datime2time(1576331001), mtime: datime2time(1576331010), nbyteskeys: 1, nbytesname: 2, seekdir: 3,
seekparent: 4,
seekkeys: 5, }, }, } { t.Run(tc.name, func(t *testing.T) { { wbuf := rbytes.NewWBuffer(nil, nil, 0, nil) wbuf.SetErr(io.EOF) _, err := tc.want.MarshalROOT(wbuf) if err == nil { t.Fatalf("expected an error") } if err != io.EOF { t.Fatalf("got=%v, want=%v", err, io.EOF) } } wbuf := rbytes.NewWBuffer(nil, nil, 0, nil) _, err := tc.want.MarshalROOT(wbuf) if err != nil { t.Fatalf("could not marshal ROOT: %v", err) } rbuf := rbytes.NewRBuffer(wbuf.Bytes(), nil, 0, nil) class := tc.want.Class() obj := rtypes.Factory.Get(class)().Interface().(rbytes.Unmarshaler) { rbuf.SetErr(io.EOF) err = obj.UnmarshalROOT(rbuf) if err == nil { t.Fatalf("expected an error") } if err != io.EOF { t.Fatalf("got=%v, want=%v", err, io.EOF) } rbuf.SetErr(nil) } err = obj.UnmarshalROOT(rbuf) if err != nil { t.Fatalf("could not unmarshal ROOT: %v", err) } if !reflect.DeepEqual(obj, tc.want) { t.Fatalf("error\ngot= %+v\nwant=%+v\n", obj, tc.want) } }) } }
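The Go test above follows a marshal/unmarshal round-trip pattern: serialize each fixture, deserialize into a fresh object, and demand deep equality, plus checking that a pre-set error short-circuits both directions. A rough Python rendering of the same pattern, with pickle standing in for the ROOT wire format and illustrative fixtures:

```python
# Sketch of the round-trip test pattern used in rw_test.go; pickle stands
# in for the ROOT serialization and the fixtures are not go-hep types.
import pickle
import unittest

class RoundTrip(unittest.TestCase):
    def test_round_trip(self):
        for fixture in [{"first": 21, "last": 24}, {"name": "my-key", "cycle": 2}]:
            buf = pickle.dumps(fixture)     # "MarshalROOT"
            got = pickle.loads(buf)         # "UnmarshalROOT"
            self.assertEqual(got, fixture)  # deep equality, like reflect.DeepEqual

if __name__ == "__main__":
    unittest.main()
```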
adapter.go
package manta import ( "context" //nolint:gosec "encoding/hex" "encoding/pem" "errors" "fmt" "io" "io/ioutil" "log" "net/http" "os" "path" "strings" "time" "regexp" triton "github.com/joyent/triton-go/v2" "github.com/joyent/triton-go/v2/authentication" "github.com/joyent/triton-go/v2/storage" "github.com/treeverse/lakefs/pkg/block" "github.com/treeverse/lakefs/pkg/block/params" "github.com/treeverse/lakefs/pkg/logging" ) const ( BlockstoreType = "manta" defaultMantaRoot = "/stor" mantaUploadIDRegex = "(?m)^([a-f0-9]{8})([a-f0-9]{4})([a-f0-9]{4})([a-f0-9]{4})([a-f0-9]{12})$" partSuffix = ".part_" marker = " " ) type Adapter struct { client *storage.StorageClient uploadIDTranslator block.UploadIDTranslator accountName string } var ( ErrInventoryNotSupported = errors.New("inventory feature not implemented for mantastorage adapter") ErrInvalidUploadIDFormat = errors.New("invalid upload id format") ErrFinalizedMultiPart = errors.New("cannot upload to a finalized multipart") ) /* func WithTranslator(t block.UploadIDTranslator) func(a *Adapter) { return func(a *Adapter) { a.uploadIDTranslator = t } } */ type MultiPartIDTranslator struct { } func (m MultiPartIDTranslator) SetUploadID(uploadID string) string { return strings.ReplaceAll(uploadID, "-", "") } func (m MultiPartIDTranslator) TranslateUploadID(simulationID string) string { var outputStringArray []string pathMetadata := regexp.MustCompile(mantaUploadIDRegex) matches := pathMetadata.FindStringSubmatch(simulationID) for index := 1; index < len(matches); index++ { outputStringArray = append(outputStringArray, matches[index]) } return strings.Join(outputStringArray, "-") } func (m MultiPartIDTranslator) RemoveUploadID(inputUploadID string) { } func
NewAdapter
(sc *storage.StorageClient, accountName string, opts ...func(a *Adapter)) (*Adapter, error) { adapter := &Adapter{ client: sc, uploadIDTranslator: MultiPartIDTranslator{}, accountName: accountName, } for _, opt := range opts { opt(adapter) } return adapter, nil } func resolveNamespace(obj block.ObjectPointer) (block.QualifiedKey, error) { qualifiedKey, err := block.ResolveNamespace(obj.StorageNamespace, obj.Identifier, obj.IdentifierType) if err != nil { return qualifiedKey, err } if qualifiedKey.StorageType != block.StorageTypeManta { return qualifiedKey, block.ErrInvalidNamespace } return qualifiedKey, nil } func (l *Adapter) Put(ctx context.Context, obj block.ObjectPointer, size int64, reader io.Reader, _ block.PutOpts) error { var err error defer reportMetrics("Put", time.Now(), &size, &err) _, err = resolveNamespace(obj) if err != nil { return err } metadata := make(map[string]string) objectPathFromStorageNameSpace := strings.ReplaceAll(obj.StorageNamespace, "manta://", "") objectPathWithBucket := path.Join(defaultMantaRoot, objectPathFromStorageNameSpace) return l.client.Objects().Put(ctx, &storage.PutObjectInput{ForceInsert: true, ObjectReader: reader, ObjectPath: path.Join(objectPathWithBucket, obj.Identifier), ContentType: metadata["content-type"], ContentMD5: metadata["content-md5"], ContentLength: uint64(size)}) } func (l *Adapter) Remove(ctx context.Context, obj block.ObjectPointer) error { return l.client.Objects().Delete(ctx, &storage.DeleteObjectInput{ObjectPath: l.translateToMantaPath(obj)}) } func (l *Adapter) Copy(ctx context.Context, sourceObj, destinationObj block.ObjectPointer) error { return l.client.SnapLinks().Put(ctx, &storage.PutSnapLinkInput{LinkPath: l.translateToMantaPath(destinationObj), SourcePath: l.translateToMantaPath(sourceObj)}) } func (l *Adapter) UploadCopyPart(ctx context.Context, sourceObj, destinationObj block.ObjectPointer, uploadID string, partNumber int64) (string, error) { if err := isValidUploadID(uploadID); err != nil { return "", err } r, err := l.Get(ctx, sourceObj, 0) if err != nil { return "", err } etag, err := l.UploadPart(ctx, destinationObj, 0, r, uploadID, partNumber) return etag, err } func (l *Adapter) UploadCopyPartRange(ctx context.Context, sourceObj, destinationObj block.ObjectPointer, uploadID string, partNumber, startPosition, endPosition int64) (string, error) { var err error defer reportMetrics("UploadCopyPartRange", time.Now(), nil, &err) if err := isValidUploadID(uploadID); err != nil { return "", err } r, err := l.GetRange(ctx, sourceObj, startPosition, endPosition) if err != nil { return "", err } etag, err := l.UploadPart(ctx, destinationObj, 0, r, uploadID, partNumber) if err != nil { return "", err } return etag, err } func (l *Adapter) Get(ctx context.Context, obj block.ObjectPointer, size int64) (reader io.ReadCloser, err error) { defer reportMetrics("Get", time.Now(), &size, &err) output, err := l.client.Objects().Get(ctx, &storage.GetObjectInput{ObjectPath: l.translateToMantaPath(obj)}) //output.ContentLength = uint64(size) return output.ObjectReader, err } func (l *Adapter) Walk(ctx context.Context, walkOpt block.WalkOpts, walkFn block.WalkFunc) error { var err error log := l.log(ctx).WithField("operation", "Walk") pathBase := path.Base(walkOpt.Prefix) defer reportMetrics("Walk", time.Now(), nil, &err) bucketPath := strings.ReplaceAll(walkOpt.StorageNamespace, "manta://", "") var dirName string if pathDir := path.Dir(walkOpt.Prefix); pathDir == "." 
{ dirName = path.Join(defaultMantaRoot, bucketPath) } else { dirName = path.Join(defaultMantaRoot, bucketPath, pathDir) } input := &storage.ListDirectoryInput{ DirectoryName: dirName, } objs, err := l.client.Dir().List(ctx, input) if err != nil { log.WithError(err). WithField("input", input.DirectoryName). Error(err) return err } for _, obj := range objs.Entries { // If the base name of our prefix was found to be of type "directory" // than we need to pull the directory entries for that instead. if err := walkFn(obj.Name); err != nil { return err } if obj.Name == pathBase && obj.Type == "directory" { input.DirectoryName = path.Join(defaultMantaRoot, bucketPath, walkOpt.Prefix) objs, err = l.client.Dir().List(ctx, input) if err != nil { log.WithError(err). WithField("input", input.DirectoryName). Error(err) return err } } } return nil } func (l *Adapter) Exists(ctx context.Context, obj block.ObjectPointer) (bool, error) { var err error defer reportMetrics("Exists", time.Now(), nil, &err) fmt.Println("EXISTS.....") _, err = l.client.Objects().Get(ctx, &storage.GetObjectInput{ObjectPath: l.translateToMantaPath(obj)}) if err != nil { return false, err } return true, err } func (l *Adapter) GetRange(ctx context.Context, obj block.ObjectPointer, start int64, end int64) (io.ReadCloser, error) { var err error defer reportMetrics("GetRange", time.Now(), nil, &err) output, err := l.client.Objects().Get(ctx, &storage.GetObjectInput{ ObjectPath: l.translateToMantaPath(obj), }) if err != nil { return nil, err } _, err = io.CopyN(io.Discard, output.ObjectReader, start) if err != nil { return nil, err } return &struct { io.Reader io.Closer }{ Reader: io.LimitReader(output.ObjectReader, end-start+1), Closer: output.ObjectReader, }, err } func (l *Adapter) GetProperties(ctx context.Context, obj block.ObjectPointer) (block.Properties, error) { var err error defer reportMetrics("GetProperties", time.Now(), nil, &err) fmt.Println("GET PROPERTIES CALLED...") _, err = l.client.Objects().GetInfo(ctx, &storage.GetInfoInput{ObjectPath: l.translateToMantaPath(obj)}) if err != nil { return block.Properties{}, err } // No properties, just return that it exists return block.Properties{}, nil } func (l *Adapter) CreateMultiPartUpload(ctx context.Context, obj block.ObjectPointer, _ *http.Request, _ block.CreateMultiPartUploadOpts) (string, error) { mBody := storage.CreateMpuBody{ObjectPath: l.translateToMantaPath(obj)} cmo, err := l.client.Objects().CreateMultipartUpload(ctx, &storage.CreateMpuInput{Body: mBody, ForceInsert: true}) if err != nil { return "", nil } return l.uploadIDTranslator.SetUploadID(cmo.Id), nil } func (l *Adapter) UploadPart(ctx context.Context, obj block.ObjectPointer, _ int64, reader io.Reader, uploadID string, partNumber int64) (string, error) { uploadPart, err := l.GetMultiPartUpload(ctx, uploadID) if err := isValidUploadID(uploadID); err != nil { return "", err } if uploadPart.State == "done" { return "", ErrFinalizedMultiPart } //md5Read := block.NewHashingReader(reader, block.HashFunctionMD5) lp, err := l.client.Objects().UploadPart(ctx, &storage.UploadPartInput{Id: l.uploadIDTranslator.TranslateUploadID(uploadID), PartNum: uint64(partNumber), ObjectReader: reader}) if err != nil { return "", err } return lp.Part, err } func (l *Adapter) AbortMultiPartUpload(ctx context.Context, obj block.ObjectPointer, uploadID string) error { err := l.client.Objects().AbortMultipartUpload(ctx, &storage.AbortMpuInput{PartsDirectoryPath: path.Join(l.accountName, "uploads", uploadID[:3], 
l.uploadIDTranslator.TranslateUploadID(uploadID))}) if err != nil { return err } l.uploadIDTranslator.RemoveUploadID(uploadID) return nil } func (l *Adapter) translateToMantaPath(obj block.ObjectPointer) string { objectPathFromStorageNameSpace := strings.ReplaceAll(obj.StorageNamespace, "manta://", "") objectPathWithBucket := path.Join(defaultMantaRoot, objectPathFromStorageNameSpace, obj.Identifier) return objectPathWithBucket } func (l *Adapter) CompleteMultiPartUpload(ctx context.Context, obj block.ObjectPointer, uploadID string, multipartList *block.MultipartUploadCompletion) (*string, int64, error) { lmop, err := l.client.Objects().ListMultipartUploadParts(ctx, &storage.ListMpuPartsInput{Id: l.uploadIDTranslator.TranslateUploadID(uploadID)}) if err != nil { return nil, 0, err } var parts []string var size int64 for _, es := range lmop.Parts { parts = append(parts, es.ETag) size = size + es.Size } cob := storage.CommitMpuBody{Parts: parts} err = l.client.Objects().CommitMultipartUpload(ctx, &storage.CommitMpuInput{Id: l.uploadIDTranslator.TranslateUploadID(uploadID), Body: cob}) op, err := l.GetMultiPartUpload(ctx, uploadID) if err != nil { return nil, 0, err } output, err := l.client.Objects().Get(ctx, &storage.GetObjectInput{ObjectPath: op.TargetObject}) if err != nil { return nil, -1, err } l.uploadIDTranslator.RemoveUploadID(uploadID) return &output.ETag, size, err } func (l *Adapter) GetMultiPartUpload(ctx context.Context, uploadID string) (*storage.GetMpuOutput, error) { return l.client.Objects().GetMultipartUpload(ctx, &storage.GetMpuInput{PartsDirectoryPath: path.Join(l.accountName, "uploads", uploadID[:3], l.uploadIDTranslator.TranslateUploadID(uploadID))}) } func (l *Adapter) ValidateConfiguration(_ context.Context, _ string) error { return nil } func (l *Adapter) GenerateInventory(_ context.Context, _ logging.Logger, _ string, _ bool, _ []string) (block.Inventory, error) { return nil, ErrInventoryNotSupported } func (l *Adapter) BlockstoreType() string { return BlockstoreType } func (l *Adapter) GetStorageNamespaceInfo() block.StorageNamespaceInfo { return block.DefaultStorageNamespaceInfo(BlockstoreType) } func (l *Adapter) RuntimeStats() map[string]string { return nil } func isValidUploadID(uploadID string) error { _, err := hex.DecodeString(uploadID) if err != nil { return fmt.Errorf("%w: %s", ErrInvalidUploadIDFormat, err) } return nil } func NewMantaClient(mantaConfig params.Manta) *storage.StorageClient { keyID := mantaConfig.MantaKeyID accountName := os.Getenv("TRITON_ACCOUNT") keyMaterial := mantaConfig.MantaKeyPath userName := mantaConfig.MantaUser var signer authentication.Signer var err error if keyMaterial == "" { input := authentication.SSHAgentSignerInput{ KeyID: keyID, AccountName: accountName, Username: userName, } signer, err = authentication.NewSSHAgentSigner(input) if err != nil { log.Fatalf("Error Creating SSH Agent Signer: {{err}}", err) } } else { var keyBytes []byte if _, err = os.Stat(keyMaterial); err == nil { keyBytes, err = ioutil.ReadFile(keyMaterial) if err != nil { log.Fatalf("Error reading key material from %s: %s", keyMaterial, err) } block, _ := pem.Decode(keyBytes) if block == nil { log.Fatalf( "Failed to read key material '%s': no key found", keyMaterial) } if block.Headers["Proc-Type"] == "4,ENCRYPTED" { log.Fatalf( "Failed to read key '%s': password protected keys are\n"+ "not currently supported. 
Please decrypt the key prior to use.", keyMaterial) } } else { keyBytes = []byte(keyMaterial) } input := authentication.PrivateKeySignerInput{ KeyID: keyID, PrivateKeyMaterial: keyBytes, AccountName: accountName, Username: userName, } signer, err = authentication.NewPrivateKeySigner(input) if err != nil { log.Fatalf("Error Creating SSH Private Key Signer: {{err}}", err) } } config := &triton.ClientConfig{ MantaURL: mantaConfig.MantaUrl, AccountName: mantaConfig.MantaUser, Username: "", Signers: []authentication.Signer{signer}, } c, err := storage.NewClient(config) if err != nil { log.Fatalf("compute.NewClient: %s", err) } return c } func formatMultipartFilename(uploadID string, partNumber int64) string { // keep natural sort order with zero padding return fmt.Sprintf("%s"+partSuffix+"%05d", uploadID, partNumber) } func (a *Adapter) log(ctx context.Context) logging.Logger { return logging.FromContext(ctx) }
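The adapter's MultiPartIDTranslator hides Manta's dashed UUID upload IDs from callers by stripping the dashes, then re-inserting them with a grouping regex when talking to Manta again. A small Python sketch of that mapping (the regex groups mirror the 8-4-4-4-12 layout of mantaUploadIDRegex; names are illustrative):

```python
import re

# 8-4-4-4-12 hex groups of a dashless UUID, mirroring mantaUploadIDRegex.
_UPLOAD_ID = re.compile(r"^([a-f0-9]{8})([a-f0-9]{4})([a-f0-9]{4})([a-f0-9]{4})([a-f0-9]{12})$")

def set_upload_id(upload_id: str) -> str:
    """Strip dashes before handing the ID to callers."""
    return upload_id.replace("-", "")

def translate_upload_id(simulation_id: str) -> str:
    """Re-insert dashes before talking to Manta again."""
    return "-".join(_UPLOAD_ID.match(simulation_id).groups())

uid = "123e4567-e89b-42d3-a456-426614174000"
assert translate_upload_id(set_upload_id(uid)) == uid
```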
dsound.rs
#![allow(non_snake_case)] use crate::{ context::SAMPLE_RATE, device::{Device, FeedCallback, MixContext, NativeSample}, error::SoundError, }; use std::mem::size_of; use winapi::{ ctypes::c_void, shared::{ guiddef::IID_NULL, minwindef::DWORD, mmreg::{WAVEFORMATEX, WAVE_FORMAT_PCM}, ntdef::{HANDLE, PVOID}, winerror::HRESULT, }, um::{ dsound::*, synchapi::{CreateEventA, WaitForMultipleObjects}, unknwnbase::{IUnknown, IUnknownVtbl}, winbase::{INFINITE, WAIT_OBJECT_0}, winuser::GetForegroundWindow, }, }; // Declare missing structs and interfaces. STRUCT! {struct DSBPOSITIONNOTIFY { dwOffset: DWORD, hEventNotify: HANDLE, }} RIDL! {#[uuid(0xb021_0783, 0x89cd, 0x11d0, 0xaf, 0x8, 0x0, 0xa0, 0xc9, 0x25, 0xcd, 0x16)] interface IDirectSoundNotify(IDirectSoundNotifyVtbl): IUnknown(IUnknownVtbl) { fn SetNotificationPositions( dwPositionNotifies : DWORD, pcPositionNotifies : PVOID, ) -> HRESULT, }} pub struct DirectSoundDevice { direct_sound: *mut IDirectSound, buffer: *mut IDirectSoundBuffer, notify_points: [*mut c_void; 2], buffer_len_bytes: u32, out_data: Vec<NativeSample>, mix_buffer: Vec<(f32, f32)>, callback: Box<FeedCallback>, } unsafe impl Send for DirectSoundDevice {} fn check<S: Into<String>>(code: i32, message: S) -> Result<(), SoundError> { if code == DS_OK { Ok(()) } else { Err(SoundError::FailedToInitializeDevice(message.into())) } } impl DirectSoundDevice { pub fn new(buffer_len_bytes: u32, callback: Box<FeedCallback>) -> Result<Self, SoundError> { unsafe { let mut direct_sound = std::ptr::null_mut(); check( DirectSoundCreate(std::ptr::null(), &mut direct_sound, std::ptr::null_mut()), "Failed to initialize DirectSound", )?; check( (*direct_sound).SetCooperativeLevel(GetForegroundWindow(), DSSCL_PRIORITY), "Failed to set cooperative level", )?; let channels_count = 2; let byte_per_sample = size_of::<i16>() as u16; let block_align = byte_per_sample * channels_count; let mut buffer_format = WAVEFORMATEX { wFormatTag: WAVE_FORMAT_PCM, nChannels: channels_count, nSamplesPerSec: SAMPLE_RATE, nAvgBytesPerSec: SAMPLE_RATE * u32::from(block_align), nBlockAlign: block_align, wBitsPerSample: 8 * byte_per_sample, cbSize: size_of::<WAVEFORMATEX>() as u16, }; let buffer_desc = DSBUFFERDESC { dwSize: size_of::<DSBUFFERDESC>() as u32, dwFlags: DSBCAPS_CTRLPOSITIONNOTIFY | DSBCAPS_GLOBALFOCUS, dwBufferBytes: 2 * buffer_len_bytes, dwReserved: 0, lpwfxFormat: &mut buffer_format, guid3DAlgorithm: IID_NULL, }; let mut buffer = std::ptr::null_mut(); check( (*direct_sound).CreateSoundBuffer(&buffer_desc, &mut buffer, std::ptr::null_mut()), "Failed to create back buffer.", )?; let mut notify: *mut IDirectSoundNotify = std::ptr::null_mut(); check( (*buffer).QueryInterface( &IID_IDirectSoundNotify, ((&mut notify) as *mut *mut _) as *mut *mut c_void, ), "Failed to obtain IDirectSoundNotify interface.", )?; let notify_points = [ CreateEventA(std::ptr::null_mut(), 0, 0, std::ptr::null()), CreateEventA(std::ptr::null_mut(), 0, 0, std::ptr::null()), ]; let mut pos = [ DSBPOSITIONNOTIFY { dwOffset: 0, hEventNotify: notify_points[0], }, DSBPOSITIONNOTIFY { dwOffset: buffer_desc.dwBufferBytes / 2, hEventNotify: notify_points[1], }, ]; check( (*notify) .SetNotificationPositions(pos.len() as u32, &mut pos as *mut _ as *mut c_void), "Failed to set notification positions.", )?; check( (*buffer).Play(0, 0, DSBPLAY_LOOPING), "Failed to begin playing back buffer.", )?; let samples_per_channel = buffer_len_bytes as usize / size_of::<NativeSample>(); Ok(Self { direct_sound, buffer, out_data: vec![Default::default(); 
samples_per_channel], mix_buffer: vec![(0.0, 0.0); samples_per_channel], notify_points, buffer_len_bytes, callback, }) } } } impl Drop for DirectSoundDevice { fn drop(&mut self) {
unsafe { assert_eq!((*self.direct_sound).Release(), 0);
} } } unsafe fn write( ds_buffer: *mut IDirectSoundBuffer, offset_bytes: u32, len_bytes: u32, data: &[NativeSample], ) { let mut size = 0; let mut device_buffer = std::ptr::null_mut(); (*ds_buffer).Lock( offset_bytes, len_bytes, &mut device_buffer, &mut size, std::ptr::null_mut(), std::ptr::null_mut(), 0, ); std::ptr::copy_nonoverlapping( data.as_ptr() as *mut u8, device_buffer as *mut u8, size as usize, ); (*ds_buffer).Unlock(device_buffer, size, std::ptr::null_mut(), 0); } impl Device for DirectSoundDevice { fn get_mix_context(&mut self) -> MixContext { MixContext { mix_buffer: self.mix_buffer.as_mut_slice(), out_data: &mut self.out_data, callback: &mut self.callback, } } fn feed(&mut self) { self.mix(); // Wait and send. unsafe { const WAIT_OBJECT_1: u32 = WAIT_OBJECT_0 + 1; match WaitForMultipleObjects(2, self.notify_points.as_ptr(), 0, INFINITE) { WAIT_OBJECT_0 => write( self.buffer, self.buffer_len_bytes, self.buffer_len_bytes, &self.out_data, ), WAIT_OBJECT_1 => write(self.buffer, 0, self.buffer_len_bytes, &self.out_data), _ => panic!("Unknown buffer point!"), } } } }
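dsound.rs registers two notification events, one at offset 0 and one at the half-buffer mark, then refills whichever half the device is not currently playing. A toy Python model of that ping-pong refill, with threading.Event standing in for the Win32 notification events and arbitrary buffer sizes:

```python
# Toy model of the half-buffer ping-pong in dsound.rs: the "device" signals
# which half it just started playing, and the feeder fills the other half.
import threading

buf = bytearray(8)                    # whole ring buffer: two 4-byte halves
half_played = [threading.Event(), threading.Event()]

def feed(half_index: int, sample: int) -> None:
    start = 0 if half_index == 1 else len(buf) // 2   # write the *other* half
    for i in range(start, start + len(buf) // 2):
        buf[i] = sample

# Pretend the device just crossed into half 0: refill half 1.
half_played[0].set()
if half_played[0].is_set():
    feed(0, 0xAA)
print(buf.hex())   # second half now holds the freshly mixed samples
```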
config.go
package dump // default values. const ( DefaultManifestFile = ".pdd.yaml" )
// Config contains export configuration options. type Config struct { ManifestFile string }
pxText.ts
/** * Copyright 2018 Comcast Cable Communications Management, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 *
* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { pxObject, pxObjectProps, pxsceneObject } from './pxObject'; /** * A pxText serves as a stand-in for the actual pxScene Text instance * that it represents. */ export class pxText extends pxObject { __root: pxsceneText = null; constructor(props: pxTextProps = {} as pxTextProps) { super(props); this.props.t = 'text'; } /** * Getters for read-only pxScene Text properties. */ get ready() { return this.__root.ready; } /** * Getters/setters for pxScene Text properties. */ set text(text) { this.__root.text = text; } get text() { return this.__root.text; } set textColor(textColor) { this.__root.textColor = textColor; } get textColor() { return this.__root.textColor; } set pixelSize(pixelSize) { this.__root.pixelSize = pixelSize; } get pixelSize() { return this.__root.pixelSize; } set fontUrl(fontUrl) { this.__root.fontUrl = fontUrl; } get fontUrl() { return this.__root.fontUrl; } set font(font) { this.__root.font = font; } get font() { return this.__root.font; } } // -------------------------------------------------------------------- // // Type definitions // -------------------------------------------------------------------- // /** The props for creating pxText instances. */ export interface pxTextProps extends pxObjectProps { text?: String; textColor?: number; pixelSize?: number; fontUrl?: String; font?: Object; } /** The underlying pxScene Text instance to be managed through pxsceneUI. */ export interface pxsceneText extends pxsceneObject { text: String; textColor: number; pixelSize: number; fontUrl: String; ready: Promise<any>; font: Object; }
image-scan-types.go
package scanners // ImageScanResult contains details about all the found vulnerabilities type ImageScanResult struct { Image string `json:"image"` ScanResult string `json:"scanResult"` Description string `json:"description"` Targets []TrivyScanTarget `json:"targets"` } type TrivyScanTarget struct { Target string `json:"Target"` Vulnerabilities []VulnerabilityDescription `json:"Vulnerabilities"` } type VulnerabilityDescription struct { CVE string `json:"VulnerabilityID"` Package string `json:"PkgName"` InstalledVersion string `json:"InstalledVersion"` FixedVersion string `json:"FixedVersion"` Title string `json:"Title"` Description string `json:"Description"` Severity string `json:"Severity"` References []string `json:"References"` } // ImageScanResultSummary contains vulnerabilities summary type ImageScanResultSummary struct { Image string `json:"image"` ScanResult string `json:"scanResult"` Description string `json:"description"` Counters []VulnerabilityCounter `json:"counters"` } // VulnerabilityCounter represents amount of issues with specified severity
type VulnerabilityCounter struct { Severity string `json:"severity"`
Count int `json:"count"` } type ContainerImageScansSummary struct { Images []ContainerImageScanResult `json:"images"` } // ImageScanResult is a short description of a single container image vulnerabilities audit type ContainerImageScanResult struct { Image string `json:"image"` ScanResult string `json:"scanResult"` Description string `json:"description"` Counters []VulnerabilityCounter `json:"counters"` Attributes []string `json:"attributes"` Pods []string `json:"pods"` }
index.js
const Domain = require('url-domain-name'); module.exports = (url) => { const domainName = Domain.from(url); if (!domainName) { throw new Error('Invalid URL');
} return domainName.replace(/\./g, '-'); };
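index.js converts a URL's domain name into a dash-separated slug. A rough Python equivalent for comparison, with urllib standing in for the url-domain-name package; this naive sketch keeps subdomains and does no public-suffix handling, which the npm package presumably does.

```python
# Rough Python equivalent of index.js: host -> dash-separated slug.
from urllib.parse import urlparse

def domain_slug(url: str) -> str:
    host = urlparse(url).hostname
    if not host:
        raise ValueError("Invalid URL")
    return host.replace(".", "-")

print(domain_slug("https://example.com/path"))   # example-com
```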
mobile-filter.js
/** * Created by moveosoftware on 12/23/18. */ let toggleScreenLock = require('./screenLock'); jQuery(document).ready(($) => { // mobile filter handlers if (foodyGlobals.isMobile) { let $mobileFilterBtn = $('.filter-mobile'); let $mobileWhatsappIcon = $('#whatsapp.whatsapp-mobile'); if (foodyGlobals.hideFilter) { $mobileFilterBtn.remove(); } else { let $mobileFilter = $('.mobile-filter'); let $closeBtn = $('.close', $mobileFilter); let filterShown = false; if ($mobileFilterBtn.length) { $('.md-checkbox').on('click', (e) => { e.stopPropagation(); }); $('.show-recipes', $mobileFilter).click(function () { closeMobileFilter(); }); $mobileFilterBtn.click((event) => { event.stopPropagation(); // if brands avenue is open => close it if($('.brands-toggle-mobile .brands-avenue-mobile').length){ if($('.brands-toggle-mobile .brands-avenue-mobile').hasClass('open')){ $('.brands-avenue-mobile').removeClass('open'); } } // if mobile menu open => close it if($('header .navbar-toggler').length && $('header .quadmenu-navbar-toggle').length && !$('header .quadmenu-navbar-toggle').hasClass('collapsed')){ $('header .quadmenu-navbar-toggle').click(); } $mobileFilter.addClass('open'); $(window).click(function () { closeMobileFilter(false); }); $mobileFilter.click(function (event) { event.stopPropagation(); }); toggleScreenLock(true, $mobileFilter, true); filterShown = true; }); $closeBtn.click(function () { closeMobileFilter(true); }); } function
closeMobileFilter
(clear) { $mobileFilter.removeClass('open'); toggleScreenLock(false, $mobileFilter, true); document.removeEventListener('click', closeMobileFilter); filterShown = false; if (clear) { clearFilter(); } } function clearFilter() { $('.md-checkbox input[type="checkbox"]:checked', $mobileFilter).each(function () { $(this).next('label').click(); }); } // fades the floating filter button in/out // and hides/shows based on scroll position to make sure // no content is blocked $(window).scroll(function () { let threshold = 600; // number of pixels before bottom of page that you want to start fading let op = (($(document).height() - $(window).height()) - $(window).scrollTop()) / threshold; let pointerEvents = "all"; if (op <= 0.2) { op = 0; pointerEvents = "none" } $mobileFilterBtn.css("opacity", op); $mobileFilterBtn.css("pointer-events", pointerEvents); $mobileWhatsappIcon.css("opacity", op); $mobileWhatsappIcon.css("pointer-events", pointerEvents); }); window.addEventListener("orientationchange", function () { if (foodyGlobals.isTablet && filterShown) { if (screen.orientation.angle == 90) { closeMobileFilter(); } } }); } } });
block_volume_replica_info.go
// Copyright (c) 2016, 2018, 2021, Oracle and/or its affiliates. All rights reserved. // This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. // Code generated. DO NOT EDIT.
// Core Services API
// // API covering the Networking (https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/overview.htm), // Compute (https://docs.cloud.oracle.com/iaas/Content/Compute/Concepts/computeoverview.htm), and // Block Volume (https://docs.cloud.oracle.com/iaas/Content/Block/Concepts/overview.htm) services. Use this API // to manage resources such as virtual cloud networks (VCNs), compute instances, and // block storage volumes. // package core import ( "github.com/oracle/oci-go-sdk/v49/common" ) // BlockVolumeReplicaInfo Information about the block volume replica in the destination availability domain. type BlockVolumeReplicaInfo struct { // The display name of the block volume replica DisplayName *string `mandatory:"true" json:"displayName"` // The block volume replica's Oracle ID (OCID). BlockVolumeReplicaId *string `mandatory:"true" json:"blockVolumeReplicaId"` // The availability domain of the block volume replica. // Example: `Uocm:PHX-AD-1` AvailabilityDomain *string `mandatory:"true" json:"availabilityDomain"` } func (m BlockVolumeReplicaInfo) String() string { return common.PointerString(m) }
test_cyclegan.py
from __future__ import print_function, division import scipy from keras.datasets import mnist from instanceNormalization import InstanceNormalization from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate, GRU, CuDNNGRU, Bidirectional, Lambda from keras.layers import BatchNormalization, Activation, ZeroPadding2D from keras.layers.merge import add, concatenate from keras.layers.advanced_activations import LeakyReLU from keras.layers.convolutional import UpSampling2D, Conv2D from keras.models import Sequential, Model from keras.optimizers import Adam from keras import backend as K import datetime import matplotlib #matplotlib.use('TKAgg') import matplotlib.pyplot as plt import sys from data_loader import DataLoader from options import train_options, test_options import para import json import numpy as np import os from os.path import join class CycleGAN(): def __init__(self, args): # Input shape self.args = args self.img_rows = 120#128 self.img_cols = 240#128 self.channels = 3 self.img_shape = (self.img_rows, self.img_cols, self.channels) self.phase = 'test' if args.test else 'train' # Configure data loader self.dataset_name = args.dataset self.data_loader = DataLoader(dataset_name=self.dataset_name, img_res=(self.img_rows, self.img_cols)) # Calculate output shape of D (PatchGAN) patch_rows = int(self.img_rows / 2**3) patch_cols = int(self.img_cols / 2**3) self.disc_patch = (patch_rows, patch_cols, 1) # Number of filters in the first layer of G and D self.gf = 64 self.df = 64 # Loss weights self.lambda_cycle = 10.0 # Cycle-consistency loss self.lambda_id = 0.1 * self.lambda_cycle # Identity loss self.lambda_condition = self.lambda_cycle * .1 self.lr = 2e-4 self.args_append_attr() #@TODO load args.json and overwrite the default #with open(join(args.exp_dir, 'args.json'), 'r') as f: # json.dump(vars(args), f, ensure_ascii=False, indent=2, sort_keys=True) #optimizer = Adam(self.lr) # Build ctc net if args.ctc_condition: self.ctc_model = self.build_condition_network(training=True, condition='ctc') if self.args.verbose: print('------------ctc-model-----------') self.ctc_model.summary() # Build and compile the discriminators self.d_A = self.build_discriminator() self.d_B = self.build_discriminator() if self.args.verbose: print('------------d_A-----------') self.d_A.summary() print('------------d_B-----------') self.d_B.summary() #self.d_A.compile(loss='mse', # optimizer=Adam(self.lr/2), # metrics=['accuracy']) #self.d_B.compile(loss='mse', # optimizer=Adam(self.lr/2), # metrics=['accuracy']) #self.ctc_model.compile(optimizer=optimizer, loss={'ctc': lambda y_true, y_pred: y_pred}) #------------------------- # Construct Computational # Graph of Generators #------------------------- # Build the generators self.g_AB = self.build_generator() self.g_BA = self.build_generator() self.load_model(self.args.exp_dir, self.args.resume_epoch) def
args_append_attr
(self): self.args.lr = self.lr self.args.img_rows = self.img_rows self.args.img_cols = self.img_cols self.args.channels = self.channels self.args.img_shape = self.img_shape self.args.phase = self.phase self.args.dataset_name = self.dataset_name self.args.disc_patch = self.disc_patch # Number of filters in the first layer of G and D self.args.gf = self.gf self.args.df = self.df # Loss weights self.args.lambda_cycle = self.lambda_cycle self.args.lambda_id = self.lambda_id self.args.lambda_condition = self.lambda_condition def load_model(self, exp_dir, resume_epoch): if self.args.ctc_condition: self.ctc_model.load_weights(join(exp_dir, 'ctc_weights_{}.h5').format(resume_epoch)) self.d_A.load_weights(join(exp_dir, 'd_A_weights_{}.h5').format(resume_epoch)) self.d_B.load_weights(join(exp_dir, 'd_B_weights_{}.h5').format(resume_epoch)) self.g_AB.load_weights(join(exp_dir, 'g_AB_weights_{}.h5').format(resume_epoch)) self.g_BA.load_weights(join(exp_dir, 'g_BA_weights_{}.h5').format(resume_epoch)) #self.combined.load_weights(join(exp_dir, 'combined_weights_{}.h5').format(resume_epoch)) def build_generator(self): """U-Net Generator""" def conv2d(layer_input, filters, f_size=4): """Layers used during downsampling""" d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input) d = LeakyReLU(alpha=0.2)(d) d = InstanceNormalization()(d) return d def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0): """Layers used during upsampling""" u = UpSampling2D(size=2)(layer_input) u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u) if dropout_rate: u = Dropout(dropout_rate)(u) u = InstanceNormalization()(u) u = Concatenate()([u, skip_input]) return u # Image input d0 = Input(shape=self.img_shape) # Downsampling #d1 = conv2d(d0, self.gf) d2 = conv2d(d0, self.gf*2) d3 = conv2d(d2, self.gf*4) d4 = conv2d(d3, self.gf*8) # Upsampling u1 = deconv2d(d4, d3, self.gf*4) u2 = deconv2d(u1, d2, self.gf*2) #u3 = deconv2d(u2, d1, self.gf) u3 = UpSampling2D(size=2)(u2) output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u3) return Model(d0, output_img) def build_discriminator(self): def d_layer(layer_input, filters, f_size=4, normalization=True): """Discriminator layer""" d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input) d = LeakyReLU(alpha=0.2)(d) if normalization: d = InstanceNormalization()(d) return d img = Input(shape=self.img_shape) #d1 = d_layer(img, self.df, normalization=False) d2 = d_layer(img, self.df*2, normalization=False) d3 = d_layer(d2, self.df*4) d4 = d_layer(d3, self.df*8) validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4) return Model(img, validity) def build_condition_network(self, training='train', condition='ctc'): if condition == 'ctc': from model import build_ctc_network bool_training = True if training == 'train' else False return build_ctc_network(self.args, training=bool_training) def test_A2B(self, batch_size=1, iteration=64, set='test', save_dir='test_images'): assert args.test == True save_dir = join(save_dir, self.dataset_name) #os.makedirs(join(save_dir, '%s/comparison/%s' % (self.dataset_name, set)), exist_ok=True) os.makedirs(join(save_dir, '%s/transfered_image_A2B/%s' % (args.exp_dir, set)), exist_ok=True) from tqdm import tqdm for batch_i, (imgs_A, lbl_A) in enumerate(tqdm(self.data_loader.load_batch_A(batch_size=batch_size, set=set, is_testing=True, iteration=iteration, condition=True))): # Translate images to opposite domain 
fake_B = self.g_AB.predict(imgs_A) #fake_A = self.g_BA.predict(imgs_B) #reconstr_B = self.g_AB.predict(fake_A) reconstr_A = self.g_BA.predict(fake_B) # Save transfered image self.saved_transfered_image(fake_B, None, lbl_A, None, save_dir=join(save_dir, '%s/transfered_image_A2B/%s' % (args.exp_dir, set)), set=set, batch_id=batch_i) def test_B2A(self, batch_size=1, iteration=64, set='test', save_dir='test_images'): assert args.test == True save_dir = join(save_dir, self.dataset_name) #os.makedirs(join(save_dir, '%s/comparison/%s' % (self.dataset_name, set)), exist_ok=True) os.makedirs(join(save_dir, '%s/transfered_image_B2A/%s' % (args.exp_dir, set)), exist_ok=True) from tqdm import tqdm for batch_i, (imgs_B, lbl_B) in enumerate(tqdm(self.data_loader.load_batch_B(batch_size=batch_size, set=set, is_testing=True, iteration=iteration, condition=True))): # Translate images to opposite domain #fake_B = self.g_AB.predict(imgs_A) fake_A = self.g_BA.predict(imgs_B) reconstr_B = self.g_AB.predict(fake_A) #reconstr_A = self.g_BA.predict(fake_B) # Save transfered image self.saved_transfered_image(None, fake_A, None, lbl_B, save_dir=join(save_dir, '%s/transfered_image_B2A/%s' % (args.exp_dir, set)), set=set, batch_id=batch_i) def test_both(self, batch_size=1, iteration=64, set='test', save_dir='test_images'): assert args.test == True save_dir = join(save_dir, self.dataset_name) os.makedirs(join(save_dir, '%s/comparison/%s' % (args.exp_dir, set)), exist_ok=True) #os.makedirs(join(save_dir, '%s/transfered_image/%s' % (args.exp_dir, set)), exist_ok=True) from tqdm import tqdm for batch_i, (imgs_A, imgs_B ,lbl_A, lbl_B) in enumerate(tqdm(self.data_loader.load_batch(batch_size=batch_size, set=set, is_testing=True, iteration=iteration, condition=True))): # Translate images to opposite domain fake_B = self.g_AB.predict(imgs_A) fake_A = self.g_BA.predict(imgs_B) reconstr_B = self.g_AB.predict(fake_A) reconstr_A = self.g_BA.predict(fake_B) # Save transfered image #self.saved_transfered_image(fake_B, fake_A, lbl_A, lbl_B, save_dir=save_dir, set=set, batch_id=batch_i) # Comparison result gen_imgs = np.concatenate([imgs_A, fake_B, reconstr_A, imgs_B, fake_A, reconstr_B]) # Rescale images 0 - 1 gen_imgs = 0.5 * gen_imgs + 0.5 titles = ['Original', 'Translated', 'Reconstructed'] r, c = 2, 3 fig, axs = plt.subplots(r, c) cnt = 0 for i in range(r): for j in range(c): axs[i,j].imshow(gen_imgs[cnt]) axs[i, j].set_title(titles[j]) axs[i,j].axis('off') cnt += 1 fig.savefig(join(save_dir, "%s/comparison/%s/%d.png" % (args.exp_dir, set, batch_i))) plt.close() # Comparison result def sample_images(self, epoch, batch_i): os.makedirs('images/%s' % self.dataset_name, exist_ok=True) r, c = 2, 3 imgs_A = self.data_loader.load_data(domain="A", batch_size=1, is_testing=True) imgs_B = self.data_loader.load_data(domain="B", batch_size=1, is_testing=True) # Demo (for GIF) #imgs_A = self.data_loader.load_img('datasets/apple2orange/testA/n07740461_1541.jpg') #imgs_B = self.data_loader.load_img('datasets/apple2orange/testB/n07749192_4241.jpg') # Translate images to the other domain fake_B = self.g_AB.predict(imgs_A) fake_A = self.g_BA.predict(imgs_B) # Translate back to original domain reconstr_A = self.g_BA.predict(fake_B) reconstr_B = self.g_AB.predict(fake_A) gen_imgs = np.concatenate([imgs_A, fake_B, reconstr_A, imgs_B, fake_A, reconstr_B]) # Rescale images 0 - 1 gen_imgs = 0.5 * gen_imgs + 0.5 titles = ['Original', 'Translated', 'Reconstructed'] fig, axs = plt.subplots(r, c) cnt = 0 for i in range(r): for j in range(c): 
axs[i,j].imshow(gen_imgs[cnt]) axs[i, j].set_title(titles[j]) axs[i,j].axis('off') cnt += 1 fig.savefig("images/%s/%d_%d.png" % (self.dataset_name, epoch, batch_i)) plt.close() def save_model(self, epochs, exp_dir): if self.args.ctc_condition: self.ctc_model.save_weights(join(exp_dir,'ctc_weights_{}.h5').format(epochs)) self.d_A.save_weights(join(exp_dir, 'd_A_weights_{}.h5').format(epochs)) self.d_B.save_weights(join(exp_dir, 'd_B_weights_{}.h5').format(epochs)) self.g_AB.save_weights(join(exp_dir, 'g_AB_weights_{}.h5').format(epochs)) self.g_BA.save_weights(join(exp_dir, 'g_BA_weights_{}.h5').format(epochs)) self.combined.save_weights(join(exp_dir,'combined_weights_{}.h5').format(epochs)) def saved_transfered_image(self, fake_B, fake_A, lbl_A, lbl_B, save_dir, set, batch_id): """ fake_B is from imageA send into GAB (so the label is A) fake)A is from imageB send into GBA (so the label is B) """ path = save_dir #join(save_dir, '%s/transfered_image/%s' % (self.dataset_name, set)) import cv2 os.makedirs(path, exist_ok=True) #print(type(lbl_A), type(fake_B)) #print(fake_B.shape) for b in range(args.batch): image_id = batch_id * args.batch + b if isinstance(fake_B, np.ndarray) & isinstance(lbl_A, list): cv2.imwrite(join(path, '%d_%s.png'%(image_id, lbl_A[b])), self.unnormalize(fake_B[b])) print('wrote to ', join(path, '%d_%s.png'%(image_id, lbl_A[b]))) if isinstance(fake_A, np.ndarray) & isinstance(lbl_B, list): cv2.imwrite(join(path, '%d_%s.png'%(image_id, lbl_B[b])), self.unnormalize(fake_A[b])) print('wrote to ', join(path, '%d_%s.png'%(image_id, lbl_B[b]))) def unnormalize(self, im): #im = np.array(im) im = np.array(255 * (0.5 * im + 0.5), dtype=np.uint8) #print(im.shape) #print(im) return im if __name__ == '__main__': args = test_options() gan = CycleGAN(args) if args.direction == 'both': gan.test_both(batch_size=args.batch, iteration=args.iteration, set=args.set) elif args.direction == 'A2B': gan.test_A2B(batch_size=args.batch, iteration=args.iteration, set=args.set) elif args.direction == 'B2A': gan.test_B2A(batch_size=args.batch, iteration=args.iteration, set=args.set)
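test_cyclegan.py maps generator outputs from the tanh range [-1, 1] back to 8-bit pixels with 255 * (0.5 * im + 0.5). A tiny numpy round-trip check of that formula (array values are arbitrary):

```python
# Round-trip check for the unnormalize() formula used above:
# tanh output in [-1, 1]  ->  uint8 pixel in [0, 255]  ->  back again.
import numpy as np

def unnormalize(im):
    return np.array(255 * (0.5 * im + 0.5), dtype=np.uint8)

def normalize(px):
    return px.astype(np.float32) / 127.5 - 1.0

im = np.array([-1.0, 0.0, 1.0])
px = unnormalize(im)
print(px)              # [  0 127 255]  (0.0 truncates to 127 on the cast)
print(normalize(px))   # approximately [-1, 0, 1] again
```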
image_feature.py
#! /usr/bin/env python # coding=utf-8 # Copyright (c) 2019 Uber Technologies, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import logging import os import h5py import numpy as np import tensorflow as tf from skimage.io import imread from ludwig.constants import * from ludwig.features.base_feature import BaseFeature from ludwig.features.base_feature import InputFeature from ludwig.models.modules.image_encoders import ResNetEncoder from ludwig.models.modules.image_encoders import Stacked2DCNN from ludwig.utils.image_utils import resize_image from ludwig.utils.misc import get_from_registry from ludwig.utils.misc import set_default_value class ImageBaseFeature(BaseFeature): def __init__(self, feature): super().__init__(feature) self.type = IMAGE preprocessing_defaults = { 'missing_value_strategy': BACKFILL, 'in_memory': True, 'resize_method': 'crop_or_pad' } @staticmethod def get_feature_meta(column, preprocessing_parameters):
return { 'preprocessing': preprocessing_parameters }
@staticmethod def add_feature_data( feature, dataset_df, data, metadata, preprocessing_parameters ): set_default_value( feature, 'in_memory', preprocessing_parameters['in_memory'] ) if ('height' in preprocessing_parameters or 'width' in preprocessing_parameters): should_resize = True try: provided_height = int(preprocessing_parameters[HEIGHT]) provided_width = int(preprocessing_parameters[WIDTH]) except ValueError as e: raise ValueError( 'Image height and width must be set and have ' 'positive integer values: ' + str(e) ) if (provided_height <= 0 or provided_width <= 0): raise ValueError( 'Image height and width must be positive integers' ) else: should_resize = False csv_path = os.path.dirname(os.path.abspath(dataset_df.csv)) num_images = len(dataset_df) height = 0 width = 0 num_channels = 1 if num_images > 0: # here if a width and height have not been specified # we assume that all images have the same wifth and im_height # thus the width and height of the first one are the same # of all the other ones first_image = imread( os.path.join(csv_path, dataset_df[feature['name']][0]) ) height = first_image.shape[0] width = first_image.shape[1] if first_image.ndim == 2: num_channels = 1 else: num_channels = first_image.shape[2] if should_resize: height = provided_height width = provided_width metadata[feature['name']]['preprocessing']['height'] = height metadata[feature['name']]['preprocessing']['width'] = width metadata[feature['name']]['preprocessing'][ 'num_channels'] = num_channels if feature['in_memory']: data[feature['name']] = np.empty( (num_images, height, width, num_channels), dtype=np.int8 ) for i in range(len(dataset_df)): filename = os.path.join( csv_path, dataset_df[feature['name']][i] ) img = imread(filename) if img.ndim == 2: img = img.reshape((img.shape[0], img.shape[1], 1)) if should_resize: img = resize_image( img, (height, width), preprocessing_parameters['resize_method'] ) data[feature['name']][i, :, :, :] = img else: data_fp = os.path.splitext(dataset_df.csv)[0] + '.hdf5' mode = 'w' if os.path.isfile(data_fp): mode = 'r+' with h5py.File(data_fp, mode) as h5_file: image_dataset = h5_file.create_dataset( feature['name'] + '_data', (num_images, height, width, num_channels), dtype=np.uint8 ) for i in range(len(dataset_df)): filename = os.path.join( csv_path, dataset_df[feature['name']][i] ) img = imread(filename) if img.ndim == 2: img = img.reshape((img.shape[0], img.shape[1], 1)) if should_resize: img = resize_image( img, (height, width), preprocessing_parameters['resize_method'], ) image_dataset[i, :height, :width, :] = img data[feature['name']] = np.arange(num_images) class ImageInputFeature(ImageBaseFeature, InputFeature): def __init__(self, feature): super().__init__(feature) self.height = 0 self.width = 0 self.num_channels = 0 self.in_memory = True self.data_hdf5_fp = '' self.encoder = 'stacked_cnn' encoder_parameters = self.overwrite_defaults(feature) self.encoder_obj = self.get_image_encoder(encoder_parameters) def get_image_encoder(self, encoder_parameters): return get_from_registry( self.encoder, image_encoder_registry)( **encoder_parameters ) def _get_input_placeholder(self): # None dimension is for dealing with variable batch size return tf.placeholder( tf.float32, shape=[None, self.height, self.width, self.num_channels], name=self.name, ) def build_input( self, regularizer, dropout_rate, is_training=False, **kwargs ): placeholder = self._get_input_placeholder() logging.debug(' targets_placeholder: {0}'.format(placeholder)) feature_representation, 
feature_representation_size = self.encoder_obj( placeholder, regularizer, dropout_rate, is_training, ) logging.debug( ' feature_representation: {0}'.format(feature_representation) ) feature_representation = { 'name': self.name, 'type': self.type, 'representation': feature_representation, 'size': feature_representation_size, 'placeholder': placeholder } return feature_representation @staticmethod def update_model_definition_with_metadata( input_feature, feature_metadata, *args, **kwargs ): for dim in ['height', 'width', 'num_channels']: input_feature[dim] = feature_metadata['preprocessing'][dim] input_feature['data_hdf5_fp'] = ( kwargs['model_definition']['data_hdf5_fp'] ) @staticmethod def populate_defaults(input_feature): set_default_value(input_feature, 'tied_weights', None) image_encoder_registry = { 'stacked_cnn': Stacked2DCNN, 'resnet': ResNetEncoder }
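image_feature.py infers height, width, and channel count from the first image, then either resizes every subsequent image to match or expects it to already conform before packing the batch into one uint8 tensor. A condensed sketch of that sizing rule, with the skimage/ludwig helpers replaced by plain numpy and resizing stubbed out; names are illustrative.

```python
# Condensed sketch of the sizing rule in add_feature_data: the first image
# fixes HxWxC, grayscale arrays are grown to HxWx1, and everything is
# stacked into a single uint8 tensor. Actual resizing is stubbed out.
import numpy as np

def pack_images(images, target=None):
    first = images[0]
    height, width = target or first.shape[:2]
    channels = 1 if first.ndim == 2 else first.shape[2]
    out = np.empty((len(images), height, width, channels), dtype=np.uint8)
    for i, img in enumerate(images):
        if img.ndim == 2:
            img = img.reshape(img.shape[0], img.shape[1], 1)
        assert img.shape == (height, width, channels), "resize needed here"
        out[i] = img
    return out

batch = [np.zeros((4, 4), dtype=np.uint8), np.ones((4, 4, 1), dtype=np.uint8)]
print(pack_images(batch).shape)   # (2, 4, 4, 1)
```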
1620412189526-AddVerifiedGivebackDefaults.ts
import { MigrationInterface, QueryRunner } from 'typeorm';
export class AddVerifiedGivebackDefaults1620412189526 implements MigrationInterface { async up(queryRunner: QueryRunner): Promise<void> { await queryRunner.query( `ALTER TABLE project ALTER COLUMN verified SET DEFAULT false`, ); await queryRunner.query( `ALTER TABLE project ALTER COLUMN "giveBacks" SET DEFAULT false`, ); } async down(queryRunner: QueryRunner): Promise<void> {} }
tests.rs
use ecs::*; use spatial_hash::*; use util::LeakyReserver; use coord::Coord; struct Env { sh: SpatialHashTable, ctx: EcsCtx, ids: LeakyReserver<EntityId>, } impl Env { fn new() -> Self { Env { sh: SpatialHashTable::new(10, 10), ctx: EcsCtx::new(), ids: LeakyReserver::new(), } } } #[test] fn insert_remove() { let mut env = Env::new(); let mut action = EcsAction::new(); let coord = Coord::new(1, 2); let id = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(coord); entity.insert_solid(); entity.id() }; assert!(!env.sh.get(coord).solid()); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); // this resets the action so it can be reused assert!(env.sh.get(coord).solid()); action.entity_mut(id).remove_solid(); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert!(!env.sh.get(coord).solid()); } #[test] fn insert_move() { let mut env = Env::new(); let mut action = EcsAction::new(); let start_coord = Coord::new(1, 2); let end_coord = Coord::new(1, 3); let id = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(start_coord); entity.insert_solid(); entity.id() }; env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); action.entity_mut(id).insert_position(end_coord); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert!(!env.sh.get(start_coord).solid()); assert!(env.sh.get(end_coord).solid()); } #[test] fn remove_position() { let mut env = Env::new(); let mut action = EcsAction::new(); let start_coord = Coord::new(1, 2); let id = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(start_coord); entity.insert_solid(); entity.id() }; env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); action.entity_mut(id).remove_position(); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert!(!env.sh.get(start_coord).solid()); } #[test] fn insert_solid()
{ let mut env = Env::new(); let mut action = EcsAction::new(); let start_coord = Coord::new(1, 2); let id = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(start_coord); entity.id() }; env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); action.entity_mut(id).insert_solid(); assert!(!env.sh.get(start_coord).solid()); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert!(env.sh.get(start_coord).solid()); }
#[test] fn track_opacity() { let mut env = Env::new(); let mut action = EcsAction::new(); let start_coord = Coord::new(1, 2); let end_coord = Coord::new(1, 3); // initialise with no opacity let id = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(start_coord); entity.id() }; env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert_eq!((env.sh.get(start_coord).opacity() * 10.0).round(), 0.0 * 10.0); // add an opacity of 0.5 action.entity_mut(id).insert_opacity(0.5); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert_eq!((env.sh.get(start_coord).opacity() * 10.0).round(), (0.5_f64 * 10.0).round()); // decrease opacity to 0.2 action.entity_mut(id).insert_opacity(0.2); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert_eq!((env.sh.get(start_coord).opacity() * 10.0).round(), 0.2 * 10.0); // move the entity action.entity_mut(id).insert_position(end_coord); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert_eq!((env.sh.get(start_coord).opacity() * 10.0).round(), 0.0 * 10.0); assert_eq!((env.sh.get(end_coord).opacity() * 10.0).round(), 0.2 * 10.0); } #[test] fn insert_move_multiple() { let mut env = Env::new(); let mut action = EcsAction::new(); let start_coord = Coord::new(1, 2); let end_coord = Coord::new(1, 3); // add solid entity let id_a = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(start_coord); entity.insert_solid(); entity.id() }; env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert!(env.sh.get(start_coord).solid()); // add second solid entity in same cell let id_b = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(start_coord); entity.insert_solid(); entity.id() }; env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert!(env.sh.get(start_coord).solid()); // move original entity action.entity_mut(id_a).insert_position(end_coord); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert!(env.sh.get(start_coord).solid()); assert!(env.sh.get(end_coord).solid()); // move second entity action.entity_mut(id_b).insert_position(end_coord); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert!(!env.sh.get(start_coord).solid()); assert!(env.sh.get(end_coord).solid()); // move both entities in single action action.entity_mut(id_a).insert_position(start_coord); action.entity_mut(id_b).insert_position(start_coord); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert!(env.sh.get(start_coord).solid()); assert!(!env.sh.get(end_coord).solid()); } #[test] fn entity_set() { let mut env = Env::new(); let mut action = EcsAction::new(); let coord_a = Coord::new(1, 2); let coord_b = Coord::new(1, 3); let id_a = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(coord_a); entity.id() }; let id_b = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(coord_a); entity.id() }; assert!(env.sh.get(coord_a).entity_ids().is_empty()); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); { let entities = env.sh.get(coord_a).entity_ids(); assert!(entities.contains(id_a)); assert!(entities.contains(id_b)); assert!(entities.len() == 2); } action.entity_mut(id_b).insert_position(coord_b); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); { let entities_a = env.sh.get(coord_a).entity_ids(); let entities_b = env.sh.get(coord_b).entity_ids(); assert!(entities_a.len() == 1); 
assert!(entities_b.len() == 1); assert!(entities_a.contains(id_a)); assert!(entities_b.contains(id_b)); } } #[test] fn component_move() { let mut env = Env::new(); let mut action = EcsAction::new(); let coord_a = Coord::new(1, 2); let coord_b = Coord::new(1, 3); let id_a = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(coord_a); entity.insert_solid(); entity.id() }; let id_b = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(coord_b); entity.insert_opacity(0.5); entity.id() }; env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert!(env.sh.get(coord_a).solid()); assert!(!env.sh.get(coord_b).solid()); assert_eq!((env.sh.get(coord_a).opacity() * 10.0).round(), (0_f64 * 10.0).round()); assert_eq!((env.sh.get(coord_b).opacity() * 10.0).round(), (0.5_f64 * 10.0).round()); action.move_solid(id_a, id_b); action.move_opacity(id_b, id_a); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert!(!env.sh.get(coord_a).solid()); assert!(env.sh.get(coord_b).solid()); assert_eq!((env.sh.get(coord_a).opacity() * 10.0).round(), (0.5_f64 * 10.0).round()); assert_eq!((env.sh.get(coord_b).opacity() * 10.0).round(), (0_f64 * 10.0).round()); } #[test] fn component_swap() { let mut env = Env::new(); let mut action = EcsAction::new(); let coord_a = Coord::new(1, 2); let coord_b = Coord::new(1, 3); let id_a = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(coord_a); entity.insert_opacity(1.0); entity.id() }; let id_b = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(coord_b); entity.insert_opacity(0.5); entity.id() }; env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert_eq!((env.sh.get(coord_a).opacity() * 10.0).round(), (1.0_f64 * 10.0).round()); assert_eq!((env.sh.get(coord_b).opacity() * 10.0).round(), (0.5_f64 * 10.0).round()); action.swap_opacity(id_b, id_a); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert_eq!((env.sh.get(coord_a).opacity() * 10.0).round(), (0.5_f64 * 10.0).round()); assert_eq!((env.sh.get(coord_b).opacity() * 10.0).round(), (1.0_f64 * 10.0).round()); } #[test] fn component_empty_swap() { let mut env = Env::new(); let mut action = EcsAction::new(); let coord_a = Coord::new(1, 2); let coord_b = Coord::new(1, 3); let id_a = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(coord_a); entity.id() }; let id_b = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(coord_b); entity.insert_opacity(0.5); entity.id() }; env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert_eq!((env.sh.get(coord_a).opacity() * 10.0).round(), (0.0_f64 * 10.0).round()); assert_eq!((env.sh.get(coord_b).opacity() * 10.0).round(), (0.5_f64 * 10.0).round()); action.swap_opacity(id_b, id_a); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert_eq!((env.sh.get(coord_a).opacity() * 10.0).round(), (0.5_f64 * 10.0).round()); assert_eq!((env.sh.get(coord_b).opacity() * 10.0).round(), (0_f64 * 10.0).round()); } #[test] fn component_position_swap() { let mut env = Env::new(); let mut action = EcsAction::new(); let coord_a = Coord::new(1, 2); let coord_b = Coord::new(1, 3); let id_a = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(coord_a); entity.insert_opacity(1.0); entity.id() }; let id_b = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(coord_b); 
entity.insert_opacity(0.5); entity.id() }; env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert_eq!((env.sh.get(coord_a).opacity() * 10.0).round(), (1.0_f64 * 10.0).round()); assert_eq!((env.sh.get(coord_b).opacity() * 10.0).round(), (0.5_f64 * 10.0).round()); action.swap_position(id_b, id_a); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert_eq!((env.sh.get(coord_a).opacity() * 10.0).round(), (0.5_f64 * 10.0).round()); assert_eq!((env.sh.get(coord_b).opacity() * 10.0).round(), (1.0_f64 * 10.0).round()); }
{ let mut env = Env::new(); let mut action = EcsAction::new(); let start_coord = Coord::new(1, 2); let id = { let mut entity = action.entity_mut(env.ids.reserve()); entity.insert_position(start_coord); entity.id() }; env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); action.entity_mut(id).insert_solid(); assert!(!env.sh.get(start_coord).solid()); env.sh.update(&env.ctx, &action, 0); env.ctx.commit(&mut action); assert!(env.sh.get(start_coord).solid()); }
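The tests above exercise a spatial hash whose cells aggregate per-entity components. A minimal sketch of the bookkeeping they imply, assuming each cell keeps a solid reference count, a summed opacity, and the set of resident entity ids; all names here (SpatialHashCell, insert, remove) are hypothetical, not the actual API under test:

use std::collections::HashSet;

// Hypothetical cell: the tests suggest each cell aggregates components
// so queries like solid() and opacity() are O(1).
#[derive(Default)]
struct SpatialHashCell {
    solid_count: usize,       // reference count: any solid entity makes the cell solid
    opacity: f64,             // summed opacity of entities in this cell
    entity_ids: HashSet<u64>,
}

impl SpatialHashCell {
    fn solid(&self) -> bool { self.solid_count > 0 }
    fn opacity(&self) -> f64 { self.opacity }
    fn insert(&mut self, id: u64, solid: bool, opacity: f64) {
        if self.entity_ids.insert(id) {
            if solid { self.solid_count += 1; }
            self.opacity += opacity;
        }
    }
    fn remove(&mut self, id: u64, solid: bool, opacity: f64) {
        if self.entity_ids.remove(&id) {
            if solid { self.solid_count -= 1; }
            self.opacity -= opacity;
        }
    }
}

fn main() {
    let mut cell = SpatialHashCell::default();
    cell.insert(1, true, 0.5);
    cell.insert(2, true, 0.0);
    cell.remove(1, true, 0.5);
    assert!(cell.solid()); // one solid entity remains
    assert_eq!((cell.opacity() * 10.0).round(), 0.0);
}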
instructions.component.ts
import { Component, OnInit } from '@angular/core'; @Component({ selector: 'instructions', templateUrl: './instructions.component.html', styleUrls: ['./instructions.component.css'] }) export class InstructionsComponent implements OnInit { constructor() { }
}
ngOnInit() { }
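For context, this component only renders once it is declared in a module and referenced by its selector; a minimal sketch assuming a hypothetical AppModule and host component:

import { NgModule, Component } from '@angular/core';
import { BrowserModule } from '@angular/platform-browser';
import { InstructionsComponent } from './instructions.component';

// Hypothetical host component using the 'instructions' selector.
@Component({
  selector: 'app-root',
  template: '<instructions></instructions>'
})
export class AppComponent { }

@NgModule({
  imports: [BrowserModule],
  declarations: [AppComponent, InstructionsComponent],
  bootstrap: [AppComponent]
})
export class AppModule { }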
main.rs
struct User { username: String, sign_in_count: u64, email: String, active: bool, } fn
() { struct Color(i32, i32, i32); struct Point(i32, i32, i32); let black = Color(0, 0, 0); let origin = Point(0, 0, 0); } //fn main fn tes() { let user1 = User { email: String::from("[email protected]"), username: String::from("Then"), active: true, sign_in_count: 1, }; let mut user2 = User { email: String::from("[email protected]"), username: String::from("TK"), active: true, sign_in_count: 1, }; user2.email = String::from("[email protected]"); // println!("u1.email = {}, u1.name = {}, u2.email = {}", user1.email, user1.username, user2.email); /* let user3 = build_user("[email protected]", "Tn"); println!("u3.email = {}, u3.name = {}", user3.email, user3.username); */ let user4 = User { email: String::from("[email protected]"), username: String::from("Tw"), active: user1.active, sign_in_count: user1.sign_in_count, }; let user5 = User { email: String::from("[email protected]"), username: String::from("U4e"), ..user1 }; println!("user4.active = {}, user5.sign_in_count = {}", user4.active, user5.sign_in_count); } fn build_user(email: String, username: String) -> User { /* //complex User { email: email, username: username, active: true, sign_in_count: 1, } */ /* * simple */ User { email, username, active: true, sign_in_count: 1, } }
main
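One subtlety the user5 example above relies on: struct update syntax (..user1) moves any non-Copy field it takes from the base, so user1 is partially moved afterwards. A self-contained sketch of that behaviour:

struct User {
    username: String,
    active: bool,
}

fn main() {
    let user1 = User { username: String::from("alice"), active: true };
    // `..user1` moves the non-Copy `username` out of user1.
    let user2 = User { active: false, ..user1 };
    println!("{} {}", user2.username, user1.active); // bool is Copy, so user1.active still works
    // println!("{}", user1.username); // error[E0382]: value moved to user2
}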
devicehive.device.js
(function(root, factory) { if (typeof define === 'function' && define.amd) { define([], factory); } else if (typeof exports === 'object') { module.exports = factory(); } else { root.DHDevice = factory(); } }(this, function() { var utils = (function () { 'use strict'; var utils = { isArray: Array.isArray || function (obj) { return Object.prototype.toString.call(obj) === '[object Array]'; }, isString: function (obj) { return Object.prototype.toString.call(obj) === '[object String]'; }, inArray: function (val, arr, ind) { if (arr) { if (Array.prototype.indexOf) { return arr.indexOf(val, ind); } else { var len = arr.length, i = +ind || 0; if (!len || (i >= len)) { return -1; } i = i < 0 ? Math.max(0, len + i) : i; for (; i < len; i++) { if (i in arr && arr[i] === val) { return i; } } } return -1; } }, forEach: function (obj, callback) { var i; if (this.isArray(obj)) { var len = obj.length; for (i = 0; i < len; i++) { if (callback.call(obj[i], i, obj[i]) === false) { break; } } } else { for (i in obj) { if (obj.hasOwnProperty(i)) { if (callback.call(obj[i], i, obj[i]) === false) { break; } } } } return obj; }, // used for kinoma because it doesn't support Array.prototype.slice.call(arguments) toArray: function (args) { if (!args) { return null; } var mass = []; for (var i = 0, l = args.length; i < l; i++) { mass.push(args[i]); } return mass; }, parseDate: function (date) { return new Date(date.substring(0, 4), parseInt(date.substring(5, 7), 10) - 1, date.substring(8, 10), date.substring(11, 13), date.substring(14, 16), date.substring(17, 19), date.substring(20, 23)); }, formatDate: function (date) { if (utils.isString(date)) return date; // already formatted string - do not modify if (Object.prototype.toString.call(date) !== '[object Date]') throw new Error('Invalid object type'); var pad = function (value, length) { value = String(value); length = length || 2; while (value.length < length) value = "0" + value; return value; }; return date.getUTCFullYear() + "-" + pad(date.getUTCMonth() + 1) + "-" + pad(date.getUTCDate()) + "T" + pad(date.getUTCHours()) + ":" + pad(date.getUTCMinutes()) + ":" + pad(date.getUTCSeconds()) + "." + pad(date.getUTCMilliseconds(), 3); }, encodeBase64: function (data) { var b64 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='; var o1, o2, o3, h1, h2, h3, h4, bits, i = 0, ac = 0, enc = "", tmp_arr = []; if (!data) { return data; } do { // pack three octets into four hexets o1 = data.charCodeAt(i++); o2 = data.charCodeAt(i++); o3 = data.charCodeAt(i++); bits = o1 << 16 | o2 << 8 | o3; h1 = bits >> 18 & 0x3f; h2 = bits >> 12 & 0x3f; h3 = bits >> 6 & 0x3f; h4 = bits & 0x3f; // use hexets to index into b64, and append result to encoded string tmp_arr[ac++] = b64.charAt(h1) + b64.charAt(h2) + b64.charAt(h3) + b64.charAt(h4); } while (i < data.length); enc = tmp_arr.join(''); var r = data.length % 3; return (r ? enc.slice(0, r - 3) : enc) + '==='.slice(r || 3); }, noop: function () { }, createCallback: function (cb) { return cb && Object.prototype.toString.call(cb) === '[object Function]' ? cb : this.noop; }, isGuid: function (val) { return val && /^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i.test(val); }, serializeQuery: function (obj) { var str = '', key, val; for (key in obj) { if (obj.hasOwnProperty(key)) { if (str != '') { str += '&'; } val = obj[key]; val = val == null ? 
'' : val; str += encodeURIComponent(key) + '=' + encodeURIComponent(val); } } return str; }, makeUrl: function (params) { var method = params.method, url = params.url, data = params.data; if (method === 'GET') { if (data) { data = utils.serializeQuery(data); data && (url += (url.indexOf('?') != -1 ? '&' : '?') + data); } } return url; }, serverErrorMessage: function (http) { var errMsg = 'DeviceHive server error'; if (http.responseText) { try { errMsg += ' - ' + JSON.parse(http.responseText).message; } catch (e) { errMsg += ' - ' + http.responseText; } } return {error: errMsg}; }, errorMessage: function (msg) { return {error: 'DeviceHive error: ' + msg}; }, setTimeout: function (cb, delay) { return setTimeout(cb, delay); }, clearTimeout: function (timeoutID) { clearTimeout(timeoutID); } }; return utils; }()); var Events = (function () { 'use strict'; var Events = function () { }; Events.prototype = { bind: function (name, callback, context) { this._handlers || (this._handlers = {}); var events = this._handlers[name] || (this._handlers[name] = []); events.push({callback: callback, context: context || this}); return this; }, unbind: function (name, callback) { if (!name && !callback) { this._handlers = null; return this; } var events = this._handlers[name]; if (!events) { return this; } if (!callback) { delete this._handlers[name]; return this; } var remaining = []; utils.forEach(events, function (ind, ev) { if (callback && callback !== ev.callback) { remaining.push(ev); } }); if (remaining.length) { this._handlers[name] = remaining; } else { delete this._handlers[name]; } return this; }, trigger: function (name) { if (!this._handlers) { return this; } var args = utils.toArray(arguments).slice(1), events = this._handlers[name]; events && this._triggerEvents(events, args); return this; }, _triggerEvents: function (events, args) { utils.forEach(events, function (ind, ev) { ev.callback.apply(ev.context, args); }); } }; return Events; }()); var http = (function () { 'use strict'; var getXhr = utils.noop(); if (typeof XMLHttpRequest !== 'undefined') { getXhr = function () { return new XMLHttpRequest(); }; } else { getXhr = function () { try { return new ActiveXObject('Microsoft.XMLHTTP'); } catch (e) { var XMLHttpRequest = require('xhr2'); return new XMLHttpRequest; } }; } if (!getXhr()) { throw new Error('DeviceHive: XMLHttpRequest is not available'); } return { send: function (params, cb) { params.method = params.method || 'GET'; cb = utils.createCallback(cb); var xhr = getXhr(), headers = params.headers, url = utils.makeUrl(params), method = params.method; xhr.open(method, url, true); if (method == 'POST' || method == 'PUT') { xhr.setRequestHeader('Content-Type', 'application/json'); params.data = JSON.stringify(params.data); } xhr.onreadystatechange = function () { var isSuccess, err; if (xhr.readyState === 4) { isSuccess = xhr.status && xhr.status >= 200 && xhr.status < 300 || xhr.status === 304; if (!isSuccess) { err = utils.serverErrorMessage(xhr); } var result = xhr.responseText ? 
JSON.parse(xhr.responseText) : null; return cb(err, result); } }; if (headers) { for (var key in headers) { if (headers[key] !== void 0) { xhr.setRequestHeader(key, headers[key]); } } } xhr.send(params.data || void 0); return { abort: function () { xhr.abort(); } } } } }()); var restApi = (function () { 'use strict'; var authTypes = { USER: 1, KEY: 2, DEVICE: 4 }; var isFlagSet = function (variable, flag) { return (variable & flag) == flag; }; var applyAuth = function (request, params) { var authType = params.authTypes; var auth = params.auth; request.headers = params.headers || {}; if (!authType) return; if (!auth) { // library bug throw new Error('Authentication parameters must be specified for this endpoint. Endpoint auth code: ' + authType) } if (isFlagSet(authType, authTypes.KEY) && auth.accessKey) { // Set bearer token authorization request.headers['Authorization'] = 'Bearer ' + auth.accessKey; } else if (isFlagSet(authType, authTypes.DEVICE) && utils.isGuid(auth.deviceId) && utils.isGuid(auth.deviceKey)) { // Set Device authorization request.headers['Auth-DeviceID'] = auth.deviceId; request.headers['Auth-DeviceKey'] = auth.deviceKey; } else if (isFlagSet(authType, authTypes.USER)) { // Set User authorization request.headers['Authorization'] = 'Basic ' + utils.encodeBase64(auth.login + ':' + auth.password); } else { // library bug, therefore crash is necessary throw new Error('Invalid authentication parameters. Endpoint auth code: ' + authType); } }; var send = function (params, cb) { var req = { method: params.method, url: params.base + params.relative, data: params.data }; applyAuth(req, params, cb); return http.send(req, cb); }; return { /* API INFO */ info: function (serviceUrl, cb) { return send({ base: serviceUrl, relative: '/info', method: 'GET' }, cb); }, /* ACCESS KEYS */ getAccessKeys: function (serviceUrl, auth, userId, cb) { return send({ base: serviceUrl, relative: '/user/' + userId + '/accesskey', method: 'GET', authTypes: authTypes.USER, auth: auth }, cb); }, getAccessKey: function (serviceUrl, auth, userId, keyId, cb) { return send({ base: serviceUrl, relative: '/user/' + userId + '/accesskey/' + userId, method: 'GET', authTypes: authTypes.USER, auth: auth }, cb); }, insertAccessKey: function (serviceUrl, auth, userId, key, cb) { return send({ base: serviceUrl, relative: '/user/' + userId + '/accesskey', data: key, method: 'POST', authTypes: authTypes.USER, auth: auth }, cb); }, updateAccessKey: function (serviceUrl, auth, userId, keyId, key, cb) { return send({ base: serviceUrl, relative: '/user/' + userId + '/accesskey/' + keyId, data: key, method: 'PUT', authTypes: authTypes.USER, auth: auth }, cb); }, deleteAccessKey: function (serviceUrl, auth, userId, keyId, cb) { return send({ base: serviceUrl, relative: '/user/' + userId + '/accesskey/' + keyId, method: 'DELETE', authTypes: authTypes.USER, auth: auth }, cb); }, /* DEVICE */ getDevices: function (serviceUrl, auth, filter, cb) { return send({ base: serviceUrl, relative: '/device', method: 'GET', data: filter, authTypes: authTypes.USER | authTypes.KEY, auth: auth }, cb); }, getDevice: function (serviceUrl, auth, deviceId, cb) { return send({ base: serviceUrl, relative: '/device/' + deviceId, method: 'GET', authTypes: authTypes.USER | authTypes.KEY | authTypes.DEVICE, auth: auth }, cb); }, getEquipmentState: function (serviceUrl, auth, deviceId, cb) { return send({ base: serviceUrl, relative: '/device/' + deviceId + '/equipment', method: 'GET', authTypes: authTypes.USER | authTypes.KEY, auth: auth }, cb); }, 
registerDevice: function (serviceUrl, auth, deviceId, device, cb) { return send({ base: serviceUrl, relative: '/device/' + deviceId, method: 'PUT', data: device, authTypes: authTypes.USER | authTypes.KEY | authTypes.DEVICE, auth: auth }, cb); }, /* DEVICE CLASS */ getDeviceClass: function (serviceUrl, auth, deviceClassId, cb) { return send({ base: serviceUrl, relative: '/device/class/' + deviceClassId, method: 'GET', authTypes: authTypes.USER, auth: auth }, cb); }, /* COMMAND */ getCommands: function (serviceUrl, auth, deviceId, filter, cb) { if (filter && filter.start) { filter.start = utils.formatDate(filter.start); } if (filter && filter.end) { filter.end = utils.formatDate(filter.end); } return send({ base: serviceUrl, relative: '/device/' + deviceId + '/command', method: 'GET', data: filter, authTypes: authTypes.USER | authTypes.KEY | authTypes.DEVICE, auth: auth }, cb); }, getCommand: function (serviceUrl, auth, deviceId, cmdId, cb) { return send({ base: serviceUrl, relative: '/device/' + deviceId + '/command/' + cmdId, method: 'GET', authTypes: authTypes.USER | authTypes.KEY | authTypes.DEVICE, auth: auth }, cb); }, insertCommand: function (serviceUrl, auth, deviceId, cmd, cb) { return send({ base: serviceUrl, relative: '/device/' + deviceId + '/command', method: 'POST', data: cmd, authTypes: authTypes.USER | authTypes.KEY, auth: auth }, cb); }, updateCommand: function (serviceUrl, auth, deviceId, cmdId, cmd, cb) { return send({ base: serviceUrl, relative: '/device/' + deviceId + '/command/' + cmdId, method: 'PUT', data: cmd, authTypes: authTypes.USER | authTypes.KEY | authTypes.DEVICE, auth: auth }, cb); }, pollCommands: function (serviceUrl, auth, deviceId, params, cb) { return send({ base: serviceUrl, relative: '/device/' + deviceId + '/command/poll', method: 'GET', data: params, authTypes: authTypes.USER | authTypes.KEY | authTypes.DEVICE, auth: auth }, cb); }, pollManyCommands: function (serviceUrl, auth, params, cb) { return send({ base: serviceUrl, relative: '/device/command/poll', method: 'GET', data: params, authTypes: authTypes.USER | authTypes.KEY, auth: auth }, cb); }, waitCommandResult: function (serviceUrl, auth, deviceId, cmdId, params, cb) { return send({ base: serviceUrl, relative: '/device/' + deviceId + '/command/' + cmdId + '/poll', method: 'GET', data: params, authTypes: authTypes.USER | authTypes.KEY, auth: auth }, cb); }, /* NOTIFICATION */ getNotifications: function (serviceUrl, auth, deviceId, filter, cb) { if (filter && filter.start) { filter.start = utils.formatDate(filter.start); } if (filter && filter.end) { filter.end = utils.formatDate(filter.end); } return send({ base: serviceUrl, relative: '/device/' + deviceId + '/notification', method: 'GET', data: filter, authTypes: authTypes.USER | authTypes.KEY, auth: auth }, cb); }, getNotification: function (serviceUrl, auth, deviceId, notificationId, cb) { return send({ base: serviceUrl, relative: '/device/' + deviceId + '/notification/' + notificationId, method: 'GET', authTypes: authTypes.USER | authTypes.KEY, auth: auth }, cb); }, insertNotification: function (serviceUrl, auth, deviceId, notification, cb) { return send({ base: serviceUrl, relative: '/device/' + deviceId + '/notification', method: 'POST', data: notification, authTypes: authTypes.USER | authTypes.KEY | authTypes.DEVICE, auth: auth }, cb); }, pollNotifications: function (serviceUrl, auth, deviceId, params, cb) { return send({ base: serviceUrl, relative: '/device/' + deviceId + '/notification/poll', method: 'GET', data: params, authTypes: 
authTypes.USER | authTypes.KEY, auth: auth }, cb); }, pollManyNotifications: function (serviceUrl, auth, params, cb) { return send({ base: serviceUrl, relative: '/device/notification/poll', method: 'GET', data: params, authTypes: authTypes.USER | authTypes.KEY, auth: auth }, cb); }, /* NETWORK */ getNetworks: function (serviceUrl, auth, filter, cb) { return send({ base: serviceUrl, relative: '/network', method: 'GET', data: filter, authTypes: authTypes.USER | authTypes.KEY, auth: auth }, cb); }, getNetwork: function (serviceUrl, auth, networkId, cb) { return send({ base: serviceUrl, relative: '/network/' + networkId, method: 'GET', authTypes: authTypes.USER | authTypes.KEY, auth: auth }, cb); }, insertNetwork: function (serviceUrl, auth, network, cb) { return send({ base: serviceUrl, relative: '/network', method: 'POST', data: network, authTypes: authTypes.USER, auth: auth }, cb); }, updateNetwork: function (serviceUrl, auth, networkId, network, cb) { return send({ base: serviceUrl, relative: '/network/' + networkId, method: 'PUT', data: network, authTypes: authTypes.USER, auth: auth }, cb); }, deleteNetwork: function (serviceUrl, auth, networkId, cb) { return send({ base: serviceUrl, relative: '/network/' + networkId, method: 'DELETE', authTypes: authTypes.USER, auth: auth }, cb); }, /* OAUTH CLIENT */ /* OAUTH GRANT */ /* USER */ getCurrentUser: function (serviceUrl, auth, cb) { return send({ base: serviceUrl, relative: '/user/current', method: 'GET', authTypes: authTypes.USER, auth: auth }, cb); }, updateCurrentUser: function (serviceUrl, auth, user, cb) { return send({ base: serviceUrl, relative: '/user/current', method: 'PUT', data: user, authTypes: authTypes.USER, auth: auth }, cb); } }; }()); var DeviceHive = (function () { 'use strict'; var changeChannelState = function (self, newState, oldState) { oldState = oldState || self.channelState; if (oldState === self.channelState) { self.channelState = newState; self._events = self._events || new Events(); self._events.trigger('onChannelStateChanged', { oldState: oldState, newState: newState }); return true; } return false; }; // DeviceHive channel states var channelStates = { disconnected: 0, // channel is not connected connecting: 1, // channel is being connected connected: 2 // channel is connected }; DeviceHive = { channelStates: channelStates, channelState: channelStates.disconnected, // opens a communication channel to the server // supported channels: webSockets, longPolling // callback (cb) must be a function with the following arguments: // - errors: an error object if any errors occurred // - channel: a name of the opened channel openChannel: function (cb, channels) { cb = utils.createCallback(cb); if (!changeChannelState(this, this.channelStates.connecting, this.channelStates.disconnected)) { cb(null); return; } var self = this; function manageInfo(info) { self.serverInfo = info; if (!channels) { channels = []; utils.forEach(self._channels, function (t) { channels.push(t); }); } else if (!utils.isArray(channels)) { channels = [channels]; } var emptyChannel = true; (function checkChannel(channels) { utils.forEach(channels, function (ind) { // enumerate all channels in order var channel = this; if (self._channels[channel]) { self._channel = new self._channels[channel](self); self._channel.open(function (err) { if (err) { var channelsToCheck = channels.slice(++ind); if (!channelsToCheck.length) return cb(utils.errorMessage('Cannot open any of the specified channels')); checkChannel(channelsToCheck); } else { 
changeChannelState(self, self.channelStates.connected); cb(null, channel); } }); return emptyChannel = false; } }); })(channels); emptyChannel && cb(utils.errorMessage('None of the specified channels are supported')); } if (this.serverInfo) { manageInfo(this.serverInfo); } else { restApi.info(this.serviceUrl, function (err, res) { if (!err) { manageInfo(res); } else { changeChannelState(self, self.channelStates.disconnected); cb(err, res); } }); } }, // closes the communications channel to the server // callback (cb) must be a function which will be executed when channel is closed closeChannel: function (cb) { cb = utils.createCallback(cb); if (this.channelState === this.channelStates.disconnected) return cb(null); var self = this; if (this._channel) { this._channel.close(function (err, res) { if (err) { return cb(err, res); } self._channel = null; changeChannelState(self, self.channelStates.disconnected); return cb(null); }); } }, // adds a callback that will be invoked when the communication channel state is changed // callback (cb) must be a function with the following arguments: // - channelState: channel state object with the following fields: // - oldState: previous channel state // - newState: current channel state channelStateChanged: function (cb) { cb = utils.createCallback(cb); var self = this; this._events = this._events || new Events(); this._events.bind('onChannelStateChanged', function (data) { cb.call(self, data); }); return this; }, _ensureConnectedState: function () { if (this.channelState === this.channelStates.disconnected) { throw new Error('DeviceHive: Channel is not opened, call the .openChannel() method first'); } if (this.channelState === this.channelStates.connecting) { throw new Error('DeviceHive: Channel has not been initialized, use .openChannel().done() to run logic after the channel is initialized'); } } }; return DeviceHive; }()); var LongPolling = (function () { 'use strict'; var poll = function (self, timestamp) { var params = { timestamp: timestamp }; var continuePollingCb = function (err, res) { if (!err) { var lastTimestamp = null; if (res) { utils.forEach(res, function () { var newTimestamp = self._poller.resolveTimestamp(this); if (!lastTimestamp || newTimestamp > lastTimestamp) { lastTimestamp = newTimestamp; } self._poller.onData(this); }); } poll(self, lastTimestamp || timestamp); } else { if (self._polling) { utils.setTimeout(function () { poll(self, timestamp); }, 1000); } } }; self.pollRequest = self._polling && self._poller.executePoll(params, continuePollingCb); }; var LongPolling = function (serviceUrl, poller) { this.serviceUrl = serviceUrl; this._poller = poller }; LongPolling.prototype = { startPolling: function (cb) { cb = utils.createCallback(cb); this.stopPolling(); var self = this; return restApi.info(this.serviceUrl, function (err, res) { if (err) return cb(err); self._polling = true; poll(self, res.serverTimestamp); return cb(null); }); }, stopPolling: function () { this._polling = false; this.pollRequest && this.pollRequest.abort(); } }; return LongPolling; }()); var WebSocketTransport = (function () { 'use strict'; var WebSocketTransport = utils.noop; WebSocketTransport.requestTimeout = 10000; WebSocketTransport.prototype = { _handler: utils.noop, open: function (url, cb) { cb = utils.createCallback(cb); if (!WebSocket) { return cb(utils.errorMessage('WebSockets are not supported')); } var self = this; var opened = false; this._native = new WebSocket(url); this._native.onopen = function (e) { opened = true; cb(null, e); }; 
this._native.onmessage = function (e) { var response = JSON.parse(e.data); if (self._requests && response.requestId) { var request = self._requests[response.requestId]; if (request) { utils.clearTimeout(request.timeout); if (response.status && response.status == 'success') { request.cb(null, response); } else { request.cb({error: response.error}); } delete self._requests[response.requestId]; } } else { self._handler(response); } }; this._native.onclose = function (e) { if (!opened) { var err = utils.errorMessage('WebSocket connection has failed to open'); err.data = e; return cb(err); } }; }, close: function (cb) { cb = utils.createCallback(cb); this._native.onclose = function (e) { return cb(null, e); }; this._native.close(); }, message: function (cb) { this._handler = cb; }, send: function (action, data, cb) { cb = utils.createCallback(cb); var self = this, request = {}; this._requestId = this._requestId || 0; request.id = ++this._requestId; //callback for request request.cb = cb; request.timeout = utils.setTimeout(function () { request.cb(utils.errorMessage('Operation timeout')); delete self._requests[request.id]; }, WebSocketTransport.requestTimeout); this._requests = this._requests || {}; this._requests[request.id] = request; data = data || {}; data.requestId = request.id; data.action = action; this._native.send(JSON.stringify(data)); return request; } }; return WebSocketTransport; }()); var WebSocketClientApi = (function () { 'use strict'; var WebSocketClientApi = function (events) { this._transport = new WebSocketTransport(); this._transport.message(function (response) { if (response.action == 'command/insert' && response.command && response.command.id) { events.trigger('onCommandInsert', response.deviceGuid, response.command); } if (response.action == 'command/update') { events.trigger('onCommandUpdate', response.command); } if (response.action == 'notification/insert' && response.deviceGuid && response.notification) { events.trigger('onNotification', response.deviceGuid, response.notification); } }); }; WebSocketClientApi.prototype = { open: function (baseUrl, cb) { this._transport.open(baseUrl + '/client', cb); }, close: function (cb) { this._transport.close(cb); }, getInfo: function (cb) { this._transport.send('server/info', null, cb); }, authenticate: function (username, password, key, cb) { this._transport.send('authenticate', { login: username, password: password, accessKey: key }, cb); }, sendCommand: function (params, cb) { this._transport.send('command/insert', params, cb); }, updateCommand: function (params, cb) { this._transport.send('command/update', params, cb); }, commandSubscribe: function (params, cb) { this._transport.send('command/subscribe', params, cb); }, commandUnSubscribe: function (params, cb) { this._transport.send('command/unsubscribe', params, cb); }, sendNotification: function (params, cb) { this._transport.send('notification/insert', params, cb); }, notificationSubscribe: function (params, cb) { this._transport.send('notification/subscribe', params, cb); }, notificationUnSubscribe: function (params, cb) { this._transport.send('notification/unsubscribe', params, cb); } }; return WebSocketClientApi; }()); var WebSocketDeviceApi = (function () { 'use strict'; var WebSocketDeviceApi = function (events) { this._transport = new WebSocketTransport(); this._transport.message(function (response) { if (response.action == 'command/insert' && response.command && response.command.id) { events.trigger('onCommandInsert', response.deviceGuid, response.command); } }); 
}; WebSocketDeviceApi.prototype = { open: function (baseUrl, cb) { return this._transport.open(baseUrl + '/device', cb); }, close: function (cb) { return this._transport.close(cb); }, getInfo: function (cb) { this._transport.send('server/info', null, cb); }, authenticate: function (deviceId, deviceKey, cb) { this._transport.send('authenticate', { deviceId: deviceId, deviceKey: deviceKey }, cb); }, updateCommand: function (params, cb) { this._transport.send('command/update', params, cb); }, commandSubscribe: function (params, cb) { this._transport.send('command/subscribe', params, cb); }, commandUnSubscribe: function (params, cb) { this._transport.send('command/unsubscribe', params, cb); }, sendNotification: function (params, cb) { this._transport.send('notification/insert', params, cb); } }; return WebSocketDeviceApi; }()); var LongPollingDeviceChannel = (function () { 'use strict'; LongPollingDeviceChannel = function (hive) { this._hive = hive; this._handler = utils.noop; this._events = new Events(); this.deviceIds = []; var self = this; this._lp = new LongPolling(this._hive.serviceUrl, { executePoll: function (params, continuePollingCb) { return self._hive._executeApi(restApi.pollCommands, [params, continuePollingCb]); }, resolveTimestamp: function (data) { return data.timestamp; }, onData: function (command) { self._events.trigger('onCommandInsert', self._hive.deviceId, command); } }); }; LongPollingDeviceChannel.prototype = { open: function (cb) { cb = utils.createCallback(cb); return cb(null); }, close: function (cb) { cb = utils.createCallback(cb); this._lp.stopPolling(); return cb(null); }, subscribe: function (cb) { cb = utils.createCallback(cb); return this._lp.startPolling(cb); }, unsubscribe: function (cb) { cb = utils.createCallback(cb); this._lp.stopPolling(); return cb(null); }, sendNotification: function (params, cb) { cb = utils.createCallback(cb); return this._hive._executeApi(restApi.insertNotification, [params.notification, cb]); }, updateCommand: function (cmd, cb) { cb = utils.createCallback(cb); return this._hive._executeApi(restApi.updateCommand, [cmd.commandId, cmd.command, cb]); } }; return LongPollingDeviceChannel; }()); var WebSocketDeviceChannel = (function () { 'use strict'; var WebSocketDeviceChannel = function (hive) { this._hive = hive; this._events = new Events(); }; WebSocketDeviceChannel.prototype = { open: function (cb) { cb = utils.createCallback(cb); var webSocketUrl = this._hive.serverInfo.webSocketServerUrl; if (!webSocketUrl) { cb(utils.errorMessage('Open channel failed. Cannot get web socket server url')); return; } var self = this; function
(err) { if (err) return cb(err); if (self._hive.auth.accessKey) { self._wsApi.authenticate(null, null, self._hive.auth.accessKey, cb); } else { self._wsApi.authenticate(self._hive.auth.deviceId, self._hive.auth.deviceKey, cb); } } this._wsApi = self._hive.auth.accessKey ? new WebSocketClientApi(self._events) : new WebSocketDeviceApi(self._events); this._wsApi.open(webSocketUrl, onOpen); }, close: function (cb) { cb = utils.createCallback(cb); this._wsApi.close(cb); }, subscribe: function (cb) { cb = utils.createCallback(cb); var self = this; this._wsApi.commandSubscribe({ deviceGuids: [this._hive.deviceId]}, function (err, res) { if (!err) { self._sub = res.subscriptionId; } return cb(err); }); }, unsubscribe: function (cb) { cb = utils.createCallback(cb); this._wsApi.commandUnSubscribe({ subscriptionId: this._sub }, function (err, res) { if (!err) { self._sub = null; } return cb(err); }); }, sendNotification: function (params, cb) { cb = utils.createCallback(cb); this._wsApi.sendNotification(params, cb); }, updateCommand: function (cmd, cb) { cb = utils.createCallback(cb); this._wsApi.updateCommand(cmd, cb); } }; return WebSocketDeviceChannel; }()); var DHDevice = (function () { 'use strict'; // DHDevice object constructor // specify device key or access key as an authentication/authorization parameters // authentication with device key is deprecated and will be removed in future var DHDevice = function (serviceUrl, deviceId, deviceKeyOrAccessKey) { this.serviceUrl = serviceUrl; this.deviceId = deviceId; // save auth information this.auth = {}; if (utils.isGuid(deviceKeyOrAccessKey)) { this.auth.deviceId = deviceId; this.auth.deviceKey = deviceKeyOrAccessKey; } else { this.auth.accessKey = deviceKeyOrAccessKey; } }; DHDevice.prototype = DeviceHive; DHDevice.constructor = DHDevice; // gets information about the current device // callback (cb) must be a function with the following arguments: // - errors: an error object if any errors occurred // - device: current device information DHDevice.prototype.getDevice = function (cb) { cb = utils.createCallback(cb); return this._executeApi(restApi.getDevice, [cb]); }; // registers a device in the DeviceHive network with the current device id // device key will be implicitly added if specified as an authentication parameter // callback (cb) must be a function with the following arguments: // - errors: an error object if any errors occurred DHDevice.prototype.registerDevice = function (device, cb) { cb = utils.createCallback(cb); device.key = this.auth.deviceKey; return this._executeApi(restApi.registerDevice, [device, cb]); }; // updates a device in the DeviceHive network // callback (cb) must be a function with the following arguments: // - errors: an error object if any errors occurred DHDevice.prototype.updateDevice = function (device, cb) { cb = utils.createCallback(cb); return this._executeApi(restApi.registerDevice, [device, cb]); }; // opens a communication channel to the server // check DeviceHive.prototype.openChannel for more information var openChannelBase = DHDevice.prototype.openChannel; DHDevice.prototype.openChannel = function (cb, channels) { cb = utils.createCallback(cb); var self = this; openChannelBase.call(this, function (err, res) { if (err) return cb(err, res); self._channel._events.bind('onCommandInsert', function (deviceId, cmd) { cmd.update = function (params, onUpdated) { if (!params || !params.status) { return onUpdated(utils.errorMessage('Command status must be specified')); } var updateParams = {}; updateParams.commandId 
= cmd.id; updateParams.command = params || {}; updateParams.deviceGuid = self.deviceId; return self._channel.updateCommand(updateParams, onUpdated); }; self._events.trigger('onCommand', cmd); }); return cb(err, res); }, channels); }; // subscribes to device commands // callback (cb) must be a function with the following arguments: // - errors: an error object if any errors occurred DHDevice.prototype.subscribe = function (cb) { cb = utils.createCallback(cb); this._ensureConnectedState(); return this._channel.subscribe(cb); }; // unsubscribes from device commands // callback (cb) must be a function with the following arguments: // - errors: an error object if any errors occurred DHDevice.prototype.unsubscribe = function (cb) { cb = utils.createCallback(cb); this._ensureConnectedState(); return this._channel.unsubscribe(cb); }; // sends new notification to the client // callback (cb) must be a function with the following arguments: // - errors: an error object if any errors occurred DHDevice.prototype.sendNotification = function (notification, params, cb) { cb = utils.createCallback(cb); this._ensureConnectedState(); return this._channel.sendNotification({ notification: {notification: notification, parameters: params}, deviceGuid: this.deviceId }, cb); }; // adds a callback that will be invoked when a command from client is received // callback (cb) must be a function with the following arguments: // - command: received command DHDevice.prototype.command = function (cb) { cb = utils.createCallback(cb); var self = this; this._events.bind('onCommand', function (command) { cb.call(self, command); }); return this; }; DHDevice.prototype._executeApi = function (endpoint, args) { var endpointParams = [this.serviceUrl, this.auth, this.deviceId].concat(args); return endpoint.apply(null, endpointParams); }; DHDevice.prototype._channels = {}; DHDevice.prototype._channels.websocket = WebSocketDeviceChannel; DHDevice.prototype._channels.longpolling = LongPollingDeviceChannel; return DHDevice; }()); return DHDevice; }));
onOpen
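A hedged usage sketch of the device API defined above; the URL, ids, and key are placeholders, and the flow (openChannel → subscribe → command → sendNotification) follows the methods the library exposes:

// Placeholder endpoint and credentials - substitute real values.
var device = new DHDevice(
    'http://localhost/api',                  // serviceUrl
    '11111111-1111-4111-8111-111111111111',  // deviceId (guid)
    'my-access-key');                        // access key (or a device key guid)

device.openChannel(function (err, channel) {
    if (err) return console.log(err.error);
    console.log('connected via ' + channel);

    // React to commands pushed from clients.
    device.command(function (cmd) {
        console.log('got command: ' + cmd.command);
        cmd.update({ status: 'Completed' });
    });

    device.subscribe(function (err) {
        if (err) return console.log(err.error);
        // Report a notification back to the server.
        device.sendNotification('equipment', { state: 1 });
    });
}, ['websocket', 'longpolling']);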
Incident.js
import React, {Component} from 'react' import '../css/controlpanel.css' import high from '../icons/high.png' import low from '../icons/low.png' import medium from '../icons/medium.png' import Modal from 'react-modal'; import { Button }from 'reactstrap' import { withRouter } from 'react-router'; import moment from 'moment' import logo from '../icons/modal_img.jpg' import { FontAwesomeIcon } from '@fortawesome/react-fontawesome' import { faEye, faCheckDouble } from '@fortawesome/free-solid-svg-icons' import { incidentService } from '../services/incidents.service'; Modal.setAppElement('#root'); const customStyles = { content : { top: '20%', left: '20%', right:'20%', bottom:'25%', } }; class Incident extends Component { constructor(props) { super(props); this.state = { showModal: false };
this.handleClick = this.handleClick.bind(this); this.accept_incident = this.accept_incident.bind(this); } OpenModal (event) { event.cancelBubble = true; if(event.stopPropagation) event.stopPropagation(); this.setState({ showModal: true}); } CloseModal (e) { e.stopPropagation(); this.setState({ showModal: false }); }; authHeader() { // return authorization header with jwt token const token = localStorage.getItem('token'); if (token) { return { Authorization: `Bearer ${token}` }; } else { return {}; } } handleClick() { let id = this.props.incident._id let coordinates = [] //array of objects of coordinates let coordinate = {} coordinate['lat'] = this.props.incident.location['latitude'] coordinate['lng'] = this.props.incident.location['longitude'] coordinate['priority'] = this.props.incident.priority coordinates.push(coordinate) localStorage.setItem("coordinates", JSON.stringify(coordinates)) this.props.history.push(`/incident/${id}`) } accept_incident(e) { incidentService.accept_incident(this.props.incident._id) window.location.reload(false); } render() { let icon if(this.props.incident.priority === "Χαμηλή") icon = low else if(this.props.incident.priority === "Μέτρια") icon = medium if(this.props.incident.priority === "Υψηλή") icon = high return( <div> <br/> <div className="row"> <Modal overlayClassName={{ base: 'Modal-overlay', afterOpen: 'Modal-overlay-in', beforeClose: 'Modal-overlay-out' }} isOpen={this.state.showModal} contentLabel="onRequestClose" onRequestClose={this.CloseModal} style={customStyles}> <h6 style={{fontSize: "25px", marginLeft: "35%" }}>Προεπισκόπηση Συμβάντος</h6> <br/> <img className="modal_img" src={logo} alt='' /> <table style={{marginLeft: "35%"}}> <tbody> <tr> <td className="pr-2" style={{fontSize: "20px"}}>Ημερομηνία - Ώρα:</td> <td>{moment(this.props.incident.date).format('DD-MM-YYYY')} {moment(this.props.incident.date).format('HH:mm')}</td> </tr> <tr> <td className="pr-2" style={{fontSize: "20px"}}>Προτεραιότητα:</td> <td>{this.props.incident.priority}</td> </tr> <tr> <td className="pr-2" style={{fontSize: "20px"}}>Όνομα Αναφέροντα:</td> <td>{this.props.incident.callerName}</td> </tr> <tr> <td className="pr-2" style={{fontSize: "20px"}}>Τηλέφωνο Αναφέροντα:</td> <td>{this.props.incident.callerNumber}</td> </tr> <tr> <td className="pr-2" style={{fontSize: "20px"}}>Διεύθυνση:</td> <td>{this.props.incident.location.address}</td> </tr> </tbody> </table> <div id="container"> <Button style = {{marginTop: '12%', backgroundColor: 'white', color: 'black'}} onClick={this.CloseModal}>Κλείσιμο</Button> <Button style = {{marginTop: '12%', marginLeft: '2%'}} onClick={this.handleClick}>Περισσότερα</Button> </div> </Modal> {((this.props.incident.departmentReports >= this.props.incident.departments.length && this.props.incident.departments.length > 0) && Number(this.props.usertype) === 0 )? 
<div className = "container-fluid" style = {{marginLeft: this.props.style.marginLeft}}> <div className = "row" id="inc_box"> <div className="col-lg-1" onClick={this.handleClick}> <img src={icon} alt= ''/> </div> <div className="col-md-3 ml-1" onClick={this.handleClick} style={{marginLeft: '-100%'}}>{moment(this.props.incident.date).format('DD-MM-YYYY')} {moment(this.props.incident.date).format('HH:mm')}</div> <div className="col-sm-4 text-truncate" onClick={this.handleClick} style={{marginLeft: '-4%'}}>{this.props.incident.location.address}</div> <div className="col text-truncate" onClick={this.handleClick} style={{marginLeft: "2%"}}>{this.props.incident.title}</div> <div className="col" style={{marginLeft: "-5.9%", zIndex: "10"}}><FontAwesomeIcon className="iconBack" icon={ faEye } style={{height: '16px'}} onClick={this.OpenModal}/></div> <div className="col" style={{marginLeft: "-12.2%"}}><FontAwesomeIcon className="iconBack" icon={ faCheckDouble } style={{height: '16px', color: "#7684b8"}}/></div> </div> </div> :( //the incidents cannot be closed yet <div className = "container-fluid" style = {{marginLeft: this.props.style.marginLeft}}> <div className = "row" id="inc_box"> <div className="col-lg-1" onClick={this.handleClick}> <img src={icon} alt= ''/> </div> <div className="col-md-3 ml-1" onClick={this.handleClick} style={{marginLeft: '-100%'}}>{moment(this.props.incident.date).format('DD-MM-YYYY')} {moment(this.props.incident.date).format('HH:mm')}</div> <div className="col-sm-4 text-truncate" onClick={this.handleClick} style={{marginLeft: '-4%'}}>{this.props.incident.location.address}</div> <div className="col text-truncate" onClick={this.handleClick} style={{marginLeft: "2%"}}>{this.props.incident.title}</div> <div className="col" style={{marginLeft: "-11.2%"}}><FontAwesomeIcon className="iconBack" id="close_inc" icon={ faEye } style={{height: '16px'}} onClick={this.OpenModal}/></div> {Number(this.props.usertype) === 2 && <div className="col-md" style={{marginLeft: "-3%"}} onClick={this.accept_incident}> <button type="button" className="btn btn-primary btn-sm" >Αποδοχή</button> </div> } </div> </div> )} </div> <br/> </div> ); } } export default withRouter(Incident);
this.OpenModal = this.OpenModal.bind(this); this.CloseModal = this.CloseModal.bind(this);
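For reference, the prop shape this component reads in render(), inferred from the code above; all values are illustrative placeholders only:

// Illustrative only - field names inferred from render() above.
const incident = {
  _id: '42',
  title: 'Example incident',
  priority: 'Υψηλή',               // compared against Χαμηλή / Μέτρια / Υψηλή
  date: '2020-01-01T12:00:00Z',
  callerName: 'N. N.',
  callerNumber: '210 0000000',
  location: { address: 'Example street 1', latitude: 37.98, longitude: 23.72 },
  departments: [],
  departmentReports: 0,
};

function IncidentRow() {
  return <Incident incident={incident} usertype={2} style={{ marginLeft: 0 }} />;
}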
flp.py
# # flp - Module to load fl forms from fd files # # Jack Jansen, December 1991 # import os import sys import FL SPLITLINE = '--------------------' FORMLINE = '=============== FORM ===============' ENDLINE = '==============================' class error(Exception): pass ################################################################## # Part 1 - The parsing routines # ################################################################## # # Externally visible function. Load form. # def parse_form(filename, formname): forms = checkcache(filename) if forms is None: forms = parse_forms(filename) if forms.has_key(formname): return forms[formname] else: raise error, 'No such form in fd file' # # Externally visible function. Load all forms. # def parse_forms(filename): forms = checkcache(filename) if forms is not None: return forms fp = _open_formfile(filename) nforms = _parse_fd_header(fp) forms = {} for i in range(nforms): form = _parse_fd_form(fp, None) forms[form[0].Name] = form writecache(filename, forms) return forms # # Internal: see if a cached version of the file exists # MAGIC = '.fdc' _internal_cache = {} # Used by frozen scripts only def checkcache(filename): if _internal_cache.has_key(filename): altforms = _internal_cache[filename] return _unpack_cache(altforms) import marshal fp, filename = _open_formfile2(filename) fp.close() cachename = filename + 'c' try: fp = open(cachename, 'r') except IOError: #print 'flp: no cache file', cachename return None try: if fp.read(4) != MAGIC: print 'flp: bad magic word in cache file', cachename return None cache_mtime = rdlong(fp) file_mtime = getmtime(filename) if cache_mtime != file_mtime: #print 'flp: outdated cache file', cachename return None #print 'flp: valid cache file', cachename altforms = marshal.load(fp) return _unpack_cache(altforms) finally: fp.close() def _unpack_cache(altforms):
def rdlong(fp): s = fp.read(4) if len(s) != 4: return None a, b, c, d = s[0], s[1], s[2], s[3] return ord(a)<<24 | ord(b)<<16 | ord(c)<<8 | ord(d) def wrlong(fp, x): a, b, c, d = (x>>24)&0xff, (x>>16)&0xff, (x>>8)&0xff, x&0xff fp.write(chr(a) + chr(b) + chr(c) + chr(d)) def getmtime(filename): import os from stat import ST_MTIME try: return os.stat(filename)[ST_MTIME] except os.error: return None # # Internal: write cached version of the form (parsing is too slow!) # def writecache(filename, forms): import marshal fp, filename = _open_formfile2(filename) fp.close() cachename = filename + 'c' try: fp = open(cachename, 'w') except IOError: print 'flp: can\'t create cache file', cachename return # Never mind fp.write('\0\0\0\0') # Seek back and write MAGIC when done wrlong(fp, getmtime(filename)) altforms = _pack_cache(forms) marshal.dump(altforms, fp) fp.seek(0) fp.write(MAGIC) fp.close() #print 'flp: wrote cache file', cachename # # External: print some statements that set up the internal cache. # This is for use with the "freeze" script. You should call # flp.freeze(filename) for all forms used by the script, and collect # the output on a file in a module file named "frozenforms.py". Then # in the main program of the script import frozenforms. # (Don't forget to take this out when using the unfrozen version of # the script!) # def freeze(filename): forms = parse_forms(filename) altforms = _pack_cache(forms) print 'import flp' print 'flp._internal_cache[', repr(filename), '] =', altforms # # Internal: create the data structure to be placed in the cache # def _pack_cache(forms): altforms = {} for name in forms.keys(): obj, list = forms[name] altobj = obj.__dict__ altlist = [] for obj in list: altlist.append(obj.__dict__) altforms[name] = altobj, altlist return altforms # # Internal: Locate form file (using PYTHONPATH) and open file # def _open_formfile(filename): return _open_formfile2(filename)[0] def _open_formfile2(filename): if filename[-3:] != '.fd': filename = filename + '.fd' if filename[0] == '/': try: fp = open(filename,'r') except IOError: fp = None else: for pc in sys.path: pn = os.path.join(pc, filename) try: fp = open(pn, 'r') filename = pn break except IOError: fp = None if fp is None: raise error, 'Cannot find forms file ' + filename return fp, filename # # Internal: parse the fd file header, return number of forms # def _parse_fd_header(file): # First read the magic header line datum = _parse_1_line(file) if datum != ('Magic', 12321): raise error, 'Not a forms definition file' # Now skip until we know number of forms while 1: datum = _parse_1_line(file) if type(datum) == type(()) and datum[0] == 'Numberofforms': break return datum[1] # # Internal: parse fd form, or skip if name doesn't match. # the special value None means 'always parse it'. # def _parse_fd_form(file, name): datum = _parse_1_line(file) if datum != FORMLINE: raise error, 'Missing === FORM === line' form = _parse_object(file) if form.Name == name or name is None: objs = [] for j in range(form.Numberofobjects): obj = _parse_object(file) objs.append(obj) return (form, objs) else: for j in range(form.Numberofobjects): _skip_object(file) return None # # Internal class: a convenient place to store object info fields # class _newobj: def add(self, name, value): self.__dict__[name] = value def make(self, dict): for name in dict.keys(): self.add(name, dict[name]) # # Internal parsing routines. 
# def _parse_string(str): if '\\' in str: s = '\'' + str + '\'' try: return eval(s) except: pass return str def _parse_num(str): return eval(str) def _parse_numlist(str): slist = str.split() nlist = [] for i in slist: nlist.append(_parse_num(i)) return nlist # This dictionary maps item names to parsing routines. # If no routine is given '_parse_num' is default. _parse_func = { \ 'Name': _parse_string, \ 'Box': _parse_numlist, \ 'Colors': _parse_numlist, \ 'Label': _parse_string, \ 'Name': _parse_string, \ 'Callback': _parse_string, \ 'Argument': _parse_string } # This function parses a line, and returns either # a string or a tuple (name,value) import re prog = re.compile('^([^:]*): *(.*)') def _parse_line(line): match = prog.match(line) if not match: return line name, value = match.group(1, 2) if name[0] == 'N': name = ''.join(name.split()) name = name.lower() name = name.capitalize() try: pf = _parse_func[name] except KeyError: pf = _parse_num value = pf(value) return (name, value) def _readline(file): line = file.readline() if not line: raise EOFError return line[:-1] def _parse_1_line(file): line = _readline(file) while line == '': line = _readline(file) return _parse_line(line) def _skip_object(file): line = '' while not line in (SPLITLINE, FORMLINE, ENDLINE): pos = file.tell() line = _readline(file) if line == FORMLINE: file.seek(pos) def _parse_object(file): obj = _newobj() while 1: pos = file.tell() datum = _parse_1_line(file) if datum in (SPLITLINE, FORMLINE, ENDLINE): if datum == FORMLINE: file.seek(pos) return obj if type(datum) is not type(()) or len(datum) != 2: raise error, 'Parse error, illegal line in object: '+datum obj.add(datum[0], datum[1]) ################################################################# # Part 2 - High-level object/form creation routines # ################################################################# # # External - Create a form an link to an instance variable. # def create_full_form(inst, (fdata, odatalist)): form = create_form(fdata) exec 'inst.'+fdata.Name+' = form\n' for odata in odatalist: create_object_instance(inst, form, odata) # # External - Merge a form into an existing form in an instance # variable. # def merge_full_form(inst, form, (fdata, odatalist)): exec 'inst.'+fdata.Name+' = form\n' if odatalist[0].Class != FL.BOX: raise error, 'merge_full_form() expects FL.BOX as first obj' for odata in odatalist[1:]: create_object_instance(inst, form, odata) ################################################################# # Part 3 - Low-level object/form creation routines # ################################################################# # # External Create_form - Create form from parameters # def create_form(fdata): import fl return fl.make_form(FL.NO_BOX, fdata.Width, fdata.Height) # # External create_object - Create an object. Make sure there are # no callbacks. Returns the object created. # def create_object(form, odata): obj = _create_object(form, odata) if odata.Callback: raise error, 'Creating free object with callback' return obj # # External create_object_instance - Create object in an instance. # def create_object_instance(inst, form, odata): obj = _create_object(form, odata) if odata.Callback: cbfunc = eval('inst.'+odata.Callback) obj.set_call_back(cbfunc, odata.Argument) if odata.Name: exec 'inst.' 
+ odata.Name + ' = obj\n' # # Internal _create_object: Create the object and fill options # def _create_object(form, odata): crfunc = _select_crfunc(form, odata.Class) obj = crfunc(odata.Type, odata.Box[0], odata.Box[1], odata.Box[2], \ odata.Box[3], odata.Label) if not odata.Class in (FL.BEGIN_GROUP, FL.END_GROUP): obj.boxtype = odata.Boxtype obj.col1 = odata.Colors[0] obj.col2 = odata.Colors[1] obj.align = odata.Alignment obj.lstyle = odata.Style obj.lsize = odata.Size obj.lcol = odata.Lcol return obj # # Internal crfunc: helper function that returns correct create function # def _select_crfunc(fm, cl): if cl == FL.BEGIN_GROUP: return fm.bgn_group elif cl == FL.END_GROUP: return fm.end_group elif cl == FL.BITMAP: return fm.add_bitmap elif cl == FL.BOX: return fm.add_box elif cl == FL.BROWSER: return fm.add_browser elif cl == FL.BUTTON: return fm.add_button elif cl == FL.CHART: return fm.add_chart elif cl == FL.CHOICE: return fm.add_choice elif cl == FL.CLOCK: return fm.add_clock elif cl == FL.COUNTER: return fm.add_counter elif cl == FL.DIAL: return fm.add_dial elif cl == FL.FREE: return fm.add_free elif cl == FL.INPUT: return fm.add_input elif cl == FL.LIGHTBUTTON: return fm.add_lightbutton elif cl == FL.MENU: return fm.add_menu elif cl == FL.POSITIONER: return fm.add_positioner elif cl == FL.ROUNDBUTTON: return fm.add_roundbutton elif cl == FL.SLIDER: return fm.add_slider elif cl == FL.VALSLIDER: return fm.add_valslider elif cl == FL.TEXT: return fm.add_text elif cl == FL.TIMER: return fm.add_timer else: raise error, 'Unknown object type: %r' % (cl,) def test(): import time t0 = time.time() if len(sys.argv) == 2: forms = parse_forms(sys.argv[1]) t1 = time.time() print 'parse time:', 0.001*(t1-t0), 'sec.' keys = forms.keys() keys.sort() for i in keys: _printform(forms[i]) elif len(sys.argv) == 3: form = parse_form(sys.argv[1], sys.argv[2]) t1 = time.time() print 'parse time:', round(t1-t0, 3), 'sec.' _printform(form) else: print 'Usage: test fdfile [form]' def _printform(form): f = form[0] objs = form[1] print 'Form ', f.Name, ', size: ', f.Width, f.Height, ' Nobj ', f.Numberofobjects for i in objs: print ' Obj ', i.Name, ' type ', i.Class, i.Type print ' Box ', i.Box, ' btype ', i.Boxtype print ' Label ', i.Label, ' size/style/col/align ', i.Size,i.Style, i.Lcol, i.Alignment print ' cols ', i.Colors print ' cback ', i.Callback, i.Argument
forms = {} for name in altforms.keys(): altobj, altlist = altforms[name] obj = _newobj() obj.make(altobj) list = [] for altobj in altlist: nobj = _newobj() nobj.make(altobj) list.append(nobj) forms[name] = obj, list return forms
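A usage sketch in the module's own Python 2 idiom; 'myforms' and 'mainform' are placeholder names, and the callback method name would have to match the Callback field in the .fd file:

import flp

class MyApp:
    def cb_quit(self, obj, arg):
        print 'quit pressed, argument:', arg

app = MyApp()
forms = flp.parse_forms('myforms')            # parses myforms.fd (cached as myforms.fdc)
flp.create_full_form(app, forms['mainform'])  # binds app.mainform plus named objects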
test_init.py
"""Tests for fan platforms.""" import pytest from homeassistant.components.fan import FanEntity class BaseFan(FanEntity): """Implementation of the abstract FanEntity.""" def __init__(self):
def test_fanentity(): """Test fan entity methods.""" fan = BaseFan() assert fan.state == "off" assert fan.preset_modes is None assert fan.supported_features == 0 assert fan.percentage_step == 1 assert fan.speed_count == 100 assert fan.capability_attributes == {} # Test set_speed not required with pytest.raises(NotImplementedError): fan.oscillate(True) with pytest.raises(AttributeError): fan.set_speed("low") with pytest.raises(NotImplementedError): fan.set_percentage(0) with pytest.raises(NotImplementedError): fan.set_preset_mode("auto") with pytest.raises(NotImplementedError): fan.turn_on() with pytest.raises(NotImplementedError): fan.turn_off() async def test_async_fanentity(hass): """Test async fan entity methods.""" fan = BaseFan() fan.hass = hass assert fan.state == "off" assert fan.preset_modes is None assert fan.supported_features == 0 assert fan.percentage_step == 1 assert fan.speed_count == 100 assert fan.capability_attributes == {} # Test set_speed not required with pytest.raises(NotImplementedError): await fan.async_oscillate(True) with pytest.raises(AttributeError): await fan.async_set_speed("low") with pytest.raises(NotImplementedError): await fan.async_set_percentage(0) with pytest.raises(NotImplementedError): await fan.async_set_preset_mode("auto") with pytest.raises(NotImplementedError): await fan.async_turn_on() with pytest.raises(NotImplementedError): await fan.async_turn_off() with pytest.raises(NotImplementedError): await fan.async_increase_speed() with pytest.raises(NotImplementedError): await fan.async_decrease_speed() @pytest.mark.parametrize( "attribute_name, attribute_value", [ ("current_direction", "forward"), ("oscillating", True), ("percentage", 50), ("preset_mode", "medium"), ("preset_modes", ["low", "medium", "high"]), ("speed_count", 50), ("supported_features", 1), ], ) def test_fanentity_attributes(attribute_name, attribute_value): """Test fan entity attribute shorthand.""" fan = BaseFan() setattr(fan, f"_attr_{attribute_name}", attribute_value) assert getattr(fan, attribute_name) == attribute_value
"""Initialize the fan."""
dictionary_item.go
package fastly import ( "fmt" "sort" ) // DictionaryItem represents a dictionary item response from the Fastly API. type DictionaryItem struct { ServiceID string `mapstructure:"service_id"` DictionaryID string `mapstructure:"dictionary_id"` ItemKey string `mapstructure:"item_key"` ItemValue string `mapstructure:"item_value"` } // dictionaryItemsByKey is a sortable list of dictionary items. type dictionaryItemsByKey []*DictionaryItem // Len, Swap, and Less implement the sortable interface. func (s dictionaryItemsByKey) Len() int { return len(s) } func (s dictionaryItemsByKey) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s dictionaryItemsByKey) Less(i, j int) bool { return s[i].ItemKey < s[j].ItemKey } // ListDictionaryItemsInput is used as input to the ListDictionaryItems function. type ListDictionaryItemsInput struct { // Service is the ID of the service (required). Service string // Dictionary is the ID of the dictionary to retrieve items for (required). Dictionary string } // ListDictionaryItems returns the list of dictionary items for the // configuration version. func (c *Client) ListDictionaryItems(i *ListDictionaryItemsInput) ([]*DictionaryItem, error) { if i.Service == "" { return nil, ErrMissingService } if i.Dictionary == "" { return nil, ErrMissingDictionary } path := fmt.Sprintf("/service/%s/dictionary/%s/items", i.Service, i.Dictionary) resp, err := c.Get(path, nil) if err != nil { return nil, err } var bs []*DictionaryItem if err := decodeJSON(&bs, resp.Body); err != nil { return nil, err } sort.Stable(dictionaryItemsByKey(bs)) return bs, nil } // CreateDictionaryItemInput is used as input to the CreateDictionaryItem function. type CreateDictionaryItemInput struct { // Service is the ID of the service. Dictionary is the ID of the dictionary. // Both fields are required. Service string Dictionary string ItemKey string `form:"item_key,omitempty"` ItemValue string `form:"item_value,omitempty"` } // CreateDictionaryItem creates a new Fastly dictionary item. func (c *Client) CreateDictionaryItem(i *CreateDictionaryItemInput) (*DictionaryItem, error) { if i.Service == "" { return nil, ErrMissingService } if i.Dictionary == "" { return nil, ErrMissingDictionary } path := fmt.Sprintf("/service/%s/dictionary/%s/item", i.Service, i.Dictionary) resp, err := c.PostForm(path, i, nil) if err != nil { return nil, err } var b *DictionaryItem if err := decodeJSON(&b, resp.Body); err != nil { return nil, err } return b, nil } // GetDictionaryItemInput is used as input to the GetDictionaryItem function. type GetDictionaryItemInput struct { // Service is the ID of the service. Dictionary is the ID of the dictionary. // Both fields are required. Service string Dictionary string // ItemKey is the name of the dictionary item to fetch. ItemKey string } // GetDictionaryItem gets the dictionary item with the given parameters. func (c *Client) GetDictionaryItem(i *GetDictionaryItemInput) (*DictionaryItem, error) { if i.Service == "" { return nil, ErrMissingService } if i.Dictionary == "" { return nil, ErrMissingDictionary } if i.ItemKey == "" { return nil, ErrMissingItemKey } path := fmt.Sprintf("/service/%s/dictionary/%s/item/%s", i.Service, i.Dictionary, i.ItemKey) resp, err := c.Get(path, nil) if err != nil { return nil, err } var b *DictionaryItem if err := decodeJSON(&b, resp.Body); err != nil { return nil, err } return b, nil } // UpdateDictionaryItemInput is used as input to the UpdateDictionaryItem function. type UpdateDictionaryItemInput struct { // Service is the ID of the service. 
Dictionary is the ID of the dictionary. // Both fields are required. Service string Dictionary string // ItemKey is the name of the dictionary item to fetch. ItemKey string ItemValue string `form:"item_value,omitempty"` } // UpdateDictionaryItem updates a specific dictionary item. func (c *Client) UpdateDictionaryItem(i *UpdateDictionaryItemInput) (*DictionaryItem, error) { if i.Service == "" { return nil, ErrMissingService } if i.Dictionary == "" { return nil, ErrMissingDictionary } if i.ItemKey == ""
path := fmt.Sprintf("/service/%s/dictionary/%s/item/%s", i.Service, i.Dictionary, i.ItemKey) resp, err := c.PutForm(path, i, nil) if err != nil { return nil, err } var b *DictionaryItem if err := decodeJSON(&b, resp.Body); err != nil { return nil, err } return b, nil } // DeleteDictionaryItemInput is the input parameter to DeleteDictionaryItem. type DeleteDictionaryItemInput struct { // Service is the ID of the service. Dictionary is the ID of the dictionary. // Both fields are required. Service string Dictionary string // ItemKey is the name of the dictionary item to delete. ItemKey string } // DeleteDictionaryItem deletes the given dictionary item. func (c *Client) DeleteDictionaryItem(i *DeleteDictionaryItemInput) error { if i.Service == "" { return ErrMissingService } if i.Dictionary == "" { return ErrMissingDictionary } if i.ItemKey == "" { return ErrMissingItemKey } path := fmt.Sprintf("/service/%s/dictionary/%s/item/%s", i.Service, i.Dictionary, i.ItemKey) _, err := c.Delete(path, nil) if err != nil { return err } // Unlike other endpoints, the dictionary endpoint does not return a status // response - it just returns a 200 OK. return nil }
{
	return nil, ErrMissingItemKey
}
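Reassembled, the fragments above form a complete CRUD client for Fastly edge dictionary items. As a quick illustration of how that API reads at a call site, here is a minimal Go sketch; the import path, NewClient constructor, API key, and IDs are assumptions for illustration (nothing above shows how the client is constructed):

// Hedged usage sketch for the dictionary item client above. The import path,
// NewClient constructor, API key, and IDs are assumptions for illustration.
package main

import (
	"fmt"
	"log"

	"github.com/sethvargo/go-fastly" // assumed import path
)

func main() {
	client, err := fastly.NewClient("YOUR_FASTLY_API_KEY") // assumed constructor
	if err != nil {
		log.Fatal(err)
	}

	// Create one item, then list all items; ListDictionaryItems returns them
	// sorted by key via dictionaryItemsByKey.
	_, err = client.CreateDictionaryItem(&fastly.CreateDictionaryItemInput{
		Service:    "SERVICE_ID",    // placeholder
		Dictionary: "DICTIONARY_ID", // placeholder
		ItemKey:    "beta_users",
		ItemValue:  "enabled",
	})
	if err != nil {
		log.Fatal(err)
	}

	items, err := client.ListDictionaryItems(&fastly.ListDictionaryItemsInput{
		Service:    "SERVICE_ID",
		Dictionary: "DICTIONARY_ID",
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, item := range items {
		fmt.Println(item.ItemKey, "=", item.ItemValue)
	}
}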
rowcount.go
package cmds

import (
	"fmt"
	"log"
	"os"

	goparquet "github.com/sagia-inneractive/parquet-go"
	"github.com/spf13/cobra"
)

func init() {
	rootCmd.AddCommand(rowCountCmd)
}
var rowCountCmd = &cobra.Command{
	Use:   "rowcount file-name.parquet",
	Short: "Prints the count of rows in a Parquet file",
	Run: func(cmd *cobra.Command, args []string) {
		if len(args) != 1 {
			_ = cmd.Usage()
			os.Exit(1)
		}
		fl, err := os.Open(args[0])
		if err != nil {
			log.Fatalf("Cannot open the file: %q", err)
		}
		defer fl.Close()
		reader, err := goparquet.NewFileReader(fl)
		if err != nil {
			log.Fatalf("Failed to read the parquet header: %q", err)
		}
		fmt.Println("Total RowCount:", reader.NumRows())
	},
}
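The cobra command above is a thin shell around two parquet-go calls. A minimal sketch of the same row count without the CLI scaffolding, using only the calls that appear above (os.Open, goparquet.NewFileReader, NumRows); the file name is a placeholder:

// Minimal sketch: the row count without the cobra scaffolding, using only the
// calls from rowcount.go above. "example.parquet" is a placeholder.
package main

import (
	"fmt"
	"log"
	"os"

	goparquet "github.com/sagia-inneractive/parquet-go"
)

func main() {
	fl, err := os.Open("example.parquet")
	if err != nil {
		log.Fatalf("Cannot open the file: %q", err)
	}
	defer fl.Close()

	reader, err := goparquet.NewFileReader(fl)
	if err != nil {
		log.Fatalf("Failed to read the parquet header: %q", err)
	}
	fmt.Println("Total RowCount:", reader.NumRows())
}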
utils.py
from __future__ import (
    absolute_import,
    unicode_literals,
)

import functools


def decorated(func):
    @functools.wraps(func)
    def
(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper
wrapper
main.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygame
import os
from Controlador.Basic_Controller import Controlador
from Controlador.Menu_Controller import Menu
from Controlador.Versus_Controller import Versus_Controlador

__author__ = "Isidora Ulloa"
__license__ = "GPL"
__version__ = "1.0.0"
__email__ = "[email protected]"

pygame.init()
titulo = Menu("Recursos/menu sprite full.png", "", True)
while True:
    while titulo.mainloop:
        titulo.run()
    if titulo.versus.on:
        instruccion = Menu("Recursos/instrucciones 12.png", "", False)
        program = Versus_Controlador(titulo.walls.on)
    else:
        instruccion = Menu("Recursos/instrucciones 11.png", "", False)
        program = Controlador(titulo.walls.on)
    while instruccion.mainloop:
        instruccion.run()
    while program.run == True:
        program.update()
        pygame.time.wait(program.refresh)
    if titulo.versus.on:
        fin = Menu(program.end, "Cuy: " + str(program.puntaje2.counter) + " Erizo: " + str(program.puntaje1.counter), False)
fin.run()
titulo.mainloop = True
else:
    fin = Menu(program.end, " Puntos: " + str(program.puntaje.counter), False)
while fin.running == True:
test_send_ai_pics_stats.py
from unittest import TestCase

from django.core.management import call_command


class SendAiPicsStatsTestCase(TestCase):
    def test_run_command(self):
call_command('send_ai_pics_stats')
SimpleMNIST.py
# Copyright (c) Microsoft. All rights reserved. # Licensed under the MIT license. See LICENSE.md file in the project root # for full license information. # ============================================================================== import numpy as np import sys import os from cntk import Trainer from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs, INFINITELY_REPEAT, FULL_DATA_SWEEP from cntk.learner import sgd, learning_rate_schedule, UnitType from cntk.ops import input_variable, cross_entropy_with_softmax, classification_error, relu, element_times, constant, \ reduce_max, reduce_mean, reduce_min from cntk.utils import ProgressPrinter abs_path = os.path.dirname(os.path.abspath(__file__)) sys.path.append(os.path.join(abs_path, "..", "..")) from Examples.common.nn import fully_connected_classifier_net def check_path(path): if not os.path.exists(path): readme_file = os.path.normpath(os.path.join( os.path.dirname(path), "..", "README.md")) raise RuntimeError( "File '%s' does not exist. Please follow the instructions at %s to download and prepare it." % (path, readme_file)) def create_reader(path, is_training, input_dim, label_dim): return MinibatchSource(CTFDeserializer(path, StreamDefs( features=StreamDef(field='features', shape=input_dim, is_sparse=False), labels=StreamDef(field='labels', shape=label_dim, is_sparse=False) )), randomize=is_training, epoch_size=INFINITELY_REPEAT if is_training else FULL_DATA_SWEEP) # Creates and trains a feedforward classification model for MNIST images def simple_mnist(): input_dim = 784 num_output_classes = 10 num_hidden_layers = 1 hidden_layers_dim = 200 # Input variables denoting the features and label data features = input_variable(input_dim, np.float32) label = input_variable(num_output_classes, np.float32) # Instantiate the feedforward classification model scaled_input = element_times(constant(0.00390625), features) netout = fully_connected_classifier_net( scaled_input, num_output_classes, hidden_layers_dim, num_hidden_layers, relu) ce = cross_entropy_with_softmax(netout, label) pe = classification_error(netout, label) try: rel_path = os.path.join(os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY'], *"Image/MNIST/v0/Train-28x28_cntk_text.txt".split("/")) except KeyError: rel_path = os.path.join(*"../Image/DataSets/MNIST/Train-28x28_cntk_text.txt".split("/")) path = os.path.normpath(os.path.join(abs_path, rel_path)) check_path(path) reader_train = create_reader(path, True, input_dim, num_output_classes) input_map = { features: reader_train.streams.features, label: reader_train.streams.labels } lr_per_minibatch = learning_rate_schedule(0.2, UnitType.minibatch) # Instantiate the trainer object to drive the model training trainer = Trainer(netout, ce, pe, sgd(netout.parameters, lr=lr_per_minibatch)) # Instantiate a ProgressPrinter. logdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "mnist_log") progress_printer = ProgressPrinter(tag='Training', freq=1, tensorboard_log_dir=logdir, model=netout) # Get minibatches of images to train with and perform model training minibatch_size = 64 num_samples_per_sweep = 6000 num_sweeps_to_train_with = 2 num_minibatches_to_train = (num_samples_per_sweep * num_sweeps_to_train_with) / minibatch_size for minibatch_idx in range(0, int(num_minibatches_to_train)): trainer.train_minibatch(reader_train.next_minibatch(minibatch_size, input_map=input_map)) # Take snapshot of loss and eval criterion for the previous minibatch. 
progress_printer.update_with_trainer(trainer, with_metric=True) # Log max/min/mean of each parameter tensor, so that we can confirm that the parameters change indeed. # Don't want to do that very often though, otherwise will spend too much time computing min/max/mean. if minibatch_idx % 10 == 9: for p in netout.parameters: progress_printer.update_value("mb_" + p.uid + "_max", reduce_max(p).eval(), minibatch_idx) progress_printer.update_value("mb_" + p.uid + "_min", reduce_min(p).eval(), minibatch_idx) progress_printer.update_value("mb_" + p.uid + "_mean", reduce_mean(p).eval(), minibatch_idx) # Load test data try: rel_path = os.path.join(os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY'], *"Image/MNIST/v0/Test-28x28_cntk_text.txt".split("/")) except KeyError: rel_path = os.path.join(*"../Image/DataSets/MNIST/Test-28x28_cntk_text.txt".split("/"))
check_path(path) reader_test = create_reader(path, False, input_dim, num_output_classes) input_map = { features: reader_test.streams.features, label: reader_test.streams.labels } # Test data for trained model test_minibatch_size = 1024 num_samples = 10000 num_minibatches_to_test = num_samples / test_minibatch_size test_result = 0.0 for i in range(0, int(num_minibatches_to_test)): mb = reader_test.next_minibatch(test_minibatch_size, input_map=input_map) test_result += trainer.test_minibatch(mb) # Average of evaluation errors of all test minibatches return test_result / num_minibatches_to_test if __name__ == '__main__': # Specify the target device to be used for computing, if you do not want to # use the best available one, e.g. # set_default_device(cpu()) error = simple_mnist() print("Error: %f" % error)
path = os.path.normpath(os.path.join(abs_path, rel_path))
nest-application-context.interface.ts
import { LoggerService } from '../services/logger.service';
import { Type } from './type.interface';

export interface INestApplicationContext {
  /**
   * Allows navigating through the modules tree, for example, to pull out a
   * specific instance from the selected module.
   * @returns {INestApplicationContext}
   */
  select<T>(module: Type<T>): INestApplicationContext;

  /**
   * Retrieves an instance of an injectable or controller available anywhere
   * in the application; otherwise, throws an exception.
   * @returns {TResult}
typeOrToken: Type<TInput> | string | symbol,
    options?: { strict: boolean },
  ): TResult;

  /**
   * Terminates the application
   * @returns {Promise<void>}
   */
  close(): Promise<void>;

  /**
   * Sets custom logger service
   * @returns {void}
   */
  useLogger(logger: LoggerService);
}
*/
  get<TInput = any, TResult = TInput>(
main.go
package main import ( "context" "flag" "fmt" "os" "runtime" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) _ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/client-go/rest" apis "github.com/kubeflow/kfctl/v3/pkg/apis/apps" "github.com/kubeflow/kfctl/v3/pkg/controller" "github.com/operator-framework/operator-sdk/pkg/k8sutil" kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics" "github.com/operator-framework/operator-sdk/pkg/leader" "github.com/operator-framework/operator-sdk/pkg/log/zap" "github.com/operator-framework/operator-sdk/pkg/metrics" "github.com/operator-framework/operator-sdk/pkg/restmapper" sdkVersion "github.com/operator-framework/operator-sdk/version" log "github.com/sirupsen/logrus" "github.com/spf13/pflag" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" ) // Kubeflow operator version var ( Version string = "1.0.0" ) // Change below variables to serve metrics on different host or port. var ( metricsHost = "0.0.0.0" metricsPort int32 = 8383 operatorMetricsPort int32 = 8686 ) func printVersion()
func main() { // Add the zap logger flag set to the CLI. The flag set must // be added before calling pflag.Parse(). pflag.CommandLine.AddFlagSet(zap.FlagSet()) // Add flags registered by imported packages (e.g. glog and // controller-runtime) pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() printVersion() namespace, err := k8sutil.GetWatchNamespace() if err != nil { log.Errorf("Failed to get watch namespace. Error %v.", err) os.Exit(1) } // Get a config to talk to the apiserver cfg, err := config.GetConfig() if err != nil { log.Errorf("Error: %v.", err) os.Exit(1) } ctx := context.TODO() // Become the leader before proceeding err = leader.Become(ctx, "kfctl-lock") if err != nil { log.Errorf("Error: %v.", err) os.Exit(1) } // Create a new Cmd to provide shared dependencies and start components mgr, err := manager.New(cfg, manager.Options{ // Watch all namespace Namespace: "", MapperProvider: restmapper.NewDynamicRESTMapper, MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort), }) if err != nil { log.Errorf("Error: %v.", err) os.Exit(1) } log.Info("Registering Components.") // Setup Scheme for all resources if err := apis.AddToScheme(mgr.GetScheme()); err != nil { log.Errorf("Error: %v.", err) os.Exit(1) } // Setup all Controllers if err := controller.AddToManager(mgr); err != nil { log.Errorf("Error: %v.", err) os.Exit(1) } if err = serveCRMetrics(cfg); err != nil { log.Errorf("Could not generate and serve custom resource metrics. Error: %v.", err.Error()) } // Add to the below struct any other metrics ports you want to expose. servicePorts := []v1.ServicePort{ {Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}}, {Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}}, } // Create Service object to expose the metrics port(s). service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts) if err != nil { log.Errorf("Could not create metrics Service. Error: %v.", err.Error()) } // CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources // necessary to configure Prometheus to scrape metrics from this operator. services := []*v1.Service{service} _, err = metrics.CreateServiceMonitors(cfg, namespace, services) if err != nil { log.Errorf("Could not create ServiceMonitor object. Error: %v.", err.Error()) // If this operator is deployed to a cluster without the prometheus-operator running, it will return // ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation. if err == metrics.ErrServiceMonitorNotPresent { log.Errorf("Install prometheus-operator in your cluster to create ServiceMonitor objects. Error: %v.", err.Error()) } } log.Infof("Starting the Cmd.") // Start the Cmd if err := mgr.Start(signals.SetupSignalHandler()); err != nil { log.Errorf("Manager exited non-zero. Error: %v.", err) os.Exit(1) } } // serveCRMetrics gets the Operator/CustomResource GVKs and generates metrics based on those types. // It serves those metrics on "http://metricsHost:operatorMetricsPort". func serveCRMetrics(cfg *rest.Config) error { // Below function returns filtered operator/CustomResource specific GVKs. // For more control override the below GVK list with your own custom logic. 
filteredGVK, err := k8sutil.GetGVKsFromAddToScheme(apis.AddToScheme) if err != nil { return err } // Get the namespace the operator is currently deployed in. operatorNs, err := k8sutil.GetOperatorNamespace() if err != nil { return err } // To generate metrics in other namespaces, add the values below. ns := []string{operatorNs} // Generate and serve custom resource specific metrics. err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, filteredGVK, metricsHost, operatorMetricsPort) if err != nil { return err } return nil }
{
	log.Infof("Go Version: %s", runtime.Version())
	log.Infof("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)
	log.Infof("Version of operator-sdk: %v", sdkVersion.Version)
	log.Infof("Kubeflow version: %v", Version)
}
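The main function above follows the usual operator-sdk bootstrap order: resolve config, win leader election, build the manager, register schemes and controllers, and only then block in mgr.Start. A condensed sketch of that ordering, reusing the calls from main.go above, with flag parsing and metrics wiring omitted:

// Condensed sketch of the bootstrap ordering in main() above, with flag
// parsing, namespace lookup, and metrics wiring omitted. It assumes the same
// imports as main.go (config, leader, manager, signals, apis, controller).
func runOperator() error {
	cfg, err := config.GetConfig()
	if err != nil {
		return err
	}

	// Leader election first: only one replica may run the controllers.
	if err := leader.Become(context.TODO(), "kfctl-lock"); err != nil {
		return err
	}

	// Namespace "" watches all namespaces, as in main() above.
	mgr, err := manager.New(cfg, manager.Options{Namespace: ""})
	if err != nil {
		return err
	}

	// Register API types and controllers before the manager starts.
	if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
		return err
	}
	if err := controller.AddToManager(mgr); err != nil {
		return err
	}

	// Blocks until the signal handler fires.
	return mgr.Start(signals.SetupSignalHandler())
}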
mod.rs
//! The main parser interface. use crate::ast::{self, CrateConfig, NodeId}; use crate::early_buffered_lints::{BufferedEarlyLint, BufferedEarlyLintId}; use crate::source_map::{SourceMap, FilePathMapping}; use crate::feature_gate::UnstableFeatures; use crate::parse::parser::Parser; use crate::syntax::parse::parser::emit_unclosed_delims; use crate::tokenstream::{TokenStream, TokenTree}; use crate::diagnostics::plugin::ErrorMap; use crate::print::pprust::token_to_string; use errors::{Applicability, FatalError, Level, Handler, ColorConfig, Diagnostic, DiagnosticBuilder}; use rustc_data_structures::sync::{Lrc, Lock}; use syntax_pos::{Span, SourceFile, FileName, MultiSpan}; use syntax_pos::edition::Edition; use rustc_data_structures::fx::{FxHashSet, FxHashMap}; use std::borrow::Cow; use std::path::{Path, PathBuf}; use std::str; pub type PResult<'a, T> = Result<T, DiagnosticBuilder<'a>>; #[macro_use] pub mod parser; pub mod attr; pub mod lexer; pub mod token; crate mod classify; crate mod diagnostics; crate mod literal; crate mod unescape; crate mod unescape_error_reporting; /// Info about a parsing session. pub struct ParseSess { pub span_diagnostic: Handler, pub unstable_features: UnstableFeatures, pub config: CrateConfig, pub edition: Edition, pub missing_fragment_specifiers: Lock<FxHashSet<Span>>, /// Places where raw identifiers were used. This is used for feature-gating raw identifiers. pub raw_identifier_spans: Lock<Vec<Span>>, /// The registered diagnostics codes. crate registered_diagnostics: Lock<ErrorMap>, /// Used to determine and report recursive module inclusions. included_mod_stack: Lock<Vec<PathBuf>>, source_map: Lrc<SourceMap>, pub buffered_lints: Lock<Vec<BufferedEarlyLint>>, /// Contains the spans of block expressions that could have been incomplete based on the /// operation token that followed it, but that the parser cannot identify without further /// analysis. pub ambiguous_block_expr_parse: Lock<FxHashMap<Span, Span>>, } impl ParseSess { pub fn new(file_path_mapping: FilePathMapping) -> Self { let cm = Lrc::new(SourceMap::new(file_path_mapping)); let handler = Handler::with_tty_emitter(ColorConfig::Auto, true, None, Some(cm.clone())); ParseSess::with_span_handler(handler, cm) } pub fn with_span_handler(handler: Handler, source_map: Lrc<SourceMap>) -> ParseSess { ParseSess { span_diagnostic: handler, unstable_features: UnstableFeatures::from_environment(), config: FxHashSet::default(), missing_fragment_specifiers: Lock::new(FxHashSet::default()), raw_identifier_spans: Lock::new(Vec::new()), registered_diagnostics: Lock::new(ErrorMap::new()), included_mod_stack: Lock::new(vec![]), source_map, buffered_lints: Lock::new(vec![]), edition: Edition::from_session(), ambiguous_block_expr_parse: Lock::new(FxHashMap::default()), } } #[inline] pub fn source_map(&self) -> &SourceMap { &self.source_map } pub fn buffer_lint<S: Into<MultiSpan>>(&self, lint_id: BufferedEarlyLintId, span: S, id: NodeId, msg: &str, ) { self.buffered_lints.with_lock(|buffered_lints| { buffered_lints.push(BufferedEarlyLint{ span: span.into(), id, msg: msg.into(), lint_id, }); }); } /// Extend an error with a suggestion to wrap an expression with parentheses to allow the /// parser to continue parsing the following operation as part of the same expression. 
pub fn expr_parentheses_needed( &self, err: &mut DiagnosticBuilder<'_>, span: Span, alt_snippet: Option<String>, ) { if let Some(snippet) = self.source_map().span_to_snippet(span).ok().or(alt_snippet) { err.span_suggestion( span, "parentheses are required to parse this as an expression", format!("({})", snippet), Applicability::MachineApplicable, ); } } } #[derive(Clone)] pub struct Directory<'a> { pub path: Cow<'a, Path>, pub ownership: DirectoryOwnership, } #[derive(Copy, Clone)] pub enum DirectoryOwnership { Owned { // None if `mod.rs`, `Some("foo")` if we're in `foo.rs` relative: Option<ast::Ident>, }, UnownedViaBlock, UnownedViaMod(bool /* legacy warnings? */), } // a bunch of utility functions of the form parse_<thing>_from_<source> // where <thing> includes crate, expr, item, stmt, tts, and one that // uses a HOF to parse anything, and <source> includes file and // source_str. pub fn parse_crate_from_file<'a>(input: &Path, sess: &'a ParseSess) -> PResult<'a, ast::Crate> { let mut parser = new_parser_from_file(sess, input); parser.parse_crate_mod() } pub fn parse_crate_attrs_from_file<'a>(input: &Path, sess: &'a ParseSess) -> PResult<'a, Vec<ast::Attribute>> { let mut parser = new_parser_from_file(sess, input); parser.parse_inner_attributes() } pub fn parse_crate_from_source_str(name: FileName, source: String, sess: &ParseSess) -> PResult<'_, ast::Crate> { new_parser_from_source_str(sess, name, source).parse_crate_mod() } pub fn parse_crate_attrs_from_source_str(name: FileName, source: String, sess: &ParseSess) -> PResult<'_, Vec<ast::Attribute>> { new_parser_from_source_str(sess, name, source).parse_inner_attributes() } pub fn parse_stream_from_source_str( name: FileName, source: String, sess: &ParseSess, override_span: Option<Span>, ) -> TokenStream { let (stream, mut errors) = source_file_to_stream( sess, sess.source_map().new_source_file(name, source), override_span, ); emit_unclosed_delims(&mut errors, &sess.span_diagnostic); stream } /// Creates a new parser from a source string. pub fn new_parser_from_source_str(sess: &ParseSess, name: FileName, source: String) -> Parser<'_> { panictry_buffer!(&sess.span_diagnostic, maybe_new_parser_from_source_str(sess, name, source)) } /// Creates a new parser from a source string. Returns any buffered errors from lexing the initial /// token stream. pub fn maybe_new_parser_from_source_str(sess: &ParseSess, name: FileName, source: String) -> Result<Parser<'_>, Vec<Diagnostic>> { let mut parser = maybe_source_file_to_parser(sess, sess.source_map().new_source_file(name, source))?; parser.recurse_into_file_modules = false; Ok(parser) } /// Creates a new parser, handling errors as appropriate /// if the file doesn't exist pub fn new_parser_from_file<'a>(sess: &'a ParseSess, path: &Path) -> Parser<'a> { source_file_to_parser(sess, file_to_source_file(sess, path, None)) } /// Creates a new parser, returning buffered diagnostics if the file doesn't /// exist or from lexing the initial token stream. pub fn maybe_new_parser_from_file<'a>(sess: &'a ParseSess, path: &Path) -> Result<Parser<'a>, Vec<Diagnostic>> { let file = try_file_to_source_file(sess, path, None).map_err(|db| vec![db])?; maybe_source_file_to_parser(sess, file) } /// Given a session, a crate config, a path, and a span, add /// the file at the given path to the source_map, and return a parser. /// On an error, use the given span as the source of the problem. 
pub fn new_sub_parser_from_file<'a>(sess: &'a ParseSess, path: &Path, directory_ownership: DirectoryOwnership, module_name: Option<String>, sp: Span) -> Parser<'a> { let mut p = source_file_to_parser(sess, file_to_source_file(sess, path, Some(sp))); p.directory.ownership = directory_ownership; p.root_module_name = module_name; p } /// Given a source_file and config, return a parser fn source_file_to_parser(sess: &ParseSess, source_file: Lrc<SourceFile>) -> Parser<'_> { panictry_buffer!(&sess.span_diagnostic, maybe_source_file_to_parser(sess, source_file)) } /// Given a source_file and config, return a parser. Returns any buffered errors from lexing the /// initial token stream. fn maybe_source_file_to_parser( sess: &ParseSess,
let (stream, unclosed_delims) = maybe_file_to_stream(sess, source_file, None)?; let mut parser = stream_to_parser(sess, stream, None); parser.unclosed_delims = unclosed_delims; if parser.token == token::Eof && parser.span.is_dummy() { parser.span = Span::new(end_pos, end_pos, parser.span.ctxt()); } Ok(parser) } // must preserve old name for now, because quote! from the *existing* // compiler expands into it pub fn new_parser_from_tts(sess: &ParseSess, tts: Vec<TokenTree>) -> Parser<'_> { stream_to_parser(sess, tts.into_iter().collect(), crate::MACRO_ARGUMENTS) } // base abstractions /// Given a session and a path and an optional span (for error reporting), /// add the path to the session's source_map and return the new source_file or /// error when a file can't be read. fn try_file_to_source_file(sess: &ParseSess, path: &Path, spanopt: Option<Span>) -> Result<Lrc<SourceFile>, Diagnostic> { sess.source_map().load_file(path) .map_err(|e| { let msg = format!("couldn't read {}: {}", path.display(), e); let mut diag = Diagnostic::new(Level::Fatal, &msg); if let Some(sp) = spanopt { diag.set_span(sp); } diag }) } /// Given a session and a path and an optional span (for error reporting), /// add the path to the session's `source_map` and return the new `source_file`. fn file_to_source_file(sess: &ParseSess, path: &Path, spanopt: Option<Span>) -> Lrc<SourceFile> { match try_file_to_source_file(sess, path, spanopt) { Ok(source_file) => source_file, Err(d) => { DiagnosticBuilder::new_diagnostic(&sess.span_diagnostic, d).emit(); FatalError.raise(); } } } /// Given a source_file, produces a sequence of token trees. pub fn source_file_to_stream( sess: &ParseSess, source_file: Lrc<SourceFile>, override_span: Option<Span>, ) -> (TokenStream, Vec<lexer::UnmatchedBrace>) { panictry_buffer!(&sess.span_diagnostic, maybe_file_to_stream(sess, source_file, override_span)) } /// Given a source file, produces a sequence of token trees. Returns any buffered errors from /// parsing the token stream. pub fn maybe_file_to_stream( sess: &ParseSess, source_file: Lrc<SourceFile>, override_span: Option<Span>, ) -> Result<(TokenStream, Vec<lexer::UnmatchedBrace>), Vec<Diagnostic>> { let srdr = lexer::StringReader::new_or_buffered_errs(sess, source_file, override_span)?; let (token_trees, unmatched_braces) = srdr.into_token_trees(); match token_trees { Ok(stream) => Ok((stream, unmatched_braces)), Err(err) => { let mut buffer = Vec::with_capacity(1); err.buffer(&mut buffer); // Not using `emit_unclosed_delims` to use `db.buffer` for unmatched in unmatched_braces { let mut db = sess.span_diagnostic.struct_span_err(unmatched.found_span, &format!( "incorrect close delimiter: `{}`", token_to_string(&token::Token::CloseDelim(unmatched.found_delim)), )); db.span_label(unmatched.found_span, "incorrect close delimiter"); if let Some(sp) = unmatched.candidate_span { db.span_label(sp, "close delimiter possibly meant for this"); } if let Some(sp) = unmatched.unclosed_span { db.span_label(sp, "un-closed delimiter"); } db.buffer(&mut buffer); } Err(buffer) } } } /// Given stream and the `ParseSess`, produces a parser. pub fn stream_to_parser<'a>( sess: &'a ParseSess, stream: TokenStream, subparser_name: Option<&'static str>, ) -> Parser<'a> { Parser::new(sess, stream, None, true, false, subparser_name) } /// Given stream, the `ParseSess` and the base directory, produces a parser. 
/// /// Use this function when you are creating a parser from the token stream /// and also care about the current working directory of the parser (e.g., /// you are trying to resolve modules defined inside a macro invocation). /// /// # Note /// /// The main usage of this function is outside of rustc, for those who uses /// libsyntax as a library. Please do not remove this function while refactoring /// just because it is not used in rustc codebase! pub fn stream_to_parser_with_base_dir<'a>( sess: &'a ParseSess, stream: TokenStream, base_dir: Directory<'a>, ) -> Parser<'a> { Parser::new(sess, stream, Some(base_dir), true, false, None) } /// A sequence separator. pub struct SeqSep { /// The seperator token. pub sep: Option<token::Token>, /// `true` if a trailing separator is allowed. pub trailing_sep_allowed: bool, } impl SeqSep { pub fn trailing_allowed(t: token::Token) -> SeqSep { SeqSep { sep: Some(t), trailing_sep_allowed: true, } } pub fn none() -> SeqSep { SeqSep { sep: None, trailing_sep_allowed: false, } } } #[cfg(test)] mod tests { use super::*; use crate::ast::{self, Ident, PatKind}; use crate::attr::first_attr_value_str_by_name; use crate::ptr::P; use crate::print::pprust::item_to_string; use crate::tokenstream::{DelimSpan, TokenTree}; use crate::util::parser_testing::string_to_stream; use crate::util::parser_testing::{string_to_expr, string_to_item}; use crate::with_default_globals; use syntax_pos::{Span, BytePos, Pos, NO_EXPANSION}; /// Parses an item. /// /// Returns `Ok(Some(item))` when successful, `Ok(None)` when no item was found, and `Err` /// when a syntax error occurred. fn parse_item_from_source_str(name: FileName, source: String, sess: &ParseSess) -> PResult<'_, Option<P<ast::Item>>> { new_parser_from_source_str(sess, name, source).parse_item() } // produce a syntax_pos::span fn sp(a: u32, b: u32) -> Span { Span::new(BytePos(a), BytePos(b), NO_EXPANSION) } #[should_panic] #[test] fn bad_path_expr_1() { with_default_globals(|| { string_to_expr("::abc::def::return".to_string()); }) } // check the token-tree-ization of macros #[test] fn string_to_tts_macro () { with_default_globals(|| { use crate::symbol::sym; let tts: Vec<_> = string_to_stream("macro_rules! 
zip (($a)=>($a))".to_string()).trees().collect(); let tts: &[TokenTree] = &tts[..]; match (tts.len(), tts.get(0), tts.get(1), tts.get(2), tts.get(3)) { ( 4, Some(&TokenTree::Token(_, token::Ident(name_macro_rules, false))), Some(&TokenTree::Token(_, token::Not)), Some(&TokenTree::Token(_, token::Ident(name_zip, false))), Some(&TokenTree::Delimited(_, macro_delim, ref macro_tts)), ) if name_macro_rules.name == sym::macro_rules && name_zip.name.as_str() == "zip" => { let tts = &macro_tts.trees().collect::<Vec<_>>(); match (tts.len(), tts.get(0), tts.get(1), tts.get(2)) { ( 3, Some(&TokenTree::Delimited(_, first_delim, ref first_tts)), Some(&TokenTree::Token(_, token::FatArrow)), Some(&TokenTree::Delimited(_, second_delim, ref second_tts)), ) if macro_delim == token::Paren => { let tts = &first_tts.trees().collect::<Vec<_>>(); match (tts.len(), tts.get(0), tts.get(1)) { ( 2, Some(&TokenTree::Token(_, token::Dollar)), Some(&TokenTree::Token(_, token::Ident(ident, false))), ) if first_delim == token::Paren && ident.name.as_str() == "a" => {}, _ => panic!("value 3: {:?} {:?}", first_delim, first_tts), } let tts = &second_tts.trees().collect::<Vec<_>>(); match (tts.len(), tts.get(0), tts.get(1)) { ( 2, Some(&TokenTree::Token(_, token::Dollar)), Some(&TokenTree::Token(_, token::Ident(ident, false))), ) if second_delim == token::Paren && ident.name.as_str() == "a" => {}, _ => panic!("value 4: {:?} {:?}", second_delim, second_tts), } }, _ => panic!("value 2: {:?} {:?}", macro_delim, macro_tts), } }, _ => panic!("value: {:?}",tts), } }) } #[test] fn string_to_tts_1() { with_default_globals(|| { let tts = string_to_stream("fn a (b : i32) { b; }".to_string()); let expected = TokenStream::new(vec![ TokenTree::Token(sp(0, 2), token::Ident(Ident::from_str("fn"), false)).into(), TokenTree::Token(sp(3, 4), token::Ident(Ident::from_str("a"), false)).into(), TokenTree::Delimited( DelimSpan::from_pair(sp(5, 6), sp(13, 14)), token::DelimToken::Paren, TokenStream::new(vec![ TokenTree::Token(sp(6, 7), token::Ident(Ident::from_str("b"), false)).into(), TokenTree::Token(sp(8, 9), token::Colon).into(), TokenTree::Token(sp(10, 13), token::Ident(Ident::from_str("i32"), false)).into(), ]).into(), ).into(), TokenTree::Delimited( DelimSpan::from_pair(sp(15, 16), sp(20, 21)), token::DelimToken::Brace, TokenStream::new(vec![ TokenTree::Token(sp(17, 18), token::Ident(Ident::from_str("b"), false)).into(), TokenTree::Token(sp(18, 19), token::Semi).into(), ]).into(), ).into() ]); assert_eq!(tts, expected); }) } #[test] fn parse_use() { with_default_globals(|| { let use_s = "use foo::bar::baz;"; let vitem = string_to_item(use_s.to_string()).unwrap(); let vitem_s = item_to_string(&vitem); assert_eq!(&vitem_s[..], use_s); let use_s = "use foo::bar as baz;"; let vitem = string_to_item(use_s.to_string()).unwrap(); let vitem_s = item_to_string(&vitem); assert_eq!(&vitem_s[..], use_s); }) } #[test] fn parse_extern_crate() { with_default_globals(|| { let ex_s = "extern crate foo;"; let vitem = string_to_item(ex_s.to_string()).unwrap(); let vitem_s = item_to_string(&vitem); assert_eq!(&vitem_s[..], ex_s); let ex_s = "extern crate foo as bar;"; let vitem = string_to_item(ex_s.to_string()).unwrap(); let vitem_s = item_to_string(&vitem); assert_eq!(&vitem_s[..], ex_s); }) } fn get_spans_of_pat_idents(src: &str) -> Vec<Span> { let item = string_to_item(src.to_string()).unwrap(); struct PatIdentVisitor { spans: Vec<Span> } impl<'a> crate::visit::Visitor<'a> for PatIdentVisitor { fn visit_pat(&mut self, p: &'a ast::Pat) { match p.node { 
PatKind::Ident(_ , ref spannedident, _) => { self.spans.push(spannedident.span.clone()); } _ => { crate::visit::walk_pat(self, p); } } } } let mut v = PatIdentVisitor { spans: Vec::new() }; crate::visit::walk_item(&mut v, &item); return v.spans; } #[test] fn span_of_self_arg_pat_idents_are_correct() { with_default_globals(|| { let srcs = ["impl z { fn a (&self, &myarg: i32) {} }", "impl z { fn a (&mut self, &myarg: i32) {} }", "impl z { fn a (&'a self, &myarg: i32) {} }", "impl z { fn a (self, &myarg: i32) {} }", "impl z { fn a (self: Foo, &myarg: i32) {} }", ]; for &src in &srcs { let spans = get_spans_of_pat_idents(src); let (lo, hi) = (spans[0].lo(), spans[0].hi()); assert!("self" == &src[lo.to_usize()..hi.to_usize()], "\"{}\" != \"self\". src=\"{}\"", &src[lo.to_usize()..hi.to_usize()], src) } }) } #[test] fn parse_exprs () { with_default_globals(|| { // just make sure that they parse.... string_to_expr("3 + 4".to_string()); string_to_expr("a::z.froob(b,&(987+3))".to_string()); }) } #[test] fn attrs_fix_bug () { with_default_globals(|| { string_to_item("pub fn mk_file_writer(path: &Path, flags: &[FileFlag]) -> Result<Box<Writer>, String> { #[cfg(windows)] fn wb() -> c_int { (O_WRONLY | libc::consts::os::extra::O_BINARY) as c_int } #[cfg(unix)] fn wb() -> c_int { O_WRONLY as c_int } let mut fflags: c_int = wb(); }".to_string()); }) } #[test] fn crlf_doc_comments() { with_default_globals(|| { use crate::symbol::sym; let sess = ParseSess::new(FilePathMapping::empty()); let name_1 = FileName::Custom("crlf_source_1".to_string()); let source = "/// doc comment\r\nfn foo() {}".to_string(); let item = parse_item_from_source_str(name_1, source, &sess) .unwrap().unwrap(); let doc = first_attr_value_str_by_name(&item.attrs, sym::doc).unwrap(); assert_eq!(doc.as_str(), "/// doc comment"); let name_2 = FileName::Custom("crlf_source_2".to_string()); let source = "/// doc comment\r\n/// line 2\r\nfn foo() {}".to_string(); let item = parse_item_from_source_str(name_2, source, &sess) .unwrap().unwrap(); let docs = item.attrs.iter().filter(|a| a.path == sym::doc) .map(|a| a.value_str().unwrap().to_string()).collect::<Vec<_>>(); let b: &[_] = &["/// doc comment".to_string(), "/// line 2".to_string()]; assert_eq!(&docs[..], b); let name_3 = FileName::Custom("clrf_source_3".to_string()); let source = "/** doc comment\r\n * with CRLF */\r\nfn foo() {}".to_string(); let item = parse_item_from_source_str(name_3, source, &sess).unwrap().unwrap(); let doc = first_attr_value_str_by_name(&item.attrs, sym::doc).unwrap(); assert_eq!(doc.as_str(), "/** doc comment\n * with CRLF */"); }); } #[test] fn ttdelim_span() { fn parse_expr_from_source_str( name: FileName, source: String, sess: &ParseSess ) -> PResult<'_, P<ast::Expr>> { new_parser_from_source_str(sess, name, source).parse_expr() } with_default_globals(|| { let sess = ParseSess::new(FilePathMapping::empty()); let expr = parse_expr_from_source_str(PathBuf::from("foo").into(), "foo!( fn main() { body } )".to_string(), &sess).unwrap(); let tts: Vec<_> = match expr.node { ast::ExprKind::Mac(ref mac) => mac.node.stream().trees().collect(), _ => panic!("not a macro"), }; let span = tts.iter().rev().next().unwrap().span(); match sess.source_map().span_to_snippet(span) { Ok(s) => assert_eq!(&s[..], "{ body }"), Err(_) => panic!("could not get snippet"), } }); } // This tests that when parsing a string (rather than a file) we don't try // and read in a file for a module declaration and just parse a stub. // See `recurse_into_file_modules` in the parser. 
#[test] fn out_of_line_mod() { with_default_globals(|| { let sess = ParseSess::new(FilePathMapping::empty()); let item = parse_item_from_source_str( PathBuf::from("foo").into(), "mod foo { struct S; mod this_does_not_exist; }".to_owned(), &sess, ).unwrap().unwrap(); if let ast::ItemKind::Mod(ref m) = item.node { assert!(m.items.len() == 2); } else { panic!(); } }); } }
source_file: Lrc<SourceFile>, ) -> Result<Parser<'_>, Vec<Diagnostic>> { let end_pos = source_file.end_pos;
sectionLineController_20211204180057.js
import SectionLineModel from "../model/sectionLineModel.js"; import SectionLineView from "../view/sectionLineView.js"; import { DOMS } from "../util/constant.js"; export default class
{ constructor() { this.model = new SectionLineModel(); this.view = new SectionLineView(); } init = () => { this.setButtonClickEvent(); const sectionButtons = document.querySelectorAll( ".section-line-menu-button" ); this.view.renderSectionLineEdit(sectionButtons[0]); }; setButtonClickEvent = () => { DOMS.$app.addEventListener("click", this.setSelectedMenuEvent); DOMS.$app.addEventListener("click", this.setSubmitSectionEvent); DOMS.$app.addEventListener("click", this.setDeleteSectionEvent); }; setSelectedMenuEvent = (event) => { if (!event.target.classList.contains("section-line-menu-button")) { return; } const lineName = event.target.dataset.lineName; this.view.renderSectionLineEdit(this.model.stationNames); }; renderSelectedSection = (lineName) => { const $buttons = document.querySelectorAll(".section-line-menu-button"); const currentBtn = Array.from($buttons).filter( (button) => button.dataset.lineName === lineName )[0]; this.renderSectionLineEdit(currentBtn); }; getStations = () => { return this.model.lines.find((line) => Object.keys(line)[0] === lineName)[ lineName ]; }; setSubmitSectionEvent = ({ target }) => { if (target.id === "section-add-button") { const $sectionOrderInput = document.querySelector("#section-order-input"); const $sectionStationSelector = document.querySelector( "#section-station-selector" ); const lineName = target.dataset.lineName; const stationName = $sectionStationSelector.options[$sectionStationSelector.selectedIndex] .value; const order = $sectionOrderInput.value; if (this.cantAddStation(lineName, stationName)) { alert("역이름이 중복되었습니다"); return; } this.addStation(stationName, order, lineName); this.renderSelectedSection(lineName); } }; setDeleteSectionEvent = ({ target }) => { const lineName = target.dataset.lineName; if (!target.classList.contains("section-delete-button")) { return; } if (!confirm("정말로 삭제하시겠습니까")) { return; } if (!this.canDeleteStation(lineName)) { alert("2개 이하의 역개수는 지울수 없습니다"); return; } const deleteIdx = target.dataset.idx; this.deleteStation(deleteIdx, lineName); this.renderSelectedSection(lineName); }; canDeleteStation = (lineName) => { const stations = this.getStations(lineName); return stations.length > 2; }; cantAddStation = (lineName, newStation) => { const stations = this.getStations(lineName); return stations.some((station) => station === newStation); }; }
SectionLineController
main.go
// Advent of Code 2016 - Day 10 // // Approach taken was to keep track of the bots in a // closure and print the id of the bot which handled the // chips we were interested in to a global variable. That // feels wrong, maybe I can redo it later. // // Issues: Tried this one a few ways. First attempt // was what ultimately worked but I got distracted // assuming that I needed to have the bots running // concurrently. I had a set of code which used channels // to pass values between bots and it got the first answer // correct but I had issues with the second one and left // it at that. So far day 10 has given me the most issues. // By this point I have day 1-21 complete save 10, 11, and // 14. 10 was a bit of a sore spot, though looking back // nothing appears to be wrong with it. Just a mental // block I suppose. // package main import ( "fmt" "io/ioutil" "sort" "strconv" "strings" ) type chip int type Receiver interface { receive(chip) } type bot struct { id int inventory [2]chip } type bin struct { id int inventory []chip } var hiReceiver map[int]Receiver var lowReceiver map[int]Receiver var botID int // store the id of the bot for problem 1 //this is here to sort the instructions so we have value last type valuesLast []string func (v valuesLast) Len() int { return len(v) } func (v valuesLast) Swap(i, j int) { v[i], v[j] = v[j], v[i] } func (v valuesLast) Less(i, j int) bool { return v[i] < v[j] } func (b *bot) receive(c chip) { if b.inventory[0] == 0 { b.inventory[0] = c } else { b.inventory[1] = c } b.process() } func (bi *bin) receive(c chip) { bi.inventory = append(bi.inventory, c) } func (b *bot) check() { if (b.inventory[0] == 61 && b.inventory[1] == 17) || (b.inventory[0] == 17 && b.inventory[1] == 61) { botID = b.id } } func (b *bot) process() { var hi chip var low chip if b.inventory[0] != 0 && b.inventory[1] != 0 { b.check() //determines value of part 1 if b.inventory[0] > b.inventory[1] { hi = b.inventory[0] low = b.inventory[1] } else { hi = b.inventory[1] low = b.inventory[0] } //give hi to hi target hiReceiver[b.id].receive(hi) //give low to low target lowReceiver[b.id].receive(low) //zero out inventory b.inventory = [2]chip{0, 0} } } func bots() func(int) *bot { store := make(map[int]*bot) return func(i int) *bot { b, ok := store[i] if ok == false { b = &bot{i, [2]chip{0, 0}} store[i] = b return b } return b } } func bins() func(int) *bin { store := make(map[int]*bin) return func(i int) *bin { bi, ok := store[i] if ok == false { bi = &bin{i, []chip{}} store[i] = bi return bi } return bi } } func check(e error) { if e != nil { panic(e) } } func part_one(input string) string { getBot := bots() getBin := bins() hiReceiver = make(map[int]Receiver) lowReceiver = make(map[int]Receiver) instructions := strings.Split(input, "\n") sort.Sort(valuesLast(instructions)) loop: for _, instruction := range instructions { if instruction == "" { continue loop } tokens := strings.Split(instruction, " ") switch tokens[0] { case "value": v, _ := strconv.Atoi(tokens[1]) id, _ := strconv.Atoi(tokens[5]) if tokens[4] == "bot" { getBot(id).receive(chip(v)) } else { getBin(id).receive(chip(v)) } default: id, _ := strconv.Atoi(tokens[1]) hiId, _ := strconv.Atoi(tokens[11]) lowId, _ := strconv.Atoi(tokens[6]) if tokens[10] == "bot" { hiReceiver[id] = getBot(hiId) } else { hiReceiver[id] = getBin(hiId) } if tokens[5] == "bot" { lowReceiver[id] = getBot(lowId) } else { lowReceiver[id] = getBin(lowId) } } } return strconv.Itoa(botID) //the use of a global here doesn't feel right... 
} func part_two(input string) string { getBot := bots() getBin := bins() hiReceiver = make(map[int]Receiver) lowReceiver = make(map[int]Receiver) instructions := strings.Split(input, "\n") sort.Sort(valuesLast(instructions)) loop: for _, instruction := range instructions { if instruction == "" { continue loop } tokens := strings.Split(instruction, " ") switch tokens[0] { case "value": v, _ := strconv.Atoi(tokens[1]) id, _ := strconv.Atoi(tokens[5]) if tokens[4] == "bot" { getBot(id).receive(chip(v)) } else { getBin(id).receive(chip(v)) } default: id, _ := strconv.Atoi(tokens[1]) hiId, _ := strconv.Atoi(tokens[11]) lowId, _ := strconv.Atoi(tokens[6]) if tokens[10] == "bot"
else { hiReceiver[id] = getBin(hiId) } if tokens[5] == "bot" { lowReceiver[id] = getBot(lowId) } else { lowReceiver[id] = getBin(lowId) } } } output := int(getBin(0).inventory[0] * getBin(1).inventory[0] * getBin(2).inventory[0]) return strconv.Itoa(output) } func main() { input, err := ioutil.ReadFile("./2016_10.txt") check(err) fmt.Println("Problem 1: " + part_one(string(input))) fmt.Println("Problem 2: " + part_two(string(input))) }
{
	hiReceiver[id] = getBot(hiId)
}
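The bots() and bins() closures used in both parts are the central trick: each returns a get-or-create accessor over a private map, so callers never touch the map directly. The same memoizing-factory pattern in isolation; all names here are illustrative, not from the puzzle code:

// The get-or-create closure pattern behind bots() and bins() above, shown in
// isolation. All names here are illustrative, not from the puzzle code.
package main

import "fmt"

type node struct {
	id    int
	value int
}

// nodes returns an accessor that lazily creates and memoizes nodes, keeping
// the backing map private to the closure.
func nodes() func(int) *node {
	store := make(map[int]*node)
	return func(i int) *node {
		if n, ok := store[i]; ok {
			return n
		}
		n := &node{id: i}
		store[i] = n
		return n
	}
}

func main() {
	getNode := nodes()
	getNode(7).value = 42
	fmt.Println(getNode(7).value) // 42: the same *node comes back
}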
gmail_test.go
package gmail import ( "encoding/base64" "errors" "github.com/danmarg/outtake/lib" "github.com/danmarg/outtake/lib/maildir" gmail "google.golang.org/api/gmail/v1" "io/ioutil" "os" "path" "sort" "strings" "testing" ) func newTestCache() gmailCache { d, err := ioutil.TempDir("", "") if err != nil { panic(err) } f := path.Join(d, "test_outtake_cache") if c, err := lib.NewBoltCache(f); err != nil { panic(err) } else { return gmailCache{c} } } func TestComputeLabels(t *testing.T) { g := Gmail{cache: newTestCache()} g.cache.SetMsgLabels("id", []string{"a", "b"}) ls := g.computeLabels("id", []string{"c"}, []string{"b"}) sort.Strings(ls) if len(ls) != 2 || ls[0] != "a" || ls[1] != "c" { t.Errorf(`computeLabels("id", {"c"}, {"b"}) = %v, expected {"a", "c"}`, ls) } } func TestLabelsChanged(t *testing.T) { g := Gmail{cache: newTestCache()} g.cache.SetMsgLabels("id", []string{"a", "b"}) if !g.labelsChanged("id", []string{"a"}) { t.Error(`labelsChanged("id", {"a"}) = false, expected true`) } if g.labelsChanged("id", []string{"a", "b"}) { t.Error(`labelsChanged("id", {"a", "b"}) = true, expected false`) }
t.Error(`labelsChanged("id", {}) = false, expected true`) } if !g.labelsChanged("id", []string{"a", "b", "c"}) { t.Error(`labelsChanged("id", {"a", "b", "c"}) = false, expected true`) } } type testService struct { gmailService Msgs map[string]string Metadata map[string]*gmail.Message Labels *gmail.ListLabelsResponse History map[string]*gmail.ListHistoryResponse Messages map[string]*gmail.ListMessagesResponse } func (s *testService) GetRawMessage(id string) (string, error) { if m, ok := s.Msgs[id]; ok { return m, nil } return "", errors.New("not found") } func (s *testService) GetMetadata(id string) (*gmail.Message, error) { if m, ok := s.Metadata[id]; ok { return m, nil } return nil, errors.New("not found") } func (s *testService) GetLabels() (*gmail.ListLabelsResponse, error) { return s.Labels, nil } func (s *testService) GetHistory(i uint64, label, page string) (*gmail.ListHistoryResponse, error) { if m, ok := s.History[page]; ok { return m, nil } return nil, errors.New("not found") } func (s *testService) GetMessages(q, page string) (*gmail.ListMessagesResponse, error) { if m, ok := s.Messages[page]; ok { return m, nil } return nil, errors.New("not found") } func getTestClient() (*Gmail, *testService, string) { d, err := ioutil.TempDir("", "") if err != nil { panic(err) } var c lib.Cache if c, err = lib.NewBoltCache(d + "test_cache"); err != nil { panic(err) } md, err := maildir.Create(d) if err != nil { panic(err) } s := &testService{ Msgs: make(map[string]string), Metadata: make(map[string]*gmail.Message), Messages: make(map[string]*gmail.ListMessagesResponse), History: make(map[string]*gmail.ListHistoryResponse), } g := &Gmail{ dir: md, cache: gmailCache{c}, svc: s, } return g, s, d } func TestSync(t *testing.T) { c, svc, dir := getTestClient() m := base64.URLEncoding.EncodeToString([]byte( `From: [email protected] To: [email protected] Subject: Doodle! asdf`)) svc.Msgs["0x1"], svc.Msgs["0x2"], svc.Msgs["0x3"] = m, m, m svc.Metadata["0x1"], svc.Metadata["0x2"], svc.Metadata["0x3"] = &gmail.Message{}, &gmail.Message{}, &gmail.Message{} svc.Labels = &gmail.ListLabelsResponse{} svc.Messages[""] = &gmail.ListMessagesResponse{ Messages: []*gmail.Message{ {Id: "0x1"}, {Id: "0x2"}, {Id: "0x3"}}, } svc.Metadata["0x1"] = &gmail.Message{Id: "0x01", HistoryId: 1} svc.Metadata["0x2"] = &gmail.Message{Id: "0x02", HistoryId: 2} svc.Metadata["0x3"] = &gmail.Message{Id: "0x03", HistoryId: 3, LabelIds: []string{"LABEL_3"}} err := c.Sync(false, nil) if err != nil { t.Errorf(`Sync(false, nil) = %v, expected nil`, err) } // There should be three new messages in the maildir. fs, err := ioutil.ReadDir(dir + "/new") if err != nil { panic(err) } if len(fs) != 3 { t.Errorf(`Sync(true, nil) wrote %v messages, expected 3`, len(fs)) } if i := c.cache.GetHistoryIdx(); i != 3 { t.Errorf(`GetHistoryIdx() == %v, expected 3`, i) } // And one of the messages should have LABEL_3 set. k, ok := c.cache.GetMsgKey("0x3") if !ok { t.Errorf(`GetMsgKey("0x3") == false, expected true`) } f, err := c.dir.GetFile(k) if err != nil { t.Errorf(`GetFile(%v) == %v, expected no error`, k, err) } bs, err := ioutil.ReadFile(f) if err != nil { t.Errorf(`ReadFile(%v) == %v, expected no error`, f, err) } // Check contents of bs. if !strings.Contains(string(bs), "X-Keywords: LABEL_3") { t.Errorf(`Expected %v to contain X-Keywords: LABEL_3`, string(bs)) } // If we move 0x3 to ./cur and add some maildir flags like "Seen", it should still work. 
err = os.Rename(f, dir+"/cur/"+path.Base(f)+":S") if err != nil { panic(err) } // Message sync: delete 0x1, add a label to 0x2, remove LABEL_3 from 0x3, and add message 0x4. svc.History[""] = &gmail.ListHistoryResponse{ History: []*gmail.History{ { Id: 1, MessagesDeleted: []*gmail.HistoryMessageDeleted{{&gmail.Message{Id: "0x1"}}}, LabelsAdded: []*gmail.HistoryLabelAdded{{LabelIds: []string{"LABEL_2"}, Message: &gmail.Message{Id: "0x2"}}}, LabelsRemoved: []*gmail.HistoryLabelRemoved{{LabelIds: []string{"LABEL_3"}, Message: &gmail.Message{Id: "0x3"}}}, MessagesAdded: []*gmail.HistoryMessageAdded{{&gmail.Message{Id: "0x4"}}}, }, }, } // Add the new message 0x4 body. svc.Msgs["0x4"] = m // And metadata. svc.Metadata["0x4"] = &gmail.Message{} err = c.Sync(false, nil) if err != nil { t.Errorf(`Sync(false, nil) = %v, expected nil`, err) } // There should be two new messages in the maildir. fs, err = ioutil.ReadDir(dir + "/new") if err != nil { panic(err) } if len(fs) != 3 { t.Errorf(`Sync(true, nil) wrote %v messages to "new", expected 3`, len(fs)) } // And zero in "cur". fs, err = ioutil.ReadDir(dir + "/cur") if err != nil { panic(err) } if len(fs) != 0 { t.Errorf(`Sync(true, nil) wrote %v messages to "cur", expected 0`, len(fs)) } // And 0x3 should no longer have LABEL_3 set. k, ok = c.cache.GetMsgKey("0x3") if !ok { t.Errorf(`GetMsgKey("0x3") == false, expected true`) } f, err = c.dir.GetFile(k) if err != nil { t.Errorf(`GetFile(%v) == %v, expected no error`, k, err) } bs, err = ioutil.ReadFile(f) if err != nil { t.Errorf(`ReadFile(%v) == %v, expected no error`, f, err) } // Check contents of bs. if strings.Contains(string(bs), "X-Keywords: LABEL_3") { t.Errorf(`Expected %v to not contain X-Keywords: LABEL_3`, string(bs)) } // And 0x2 should have LABEL_2 set. k, ok = c.cache.GetMsgKey("0x2") if !ok { t.Errorf(`GetMsgKey("0x2") == false, expected true`) } f, err = c.dir.GetFile(k) if err != nil { t.Errorf(`GetFile(%v) == %v, expected no error`, k, err) } bs, err = ioutil.ReadFile(f) if err != nil { t.Errorf(`ReadFile(%v) == %v, expected no error`, f, err) } // Check contents of bs. if !strings.Contains(string(bs), "X-Keywords: LABEL_2") { t.Errorf(`Expected %v to contain X-Keywords: LABEL_2`, string(bs)) } }
if !g.labelsChanged("id", []string{}) {
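testService above is a map-backed fake that satisfies the client's service interface, which is what lets TestSync exercise a full sync offline. The essential shape of that pattern, reduced to a single method with hypothetical names:

// The map-backed fake pattern of testService above, reduced to one method.
// The interface and names are hypothetical stand-ins for gmailService.
package main

import (
	"errors"
	"fmt"
)

type messageSource interface {
	GetRawMessage(id string) (string, error)
}

type fakeSource struct {
	msgs map[string]string // fixtures keyed by message ID
}

func (s *fakeSource) GetRawMessage(id string) (string, error) {
	if m, ok := s.msgs[id]; ok {
		return m, nil
	}
	return "", errors.New("not found")
}

func main() {
	var src messageSource = &fakeSource{msgs: map[string]string{"0x1": "raw message"}}
	fmt.Println(src.GetRawMessage("0x1")) // raw message <nil>
	fmt.Println(src.GetRawMessage("0x9")) // "" not found
}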
fabriccli.go
/*
Copyright SecureKey Technologies Inc. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package bdd

import (
	"bytes"
	"os/exec"

	"github.com/pkg/errors"
)

const (
	fabricCLIPath = "../../.build/bin/fabric"
	homeDir       = "./.fabriccli/"
)

// FabricCLI invokes the fabric-cli command-line tool
type FabricCLI struct {
}

// NewFabricCLI returns a new FabricCLI
func
() *FabricCLI {
	return &FabricCLI{}
}

// Exec executes fabric-cli with the given args and returns the response
func (cli *FabricCLI) Exec(args ...string) (string, error) {
	var a []string
	a = append(a, "--home", homeDir)
	a = append(a, args...)

	cmd := exec.Command(fabricCLIPath, a...) // nolint: gosec
	cmd.Env = []string{"PROJECT_PATH=../.."}

	var out bytes.Buffer
	var er bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &er

	err := cmd.Start()
	if err != nil {
		return er.String(), errors.Errorf("%s: %s", err, er.Bytes())
	}

	err = cmd.Wait()
	if err != nil {
		return er.String(), errors.Errorf("%s: %s", err, er.Bytes())
	}

	return out.String(), nil
}
NewFabricCLI
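A call-site sketch for the wrapper above; only NewFabricCLI and Exec come from the code itself, while the subcommand and flags are placeholders:

// Hypothetical call site for the FabricCLI wrapper; the subcommand and flags
// are placeholders, not documented fabric-cli commands. Assumes "fmt" is
// imported alongside the package above.
func exampleExec() {
	cli := NewFabricCLI()
	out, err := cli.Exec("some-subcommand", "--flag", "value")
	if err != nil {
		// On failure, Exec returns stderr as the response string.
		fmt.Printf("fabric-cli failed: %v (stderr: %s)\n", err, out)
		return
	}
	fmt.Println(out)
}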
helper.rs
use std::{ collections::hash_map::DefaultHasher, hash::{Hash, Hasher}, net::SocketAddr, time::Duration, }; use anyhow::{anyhow, Result}; use socket2::{SockRef, TcpKeepalive}; use tokio::net::{lookup_host, TcpStream, ToSocketAddrs, UdpSocket}; use tracing::trace; // Tokio hesitates to expose this option...So we have to do it on our own :( // The good news is that using socket2 it can be easily done, without losing portability. // See https://github.com/tokio-rs/tokio/issues/3082 pub fn try_set_tcp_keepalive( conn: &TcpStream, keepalive_duration: Duration, keepalive_interval: Duration, ) -> Result<()> { let s = SockRef::from(conn); let keepalive = TcpKeepalive::new() .with_time(keepalive_duration) .with_interval(keepalive_interval); trace!( "Set TCP keepalive {:?} {:?}", keepalive_duration, keepalive_interval ); Ok(s.set_tcp_keepalive(&keepalive)?) } #[allow(dead_code)] pub fn feature_not_compile(feature: &str) -> ! { panic!( "The feature '{}' is not compiled in this binary. Please re-compile rathole", feature ) } /// Create a UDP socket and connect to `addr` pub async fn
<A: ToSocketAddrs>(addr: A) -> Result<UdpSocket> { let addr = lookup_host(addr) .await? .next() .ok_or(anyhow!("Failed to lookup the host"))?; let bind_addr = match addr { SocketAddr::V4(_) => "0.0.0.0:0", SocketAddr::V6(_) => ":::0", }; let s = UdpSocket::bind(bind_addr).await?; s.connect(addr).await?; Ok(s) } // FIXME: These functions are for the load balance for UDP. But not used for now. #[allow(dead_code)] pub fn hash_socket_addr(a: &SocketAddr) -> u64 { let mut hasher = DefaultHasher::new(); a.hash(&mut hasher); hasher.finish() } // Wait for the stabilization of https://doc.rust-lang.org/std/primitive.i64.html#method.log2 #[allow(dead_code)] fn log2_floor(x: usize) -> u8 { (x as f64).log2().floor() as u8 } #[allow(dead_code)] pub fn floor_to_pow_of_2(x: usize) -> usize { if x == 1 { return 1; } let w = log2_floor(x); 1 << w } #[cfg(test)] mod test { use crate::helper::{floor_to_pow_of_2, log2_floor}; #[test] fn test_log2_floor() { let t = [ (2, 1), (3, 1), (4, 2), (8, 3), (9, 3), (15, 3), (16, 4), (1023, 9), (1024, 10), (2000, 10), (2048, 11), ]; for t in t { assert_eq!(log2_floor(t.0), t.1); } } #[test] fn test_floor_to_pow_of_2() { let t = [ (1 as usize, 1 as usize), (2, 2), (3, 2), (4, 4), (5, 4), (15, 8), (31, 16), (33, 32), (1000, 512), (1500, 1024), (2300, 2048), ]; for t in t { assert_eq!(floor_to_pow_of_2(t.0), t.1); } } }
udp_connect
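udp_connect above resolves the host, binds a wildcard socket of the matching address family, and connects it so plain send/recv work without per-packet addresses. For comparison, Go's standard library collapses that sequence into a single call; a sketch of the equivalent, with a placeholder address:

// Rough Go equivalent of udp_connect above: net.Dial("udp", ...) resolves the
// host and returns a connected UDP socket in one step. The address is a
// placeholder.
package main

import (
	"log"
	"net"
)

func main() {
	conn, err := net.Dial("udp", "example.com:5353")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// The socket is connected, so Write/Read need no per-packet address.
	if _, err := conn.Write([]byte("ping")); err != nil {
		log.Fatal(err)
	}
}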
build_test.go
// Copyright (c) 2019 Baidu, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package condition import ( "net" "reflect" "regexp" "testing" ) import ( "github.com/baidu/bfe/bfe_basic" "github.com/baidu/bfe/bfe_http" "github.com/baidu/bfe/bfe_util/net_util" ) var ( req bfe_basic.Request = bfe_basic.Request{ Session: &bfe_basic.Session{}, HttpRequest: &bfe_http.Request{}, } ) func stringAddr(x string) *string { return &x } func intAddr(x int) *int { return &x } func compileRegExpr(expr string) *regexp.Regexp { reg, err := regexp.Compile(expr) if err != nil { return nil } return reg } func IPv4ToUint32(ipStr string) uint32 { ipUint32, _ := net_util.IPv4StrToUint32(ipStr) return ipUint32 } var buildPrimitiveTests = []struct { name string cond string buildCond Condition buildErr bool }{ { "testWrongName", "req_path_in1", nil, true, }, { "testDefaultBoolParam", "req_path_in(\"/ABC\", false)", &PrimitiveCond{ name: "req_path_in", fetcher: &PathFetcher{}, matcher: &InMatcher{ patterns: []string{"/ABC"}, foldCase: false}, }, false, }, { "testWrongParamType", "req_path_in(\"notbool\")", nil, true, }, { "testWrongVriable", "a && b", nil, true, }, { "testDefaultTrue", "default_t()", &DefaultTrueCond{}, false, }, { "testBuildReqPatIn", "req_path_in(\"/abc\", true)", &PrimitiveCond{ name: "req_path_in", fetcher: &PathFetcher{}, matcher: &InMatcher{ patterns: []string{"/ABC"}, foldCase: true}, }, false, }, { "testBuildQueRegMatch", "req_query_value_regmatch(\"abc\", \"123\")", &PrimitiveCond{ name: "req_query_value_regmatch", fetcher: &QueryValueFetcher{ key: "abc", }, matcher: &RegMatcher{ regex: compileRegExpr("123"), }, }, false, }, { "testQueryExist", "req_query_exist()", &QueryExistMatcher{}, false, }, { "testBuildUrlRegMatch", "req_url_regmatch(\"123\")", &PrimitiveCond{ name: "req_url_regmatch", fetcher: &UrlFetcher{}, matcher: &RegMatcher{ regex: compileRegExpr("123"), }, }, false, }, { "testBuildUrlRegMatchcase1", "req_url_regmatch(`123`)", &PrimitiveCond{ name: "req_url_regmatch", fetcher: &UrlFetcher{}, matcher: &RegMatcher{ regex: compileRegExpr("123"), }, }, false, }, { "testBuildVIPIn", "req_vip_in(\"1.1.1.1|2001:DB8:2de::e13\")", &PrimitiveCond{ name: "req_vip_in", fetcher: &VIPFetcher{}, matcher: &IpInMatcher{ patterns: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("2001:DB8:2de::e13")}, }, }, false, }, { "testBuildVIPInWrongCase", "req_vip_in(\"1.1.1.1|2001::DB8:2de::e13\")", nil, true, }, { "testBuildCIPMatch", "req_cip_range(\"1.1.1.1\", \"2.2.2.2\")", &PrimitiveCond{ name: "req_cip_range", fetcher: &CIPFetcher{}, matcher: &IPMatcher{ startIP: net.ParseIP("1.1.1.1"), endIP: net.ParseIP("2.2.2.2"), }, }, false, }, { "testBuildCIPMatchIpv6", "req_cip_range(\"2001:DB8:2de::e13\", \"2002:DB8:2de::e13\")", &PrimitiveCond{ name: "req_cip_range", fetcher: &CIPFetcher{}, matcher: &IPMatcher{ startIP: net.ParseIP("2001:DB8:2de::e13"), endIP: net.ParseIP("2002:DB8:2de::e13"), }, }, false, }, { "testBuildCIPMatch_wrongCase1", "req_cip_range(\"1.1.1.1\", \"1.1.1.0\")", nil, true, }, { 
"testBuildCIPMatch_wrongCase1_notip", "req_cip_range(\"1.1.1\", \"1.1.1.0\")", nil, true, }, { "testBuildCIPMatch_wrongCase_ipv4_ipv6", "req_cip_range(\"1.1.1.1\", \"2001:DB8:2de::e13\")", nil, true, }, { "testBuildCIPMatch_wrongCase_ipv6", "req_cip_range(\"2002:DB8:2de::e13\", \"2001:DB8:2de::e13\")", nil, true, }, } func TestBuildPrimitive(t *testing.T) { for _, buildPrimitiveTest := range buildPrimitiveTests { t.Logf("run test %s", buildPrimitiveTest.name) buildC, err := Build(buildPrimitiveTest.cond) if buildPrimitiveTest.buildErr { if err == nil { t.Errorf("build primitive should return err") } t.Logf("build err as expected [%s]", err) } else { if err != nil { t.Errorf("build should success %s", err) } // check equal // hack:ignore node field compare if c, ok := buildC.(*PrimitiveCond); ok { c.node = nil } if !reflect.DeepEqual(buildC, buildPrimitiveTest.buildCond) { t.Errorf("build cond not equal [%v] [%v]", buildC, buildPrimitiveTest.buildCond) } } } } func TestBuildReqVipIn(t *testing.T) { buildC, err := Build("req_vip_in(\"1.1.1.1|2001:DB8:2de::e13\")") if err != nil { t.Errorf("build failed, req_vip_in(\"1.1.1.1|2001:DB8:2de::e13\")") } req.Session.Vip = net.IPv4(1, 1, 1, 1).To4() if !buildC.Match(&req) { t.Errorf("1.1.1.1 not match req_vip_in(\"1.1.1.1|2001:DB8:2de::e13\")") } req.Session.Vip = net.ParseIP("2001:0DB8:02de:0::e13") if !buildC.Match(&req) { t.Errorf("2001:DB8:2de::e13 not match req_vip_in(\"1.1.1.1|2001:DB8:2de::e13\")") } req.Session.Vip = net.ParseIP("2002:0DB8:02de:0::e13") if buildC.Match(&req) { t.Errorf("2002:DB8:2de::e13 not match req_vip_in(\"1.1.1.1|2001:DB8:2de::e13\")") } } func TestBuildReqVipRange(t *testing.T)
{ buildC, err := Build("req_vip_range(\"1.1.1.1\", \"4.4.4.4\")") if err != nil { t.Errorf("build failed, req_vip_range(\"1.1.1.1\", \"4.4.4.4\")") } req.Session.Vip = net.IPv4(3, 255, 255, 255).To4() if !buildC.Match(&req) { t.Errorf("3.255.255.255 should match req_vip_range(\"1.1.1.1\", \"4.4.4.4\")") } buildC, err = Build("req_vip_range(\"2001:0DB8:02de:0::e13\", \"2002:0DB8:02de:0::e13\")") if err != nil { t.Errorf("build failed, req_vip_range(\"2001:0DB8:02de:0::e13\", \"2002:0DB8:02de:0::e13\")") } req.Session.Vip = net.ParseIP("2001:ffff::ffff") if !buildC.Match(&req) { t.Errorf("2001:ffff::ffff should match req_vip_range(\"2001:0DB8:02de:0::e13\", \"2002:0DB8:02de:0::e13\")") } }
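An aside on the wrong-cases above: req_cip_range rejects a range when either bound fails to parse as an IP, when the bounds mix address families, or when the start exceeds the end. A minimal Python sketch of those same validation rules (illustrative only, not BFE code):

import ipaddress

def validate_ip_range(start_str, end_str):
    # both bounds must parse as IP addresses
    try:
        start = ipaddress.ip_address(start_str)
        end = ipaddress.ip_address(end_str)
    except ValueError:
        return False  # the "notip" case
    # both bounds must be the same address family
    if start.version != end.version:
        return False  # the "ipv4_ipv6" case
    # the range must not be inverted
    return start <= end  # the "wrongCase1" / "wrongCase_ipv6" cases

assert not validate_ip_range("1.1.1", "1.1.1.0")
assert not validate_ip_range("1.1.1.1", "2001:DB8:2de::e13")
assert not validate_ip_range("2002:DB8:2de::e13", "2001:DB8:2de::e13")
assert validate_ip_range("1.1.1.1", "2.2.2.2")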
App.js
import React from 'react'; import ReactAnime from 'react-animejs'; import { Hello } from '../components/Hello'; import logo from '../assets/img/logo.svg'; import styled from 'styled-components'; import tw from 'tailwind.macro'; const { Anime } = ReactAnime; const Logo = styled.img` ${tw`block w-1/4 ml-auto mr-auto`}; `;
function App() { return ( <div className="App"> <Anime initial={[ { targets: '#logo', translateY: [-50, 0], easing: 'linear' } ]} > <Logo id="logo" src={logo} alt="logo" /> </Anime> <Hello /> </div> ); } export default App;
client.go
// Licensed to Elasticsearch B.V. under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. Elasticsearch B.V. licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package v2 import ( "bytes" "encoding/binary" "errors" "fmt" "io" "net" "time" "github.com/klauspost/compress/zlib" protocol "github.com/elastic/go-lumber/protocol/v2" ) // Client implements the low-level lumberjack wire protocol. SyncClient and // AsyncClient should be used for publishing events to lumberjack endpoint. type Client struct { conn net.Conn wb *bytes.Buffer opts options } var ( codeWindowSize = []byte{protocol.CodeVersion, protocol.CodeWindowSize} codeCompressed = []byte{protocol.CodeVersion, protocol.CodeCompressed} codeJSONDataFrame = []byte{protocol.CodeVersion, protocol.CodeJSONDataFrame} empty4 = []byte{0, 0, 0, 0} ) var ( // ErrProtocolError is returned if an protocol error was detected in the // conversation with lumberjack server. ErrProtocolError = errors.New("lumberjack protocol error") ) // NewWithConn create a new lumberjack client with an existing and active // connection. func NewWithConn(c net.Conn, opts ...Option) (*Client, error) { o, err := applyOptions(opts) if err != nil { return nil, err } return &Client{ conn: c, wb: bytes.NewBuffer(nil), opts: o, }, nil } // Dial connects to the lumberjack server and returns new Client. // Returns an error if connection attempt fails. func Dial(address string, opts ...Option) (*Client, error) { o, err := applyOptions(opts) if err != nil { return nil, err } dialer := net.Dialer{Timeout: o.timeout} return DialWith(dialer.Dial, address, opts...) } // DialWith uses provided dialer to connect to lumberjack server returning a // new Client. Returns error if connection attempt fails. func DialWith( dial func(network, address string) (net.Conn, error), address string, opts ...Option, ) (*Client, error) { c, err := dial("tcp", address) if err != nil { return nil, err } client, err := NewWithConn(c, opts...) if err != nil { _ = c.Close() // ignore error return nil, err } return client, nil } // Close closes underlying network connection func (c *Client) Close() error { return c.conn.Close() } // Send attempts to JSON-encode and send all events without waiting for ACK. // Returns error if sending or serialization fails. func (c *Client) Send(data []interface{}) error { if len(data) == 0 { return nil } // 1. create window message c.wb.Reset() _, _ = c.wb.Write(codeWindowSize) writeUint32(c.wb, uint32(len(data))) // 2. 
serialize data (payload) if c.opts.compressLvl > 0 { // Compressed Data Frame: // version: uint8 = '2' // code: uint8 = 'C' // payloadSz: uint32 // payload: compressed payload _, _ = c.wb.Write(codeCompressed) // write compressed header offSz := c.wb.Len() _, _ = c.wb.Write(empty4) offPayload := c.wb.Len() // compress payload w, err := zlib.NewWriterLevel(c.wb, c.opts.compressLvl) if err != nil { return err } if err := c.serialize(w, data); err != nil { return err } if err := w.Close(); err != nil { return err } // write compress header payloadSz := c.wb.Len() - offPayload binary.BigEndian.PutUint32(c.wb.Bytes()[offSz:], uint32(payloadSz)) } else { if err := c.serialize(c.wb, data); err != nil { return err } } // 3. send buffer if err := c.setWriteDeadline(); err != nil { return err } payload := c.wb.Bytes() for len(payload) > 0 { n, err := c.conn.Write(payload) if err != nil { return err } payload = payload[n:] } return nil } // ReceiveACK awaits and reads next ACK response or error. Note: Server might // send partial ACK, in which case client must continue reading ACKs until last send // window size is matched. Use AwaitACK when waiting for a known sequence number. func (c *Client) ReceiveACK() (uint32, error) { if err := c.setReadDeadline(); err != nil { return 0, err } var msg [6]byte ackbytes := 0 for ackbytes < 6 { n, err := c.conn.Read(msg[ackbytes:]) if err != nil { return 0, err } ackbytes += n } // validate response isACK := msg[0] == protocol.CodeVersion && msg[1] == protocol.CodeACK if !isACK { return 0, ErrProtocolError } seq := binary.BigEndian.Uint32(msg[2:]) return seq, nil } // AwaitACK waits for count elements being ACKed. Returns last known ACK on error. func (c *Client) AwaitACK(count uint32) (uint32, error) { var ackSeq uint32 var err error // read until all acks for ackSeq < count { ackSeq, err = c.ReceiveACK() if err != nil { return ackSeq, err } } if ackSeq > count { return count, fmt.Errorf( "invalid sequence number received (seq=%v, expected=%v)", ackSeq, count) } return ackSeq, nil } func (c *Client) serialize(out io.Writer, data []interface{}) error { for i, d := range data { b, err := c.opts.encoder(d) if err != nil { return err } // Write JSON Data Frame: // version: uint8 = '2' // code: uint8 = 'J' // seq: uint32 // payloadLen (bytes): uint32 // payload: JSON document _, _ = out.Write(codeJSONDataFrame) writeUint32(out, uint32(i)+1) writeUint32(out, uint32(len(b))) _, _ = out.Write(b) } return nil } func (c *Client) setWriteDeadline() error { return c.conn.SetWriteDeadline(time.Now().Add(c.opts.timeout)) } func (c *Client) setReadDeadline() error { return c.conn.SetReadDeadline(time.Now().Add(c.opts.timeout)) } func writeUint32(out io.Writer, v uint32)
{ _ = binary.Write(out, binary.BigEndian, v) }
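For reference, the byte layout Send produces can be sketched in a few lines of Python. The '2', 'J', and 'C' codes are taken from the frame comments above; 'W' for the window-size frame and 'A' for ACK are assumed from the conventional lumberjack v2 protocol and should be checked against protocol/v2:

import json
import struct

def window_frame(count):
    # version '2' + code 'W' (assumed) + uint32 BE window size
    return b"2W" + struct.pack(">I", count)

def json_data_frame(seq, event):
    payload = json.dumps(event).encode("utf-8")
    # version '2' + code 'J' + uint32 BE sequence + uint32 BE payload length + payload
    return b"2J" + struct.pack(">II", seq, len(payload)) + payload

batch = [{"message": "hello"}, {"message": "world"}]
wire = window_frame(len(batch)) + b"".join(
    json_data_frame(i + 1, e) for i, e in enumerate(batch))
# a conforming reader replies with b"2A" + struct.pack(">I", seq) as events are processed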
development.py
from .base import * DEBUG = get_env_variable('DEBUG_MODE') DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': get_env_variable('DATABASE_NAME'), 'USER': get_env_variable('DATABASE_USER'), 'PASSWORD': get_env_variable('DATABASE_PASSWORD'), 'HOST': get_env_variable('DATABASE_HOST'), 'PORT': get_env_variable('DATABASE_PORT'), # Set to empty string for default.
# toggle sentry: if RAVEN_CONFIG is left unset, sentry is never triggered # if DEBUG: # RAVEN_CONFIG = dict() # enable/disable querycount if True: MIDDLEWARE += [ 'querycount.middleware.QueryCountMiddleware', ] QUERYCOUNT = { 'THRESHOLDS': { 'MEDIUM': 50, 'HIGH': 200, 'MIN_TIME_TO_LOG': 0, 'MIN_QUERY_COUNT_TO_LOG': 0 }, 'IGNORE_REQUEST_PATTERNS': [], 'IGNORE_SQL_PATTERNS': [], 'DISPLAY_DUPLICATES': 20, } # enable/disable django debug toolbar if True: INSTALLED_APPS += [ 'debug_toolbar', ] MIDDLEWARE += [ 'debug_toolbar.middleware.DebugToolbarMiddleware', ] # django debug toolbar allowed internal ips INTERNAL_IPS = ['127.0.0.1']
} }
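get_env_variable comes from .base and is not shown in this file; a common Django implementation of that helper (an assumption, since base.py is not included here) looks like:

import os
from django.core.exceptions import ImproperlyConfigured

def get_env_variable(var_name):
    # fail loudly at startup instead of running with a missing setting
    try:
        return os.environ[var_name]
    except KeyError:
        raise ImproperlyConfigured("Set the %s environment variable" % var_name)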
cluster_xyz.py
from run import MainHandler from .cluster import ClusterHandler from util import * class ClusterXYZHandler(ClusterHandler): def __init__(self, args, **kwargs): super().__init__(args, **kwargs) self.n_stages = self.n_main_stages + self.n_substages n_substages = ClusterHandler.n_substages #  one more than ClusterHandler def run_command(self): super().run_command() def save_cluster_xyz(self): z = self.call_para("R_to_xyz", "z", args=[self, self.dataset]) self.z = z dir_path = os.path.join(self.storage_dir, "cluster_xyz") if not os.path.exists(dir_path): os.mkdir(dir_path) var_index_R = self.call_para("R_to_xyz", "var_index_R") R = self.vars[var_index_R] var_index_F = self.call_para("R_to_xyz", "var_index_F") F = self.vars[var_index_F] var_index_E = self.call_para("R_to_xyz", "var_index_E") E = self.vars[var_index_E] cl_ind = self.cluster_indices for i in range(len(cl_ind)): cl = np.array(cl_ind[i], dtype=np.int64) self.save_xyz_index(i, R[cl], F[cl], E[cl]) def save_xyz_index(self, i, R, F, E): file_name = f"cluster_{i}.xyz" path = os.path.join(self.storage_dir, "cluster_xyz", file_name) file = open(path, "w+") for j in range(len(R)): r_j, f_j, e_j = R[j], F[j], E[j] s = self.RFE_to_xyz_single(r_j, f_j, e_j)
# Energy=-620726.002662 Properties=species:S:1:pos:R:3:forces:R:3 def RFE_to_xyz_single(self, R, F, E): z = self.z s = f"{len(z)}\n" s += f"{E[0]:.5e}\n" for i in range(0, len(R), 3): s += f"{z[i//3]:<3}{R[i]:<13.5e}{R[i+1]:<13.5e}{R[i+2]:<13.5e}" s += f"{F[i]:<13.5e}{F[i+1]:<13.5e}{F[i+2]:<13.5e}\n" return s def save_command(self): super().save_command() from time import time t0 = time() self.save_cluster_xyz() print(f"Took {time() - t0} seconds")
file.write(s) file.close()
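To make the frame layout concrete, here is RFE_to_xyz_single restated as a standalone sketch with hypothetical one-molecule data (z holds one atomic number per atom, R and F are flat 3N-length arrays, E holds a single energy):

import numpy as np

def rfe_to_xyz_single(z, R, F, E):
    # atom count, energy line, then one "z x y z fx fy fz" row per atom
    s = f"{len(z)}\n{E[0]:.5e}\n"
    for i in range(0, len(R), 3):
        s += f"{z[i//3]:<3}{R[i]:<13.5e}{R[i+1]:<13.5e}{R[i+2]:<13.5e}"
        s += f"{F[i]:<13.5e}{F[i+1]:<13.5e}{F[i+2]:<13.5e}\n"
    return s

# a water-like frame with zeroed coordinates and forces
print(rfe_to_xyz_single([8, 1, 1], np.zeros(9), np.zeros(9), np.array([-620726.002662])))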
preprocess.py
from keras.applications.resnet50 import ResNet50 from keras.preprocessing import image from keras.applications.resnet50 import preprocess_input from keras.models import Model import numpy as np import time import cv2 def collect_demo(path, num_patch, aux_dim, action_dim): for i in range(num_patch): path_patch = path + str(i) + "/" demo_name = path_patch + "demo.txt" demo_raw = open(demo_name, 'r').readlines() state_name = path_patch + "states.txt" state_raw = open(state_name, 'r').readlines() pa = np.zeros(6, dtype=np.float32) print("Loading patch %d ..." % i) for j in range(0, len(demo_raw)): action_data = np.array(demo_raw[j].strip().split(" ")).astype(np.float32) state_data = np.array(state_raw[j].strip().split(" ")).astype(np.float32) aux = np.expand_dims([state_data[-3], state_data[-1]], axis=0).astype(np.float32) action = np.expand_dims(action_data[:], axis=0).astype(np.float32) img_path = path_patch + str(j) + ".jpg" img = image.load_img(img_path) img = image.img_to_array(img) img = cv2.resize(img, (256, 256)) #img = img[40:, :, :] ''' if j < 130 and i == 1: img_cv2 = cv2.imread(img_path) img_cv2 = cv2.resize(img_cv2, (200, 150)) img_cv2 = img_cv2[40:, :, :] cv2.imshow('image', cv2.cvtColor(img, cv2.COLOR_RGB2BGR)/255.0) cv2.waitKey(0) ''' img = np.expand_dims(img, axis=0).astype(np.uint8) if j == 0: auxs_tmp = aux actions_tmp = action imgs_tmp = img else: auxs_tmp = np.concatenate((auxs_tmp, aux), axis=0) actions_tmp = np.concatenate((actions_tmp, action), axis=0) imgs_tmp = np.concatenate((imgs_tmp, img), axis=0) if i == 0: auxs = auxs_tmp actions = actions_tmp imgs = imgs_tmp else: auxs = np.concatenate((auxs, auxs_tmp), axis=0) actions = np.concatenate((actions, actions_tmp), axis=0) imgs = np.concatenate((imgs, imgs_tmp), axis=0) print("Current total:", imgs.shape, auxs.shape, actions.shape) print("Images:", imgs.shape, "Auxs:", auxs.shape, "Actions:", actions.shape) return imgs, auxs, actions def normalize(x):
def main(): aux_dim = 66 action_dim = 3 num_patch = 240 #demo_path = "/home/yunzhu/Desktop/human_low_case_1/demo_" demo_path = "/home/zhiyang/Desktop/intention/reacher/rl_demo/demo_" imgs, auxs, actions = collect_demo(demo_path, num_patch, aux_dim, action_dim) auxs = normalize(auxs) #np.savez_compressed("/home/zhiyang/Desktop/intention/reacher/rl_demo/demo.npz", #imgs=imgs, auxs=auxs, actions=actions) print("Finished.") if __name__ == "__main__": main()
x[:, 0:4] /= 200. return x
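One efficiency note on collect_demo: calling np.concatenate inside the loop copies the accumulated arrays on every iteration, which is quadratic in the patch size. A sketch of the usual fix (hypothetical helper, same result):

import numpy as np

def stack_samples(samples):
    # samples yields (aux, action, img) arrays, each shaped (1, ...)
    auxs, actions, imgs = [], [], []
    for aux, action, img in samples:
        auxs.append(aux)
        actions.append(action)
        imgs.append(img)
    # one concatenate per output instead of one per sample
    return (np.concatenate(auxs, axis=0),
            np.concatenate(actions, axis=0),
            np.concatenate(imgs, axis=0))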
liveness.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! * A classic liveness analysis based on dataflow over the AST. Computes, * for each local variable in a function, whether that variable is live * at a given point. Program execution points are identified by their * id. * * # Basic idea * * The basic model is that each local variable is assigned an index. We * represent sets of local variables using a vector indexed by this * index. The value in the vector is either 0, indicating the variable * is dead, or the id of an expression that uses the variable. * * We conceptually walk over the AST in reverse execution order. If we * find a use of a variable, we add it to the set of live variables. If * we find an assignment to a variable, we remove it from the set of live * variables. When we have to merge two flows, we take the union of * those two flows---if the variable is live on both paths, we simply * pick one id. In the event of loops, we continue doing this until a * fixed point is reached. * * ## Checking initialization * * At the function entry point, all variables must be dead. If this is * not the case, we can report an error using the id found in the set of * live variables, which identifies a use of the variable which is not * dominated by an assignment. * * ## Checking moves * * After each explicit move, the variable must be dead. * * ## Computing last uses * * Any use of the variable where the variable is dead afterwards is a * last use. * * # Implementation details * * The actual implementation contains two (nested) walks over the AST. * The outer walk has the job of building up the ir_maps instance for the * enclosing function. On the way down the tree, it identifies those AST * nodes and variable IDs that will be needed for the liveness analysis * and assigns them contiguous IDs. The liveness id for an AST node is * called a `live_node` (it's a newtype'd uint) and the id for a variable * is called a `variable` (another newtype'd uint). * * On the way back up the tree, as we are about to exit from a function * declaration we allocate a `liveness` instance. Now that we know * precisely how many nodes and variables we need, we can allocate all * the various arrays that we will need to precisely the right size. We then * perform the actual propagation on the `liveness` instance. * * This propagation is encoded in the various `propagate_through_*()` * methods. It effectively does a reverse walk of the AST; whenever we * reach a loop node, we iterate until a fixed point is reached. * * ## The `Users` struct * * At each live node `N`, we track three pieces of information for each * variable `V` (these are encapsulated in the `Users` struct): * * - `reader`: the `LiveNode` ID of some node which will read the value * that `V` holds on entry to `N`. Formally: a node `M` such * that there exists a path `P` from `N` to `M` where `P` does not * write `V`. If the `reader` is `invalid_node()`, then the current * value will never be read (the variable is dead, essentially). 
* * - `writer`: the `LiveNode` ID of some node which will write the * variable `V` and which is reachable from `N`. Formally: a node `M` * such that there exists a path `P` from `N` to `M` and `M` writes * `V`. If the `writer` is `invalid_node()`, then there is no writer * of `V` that follows `N`. * * - `used`: a boolean value indicating whether `V` is *used*. We * distinguish a *read* from a *use* in that a *use* is some read that * is not just used to generate a new value. For example, `x += 1` is * a read but not a use. This is used to generate better warnings. * * ## Special Variables * * We generate various special variables for various, well, special purposes. * These are described in the `specials` struct: * * - `exit_ln`: a live node that is generated to represent every 'exit' from * the function, whether it be by explicit return, fail, or other means. * * - `fallthrough_ln`: a live node that represents a fallthrough * * - `no_ret_var`: a synthetic variable that is only 'read' from, the * fallthrough node. This allows us to detect functions where we fail * to return explicitly. */ use middle::def::*; use middle::freevars; use middle::mem_categorization::Typer; use middle::pat_util; use middle::ty; use lint; use util::nodemap::NodeMap; use std::fmt; use std::gc::Gc; use std::io; use std::mem::transmute; use std::rc::Rc; use std::str; use std::uint; use syntax::ast::*; use syntax::codemap::{BytePos, original_sp, Span}; use syntax::parse::token::special_idents; use syntax::parse::token; use syntax::print::pprust::{expr_to_string, block_to_string}; use syntax::{visit, ast_util}; use syntax::visit::{Visitor, FnKind}; #[deriving(PartialEq)] struct Variable(uint); #[deriving(PartialEq)] struct LiveNode(uint); impl Variable { fn get(&self) -> uint { let Variable(v) = *self; v } } impl LiveNode { fn get(&self) -> uint { let LiveNode(v) = *self; v } } impl Clone for LiveNode { fn clone(&self) -> LiveNode { LiveNode(self.get()) } } #[deriving(PartialEq)] enum LiveNodeKind { FreeVarNode(Span), ExprNode(Span), VarDefNode(Span), ExitNode } fn live_node_kind_to_string(lnk: LiveNodeKind, cx: &ty::ctxt) -> String { let cm = cx.sess.codemap(); match lnk { FreeVarNode(s) => { format!("Free var node [{}]", cm.span_to_string(s)) } ExprNode(s) => { format!("Expr node [{}]", cm.span_to_string(s)) } VarDefNode(s) => { format!("Var def node [{}]", cm.span_to_string(s)) } ExitNode => "Exit node".to_string(), } } impl<'a> Visitor<()> for IrMaps<'a> { fn visit_fn(&mut self, fk: &FnKind, fd: &FnDecl, b: &Block, s: Span, n: NodeId, _: ()) { visit_fn(self, fk, fd, b, s, n); } fn visit_local(&mut self, l: &Local, _: ()) { visit_local(self, l); } fn visit_expr(&mut self, ex: &Expr, _: ()) { visit_expr(self, ex); } fn visit_arm(&mut self, a: &Arm, _: ()) { visit_arm(self, a); } } pub fn check_crate(tcx: &ty::ctxt, krate: &Crate) { visit::walk_crate(&mut IrMaps::new(tcx), krate, ()); tcx.sess.abort_if_errors(); } impl fmt::Show for LiveNode { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "ln({})", self.get()) } } impl fmt::Show for Variable { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "v({})", self.get()) } } // ______________________________________________________________________ // Creating ir_maps // // This is the first pass and the one that drives the main // computation. It walks up and down the IR once. On the way down, // we count for each function the number of variables as well as // liveness nodes. 
A liveness node is basically an expression or // capture clause that does something of interest: either it has // interesting control flow or it uses/defines a local variable. // // On the way back up, at each function node we create liveness sets // (we now know precisely how big to make our various vectors and so // forth) and then do the data-flow propagation to compute the set // of live variables at each program point. // // Finally, we run back over the IR one last time and, using the // computed liveness, check various safety conditions. For example, // there must be no live nodes at the definition site for a variable // unless it has an initializer. Similarly, each non-mutable local // variable must not be assigned if there is some successor // assignment. And so forth. impl LiveNode { fn is_valid(&self) -> bool { self.get() != uint::MAX } } fn invalid_node() -> LiveNode { LiveNode(uint::MAX) } struct CaptureInfo { ln: LiveNode, var_nid: NodeId } struct LocalInfo { id: NodeId, ident: Ident } enum VarKind { Arg(NodeId, Ident), Local(LocalInfo), ImplicitRet } struct IrMaps<'a> { tcx: &'a ty::ctxt, num_live_nodes: uint, num_vars: uint, live_node_map: NodeMap<LiveNode>, variable_map: NodeMap<Variable>, capture_info_map: NodeMap<Rc<Vec<CaptureInfo>>>, var_kinds: Vec<VarKind>, lnks: Vec<LiveNodeKind>, } impl<'a> IrMaps<'a> { fn new(tcx: &'a ty::ctxt) -> IrMaps<'a> { IrMaps { tcx: tcx, num_live_nodes: 0, num_vars: 0, live_node_map: NodeMap::new(), variable_map: NodeMap::new(), capture_info_map: NodeMap::new(), var_kinds: Vec::new(), lnks: Vec::new(), } } fn add_live_node(&mut self, lnk: LiveNodeKind) -> LiveNode { let ln = LiveNode(self.num_live_nodes); self.lnks.push(lnk); self.num_live_nodes += 1; debug!("{} is of kind {}", ln.to_string(), live_node_kind_to_string(lnk, self.tcx)); ln } fn add_live_node_for_node(&mut self, node_id: NodeId, lnk: LiveNodeKind) { let ln = self.add_live_node(lnk); self.live_node_map.insert(node_id, ln); debug!("{} is node {}", ln.to_string(), node_id); } fn add_variable(&mut self, vk: VarKind) -> Variable { let v = Variable(self.num_vars); self.var_kinds.push(vk); self.num_vars += 1; match vk { Local(LocalInfo { id: node_id, .. }) | Arg(node_id, _) => { self.variable_map.insert(node_id, v); }, ImplicitRet => {} } debug!("{} is {:?}", v.to_string(), vk); v } fn variable(&self, node_id: NodeId, span: Span) -> Variable { match self.variable_map.find(&node_id) { Some(&var) => var, None => { self.tcx .sess .span_bug(span, format!("no variable registered for id {}", node_id).as_slice()); } } } fn variable_name(&self, var: Variable) -> String { match self.var_kinds.get(var.get()) { &Local(LocalInfo { ident: nm, .. 
}) | &Arg(_, nm) => { token::get_ident(nm).get().to_string() }, &ImplicitRet => "<implicit-ret>".to_string() } } fn set_captures(&mut self, node_id: NodeId, cs: Vec<CaptureInfo>) { self.capture_info_map.insert(node_id, Rc::new(cs)); } fn lnk(&self, ln: LiveNode) -> LiveNodeKind { *self.lnks.get(ln.get()) } } impl<'a> Visitor<()> for Liveness<'a> { fn visit_fn(&mut self, fk: &FnKind, fd: &FnDecl, b: &Block, s: Span, n: NodeId, _: ()) { check_fn(self, fk, fd, b, s, n); } fn visit_local(&mut self, l: &Local, _: ()) { check_local(self, l); } fn visit_expr(&mut self, ex: &Expr, _: ()) { check_expr(self, ex); } fn visit_arm(&mut self, a: &Arm, _: ()) { check_arm(self, a); } } fn visit_fn(ir: &mut IrMaps, fk: &FnKind, decl: &FnDecl, body: &Block, sp: Span, id: NodeId) { debug!("visit_fn: id={}", id); let _i = ::util::common::indenter(); // swap in a new set of IR maps for this function body: let mut fn_maps = IrMaps::new(ir.tcx); unsafe { debug!("creating fn_maps: {}", transmute::<&IrMaps, *const IrMaps>(&fn_maps)); } for arg in decl.inputs.iter() { pat_util::pat_bindings(&ir.tcx.def_map, &*arg.pat, |_bm, arg_id, _x, path1| { debug!("adding argument {}", arg_id); let ident = path1.node; fn_maps.add_variable(Arg(arg_id, ident)); }) }; // gather up the various local variables, significant expressions, // and so forth: visit::walk_fn(&mut fn_maps, fk, decl, body, sp, ()); // Special nodes and variables: // - exit_ln represents the end of the fn, either by return or fail // - implicit_ret_var is a pseudo-variable that represents // an implicit return let specials = Specials { exit_ln: fn_maps.add_live_node(ExitNode), fallthrough_ln: fn_maps.add_live_node(ExitNode), no_ret_var: fn_maps.add_variable(ImplicitRet) }; // compute liveness let mut lsets = Liveness::new(&mut fn_maps, specials); let entry_ln = lsets.compute(decl, body); // check for various error conditions lsets.visit_block(body, ()); lsets.check_ret(id, sp, fk, entry_ln, body); lsets.warn_about_unused_args(decl, entry_ln); } fn visit_local(ir: &mut IrMaps, local: &Local) { pat_util::pat_bindings(&ir.tcx.def_map, &*local.pat, |_, p_id, sp, path1| { debug!("adding local variable {}", p_id); let name = path1.node; ir.add_live_node_for_node(p_id, VarDefNode(sp)); ir.add_variable(Local(LocalInfo { id: p_id, ident: name })); }); visit::walk_local(ir, local, ()); } fn visit_arm(ir: &mut IrMaps, arm: &Arm) { for pat in arm.pats.iter() { pat_util::pat_bindings(&ir.tcx.def_map, &**pat, |bm, p_id, sp, path1| { debug!("adding local variable {} from match with bm {:?}", p_id, bm); let name = path1.node; ir.add_live_node_for_node(p_id, VarDefNode(sp)); ir.add_variable(Local(LocalInfo { id: p_id, ident: name })); }) } visit::walk_arm(ir, arm, ()); } fn moved_variable_node_id_from_def(def: Def) -> Option<NodeId> { match def { DefBinding(nid, _) | DefArg(nid, _) | DefLocal(nid, _) => Some(nid), _ => None } } fn visit_expr(ir: &mut IrMaps, expr: &Expr) { match expr.node { // live nodes required for uses or definitions of variables: ExprPath(_) => { let def = ir.tcx.def_map.borrow().get_copy(&expr.id); debug!("expr {}: path that leads to {:?}", expr.id, def); if moved_variable_node_id_from_def(def).is_some() { ir.add_live_node_for_node(expr.id, ExprNode(expr.span)); } visit::walk_expr(ir, expr, ()); } ExprFnBlock(..) | ExprProc(..) 
=> { // Interesting control flow (for loops can contain labeled // breaks or continues) ir.add_live_node_for_node(expr.id, ExprNode(expr.span)); // Make a live_node for each captured variable, with the span // being the location that the variable is used. This results // in better error messages than just pointing at the closure // construction site. let mut call_caps = Vec::new(); freevars::with_freevars(ir.tcx, expr.id, |freevars| { for fv in freevars.iter() { match moved_variable_node_id_from_def(fv.def) { Some(rv) => { let fv_ln = ir.add_live_node(FreeVarNode(fv.span)); call_caps.push(CaptureInfo {ln: fv_ln, var_nid: rv}); } None => {} } } }); ir.set_captures(expr.id, call_caps); visit::walk_expr(ir, expr, ()); } // live nodes required for interesting control flow: ExprIf(..) | ExprMatch(..) | ExprWhile(..) | ExprLoop(..) => { ir.add_live_node_for_node(expr.id, ExprNode(expr.span)); visit::walk_expr(ir, expr, ()); } ExprForLoop(..) => fail!("non-desugared expr_for_loop"), ExprBinary(op, _, _) if ast_util::lazy_binop(op) => { ir.add_live_node_for_node(expr.id, ExprNode(expr.span)); visit::walk_expr(ir, expr, ()); } // otherwise, live nodes are not required: ExprIndex(..) | ExprField(..) | ExprVstore(..) | ExprVec(..) | ExprCall(..) | ExprMethodCall(..) | ExprTup(..) | ExprBinary(..) | ExprAddrOf(..) | ExprCast(..) | ExprUnary(..) | ExprBreak(_) | ExprAgain(_) | ExprLit(_) | ExprRet(..) | ExprBlock(..) | ExprAssign(..) | ExprAssignOp(..) | ExprMac(..) | ExprStruct(..) | ExprRepeat(..) | ExprParen(..) | ExprInlineAsm(..) | ExprBox(..) => { visit::walk_expr(ir, expr, ()); } } } // ______________________________________________________________________ // Computing liveness sets // // Actually we compute just a bit more than just liveness, but we use // the same basic propagation framework in all cases. #[deriving(Clone)] struct Users { reader: LiveNode, writer: LiveNode, used: bool } fn invalid_users() -> Users { Users { reader: invalid_node(), writer: invalid_node(), used: false } } struct Specials { exit_ln: LiveNode, fallthrough_ln: LiveNode, no_ret_var: Variable } static ACC_READ: uint = 1u; static ACC_WRITE: uint = 2u; static ACC_USE: uint = 4u; struct Liveness<'a> { ir: &'a mut IrMaps<'a>, s: Specials, successors: Vec<LiveNode>, users: Vec<Users>, // The list of node IDs for the nested loop scopes // we're in. loop_scope: Vec<NodeId>, // mappings from loop node ID to LiveNode // ("break" label should map to loop node ID, // it probably doesn't now) break_ln: NodeMap<LiveNode>, cont_ln: NodeMap<LiveNode> } impl<'a> Liveness<'a> { fn new(ir: &'a mut IrMaps<'a>, specials: Specials) -> Liveness<'a> { let num_live_nodes = ir.num_live_nodes; let num_vars = ir.num_vars; Liveness { ir: ir, s: specials, successors: Vec::from_elem(num_live_nodes, invalid_node()), users: Vec::from_elem(num_live_nodes * num_vars, invalid_users()), loop_scope: Vec::new(), break_ln: NodeMap::new(), cont_ln: NodeMap::new(), } } fn live_node(&self, node_id: NodeId, span: Span) -> LiveNode { match self.ir.live_node_map.find(&node_id) { Some(&ln) => ln, None => { // This must be a mismatch between the ir_map construction // above and the propagation code below; the two sets of // code have to agree about which AST nodes are worth // creating liveness nodes for. 
self.ir.tcx.sess.span_bug( span, format!("no live node registered for node {}", node_id).as_slice()); } } } fn variable(&self, node_id: NodeId, span: Span) -> Variable { self.ir.variable(node_id, span) } fn pat_bindings(&mut self, pat: &Pat, f: |&mut Liveness<'a>, LiveNode, Variable, Span, NodeId|) { pat_util::pat_bindings(&self.ir.tcx.def_map, pat, |_bm, p_id, sp, _n| { let ln = self.live_node(p_id, sp); let var = self.variable(p_id, sp); f(self, ln, var, sp, p_id); }) } fn arm_pats_bindings(&mut self, pats: &[Gc<Pat>], f: |&mut Liveness<'a>, LiveNode, Variable, Span, NodeId|) { // only consider the first pattern; any later patterns must have // the same bindings, and we also consider the first pattern to be // the "authoritative" set of ids if !pats.is_empty() { self.pat_bindings(&*pats[0], f) } } fn define_bindings_in_pat(&mut self, pat: Gc<Pat>, succ: LiveNode) -> LiveNode { self.define_bindings_in_arm_pats([pat], succ) } fn define_bindings_in_arm_pats(&mut self, pats: &[Gc<Pat>], succ: LiveNode) -> LiveNode { let mut succ = succ; self.arm_pats_bindings(pats, |this, ln, var, _sp, _id| { this.init_from_succ(ln, succ); this.define(ln, var); succ = ln; }); succ } fn idx(&self, ln: LiveNode, var: Variable) -> uint { ln.get() * self.ir.num_vars + var.get() } fn live_on_entry(&self, ln: LiveNode, var: Variable) -> Option<LiveNodeKind> { assert!(ln.is_valid()); let reader = self.users.get(self.idx(ln, var)).reader; if reader.is_valid() {Some(self.ir.lnk(reader))} else {None} } /* Is this variable live on entry to any of its successor nodes? */ fn live_on_exit(&self, ln: LiveNode, var: Variable) -> Option<LiveNodeKind> { let successor = *self.successors.get(ln.get()); self.live_on_entry(successor, var) } fn used_on_entry(&self, ln: LiveNode, var: Variable) -> bool { assert!(ln.is_valid()); self.users.get(self.idx(ln, var)).used } fn assigned_on_entry(&self, ln: LiveNode, var: Variable) -> Option<LiveNodeKind> { assert!(ln.is_valid()); let writer = self.users.get(self.idx(ln, var)).writer; if writer.is_valid() {Some(self.ir.lnk(writer))} else {None} } fn assigned_on_exit(&self, ln: LiveNode, var: Variable) -> Option<LiveNodeKind> { let successor = *self.successors.get(ln.get()); self.assigned_on_entry(successor, var) } fn indices2(&mut self, ln: LiveNode, succ_ln: LiveNode, op: |&mut Liveness<'a>, uint, uint|) { let node_base_idx = self.idx(ln, Variable(0u)); let succ_base_idx = self.idx(succ_ln, Variable(0u)); for var_idx in range(0u, self.ir.num_vars) { op(self, node_base_idx + var_idx, succ_base_idx + var_idx); } } fn write_vars(&self, wr: &mut io::Writer, ln: LiveNode, test: |uint| -> LiveNode) -> io::IoResult<()> { let node_base_idx = self.idx(ln, Variable(0)); for var_idx in range(0u, self.ir.num_vars) { let idx = node_base_idx + var_idx; if test(idx).is_valid() { try!(write!(wr, " {}", Variable(var_idx).to_string())); } } Ok(()) } fn find_loop_scope(&self, opt_label: Option<Ident>, id: NodeId, sp: Span) -> NodeId { match opt_label { Some(_) => { // Refers to a labeled loop. 
Use the results of resolve // to find with one match self.ir.tcx.def_map.borrow().find(&id) { Some(&DefLabel(loop_id)) => loop_id, _ => self.ir.tcx.sess.span_bug(sp, "label on break/loop \ doesn't refer to a loop") } } None => { // Vanilla 'break' or 'loop', so use the enclosing // loop scope if self.loop_scope.len() == 0 { self.ir.tcx.sess.span_bug(sp, "break outside loop"); } else { *self.loop_scope.last().unwrap() } } } } #[allow(unused_must_use)] fn ln_str(&self, ln: LiveNode) -> String { let mut wr = io::MemWriter::new(); { let wr = &mut wr as &mut io::Writer; write!(wr, "[ln({}) of kind {:?} reads", ln.get(), self.ir.lnk(ln)); self.write_vars(wr, ln, |idx| self.users.get(idx).reader); write!(wr, " writes"); self.write_vars(wr, ln, |idx| self.users.get(idx).writer); write!(wr, " precedes {}]", self.successors.get(ln.get()).to_string()); } str::from_utf8(wr.unwrap().as_slice()).unwrap().to_string() } fn init_empty(&mut self, ln: LiveNode, succ_ln: LiveNode) { *self.successors.get_mut(ln.get()) = succ_ln; // It is not necessary to initialize the // values to empty because this is the value // they have when they are created, and the sets // only grow during iterations. // // self.indices(ln) { |idx| // self.users[idx] = invalid_users(); // } } fn init_from_succ(&mut self, ln: LiveNode, succ_ln: LiveNode) { // more efficient version of init_empty() / merge_from_succ() *self.successors.get_mut(ln.get()) = succ_ln; self.indices2(ln, succ_ln, |this, idx, succ_idx| { *this.users.get_mut(idx) = *this.users.get(succ_idx) }); debug!("init_from_succ(ln={}, succ={})", self.ln_str(ln), self.ln_str(succ_ln)); } fn merge_from_succ(&mut self, ln: LiveNode, succ_ln: LiveNode, first_merge: bool) -> bool { if ln == succ_ln { return false; } let mut changed = false; self.indices2(ln, succ_ln, |this, idx, succ_idx| { changed |= copy_if_invalid(this.users.get(succ_idx).reader, &mut this.users.get_mut(idx).reader); changed |= copy_if_invalid(this.users.get(succ_idx).writer, &mut this.users.get_mut(idx).writer); if this.users.get(succ_idx).used && !this.users.get(idx).used { this.users.get_mut(idx).used = true; changed = true; } }); debug!("merge_from_succ(ln={}, succ={}, first_merge={}, changed={})", ln.to_string(), self.ln_str(succ_ln), first_merge, changed); return changed; fn copy_if_invalid(src: LiveNode, dst: &mut LiveNode) -> bool { if src.is_valid() && !dst.is_valid() { *dst = src; true } else { false } } } // Indicates that a local variable was *defined*; we know that no // uses of the variable can precede the definition (resolve checks // this) so we just clear out all the data. fn define(&mut self, writer: LiveNode, var: Variable) { let idx = self.idx(writer, var); self.users.get_mut(idx).reader = invalid_node(); self.users.get_mut(idx).writer = invalid_node(); debug!("{} defines {} (idx={}): {}", writer.to_string(), var.to_string(), idx, self.ln_str(writer)); } // Either read, write, or both depending on the acc bitset fn acc(&mut self, ln: LiveNode, var: Variable, acc: uint) { debug!("{} accesses[{:x}] {}: {}", ln.to_string(), acc, var.to_string(), self.ln_str(ln)); let idx = self.idx(ln, var); let user = self.users.get_mut(idx); if (acc & ACC_WRITE) != 0 { user.reader = invalid_node(); user.writer = ln; } // Important: if we both read/write, must do read second // or else the write will override. 
if (acc & ACC_READ) != 0 { user.reader = ln; } if (acc & ACC_USE) != 0 { user.used = true; } } // _______________________________________________________________________ fn compute(&mut self, decl: &FnDecl, body: &Block) -> LiveNode { // if there is a `break` or `again` at the top level, then it's // effectively a return---this only occurs in `for` loops, // where the body is really a closure. debug!("compute: using id for block, {}", block_to_string(body)); let exit_ln = self.s.exit_ln; let entry_ln: LiveNode = self.with_loop_nodes(body.id, exit_ln, exit_ln, |this| this.propagate_through_fn_block(decl, body)); // hack to skip the loop unless debug! is enabled: debug!("^^ liveness computation results for body {} (entry={})", { for ln_idx in range(0u, self.ir.num_live_nodes) { debug!("{}", self.ln_str(LiveNode(ln_idx))); } body.id }, entry_ln.to_string()); entry_ln } fn propagate_through_fn_block(&mut self, _: &FnDecl, blk: &Block) -> LiveNode { // the fallthrough exit is only for those cases where we do not // explicitly return: let s = self.s; self.init_from_succ(s.fallthrough_ln, s.exit_ln); if blk.expr.is_none() { self.acc(s.fallthrough_ln, s.no_ret_var, ACC_READ) } self.propagate_through_block(blk, s.fallthrough_ln) } fn propagate_through_block(&mut self, blk: &Block, succ: LiveNode) -> LiveNode { let succ = self.propagate_through_opt_expr(blk.expr, succ); blk.stmts.iter().rev().fold(succ, |succ, stmt| { self.propagate_through_stmt(&**stmt, succ) }) } fn propagate_through_stmt(&mut self, stmt: &Stmt, succ: LiveNode) -> LiveNode { match stmt.node { StmtDecl(ref decl, _) => { self.propagate_through_decl(&**decl, succ) } StmtExpr(ref expr, _) | StmtSemi(ref expr, _) => { self.propagate_through_expr(&**expr, succ) } StmtMac(..) => { self.ir.tcx.sess.span_bug(stmt.span, "unexpanded macro"); } } } fn
(&mut self, decl: &Decl, succ: LiveNode) -> LiveNode { match decl.node { DeclLocal(ref local) => { self.propagate_through_local(&**local, succ) } DeclItem(_) => succ, } } fn propagate_through_local(&mut self, local: &Local, succ: LiveNode) -> LiveNode { // Note: we mark the variable as defined regardless of whether // there is an initializer. Initially I had thought to only mark // the live variable as defined if it was initialized, and then we // could check for uninit variables just by scanning what is live // at the start of the function. But that doesn't work so well for // immutable variables defined in a loop: // loop { let x; x = 5; } // because the "assignment" loops back around and generates an error. // // So now we just check that variables defined w/o an // initializer are not live at the point of their // initialization, which is mildly more complex than checking // once at the func header but otherwise equivalent. let succ = self.propagate_through_opt_expr(local.init, succ); self.define_bindings_in_pat(local.pat, succ) } fn propagate_through_exprs(&mut self, exprs: &[Gc<Expr>], succ: LiveNode) -> LiveNode { exprs.iter().rev().fold(succ, |succ, expr| { self.propagate_through_expr(&**expr, succ) }) } fn propagate_through_opt_expr(&mut self, opt_expr: Option<Gc<Expr>>, succ: LiveNode) -> LiveNode { opt_expr.iter().fold(succ, |succ, expr| { self.propagate_through_expr(&**expr, succ) }) } fn propagate_through_expr(&mut self, expr: &Expr, succ: LiveNode) -> LiveNode { debug!("propagate_through_expr: {}", expr_to_string(expr)); match expr.node { // Interesting cases with control flow or which gen/kill ExprPath(_) => { self.access_path(expr, succ, ACC_READ | ACC_USE) } ExprField(ref e, _, _) => { self.propagate_through_expr(&**e, succ) } ExprFnBlock(_, ref blk) | ExprProc(_, ref blk) => { debug!("{} is an ExprFnBlock or ExprProc", expr_to_string(expr)); /* The next-node for a break is the successor of the entire loop. The next-node for a continue is the top of this loop. */ let node = self.live_node(expr.id, expr.span); self.with_loop_nodes(blk.id, succ, node, |this| { // the construction of a closure itself is not important, // but we have to consider the closed over variables. let caps = match this.ir.capture_info_map.find(&expr.id) { Some(caps) => caps.clone(), None => { this.ir.tcx.sess.span_bug(expr.span, "no registered caps"); } }; caps.iter().rev().fold(succ, |succ, cap| { this.init_from_succ(cap.ln, succ); let var = this.variable(cap.var_nid, expr.span); this.acc(cap.ln, var, ACC_READ | ACC_USE); cap.ln }) }) } ExprIf(ref cond, ref then, ref els) => { // // (cond) // | // v // (expr) // / \ // | | // v v // (then)(els) // | | // v v // ( succ ) // let else_ln = self.propagate_through_opt_expr(els.clone(), succ); let then_ln = self.propagate_through_block(&**then, succ); let ln = self.live_node(expr.id, expr.span); self.init_from_succ(ln, else_ln); self.merge_from_succ(ln, then_ln, false); self.propagate_through_expr(&**cond, ln) } ExprWhile(ref cond, ref blk) => { self.propagate_through_loop(expr, Some(cond.clone()), &**blk, succ) } ExprForLoop(..) => fail!("non-desugared expr_for_loop"), // Note that labels have been resolved, so we don't need to look // at the label ident ExprLoop(ref blk, _) => { self.propagate_through_loop(expr, None, &**blk, succ) } ExprMatch(ref e, ref arms) => { // // (e) // | // v // (expr) // / | \ // | | | // v v v // (..arms..) 
// | | | // v v v // ( succ ) // // let ln = self.live_node(expr.id, expr.span); self.init_empty(ln, succ); let mut first_merge = true; for arm in arms.iter() { let body_succ = self.propagate_through_expr(&*arm.body, succ); let guard_succ = self.propagate_through_opt_expr(arm.guard, body_succ); let arm_succ = self.define_bindings_in_arm_pats(arm.pats.as_slice(), guard_succ); self.merge_from_succ(ln, arm_succ, first_merge); first_merge = false; }; self.propagate_through_expr(&**e, ln) } ExprRet(o_e) => { // ignore succ and subst exit_ln: let exit_ln = self.s.exit_ln; self.propagate_through_opt_expr(o_e, exit_ln) } ExprBreak(opt_label) => { // Find which label this break jumps to let sc = self.find_loop_scope(opt_label, expr.id, expr.span); // Now that we know the label we're going to, // look it up in the break loop nodes table match self.break_ln.find(&sc) { Some(&b) => b, None => self.ir.tcx.sess.span_bug(expr.span, "break to unknown label") } } ExprAgain(opt_label) => { // Find which label this expr continues to let sc = self.find_loop_scope(opt_label, expr.id, expr.span); // Now that we know the label we're going to, // look it up in the continue loop nodes table match self.cont_ln.find(&sc) { Some(&b) => b, None => self.ir.tcx.sess.span_bug(expr.span, "loop to unknown label") } } ExprAssign(ref l, ref r) => { // see comment on lvalues in // propagate_through_lvalue_components() let succ = self.write_lvalue(&**l, succ, ACC_WRITE); let succ = self.propagate_through_lvalue_components(&**l, succ); self.propagate_through_expr(&**r, succ) } ExprAssignOp(_, ref l, ref r) => { // see comment on lvalues in // propagate_through_lvalue_components() let succ = self.write_lvalue(&**l, succ, ACC_WRITE|ACC_READ); let succ = self.propagate_through_expr(&**r, succ); self.propagate_through_lvalue_components(&**l, succ) } // Uninteresting cases: just propagate in rev exec order ExprVstore(ref expr, _) => { self.propagate_through_expr(&**expr, succ) } ExprVec(ref exprs) => { self.propagate_through_exprs(exprs.as_slice(), succ) } ExprRepeat(ref element, ref count) => { let succ = self.propagate_through_expr(&**count, succ); self.propagate_through_expr(&**element, succ) } ExprStruct(_, ref fields, ref with_expr) => { let succ = self.propagate_through_opt_expr(with_expr.clone(), succ); fields.iter().rev().fold(succ, |succ, field| { self.propagate_through_expr(&*field.expr, succ) }) } ExprCall(ref f, ref args) => { // calling a fn with bot return type means that the fn // will fail, and hence the successors can be ignored let is_bot = !self.ir.tcx.is_method_call(expr.id) && { let t_ret = ty::ty_fn_ret(ty::expr_ty(self.ir.tcx, &**f)); ty::type_is_bot(t_ret) }; let succ = if is_bot { self.s.exit_ln } else { succ }; let succ = self.propagate_through_exprs(args.as_slice(), succ); self.propagate_through_expr(&**f, succ) } ExprMethodCall(_, _, ref args) => { // calling a method with bot return type means that the method // will fail, and hence the successors can be ignored let t_ret = ty::node_id_to_type(self.ir.tcx, expr.id); let succ = if ty::type_is_bot(t_ret) {self.s.exit_ln} else {succ}; self.propagate_through_exprs(args.as_slice(), succ) } ExprTup(ref exprs) => { self.propagate_through_exprs(exprs.as_slice(), succ) } ExprBinary(op, ref l, ref r) if ast_util::lazy_binop(op) => { let r_succ = self.propagate_through_expr(&**r, succ); let ln = self.live_node(expr.id, expr.span); self.init_from_succ(ln, succ); self.merge_from_succ(ln, r_succ, false); self.propagate_through_expr(&**l, ln) } ExprIndex(ref l, ref r) 
| ExprBinary(_, ref l, ref r) | ExprBox(ref l, ref r) => { self.propagate_through_exprs([l.clone(), r.clone()], succ) } ExprAddrOf(_, ref e) | ExprCast(ref e, _) | ExprUnary(_, ref e) | ExprParen(ref e) => { self.propagate_through_expr(&**e, succ) } ExprInlineAsm(ref ia) => { let succ = ia.outputs.iter().rev().fold(succ, |succ, &(_, ref expr)| { // see comment on lvalues in // propagate_through_lvalue_components() let succ = self.write_lvalue(&**expr, succ, ACC_WRITE); self.propagate_through_lvalue_components(&**expr, succ) }); // Inputs are executed first. Propagate last because of rev order ia.inputs.iter().rev().fold(succ, |succ, &(_, ref expr)| { self.propagate_through_expr(&**expr, succ) }) } ExprLit(..) => { succ } ExprBlock(ref blk) => { self.propagate_through_block(&**blk, succ) } ExprMac(..) => { self.ir.tcx.sess.span_bug(expr.span, "unexpanded macro"); } } } fn propagate_through_lvalue_components(&mut self, expr: &Expr, succ: LiveNode) -> LiveNode { // # Lvalues // // In general, the full flow graph structure for an // assignment/move/etc can be handled in one of two ways, // depending on whether what is being assigned is a "tracked // value" or not. A tracked value is basically a local // variable or argument. // // The two kinds of graphs are: // // Tracked lvalue Untracked lvalue // ----------------------++----------------------- // || // | || | // v || v // (rvalue) || (rvalue) // | || | // v || v // (write of lvalue) || (lvalue components) // | || | // v || v // (succ) || (succ) // || // ----------------------++----------------------- // // I will cover the two cases in turn: // // # Tracked lvalues // // A tracked lvalue is a local variable/argument `x`. In // these cases, the link_node where the write occurs is linked // to node id of `x`. The `write_lvalue()` routine generates // the contents of this node. There are no subcomponents to // consider. // // # Non-tracked lvalues // // These are lvalues like `x[5]` or `x.f`. In that case, we // basically ignore the value which is written to but generate // reads for the components---`x` in these two examples. The // components reads are generated by // `propagate_through_lvalue_components()` (this fn). // // # Illegal lvalues // // It is still possible to observe assignments to non-lvalues; // these errors are detected in the later pass borrowck. We // just ignore such cases and treat them as reads. match expr.node { ExprPath(_) => succ, ExprField(ref e, _, _) => self.propagate_through_expr(&**e, succ), _ => self.propagate_through_expr(expr, succ) } } // see comment on propagate_through_lvalue() fn write_lvalue(&mut self, expr: &Expr, succ: LiveNode, acc: uint) -> LiveNode { match expr.node { ExprPath(_) => self.access_path(expr, succ, acc), // We do not track other lvalues, so just propagate through // to their subcomponents. Also, it may happen that // non-lvalues occur here, because those are detected in the // later pass borrowck. 
_ => succ } } fn access_path(&mut self, expr: &Expr, succ: LiveNode, acc: uint) -> LiveNode { let def = self.ir.tcx.def_map.borrow().get_copy(&expr.id); match moved_variable_node_id_from_def(def) { Some(nid) => { let ln = self.live_node(expr.id, expr.span); if acc != 0u { self.init_from_succ(ln, succ); let var = self.variable(nid, expr.span); self.acc(ln, var, acc); } ln } None => succ } } fn propagate_through_loop(&mut self, expr: &Expr, cond: Option<Gc<Expr>>, body: &Block, succ: LiveNode) -> LiveNode { /* We model control flow like this: (cond) <--+ | | v | +-- (expr) | | | | | v | | (body) ---+ | | v (succ) */ // first iteration: let mut first_merge = true; let ln = self.live_node(expr.id, expr.span); self.init_empty(ln, succ); if cond.is_some() { // if there is a condition, then it's possible we bypass // the body altogether. otherwise, the only way is via a // break in the loop body. self.merge_from_succ(ln, succ, first_merge); first_merge = false; } debug!("propagate_through_loop: using id for loop body {} {}", expr.id, block_to_string(body)); let cond_ln = self.propagate_through_opt_expr(cond, ln); let body_ln = self.with_loop_nodes(expr.id, succ, ln, |this| { this.propagate_through_block(body, cond_ln) }); // repeat until fixed point is reached: while self.merge_from_succ(ln, body_ln, first_merge) { first_merge = false; assert!(cond_ln == self.propagate_through_opt_expr(cond, ln)); assert!(body_ln == self.with_loop_nodes(expr.id, succ, ln, |this| this.propagate_through_block(body, cond_ln))); } cond_ln } fn with_loop_nodes<R>(&mut self, loop_node_id: NodeId, break_ln: LiveNode, cont_ln: LiveNode, f: |&mut Liveness<'a>| -> R) -> R { debug!("with_loop_nodes: {} {}", loop_node_id, break_ln.get()); self.loop_scope.push(loop_node_id); self.break_ln.insert(loop_node_id, break_ln); self.cont_ln.insert(loop_node_id, cont_ln); let r = f(self); self.loop_scope.pop(); r } } // _______________________________________________________________________ // Checking for error conditions fn check_local(this: &mut Liveness, local: &Local) { match local.init { Some(_) => { this.warn_about_unused_or_dead_vars_in_pat(&*local.pat); }, None => { this.pat_bindings(&*local.pat, |this, ln, var, sp, id| { this.warn_about_unused(sp, id, ln, var); }) } } visit::walk_local(this, local, ()); } fn check_arm(this: &mut Liveness, arm: &Arm) { this.arm_pats_bindings(arm.pats.as_slice(), |this, ln, var, sp, id| { this.warn_about_unused(sp, id, ln, var); }); visit::walk_arm(this, arm, ()); } fn check_expr(this: &mut Liveness, expr: &Expr) { match expr.node { ExprAssign(ref l, ref r) => { this.check_lvalue(&**l); this.visit_expr(&**r, ()); visit::walk_expr(this, expr, ()); } ExprAssignOp(_, ref l, _) => { this.check_lvalue(&**l); visit::walk_expr(this, expr, ()); } ExprInlineAsm(ref ia) => { for &(_, ref input) in ia.inputs.iter() { this.visit_expr(&**input, ()); } // Output operands must be lvalues for &(_, ref out) in ia.outputs.iter() { this.check_lvalue(&**out); this.visit_expr(&**out, ()); } visit::walk_expr(this, expr, ()); } // no correctness conditions related to liveness ExprCall(..) | ExprMethodCall(..) | ExprIf(..) | ExprMatch(..) | ExprWhile(..) | ExprLoop(..) | ExprIndex(..) | ExprField(..) | ExprVstore(..) | ExprVec(..) | ExprTup(..) | ExprBinary(..) | ExprCast(..) | ExprUnary(..) | ExprRet(..) | ExprBreak(..) | ExprAgain(..) | ExprLit(_) | ExprBlock(..) | ExprMac(..) | ExprAddrOf(..) | ExprStruct(..) | ExprRepeat(..) | ExprParen(..) | ExprFnBlock(..) | ExprProc(..) | ExprPath(..) | ExprBox(..) 
=> { visit::walk_expr(this, expr, ()); } ExprForLoop(..) => fail!("non-desugared expr_for_loop") } } fn check_fn(_v: &Liveness, _fk: &FnKind, _decl: &FnDecl, _body: &Block, _sp: Span, _id: NodeId) { // do not check contents of nested fns } impl<'a> Liveness<'a> { fn check_ret(&self, id: NodeId, sp: Span, _fk: &FnKind, entry_ln: LiveNode, body: &Block) { if self.live_on_entry(entry_ln, self.s.no_ret_var).is_some() { // if no_ret_var is live, then we fall off the end of the // function without any kind of return expression: let t_ret = ty::ty_fn_ret(ty::node_id_to_type(self.ir.tcx, id)); if ty::type_is_nil(t_ret) { // for nil return types, it is ok to not return a value expl. } else if ty::type_is_bot(t_ret) { // for bot return types, not ok. Function should fail. self.ir.tcx.sess.span_err( sp, "some control paths may return"); } else { let ends_with_stmt = match body.expr { None if body.stmts.len() > 0 => match body.stmts.last().unwrap().node { StmtSemi(ref e, _) => { let t_stmt = ty::expr_ty(self.ir.tcx, &**e); ty::get(t_stmt).sty == ty::get(t_ret).sty }, _ => false }, _ => false }; self.ir.tcx.sess.span_err( sp, "not all control paths return a value"); if ends_with_stmt { let last_stmt = body.stmts.last().unwrap(); let original_span = original_sp(last_stmt.span, sp); let span_semicolon = Span { lo: original_span.hi - BytePos(1), hi: original_span.hi, expn_info: original_span.expn_info }; self.ir.tcx.sess.span_note( span_semicolon, "consider removing this semicolon:"); } } } } fn check_lvalue(&mut self, expr: &Expr) { match expr.node { ExprPath(_) => { match self.ir.tcx.def_map.borrow().get_copy(&expr.id) { DefLocal(nid, _) => { // Assignment to an immutable variable or argument: only legal // if there is no later assignment. If this local is actually // mutable, then check for a reassignment to flag the mutability // as being used. let ln = self.live_node(expr.id, expr.span); let var = self.variable(nid, expr.span); self.warn_about_dead_assign(expr.span, expr.id, ln, var); } def => { match moved_variable_node_id_from_def(def) { Some(nid) => { let ln = self.live_node(expr.id, expr.span); let var = self.variable(nid, expr.span); self.warn_about_dead_assign(expr.span, expr.id, ln, var); } None => {} } } } } _ => { // For other kinds of lvalues, no checks are required, // and any embedded expressions are actually rvalues visit::walk_expr(self, expr, ()); } } } fn should_warn(&self, var: Variable) -> Option<String> { let name = self.ir.variable_name(var); if name.len() == 0 || name.as_bytes()[0] == ('_' as u8) { None } else { Some(name) } } fn warn_about_unused_args(&self, decl: &FnDecl, entry_ln: LiveNode) { for arg in decl.inputs.iter() { pat_util::pat_bindings(&self.ir.tcx.def_map, &*arg.pat, |_bm, p_id, sp, path1| { let var = self.variable(p_id, sp); // Ignore unused self. let ident = path1.node; if ident.name != special_idents::self_.name { self.warn_about_unused(sp, p_id, entry_ln, var); } }) } } fn warn_about_unused_or_dead_vars_in_pat(&mut self, pat: &Pat) { self.pat_bindings(pat, |this, ln, var, sp, id| { if !this.warn_about_unused(sp, id, ln, var) { this.warn_about_dead_assign(sp, id, ln, var); } }) } fn warn_about_unused(&self, sp: Span, id: NodeId, ln: LiveNode, var: Variable) -> bool { if !self.used_on_entry(ln, var) { let r = self.should_warn(var); for name in r.iter() { // annoying: for parameters in funcs like `fn(x: int) // {ret}`, there is only one node, so asking about // assigned_on_exit() is not meaningful. 
let is_assigned = if ln == self.s.exit_ln { false } else { self.assigned_on_exit(ln, var).is_some() }; if is_assigned { self.ir.tcx.sess.add_lint(lint::builtin::UNUSED_VARIABLE, id, sp, format!("variable `{}` is assigned to, but never used", *name)); } else { self.ir.tcx.sess.add_lint(lint::builtin::UNUSED_VARIABLE, id, sp, format!("unused variable: `{}`", *name)); } } true } else { false } } fn warn_about_dead_assign(&self, sp: Span, id: NodeId, ln: LiveNode, var: Variable) { if self.live_on_exit(ln, var).is_none() { let r = self.should_warn(var); for name in r.iter() { self.ir.tcx.sess.add_lint(lint::builtin::DEAD_ASSIGNMENT, id, sp, format!("value assigned to `{}` is never read", *name)); } } } }
propagate_through_decl
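The merge rule described for the Users struct is small enough to restate outside Rust. A language-agnostic Python sketch of merge_from_succ, which loop nodes re-run until it reports no change, i.e. until the fixed point is reached:

INVALID = -1  # plays the role of invalid_node()

def merge_from_succ(users, idx, succ_idx):
    # copy reader/writer ids where ours is still invalid; OR in the used bit
    changed = False
    dst, src = users[idx], users[succ_idx]
    for field in ("reader", "writer"):
        if src[field] != INVALID and dst[field] == INVALID:
            dst[field] = src[field]
            changed = True
    if src["used"] and not dst["used"]:
        dst["used"] = True
        changed = True
    return changed

users = [{"reader": INVALID, "writer": 7, "used": False},
         {"reader": 3, "writer": INVALID, "used": True}]
while merge_from_succ(users, 0, 1):
    pass  # iterate to a fixed point, as the loop nodes do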
test_hostvirtual.py
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from libcloud.utils.py3 import httplib from libcloud.dns.types import RecordType, ZoneDoesNotExistError from libcloud.dns.types import RecordDoesNotExistError from libcloud.dns.drivers.hostvirtual import HostVirtualDNSDriver from libcloud.test import MockHttp from libcloud.test.file_fixtures import DNSFileFixtures from libcloud.test.secrets import DNS_PARAMS_HOSTVIRTUAL class HostVirtualTests(unittest.TestCase): def setUp(self): HostVirtualDNSDriver.connectionCls.conn_classes = ( None, HostVirtualMockHttp) HostVirtualMockHttp.type = None self.driver = HostVirtualDNSDriver(*DNS_PARAMS_HOSTVIRTUAL) def test_list_record_types(self): record_types = self.driver.list_record_types() self.assertEqual(len(record_types), 7) self.assertTrue(RecordType.A in record_types) def test_list_zones(self): zones = self.driver.list_zones() self.assertEqual(len(zones), 5) zone = zones[0] self.assertEqual(zone.id, '47234') self.assertEqual(zone.type, 'master') self.assertEqual(zone.domain, 't.com') self.assertEqual(zone.ttl, '3600') def
(self): zone = self.driver.list_zones()[0] records = self.driver.list_records(zone=zone) self.assertEqual(len(records), 3) record = records[1] self.assertEqual(record.name, 'www.t.com') self.assertEqual(record.id, '300719') self.assertEqual(record.type, RecordType.A) self.assertEqual(record.data, '208.111.35.173') def test_get_zone(self): zone = self.driver.get_zone(zone_id='47234') self.assertEqual(zone.id, '47234') self.assertEqual(zone.type, 'master') self.assertEqual(zone.domain, 't.com') self.assertEqual(zone.ttl, '3600') def test_get_record(self): record = self.driver.get_record(zone_id='47234', record_id='300377') self.assertEqual(record.id, '300377') self.assertEqual(record.name, '*.t.com') self.assertEqual(record.type, RecordType.CNAME) self.assertEqual(record.data, 't.com') def test_list_records_zone_does_not_exist(self): zone = self.driver.list_zones()[0] HostVirtualMockHttp.type = 'ZONE_DOES_NOT_EXIST' try: self.driver.list_records(zone=zone) except ZoneDoesNotExistError: e = sys.exc_info()[1] self.assertEqual(e.zone_id, zone.id) else: self.fail('Exception was not thrown') def test_get_zone_does_not_exist(self): HostVirtualMockHttp.type = 'ZONE_DOES_NOT_EXIST' try: self.driver.get_zone(zone_id='4444') except ZoneDoesNotExistError: e = sys.exc_info()[1] self.assertEqual(e.zone_id, '4444') else: self.fail('Exception was not thrown') def test_get_record_zone_does_not_exist(self): HostVirtualMockHttp.type = 'ZONE_DOES_NOT_EXIST' try: self.driver.get_record(zone_id='4444', record_id='28536') except ZoneDoesNotExistError: pass else: self.fail('Exception was not thrown') def test_get_record_record_does_not_exist(self): HostVirtualMockHttp.type = 'RECORD_DOES_NOT_EXIST' try: self.driver.get_record(zone_id='47234', record_id='4444') except RecordDoesNotExistError: pass else: self.fail('Exception was not thrown') def test_create_zone(self): zone = self.driver.create_zone(domain='t.com', type='master', ttl=None, extra=None) self.assertEqual(zone.id, '47234') self.assertEqual(zone.domain, 't.com') def test_update_zone(self): zone = self.driver.list_zones()[0] updated_zone = self.driver.update_zone(zone=zone, domain='tt.com') self.assertEqual(updated_zone.id, zone.id) self.assertEqual(updated_zone.domain, 'tt.com') self.assertEqual(updated_zone.type, zone.type) self.assertEqual(updated_zone.ttl, '3600') def test_create_record(self): zone = self.driver.list_zones()[0] record = self.driver.create_record( name='www', zone=zone, type=RecordType.A, data='127.0.0.1' ) self.assertEqual(record.id, '300377') self.assertEqual(record.name, 'www') self.assertEqual(record.zone, zone) self.assertEqual(record.type, RecordType.A) self.assertEqual(record.data, '127.0.0.1') def test_update_record(self): zone = self.driver.list_zones()[0] record = self.driver.list_records(zone=zone)[1] updated_record = self.driver.update_record(record=record, name='www', type=RecordType.AAAA, data='::1') self.assertEqual(record.data, '208.111.35.173') self.assertEqual(updated_record.id, record.id) self.assertEqual(updated_record.name, 'www') self.assertEqual(updated_record.zone, record.zone) self.assertEqual(updated_record.type, RecordType.AAAA) self.assertEqual(updated_record.data, '::1') def test_delete_zone(self): zone = self.driver.list_zones()[0] status = self.driver.delete_zone(zone=zone) self.assertTrue(status) def test_delete_zone_does_not_exist(self): zone = self.driver.list_zones()[0] HostVirtualMockHttp.type = 'ZONE_DOES_NOT_EXIST' try: self.driver.delete_zone(zone=zone) except ZoneDoesNotExistError: e = 
sys.exc_info()[1] self.assertEqual(e.zone_id, zone.id) else: self.fail('Exception was not thrown') def test_delete_record(self): zone = self.driver.list_zones()[0] record = self.driver.list_records(zone=zone)[0] status = self.driver.delete_record(record=record) self.assertTrue(status) def test_delete_record_does_not_exist(self): zone = self.driver.list_zones()[0] record = self.driver.list_records(zone=zone)[0] HostVirtualMockHttp.type = 'RECORD_DOES_NOT_EXIST' try: self.driver.delete_record(record=record) except RecordDoesNotExistError: e = sys.exc_info()[1] self.assertEqual(e.record_id, record.id) else: self.fail('Exception was not thrown') class HostVirtualMockHttp(MockHttp): fixtures = DNSFileFixtures('hostvirtual') def _dns_zone(self, method, url, body, headers): body = self.fixtures.load('get_zone.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _dns_zones(self, method, url, body, headers): body = self.fixtures.load('list_zones.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _dns_record(self, method, url, body, headers): body = self.fixtures.load('get_record.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _dns_records(self, method, url, body, headers): body = self.fixtures.load('list_records.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _dns_zone_ZONE_DOES_NOT_EXIST(self, method, url, body, headers): body = self.fixtures.load('zone_does_not_exist.json') return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.NOT_FOUND]) def _dns_zone_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): body = self.fixtures.load('get_zone.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _dns_zones_ZONE_DOES_NOT_EXIST(self, method, url, body, headers): body = self.fixtures.load('zone_does_not_exist.json') return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.NOT_FOUND]) def _dns_record_ZONE_DOES_NOT_EXIST(self, method, url, body, headers): body = self.fixtures.load('zone_does_not_exist.json') return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.NOT_FOUND]) def _dns_record_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): body = self.fixtures.load('zone_does_not_exist.json') return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.NOT_FOUND]) def _dns_records_ZONE_DOES_NOT_EXIST(self, method, url, body, headers): body = self.fixtures.load('zone_does_not_exist.json') return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.NOT_FOUND]) def _dns_zones_RECORD_DOES_NOT_EXIST(self, method, url, body, headers): body = self.fixtures.load('zone_does_not_exist.json') return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.NOT_FOUND]) if __name__ == '__main__': sys.exit(unittest.main())
test_list_records
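An illustrative sketch of the dispatch convention the mock handler names above rely on (an assumption about libcloud's MockHttp internals, not something shown in this file): the request path is mangled into a method name, and the active MockHttp.type is appended as a suffix. method_name_for is a hypothetical stand-in:

def method_name_for(path, type_suffix=None):
    # '/dns/zones' -> '_dns_zones'; with type 'ZONE_DOES_NOT_EXIST' the
    # mock would dispatch to '_dns_zones_ZONE_DOES_NOT_EXIST'
    name = path.split('?')[0].replace('/', '_').replace('.', '_').replace('-', '_')
    if type_suffix:
        name += '_' + type_suffix
    return name

assert method_name_for('/dns/zones') == '_dns_zones'
assert method_name_for('/dns/zones', 'ZONE_DOES_NOT_EXIST') == '_dns_zones_ZONE_DOES_NOT_EXIST'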
xt_filehelper_06_test.go
package pathfileops import ( "strings" "testing" ) func TestFileHelper_GetPathFromPathFileName_01(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common\\xt_dirmgr_01_test.go") expectedDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common") result, isEmpty, err := fh.GetPathFromPathFileName(commonDir) if err != nil { t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). commonDir='%v' Error='%v'", commonDir, err.Error()) } if isEmpty != false { t.Errorf("Expected isEmpty GetPathFromPathFileName for valid file extension to return 'false', instead isEmpty='%v' ", isEmpty) } if result != expectedDir { t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid path/file name, instead got: %v", expectedDir, result) } } func TestFileHelper_GetPathFromPathFileName_02(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash("..\\..\\pathfilego\\003_filehelper\\common\\xt_dirmgr_01_test.go") expectedDir := fh.AdjustPathSlash("..\\..\\pathfilego\\003_filehelper\\common") result, isEmpty, err := fh.GetPathFromPathFileName(commonDir) if err != nil { t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). commonDir='%v' Error='%v'", commonDir, err.Error()) } if isEmpty != false { t.Errorf("Expected isEmpty GetPathFromPathFileName for valid file extension to return 'false', instead isEmpty='%v' ", isEmpty) } if result != expectedDir { t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid path/file name, instead got: %v", expectedDir, result) } } func TestFileHelper_GetPathFromPathFileName_03(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash("D:\\go\\work\\src\\MikeAustin71\\pathfilego\\" + "003_filehelper\\common\\xt_dirmgr_01_test.go") expectedDir := fh.AdjustPathSlash("D:\\go\\work\\src\\MikeAustin71\\pathfilego\\" + "003_filehelper\\common") result, isEmpty, err := fh.GetPathFromPathFileName(commonDir) if err != nil { t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). commonDir='%v' Error='%v'", commonDir, err.Error()) } if isEmpty != false { t.Errorf("Expected isEmpty GetPathFromPathFileName for valid file extension to return 'false', instead isEmpty='%v' ", isEmpty) } if result != expectedDir { t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid file name. Instead path='%v'", expectedDir, result) } } func TestFileHelper_GetPathFromPathFileName_04(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash("D:\\go\\work\\src\\MikeAustin71\\pathfilego\\" + "003_filehelper\\common\\xt_dirmgr_01_test.go") expectedDir := fh.AdjustPathSlash("D:\\go\\work\\src\\MikeAustin71\\pathfilego\\" + "003_filehelper\\common") result, isEmpty, err := fh.GetPathFromPathFileName(commonDir) if err != nil { t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). commonDir='%v' Error='%v'", commonDir, err.Error())
} if isEmpty != false { t.Errorf("Expected isEmpty='%v', instead isEmpty='%v' ", false, isEmpty) } if result != expectedDir { t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid path/file name. "+ "Instead path=='%v' ", expectedDir, result) } } func TestFileHelper_GetPathFromPathFileName_05(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash("xt_dirmgr_01_test.go") expectedDir := fh.AdjustPathSlash("") result, isEmpty, err := fh.GetPathFromPathFileName(commonDir) if err != nil { t.Errorf("Expected no error returned from fh.GetPathFromPathFileName(commonDir). "+ "Instead an error WAS Returned. commonDir='%v' Error='%v'", commonDir, err.Error()) } if isEmpty != true { t.Errorf("Expected isEmpty='%v', instead isEmpty='%v' ", true, isEmpty) } if result != expectedDir { t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid path/file name. "+ "Instead path=='%v' ", expectedDir, result) } } func TestFileHelper_GetPathFromPathFileName_06(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common\\") expectedDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common") result, isEmpty, err := fh.GetPathFromPathFileName(commonDir) if err != nil { t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). "+ "commonDir='%v' Error='%v'", commonDir, err.Error()) } if isEmpty != false { t.Errorf("Expected isEmpty GetPathFromPathFileName for valid file extension to return "+ "'false', instead isEmpty='%v' ", isEmpty) } if result != expectedDir { t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid path/file "+ "name, instead got: %v", expectedDir, result) } } func TestFileHelper_GetPathFromPathFileName_07(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash("./") expectedDir := fh.AdjustPathSlash("./") result, isEmpty, err := fh.GetPathFromPathFileName(commonDir) if err != nil { t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). "+ "commonDir='%v' Error='%v'", commonDir, err.Error()) return } if false != isEmpty { t.Errorf("Expected GetPathFromPathFileName isEmpty=='%v'. Instead, isEmpty='%v' ", false, isEmpty) } if result != expectedDir { t.Errorf("Expected GetPathFromPathFileName to return path == '%v'\n"+ "for valid path/file name.\nInstead return path == '%v'\n", expectedDir, result) } } func TestFileHelper_GetPathFromPathFileName_08(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash(".") expectedDir := fh.AdjustPathSlash(".") result, isEmpty, err := fh.GetPathFromPathFileName(commonDir) if err != nil { t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). commonDir='%v' "+ "Error='%v'", commonDir, err.Error()) } if false != isEmpty { t.Errorf("Expected GetPathFromPathFileName isEmpty=='%v'. Instead, isEmpty='%v' ", false, isEmpty) } if result != expectedDir { t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid "+ "path/file name, instead got: %v", expectedDir, result) } } func TestFileHelper_GetPathFromPathFileName_09(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash("..") expectedDir := fh.AdjustPathSlash("..") result, isEmpty, err := fh.GetPathFromPathFileName(commonDir) if err != nil { t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir). commonDir='%v' Error='%v'", commonDir, err.Error()) } if false != isEmpty { t.Errorf("Expected GetPathFromPathFileName isEmpty=='%v'. 
Instead, isEmpty='%v' ", false, isEmpty) } if result != expectedDir { t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid path/file "+ "name, instead got: %v", expectedDir, result) } } func TestFileHelper_GetPathFromPathFileName_10(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash("") expectedDir := fh.AdjustPathSlash("") result, isEmpty, err := fh.GetPathFromPathFileName(commonDir) if err == nil { t.Errorf("Expected error to be returned from fh.GetPathFromPathFileName(commonDir). "+ "commonDir='%v' No Error Returned!", commonDir) } if true != isEmpty { t.Errorf("Expected GetPathFromPathFileName isEmpty=='%v'. Instead, isEmpty='%v' ", true, isEmpty) } if result != expectedDir { t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid path/file "+ "name, instead got: %v", expectedDir, result) } } func TestFileHelper_GetPathFromPathFileName_11(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash("../../../../") expectedDir := fh.AdjustPathSlash("../../../../") result, isEmpty, err := fh.GetPathFromPathFileName(commonDir) if err != nil { t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir).\n"+ "commonDir='%v'\nError='%v'\n", commonDir, err.Error()) return } if false != isEmpty { t.Errorf("Expected GetPathFromPathFileName isEmpty=='%v'.\n"+ "Instead, isEmpty='%v'\n", false, isEmpty) } if result != expectedDir { t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid path/file "+ "name\n"+ "Instead return path == '%v'\n", expectedDir, result) } } func TestFileHelper_GetPathFromPathFileName_12(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash("./xt_dirmgr_01_test.go") expectedDir := fh.AdjustPathSlash("./") result, isEmpty, err := fh.GetPathFromPathFileName(commonDir) if err != nil { t.Errorf("Expected no error returned from fh.GetPathFromPathFileName(commonDir). "+ "Instead an error WAS Returned. commonDir='%v' Error='%v'", commonDir, err.Error()) } if isEmpty != false { t.Errorf("Expected isEmpty='%v', instead isEmpty='%v' ", false, isEmpty) } if result != expectedDir { t.Errorf("Expected GetPathFromPathFileName to return path == '%v' for valid path/file "+ "name. Instead path=='%v' ", expectedDir, result) } } func TestFileHelper_GetPathFromPathFileName_13(t *testing.T) { fh := FileHelper{} result, isEmpty, err := fh.GetPathFromPathFileName(" ") if err == nil { t.Error("Expected an error return from fh.GetPathFromPathFileName(\" \") " + "because the input parameter consists entirely of spaces. " + "However, NO ERROR WAS RETURNED!") } if isEmpty == false { t.Error("Expected isEmpty='true', instead isEmpty='false' ") } if result != "" { t.Errorf("Expected GetPathFromPathFileName to return path == 'empty string'. "+ "Instead path=='%v' ", result) } } func TestFileHelper_GetPathFromPathFileName_14(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common\\.git") expectedDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common") result, isEmpty, err := fh.GetPathFromPathFileName(commonDir) if err != nil { t.Errorf("Error returned from fh.GetPathFromPathFileName(commonDir).\n"+ "commonDir='%v'\nError='%v'", commonDir, err.Error()) } if isEmpty != false { t.Errorf("Expected isEmpty GetPathFromPathFileName for valid file extension\n"+ "to return 'false'. 
Instead isEmpty='%v'\n", isEmpty) } if result != expectedDir { t.Errorf("ERROR: Expected GetPathFromPathFileName to return "+ "path == '%v' for valid path/file name.\n"+ "Instead path == %v\n", expectedDir, result) } } func TestFileHelper_GetPathAndFileNameExt_01(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common\\xt_dirmgr_01_test.go") expectedDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common") expectedFileNameExt := "xt_dirmgr_01_test.go" pathDir, fileNameExt, bothAreEmpty, err := fh.GetPathAndFileNameExt(commonDir) if err != nil { t.Errorf("Error returned from fh.GetPathAndFileNameExt(commonDir). commonDir='%v' "+ "Error='%v'", commonDir, err.Error()) } if false != bothAreEmpty { t.Errorf("Expected GetPathAndFileNameExt bothAreEmpty='%v'. Instead, bothAreEmpty='%v' ", false, bothAreEmpty) } if pathDir != expectedDir { t.Errorf("Expected GetPathAndFileNameExt to return path == '%v'. "+ "Instead, path== '%v' ", expectedDir, pathDir) } if fileNameExt != expectedFileNameExt { t.Errorf("Expected GetPathAndFileNameExt to return fileNameExt == '%v'. Instead, "+ "fileNameExt == '%v' ", expectedFileNameExt, fileNameExt) } } func TestFileHelper_GetPathAndFileNameExt_02(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common\\") expectedDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common") expectedFileNameExt := "" pathDir, fileNameExt, bothAreEmpty, err := fh.GetPathAndFileNameExt(commonDir) if err != nil { t.Errorf("Error returned from fh.GetPathAndFileNameExt(commonDir). commonDir='%v' Error='%v'", commonDir, err.Error()) } if false != bothAreEmpty { t.Errorf("Expected GetPathAndFileNameExt bothAreEmpty='%v'. Instead, bothAreEmpty='%v' ", false, bothAreEmpty) } if pathDir != expectedDir { t.Errorf("Expected GetPathAndFileNameExt to return path == '%v'. Instead, path== '%v' ", expectedDir, pathDir) } if fileNameExt != expectedFileNameExt { t.Errorf("Expected GetPathAndFileNameExt to return fileNameExt == '%v'. Instead, "+ "fileNameExt == '%v' ", expectedFileNameExt, fileNameExt) } } func TestFileHelper_GetPathAndFileNameExt_03(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common\\dirmgr_test") expectedDir := fh.AdjustPathSlash(".\\pathfilego\\003_filehelper\\common") expectedFileNameExt := "dirmgr_test" pathDir, fileNameExt, bothAreEmpty, err := fh.GetPathAndFileNameExt(commonDir) if err != nil { t.Errorf("Error returned from fh.GetPathAndFileNameExt(commonDir). commonDir='%v' "+ "Error='%v'", commonDir, err.Error()) } if false != bothAreEmpty { t.Errorf("Expected GetPathAndFileNameExt bothAreEmpty='%v'. Instead, bothAreEmpty='%v' ", false, bothAreEmpty) } if pathDir != expectedDir { t.Errorf("Expected GetPathAndFileNameExt to return path == '%v'. Instead, path== '%v' ", expectedDir, pathDir) } if fileNameExt != expectedFileNameExt { t.Errorf("Expected GetPathAndFileNameExt to return fileNameExt == '%v'. Instead, "+ "fileNameExt == '%v' ", expectedFileNameExt, fileNameExt) } } func TestFileHelper_GetPathAndFileNameExt_04(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash("xt_dirmgr_01_test.go") expectedDir := fh.AdjustPathSlash("") expectedFileNameExt := "xt_dirmgr_01_test.go" pathDir, fileNameExt, bothAreEmpty, err := fh.GetPathAndFileNameExt(commonDir) if err != nil { t.Errorf("Error returned from fh.GetPathAndFileNameExt(commonDir). 
commonDir='%v' Error='%v'", commonDir, err.Error()) } if false != bothAreEmpty { t.Errorf("Expected GetPathAndFileNameExt bothAreEmpty='%v'. Instead, bothAreEmpty='%v' ", false, bothAreEmpty) } if pathDir != expectedDir { t.Errorf("Expected GetPathAndFileNameExt to return path == '%v'. Instead, path== '%v' ", expectedDir, pathDir) } if fileNameExt != expectedFileNameExt { t.Errorf("Expected GetPathAndFileNameExt to return fileNameExt == '%v'. Instead, fileNameExt == '%v' ", expectedFileNameExt, fileNameExt) } } func TestFileHelper_GetPathAndFileNameExt_05(t *testing.T) { fh := FileHelper{} pathDir, fileNameExt, bothAreEmpty, err := fh.GetPathAndFileNameExt("") if err == nil { t.Error("Expected error return from fh.GetPathAndFileNameExt(\"\") because " + "the input parameter is an empty string. " + "However, NO ERROR WAS RETURNED!") } if pathDir != "" { t.Errorf("Expected pathDir would be an empty string. Instead, pathDir='%v'", pathDir) } if fileNameExt != "" { t.Errorf("Expected fileNameExt would be an empty string. Instead, pathDir='%v'", fileNameExt) } if bothAreEmpty == false { t.Error("Expected bothAreEmpty='true'. Instead, bothArEmpty='false'. ") } } func TestFileHelper_GetPathAndFileNameExt_06(t *testing.T) { fh := FileHelper{} pathDir, fileNameExt, bothAreEmpty, err := fh.GetPathAndFileNameExt(" ") if err == nil { t.Error("Expected error return from fh.GetPathAndFileNameExt(\" \") because " + "the input parameter consists entirely of blank spaces. " + "However, NO ERROR WAS RETURNED!") } if pathDir != "" { t.Errorf("Expected pathDir would be an empty string. Instead, pathDir='%v'", pathDir) } if fileNameExt != "" { t.Errorf("Expected fileNameExt would be an empty string. Instead, pathDir='%v'", fileNameExt) } if bothAreEmpty == false { t.Error("Expected bothAreEmpty='true'. Instead, bothArEmpty='false'. ") } } func TestFileHelper_GetPathSeparatorIndexesInPathStr_01(t *testing.T) { fh := FileHelper{} idxs, err := fh.GetPathSeparatorIndexesInPathStr("") if err == nil { t.Error("Expected error return from fh.GetPathSeparatorIndexesInPathStr(\"\") " + "because the input parameter is an empty string. " + "However, NO ERROR WAS RETURNED!") } if len(idxs) != 0 { t.Errorf("Expected length of indexes='0'. Instead length of indexes='%v' ", len(idxs)) } } func TestFileHelper_GetPathSeparatorIndexesInPathStr_02(t *testing.T) { fh := FileHelper{} idxs, err := fh.GetPathSeparatorIndexesInPathStr(" ") if err == nil { t.Error("Expected error return from fh.GetPathSeparatorIndexesInPathStr(\" \") " + "because the input parameter consists entirely of blank spaces. " + "However, NO ERROR WAS RETURNED!") } if len(idxs) != 0 { t.Errorf("Expected length of indexes='0'. Instead length of indexes='%v' ", len(idxs)) } } // /d/gowork/src/MikeAustin71/pathfileopsgo/pathfileops // D:\gowork\src\MikeAustin71\pathfileopsgo\pathfileops func TestFileHelper_GetVolumeName_01(t *testing.T) { fh := FileHelper{} volumeName := fh.GetVolumeName("") if volumeName != "" { t.Errorf("Expected an empty string return from fh.GetVolumeName(\"\") because "+ "the input parameter is an empty string. Instead, the return value='%v' ", volumeName) } } func TestFileHelper_GetVolumeName_02(t *testing.T) { fh := FileHelper{} volumeName := fh.GetVolumeName(" ") if volumeName != "" { t.Errorf("Expected an empty string return from fh.GetVolumeName(\"\") because "+ "the input parameter consists of blank spaces. 
Instead, the return value='%v' ", volumeName) } } func TestFileHelper_GetVolumeName_03(t *testing.T) { fh := FileHelper{} testVolStr := "D:\\gowork\\src\\MikeAustin71\\pathfileopsgo\\pathfileops" expectedVolName := strings.ToLower("D:") volumeName := fh.GetVolumeName(testVolStr) if expectedVolName != strings.ToLower(volumeName) { t.Errorf("Expected volumeName='%v'. Instead, volName='%v' ", expectedVolName, strings.ToLower(volumeName)) } } func TestFileHelper_GetVolumeName_04(t *testing.T) { fh := FileHelper{} testVolStr := "D:\\" expectedVolName := strings.ToLower("D:") volumeName := fh.GetVolumeName(testVolStr) if expectedVolName != strings.ToLower(volumeName) { t.Errorf("Expected volumeName='%v'. Instead, volName='%v' ", expectedVolName, strings.ToLower(volumeName)) } } func TestFileHelper_GetVolumeName_05(t *testing.T) { fh := FileHelper{} testVolStr := "D:" isLinux := GlobalPathFileOpsSys{}.IsLinuxOperatingSystem() if isLinux { testVolStr = "/dev/sda1" } expectedVolName := strings.ToLower(testVolStr) volumeName := fh.GetVolumeName(testVolStr) if expectedVolName != strings.ToLower(volumeName) { t.Errorf("Expected volumeName='%v'. Instead, volName='%v' ", expectedVolName, strings.ToLower(volumeName)) } } func TestFileHelper_GetVolumeNameIndex_01(t *testing.T) { fh := FileHelper{} testVolStr := "" _, _, volumeName := fh.GetVolumeNameIndex(testVolStr) if volumeName != "" { t.Errorf("Expected returned volumeName to be an EMPTY STRING.\n" + "Instead, volume Name='%v'\n", volumeName) } } func TestFileHelper_IsAbsolutePath_01(t *testing.T) { fh := FileHelper{} commonDir := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_01_dir/level_02_dir/" + "level_03_dir/level_3_1_test.txt") result := fh.IsAbsolutePath(commonDir) if result == true { t.Error("IsAbsolutePath result is INVALID. Relative path classified as Absolute path!") } } func TestFileHelper_IsAbsolutePath_02(t *testing.T) { fh := FileHelper{} absPathDir := fh.AdjustPathSlash("D:/gowork/src/MikeAustin71/pathfileopsgo/filesfortest/" + "levelfilesfortest/level_01_dir/level_02_dir/level_03_dir/level_3_1_test.txt") result := fh.IsAbsolutePath(absPathDir) if result == false { t.Error("IsAbsolutePath result is INVALID. Absolute path classified as Relative Path!") } } func TestFileHelper_IsAbsolutePath_03(t *testing.T) { fh := FileHelper{} absPathDir := "" result := fh.IsAbsolutePath(absPathDir) if result == true { t.Error("Expected a return value of 'false' from fh.IsAbsolutePath(absPathDir) because\n" + "'absPathDir' is an empty string. However, the returned value was 'true'. ERROR!\n") } } func TestFileHelper_IsPathFileString_01(t *testing.T) { fh := FileHelper{} pathFile := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_01_dir/" + "level_02_dir/level_03_dir/level_3_1_test.txt") expectedPathFile := fh.AdjustPathSlash("..\\..\\filesfortest\\levelfilesfortest\\level_01_dir\\" + "level_02_dir\\level_03_dir\\level_3_1_test.txt") pathFileType, absolutePath, err := fh.IsPathFileString(pathFile) if err != nil { t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+ "pathFile='%v' Error='%v' ", pathFile, err.Error()) return } if pathFileType != PathFileType.PathFile() { t.Errorf("Expected PathFileTypeCode='PathFile'. Instead, PathFileTypeCode='%v' ", pathFileType.String()) } absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile) if err != nil { t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). 
"+ "expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error()) } if absExpectedPathFile != absolutePath { t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.", absExpectedPathFile, absolutePath) } } func TestFileHelper_IsPathFileString_02(t *testing.T) { fh := FileHelper{} pathFile := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_01_dir/" + "level_02_dir/level_03_dir/iDoNotExist.txt") expectedPathFile := fh.AdjustPathSlash("..\\..\\filesfortest\\levelfilesfortest\\level_01_dir\\" + "level_02_dir\\level_03_dir\\iDoNotExist.txt") pathFileType, absolutePath, err := fh.IsPathFileString(pathFile) if err != nil { t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+ "pathFile='%v' Error='%v' ", pathFile, err.Error()) return } if pathFileType != PathFileType.PathFile() { t.Errorf("Expected PathFileTypeCode='PathFile'. Instead, PathFileTypeCode='%v' ", pathFileType.String()) } absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile) if err != nil { t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+ "expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error()) } if absExpectedPathFile != absolutePath { t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.", absExpectedPathFile, absolutePath) } } func TestFileHelper_IsPathFileString_03(t *testing.T) { fh := FileHelper{} pathFile := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_01_dir/" + "level_02_dir/level_03_dir") expectedPathFile := fh.AdjustPathSlash("..\\..\\filesfortest\\levelfilesfortest\\level_01_dir\\" + "level_02_dir\\level_03_dir") pathFileType, absolutePath, err := fh.IsPathFileString(pathFile) if err != nil { t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+ "pathFile='%v' Error='%v' ", pathFile, err.Error()) return } if pathFileType != PathFileType.Path() { t.Errorf("Expected PathFileTypeCode='PathFile'. Instead, PathFileTypeCode='%v' ", pathFileType.String()) } absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile) if err != nil { t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+ "expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error()) } if absExpectedPathFile != absolutePath { t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.", absExpectedPathFile, absolutePath) } } func TestFileHelper_IsPathFileString_04(t *testing.T) { fh := FileHelper{} pathFile := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_01_dir/" + "level_02_dir/iDoNotExist") expectedPathFile := fh.AdjustPathSlash("..\\..\\filesfortest\\levelfilesfortest\\level_01_dir\\" + "level_02_dir\\iDoNotExist") pathFileType, absolutePath, err := fh.IsPathFileString(pathFile) if err != nil { t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+ "pathFile='%v' Error='%v' ", pathFile, err.Error()) return } expectedFileType := PathFileType.Indeterminate() if expectedFileType != pathFileType { t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+ "testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath) } absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile) if err != nil { t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+ "expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error()) } if absExpectedPathFile != absolutePath { t.Errorf("Error: Expected 'absolutePath'='%v'. 
Instead, 'absolutePath='%v'.", absExpectedPathFile, absolutePath) } } func TestFileHelper_IsPathFileString_05(t *testing.T) { fh := FileHelper{} pathFile := fh.AdjustPathSlash("D:") expectedPathFile := fh.AdjustPathSlash("D:") pathFileType, absolutePath, err := fh.IsPathFileString(pathFile) if err != nil { t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+ "pathFile='%v' Error='%v' ", pathFile, err.Error()) return } expectedFileType := PathFileType.Volume() if expectedFileType != pathFileType { t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+ "testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath) } absExpectedPathFile := strings.ToLower(expectedPathFile) absolutePath = strings.ToLower(absolutePath) if absExpectedPathFile != absolutePath { t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.", absExpectedPathFile, absolutePath) } } func TestFileHelper_IsPathFileString_06(t *testing.T) { fh := FileHelper{} pathFile := fh.AdjustPathSlash("D:\\") expectedPathFile := fh.AdjustPathSlash("D:\\") pathFileType, absolutePath, err := fh.IsPathFileString(pathFile) if err != nil { t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+ "pathFile='%v' Error='%v' ", pathFile, err.Error()) return } expectedFileType := PathFileType.Path() if expectedFileType != pathFileType { t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+ "testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath) } absExpectedPathFile := strings.ToLower(expectedPathFile) absolutePath = strings.ToLower(absolutePath) if absExpectedPathFile != absolutePath { t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.", absExpectedPathFile, absolutePath) } } func TestFileHelper_IsPathFileString_07(t *testing.T) { fh := FileHelper{} pathFile := fh.AdjustPathSlash("fileIDoNotExist.txt") expectedPathFile := fh.AdjustPathSlash("fileIDoNotExist.txt") pathFileType, absolutePath, err := fh.IsPathFileString(pathFile) if err != nil { t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+ "pathFile='%v' Error='%v' ", pathFile, err.Error()) return } expectedFileType := PathFileType.File() if expectedFileType != pathFileType { t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+ "testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath) } absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile) if err != nil { t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+ "expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error()) } if absExpectedPathFile != absolutePath { t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.", absExpectedPathFile, absolutePath) } } func TestFileHelper_IsPathFileString_08(t *testing.T) { fh := FileHelper{} pathFile := fh.AdjustPathSlash("fileIDoNotExist") expectedPathFile := fh.AdjustPathSlash("fileIDoNotExist") pathFileType, absolutePath, err := fh.IsPathFileString(pathFile) if err != nil { t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+ "pathFile='%v' Error='%v' ", pathFile, err.Error()) return } expectedFileType := PathFileType.File() if expectedFileType != pathFileType { t.Errorf("Expected PathFileTypeCode='%v'. 
Instead, PathFileTypeCode='%v' "+ "testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath) } absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile) if err != nil { t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+ "expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error()) } if absExpectedPathFile != absolutePath { t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.", absExpectedPathFile, absolutePath) } } func TestFileHelper_IsPathFileString_09(t *testing.T) { fh := FileHelper{} pathFile := fh.AdjustPathSlash("..") expectedPathFile := fh.AdjustPathSlash("..") pathFileType, absolutePath, err := fh.IsPathFileString(pathFile) if err != nil { t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+ "pathFile='%v' Error='%v' ", pathFile, err.Error()) return } expectedFileType := PathFileType.Path() if expectedFileType != pathFileType { t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+ "testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath) } absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile) if err != nil { t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+ "expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error()) } if absExpectedPathFile != absolutePath { t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.", absExpectedPathFile, absolutePath) } } func TestFileHelper_IsPathFileString_10(t *testing.T) { fh := FileHelper{} pathFile := fh.AdjustPathSlash(".") expectedPathFile := fh.AdjustPathSlash(".") pathFileType, absolutePath, err := fh.IsPathFileString(pathFile) if err != nil { t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+ "pathFile='%v' Error='%v' ", pathFile, err.Error()) return } expectedFileType := PathFileType.Path() if expectedFileType != pathFileType { t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+ "testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath) } absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile) if err != nil { t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+ "expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error()) } if absExpectedPathFile != absolutePath { t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.", absExpectedPathFile, absolutePath) } } func TestFileHelper_IsPathFileString_11(t *testing.T) { fh := FileHelper{} pathFile := "" _, _, err := fh.IsPathFileString(pathFile) if err == nil { t.Error("Expected an error return from fh.IsPathFileString(pathFile) " + "because 'pathFile' is an empty string. However, NO ERROR WAS RETURNED! ") } } func TestFileHelper_IsPathFileString_12(t *testing.T) { fh := FileHelper{} pathFile := " " _, _, err := fh.IsPathFileString(pathFile) if err == nil { t.Error("Expected an error return from fh.IsPathFileString(pathFile) " + "because 'pathFile' consists of blank spaces. However, NO ERROR WAS RETURNED! ") } } func TestFileHelper_IsPathFileString_13(t *testing.T) { fh := FileHelper{} pathFile := "..\\...\\" _, _, err := fh.IsPathFileString(pathFile) if err == nil { t.Error("Expected an error return from fh.IsPathFileString(pathFile) " + "because 'pathFile' 3-dots ('...'). However, NO ERROR WAS RETURNED! 
") } } func TestFileHelper_IsPathFileString_14(t *testing.T) { fh := FileHelper{} pathFile := "....\\" _, _, err := fh.IsPathFileString(pathFile) if err == nil { t.Error("Expected an error return from fh.IsPathFileString(pathFile) " + "because 'pathFile' 4-dots ('....'). However, NO ERROR WAS RETURNED! ") } } func TestFileHelper_IsPathFileString_15(t *testing.T) { fh := FileHelper{} pathFile := fh.AdjustPathSlash(".\\") expectedPathFile := fh.AdjustPathSlash(".\\") pathFileType, absolutePath, err := fh.IsPathFileString(pathFile) if err != nil { t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+ "pathFile='%v' Error='%v' ", pathFile, err.Error()) return } expectedFileType := PathFileType.Path() if expectedFileType != pathFileType { t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+ "testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath) } absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile) if err != nil { t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+ "expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error()) } if absExpectedPathFile != absolutePath { t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.", absExpectedPathFile, absolutePath) } } func TestFileHelper_IsPathFileString_16(t *testing.T) { fh := FileHelper{} pathFile := fh.AdjustPathSlash("..\\..\\..\\") expectedPathFile := fh.AdjustPathSlash("..\\..\\..\\") pathFileType, absolutePath, err := fh.IsPathFileString(pathFile) if err != nil { t.Errorf("Error returned from fh.IsPathFileString(pathFile). "+ "pathFile='%v' Error='%v' ", pathFile, err.Error()) return } expectedFileType := PathFileType.Path() if expectedFileType != pathFileType { t.Errorf("Expected PathFileTypeCode='%v'. Instead, PathFileTypeCode='%v' "+ "testFilePathStr='%v' ", expectedFileType.String(), pathFileType.String(), absolutePath) } absExpectedPathFile, err := fh.MakeAbsolutePath(expectedPathFile) if err != nil { t.Errorf("Error returned by fh.MakeAbsolutePath(expectedPathFile). "+ "expectedPathFile='%v' Error='%v' ", expectedPathFile, err.Error()) } if absExpectedPathFile != absolutePath { t.Errorf("Error: Expected 'absolutePath'='%v'. Instead, 'absolutePath='%v'.", absExpectedPathFile, absolutePath) } } func TestFileHelper_IsPathString_01(t *testing.T) { fh := FileHelper{} pathFile := fh.AdjustPathSlash("..\\..\\..\\") expectedPathStr := fh.AdjustPathSlash("..\\..\\..\\") isPath, cannotDetermine, testPathStr, err := fh.IsPathString(pathFile) if err != nil { t.Errorf("Error returned from fh.IsPathString(pathFile). "+ "pathFile='%v' Error='%v' ", pathFile, err.Error()) } if true != isPath { t.Errorf("Expected isPath='%v'. Instead, isPath='%v' "+ "testPathStr='%v' ", true, isPath, testPathStr) } if expectedPathStr != testPathStr { t.Errorf("Error: Expected 'expectedPathStr'='%v'. Instead, 'expectedPathStr='%v'.", expectedPathStr, testPathStr) } if false != cannotDetermine { t.Errorf("Error: Expected 'cannotDetermine'='%v'. 
Instead, 'cannotDetermine'='%v' ", false, cannotDetermine) } } func TestFileHelper_IsPathString_02(t *testing.T) { fh := FileHelper{} pathFile := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_01_dir/" + "level_02_dir/level_03_dir") expectedPathStr := fh.AdjustPathSlash("..\\..\\filesfortest\\levelfilesfortest\\level_01_dir\\" + "level_02_dir\\level_03_dir") isPath, cannotDetermine, testPathStr, err := fh.IsPathString(pathFile) if err != nil { t.Errorf("Error returned from fh.IsPathString(pathFile). "+ "pathFile='%v' Error='%v' ", pathFile, err.Error()) } if true != isPath { t.Errorf("Expected isPath='%v'. Instead, isPath='%v' "+ "testPathStr='%v' ", true, isPath, testPathStr) } if expectedPathStr != testPathStr { t.Errorf("Error: Expected 'expectedPathStr'='%v'. Instead, 'expectedPathStr='%v'.", expectedPathStr, testPathStr) } if false != cannotDetermine { t.Errorf("Error: Expected 'cannotDetermine'='%v'. Instead, 'cannotDetermine'='%v' ", false, cannotDetermine) } } func TestFileHelper_IsPathString_03(t *testing.T) { fh := FileHelper{} pathFile := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_01_dir/" + "level_02_dir/iDoNotExist") expectedPathStr := fh.AdjustPathSlash("..\\..\\filesfortest\\levelfilesfortest\\level_01_dir\\" + "level_02_dir\\iDoNotExist") isPath, cannotDetermine, testPathStr, err := fh.IsPathString(pathFile) if err != nil { t.Errorf("Error returned from fh.IsPathString(pathFile). "+ "pathFile='%v' Error='%v' ", pathFile, err.Error()) } if false != isPath { t.Errorf("Expected isPath='%v'. Instead, isPath='%v' "+ "testPathStr='%v' ", false, isPath, testPathStr) } if expectedPathStr != testPathStr { t.Errorf("Error: Expected 'expectedPathStr'='%v'. Instead, 'expectedPathStr='%v'.", expectedPathStr, testPathStr) } if true != cannotDetermine { t.Errorf("Error: Expected 'cannotDetermine'='%v'. Instead, 'cannotDetermine'='%v' ", true, cannotDetermine) } } func TestFileHelper_IsPathString_04(t *testing.T) { fh := FileHelper{} pathFile := "" _, _, _, err := fh.IsPathString(pathFile) if err == nil { t.Errorf("Expected an error return from fh.IsPathString(pathFile) " + "because 'pathFile' is an empty string. " + "However, NO ERROR WAS RETURNED!") } } func TestFileHelper_IsPathString_05(t *testing.T) { fh := FileHelper{} pathFile := " " _, _, _, err := fh.IsPathString(pathFile) if err == nil { t.Errorf("Expected an error return from fh.IsPathString(pathFile) " + "because 'pathFile' consists entirely of blank spaces. " + "However, NO ERROR WAS RETURNED!") } } func TestFileHelper_IsPathString_06(t *testing.T) { fh := FileHelper{} pathFile := fh.AdjustPathSlash("../../filesfortest/levelfilesfortest/level_01_dir/" + "level_02_dir/level_03_dir") pathFile = "." + pathFile _, _, _, err := fh.IsPathString(pathFile) if err == nil { t.Errorf("Expected an error return from fh.IsPathString(pathFile) " + "because 'pathFile' includes the text '...' . " + "However, NO ERROR WAS RETURNED!") } }
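For comparison, a short sketch (added for illustration; not part of the corpus) of how Go's standard library splits the same kind of input that GetPathFromPathFileName handles above. One behavioral difference: filepath.Split keeps the trailing separator on the directory part, while the expected values in these tests drop it (filepath.Dir matches that more closely):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	dir, file := filepath.Split("pathfilego/003_filehelper/common/xt_dirmgr_01_test.go")
	fmt.Println(dir)  // "pathfilego/003_filehelper/common/"
	fmt.Println(file) // "xt_dirmgr_01_test.go"

	// filepath.Dir cleans the result and drops the trailing separator.
	fmt.Println(filepath.Dir("pathfilego/003_filehelper/common/xt_dirmgr_01_test.go"))
	// "pathfilego/003_filehelper/common"
}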
syntax_type.rs
/** * Copyright (c) 2016, Facebook, Inc. * All rights reserved. * * This source code is licensed under the MIT license found in the * LICENSE file in the "hack" directory of this source tree. An additional * grant of patent rights can be found in the PATENTS file in the same * directory. * ** * * THIS FILE IS @generated; DO NOT EDIT IT * To regenerate this file, run * * buck run //hphp/hack/src:generate_full_fidelity * ** * */ use crate::syntax::*; pub trait SyntaxType<'a, C>: SyntaxTypeBase<'a, C> { fn make_end_of_file(ctx: &C, end_of_file_token: Self) -> Self; fn make_script(ctx: &C, script_declarations: Self) -> Self; fn make_qualified_name(ctx: &C, qualified_name_parts: Self) -> Self; fn make_simple_type_specifier(ctx: &C, simple_type_specifier: Self) -> Self; fn make_literal_expression(ctx: &C, literal_expression: Self) -> Self; fn make_prefixed_string_expression(ctx: &C, prefixed_string_name: Self, prefixed_string_str: Self) -> Self; fn make_variable_expression(ctx: &C, variable_expression: Self) -> Self; fn make_pipe_variable_expression(ctx: &C, pipe_variable_expression: Self) -> Self; fn make_file_attribute_specification(ctx: &C, file_attribute_specification_left_double_angle: Self, file_attribute_specification_keyword: Self, file_attribute_specification_colon: Self, file_attribute_specification_attributes: Self, file_attribute_specification_right_double_angle: Self) -> Self; fn make_enum_declaration(ctx: &C, enum_attribute_spec: Self, enum_keyword: Self, enum_name: Self, enum_colon: Self, enum_base: Self, enum_type: Self, enum_left_brace: Self, enum_enumerators: Self, enum_right_brace: Self) -> Self; fn make_enumerator(ctx: &C, enumerator_name: Self, enumerator_equal: Self, enumerator_value: Self, enumerator_semicolon: Self) -> Self; fn make_record_declaration(ctx: &C, record_attribute_spec: Self, record_modifier: Self, record_keyword: Self, record_name: Self, record_extends_keyword: Self, record_extends_list: Self, record_left_brace: Self, record_fields: Self, record_right_brace: Self) -> Self; fn make_record_field(ctx: &C, record_field_name: Self, record_field_colon: Self, record_field_type: Self, record_field_init: Self, record_field_comma: Self) -> Self; fn make_alias_declaration(ctx: &C, alias_attribute_spec: Self, alias_keyword: Self, alias_name: Self, alias_generic_parameter: Self, alias_constraint: Self, alias_equal: Self, alias_type: Self, alias_semicolon: Self) -> Self; fn make_property_declaration(ctx: &C, property_attribute_spec: Self, property_modifiers: Self, property_type: Self, property_declarators: Self, property_semicolon: Self) -> Self; fn make_property_declarator(ctx: &C, property_name: Self, property_initializer: Self) -> Self; fn make_namespace_declaration(ctx: &C, namespace_keyword: Self, namespace_name: Self, namespace_body: Self) -> Self; fn make_namespace_body(ctx: &C, namespace_left_brace: Self, namespace_declarations: Self, namespace_right_brace: Self) -> Self; fn make_namespace_empty_body(ctx: &C, namespace_semicolon: Self) -> Self; fn make_namespace_use_declaration(ctx: &C, namespace_use_keyword: Self, namespace_use_kind: Self, namespace_use_clauses: Self, namespace_use_semicolon: Self) -> Self; fn make_namespace_group_use_declaration(ctx: &C, namespace_group_use_keyword: Self, namespace_group_use_kind: Self, namespace_group_use_prefix: Self, namespace_group_use_left_brace: Self, namespace_group_use_clauses: Self, namespace_group_use_right_brace: Self, namespace_group_use_semicolon: Self) -> Self; fn make_namespace_use_clause(ctx: &C, namespace_use_clause_kind: Self, namespace_use_name: Self, namespace_use_as: Self, 
namespace_use_alias: Self) -> Self; fn make_function_declaration(ctx: &C, function_attribute_spec: Self, function_declaration_header: Self, function_body: Self) -> Self; fn make_function_declaration_header(ctx: &C, function_modifiers: Self, function_keyword: Self, function_name: Self, function_type_parameter_list: Self, function_left_paren: Self, function_parameter_list: Self, function_right_paren: Self, function_colon: Self, function_type: Self, function_where_clause: Self) -> Self; fn make_where_clause(ctx: &C, where_clause_keyword: Self, where_clause_constraints: Self) -> Self; fn make_where_constraint(ctx: &C, where_constraint_left_type: Self, where_constraint_operator: Self, where_constraint_right_type: Self) -> Self; fn make_methodish_declaration(ctx: &C, methodish_attribute: Self, methodish_function_decl_header: Self, methodish_function_body: Self, methodish_semicolon: Self) -> Self; fn make_methodish_trait_resolution(ctx: &C, methodish_trait_attribute: Self, methodish_trait_function_decl_header: Self, methodish_trait_equal: Self, methodish_trait_name: Self, methodish_trait_semicolon: Self) -> Self; fn make_classish_declaration(ctx: &C, classish_attribute: Self, classish_modifiers: Self, classish_keyword: Self, classish_name: Self, classish_type_parameters: Self, classish_extends_keyword: Self, classish_extends_list: Self, classish_implements_keyword: Self, classish_implements_list: Self, classish_where_clause: Self, classish_body: Self) -> Self; fn make_classish_body(ctx: &C, classish_body_left_brace: Self, classish_body_elements: Self, classish_body_right_brace: Self) -> Self; fn make_trait_use_precedence_item(ctx: &C, trait_use_precedence_item_name: Self, trait_use_precedence_item_keyword: Self, trait_use_precedence_item_removed_names: Self) -> Self; fn make_trait_use_alias_item(ctx: &C, trait_use_alias_item_aliasing_name: Self, trait_use_alias_item_keyword: Self, trait_use_alias_item_modifiers: Self, trait_use_alias_item_aliased_name: Self) -> Self; fn make_trait_use_conflict_resolution(ctx: &C, trait_use_conflict_resolution_keyword: Self, trait_use_conflict_resolution_names: Self, trait_use_conflict_resolution_left_brace: Self, trait_use_conflict_resolution_clauses: Self, trait_use_conflict_resolution_right_brace: Self) -> Self; fn make_trait_use(ctx: &C, trait_use_keyword: Self, trait_use_names: Self, trait_use_semicolon: Self) -> Self; fn make_require_clause(ctx: &C, require_keyword: Self, require_kind: Self, require_name: Self, require_semicolon: Self) -> Self; fn make_const_declaration(ctx: &C, const_modifiers: Self, const_keyword: Self, const_type_specifier: Self, const_declarators: Self, const_semicolon: Self) -> Self; fn make_constant_declarator(ctx: &C, constant_declarator_name: Self, constant_declarator_initializer: Self) -> Self; fn make_type_const_declaration(ctx: &C, type_const_attribute_spec: Self, type_const_modifiers: Self, type_const_keyword: Self, type_const_type_keyword: Self, type_const_name: Self, type_const_type_parameters: Self, type_const_type_constraint: Self, type_const_equal: Self, type_const_type_specifier: Self, type_const_semicolon: Self) -> Self; fn make_decorated_expression(ctx: &C, decorated_expression_decorator: Self, decorated_expression_expression: Self) -> Self; fn make_parameter_declaration(ctx: &C, parameter_attribute: Self, parameter_visibility: Self, parameter_call_convention: Self, parameter_type: Self, parameter_name: Self, parameter_default_value: Self) -> Self; fn make_variadic_parameter(ctx: &C, variadic_parameter_call_convention: 
Self, variadic_parameter_type: Self, variadic_parameter_ellipsis: Self) -> Self;
fn make_old_attribute_specification(ctx: &C, old_attribute_specification_left_double_angle: Self, old_attribute_specification_attributes: Self, old_attribute_specification_right_double_angle: Self) -> Self;
fn make_attribute_specification(ctx: &C, attribute_specification_attributes: Self) -> Self;
fn make_attribute(ctx: &C, attribute_at: Self, attribute_attribute_name: Self) -> Self;
fn make_inclusion_expression(ctx: &C, inclusion_require: Self, inclusion_filename: Self) -> Self;
fn make_inclusion_directive(ctx: &C, inclusion_expression: Self, inclusion_semicolon: Self) -> Self;
fn make_compound_statement(ctx: &C, compound_left_brace: Self, compound_statements: Self, compound_right_brace: Self) -> Self;
fn make_expression_statement(ctx: &C, expression_statement_expression: Self, expression_statement_semicolon: Self) -> Self;
fn make_markup_section(ctx: &C, markup_prefix: Self, markup_text: Self, markup_suffix: Self, markup_expression: Self) -> Self;
fn make_markup_suffix(ctx: &C, markup_suffix_less_than_question: Self, markup_suffix_name: Self) -> Self;
fn make_unset_statement(ctx: &C, unset_keyword: Self, unset_left_paren: Self, unset_variables: Self, unset_right_paren: Self, unset_semicolon: Self) -> Self;
fn make_let_statement(ctx: &C, let_statement_keyword: Self, let_statement_name: Self, let_statement_colon: Self, let_statement_type: Self, let_statement_initializer: Self, let_statement_semicolon: Self) -> Self;
fn make_using_statement_block_scoped(ctx: &C, using_block_await_keyword: Self, using_block_using_keyword: Self, using_block_left_paren: Self, using_block_expressions: Self, using_block_right_paren: Self, using_block_body: Self) -> Self;
fn make_using_statement_function_scoped(ctx: &C, using_function_await_keyword: Self, using_function_using_keyword: Self, using_function_expression: Self, using_function_semicolon: Self) -> Self;
fn make_while_statement(ctx: &C, while_keyword: Self, while_left_paren: Self, while_condition: Self, while_right_paren: Self, while_body: Self) -> Self;
fn make_if_statement(ctx: &C, if_keyword: Self, if_left_paren: Self, if_condition: Self, if_right_paren: Self, if_statement: Self, if_elseif_clauses: Self, if_else_clause: Self) -> Self;
fn make_elseif_clause(ctx: &C, elseif_keyword: Self, elseif_left_paren: Self, elseif_condition: Self, elseif_right_paren: Self, elseif_statement: Self) -> Self;
fn make_else_clause(ctx: &C, else_keyword: Self, else_statement: Self) -> Self;
fn make_try_statement(ctx: &C, try_keyword: Self, try_compound_statement: Self, try_catch_clauses: Self, try_finally_clause: Self) -> Self;
fn make_catch_clause(ctx: &C, catch_keyword: Self, catch_left_paren: Self, catch_type: Self, catch_variable: Self, catch_right_paren: Self, catch_body: Self) -> Self;
fn make_finally_clause(ctx: &C, finally_keyword: Self, finally_body: Self) -> Self;
fn make_do_statement(ctx: &C, do_keyword: Self, do_body: Self, do_while_keyword: Self, do_left_paren: Self, do_condition: Self, do_right_paren: Self, do_semicolon: Self) -> Self;
fn make_for_statement(ctx: &C, for_keyword: Self, for_left_paren: Self, for_initializer: Self, for_first_semicolon: Self, for_control: Self, for_second_semicolon: Self, for_end_of_loop: Self, for_right_paren: Self, for_body: Self) -> Self;
fn make_foreach_statement(ctx: &C, foreach_keyword: Self, foreach_left_paren: Self, foreach_collection: Self, foreach_await_keyword: Self, foreach_as: Self, foreach_key: Self, foreach_arrow: Self, foreach_value: Self, foreach_right_paren: Self, foreach_body: Self) -> Self;
fn make_switch_statement(ctx: &C, switch_keyword: Self, switch_left_paren: Self, switch_expression: Self, switch_right_paren: Self, switch_left_brace: Self, switch_sections: Self, switch_right_brace: Self) -> Self;
fn make_switch_section(ctx: &C, switch_section_labels: Self, switch_section_statements: Self, switch_section_fallthrough: Self) -> Self;
fn make_switch_fallthrough(ctx: &C, fallthrough_keyword: Self, fallthrough_semicolon: Self) -> Self;
fn make_case_label(ctx: &C, case_keyword: Self, case_expression: Self, case_colon: Self) -> Self;
fn make_default_label(ctx: &C, default_keyword: Self, default_colon: Self) -> Self;
fn make_return_statement(ctx: &C, return_keyword: Self, return_expression: Self, return_semicolon: Self) -> Self;
fn make_goto_label(ctx: &C, goto_label_name: Self, goto_label_colon: Self) -> Self;
fn make_goto_statement(ctx: &C, goto_statement_keyword: Self, goto_statement_label_name: Self, goto_statement_semicolon: Self) -> Self;
fn make_throw_statement(ctx: &C, throw_keyword: Self, throw_expression: Self, throw_semicolon: Self) -> Self;
fn make_break_statement(ctx: &C, break_keyword: Self, break_semicolon: Self) -> Self;
fn make_continue_statement(ctx: &C, continue_keyword: Self, continue_semicolon: Self) -> Self;
fn make_echo_statement(ctx: &C, echo_keyword: Self, echo_expressions: Self, echo_semicolon: Self) -> Self;
fn make_concurrent_statement(ctx: &C, concurrent_keyword: Self, concurrent_statement: Self) -> Self;
fn make_simple_initializer(ctx: &C, simple_initializer_equal: Self, simple_initializer_value: Self) -> Self;
fn make_anonymous_class(ctx: &C, anonymous_class_class_keyword: Self, anonymous_class_left_paren: Self, anonymous_class_argument_list: Self, anonymous_class_right_paren: Self, anonymous_class_extends_keyword: Self, anonymous_class_extends_list: Self, anonymous_class_implements_keyword: Self, anonymous_class_implements_list: Self, anonymous_class_body: Self) -> Self;
fn make_anonymous_function(ctx: &C, anonymous_attribute_spec: Self, anonymous_static_keyword: Self, anonymous_async_keyword: Self, anonymous_coroutine_keyword: Self, anonymous_function_keyword: Self, anonymous_left_paren: Self, anonymous_parameters: Self, anonymous_right_paren: Self, anonymous_colon: Self, anonymous_type: Self, anonymous_use: Self, anonymous_body: Self) -> Self;
fn make_anonymous_function_use_clause(ctx: &C, anonymous_use_keyword: Self, anonymous_use_left_paren: Self, anonymous_use_variables: Self, anonymous_use_right_paren: Self) -> Self;
fn make_lambda_expression(ctx: &C, lambda_attribute_spec: Self, lambda_async: Self, lambda_coroutine: Self, lambda_signature: Self, lambda_arrow: Self, lambda_body: Self) -> Self;
fn make_lambda_signature(ctx: &C, lambda_left_paren: Self, lambda_parameters: Self, lambda_right_paren: Self, lambda_colon: Self, lambda_type: Self) -> Self;
fn make_cast_expression(ctx: &C, cast_left_paren: Self, cast_type: Self, cast_right_paren: Self, cast_operand: Self) -> Self;
fn make_scope_resolution_expression(ctx: &C, scope_resolution_qualifier: Self, scope_resolution_operator: Self, scope_resolution_name: Self) -> Self;
fn make_member_selection_expression(ctx: &C, member_object: Self, member_operator: Self, member_name: Self) -> Self;
fn make_safe_member_selection_expression(ctx: &C, safe_member_object: Self, safe_member_operator: Self, safe_member_name: Self) -> Self;
fn make_embedded_member_selection_expression(ctx: &C, embedded_member_object: Self, embedded_member_operator: Self, embedded_member_name: Self) -> Self;
fn make_yield_expression(ctx: &C, yield_keyword: Self, yield_operand: Self) -> Self;
fn make_yield_from_expression(ctx: &C, yield_from_yield_keyword: Self, yield_from_from_keyword: Self, yield_from_operand: Self) -> Self;
fn make_prefix_unary_expression(ctx: &C, prefix_unary_operator: Self, prefix_unary_operand: Self) -> Self;
fn make_postfix_unary_expression(ctx: &C, postfix_unary_operand: Self, postfix_unary_operator: Self) -> Self;
fn make_binary_expression(ctx: &C, binary_left_operand: Self, binary_operator: Self, binary_right_operand: Self) -> Self;
fn make_is_expression(ctx: &C, is_left_operand: Self, is_operator: Self, is_right_operand: Self) -> Self;
fn make_as_expression(ctx: &C, as_left_operand: Self, as_operator: Self, as_right_operand: Self) -> Self;
fn make_nullable_as_expression(ctx: &C, nullable_as_left_operand: Self, nullable_as_operator: Self, nullable_as_right_operand: Self) -> Self;
fn make_conditional_expression(ctx: &C, conditional_test: Self, conditional_question: Self, conditional_consequence: Self, conditional_colon: Self, conditional_alternative: Self) -> Self;
fn make_eval_expression(ctx: &C, eval_keyword: Self, eval_left_paren: Self, eval_argument: Self, eval_right_paren: Self) -> Self;
fn make_define_expression(ctx: &C, define_keyword: Self, define_left_paren: Self, define_argument_list: Self, define_right_paren: Self) -> Self;
fn make_halt_compiler_expression(ctx: &C, halt_compiler_keyword: Self, halt_compiler_left_paren: Self, halt_compiler_argument_list: Self, halt_compiler_right_paren: Self) -> Self;
fn make_isset_expression(ctx: &C, isset_keyword: Self, isset_left_paren: Self, isset_argument_list: Self, isset_right_paren: Self) -> Self;
fn make_function_call_expression(ctx: &C, function_call_receiver: Self, function_call_type_args: Self, function_call_left_paren: Self, function_call_argument_list: Self, function_call_right_paren: Self) -> Self;
fn make_parenthesized_expression(ctx: &C, parenthesized_expression_left_paren: Self, parenthesized_expression_expression: Self, parenthesized_expression_right_paren: Self) -> Self;
fn make_braced_expression(ctx: &C, braced_expression_left_brace: Self, braced_expression_expression: Self, braced_expression_right_brace: Self) -> Self;
fn make_embedded_braced_expression(ctx: &C, embedded_braced_expression_left_brace: Self, embedded_braced_expression_expression: Self, embedded_braced_expression_right_brace: Self) -> Self;
fn make_list_expression(ctx: &C, list_keyword: Self, list_left_paren: Self, list_members: Self, list_right_paren: Self) -> Self;
fn make_collection_literal_expression(ctx: &C, collection_literal_name: Self, collection_literal_left_brace: Self, collection_literal_initializers: Self, collection_literal_right_brace: Self) -> Self;
fn make_object_creation_expression(ctx: &C, object_creation_new_keyword: Self, object_creation_object: Self) -> Self;
fn make_constructor_call(ctx: &C, constructor_call_type: Self, constructor_call_left_paren: Self, constructor_call_argument_list: Self, constructor_call_right_paren: Self) -> Self;
fn make_record_creation_expression(ctx: &C, record_creation_type: Self, record_creation_array_token: Self, record_creation_left_bracket: Self, record_creation_members: Self, record_creation_right_bracket: Self) -> Self;
fn make_array_creation_expression(ctx: &C, array_creation_left_bracket: Self, array_creation_members: Self, array_creation_right_bracket: Self) -> Self;
fn make_array_intrinsic_expression(ctx: &C, array_intrinsic_keyword: Self, array_intrinsic_left_paren: Self, array_intrinsic_members: Self, array_intrinsic_right_paren: Self) -> Self;
fn make_darray_intrinsic_expression(ctx: &C, darray_intrinsic_keyword: Self, darray_intrinsic_explicit_type: Self, darray_intrinsic_left_bracket: Self, darray_intrinsic_members: Self, darray_intrinsic_right_bracket: Self) -> Self;
fn make_dictionary_intrinsic_expression(ctx: &C, dictionary_intrinsic_keyword: Self, dictionary_intrinsic_explicit_type: Self, dictionary_intrinsic_left_bracket: Self, dictionary_intrinsic_members: Self, dictionary_intrinsic_right_bracket: Self) -> Self;
fn make_keyset_intrinsic_expression(ctx: &C, keyset_intrinsic_keyword: Self, keyset_intrinsic_explicit_type: Self, keyset_intrinsic_left_bracket: Self, keyset_intrinsic_members: Self, keyset_intrinsic_right_bracket: Self) -> Self;
fn make_varray_intrinsic_expression(ctx: &C, varray_intrinsic_keyword: Self, varray_intrinsic_explicit_type: Self, varray_intrinsic_left_bracket: Self, varray_intrinsic_members: Self, varray_intrinsic_right_bracket: Self) -> Self;
fn make_vector_intrinsic_expression(ctx: &C, vector_intrinsic_keyword: Self, vector_intrinsic_explicit_type: Self, vector_intrinsic_left_bracket: Self, vector_intrinsic_members: Self, vector_intrinsic_right_bracket: Self) -> Self;
fn make_element_initializer(ctx: &C, element_key: Self, element_arrow: Self, element_value: Self) -> Self;
fn make_subscript_expression(ctx: &C, subscript_receiver: Self, subscript_left_bracket: Self, subscript_index: Self, subscript_right_bracket: Self) -> Self;
fn make_embedded_subscript_expression(ctx: &C, embedded_subscript_receiver: Self, embedded_subscript_left_bracket: Self, embedded_subscript_index: Self, embedded_subscript_right_bracket: Self) -> Self;
fn make_awaitable_creation_expression(ctx: &C, awaitable_attribute_spec: Self, awaitable_async: Self, awaitable_coroutine: Self, awaitable_compound_statement: Self) -> Self;
fn make_xhp_children_declaration(ctx: &C, xhp_children_keyword: Self, xhp_children_expression: Self, xhp_children_semicolon: Self) -> Self;
fn make_xhp_children_parenthesized_list(ctx: &C, xhp_children_list_left_paren: Self, xhp_children_list_xhp_children: Self, xhp_children_list_right_paren: Self) -> Self;
fn make_xhp_category_declaration(ctx: &C, xhp_category_keyword: Self, xhp_category_categories: Self, xhp_category_semicolon: Self) -> Self;
fn make_xhp_enum_type(ctx: &C, xhp_enum_optional: Self, xhp_enum_keyword: Self, xhp_enum_left_brace: Self, xhp_enum_values: Self, xhp_enum_right_brace: Self) -> Self;
fn make_xhp_lateinit(ctx: &C, xhp_lateinit_at: Self, xhp_lateinit_keyword: Self) -> Self;
fn make_xhp_required(ctx: &C, xhp_required_at: Self, xhp_required_keyword: Self) -> Self;
fn make_xhp_class_attribute_declaration(ctx: &C, xhp_attribute_keyword: Self, xhp_attribute_attributes: Self, xhp_attribute_semicolon: Self) -> Self;
fn make_xhp_class_attribute(ctx: &C, xhp_attribute_decl_type: Self, xhp_attribute_decl_name: Self, xhp_attribute_decl_initializer: Self, xhp_attribute_decl_required: Self) -> Self;
fn make_xhp_simple_class_attribute(ctx: &C, xhp_simple_class_attribute_type: Self) -> Self;
fn make_xhp_simple_attribute(ctx: &C, xhp_simple_attribute_name: Self, xhp_simple_attribute_equal: Self, xhp_simple_attribute_expression: Self) -> Self;
fn make_xhp_spread_attribute(ctx: &C, xhp_spread_attribute_left_brace: Self, xhp_spread_attribute_spread_operator: Self, xhp_spread_attribute_expression: Self, xhp_spread_attribute_right_brace: Self) -> Self;
fn make_xhp_open(ctx: &C, xhp_open_left_angle: Self, xhp_open_name: Self, xhp_open_attributes: Self, xhp_open_right_angle: Self) -> Self;
fn make_xhp_expression(ctx: &C, xhp_open: Self, xhp_body: Self, xhp_close: Self) -> Self;
fn make_xhp_close(ctx: &C, xhp_close_left_angle: Self, xhp_close_name: Self, xhp_close_right_angle: Self) -> Self;
fn make_type_constant(ctx: &C, type_constant_left_type: Self, type_constant_separator: Self, type_constant_right_type: Self) -> Self;
fn make_pu_access(ctx: &C, pu_access_left_type: Self, pu_access_separator: Self, pu_access_right_type: Self) -> Self;
fn make_vector_type_specifier(ctx: &C, vector_type_keyword: Self, vector_type_left_angle: Self, vector_type_type: Self, vector_type_trailing_comma: Self, vector_type_right_angle: Self) -> Self;
fn make_keyset_type_specifier(ctx: &C, keyset_type_keyword: Self, keyset_type_left_angle: Self, keyset_type_type: Self, keyset_type_trailing_comma: Self, keyset_type_right_angle: Self) -> Self;
fn make_tuple_type_explicit_specifier(ctx: &C, tuple_type_keyword: Self, tuple_type_left_angle: Self, tuple_type_types: Self, tuple_type_right_angle: Self) -> Self;
fn make_varray_type_specifier(ctx: &C, varray_keyword: Self, varray_left_angle: Self, varray_type: Self, varray_trailing_comma: Self, varray_right_angle: Self) -> Self;
fn make_vector_array_type_specifier(ctx: &C, vector_array_keyword: Self, vector_array_left_angle: Self, vector_array_type: Self, vector_array_right_angle: Self) -> Self;
fn make_type_parameter(ctx: &C, type_attribute_spec: Self, type_reified: Self, type_variance: Self, type_name: Self, type_constraints: Self) -> Self;
fn make_type_constraint(ctx: &C, constraint_keyword: Self, constraint_type: Self) -> Self;
fn make_darray_type_specifier(ctx: &C, darray_keyword: Self, darray_left_angle: Self, darray_key: Self, darray_comma: Self, darray_value: Self, darray_trailing_comma: Self, darray_right_angle: Self) -> Self;
fn make_map_array_type_specifier(ctx: &C, map_array_keyword: Self, map_array_left_angle: Self, map_array_key: Self, map_array_comma: Self, map_array_value: Self, map_array_right_angle: Self) -> Self;
fn make_dictionary_type_specifier(ctx: &C, dictionary_type_keyword: Self, dictionary_type_left_angle: Self, dictionary_type_members: Self, dictionary_type_right_angle: Self) -> Self;
fn make_closure_type_specifier(ctx: &C, closure_outer_left_paren: Self, closure_coroutine: Self, closure_function_keyword: Self, closure_inner_left_paren: Self, closure_parameter_list: Self, closure_inner_right_paren: Self, closure_colon: Self, closure_return_type: Self, closure_outer_right_paren: Self) -> Self;
fn make_closure_parameter_type_specifier(ctx: &C, closure_parameter_call_convention: Self, closure_parameter_type: Self) -> Self;
fn make_classname_type_specifier(ctx: &C, classname_keyword: Self, classname_left_angle: Self, classname_type: Self, classname_trailing_comma: Self, classname_right_angle: Self) -> Self;
fn make_field_specifier(ctx: &C, field_question: Self, field_name: Self, field_arrow: Self, field_type: Self) -> Self;
fn make_field_initializer(ctx: &C, field_initializer_name: Self, field_initializer_arrow: Self, field_initializer_value: Self) -> Self;
fn make_shape_type_specifier(ctx: &C, shape_type_keyword: Self, shape_type_left_paren: Self, shape_type_fields: Self, shape_type_ellipsis: Self, shape_type_right_paren: Self) -> Self;
fn make_shape_expression(ctx: &C, shape_expression_keyword: Self, shape_expression_left_paren: Self, shape_expression_fields: Self, shape_expression_right_paren: Self) -> Self;
fn make_tuple_expression(ctx: &C, tuple_expression_keyword: Self, tuple_expression_left_paren: Self, tuple_expression_items: Self, tuple_expression_right_paren: Self) -> Self;
fn make_generic_type_specifier(ctx: &C, generic_class_type: Self, generic_argument_list: Self) -> Self;
fn make_like_type_specifier(ctx: &C, like_tilde: Self, like_type: Self) -> Self;
fn make_soft_type_specifier(ctx: &C, soft_at: Self, soft_type: Self) -> Self;
fn make_attributized_specifier(ctx: &C, attributized_specifier_attribute_spec: Self, attributized_specifier_type: Self) -> Self;
fn make_reified_type_argument(ctx: &C, reified_type_argument_reified: Self, reified_type_argument_type: Self) -> Self;
fn make_type_arguments(ctx: &C, type_arguments_left_angle: Self, type_arguments_types: Self, type_arguments_right_angle: Self) -> Self;
fn make_type_parameters(ctx: &C, type_parameters_left_angle: Self, type_parameters_parameters: Self, type_parameters_right_angle: Self) -> Self;
fn make_tuple_type_specifier(ctx: &C, tuple_left_paren: Self, tuple_types: Self, tuple_right_paren: Self) -> Self;
fn make_union_type_specifier(ctx: &C, union_left_paren: Self, union_types: Self, union_right_paren: Self) -> Self;
fn make_intersection_type_specifier(ctx: &C, intersection_left_paren: Self, intersection_types: Self, intersection_right_paren: Self) -> Self;
fn make_error(ctx: &C, error_error: Self) -> Self;
fn make_list_item(ctx: &C, list_item: Self, list_separator: Self) -> Self;
fn make_pocket_atom_expression(ctx: &C, pocket_atom_glyph: Self, pocket_atom_expression: Self) -> Self;
fn make_pocket_identifier_expression(ctx: &C, pocket_identifier_qualifier: Self, pocket_identifier_pu_operator: Self, pocket_identifier_field: Self, pocket_identifier_operator: Self, pocket_identifier_name: Self) -> Self;
fn make_pocket_atom_mapping_declaration(ctx: &C, pocket_atom_mapping_glyph: Self, pocket_atom_mapping_name: Self, pocket_atom_mapping_left_paren: Self, pocket_atom_mapping_mappings: Self, pocket_atom_mapping_right_paren: Self, pocket_atom_mapping_semicolon: Self) -> Self;
fn make_pocket_enum_declaration(ctx: &C, pocket_enum_modifiers: Self, pocket_enum_enum: Self, pocket_enum_name: Self, pocket_enum_left_brace: Self, pocket_enum_fields: Self, pocket_enum_right_brace: Self) -> Self;
fn make_pocket_field_type_expr_declaration(ctx: &C, pocket_field_type_expr_case: Self, pocket_field_type_expr_type: Self, pocket_field_type_expr_name: Self, pocket_field_type_expr_semicolon: Self) -> Self;
fn make_pocket_field_type_declaration(ctx: &C, pocket_field_type_case: Self, pocket_field_type_type: Self, pocket_field_type_name: Self, pocket_field_type_semicolon: Self) -> Self;
fn make_pocket_mapping_id_declaration(ctx: &C, pocket_mapping_id_name: Self, pocket_mapping_id_initializer: Self) -> Self;
fn make_pocket_mapping_type_declaration(ctx: &C, pocket_mapping_type_keyword: Self, pocket_mapping_type_name: Self, pocket_mapping_type_equal: Self, pocket_mapping_type_type: Self) -> Self;
}
fn make_nullable_type_specifier(ctx: &C, nullable_question: Self, nullable_type: Self) -> Self;
footer.js
import React from 'react'

import GitHubSVG from '../svg/github.svg'
import LinkedInSVG from '../svg/linkedin.svg'
import SlackSVG from '../svg/slack.svg'
import TwitterSVG from '../svg/twitter.svg'

const socialLinks = [
  {
    Component: GitHubSVG,
    href: 'https://github.com/maralihart',
    title: 'GitHub',
  },
  {
    Component: TwitterSVG,
    href: 'https://twitter.com/maradrinksmilk',
    title: 'Twitter',
  },
  {
    Component: LinkedInSVG,
    href: 'https://www.linkedin.com/in/maralihart',
    title: 'LinkedIn',
  },
]

function Footer() {
  return (
    <footer className="bg-yellow-800">
      <div className="flex flex-col md:flex-row items-center md:justify-between py-6 max-w-3xl mx-auto px-4 sm:px-6 lg:max-w-5xl space-y-6 md:space-y-0">
        <p className="text-gray-300">Powered by GraphCMS &amp; Gatsby, Developed by Mara</p>
        <ul className="inline-flex space-x-6">
          {socialLinks.map(({ Component, href, title }, index) => (
            <li key={index}>
              <a
                href={href}
                target="_blank"
                className="block text-gray-300 hover:text-white p-1 text-sm"
                rel="noopener noreferrer"
                title={title}
              >
                <Component className="h-6 w-6" />
              </a>
            </li>
          ))}
        </ul>
}

export default Footer
      </div>
    </footer>
  )
config.py
""" MIT License Copyright (c) 2019 Yoga Suhas Kuruba Manjunath Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ # raw images shoud be saved in "images" folder image_folder = './images' # final preprocessed images will be stored extracted_folder = './extracted_images' # to store model files models = './models' # to stroe graphs graphs = './graphs' # vertical and horizontal size to be used image_size_vertical = 100 image_size_horizontal = 100 # number of epochs to train a model epoch = 100 # batch size used to train a model batch_size = 64 # data set split ratio train_ratio = 0.6 test_ratio = 0.2 validation_ratio = 0.2 # input data shape, this will be updated # accordingly in the code for GREY_SCALE # or RGB images if used. x_shape = () # type of channels GREY = 1 RGB = 3 # this config represents the image fusion
horizontal = "HORIZONTAL" # number of classes, this will be updated # in code num_classes = 0 # labeling of classes, this will be updated # in code person_label = {}
# in vertical or horizontal way
vertical = "VERTICAL"
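The comments above mark x_shape, num_classes, and person_label as values that get "updated in code", but that code is not part of this record. The following is only a minimal sketch of how those placeholders might be filled in at runtime; the helper name prepare_config and its arguments are hypothetical, not part of the original project.

```python
import config


def prepare_config(person_names, channels=config.GREY):
    """Hypothetical helper: fill in the config values that the
    comments say are 'updated in code'."""
    # shape is (height, width, channels); GREY -> 1 channel, RGB -> 3
    config.x_shape = (config.image_size_vertical,
                      config.image_size_horizontal,
                      channels)
    config.num_classes = len(person_names)
    # map each class name to an integer label
    config.person_label = {name: i for i, name in enumerate(person_names)}
```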
editTeaShopSagas.test.ts
import watchEditTeaShopSaga, {
    editTeaShopSaga,
    editTeaShopSagaReturn,
    watchEditTeaShopSagaReturn
} from './editTeaShopSagas';
import {
    editTeaShop,
    editTeaShopSucceeded,
    editTeaShopFailed,
    getAllTeaShops
} from '../../actions/teaShopActions';
import { call, put, take } from 'redux-saga/effects';
import { endpoint, testTeaShop, testError } from '../../../entities';
import { TeaShopActionTypes } from '../../types';
import axios from 'axios';

let generator: editTeaShopSagaReturn | watchEditTeaShopSagaReturn;

describe('testing editTeaShopSaga', () => {
    beforeEach( () => {
        generator = editTeaShopSaga(testTeaShop);
    })

    it('generates the expected objects', () => {
        expect( generator.next().value )
            .toEqual( call(axios.post, `${endpoint}/${testTeaShop.id}`, testTeaShop) );
        expect( generator.next().value )
            .toEqual( put(editTeaShopSucceeded()) );
        expect( generator.next().value )
            .toEqual( put(getAllTeaShops()) );
    });

    it('handles errors properly', () => {
        generator.next();
        expect( generator.throw(testError).value )
            .toEqual( put(editTeaShopFailed(testError)) );
    });
    beforeEach( () => {
        generator = watchEditTeaShopSaga();
    });

    it('generates the expected objects', () => {
        expect( generator.next().value )
            .toEqual( take(TeaShopActionTypes.EDIT_TEASHOP) );
        expect( generator.next(editTeaShop(testTeaShop)).value )
            .toEqual( call(editTeaShopSaga, editTeaShop(testTeaShop).payload) );
    });
});
});

describe('testing watchEditTeaShopSaga', () => {
testcase.py
import sys
import threading

import pytest
from tornado import ioloop, web

from dummyserver.server import (
    SocketServerThread,
    run_tornado_app,
    run_loop_in_thread,
    DEFAULT_CERTS,
    HAS_IPV6,
)
from dummyserver.handlers import TestingApp
from dummyserver.proxy import ProxyHandler

if sys.version_info >= (2, 7):
    import unittest
else:
    import unittest2 as unittest


def consume_socket(sock, chunks=65536):
    while not sock.recv(chunks).endswith(b'\r\n\r\n'):
        pass


class SocketDummyServerTestCase(unittest.TestCase):
    """
    A simple socket-based server is created for this class that is good for
    exactly one request.
    """
    scheme = 'http'
    host = 'localhost'

    @classmethod
    def _start_server(cls, socket_handler):
        ready_event = threading.Event()
        cls.server_thread = SocketServerThread(socket_handler=socket_handler,
                                               ready_event=ready_event,
                                               host=cls.host)
        cls.server_thread.start()
        ready_event.wait(5)
        if not ready_event.is_set():
            raise Exception("most likely failed to start server")
        cls.port = cls.server_thread.port

    @classmethod
    def start_response_handler(cls, response, num=1, block_send=None):
        ready_event = threading.Event()

        def socket_handler(listener):
            for _ in range(num):
                ready_event.set()

                sock = listener.accept()[0]
                consume_socket(sock)
                if block_send:
                    block_send.wait()
                    block_send.clear()
                sock.send(response)
                sock.close()

        cls._start_server(socket_handler)
        return ready_event

    @classmethod
    def start_basic_handler(cls, **kw):
        return cls.start_response_handler(
            b'HTTP/1.1 200 OK\r\n'
            b'Content-Length: 0\r\n'
            b'\r\n', **kw)

    @classmethod
    def tearDownClass(cls):
        if hasattr(cls, 'server_thread'):
            cls.server_thread.join(0.1)

    def assert_header_received(
        self,
        received_headers,
        header_name,
        expected_value=None
    ):
        header_name = header_name.encode('ascii')
        if expected_value is not None:
            expected_value = expected_value.encode('ascii')
        header_titles = []
        for header in received_headers:
            key, value = header.split(b': ')
            header_titles.append(key)
            if key == header_name and expected_value is not None:
                self.assertEqual(value, expected_value)
        self.assertIn(header_name, header_titles)


class IPV4SocketDummyServerTestCase(SocketDummyServerTestCase):
    @classmethod
    def _start_server(cls, socket_handler):
        ready_event = threading.Event()
        cls.server_thread = SocketServerThread(socket_handler=socket_handler,
                                               ready_event=ready_event,
                                               host=cls.host)
        cls.server_thread.USE_IPV6 = False
        cls.server_thread.start()
        ready_event.wait(5)
        if not ready_event.is_set():
            raise Exception("most likely failed to start server")
        cls.port = cls.server_thread.port


class HTTPDummyServerTestCase(unittest.TestCase):
    """ A simple HTTP server that runs when your test class runs

    Have your unittest class inherit from this one, and then a simple server
    will start when your tests run, and automatically shut down when they
    complete. For examples of what test requests you can send to the server,
    see the TestingApp in dummyserver/handlers.py.
""" scheme = 'http' host = 'localhost' host_alt = '127.0.0.1' # Some tests need two hosts certs = DEFAULT_CERTS @classmethod def _start_server(cls): cls.io_loop = ioloop.IOLoop() app = web.Application([(r".*", TestingApp)]) cls.server, cls.port = run_tornado_app(app, cls.io_loop, cls.certs, cls.scheme, cls.host) cls.server_thread = run_loop_in_thread(cls.io_loop) @classmethod def _stop_server(cls): cls.io_loop.add_callback(cls.server.stop) cls.io_loop.add_callback(cls.io_loop.stop) cls.server_thread.join() @classmethod def setUpClass(cls): cls._start_server() @classmethod def tearDownClass(cls): cls._stop_server() class HTTPSDummyServerTestCase(HTTPDummyServerTestCase): scheme = 'https' host = 'localhost' certs = DEFAULT_CERTS @pytest.mark.skipif(not HAS_IPV6, reason='IPv6 not available') class IPV6HTTPSDummyServerTestCase(HTTPSDummyServerTestCase): host = '::1' class HTTPDummyProxyTestCase(unittest.TestCase): http_host = 'localhost' http_host_alt = '127.0.0.1' https_host = 'localhost' https_host_alt = '127.0.0.1' https_certs = DEFAULT_CERTS proxy_host = 'localhost' proxy_host_alt = '127.0.0.1' @classmethod def setUpClass(cls): cls.io_loop = ioloop.IOLoop() app = web.Application([(r'.*', TestingApp)]) cls.http_server, cls.http_port = run_tornado_app( app, cls.io_loop, None, 'http', cls.http_host) app = web.Application([(r'.*', TestingApp)]) cls.https_server, cls.https_port = run_tornado_app( app, cls.io_loop, cls.https_certs, 'https', cls.http_host) app = web.Application([(r'.*', ProxyHandler)]) cls.proxy_server, cls.proxy_port = run_tornado_app( app, cls.io_loop, None, 'http', cls.proxy_host) cls.server_thread = run_loop_in_thread(cls.io_loop) @classmethod def tearDownClass(cls): cls.io_loop.add_callback(cls.http_server.stop) cls.io_loop.add_callback(cls.https_server.stop) cls.io_loop.add_callback(cls.proxy_server.stop) cls.io_loop.add_callback(cls.io_loop.stop) cls.server_thread.join() @pytest.mark.skipif(not HAS_IPV6, reason='IPv6 not available') class IPv6HTTPDummyServerTestCase(HTTPDummyServerTestCase): host = '::1'
@pytest.mark.skipif(not HAS_IPV6, reason='IPv6 not available')
class IPv6HTTPDummyProxyTestCase(HTTPDummyProxyTestCase):

    http_host = 'localhost'
    http_host_alt = '127.0.0.1'

    https_host = 'localhost'
    https_host_alt = '127.0.0.1'
    https_certs = DEFAULT_CERTS

    proxy_host = '::1'
    proxy_host_alt = '127.0.0.1'
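Per the docstring in HTTPDummyServerTestCase, a test class only has to inherit from it for the Tornado server to start in setUpClass and stop in tearDownClass. The subclass below is a minimal sketch of that usage; the urllib3 client call and the '/' request path are illustrative assumptions rather than code from this file (real request paths are defined by TestingApp in dummyserver/handlers.py, and the import path dummyserver.testcase is inferred from the file name).

```python
import unittest

import urllib3  # assumed available: this file is urllib3's own test scaffolding

from dummyserver.testcase import HTTPDummyServerTestCase  # inferred module path


class TestAgainstDummyServer(HTTPDummyServerTestCase):
    """Hypothetical example: inheriting is enough; cls.host and cls.port
    are populated by setUpClass before any test method runs."""

    def test_server_responds(self):
        pool = urllib3.HTTPConnectionPool(self.host, self.port)
        # '/' is an illustrative path; pick a real route from TestingApp
        response = pool.request('GET', '/')
        self.assertEqual(response.status, 200)


if __name__ == '__main__':
    unittest.main()
```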