| prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k) |
|---|---|
<|file_name|>base.rs<|end_file_name|><|fim▁begin|>#[cfg(feature = "packed_simd")]
use packed_simd;
use crate::time::{Time, TimeDelta};
use byteorder::{ByteOrder, LittleEndian};
use serde;
use std;
use std::cmp::{Eq, PartialEq};
use std::collections::VecDeque;
use std::error;
use std::fmt;
use std::io;
use std::iter::Enumerate;
use std::ops::{Add, AddAssign, Sub, SubAssign};
use std::result;
use std::slice;
#[cfg(feature = "packed_simd")]
use self::packed_simd::f32x4;
// Subwoofer channel is expected to be reproduced 10dB louder
// than other channels.
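// (A 10 dB boost corresponds to a linear amplitude factor of 10^(10/20) ≈ 3.1623, the value below.)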
pub const SUBWOOFER_LEVEL: f32 = 3.16227766017;
#[derive(Clone, Copy, Debug)]
pub enum SampleFormat {
S16LE,
S24LE3,
S24LE4,
S32LE,
F32LE,
}
impl SampleFormat {
pub fn to_str(&self) -> &'static str {
match *self {
SampleFormat::S16LE => "S16LE",
SampleFormat::S24LE3 => "S24LE3",
SampleFormat::S24LE4 => "S24LE4",
SampleFormat::S32LE => "S32LE",
SampleFormat::F32LE => "F32LE",
}
}
pub fn bytes_per_sample(&self) -> usize {
match *self {
SampleFormat::S16LE => 2,
SampleFormat::S24LE3 => 3,
SampleFormat::S24LE4 => 4,
SampleFormat::S32LE => 4,
SampleFormat::F32LE => 4,
}
}
}
pub type ChannelPos = u8;
pub const CHANNEL_UNDEFINED: ChannelPos = 255;
pub const CHANNEL_FL: ChannelPos = 0;
pub const CHANNEL_FR: ChannelPos = 1;
pub const CHANNEL_FC: ChannelPos = 2;
pub const CHANNEL_SL: ChannelPos = 3;
pub const CHANNEL_SR: ChannelPos = 4;
pub const CHANNEL_SC: ChannelPos = 5;
pub const CHANNEL_LFE: ChannelPos = 7;
pub const CHANNEL_DYNAMIC_BASE: ChannelPos = 8;
pub const CHANNEL_MAX: ChannelPos = 20;
pub const NUM_CHANNEL_MAX: usize = CHANNEL_MAX as usize;
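// Channel positions double as indices into PerChannel<T> (which asserts pos < CHANNEL_MAX);
// CHANNEL_UNDEFINED marks an unmapped slot in output channel maps and is skipped rather than indexed.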
pub fn parse_channel_id(id: &str) -> Option<ChannelPos> {
match id {
"L" | "left" => Some(CHANNEL_FL),
"R" | "right" => Some(CHANNEL_FR),
"C" | "center" | "centre" => Some(CHANNEL_FC),
"SL" | "surround_left" => Some(CHANNEL_SL),
"SR" | "surround_right" => Some(CHANNEL_SR),
"SC" | "surround" | "surround_center" | "surround_centre" => Some(CHANNEL_SC),
"LFE" => Some(CHANNEL_LFE),
"_" => Some(CHANNEL_UNDEFINED),
_ => None,
}
}
#[derive(Clone)]
pub struct PerChannel<T> {
values: [Option<T>; NUM_CHANNEL_MAX],
}
pub struct ChannelIter<'a, T: 'a> {
inner: Enumerate<slice::IterMut<'a, Option<T>>>,
}
impl<'a, T> ChannelIter<'a, T> {
fn new(per_channel: &'a mut PerChannel<T>) -> ChannelIter<T> {
ChannelIter {
inner: per_channel.values.iter_mut().enumerate(),
}
}
}
impl<T> PerChannel<T> {
pub fn new() -> PerChannel<T> {
PerChannel {
values: Default::default(),
}
}
pub fn get(&self, c: ChannelPos) -> Option<&T> {
assert!(c < CHANNEL_MAX);
self.values[c as usize].as_ref()
}
pub fn get_mut(&mut self, c: ChannelPos) -> Option<&mut T> {
assert!(c < CHANNEL_MAX);
self.values[c as usize].as_mut()
}
pub fn take(&mut self, c: ChannelPos) -> Option<T> {
assert!(c < CHANNEL_MAX);
self.values[c as usize].take()
}
pub fn get_or_insert<F: FnOnce() -> T>(&mut self, c: ChannelPos, default: F) -> &mut T {
assert!(c < CHANNEL_MAX);
if !self.have_channel(c) {
self.values[c as usize] = Some(default());
}
self.values[c as usize].as_mut().unwrap()
}
pub fn have_channel(&self, c: ChannelPos) -> bool {
assert!(c < CHANNEL_MAX);
self.values[c as usize].is_some()
}
pub fn set(&mut self, c: ChannelPos, v: T) {
assert!(c < CHANNEL_MAX);
self.values[c as usize] = Some(v)
}
pub fn iter(&mut self) -> ChannelIter<T> {
ChannelIter::new(self)
}
pub fn clear(&mut self) {
self.values = Default::default();
}
}
impl<'a, T> Iterator for ChannelIter<'a, T> {
type Item = (ChannelPos, &'a mut T);
fn next(&mut self) -> Option<Self::Item> {
loop {
match self.inner.next() {
None => return None,
Some((i, v)) => {
if v.is_none() {
continue;
} else {
return v.as_mut().map(|v| (i as ChannelPos, v));
}
}
}
}
}
}
impl fmt::Display for SampleFormat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.to_str())
}
}
fn clamp(v: f32) -> f32 {
if v > 1.0 {
1.0
} else if v < -1.0 {
-1.0
} else {
v
}
}
fn write_sample_s16le(val: f32, buf: &mut [u8]) {
let ival = (clamp(val) as f64 * 2147483648f64 - 32767.5) as i32 as u32;
buf[0] = ((ival & 0x00ff0000) >> 16) as u8;
buf[1] = ((ival & 0xff000000) >> 24) as u8;
}
fn write_sample_s24le3(val: f32, buf: &mut [u8]) {
let ival = (clamp(val) as f64 * 2147483648f64 - 127.5) as i32 as u32;
buf[0] = ((ival & 0x0000ff00) >> 8) as u8;
buf[1] = ((ival & 0x00ff0000) >> 16) as u8;
buf[2] = ((ival & 0xff000000) >> 24) as u8;
}
fn write_sample_s24le4(val: f32, buf: &mut [u8]) {
let ival = (clamp(val) as f64 * 2147483648f64 - 127.5) as i32 as u32;
buf[0] = ((ival & 0x0000ff00) >> 8) as u8;
buf[1] = ((ival & 0x00ff0000) >> 16) as u8;
buf[2] = ((ival & 0xff000000) >> 24) as u8;
buf[3] = 0;
}
fn write_sample_s32le(val: f32, buf: &mut [u8]) {
let ival = (clamp(val) as f64 * 2147483648f64 - 0.5) as i32 as u32;
buf[0] = ((ival & 0x000000ff) >> 0) as u8;
buf[1] = ((ival & 0x0000ff00) >> 8) as u8;
buf[2] = ((ival & 0x00ff0000) >> 16) as u8;
buf[3] = ((ival & 0xff000000) >> 24) as u8;
}
fn read_sample_s16le(buf: &[u8]) -> f32 {
(((((buf[0] as u32) << 16) | ((buf[1] as u32) << 24)) as i32 as f64 + 32767.5) / 2147483648f64)
as f32
}
fn read_sample_s24le3(buf: &[u8]) -> f32 {
(((((buf[0] as u32) << 8) | ((buf[1] as u32) << 16) | ((buf[2] as u32) << 24)) as i32 as f64
+ 127.5)
/ 2147483648f64) as f32
}
fn read_sample_s24le4(buf: &[u8]) -> f32 {
(((((buf[0] as u32) << 8) | ((buf[1] as u32) << 16) | ((buf[2] as u32) << 24)) as i32 as f64
+ 127.5)
/ 2147483648f64) as f32
}
fn read_sample_s32le(buf: &[u8]) -> f32 {
(((((buf[0] as u32) << 0)
| ((buf[1] as u32) << 8)
| ((buf[2] as u32) << 16)
| ((buf[3] as u32) << 24)) as i32 as f64
+ 0.5)
/ 2147483648f64) as f32
}
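// All of the read_/write_sample_* helpers above use a 2^31 (2147483648) full-scale reference and
// little-endian byte order, mapping each integer format to/from f32 samples in roughly [-1.0, 1.0].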
// Fast SIMD-optimized convolution. Optimized for NEON on the Raspberry Pi 3.
#[cfg(feature = "packed_simd")]
pub fn convolve(v1: &[f32], v2: &[f32]) -> f32 {
assert!(v1.len() == v2.len());
let mut sum1 = f32x4::splat(0.0);
let mut sum2 = f32x4::splat(0.0);
for i in 0..(v1.len() / 8) {
let v1_0 = f32x4::from_slice_unaligned(&v1[i * 8..]);
let v1_4 = f32x4::from_slice_unaligned(&v1[i * 8 + 4..]);
let v2_0 = f32x4::from_slice_unaligned(&v2[i * 8..]);
let v2_4 = f32x4::from_slice_unaligned(&v2[i * 8 + 4..]);
sum1 = sum1 + v1_0 * v2_0;
sum2 = sum2 + v1_4 * v2_4;
}
let mut pos = (v1.len() / 8) * 8;
while pos + 4 <= v1.len() {
sum1 = sum1
+ f32x4::from_slice_unaligned(&v1[pos..]) * f32x4::from_slice_unaligned(&v2[pos..]);
pos += 4;
}
let mut sum_end = 0.0;
while pos < v1.len() {
sum_end += v1[pos] * v2[pos];
pos += 1;
}
sum1.extract(0)
+ sum1.extract(1)
+ sum1.extract(2)
+ sum1.extract(3)
+ sum2.extract(0)
+ sum2.extract(1)
+ sum2.extract(2)
+ sum2.extract(3)
+ sum_end
}
#[cfg(not(feature = "packed_simd"))]
pub fn convolve(v1: &[f32], v2: &[f32]) -> f32 {
let mut r = 0.0;
let mut block_count = v1.len() / 4;
unsafe {
if v1.len() == 0 {
return 0.0;
}
let mut p1 = &v1[0] as *const f32;
let mut p2 = &v2[0] as *const f32;
while block_count > 0 {
r += *p1 * *p2;
p1 = p1.add(1);
p2 = p2.add(1);
r += *p1 * *p2;
p1 = p1.add(1);
p2 = p2.add(1);
r += *p1 * *p2;
p1 = p1.add(1);
p2 = p2.add(1);
r += *p1 * *p2;
p1 = p1.add(1);
p2 = p2.add(1);
block_count -= 1;
}
block_count = v1.len() % 4;
while block_count > 0 {
r += *p1 * *p2;
p1 = p1.add(1);
p2 = p2.add(1);
block_count -= 1;
}
}
r
}
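// Both convolve() variants compute the same dot product, e.g. (illustrative):
// convolve(&[1.0, 2.0], &[3.0, 4.0]) == 1.0 * 3.0 + 2.0 * 4.0 == 11.0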
pub fn samples_to_timedelta(sample_rate: f64, samples: i64) -> TimeDelta {
TimeDelta::seconds_f(samples as f64 / sample_rate)
}
pub fn get_sample_timestamp(start: Time, sample_rate: f64, sample: i64) -> Time {
start + samples_to_timedelta(sample_rate, sample)
}
#[derive(Copy, Clone, Debug)]
pub struct Gain {
pub db: f32,
}
impl Gain {
pub fn zero() -> Gain {
Gain { db: 0.0 }
}
pub fn get_multiplier(&self) -> f32 {
10f32.powf(self.db / 20.0)
}
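// Example (illustrative): Gain { db: -20.0 }.get_multiplier() ≈ 0.1, and Gain::from_level(0.1)
// recovers a gain of roughly -20 dB; from_level is the inverse of get_multiplier.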
pub fn from_level(level: f32) -> Gain {
Gain {
db: level.log(10.0) * 20.0,
}
}
}
impl Add for Gain {
type Output = Gain;
fn add(self, other: Gain) -> Gain {
Gain {
db: self.db + other.db,
}
}
}
impl AddAssign for Gain {
fn add_assign(&mut self, other: Gain) {
self.db += other.db;
}
}
impl Sub for Gain {
type Output = Gain;
fn sub(self, other: Gain) -> Gain {
Gain {
db: self.db - other.db,
}
}
}
impl SubAssign for Gain {
fn sub_assign(&mut self, other: Gain) {
self.db -= other.db;
}
}
impl PartialEq for Gain {
fn eq(&self, other: &Gain) -> bool {
self.db == other.db
}
}
impl Eq for Gain {}
impl serde::ser::Serialize for Gain {
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
serializer.serialize_f32(self.db)
}
}
pub struct Frame {
pub sample_rate: f64,
pub timestamp: Time,
pub gain: Gain,
channels: PerChannel<Vec<f32>>,
len: usize,
num_channels: usize,
}
pub struct FrameChannelIter<'a> {
iter: ChannelIter<'a, Vec<f32>>,
}
impl<'a> Iterator for FrameChannelIter<'a> {
type Item = (ChannelPos, &'a mut [f32]);
fn next(&mut self) -> Option<Self::Item> {
match self.iter.next() {
None => None,
Some((c, v)) => Some((c, &mut v[..])),
}
}
}
impl Frame {
pub fn new(sample_rate: f64, timestamp: Time, len: usize) -> Frame {
Frame {
sample_rate: sample_rate,
timestamp: timestamp,
gain: Gain::zero(),
channels: PerChannel::new(),
len: len,
num_channels: 0,
}
}
pub fn get_channel(&self, pos: ChannelPos) -> Option<&[f32]> {
self.channels.get(pos).map(|c| &c[..])
}
pub fn get_channel_mut(&mut self, pos: ChannelPos) -> Option<&mut [f32]> {
self.channels.get_mut(pos).map(|c| &mut c[..])
}
pub fn ensure_channel(&mut self, pos: ChannelPos) -> &mut [f32] {
let mut added = false;
let len = self.len;
let result = &mut (self.channels.get_or_insert(pos, || {
added = true;
// Avoid denormal zero.
vec![1e-10f32; len]
})[..]);
if added {
self.num_channels += 1;
}
result
}
pub fn set_channel(&mut self, pos: ChannelPos, samples: Vec<f32>) {
if self.num_channels == 0 && self.len == 0 {
self.len = samples.len();
}
assert!(samples.len() == self.len);
if !self.channels.have_channel(pos) {
self.num_channels += 1;
}
self.channels.set(pos, samples);
}
pub fn mix_channel(&mut self, pos: ChannelPos, samples: Vec<f32>, level: f32) {
assert!(samples.len() == self.len);
if !self.channels.have_channel(pos) && level == 1.0 {
self.num_channels += 1;
self.channels.set(pos, samples);
} else {
let data = self.ensure_channel(pos);
for i in 0..data.len() {
data[i] += samples[i] * level;
}
}
}
pub fn take_channel(&mut self, pos: ChannelPos) -> Option<Vec<f32>> {
if self.channels.have_channel(pos) {
self.num_channels -= 1;
}
self.channels.take(pos)
}
pub fn take_channel_or_zero(&mut self, pos: ChannelPos) -> Vec<f32> {
self.channels
.take(pos)
.unwrap_or_else(|| vec![1e-10f32; self.len])
}
pub fn iter_channels(&mut self) -> FrameChannelIter {
FrameChannelIter {
iter: self.channels.iter(),
}
}
pub fn channels(&self) -> usize {
self.num_channels
}
pub fn len(&self) -> usize {
self.len
}
pub fn duration(&self) -> TimeDelta {
samples_to_timedelta(self.sample_rate, self.len() as i64)
}
pub fn end_timestamp(&self) -> Time {
self.timestamp + self.duration()
}
pub fn to_buffer_with_channel_map(
&self,
format: SampleFormat,
out_channels: &[ChannelPos],
) -> Vec<u8> {
let bytes_per_frame = format.bytes_per_sample() * out_channels.len();
let multiplier = self.gain.get_multiplier();
let mut buf = vec![0; bytes_per_frame * self.len()];
for c in 0..out_channels.len() {
if out_channels[c] == CHANNEL_UNDEFINED {
continue;
}
assert!(out_channels[c] < CHANNEL_MAX);
let data = match self.channels.get(out_channels[c]) {
Some(data) => data,
None => continue,
};
let shift = c * format.bytes_per_sample();
match format {
SampleFormat::S16LE => {
for i in 0..self.len() {
write_sample_s16le(
data[i] * multiplier,
&mut buf[i * bytes_per_frame + shift..],
);
}
}
SampleFormat::S24LE3 => {
for i in 0..self.len() {
write_sample_s24le3(
data[i] * multiplier,
&mut buf[i * bytes_per_frame + shift..],
);
}
}
SampleFormat::S24LE4 => {
for i in 0..self.len() {
write_sample_s24le4(
data[i] * multiplier,
&mut buf[i * bytes_per_frame + shift..],
);
}
}
SampleFormat::S32LE => {
for i in 0..self.len() {
write_sample_s32le(
data[i] * multiplier,
&mut buf[i * bytes_per_frame + shift..],
);
}
}
SampleFormat::F32LE => {
for i in 0..self.len() {
LittleEndian::write_f32(
&mut buf[i * bytes_per_frame + shift..],
data[i] * multiplier,
);
}
}
}
}
buf
}
pub fn from_buffer_stereo(
format: SampleFormat,<|fim▁hole|> timestamp: Time,
) -> Frame {
Frame::from_buffer(
format,
sample_rate,
&[CHANNEL_FL, CHANNEL_FR],
buffer,
timestamp,
)
}
pub fn from_buffer(
format: SampleFormat,
sample_rate: f64,
channels: &[ChannelPos],
buffer: &[u8],
timestamp: Time,
) -> Frame {
let samples = buffer.len() / format.bytes_per_sample() / channels.len();
let mut frame = Frame::new(sample_rate, timestamp, samples);
let bytes_per_sample = format.bytes_per_sample() * channels.len();
for c in 0..channels.len() {
let data = frame.ensure_channel(channels[c]);
match format {
SampleFormat::S16LE => {
for i in 0..samples {
data[i] = read_sample_s16le(&buffer[i * bytes_per_sample + c * 2..]);
}
}
SampleFormat::S24LE3 => {
for i in 0..samples {
data[i] = read_sample_s24le3(&buffer[i * bytes_per_sample + c * 3..]);
}
}
SampleFormat::S24LE4 => {
for i in 0..samples {
data[i] = read_sample_s24le4(&buffer[i * bytes_per_sample + c * 4..]);
}
}
SampleFormat::S32LE => {
for i in 0..samples {
data[i] = read_sample_s32le(&buffer[i * bytes_per_sample + c * 4..]);
}
}
SampleFormat::F32LE => {
for i in 0..samples {
data[i] = LittleEndian::read_f32(&buffer[i * bytes_per_sample + c * 4..])
+ 1e-10f32;
}
}
}
}
frame
}
pub fn have_channel(&self, pos: ChannelPos) -> bool {
self.channels.have_channel(pos)
}
}
#[derive(Debug)]
pub struct Error {
msg: String,
}
pub type Result<T> = result::Result<T, Error>;
impl Error {
pub fn new(msg: &str) -> Error {
Error {
msg: String::from(msg),
}
}
pub fn from_string(msg: String) -> Error {
Error { msg: msg }
}
}
impl error::Error for Error {
fn description(&self) -> &str {
return &self.msg;
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", &self.msg)
}
}
impl From<alsa::Error> for Error {
fn from(e: alsa::Error) -> Error {
Error::from_string(format!("Alsa error: {}", e))
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Error {
Error::new(error::Error::description(&e))
}
}
impl From<rppal::gpio::Error> for Error {
fn from(e: rppal::gpio::Error) -> Error {
Error::from_string(format!("GPIO error: {}", e))
}
}
impl<R: pest::RuleType> From<pest::error::Error<R>> for Error {
fn from(e: pest::error::Error<R>) -> Error {
Error::from_string(format!("Failed to parse format expression: {}", e))
}
}
#[derive(Clone, Eq, PartialEq)]
pub struct DeviceSpec {
pub name: String,
pub id: String,
pub sample_rate: Option<usize>,
pub channels: Vec<ChannelPos>,
pub delay: TimeDelta,
pub enable_a52: bool,
}
pub struct SeriesStats {
window: usize,
values: VecDeque<f64>,
sum: f64,
}
impl SeriesStats {
pub fn new(window: usize) -> SeriesStats {
return SeriesStats {
window: window,
values: VecDeque::new(),
sum: 0.0f64,
};
}
pub fn average(&self) -> Option<f64> {
if self.values.is_empty() {
None
} else {
Some(self.sum / self.values.len() as f64)
}
}
pub fn push(&mut self, v: f64) {
self.values.push_back(v);
self.sum += v;
if self.values.len() > self.window {
self.sum -= self.values.pop_front().unwrap();
}
}
pub fn reset(&mut self) {
self.values.clear();
self.sum = 0.0;
}
}
pub struct StreamPositionTracker {
base_time: Time,
base_sample_rate: f64,
sample_rate: f64,
samples_pos: f64,
offset: SeriesStats,
prev_clock_drift: Option<TimeDelta>,
clock_drift: Option<TimeDelta>,
}
const RATE_UPDATE_PERIOD_S: f64 = 2.0;
// How much measured sample rate is allowed to deviate from the expected value.
// For 48kHz 0.3% is 144, i.e. sample rate can change between 47856 and 48144.
const MAX_RATE_DEVIATION: f64 = 0.003;
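// StreamPositionTracker below estimates the actual device sample rate: after every
// RATE_UPDATE_PERIOD_S seconds worth of samples it nudges `sample_rate` (clamped to
// MAX_RATE_DEVIATION) so that pos() drifts toward the target supplied via set_target_pos().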
impl StreamPositionTracker {
pub fn new(sample_rate: f64) -> StreamPositionTracker {
StreamPositionTracker {
base_time: Time::zero(),
base_sample_rate: sample_rate,
sample_rate: sample_rate,
samples_pos: 0.0,
offset: SeriesStats::new(200),
prev_clock_drift: None,
clock_drift: None,
}
}
pub fn offset(&self) -> f64 {
self.offset.average().unwrap_or(0.0)
}
pub fn pos_no_offset(&self) -> Time {
let pos_s = self.samples_pos / self.sample_rate;
self.base_time + TimeDelta::seconds_f(pos_s)
}
pub fn pos(&self) -> Time {
let pos_s = self.samples_pos / self.sample_rate + self.offset.average().unwrap_or(0.0);
self.base_time + TimeDelta::seconds_f(pos_s)
}
pub fn set_target_pos(&mut self, target_pos: Option<Time>) {
self.clock_drift = target_pos.map(|t| t - self.pos());
}
pub fn add_samples(&mut self, samples: usize, pos_estimate: Time) {
if self.base_time == Time::zero() {
self.base_time = pos_estimate;
self.samples_pos = 0.0;
} else {
self.samples_pos += samples as f64;
let new_offset = (pos_estimate - self.base_time).in_seconds_f()
- self.samples_pos / self.sample_rate;
self.offset.push(new_offset);
}
}
pub fn reset(&mut self, base_time: Time, base_sample_rate: f64) {
self.base_time = base_time;
self.base_sample_rate = base_sample_rate;
self.sample_rate = self.base_sample_rate;
self.samples_pos = 0.0;
self.offset.reset();
self.clock_drift = None;
self.prev_clock_drift = None;
}
pub fn update_sample_rate(&mut self) -> Option<f64> {
if self.samples_pos < RATE_UPDATE_PERIOD_S * self.sample_rate {
return None;
}
let diff = match (self.clock_drift, self.prev_clock_drift) {
(Some(clock_drift), Some(prev_clock_drift)) => {
(clock_drift - prev_clock_drift + clock_drift / 5).in_seconds_f()
}
_ => 0.0,
};
let mut new_sample_rate =
self.sample_rate + diff * self.sample_rate * self.sample_rate / self.samples_pos / 2.0;
let min = self.base_sample_rate * (1.0 - MAX_RATE_DEVIATION);
if new_sample_rate < min {
new_sample_rate = min;
}
let max = self.base_sample_rate * (1.0 + MAX_RATE_DEVIATION);
if new_sample_rate > max {
new_sample_rate = max;
}
println!(
"offset {} diff {} new_rate {}.",
self.offset() * 1000.0,
diff * 1000.0,
new_sample_rate
);
self.prev_clock_drift = self.clock_drift;
self.base_time = self.pos_no_offset();
self.samples_pos = 0.0;
self.sample_rate = new_sample_rate;
Some(new_sample_rate)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn frame_read_write_s16le() {
// Verify that converting S16LE->Frame->S16LE doesn't change any data.
let mut buf: Vec<u8> = vec![0u8; 0];
for i in 0..32767 {
buf.push((i & 0xff) as u8);
buf.push(((i & 0xff00) >> 8) as u8);
let r = (-(i as i32)) as u32;
buf.push((r & 0xff) as u8);
buf.push(((r & 0xff00) >> 8) as u8);
}
let frame = Frame::from_buffer_stereo(SampleFormat::S16LE, 44100.0, &buf[..], Time::now());
let buf2 = frame.to_buffer_with_channel_map(SampleFormat::S16LE, &[CHANNEL_FL, CHANNEL_FR]);
assert_eq!(buf.len(), buf2.len());
for i in 0..buf.len() {
assert_eq!(buf[i], buf2[i]);
}
}
#[test]
fn clamping() {
let mut frame = Frame::new(100.0, Time::now(), 1);
frame.ensure_channel(CHANNEL_FL)[0] = 1.5;
frame.ensure_channel(CHANNEL_FR)[0] = -1.5;
let buf16 =
frame.to_buffer_with_channel_map(SampleFormat::S16LE, &[CHANNEL_FL, CHANNEL_FR]);
// 32767
assert_eq!(buf16[0], 0xff);
assert_eq!(buf16[1], 0x7f);
// -32768
assert_eq!(buf16[2], 0x00);
assert_eq!(buf16[3], 0x80);
let buf24 =
frame.to_buffer_with_channel_map(SampleFormat::S24LE3, &[CHANNEL_FL, CHANNEL_FR]);
assert_eq!(buf24[0], 0xff);
assert_eq!(buf24[1], 0xff);
assert_eq!(buf24[2], 0x7f);
assert_eq!(buf24[3], 0x00);
assert_eq!(buf24[4], 0x00);
assert_eq!(buf24[5], 0x80);
let buf32 =
frame.to_buffer_with_channel_map(SampleFormat::S32LE, &[CHANNEL_FL, CHANNEL_FR]);
assert_eq!(buf32[0], 0xff);
assert_eq!(buf32[1], 0xff);
assert_eq!(buf32[2], 0xff);
assert_eq!(buf32[3], 0x7f);
assert_eq!(buf32[4], 0x00);
assert_eq!(buf32[5], 0x00);
assert_eq!(buf32[6], 0x00);
assert_eq!(buf32[7], 0x80);
}
}<|fim▁end|> | sample_rate: f64,
buffer: &[u8], |
<|file_name|>layer_norm.py<|end_file_name|><|fim▁begin|>import torch
import torch.nn as nn
class LayerNorm(nn.Module):
def __init__(self, features, eps=1e-6):<|fim▁hole|> super().__init__()
self.gamma = nn.Parameter(torch.ones(features))
self.beta = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
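# Normalize over the last (feature) dimension; note that eps is added to the std here, not to the variance.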
return self.gamma * (x - mean) / (std + self.eps) + self.beta<|fim▁end|> | |
<|file_name|>renderer.rs<|end_file_name|><|fim▁begin|>// This file is part of Mooneye GB.
// Copyright (C) 2014-2020 Joonas Javanainen <[email protected]>
//
// Mooneye GB is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Mooneye GB is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Mooneye GB. If not, see <http://www.gnu.org/licenses/>.
use anyhow::Error;
use glium::backend::Facade;
use glium::index::PrimitiveType;
use glium::texture::pixel_buffer::PixelBuffer;
use glium::texture::texture2d::Texture2d;
use glium::texture::{MipmapsOption, UncompressedFloatFormat};
use glium::uniforms::{MagnifySamplerFilter, MinifySamplerFilter};
use glium::{implement_vertex, program, uniform};
use glium::{DrawParameters, IndexBuffer, Program, Surface, VertexBuffer};
use mooneye_gb;
use nalgebra::{Matrix4, Vector4};
type Texture = Texture2d;
#[derive(Copy, Clone)]
pub struct Vertex {
position: [f32; 2],
tex_coords: [f32; 2],
}
implement_vertex!(Vertex, position, tex_coords);
pub struct Renderer {
vertex_buffer: VertexBuffer<Vertex>,
index_buffer: IndexBuffer<u16>,
pixel_buffer: PixelBuffer<u8>,
program: Program,
texture_even: Texture,
texture_odd: Texture,
matrix: Matrix4<f32>,
palette: Matrix4<f32>,
frame_state: FrameState,
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum FrameState {
Even,
Odd,
}
impl FrameState {
fn flip(&mut self) {
*self = match self {
FrameState::Even => FrameState::Odd,
FrameState::Odd => FrameState::Even,
}
}
}
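// The renderer keeps two textures (even/odd); update_pixels() flips the frame state and uploads into
// the texture for the new frame, and draw() binds both as tex_front/tex_back, presumably so the
// shaders can blend consecutive frames.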
const TEXTURE_WIDTH: u32 = 256;
const TEXTURE_HEIGHT: u32 = 256;
const TEX_OFFSET_X: f32 = mooneye_gb::SCREEN_WIDTH as f32 / TEXTURE_WIDTH as f32;
const TEX_OFFSET_Y: f32 = mooneye_gb::SCREEN_HEIGHT as f32 / TEXTURE_HEIGHT as f32;
fn upload_pixels(texture: &mut Texture, pixel_buffer: &PixelBuffer<u8>) {
texture.main_level().raw_upload_from_pixel_buffer(
pixel_buffer.as_slice(),
0..mooneye_gb::SCREEN_WIDTH as u32,
0..mooneye_gb::SCREEN_HEIGHT as u32,<|fim▁hole|>
const ASPECT_RATIO: f32 = mooneye_gb::SCREEN_WIDTH as f32 / mooneye_gb::SCREEN_HEIGHT as f32;
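// Computes (x, y) scale factors that letterbox or pillarbox the output quad so the Game Boy's
// aspect ratio is preserved whatever the window's framebuffer dimensions are.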
fn aspect_ratio_correction(width: u32, height: u32) -> (f32, f32) {
let fb_aspect_ratio = width as f32 / height as f32;
let scale = ASPECT_RATIO / fb_aspect_ratio;
if fb_aspect_ratio >= ASPECT_RATIO {
(scale, 1.0)
} else {
(1.0, 1.0 / scale)
}
}
impl Renderer {
pub fn new<F: Facade>(display: &F) -> Result<Renderer, Error> {
let vertexes = [
Vertex {
position: [-1.0, -1.0],
tex_coords: [0.0, TEX_OFFSET_Y],
},
Vertex {
position: [-1.0, 1.0],
tex_coords: [0.0, 0.0],
},
Vertex {
position: [1.0, 1.0],
tex_coords: [TEX_OFFSET_X, 0.0],
},
Vertex {
position: [1.0, -1.0],
tex_coords: [TEX_OFFSET_X, TEX_OFFSET_Y],
},
];
let vertex_buffer = VertexBuffer::immutable(display, &vertexes)?;
let index_buffer =
IndexBuffer::immutable(display, PrimitiveType::TriangleStrip, &[1u16, 2, 0, 3])?;
let program = program!(
display,
140 => {
vertex: include_str!("shader/vert_140.glsl"),
fragment: include_str!("shader/frag_140.glsl"),
outputs_srgb: true
},
110 => {
vertex: include_str!("shader/vert_110.glsl"),
fragment: include_str!("shader/frag_110.glsl"),
outputs_srgb: true
}
)?;
let pixel_buffer = PixelBuffer::new_empty(
display,
mooneye_gb::SCREEN_WIDTH * mooneye_gb::SCREEN_HEIGHT,
);
pixel_buffer.write(&[0; mooneye_gb::SCREEN_PIXELS]);
let mut texture_even = Texture::empty_with_format(
display,
UncompressedFloatFormat::U8,
MipmapsOption::NoMipmap,
TEXTURE_WIDTH,
TEXTURE_HEIGHT,
)?;
let mut texture_odd = Texture::empty_with_format(
display,
UncompressedFloatFormat::U8,
MipmapsOption::NoMipmap,
TEXTURE_WIDTH,
TEXTURE_HEIGHT,
)?;
upload_pixels(&mut texture_even, &pixel_buffer);
upload_pixels(&mut texture_odd, &pixel_buffer);
let (width, height) = display.get_context().get_framebuffer_dimensions();
let (x_scale, y_scale) = aspect_ratio_correction(width, height);
let matrix = Matrix4::from_diagonal(&Vector4::new(x_scale, y_scale, 1.0, 1.0));
let palette = Matrix4::new(
255.0, 181.0, 107.0, 33.0, 247.0, 174.0, 105.0, 32.0, 123.0, 74.0, 49.0, 16.0, 1.0, 1.0, 1.0,
1.0,
) / 255.0;
Ok(Renderer {
vertex_buffer,
index_buffer,
pixel_buffer,
program,
texture_even,
texture_odd,
matrix,
palette,
frame_state: FrameState::Even,
})
}
pub fn draw<S: Surface>(&self, frame: &mut S) -> Result<(), Error> {
let matrix: &[[f32; 4]; 4] = self.matrix.as_ref();
let palette: &[[f32; 4]; 4] = self.palette.as_ref();
let (tex_front, tex_back) = match self.frame_state {
FrameState::Even => (&self.texture_even, &self.texture_odd),
FrameState::Odd => (&self.texture_odd, &self.texture_even),
};
let uniforms = uniform! {
matrix: *matrix,
palette: *palette,
tex_front: tex_front.sampled()
.minify_filter(MinifySamplerFilter::Nearest)
.magnify_filter(MagnifySamplerFilter::Nearest),
tex_back: tex_back.sampled()
.minify_filter(MinifySamplerFilter::Nearest)
.magnify_filter(MagnifySamplerFilter::Nearest),
};
let params = DrawParameters {
..Default::default()
};
frame.draw(
&self.vertex_buffer,
&self.index_buffer,
&self.program,
&uniforms,
¶ms,
)?;
Ok(())
}
pub fn update_dimensions<F: Facade>(&mut self, display: &F) {
let (width, height) = display.get_context().get_framebuffer_dimensions();
let (x_scale, y_scale) = aspect_ratio_correction(width, height);
self.matrix.m11 = x_scale;
self.matrix.m22 = y_scale;
}
pub fn update_pixels(&mut self, pixels: &mooneye_gb::ScreenBuffer) {
let mut buffer = [0u8; mooneye_gb::SCREEN_PIXELS];
for idx in 0..mooneye_gb::SCREEN_PIXELS {
buffer[idx] = pixels[idx] as u8;
}
self.pixel_buffer.write(&buffer);
self.frame_state.flip();
let texture = match self.frame_state {
FrameState::Odd => &mut self.texture_odd,
FrameState::Even => &mut self.texture_even,
};
upload_pixels(texture, &self.pixel_buffer);
}
}<|fim▁end|> | 0..1,
);
} |
<|file_name|>com_amazonaws_services_ec2_model_Storage.go<|end_file_name|><|fim▁begin|>package ec2
import "github.com/timob/javabind"
type ServicesEc2ModelStorageInterface interface {
JavaLangObjectInterface
// public void setS3(com.amazonaws.services.ec2.model.S3Storage)
SetS3(a ServicesEc2ModelS3StorageInterface)
// public com.amazonaws.services.ec2.model.S3Storage getS3()
GetS3() *ServicesEc2ModelS3Storage
// public com.amazonaws.services.ec2.model.Storage withS3(com.amazonaws.services.ec2.model.S3Storage)
WithS3(a ServicesEc2ModelS3StorageInterface) *ServicesEc2ModelStorage
// public com.amazonaws.services.ec2.model.Storage clone()
Clone() *ServicesEc2ModelStorage
}
type ServicesEc2ModelStorage struct {
JavaLangObject
}
// public com.amazonaws.services.ec2.model.Storage()
func NewServicesEc2ModelStorage() (*ServicesEc2ModelStorage) {
obj, err := javabind.GetEnv().NewObject("com/amazonaws/services/ec2/model/Storage")
if err != nil {
panic(err)
}
x := &ServicesEc2ModelStorage{}
x.Callable = &javabind.Callable{obj}
return x
}
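// The wrapper methods below all follow the same javabind pattern: convert Go arguments to JVM
// objects, invoke the Java method through the JNI bridge, then convert the returned reference back
// into a Go wrapper struct.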
// public void setS3(com.amazonaws.services.ec2.model.S3Storage)
func (jbobject *ServicesEc2ModelStorage) SetS3(a ServicesEc2ModelS3StorageInterface) {
conv_a := javabind.NewGoToJavaCallable()
if err := conv_a.Convert(a); err != nil {
panic(err)
}
_, err := jbobject.CallMethod(javabind.GetEnv(), "setS3", javabind.Void, conv_a.Value().Cast("com/amazonaws/services/ec2/model/S3Storage"))
if err != nil {
panic(err)
}
conv_a.CleanUp()
}
// public com.amazonaws.services.ec2.model.S3Storage getS3()
func (jbobject *ServicesEc2ModelStorage) GetS3() *ServicesEc2ModelS3Storage {
jret, err := jbobject.CallMethod(javabind.GetEnv(), "getS3", "com/amazonaws/services/ec2/model/S3Storage")
if err != nil {
panic(err)
}
retconv := javabind.NewJavaToGoCallable()
dst := &javabind.Callable{}
retconv.Dest(dst)
if err := retconv.Convert(javabind.ObjectRef(jret)); err != nil {
panic(err)
}
retconv.CleanUp()
unique_x := &ServicesEc2ModelS3Storage{}
unique_x.Callable = dst
return unique_x
}
// public com.amazonaws.services.ec2.model.Storage withS3(com.amazonaws.services.ec2.model.S3Storage)
func (jbobject *ServicesEc2ModelStorage) WithS3(a ServicesEc2ModelS3StorageInterface) *ServicesEc2ModelStorage {
conv_a := javabind.NewGoToJavaCallable()
if err := conv_a.Convert(a); err != nil {
panic(err)
}
jret, err := jbobject.CallMethod(javabind.GetEnv(), "withS3", "com/amazonaws/services/ec2/model/Storage", conv_a.Value().Cast("com/amazonaws/services/ec2/model/S3Storage"))
if err != nil {
panic(err)
}
conv_a.CleanUp()
retconv := javabind.NewJavaToGoCallable()<|fim▁hole|> }
retconv.CleanUp()
unique_x := &ServicesEc2ModelStorage{}
unique_x.Callable = dst
return unique_x
}
// public java.lang.String toString()
func (jbobject *ServicesEc2ModelStorage) ToString() string {
jret, err := jbobject.CallMethod(javabind.GetEnv(), "toString", "java/lang/String")
if err != nil {
panic(err)
}
retconv := javabind.NewJavaToGoString()
dst := new(string)
retconv.Dest(dst)
if err := retconv.Convert(javabind.ObjectRef(jret)); err != nil {
panic(err)
}
retconv.CleanUp()
return *dst
}
// public boolean equals(java.lang.Object)
func (jbobject *ServicesEc2ModelStorage) Equals(a interface{}) bool {
conv_a := javabind.NewGoToJavaCallable()
if err := conv_a.Convert(a); err != nil {
panic(err)
}
jret, err := jbobject.CallMethod(javabind.GetEnv(), "equals", javabind.Boolean, conv_a.Value().Cast("java/lang/Object"))
if err != nil {
panic(err)
}
conv_a.CleanUp()
return jret.(bool)
}
// public int hashCode()
func (jbobject *ServicesEc2ModelStorage) HashCode() int {
jret, err := jbobject.CallMethod(javabind.GetEnv(), "hashCode", javabind.Int)
if err != nil {
panic(err)
}
return jret.(int)
}
// public com.amazonaws.services.ec2.model.Storage clone()
func (jbobject *ServicesEc2ModelStorage) Clone() *ServicesEc2ModelStorage {
jret, err := jbobject.CallMethod(javabind.GetEnv(), "clone", "com/amazonaws/services/ec2/model/Storage")
if err != nil {
panic(err)
}
retconv := javabind.NewJavaToGoCallable()
dst := &javabind.Callable{}
retconv.Dest(dst)
if err := retconv.Convert(javabind.ObjectRef(jret)); err != nil {
panic(err)
}
retconv.CleanUp()
unique_x := &ServicesEc2ModelStorage{}
unique_x.Callable = dst
return unique_x
}
// public java.lang.Object clone() throws java.lang.CloneNotSupportedException
func (jbobject *ServicesEc2ModelStorage) Clone2() (*JavaLangObject, error) {
jret, err := jbobject.CallMethod(javabind.GetEnv(), "clone", "java/lang/Object")
if err != nil {
var zero *JavaLangObject
return zero, err
}
retconv := javabind.NewJavaToGoCallable()
dst := &javabind.Callable{}
retconv.Dest(dst)
if err := retconv.Convert(javabind.ObjectRef(jret)); err != nil {
panic(err)
}
retconv.CleanUp()
unique_x := &JavaLangObject{}
unique_x.Callable = dst
return unique_x, nil
}<|fim▁end|> | dst := &javabind.Callable{}
retconv.Dest(dst)
if err := retconv.Convert(javabind.ObjectRef(jret)); err != nil {
panic(err) |
<|file_name|>refresh_code_plugin.py<|end_file_name|><|fim▁begin|># proxy module<|fim▁hole|>from envisage.plugins.refresh_code.refresh_code_plugin import *<|fim▁end|> | from __future__ import absolute_import |
<|file_name|>Solution.java<|end_file_name|><|fim▁begin|>public class Solution {
public List<List<Integer>> generate(int numRows) {
List<List<Integer>> result = new ArrayList<>();
ArrayList<Integer> tmp = new ArrayList<>();
for (int i = 0; i < numRows; i++) {<|fim▁hole|> result.add(new ArrayList<>(tmp));
}
return result;
}
}<|fim▁end|> | tmp.add(0, 1);
for (int j = 1; j < tmp.size() - 1; j++) {
tmp.set(j, tmp.get(j) + tmp.get(j + 1));
} |
<|file_name|>generate_block.rs<|end_file_name|><|fim▁begin|>// Copyrighttape Technologies LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::crypto::{CreateKey, KeyPair, PrivKey, Sign, Signature};
use bincode::{serialize, Infinite};
use cita_types::{Address, H256, U256};
use hashable::Hashable;
use libproto::TryInto;
use libproto::{Block, BlockWithProof, Message, SignedTransaction, Transaction};
use proof::BftProof;
use rustc_serialize::hex::FromHex;
use std::collections::HashMap;
use std::convert::Into;
use std::time::Duration;
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum Step {
Propose,
Prevote,
Precommit,
Commit,
}
pub trait AsMillis {
fn as_millis(&self) -> u64;
}
impl AsMillis for Duration {
fn as_millis(&self) -> u64 {
self.as_secs() * 1_000 + u64::from(self.subsec_millis())
}<|fim▁hole|>impl BuildBlock {
pub fn build_contract_address(sender: &Address, nonce: &U256) -> Address {
use rlp::RlpStream;
let mut stream = RlpStream::new_list(2);
stream.append(sender);
stream.append(nonce);
From::from(stream.out().crypt_hash())
}
pub fn build_tx(
to_address: &str,
data: &str,
quota: u64,
nonce: u32,
valid_until_block: u64,
privkey: &PrivKey,
) -> SignedTransaction {
let data = data.from_hex().unwrap();
let mut tx = Transaction::new();
tx.set_data(data);
tx.set_nonce(format!("{}", nonce));
tx.set_quota(quota);
// create contract if `to_address` empty
tx.set_to(to_address.to_string());
tx.set_valid_until_block(valid_until_block);
tx.set_value(vec![0u8; 32]);
tx.set_chain_id(123);
tx.set_chain_id_v1(vec![]);
tx.sign(*privkey)
}
/// Build a signed block with given transactions
pub fn build_block_with_proof(
txs: &[SignedTransaction],
pre_hash: H256,
height: u64,
privkey: &PrivKey,
timestamp: u64,
) -> (Vec<u8>, BlockWithProof) {
let sender = KeyPair::from_privkey(*privkey).unwrap().address();
let mut block = Block::new();
block.mut_header().set_timestamp(timestamp * 1000);
block.mut_header().set_height(height);
block.mut_header().set_prevhash(pre_hash.0.to_vec());
block.mut_body().set_transactions(txs.into());
let mut proof = BftProof::default();
proof.height = height as usize;
proof.round = 0;
proof.proposal = H256::default();
let mut commits = HashMap::new();
let msg = serialize(
&(
proof.height,
proof.round,
Step::Precommit,
sender,
Some(proof.proposal),
),
Infinite,
)
.unwrap();
let signature = Signature::sign(privkey, &msg.crypt_hash()).unwrap();
commits.insert((*sender).into(), signature);
proof.commits = commits;
let mut previous_proof = proof.clone();
previous_proof.height = height as usize - 1;
block.mut_header().set_proof(previous_proof.into());
let transactions_root = block.get_body().transactions_root();
block
.mut_header()
.set_transactions_root(transactions_root.to_vec());
let mut proof_blk = BlockWithProof::new();
proof_blk.set_blk(block);
proof_blk.set_proof(proof.into());
let msg: Message = proof_blk.clone().into();
(msg.try_into().unwrap(), proof_blk)
}
}<|fim▁end|> | }
pub struct BuildBlock {}
|
<|file_name|>unwind-box-unique.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//<|fim▁hole|>// except according to those terms.
// error-pattern:fail
extern crate debug;
use std::gc::GC;
fn failfn() {
fail!();
}
fn main() {
let x = box(GC) box 0i;
failfn();
println!("{:?}", x);
}<|fim▁end|> | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed |
<|file_name|>lessons10.js<|end_file_name|><|fim▁begin|>/**
* Created by sasha on 18.11.13.<|fim▁hole|> */<|fim▁end|> | |
<|file_name|>tools.py<|end_file_name|><|fim▁begin|>## $Id: tools.py 23525 2011-05-12 04:11:40Z davea $
import configxml
try:
# use new hashlib if available
from hashlib import md5
except:
import md5
import os, shutil, binascii, filecmp
# from http://www.plope.com/software/uuidgen/view
_urandomfd = None
def urandom(n):
"""urandom(n) -> str
Return a string of n random bytes suitable for cryptographic use.
"""
global _urandomfd
if _urandomfd is None:
try:
_urandomfd = os.open("/dev/urandom", os.O_RDONLY)
except:
_urandomfd = NotImplementedError
if _urandomfd is NotImplementedError:
raise NotImplementedError("/dev/urandom (or equivalent) not found")
bytes = ""
while len(bytes) < n:
bytes += os.read(_urandomfd, n - len(bytes))
return bytes
def make_uuid():
return binascii.hexlify(urandom(16))
def md5_file(path):
"""
Return the 32-character MD5 hex digest of a file's contents
Read the file in chunks
"""
chunk = 8096
try:
checksum = md5()
except NameError:
checksum = md5.new()
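# hashlib.md5 is a constructor and is called directly; the legacy md5 module (the fallback import) exposes md5.new() instead.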
fp = open(path, 'r')
while True:
buffer = fp.read(chunk)
if not buffer:
break
checksum.update(buffer)
fp.close()<|fim▁hole|>def file_size(path):
"""Return the size of a file"""
f = open(path)
f.seek(0,2)
return f.tell()
def query_yesno(str):
'''Query user; default Yes'''
print str, "[Y/n] ",
return not raw_input().strip().lower().startswith('n')
def query_noyes(str):
'''Query user; default No'''
print str, "[y/N] ",
return raw_input().strip().lower().startswith('y')
def get_output_file_path(filename):
""" Return the filename's path in the upload directory
Use this if you're developing a validator/assimilator in Python
"""
config = configxml.default_config()
fanout = long(config.config.uldl_dir_fanout)
s = md5.new(filename).hexdigest()[1:8]
x = long(s, 16)
return "%s/%x/%s" % (config.config.upload_dir, x % fanout, filename)<|fim▁end|> |
return checksum
|
<|file_name|>base.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <[email protected]>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
|
This file contains the DAL support for many relational databases, including:
- SQLite & SpatiaLite
- MySQL
- Postgres
- Firebird
- Oracle
- MS SQL
- DB2
- Interbase
- Ingres
- Informix (9+ and SE)
- SapDB (experimental)
- Cubrid (experimental)
- CouchDB (experimental)
- MongoDB (in progress)
- Google:nosql
- Google:sql
- Teradata
- IMAP (experimental)
Example of usage::
>>> # from dal import DAL, Field
### create DAL connection (and create DB if it doesn't exist)
>>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'),
... folder=None)
### define a table 'person' (create/alter as necessary)
>>> person = db.define_table('person',Field('name','string'))
### insert a record
>>> id = person.insert(name='James')
### retrieve it by id
>>> james = person(id)
### retrieve it by name
>>> james = person(name='James')
### retrieve it by arbitrary query
>>> query = (person.name=='James') & (person.name.startswith('J'))
>>> james = db(query).select(person.ALL)[0]
### update one record
>>> james.update_record(name='Jim')
<Row {'id': 1, 'name': 'Jim'}>
### update multiple records by query
>>> db(person.name.like('J%')).update(name='James')
1
### delete records by query
>>> db(person.name.lower() == 'jim').delete()
0
### retrieve multiple records (rows)
>>> people = db(person).select(orderby=person.name,
... groupby=person.name, limitby=(0,100))
### further filter them
>>> james = people.find(lambda row: row.name == 'James').first()
>>> print james.id, james.name
1 James
### check aggregates
>>> counter = person.id.count()
>>> print db(person).select(counter).first()(counter)
1
### delete one record
>>> james.delete_record()
1
### delete (drop) entire database table
>>> person.drop()
Supported DAL URI strings::
'sqlite://test.db'
'spatialite://test.db'
'sqlite:memory'
'spatialite:memory'
'jdbc:sqlite://test.db'
'mysql://root:none@localhost/test'
'postgres://mdipierro:password@localhost/test'
'postgres:psycopg2://mdipierro:password@localhost/test'
'postgres:pg8000://mdipierro:password@localhost/test'
'jdbc:postgres://mdipierro:none@localhost/test'
'mssql://web2py:none@A64X2/web2py_test'
'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings
'mssql3://web2py:none@A64X2/web2py_test' # better pagination (requires >= 2005)
'mssql4://web2py:none@A64X2/web2py_test' # best pagination (requires >= 2012)
'oracle://username:password@database'
'firebird://user:password@server:3050/database'
'db2:ibm_db_dbi://DSN=dsn;UID=user;PWD=pass'
'db2:pyodbc://driver=DB2;hostname=host;database=database;uid=user;pwd=password;port=port'
'firebird://username:password@hostname/database'
'firebird_embedded://username:password@c://path'
'informix://user:password@server:3050/database'
'informixu://user:password@server:3050/database' # unicode informix
'ingres://database' # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name'
'google:datastore' # for google app engine datastore (uses ndb by default)
'google:sql' # for google app engine with sql (mysql compatible)
'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental
'imap://user:password@server:port' # experimental
'mongodb://user:password@server:port/database' # experimental
For more info::
help(DAL)
help(Field)
"""
import copy
import glob
import logging
import socket
import threading
import time
import traceback
import urllib
from uuid import uuid4
from ._compat import PY2, pickle, hashlib_md5, pjoin, copyreg, integer_types, \
with_metaclass
from ._globals import GLOBAL_LOCKER, THREAD_LOCAL, DEFAULT
from ._load import OrderedDict
from .helpers.classes import Serializable, SQLCallableList, BasicStorage
from .helpers.methods import hide_password, smart_query, auto_validators, \
auto_represent
from .helpers.regex import REGEX_PYTHON_KEYWORDS, REGEX_DBNAME, \
REGEX_SEARCH_PATTERN, REGEX_SQUARE_BRACKETS
from .helpers.serializers import serializers
from .objects import Table, Field, Row, Set
from .adapters import ADAPTERS
from .adapters.base import BaseAdapter
long = integer_types[-1]
TABLE_ARGS = set(
('migrate', 'primarykey', 'fake_migrate', 'format', 'redefine',
'singular', 'plural', 'trigger_name', 'sequence_name', 'fields',
'common_filter', 'polymodel', 'table_class', 'on_define', 'rname'))
class MetaDAL(type):
def __call__(cls, *args, **kwargs):
#: intercept arguments for DAL customisation on call
intercepts = [
'logger', 'representers', 'serializers', 'uuid', 'validators',
'validators_method']
intercepted = []
for name in intercepts:
val = kwargs.get(name)
if val:
intercepted.append((name, val))
del kwargs[name]
for tup in intercepted:
setattr(cls, tup[0], tup[1])
obj = super(MetaDAL, cls).__call__(*args, **kwargs)
return obj
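# The metaclass above lets callers pass class-level hooks (logger, serializers, uuid, validators, ...)
# directly to DAL(...); they are popped from kwargs and attached to the class before the instance is created.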
class DAL(with_metaclass(MetaDAL, Serializable, BasicStorage)):
"""
An instance of this class represents a database connection
Args:
uri(str): contains information for connecting to a database.
Defaults to `'sqlite://dummy.db'`
Note:
experimental: you can specify a dictionary as uri
parameter i.e. with::
db = DAL({"uri": "sqlite://storage.sqlite",
"tables": {...}, ...})
for an example of dict input you can check the output
of the scaffolding db model with
db.as_dict()
Note that for compatibility with Python older than
version 2.6.5 you should cast your dict input keys
to str due to a syntax limitation on kwarg names.
for proper DAL dictionary input you can use one of::
obj = serializers.cast_keys(dict, [encoding="utf-8"])
#or else (for parsing json input)
obj = serializers.loads_json(data, unicode_keys=False)
pool_size: How many open connections to make to the database object.
folder: where .table files will be created. Automatically set within
web2py. Use an explicit path when using DAL outside web2py
db_codec: string encoding of the database (default: 'UTF-8')
table_hash: database identifier with .tables. If your connection hash
changes you can still use old .tables if they have db_hash
as prefix
check_reserved: list of adapters to check tablenames and column names
against sql/nosql reserved keywords. Defaults to `None`
- 'common' List of sql keywords that are common to all database
types such as "SELECT, INSERT". (recommended)
- 'all' Checks against all known SQL keywords
- '<adaptername>'' Checks against the specific adapters list of
keywords
- '<adaptername>_nonreserved' Checks against the specific adapters
list of nonreserved keywords. (if available)
migrate: sets default migrate behavior for all tables
fake_migrate: sets default fake_migrate behavior for all tables
migrate_enabled: If set to False disables ALL migrations
fake_migrate_all: If set to True fake migrates ALL tables
attempts: Number of times to attempt connecting
auto_import: If set to True, tries import automatically table
definitions from the databases folder (works only for simple models)
bigint_id: If set, turn on bigint instead of int for id and reference
fields
lazy_tables: delays table definition until table access
after_connection: can be a callable that will be executed after the
connection
Example:
Use as::
db = DAL('sqlite://test.db')
or::
db = DAL(**{"uri": ..., "tables": [...]...}) # experimental
db.define_table('tablename', Field('fieldname1'),
Field('fieldname2'))
"""
serializers = None
validators = None
validators_method = None
representers = {}
uuid = lambda x: str(uuid4())
logger = logging.getLogger("pyDAL")
Table = Table
def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
if not hasattr(THREAD_LOCAL, 'db_instances'):
THREAD_LOCAL.db_instances = {}
if not hasattr(THREAD_LOCAL, 'db_instances_zombie'):
THREAD_LOCAL.db_instances_zombie = {}
if uri == '<zombie>':
db_uid = kwargs['db_uid'] # a zombie must have a db_uid!
if db_uid in THREAD_LOCAL.db_instances:
db_group = THREAD_LOCAL.db_instances[db_uid]
db = db_group[-1]
elif db_uid in THREAD_LOCAL.db_instances_zombie:
db = THREAD_LOCAL.db_instances_zombie[db_uid]
else:
db = super(DAL, cls).__new__(cls)
THREAD_LOCAL.db_instances_zombie[db_uid] = db
else:
db_uid = kwargs.get('db_uid', hashlib_md5(repr(uri)).hexdigest())
if db_uid in THREAD_LOCAL.db_instances_zombie:
db = THREAD_LOCAL.db_instances_zombie[db_uid]
del THREAD_LOCAL.db_instances_zombie[db_uid]
else:
db = super(DAL, cls).__new__(cls)
db_group = THREAD_LOCAL.db_instances.get(db_uid, [])
db_group.append(db)
THREAD_LOCAL.db_instances[db_uid] = db_group
db._db_uid = db_uid
return db
@staticmethod
def set_folder(folder):
# ## this allows gluon to set a folder for this thread
# ## <<<<<<<<< Should go away as new DAL replaces old sql.py
BaseAdapter.set_folder(folder)
@staticmethod
def get_instances():
"""
Returns a dictionary with uri as key with timings and defined tables::
{'sqlite://storage.sqlite': {
'dbstats': [(select auth_user.email from auth_user, 0.02009)],
'dbtables': {
'defined': ['auth_cas', 'auth_event', 'auth_group',
'auth_membership', 'auth_permission', 'auth_user'],
'lazy': '[]'
}
}
}
"""
dbs = getattr(THREAD_LOCAL, 'db_instances', {}).items()
infos = {}
for db_uid, db_group in dbs:
for db in db_group:
if not db._uri:
continue
k = hide_password(db._adapter.uri)
infos[k] = dict(
dbstats = [(row[0], row[1]) for row in db._timings],
dbtables = {'defined': sorted(
list(set(db.tables)-set(db._LAZY_TABLES.keys()))),
'lazy': sorted(db._LAZY_TABLES.keys())})
return infos
@staticmethod
def distributed_transaction_begin(*instances):
if not instances:
return
thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
keys = ['%s.%i' % (thread_key, i) for (i,db) in instances]
instances = enumerate(instances)
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
'distributed transaction not supported by %s' % db._dbname)
for (i, db) in instances:
db._adapter.distributed_transaction_begin(keys[i])
@staticmethod
def distributed_transaction_commit(*instances):
if not instances:
return
instances = enumerate(instances)
thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
keys = ['%s.%i' % (thread_key, i) for (i,db) in instances]
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
'distributed transaction not suported by %s' % db._dbanme)<|fim▁hole|> db._adapter.prepare(keys[i])
except:
for (i, db) in instances:
db._adapter.rollback_prepared(keys[i])
raise RuntimeError('failure to commit distributed transaction')
else:
for (i, db) in instances:
db._adapter.commit_prepared(keys[i])
return
def __init__(self, uri='sqlite://dummy.db',
pool_size=0, folder=None,
db_codec='UTF-8', check_reserved=None,
migrate=True, fake_migrate=False,
migrate_enabled=True, fake_migrate_all=False,
decode_credentials=False, driver_args=None,
adapter_args=None, attempts=5, auto_import=False,
bigint_id=False, debug=False, lazy_tables=False,
db_uid=None, do_connect=True,
after_connection=None, tables=None, ignore_field_case=True,
entity_quoting=False, table_hash=None):
if uri == '<zombie>' and db_uid is not None:
return
super(DAL, self).__init__()
from .drivers import DRIVERS, is_jdbc
self._drivers_available = DRIVERS
if not decode_credentials:
credential_decoder = lambda cred: cred
else:
credential_decoder = lambda cred: urllib.unquote(cred)
self._folder = folder
if folder:
self.set_folder(folder)
self._uri = uri
self._pool_size = pool_size
self._db_codec = db_codec
self._lastsql = ''
self._timings = []
self._pending_references = {}
self._request_tenant = 'request_tenant'
self._common_fields = []
self._referee_name = '%(table)s'
self._bigint_id = bigint_id
self._debug = debug
self._migrated = []
self._LAZY_TABLES = {}
self._lazy_tables = lazy_tables
self._tables = SQLCallableList()
self._driver_args = driver_args
self._adapter_args = adapter_args
self._check_reserved = check_reserved
self._decode_credentials = decode_credentials
self._attempts = attempts
self._do_connect = do_connect
self._ignore_field_case = ignore_field_case
if not str(attempts).isdigit() or attempts < 0:
attempts = 5
if uri:
uris = isinstance(uri, (list, tuple)) and uri or [uri]
error = ''
connected = False
for k in range(attempts):
for uri in uris:
try:
if is_jdbc and not uri.startswith('jdbc:'):
uri = 'jdbc:'+uri
self._dbname = REGEX_DBNAME.match(uri).group()
if not self._dbname in ADAPTERS:
raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname)
# notice that driver args or {} else driver_args
# defaults to {} global, not correct
kwargs = dict(db=self,uri=uri,
pool_size=pool_size,
folder=folder,
db_codec=db_codec,
credential_decoder=credential_decoder,
driver_args=driver_args or {},
adapter_args=adapter_args or {},
do_connect=do_connect,
after_connection=after_connection,
entity_quoting=entity_quoting)
self._adapter = ADAPTERS[self._dbname](**kwargs)
types = ADAPTERS[self._dbname].types
# copy so multiple DAL() possible
self._adapter.types = copy.copy(types)
self._adapter.build_parsemap()
self._adapter.ignore_field_case = ignore_field_case
if bigint_id:
if 'big-id' in types and 'reference' in types:
self._adapter.types['id'] = types['big-id']
self._adapter.types['reference'] = types['big-reference']
connected = True
break
except SyntaxError:
raise
except Exception:
tb = traceback.format_exc()
self.logger.debug('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb))
if connected:
break
else:
time.sleep(1)
if not connected:
raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb))
else:
self._adapter = BaseAdapter(db=self,pool_size=0,
uri='None',folder=folder,
db_codec=db_codec, after_connection=after_connection,
entity_quoting=entity_quoting)
migrate = fake_migrate = False
adapter = self._adapter
self._uri_hash = table_hash or hashlib_md5(adapter.uri).hexdigest()
self.check_reserved = check_reserved
if self.check_reserved:
from .contrib.reserved_sql_keywords import ADAPTERS as RSK
self.RSK = RSK
self._migrate = migrate
self._fake_migrate = fake_migrate
self._migrate_enabled = migrate_enabled
self._fake_migrate_all = fake_migrate_all
if self.serializers is not None:
for k, v in self.serializers.items():
serializers._custom_[k] = v
if auto_import or tables:
self.import_table_definitions(adapter.folder,
tables=tables)
@property
def tables(self):
return self._tables
def import_table_definitions(self, path, migrate=False,
fake_migrate=False, tables=None):
if tables:
for table in tables:
self.define_table(**table)
else:
pattern = pjoin(path,self._uri_hash+'_*.table')
for filename in glob.glob(pattern):
tfile = self._adapter.file_open(filename, 'r')
try:
sql_fields = pickle.load(tfile)
name = filename[len(pattern)-7:-6]
mf = [(value['sortable'],
Field(key,
type=value['type'],
length=value.get('length',None),
notnull=value.get('notnull',False),
unique=value.get('unique',False))) \
for key, value in sql_fields.iteritems()]
mf.sort(lambda a,b: cmp(a[0],b[0]))
self.define_table(name,*[item[1] for item in mf],
**dict(migrate=migrate,
fake_migrate=fake_migrate))
finally:
self._adapter.file_close(tfile)
def check_reserved_keyword(self, name):
"""
Validates `name` against SQL keywords
Uses self.check_reserve which is a list of operators to use.
"""
for backend in self.check_reserved:
if name.upper() in self.RSK[backend]:
raise SyntaxError(
'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper()))
def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True):
"""
Example:
Use as::
db.define_table('person',Field('name'),Field('info'))
db.define_table('pet',
Field('ownedby',db.person),
Field('name'),Field('info')
)
@request.restful()
def index():
def GET(*args,**vars):
patterns = [
"/friends[person]",
"/{person.name}/:field",
"/{person.name}/pets[pet.ownedby]",
"/{person.name}/pets[pet.ownedby]/{pet.name}",
"/{person.name}/pets[pet.ownedby]/{pet.name}/:field",
("/dogs[pet]", db.pet.info=='dog'),
("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'),
]
parser = db.parse_as_rest(patterns,args,vars)
if parser.status == 200:
return dict(content=parser.response)
else:
raise HTTP(parser.status,parser.error)
def POST(table_name,**vars):
if table_name == 'person':
return db.person.validate_and_insert(**vars)
elif table_name == 'pet':
return db.pet.validate_and_insert(**vars)
else:
raise HTTP(400)
return locals()
"""
db = self
re1 = REGEX_SEARCH_PATTERN
re2 = REGEX_SQUARE_BRACKETS
def auto_table(table,base='',depth=0):
patterns = []
for field in db[table].fields:
if base:
tag = '%s/%s' % (base,field.replace('_','-'))
else:
tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-'))
f = db[table][field]
if not f.readable: continue
if f.type=='id' or 'slug' in field or f.type.startswith('reference'):
tag += '/{%s.%s}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
elif f.type.startswith('boolean'):
tag += '/{%s.%s}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
elif f.type in ('float','double','integer','bigint'):
tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
elif f.type.startswith('list:'):
tag += '/{%s.%s.contains}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
elif f.type in ('date','datetime'):
tag+= '/{%s.%s.year}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
tag+='/{%s.%s.month}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
tag+='/{%s.%s.day}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
if f.type in ('datetime','time'):
tag+= '/{%s.%s.hour}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
tag+='/{%s.%s.minute}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
tag+='/{%s.%s.second}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
if depth>0:
for f in db[table]._referenced_by:
tag+='/%s[%s.%s]' % (table,f.tablename,f.name)
patterns.append(tag)
patterns += auto_table(table,base=tag,depth=depth-1)
return patterns
if patterns == 'auto':
patterns=[]
for table in db.tables:
if not table.startswith('auth_'):
patterns.append('/%s[%s]' % (table,table))
patterns += auto_table(table,base='',depth=1)
else:
i = 0
while i<len(patterns):
pattern = patterns[i]
if not isinstance(pattern,str):
pattern = pattern[0]
tokens = pattern.split('/')
if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],
'/'.join(tokens[:-1]))
patterns = patterns[:i]+new_patterns+patterns[i+1:]
i += len(new_patterns)
else:
i += 1
if '/'.join(args) == 'patterns':
return Row({'status':200,'pattern':'list',
'error':None,'response':patterns})
for pattern in patterns:
basequery, exposedfields = None, []
if isinstance(pattern,tuple):
if len(pattern)==2:
pattern, basequery = pattern
elif len(pattern)>2:
pattern, basequery, exposedfields = pattern[0:3]
otable=table=None
if not isinstance(queries,dict):
dbset=db(queries)
if basequery is not None:
dbset = dbset(basequery)
i=0
tags = pattern[1:].split('/')
if len(tags)!=len(args):
continue
for tag in tags:
if re1.match(tag):
# print 're1:'+tag
tokens = tag[1:-1].split('.')
table, field = tokens[0], tokens[1]
if not otable or table == otable:
if len(tokens)==2 or tokens[2]=='eq':
query = db[table][field]==args[i]
elif tokens[2]=='ne':
query = db[table][field]!=args[i]
elif tokens[2]=='lt':
query = db[table][field]<args[i]
elif tokens[2]=='gt':
query = db[table][field]>args[i]
elif tokens[2]=='ge':
query = db[table][field]>=args[i]
elif tokens[2]=='le':
query = db[table][field]<=args[i]
elif tokens[2]=='year':
query = db[table][field].year()==args[i]
elif tokens[2]=='month':
query = db[table][field].month()==args[i]
elif tokens[2]=='day':
query = db[table][field].day()==args[i]
elif tokens[2]=='hour':
query = db[table][field].hour()==args[i]
elif tokens[2]=='minute':
query = db[table][field].minutes()==args[i]
elif tokens[2]=='second':
query = db[table][field].seconds()==args[i]
elif tokens[2]=='startswith':
query = db[table][field].startswith(args[i])
elif tokens[2]=='contains':
query = db[table][field].contains(args[i])
else:
raise RuntimeError("invalid pattern: %s" % pattern)
if len(tokens)==4 and tokens[3]=='not':
query = ~query
elif len(tokens)>=4:
raise RuntimeError("invalid pattern: %s" % pattern)
if not otable and isinstance(queries,dict):
dbset = db(queries[table])
if basequery is not None:
dbset = dbset(basequery)
dbset=dbset(query)
else:
raise RuntimeError("missing relation in pattern: %s" % pattern)
elif re2.match(tag) and args[i]==tag[:tag.find('[')]:
ref = tag[tag.find('[')+1:-1]
if '.' in ref and otable:
table,field = ref.split('.')
selfld = '_id'
if db[table][field].type.startswith('reference '):
refs = [ x.name for x in db[otable] if x.type == db[table][field].type ]
else:
refs = [ x.name for x in db[table]._referenced_by if x.tablename==otable ]
if refs:
selfld = refs[0]
if nested_select:
try:
dbset=db(db[table][field].belongs(dbset._select(db[otable][selfld])))
except ValueError:
return Row({'status':400,'pattern':pattern,
'error':'invalid path','response':None})
else:
items = [item.id for item in dbset.select(db[otable][selfld])]
dbset=db(db[table][field].belongs(items))
else:
table = ref
if not otable and isinstance(queries,dict):
dbset = db(queries[table])
dbset=dbset(db[table])
elif tag==':field' and table:
# print 're3:'+tag
field = args[i]
if not field in db[table]: break
# hand-built patterns should respect .readable=False as well
if not db[table][field].readable:
return Row({'status':418,'pattern':pattern,
'error':'I\'m a teapot','response':None})
try:
distinct = vars.get('distinct', False) == 'True'
offset = long(vars.get('offset',None) or 0)
limits = (offset,long(vars.get('limit',None) or 1000)+offset)
except ValueError:
return Row({'status':400,'error':'invalid limits','response':None})
items = dbset.select(db[table][field], distinct=distinct, limitby=limits)
if items:
return Row({'status':200,'response':items,
'pattern':pattern})
else:
return Row({'status':404,'pattern':pattern,
'error':'no record found','response':None})
elif tag != args[i]:
break
otable = table
i += 1
if i == len(tags) and table:
if hasattr(db[table], '_id'):
ofields = vars.get('order', db[table]._id.name).split('|')
else:
ofields = vars.get('order', db[table]._primarykey[0]).split('|')
try:
orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields]
except (KeyError, AttributeError):
return Row({'status':400,'error':'invalid orderby','response':None})
if exposedfields:
fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable]
else:
fields = [field for field in db[table] if field.readable]
count = dbset.count()
try:
offset = long(vars.get('offset',None) or 0)
limits = (offset,long(vars.get('limit',None) or 1000)+offset)
except ValueError:
return Row({'status':400,'error':'invalid limits','response':None})
#if count > limits[1]-limits[0]:
# return Row({'status':400,'error':'too many records','response':None})
try:
response = dbset.select(limitby=limits,orderby=orderby,*fields)
except ValueError:
return Row({'status':400,'pattern':pattern,
'error':'invalid path','response':None})
return Row({'status':200,'response':response,
'pattern':pattern,'count':count})
return Row({'status':400,'error':'no matching pattern','response':None})
def define_table(
self,
tablename,
*fields,
**args
):
if not fields and 'fields' in args:
fields = args.get('fields',())
if not isinstance(tablename, str):
if isinstance(tablename, unicode):
try:
tablename = str(tablename)
except UnicodeEncodeError:
raise SyntaxError("invalid unicode table name")
else:
raise SyntaxError("missing table name")
elif hasattr(self,tablename) or tablename in self.tables:
if args.get('redefine',False):
delattr(self, tablename)
else:
raise SyntaxError('table already defined: %s' % tablename)
elif tablename.startswith('_') or hasattr(self,tablename) or \
REGEX_PYTHON_KEYWORDS.match(tablename):
raise SyntaxError('invalid table name: %s' % tablename)
elif self.check_reserved:
self.check_reserved_keyword(tablename)
else:
invalid_args = set(args)-TABLE_ARGS
if invalid_args:
raise SyntaxError('invalid table "%s" attributes: %s' \
% (tablename,invalid_args))
if self._lazy_tables and tablename not in self._LAZY_TABLES:
self._LAZY_TABLES[tablename] = (tablename,fields,args)
table = None
else:
table = self.lazy_define_table(tablename,*fields,**args)
if not tablename in self.tables:
self.tables.append(tablename)
return table
def lazy_define_table(
self,
tablename,
*fields,
**args
):
args_get = args.get
common_fields = self._common_fields
if common_fields:
fields = list(fields) + list(common_fields)
table_class = args_get('table_class',Table)
table = table_class(self, tablename, *fields, **args)
table._actual = True
self[tablename] = table
# must follow above line to handle self references
table._create_references()
for field in table:
if field.requires == DEFAULT:
field.requires = auto_validators(field)
if field.represent is None:
field.represent = auto_represent(field)
migrate = self._migrate_enabled and args_get('migrate',self._migrate)
if migrate and not self._uri in (None,'None') \
or self._adapter.dbengine=='google:datastore':
fake_migrate = self._fake_migrate_all or \
args_get('fake_migrate',self._fake_migrate)
polymodel = args_get('polymodel',None)
try:
GLOBAL_LOCKER.acquire()
self._lastsql = self._adapter.create_table(
table,migrate=migrate,
fake_migrate=fake_migrate,
polymodel=polymodel)
finally:
GLOBAL_LOCKER.release()
else:
table._dbt = None
on_define = args_get('on_define',None)
if on_define: on_define(table)
return table
def as_dict(self, flat=False, sanitize=True):
db_uid = uri = None
if not sanitize:
uri, db_uid = (self._uri, self._db_uid)
db_as_dict = dict(
tables=[],
uri=uri,
db_uid=db_uid,
**dict(
[(k, getattr(self, "_" + k, None)) for k in [
'pool_size', 'folder', 'db_codec', 'check_reserved',
'migrate', 'fake_migrate', 'migrate_enabled',
'fake_migrate_all', 'decode_credentials', 'driver_args',
'adapter_args', 'attempts', 'bigint_id', 'debug',
'lazy_tables', 'do_connect']]))
for table in self:
db_as_dict["tables"].append(table.as_dict(flat=flat,
sanitize=sanitize))
return db_as_dict
def __contains__(self, tablename):
try:
return tablename in self.tables
except AttributeError:
# The instance has no .tables attribute yet
return False
def __iter__(self):
for tablename in self.tables:
yield self[tablename]
def __getitem__(self, key):
return self.__getattr__(str(key))
def __getattr__(self, key):
if object.__getattribute__(self, '_lazy_tables') and \
key in object.__getattribute__(self, '_LAZY_TABLES'):
tablename, fields, args = self._LAZY_TABLES.pop(key)
return self.lazy_define_table(tablename, *fields, **args)
return super(DAL, self).__getattr__(key)
def __setattr__(self, key, value):
if key[:1] != '_' and key in self:
raise SyntaxError(
'Object %s exists and cannot be redefined' % key)
return super(DAL, self).__setattr__(key, value)
def __repr__(self):
if hasattr(self, '_uri'):
return '<DAL uri="%s">' % hide_password(self._adapter.uri)
else:
return '<DAL db_uid="%s">' % self._db_uid
def smart_query(self, fields, text):
return Set(self, smart_query(fields, text))
def __call__(self, query=None, ignore_common_filters=None):
if isinstance(query, Table):
query = self._adapter.id_query(query)
elif isinstance(query, Field):
query = query!=None
elif isinstance(query, dict):
icf = query.get("ignore_common_filters")
if icf: ignore_common_filters = icf
return Set(self, query, ignore_common_filters=ignore_common_filters)
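# Usage sketch for the call form above (table and values are hypothetical):
# db(db.person.name == 'Alice') returns a Set restricted by that query,
# db(db.person) selects the whole table via id_query, and db() with no
# arguments yields an unrestricted Set.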
def commit(self):
self._adapter.commit()
def rollback(self):
self._adapter.rollback()
def close(self):
self._adapter.close()
if self._db_uid in THREAD_LOCAL.db_instances:
db_group = THREAD_LOCAL.db_instances[self._db_uid]
db_group.remove(self)
if not db_group:
del THREAD_LOCAL.db_instances[self._db_uid]
def executesql(self, query, placeholders=None, as_dict=False,
fields=None, colnames=None, as_ordered_dict=False):
"""
Executes an arbitrary query
Args:
query (str): the query to submit to the backend
placeholders: optional, defaults to None. If using raw SQL with
placeholders, placeholders may be a sequence of values to be
substituted in, or (if supported by the DB driver) a dictionary
with keys matching named placeholders in your SQL.
as_dict: defaults to False. If using raw SQL it can be set to True,
and the results cursor returned by the DB driver will be converted
to a sequence of dictionaries keyed with the db field names.
Results returned with as_dict=True are the same as those returned
when applying .to_list() to a DAL query. If "as_ordered_dict"=True
the behaviour is the same as when "as_dict"=True, with the keys
(field names) guaranteed to be in the same order as returned by
the select statement executed on the database.
fields: list of DAL Fields that match the fields returned from the
DB. The Field objects should be part of one or more Table
objects defined on the DAL object. The "fields" list can include
one or more DAL Table objects in addition to or instead of
including Field objects, or it can be just a single table
(not in a list). In that case, the Field objects will be
extracted from the table(s).
Note:
if either `fields` or `colnames` is provided, the results
will be converted to a DAL `Rows` object using the
`db._adapter.parse()` method
colnames: list of field names in tablename.fieldname format
Note:
It is also possible to specify both "fields" and the associated
"colnames". In that case, "fields" can also include DAL Expression
objects in addition to Field objects. For Field objects in "fields",
the associated "colnames" must still be in tablename.fieldname
format. For Expression objects in "fields", the associated
"colnames" can be any arbitrary labels.
DAL Table objects referred to by "fields" or "colnames" can be dummy
tables and do not have to represent any real tables in the database.
Also, note that the "fields" and "colnames" must be in the
same order as the fields in the results cursor returned from the DB.
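Example:
Illustrative sketch only; assumes a table 'person' with a 'name'
field has already been defined on this DAL instance::
# parsed into a Rows object, as if produced by db().select(...)
rows = db.executesql('SELECT person.id, person.name FROM person;',
fields=[db.person.id, db.person.name])
# raw results as a list of dictionaries keyed by column name
records = db.executesql('SELECT * FROM person;', as_dict=True)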
"""
adapter = self._adapter
if placeholders:
adapter.execute(query, placeholders)
else:
adapter.execute(query)
if as_dict or as_ordered_dict:
if not hasattr(adapter.cursor,'description'):
raise RuntimeError("database does not support executesql(...,as_dict=True)")
# Non-DAL legacy db query, converts cursor results to dict.
# sequence of 7-item sequences. each sequence tells about a column.
# first item is always the field name according to Python Database API specs
columns = adapter.cursor.description
# reduce the column info down to just the field names
fields = colnames or [f[0] for f in columns]
if len(fields) != len(set(fields)):
raise RuntimeError("Result set includes duplicate column names. Specify unique column names using the 'colnames' argument")
#: avoid bytes strings in columns names (py3)
if columns and not PY2:
for i in range(0, len(fields)):
if isinstance(fields[i], bytes):
fields[i] = fields[i].decode("utf8")
# will hold our finished resultset in a list
data = adapter._fetchall()
# convert the list for each row into a dictionary so it's
# easier to work with. row['field_name'] rather than row[0]
if as_ordered_dict:
_dict = OrderedDict
else:
_dict = dict
return [_dict(zip(fields,row)) for row in data]
try:
data = adapter._fetchall()
except:
return None
if fields or colnames:
fields = [] if fields is None else fields
if not isinstance(fields, list):
fields = [fields]
extracted_fields = []
for field in fields:
if isinstance(field, Table):
extracted_fields.extend([f for f in field])
else:
extracted_fields.append(field)
if not colnames:
colnames = ['%s.%s' % (f.tablename, f.name)
for f in extracted_fields]
data = adapter.parse(
data, fields=extracted_fields, colnames=colnames)
return data
def _remove_references_to(self, thistable):
for table in self:
table._referenced_by = [field for field in table._referenced_by
if not field.table==thistable]
def has_representer(self, name):
return callable(self.representers.get(name))
def represent(self, name, *args, **kwargs):
return self.representers[name](*args, **kwargs)
def export_to_csv_file(self, ofile, *args, **kwargs):
step = long(kwargs.get('max_fetch_rows',500))
write_colnames = kwargs['write_colnames'] = \
kwargs.get("write_colnames", True)
for table in self.tables:
ofile.write('TABLE %s\r\n' % table)
query = self._adapter.id_query(self[table])
nrows = self(query).count()
kwargs['write_colnames'] = write_colnames
for k in range(0,nrows,step):
self(query).select(limitby=(k,k+step)).export_to_csv_file(
ofile, *args, **kwargs)
kwargs['write_colnames'] = False
ofile.write('\r\n\r\n')
ofile.write('END')
def import_from_csv_file(self, ifile, id_map=None, null='<NULL>',
unique='uuid', map_tablenames=None,
ignore_missing_tables=False,
*args, **kwargs):
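# Expected input format (as produced by export_to_csv_file): one block per
# table, each starting with a "TABLE <tablename>" line followed by that
# table's CSV rows, a blank line between tables, and a final "END" line.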
#if id_map is None: id_map={}
id_offset = {} # only used if id_map is None
map_tablenames = map_tablenames or {}
for line in ifile:
line = line.strip()
if not line:
continue
elif line == 'END':
return
elif not line.startswith('TABLE ') or \
not line[6:] in self.tables:
raise SyntaxError('invalid file format')
else:
tablename = line[6:]
tablename = map_tablenames.get(tablename,tablename)
if tablename is not None and tablename in self.tables:
self[tablename].import_from_csv_file(
ifile, id_map, null, unique, id_offset,
*args, **kwargs)
elif tablename is None or ignore_missing_tables:
# skip all non-empty lines
for line in ifile:
if not line.strip():
break
else:
raise RuntimeError("Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)")
def can_join(self):
return self._adapter.can_join()
def DAL_unpickler(db_uid):
return DAL('<zombie>', db_uid=db_uid)
def DAL_pickler(db):
return DAL_unpickler, (db._db_uid,)
copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)<|fim▁end|> | try:
for (i, db) in instances: |
<|file_name|>075_Sort_Colors.js<|end_file_name|><|fim▁begin|><|fim▁hole|> * @return {void} Do not return anything, modify nums in-place instead.
*/
var sortColors = function(nums) {
// Use an explicit numeric comparator; the default sort compares string values.
nums.sort(function(a, b) { return a - b; });
};<|fim▁end|> | /**
* @param {number[]} nums |
<|file_name|>DataTypePointInTime.java<|end_file_name|><|fim▁begin|>package org.hl7.v3;
import javax.xml.bind.annotation.XmlEnum;
import javax.xml.bind.annotation.XmlType;
/**
* <p>Java class for DataTypePointInTime.
*
* <p>The following schema fragment specifies the expected content contained within this class.
* <p>
* <pre>
* <simpleType name="DataTypePointInTime">
<|fim▁hole|> * </pre>
*
*/
@XmlType(name = "DataTypePointInTime")
@XmlEnum
public enum DataTypePointInTime {
TS;
public String value() {
return name();
}
public static DataTypePointInTime fromValue(String v) {
return valueOf(v);
}
}<|fim▁end|> | * <restriction base="{urn:hl7-org:v3}cs">
* <enumeration value="TS"/>
* </restriction>
* </simpleType>
|
<|file_name|>10.4.3-1-19gs.js<|end_file_name|><|fim▁begin|>/// Copyright (c) 2012 Ecma International. All rights reserved.
/// Ecma International makes this code available under the terms and conditions set
/// forth on http://hg.ecmascript.org/tests/test262/raw-file/tip/LICENSE (the
/// "Use Terms"). Any redistribution of this code must retain the above
/// copyright and this notice and otherwise comply with the Use Terms.
/**
<|fim▁hole|> * @onlyStrict
*/
"use strict";
var my_eval = eval;
if (my_eval("this") !== fnGlobalObject()) {
throw "'this' had incorrect value!";
}<|fim▁end|> | * @path ch10/10.4/10.4.3/10.4.3-1-19gs.js
* @description Strict - checking 'this' from a global scope (indirect eval used within strict mode)
|
<|file_name|>feature_column_ops_test.py<|end_file_name|><|fim▁begin|># Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layers.feature_column_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.python.ops import init_ops
class TransformerTest(tf.test.TestCase):
def testRealValuedColumnIsIdentityTransformation(self):
real_valued = tf.contrib.layers.real_valued_column("price")
features = {"price": tf.constant([[20.], [110], [-3]])}
output = feature_column_ops._Transformer(features).transform(real_valued)
with self.test_session():
self.assertAllEqual(output.eval(), [[20.], [110], [-3]])
def testBucketizedColumn(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
# buckets 2, 3, 0
features = {"price": tf.constant([[20.], [110], [-3]])}
output = feature_column_ops._Transformer(features).transform(bucket)
with self.test_session():
self.assertAllEqual(output.eval(), [[2], [3], [0]])
def testBucketizedColumnWithMultiDimensions(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
# buckets 2, 3, 0
features = {"price": tf.constant([[20., 110], [110., 20], [-3, -3]])}
output = feature_column_ops._Transformer(features).transform(bucket)
with self.test_session():
self.assertAllEqual(output.eval(), [[2, 3], [3, 2], [0, 0]])
def testCachedTransformation(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
# buckets 2, 3, 0
features = {"price": tf.constant([[20.], [110], [-3]])}
transformer = feature_column_ops._Transformer(features)
with self.test_session() as sess:
transformer.transform(bucket)
num_of_ops = len(sess.graph.get_operations())
# Verify that the second call to transform the same feature
# doesn't increase the number of ops.
transformer.transform(bucket)
self.assertEqual(num_of_ops, len(sess.graph.get_operations()))
def testSparseColumnWithHashBucket(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
output = feature_column_ops._Transformer(features).transform(hashed_sparse)
with self.test_session():
self.assertEqual(output.values.dtype, tf.int64)
self.assertTrue(all(x < 10 and x >= 0 for x in output.values.eval()))
self.assertAllEqual(output.indices.eval(), wire_tensor.indices.eval())
self.assertAllEqual(output.shape.eval(), wire_tensor.shape.eval())
def testEmbeddingColumn(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
output = feature_column_ops._Transformer(features).transform(
tf.contrib.layers.embedding_column(hashed_sparse, 10))
expected = feature_column_ops._Transformer(features).transform(
hashed_sparse)
with self.test_session():
self.assertAllEqual(output.values.eval(), expected.values.eval())
self.assertAllEqual(output.indices.eval(), expected.indices.eval())
self.assertAllEqual(output.shape.eval(), expected.shape.eval())
def testSparseColumnWithKeys(self):
keys_sparse = tf.contrib.layers.sparse_column_with_keys(
"wire", ["marlo", "omar", "stringer"])
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
output = feature_column_ops._Transformer(features).transform(keys_sparse)
with self.test_session():
tf.initialize_all_tables().run()
self.assertEqual(output.values.dtype, tf.int64)
self.assertAllEqual(output.values.eval(), [1, 2, 0])
self.assertAllEqual(output.indices.eval(), wire_tensor.indices.eval())
self.assertAllEqual(output.shape.eval(), wire_tensor.shape.eval())
def testSparseColumnWithHashBucket_IsIntegerized(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_integerized_feature(
"wire", 10)
wire_tensor = tf.SparseTensor(values=[100, 1, 25],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
output = feature_column_ops._Transformer(features).transform(hashed_sparse)
with self.test_session():
self.assertEqual(output.values.dtype, tf.int32)
self.assertTrue(all(x < 10 and x >= 0 for x in output.values.eval()))
self.assertAllEqual(output.indices.eval(), wire_tensor.indices.eval())
self.assertAllEqual(output.shape.eval(), wire_tensor.shape.eval())
def testWeightedSparseColumn(self):
ids = tf.contrib.layers.sparse_column_with_keys(
"ids", ["marlo", "omar", "stringer"])
ids_tensor = tf.SparseTensor(values=["stringer", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, "weights")
weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"ids": ids_tensor,
"weights": weights_tensor}
output = feature_column_ops._Transformer(features).transform(weighted_ids)
with self.test_session():
tf.initialize_all_tables().run()
self.assertAllEqual(output[0].shape.eval(), ids_tensor.shape.eval())
self.assertAllEqual(output[0].indices.eval(), ids_tensor.indices.eval())
self.assertAllEqual(output[0].values.eval(), [2, 2, 0])
self.assertAllEqual(output[1].shape.eval(), weights_tensor.shape.eval())
self.assertAllEqual(output[1].indices.eval(),
weights_tensor.indices.eval())
self.assertEqual(output[1].values.dtype, tf.float32)
self.assertAllEqual(output[1].values.eval(), weights_tensor.values.eval())
def testCrossColumn(self):
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=15)
features = {
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[2, 1])
}
output = feature_column_ops._Transformer(features).transform(
country_language)
with self.test_session():
self.assertEqual(output.values.dtype, tf.int64)
self.assertTrue(all(x < 15 and x >= 0 for x in output.values.eval()))
def testCrossWithBucketizedColumn(self):
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_price = tf.contrib.layers.crossed_column(
[country, price_bucket], hash_bucket_size=15)
features = {
"price": tf.constant([[20.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
output = feature_column_ops._Transformer(features).transform(country_price)
with self.test_session():
self.assertEqual(output.values.dtype, tf.int64)
self.assertTrue(all(x < 15 and x >= 0 for x in output.values.eval()))
def testCrossWithMultiDimensionBucketizedColumn(self):
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
country_price = tf.contrib.layers.crossed_column(
[country, price_bucket], hash_bucket_size=1000)
with tf.Graph().as_default():
features = {"price": tf.constant([[20., 210.], [110., 50.], [-3., -30.]]),
"country": tf.SparseTensor(values=["US", "SV", "US"],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 2])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[country_price],
num_outputs=1))
weights = column_to_variable[country_price][0]
grad = tf.squeeze(tf.gradients(output, weights)[0].values)
with self.test_session():
tf.initialize_all_variables().run()
self.assertEqual(len(grad.eval()), 6)
def testCrossWithCrossedColumn(self):
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_price = tf.contrib.layers.crossed_column(
[country, price_bucket], hash_bucket_size=15)
wire = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_country_price = tf.contrib.layers.crossed_column(
[wire, country_price], hash_bucket_size=15)
features = {
"price": tf.constant([[20.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2]),
"wire": tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [0, 1], [0, 2]],
shape=[1, 3])
}
output = feature_column_ops._Transformer(features).transform(
wire_country_price)
with self.test_session():
self.assertEqual(output.values.dtype, tf.int64)
self.assertTrue(all(x < 15 and x >= 0 for x in output.values.eval()))
def testIfFeatureTableContainsTransformationReturnIt(self):
any_column = tf.contrib.layers.sparse_column_with_hash_bucket("sparse", 10)
features = {any_column: "any-thing-even-not-a-tensor"}
output = feature_column_ops._Transformer(features).transform(any_column)
self.assertEqual(output, "any-thing-even-not-a-tensor")
class InputLayerTest(tf.test.TestCase):
def testRealValuedColumn(self):
real_valued = tf.contrib.layers.real_valued_column("price")
features = {"price": tf.constant([[20.], [110], [-3]])}
output = tf.contrib.layers.input_from_feature_columns(features,
[real_valued])
with self.test_session():
self.assertAllClose(output.eval(), features["price"].eval())
def testRealValuedColumnWithMultiDimensions(self):
real_valued = tf.contrib.layers.real_valued_column("price", 2)
features = {"price": tf.constant([[20., 10.],
[110, 0.],
[-3, 30]])}
output = tf.contrib.layers.input_from_feature_columns(features,
[real_valued])
with self.test_session():
self.assertAllClose(output.eval(), features["price"].eval())
def testBucketizedColumn(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
# buckets 2, 3, 0
features = {"price": tf.constant([[20.], [110], [-3]])}
output = tf.contrib.layers.input_from_feature_columns(features, [bucket])
expected = [[0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0]]
with self.test_session():
self.assertAllClose(output.eval(), expected)
def testBucketizedColumnWithMultiDimensions(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
# buckets [2, 3], [3, 2], [0, 0]. dimension = 2
features = {"price": tf.constant([[20., 200],
[110, 50],
[-3, -3]])}
output = tf.contrib.layers.input_from_feature_columns(features, [bucket])
expected = [[0, 0, 1, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 1, 0, 0, 0]]
with self.test_session():
self.assertAllClose(output.eval(), expected)
def testEmbeddingColumn(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
embeded_sparse = tf.contrib.layers.embedding_column(hashed_sparse, 10)
output = tf.contrib.layers.input_from_feature_columns(features,
[embeded_sparse])
with self.test_session():
tf.initialize_all_variables().run()
self.assertAllEqual(output.eval().shape, [2, 10])
def testHashedEmbeddingColumn(self):
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo", "omar"],
indices=[[0, 0], [1, 0], [1, 1], [2, 0]],
shape=[3, 2])
features = {"wire": wire_tensor}
# Big enough hash space so that hopefully there is no collision
embedded_sparse = tf.contrib.layers.hashed_embedding_column("wire", 1000, 3)
output = tf.contrib.layers.input_from_feature_columns(
features, [embedded_sparse], weight_collections=["my_collection"])
weights = tf.get_collection("my_collection")
grad = tf.gradients(output, weights)
with self.test_session():
tf.initialize_all_variables().run()
gradient_values = []
# Collect the gradient from the different partitions (one in this test)
for p in range(len(grad)):
gradient_values.extend(grad[p].values.eval())
gradient_values.sort()
self.assertAllEqual(gradient_values, [0.5]*6 + [2]*3)
def testEmbeddingColumnWithInitializer(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
init_value = 133.7
embeded_sparse = tf.contrib.layers.embedding_column(
hashed_sparse,
10, initializer=tf.constant_initializer(init_value))
output = tf.contrib.layers.input_from_feature_columns(features,
[embeded_sparse])
with self.test_session():
tf.initialize_all_variables().run()
output_eval = output.eval()
self.assertAllEqual(output_eval.shape, [2, 10])
self.assertAllClose(output_eval, np.tile(init_value, [2, 10]))
def testEmbeddingColumnWithMultipleInitializers(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
embedded_sparse = tf.contrib.layers.embedding_column(
hashed_sparse,
10,
initializer=tf.truncated_normal_initializer(mean=42,
stddev=1337))
embedded_sparse_alternate = tf.contrib.layers.embedding_column(
hashed_sparse,
10,
initializer=tf.truncated_normal_initializer(mean=1337,
stddev=42))
# Makes sure that trying to use different initializers with the same
# embedding column explicitly fails.
with self.test_session():
with self.assertRaisesRegexp(
ValueError,
"Duplicate feature column key found for column: wire_embedding"):
tf.contrib.layers.input_from_feature_columns(
features, [embedded_sparse, embedded_sparse_alternate])
def testEmbeddingColumnWithWeightedSparseColumn(self):
ids = tf.contrib.layers.sparse_column_with_keys(
"ids", ["marlo", "omar", "stringer"])
ids_tensor = tf.SparseTensor(values=["stringer", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, "weights")
weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"ids": ids_tensor,
"weights": weights_tensor}
embeded_sparse = tf.contrib.layers.embedding_column(weighted_ids, 10)
output = tf.contrib.layers.input_from_feature_columns(features,
[embeded_sparse])
with self.test_session():
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
self.assertAllEqual(output.eval().shape, [2, 10])
def testEmbeddingColumnWithCrossedColumn(self):
a = tf.contrib.layers.sparse_column_with_hash_bucket("aaa",
hash_bucket_size=100)
b = tf.contrib.layers.sparse_column_with_hash_bucket("bbb",
hash_bucket_size=100)
crossed = tf.contrib.layers.crossed_column(
set([a, b]), hash_bucket_size=10000)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"aaa": wire_tensor, "bbb": wire_tensor}
embeded_sparse = tf.contrib.layers.embedding_column(crossed, 10)
output = tf.contrib.layers.input_from_feature_columns(features,
[embeded_sparse])
with self.test_session():
tf.initialize_all_variables().run()
self.assertAllEqual(output.eval().shape, [2, 10])
def testSparseColumn(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
with self.test_session():
with self.assertRaisesRegexp(
ValueError, "Error creating input layer for column: wire"):
tf.initialize_all_variables().run()
tf.contrib.layers.input_from_feature_columns(features, [hashed_sparse])
def testWeightedSparseColumn(self):
ids = tf.contrib.layers.sparse_column_with_keys(
"ids", ["marlo", "omar", "stringer"])
ids_tensor = tf.SparseTensor(values=["stringer", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, "weights")
weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"ids": ids_tensor,
"weights": weights_tensor}
with self.test_session():
with self.assertRaisesRegexp(
ValueError,
"Error creating input layer for column: ids_weighted_by_weights"):
tf.initialize_all_tables().run()
tf.contrib.layers.input_from_feature_columns(features, [weighted_ids])
def testCrossedColumn(self):
a = tf.contrib.layers.sparse_column_with_hash_bucket("aaa",
hash_bucket_size=100)
b = tf.contrib.layers.sparse_column_with_hash_bucket("bbb",
hash_bucket_size=100)
crossed = tf.contrib.layers.crossed_column(
set([a, b]), hash_bucket_size=10000)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"aaa": wire_tensor, "bbb": wire_tensor}
with self.test_session():
with self.assertRaisesRegexp(
ValueError, "Error creating input layer for column: aaa_X_bbb"):
tf.initialize_all_variables().run()
tf.contrib.layers.input_from_feature_columns(features, [crossed])
def testAllColumns(self):
real_valued = tf.contrib.layers.real_valued_column("income", 3)
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
features = {
"income": tf.constant([[20., 10, -5], [110, 0, -7], [-3, 30, 50]]),
"price": tf.constant([[20., 200], [110, 2], [-20, -30]]),
"wire": tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])
}
embeded_sparse = tf.contrib.layers.embedding_column(
hashed_sparse,
10, initializer=tf.constant_initializer(133.7))
output = tf.contrib.layers.input_from_feature_columns(
features, [real_valued, bucket, embeded_sparse])
with self.test_session():
tf.initialize_all_variables().run()
# size of output = 3 (real_valued) + 2 * 4 (bucket) + 10 (embedding) = 21
self.assertAllEqual(output.eval().shape, [3, 21])
def testPredictionsEmbeddingColumn(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
embeded_sparse = tf.contrib.layers.embedding_column(
hashed_sparse, 1, combiner="sum", initializer=init_ops.ones_initializer)
output = tf.contrib.layers.input_from_feature_columns(features,
[embeded_sparse])
with self.test_session():
tf.initialize_all_variables().run()
# score: (number of values)
self.assertAllEqual(output.eval(), [[1.], [2.]])
def testPredictionsEmbeddingColumnWithWeightedSparseColumn(self):
ids = tf.contrib.layers.sparse_column_with_keys(
"ids", ["marlo", "omar", "stringer"])
ids_tensor = tf.SparseTensor(values=["stringer", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, "weights")
weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"ids": ids_tensor,
"weights": weights_tensor}
embeded_sparse = tf.contrib.layers.embedding_column(
weighted_ids, 1, combiner="sum", initializer=init_ops.ones_initializer)
output = tf.contrib.layers.input_from_feature_columns(features,
[embeded_sparse])
with self.test_session():
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
# score: (sum of weights)
self.assertAllEqual(output.eval(), [[10.], [50.]])
def testInputLayerWithCollections(self):
real_valued = tf.contrib.layers.real_valued_column("price")
bucket = tf.contrib.layers.bucketized_column(real_valued,
boundaries=[0., 10., 100.])
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
features = {
"price": tf.constant([[20.], [110], [-3]]),
"wire": tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])
}
embeded_sparse = tf.contrib.layers.embedding_column(hashed_sparse, 10)
tf.contrib.layers.input_from_feature_columns(
features, [real_valued, bucket, embeded_sparse],
weight_collections=["my_collection"])
weights = tf.get_collection("my_collection")
# one variable for the embedded sparse column
self.assertEqual(1, len(weights))
def testInputLayerWithTrainableArg(self):
real_valued = tf.contrib.layers.real_valued_column("price")
bucket = tf.contrib.layers.bucketized_column(real_valued,
boundaries=[0., 10., 100.])
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
features = {
"price": tf.constant([[20.], [110], [-3]]),
"wire": tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])
}
embeded_sparse = tf.contrib.layers.embedding_column(hashed_sparse, 10)
tf.contrib.layers.input_from_feature_columns(
features, [real_valued, bucket, embeded_sparse],
weight_collections=["my_collection"],
trainable=False)
# There should not be any trainable variables
self.assertEqual(0, len(tf.trainable_variables()))
tf.contrib.layers.input_from_feature_columns(
features, [real_valued, bucket, embeded_sparse],
weight_collections=["my_collection"],
trainable=True)
# There should be one trainable variable for the embedded sparse column
self.assertEqual(1, len(tf.trainable_variables()))
class WeightedSumTest(tf.test.TestCase):
def testSparseColumn(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [hashed_sparse], num_outputs=5)
with self.test_session():
tf.initialize_all_variables().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
def testWeightedSparseColumn(self):
ids = tf.contrib.layers.sparse_column_with_keys(
"ids", ["marlo", "omar", "stringer"])
ids_tensor = tf.SparseTensor(values=["stringer", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
weighted_ids = tf.contrib.layers.weighted_sparse_column(ids, "weights")
weights_tensor = tf.SparseTensor(values=[10.0, 20.0, 30.0],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"ids": ids_tensor,
"weights": weights_tensor}
logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [weighted_ids], num_outputs=5)
with self.test_session():
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
def testCrossedColumn(self):
a = tf.contrib.layers.sparse_column_with_hash_bucket("aaa",
hash_bucket_size=100)
b = tf.contrib.layers.sparse_column_with_hash_bucket("bbb",
hash_bucket_size=100)
crossed = tf.contrib.layers.crossed_column(
set([a, b]), hash_bucket_size=10000)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"aaa": wire_tensor, "bbb": wire_tensor}
logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [crossed], num_outputs=5)
with self.test_session():
tf.initialize_all_variables().run()
self.assertAllEqual(logits.eval().shape, [2, 5])
def testEmbeddingColumn(self):
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
wire_tensor = tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [1, 1]],
shape=[2, 2])
features = {"wire": wire_tensor}
embeded_sparse = tf.contrib.layers.embedding_column(hashed_sparse, 10)
with self.test_session():
with self.assertRaisesRegexp(
ValueError, "Error creating weighted sum for column: wire_embedding"):
tf.initialize_all_variables().run()
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[embeded_sparse],
num_outputs=5)
def testRealValuedColumnWithMultiDimensions(self):
real_valued = tf.contrib.layers.real_valued_column("price", 2)
features = {"price": tf.constant([[20., 10.], [110, 0.], [-3, 30]])}
logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [real_valued], num_outputs=5)
with self.test_session():
tf.initialize_all_variables().run()
self.assertAllEqual(logits.eval().shape, [3, 5])
def testBucketizedColumnWithMultiDimensions(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
features = {"price": tf.constant([[20., 10.], [110, 0.], [-3, 30]])}
logits, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [bucket], num_outputs=5)
with self.test_session():
tf.initialize_all_variables().run()
self.assertAllEqual(logits.eval().shape, [3, 5])
def testAllColumns(self):
real_valued = tf.contrib.layers.real_valued_column("income", 2)
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
hashed_sparse = tf.contrib.layers.sparse_column_with_hash_bucket("wire", 10)
crossed = tf.contrib.layers.crossed_column([bucket, hashed_sparse], 100)
features = {
"income": tf.constant([[20., 10], [110, 0], [-3, 30]]),
"price": tf.constant([[20.], [110], [-3]]),
"wire": tf.SparseTensor(values=["omar", "stringer", "marlo"],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])
}
output, _, _ = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [real_valued, bucket, hashed_sparse, crossed],
num_outputs=5)
with self.test_session():
tf.initialize_all_variables().run()
self.assertAllEqual(output.eval().shape, [3, 5])
def testPredictions(self):
language = tf.contrib.layers.sparse_column_with_keys(
column_name="language",
keys=["english", "finnish", "hindi"])
age = tf.contrib.layers.real_valued_column("age")
with tf.Graph().as_default():
features = {
"age": tf.constant([[1], [2]]),
"language": tf.SparseTensor(values=["hindi", "english"],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
}
output, column_to_variable, bias = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[age, language],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
self.assertAllClose(output.eval(), [[0.], [0.]])
sess.run(bias.assign([0.1]))
self.assertAllClose(output.eval(), [[0.1], [0.1]])
# score: 0.1 + age*0.2
sess.run(column_to_variable[age][0].assign([[0.2]]))
self.assertAllClose(output.eval(), [[0.3], [0.5]])
# score: 0.1 + age*0.2 + language_weight[language_index]
sess.run(column_to_variable[language][0].assign([[0.1], [0.3], [0.2]]))
self.assertAllClose(output.eval(), [[0.5], [0.6]])
def testPredictionsWithWeightedSparseColumn(self):
language = tf.contrib.layers.sparse_column_with_keys(
column_name="language",
keys=["english", "finnish", "hindi"])
weighted_language = tf.contrib.layers.weighted_sparse_column(
sparse_id_column=language,
weight_column_name="age")
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["hindi", "english"],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
"age": tf.SparseTensor(values=[10.0, 20.0],
indices=[[0, 0], [1, 0]],
shape=[2, 1])
}
output, column_to_variable, bias = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [weighted_language], num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
self.assertAllClose(output.eval(), [[0.], [0.]])
sess.run(bias.assign([0.1]))
self.assertAllClose(output.eval(), [[0.1], [0.1]])
# score: bias + age*language_weight[index]
sess.run(column_to_variable[weighted_language][0].assign(
[[0.1], [0.2], [0.3]]))
self.assertAllClose(output.eval(), [[3.1], [2.1]])
def testPredictionsWithMultivalentColumnButNoCross(self):
language = tf.contrib.layers.sparse_column_with_keys(
column_name="language",
keys=["english", "turkish", "hindi"])
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["hindi", "english"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
output, column_to_variable, bias = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[language],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
# score: 0.1 + language_weight['hindi'] + language_weight['english']
sess.run(bias.assign([0.1]))
sess.run(column_to_variable[language][0].assign([[0.1], [0.3], [0.2]]))
self.assertAllClose(output.eval(), [[0.4]])
def testSparseFeatureColumnWithHashedBucketSize(self):
movies = tf.contrib.layers.sparse_column_with_hash_bucket(
column_name="movies", hash_bucket_size=15)
with tf.Graph().as_default():
features = {
"movies": tf.SparseTensor(
values=["matrix", "head-on", "winter sleep"],
indices=[[0, 0], [0, 1], [1, 0]],
shape=[2, 2])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[movies],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[movies][0]
self.assertEqual(weights.get_shape(), (15, 1))
sess.run(weights.assign(weights + 0.4))
# score for first example = 0.4 (matrix) + 0.4 (head-on) = 0.8
# score for second example = 0.4 (winter sleep)
self.assertAllClose(output.eval(), [[0.8], [0.4]])
def testCrossUsageInPredictions(self):
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=10)
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [1, 0]],
shape=[2, 1]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[2, 1])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country_language],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[country_language][0]
sess.run(weights.assign(weights + 0.4))
self.assertAllClose(output.eval(), [[0.4], [0.4]])
def testCrossColumnByItself(self):
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
language_language = tf.contrib.layers.crossed_column(
[language, language], hash_bucket_size=10)
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [0, 1]],
shape=[1, 2]),
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [language_language],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[language_language][0]
sess.run(weights.assign(weights + 0.4))
# There are two features inside language. If we cross it by itself we'll
# have four crossed features.
self.assertAllClose(output.eval(), [[1.6]])
def testMultivalentCrossUsageInPredictions(self):
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=10)
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [0, 1]],
shape=[1, 2]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country_language],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[country_language][0]
sess.run(weights.assign(weights + 0.4))
# There are four crosses each with 0.4 weight.
# score = 0.4 + 0.4 + 0.4 + 0.4
self.assertAllClose(output.eval(), [[1.6]])
def testMultivalentCrossUsageInPredictionsWithPartition(self):
# Bucket size has to be big enough to allow sharding.
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=64 << 19)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=64 << 18)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=64 << 18)
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [0, 1]],
shape=[1, 2]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
with tf.variable_op_scope(
features.values(),
"weighted_sum_from_feature_columns",
partitioner=tf.min_max_variable_partitioner(
max_partitions=10, min_slice_size=((64 << 20) - 1))) as scope:
output, column_to_variable, _ = (<|fim▁hole|> with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
self.assertEqual(2, len(column_to_variable[country]))
self.assertEqual(3, len(column_to_variable[language]))
self.assertEqual(2, len(column_to_variable[country_language]))
weights = column_to_variable[country_language]
for partition_variable in weights:
sess.run(partition_variable.assign(partition_variable + 0.4))
# There are four crosses each with 0.4 weight.
# score = 0.4 + 0.4 + 0.4 + 0.4
self.assertAllClose(output.eval(), [[1.6]])
def testRealValuedColumnHavingMultiDimensions(self):
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
age = tf.contrib.layers.real_valued_column("age")
# The following RealValuedColumn has 3 dimensions.
incomes = tf.contrib.layers.real_valued_column("incomes", 3)
with tf.Graph().as_default():
features = {"age": tf.constant([[1], [1]]),
"incomes": tf.constant([[100., 200., 300.], [10., 20., 30.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[2, 2])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country, age, incomes],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
incomes_weights = column_to_variable[incomes][0]
sess.run(incomes_weights.assign([[0.1], [0.2], [0.3]]))
self.assertAllClose(output.eval(), [[140.], [14.]])
def testMulticlassWithRealValuedColumnHavingMultiDimensions(self):
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
age = tf.contrib.layers.real_valued_column("age")
# The following RealValuedColumn has 3 dimensions.
incomes = tf.contrib.layers.real_valued_column("incomes", 3)
with tf.Graph().as_default():
features = {"age": tf.constant([[1], [1]]),
"incomes": tf.constant([[100., 200., 300.], [10., 20., 30.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[2, 2])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country, age, incomes],
num_outputs=5))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
incomes_weights = column_to_variable[incomes][0]
sess.run(incomes_weights.assign([[0.01, 0.1, 1., 10., 100.],
[0.02, 0.2, 2., 20., 200.],
[0.03, 0.3, 3., 30., 300.]]))
self.assertAllClose(output.eval(), [[14., 140., 1400., 14000., 140000.],
[1.4, 14., 140., 1400., 14000.]])
def testBucketizedColumn(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
with tf.Graph().as_default():
# buckets 2, 3, 0
features = {"price": tf.constant([[20.], [110], [-3]])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[bucket],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
sess.run(column_to_variable[bucket][0].assign([[0.1], [0.2], [0.3], [0.4
]]))
self.assertAllClose(output.eval(), [[0.3], [0.4], [0.1]])
def testBucketizedColumnHavingMultiDimensions(self):
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
with tf.Graph().as_default():
# buckets 2, 3, 0
features = {"price": tf.constant([[20., 210], [110, 50], [-3, -30]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[3, 2])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[bucket, country],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
# dimension = 2, bucket_size = 4, num_classes = 1
sess.run(column_to_variable[bucket][0].assign(
[[0.1], [0.2], [0.3], [0.4], [1], [2], [3], [4]]))
self.assertAllClose(output.eval(), [[0.3 + 4], [0.4 + 3], [0.1 + 1]])
def testMulticlassWithBucketizedColumnHavingMultiDimensions(self):
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", 2),
boundaries=[0., 10., 100.])
with tf.Graph().as_default():
# buckets 2, 3, 0
features = {"price": tf.constant([[20., 210], [110, 50], [-3, -30]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [1, 0]],
shape=[3, 2])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[bucket, country],
num_outputs=5))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
# dimension = 2, bucket_size = 4, num_classes = 5
sess.run(column_to_variable[bucket][0].assign(
[[0.1, 1, 10, 100, 1000], [0.2, 2, 20, 200, 2000],
[0.3, 3, 30, 300, 3000], [0.4, 4, 40, 400, 4000],
[5, 50, 500, 5000, 50000], [6, 60, 600, 6000, 60000],
[7, 70, 700, 7000, 70000], [8, 80, 800, 8000, 80000]]))
self.assertAllClose(
output.eval(),
[[0.3 + 8, 3 + 80, 30 + 800, 300 + 8000, 3000 + 80000],
[0.4 + 7, 4 + 70, 40 + 700, 400 + 7000, 4000 + 70000],
[0.1 + 5, 1 + 50, 10 + 500, 100 + 5000, 1000 + 50000]])
def testCrossWithBucketizedColumn(self):
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_price = tf.contrib.layers.crossed_column(
[country, price_bucket], hash_bucket_size=10)
with tf.Graph().as_default():
features = {
"price": tf.constant([[20.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[country_price],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[country_price][0]
sess.run(weights.assign(weights + 0.4))
# There are two crosses each with 0.4 weight.
# score = 0.4 + 0.4
self.assertAllClose(output.eval(), [[0.8]])
def testCrossWithCrossedColumn(self):
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_language = tf.contrib.layers.crossed_column(
[language, country], hash_bucket_size=10)
country_language_price = tf.contrib.layers.crossed_column(
set([country_language, price_bucket]),
hash_bucket_size=15)
with tf.Graph().as_default():
features = {
"price": tf.constant([[20.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2]),
"language": tf.SparseTensor(values=["english", "spanish"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country_language_price],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[country_language_price][0]
sess.run(weights.assign(weights + 0.4))
# There are two crosses each with 0.4 weight.
# score = 0.4 + 0.4 + 0.4 + 0.4
self.assertAllClose(output.eval(), [[1.6]])
def testIntegerizedColumn(self):
product = tf.contrib.layers.sparse_column_with_integerized_feature(
"product", bucket_size=5)
with tf.Graph().as_default():
features = {"product": tf.SparseTensor(values=[0, 4, 2],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[product],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
product_weights = column_to_variable[product][0]
sess.run(product_weights.assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
self.assertAllClose(output.eval(), [[0.1], [0.5], [0.3]])
def testIntegerizedColumnWithInvalidId(self):
product = tf.contrib.layers.sparse_column_with_integerized_feature(
"product", bucket_size=5)
with tf.Graph().as_default():
features = {"product": tf.SparseTensor(values=[5, 4, 7],
indices=[[0, 0], [1, 0], [2, 0]],
shape=[3, 1])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[product],
num_outputs=1))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
product_weights = column_to_variable[product][0]
sess.run(product_weights.assign([[0.1], [0.2], [0.3], [0.4], [0.5]]))
self.assertAllClose(output.eval(), [[0.1], [0.5], [0.3]])
def testMulticlassWithOnlyBias(self):
with tf.Graph().as_default():
features = {"age": tf.constant([[10.], [20.], [30.], [40.]])}
output, _, bias = tf.contrib.layers.weighted_sum_from_feature_columns(
features, [tf.contrib.layers.real_valued_column("age")],
num_outputs=3)
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
sess.run(bias.assign([0.1, 0.2, 0.3]))
self.assertAllClose(output.eval(), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3],
[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
def testMulticlassWithRealValuedColumn(self):
with tf.Graph().as_default():
column = tf.contrib.layers.real_valued_column("age")
features = {"age": tf.constant([[10.], [20.], [30.], [40.]])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[column],
num_outputs=3))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[column][0]
self.assertEqual(weights.get_shape(), (1, 3))
sess.run(weights.assign([[0.01, 0.03, 0.05]]))
self.assertAllClose(output.eval(), [[0.1, 0.3, 0.5], [0.2, 0.6, 1.0],
[0.3, 0.9, 1.5], [0.4, 1.2, 2.0]])
def testMulticlassWithSparseColumn(self):
with tf.Graph().as_default():
column = tf.contrib.layers.sparse_column_with_keys(
column_name="language",
keys=["english", "arabic", "hindi", "russian", "swahili"])
features = {
"language": tf.SparseTensor(
values=["hindi", "english", "arabic", "russian"],
indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
shape=[4, 1])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[column],
num_outputs=3))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[column][0]
self.assertEqual(weights.get_shape(), (5, 3))
sess.run(weights.assign([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8],
[0.3, 0.6, 0.9], [0.4, 0.7, 1.0], [0.5, 0.8,
1.1]]))
self.assertAllClose(output.eval(), [[0.3, 0.6, 0.9], [0.1, 0.4, 0.7],
[0.2, 0.5, 0.8], [0.4, 0.7, 1.0]])
def testMulticlassWithBucketizedColumn(self):
column = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 100., 500., 1000.])
with tf.Graph().as_default():
# buckets 0, 2, 1, 2
features = {"price": tf.constant([[-3], [110], [20.], [210]])}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[column],
num_outputs=3))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[column][0]
self.assertEqual(weights.get_shape(), (5, 3))
sess.run(weights.assign([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8],
[0.3, 0.6, 0.9], [0.4, 0.7, 1.0], [0.5, 0.8,
1.1]]))
self.assertAllClose(output.eval(), [[0.1, 0.4, 0.7], [0.3, 0.6, 0.9],
[0.2, 0.5, 0.8], [0.3, 0.6, 0.9]])
def testMulticlassWithCrossedColumn(self):
language = tf.contrib.layers.sparse_column_with_hash_bucket(
"language", hash_bucket_size=3)
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=2)
column = tf.contrib.layers.crossed_column(
{language, country}, hash_bucket_size=5)
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(
values=["english", "spanish", "russian", "swahili"],
indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
shape=[4, 1]),
"country": tf.SparseTensor(values=["US", "SV", "RU", "KE"],
indices=[[0, 0], [1, 0], [2, 0], [3, 0]],
shape=[4, 1])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[column],
num_outputs=3))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[column][0]
self.assertEqual(weights.get_shape(), (5, 3))
sess.run(weights.assign([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8],
[0.3, 0.6, 0.9], [0.4, 0.7, 1.0], [0.5, 0.8,
1.1]]))
self.assertAllClose(tf.shape(output).eval(), [4, 3])
def testMulticlassWithMultivalentColumn(self):
column = tf.contrib.layers.sparse_column_with_keys(
column_name="language",
keys=["english", "turkish", "hindi", "russian", "swahili"])
with tf.Graph().as_default():
features = {
"language": tf.SparseTensor(
values=["hindi", "english", "turkish", "turkish", "english"],
indices=[[0, 0], [0, 1], [1, 0], [2, 0], [3, 0]],
shape=[4, 2])
}
output, column_to_variable, _ = (
tf.contrib.layers.weighted_sum_from_feature_columns(features,
[column],
num_outputs=3))
with self.test_session() as sess:
tf.initialize_all_variables().run()
tf.initialize_all_tables().run()
weights = column_to_variable[column][0]
self.assertEqual(weights.get_shape(), (5, 3))
sess.run(weights.assign([[0.1, 0.4, 0.7], [0.2, 0.5, 0.8],
[0.3, 0.6, 0.9], [0.4, 0.7, 1.0], [0.5, 0.8,
1.1]]))
self.assertAllClose(output.eval(), [[0.4, 1.0, 1.6], [0.2, 0.5, 0.8],
[0.2, 0.5, 0.8], [0.1, 0.4, 0.7]])
def testVariablesAddedToCollection(self):
price_bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price"),
boundaries=[0., 10., 100.])
country = tf.contrib.layers.sparse_column_with_hash_bucket(
"country", hash_bucket_size=5)
country_price = tf.contrib.layers.crossed_column(
[country, price_bucket], hash_bucket_size=10)
with tf.Graph().as_default():
features = {
"price": tf.constant([[20.]]),
"country": tf.SparseTensor(values=["US", "SV"],
indices=[[0, 0], [0, 1]],
shape=[1, 2])
}
tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country_price, price_bucket],
num_outputs=1,
weight_collections=["my_collection"])
weights = tf.get_collection("my_collection")
# 3 = bias + price_bucket + country_price
self.assertEqual(3, len(weights))
class ParseExampleTest(tf.test.TestCase):
def testParseExample(self):
bucket = tf.contrib.layers.bucketized_column(
tf.contrib.layers.real_valued_column("price", dimension=3),
boundaries=[0., 10., 100.])
wire_cast = tf.contrib.layers.sparse_column_with_keys(
"wire_cast", ["marlo", "omar", "stringer"])
# buckets 2, 3, 0
data = tf.train.Example(features=tf.train.Features(feature={
"price": tf.train.Feature(float_list=tf.train.FloatList(value=[20., 110,
-3])),
"wire_cast": tf.train.Feature(bytes_list=tf.train.BytesList(value=[
b"stringer", b"marlo"
])),
}))
output = tf.contrib.layers.parse_feature_columns_from_examples(
serialized=[data.SerializeToString()],
feature_columns=[bucket, wire_cast])
self.assertIn(bucket, output)
self.assertIn(wire_cast, output)
with self.test_session():
tf.initialize_all_tables().run()
self.assertAllEqual(output[bucket].eval(), [[2, 3, 0]])
self.assertAllEqual(output[wire_cast].indices.eval(), [[0, 0], [0, 1]])
self.assertAllEqual(output[wire_cast].values.eval(), [2, 0])
class InferRealValuedColumnTest(tf.test.TestCase):
def testTensorInt32(self):
self.assertEqual(
tf.contrib.layers.infer_real_valued_columns(
tf.zeros(shape=[33, 4], dtype=tf.int32)),
[tf.contrib.layers.real_valued_column("", dimension=4, dtype=tf.int32)])
def testTensorInt64(self):
self.assertEqual(
tf.contrib.layers.infer_real_valued_columns(
tf.zeros(shape=[33, 4], dtype=tf.int64)),
[tf.contrib.layers.real_valued_column("", dimension=4, dtype=tf.int64)])
def testTensorFloat32(self):
self.assertEqual(
tf.contrib.layers.infer_real_valued_columns(
tf.zeros(shape=[33, 4], dtype=tf.float32)),
[tf.contrib.layers.real_valued_column(
"", dimension=4, dtype=tf.float32)])
def testTensorFloat64(self):
self.assertEqual(
tf.contrib.layers.infer_real_valued_columns(
tf.zeros(shape=[33, 4], dtype=tf.float64)),
[tf.contrib.layers.real_valued_column(
"", dimension=4, dtype=tf.float64)])
def testDictionary(self):
self.assertItemsEqual(
tf.contrib.layers.infer_real_valued_columns({
"a": tf.zeros(shape=[33, 4], dtype=tf.int32),
"b": tf.zeros(shape=[3, 2], dtype=tf.float32)
}),
[tf.contrib.layers.real_valued_column(
"a", dimension=4, dtype=tf.int32),
tf.contrib.layers.real_valued_column(
"b", dimension=2, dtype=tf.float32)])
def testNotGoodDtype(self):
with self.assertRaises(ValueError):
tf.contrib.layers.infer_real_valued_columns(
tf.constant([["a"]], dtype=tf.string))
def testSparseTensor(self):
with self.assertRaises(ValueError):
tf.contrib.layers.infer_real_valued_columns(
tf.SparseTensor(indices=[[0, 0]], values=["a"], shape=[1, 1]))
if __name__ == "__main__":
tf.test.main()<|fim▁end|> | tf.contrib.layers.weighted_sum_from_feature_columns(
features, [country, language, country_language],
num_outputs=1,
scope=scope)) |
<|file_name|>ReactTransitionGroup.js<|end_file_name|><|fim▁begin|>/**
* Copyright 2013-2014 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @providesModule ReactTransitionGroup
*/
"use strict";
var React = require('React');
var ReactTransitionChildMapping = require('ReactTransitionChildMapping');
var cloneWithProps = require('cloneWithProps');
var emptyFunction = require('emptyFunction');
var merge = require('merge');
var ReactTransitionGroup = React.createClass({
propTypes: {
component: React.PropTypes.func,
childFactory: React.PropTypes.func
},
getDefaultProps: function() {
return {
component: React.DOM.span,
childFactory: emptyFunction.thatReturnsArgument
};
},
getInitialState: function() {
return {
children: ReactTransitionChildMapping.getChildMapping(this.props.children)
};
},
componentWillReceiveProps: function(nextProps) {
var nextChildMapping = ReactTransitionChildMapping.getChildMapping(
nextProps.children
);
var prevChildMapping = this.state.children;
this.setState({
children: ReactTransitionChildMapping.mergeChildMappings(
prevChildMapping,
nextChildMapping
)
});
var key;
for (key in nextChildMapping) {
var hasPrev = prevChildMapping && prevChildMapping.hasOwnProperty(key);
if (nextChildMapping[key] && !hasPrev &&
!this.currentlyTransitioningKeys[key]) {
this.keysToEnter.push(key);
}
}
for (key in prevChildMapping) {
var hasNext = nextChildMapping && nextChildMapping.hasOwnProperty(key);
if (prevChildMapping[key] && !hasNext &&
!this.currentlyTransitioningKeys[key]) {
this.keysToLeave.push(key);
}
}
// If we want to someday check for reordering, we could do it here.
},
componentWillMount: function() {
this.currentlyTransitioningKeys = {};
this.keysToEnter = [];
this.keysToLeave = [];
},<|fim▁hole|> var keysToEnter = this.keysToEnter;
this.keysToEnter = [];
keysToEnter.forEach(this.performEnter);
var keysToLeave = this.keysToLeave;
this.keysToLeave = [];
keysToLeave.forEach(this.performLeave);
},
performEnter: function(key) {
this.currentlyTransitioningKeys[key] = true;
var component = this.refs[key];
if (component.componentWillEnter) {
component.componentWillEnter(
this._handleDoneEntering.bind(this, key)
);
} else {
this._handleDoneEntering(key);
}
},
_handleDoneEntering: function(key) {
var component = this.refs[key];
if (component.componentDidEnter) {
component.componentDidEnter();
}
delete this.currentlyTransitioningKeys[key];
var currentChildMapping = ReactTransitionChildMapping.getChildMapping(
this.props.children
);
if (!currentChildMapping || !currentChildMapping.hasOwnProperty(key)) {
// This was removed before it had fully entered. Remove it.
this.performLeave(key);
}
},
performLeave: function(key) {
this.currentlyTransitioningKeys[key] = true;
var component = this.refs[key];
if (component.componentWillLeave) {
component.componentWillLeave(this._handleDoneLeaving.bind(this, key));
} else {
// Note that this is somewhat dangerous b/c it calls setState()
// again, effectively mutating the component before all the work
// is done.
this._handleDoneLeaving(key);
}
},
_handleDoneLeaving: function(key) {
var component = this.refs[key];
if (component.componentDidLeave) {
component.componentDidLeave();
}
delete this.currentlyTransitioningKeys[key];
var currentChildMapping = ReactTransitionChildMapping.getChildMapping(
this.props.children
);
if (currentChildMapping && currentChildMapping.hasOwnProperty(key)) {
// This entered again before it fully left. Add it again.
this.performEnter(key);
} else {
var newChildren = merge(this.state.children);
delete newChildren[key];
this.setState({children: newChildren});
}
},
render: function() {
// TODO: we could get rid of the need for the wrapper node
// by cloning a single child
var childrenToRender = {};
for (var key in this.state.children) {
var child = this.state.children[key];
if (child) {
// You may need to apply reactive updates to a child as it is leaving.
// The normal React way to do it won't work since the child will have
// already been removed. In case you need this behavior you can provide
// a childFactory function to wrap every child, even the ones that are
// leaving.
childrenToRender[key] = cloneWithProps(
this.props.childFactory(child),
{ref: key}
);
}
}
return this.transferPropsTo(this.props.component(null, childrenToRender));
}
});
module.exports = ReactTransitionGroup;<|fim▁end|> |
componentDidUpdate: function() { |
<|file_name|>action_reposync.py<|end_file_name|><|fim▁begin|>"""
Builds out and synchronizes yum repo mirrors.
Supports rsync, yum (via reposync), and RHN mirroring.
Copyright 2006-2007, Red Hat, Inc
Michael DeHaan <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os
import os.path
import time
import yaml # Howell-Clark version
import sys
HAS_YUM = True
try:
import yum
except:
HAS_YUM = False
import utils
from cexceptions import *
import traceback
import errno
from utils import _
import clogger
class RepoSync:
"""
    Handles building out and synchronizing yum repo mirrors.
"""
# ==================================================================================
def __init__(self,config,tries=1,nofail=False,logger=None):
"""
Constructor
"""
self.verbose = True
self.api = config.api
self.config = config
self.distros = config.distros()
self.profiles = config.profiles()
self.systems = config.systems()
self.settings = config.settings()
self.repos = config.repos()
self.rflags = self.settings.reposync_flags
self.tries = tries
self.nofail = nofail
self.logger = logger
if logger is None:
self.logger = clogger.Logger()
self.logger.info("hello, reposync")
# ===================================================================
def run(self, name=None, verbose=True):
"""
Syncs the current repo configuration file with the filesystem.
"""
self.logger.info("run, reposync, run!")
try:
self.tries = int(self.tries)
except:
utils.die(self.logger,"retry value must be an integer")
self.verbose = verbose
report_failure = False
for repo in self.repos:
env = repo.environment
for k in env.keys():
self.logger.info("environment: %s=%s" % (k,env[k]))
if env[k] is not None:
os.putenv(k,env[k])
if name is not None and repo.name != name:
# invoked to sync only a specific repo, this is not the one
continue
elif name is None and not repo.keep_updated:
# invoked to run against all repos, but this one is off
self.logger.info("%s is set to not be updated" % repo.name)
continue
repo_mirror = os.path.join(self.settings.webdir, "repo_mirror")
repo_path = os.path.join(repo_mirror, repo.name)
mirror = repo.mirror
if not os.path.isdir(repo_path) and not repo.mirror.lower().startswith("rhn://"):
os.makedirs(repo_path)
# which may actually NOT reposync if the repo is set to not mirror locally
# but that's a technicality
for x in range(self.tries+1,1,-1):
success = False
try:
self.sync(repo)
success = True
except:
utils.log_exc(self.logger)
self.logger.warning("reposync failed, tries left: %s" % (x-2))
if not success:
report_failure = True
if not self.nofail:
utils.die(self.logger,"reposync failed, retry limit reached, aborting")
else:
self.logger.error("reposync failed, retry limit reached, skipping")
self.update_permissions(repo_path)
if report_failure:
utils.die(self.logger,"overall reposync failed, at least one repo failed to synchronize")
return True
# ==================================================================================
def sync(self, repo):
"""
Conditionally sync a repo, based on type.
"""
if repo.breed == "rhn":
return self.rhn_sync(repo)
elif repo.breed == "yum":
return self.yum_sync(repo)
#elif repo.breed == "apt":
# return self.apt_sync(repo)
elif repo.breed == "rsync":
return self.rsync_sync(repo)
else:
utils.die(self.logger,"unable to sync repo (%s), unknown or unsupported repo type (%s)" % (repo.name, repo.breed))
# ====================================================================================
def createrepo_walker(self, repo, dirname, fnames):
"""
Used to run createrepo on a copied Yum mirror.
"""
if os.path.exists(dirname) or repo['breed'] == 'rsync':
utils.remove_yum_olddata(dirname)
# add any repo metadata we can use
mdoptions = []
if os.path.isfile("%s/.origin/repomd.xml" % (dirname)):
if not HAS_YUM:
utils.die(self.logger,"yum is required to use this feature")
rmd = yum.repoMDObject.RepoMD('', "%s/.origin/repomd.xml" % (dirname))
if rmd.repoData.has_key("group"):
groupmdfile = rmd.getData("group").location[1]
mdoptions.append("-g %s" % groupmdfile)
if rmd.repoData.has_key("prestodelta"):
# need createrepo >= 0.9.7 to add deltas
if utils.check_dist() == "redhat" or utils.check_dist() == "suse":
cmd = "/usr/bin/rpmquery --queryformat=%{VERSION} createrepo"
createrepo_ver = utils.subprocess_get(self.logger, cmd)
if createrepo_ver >= "0.9.7":
mdoptions.append("--deltas")
else:
utils.die(self.logger,"this repo has presto metadata; you must upgrade createrepo to >= 0.9.7 first and then need to resync the repo through cobbler.")
blended = utils.blender(self.api, False, repo)
flags = blended.get("createrepo_flags","(ERROR: FLAGS)")
try:
# BOOKMARK
cmd = "createrepo %s %s %s" % (" ".join(mdoptions), flags, dirname)
utils.subprocess_call(self.logger, cmd)
except:
utils.log_exc(self.logger)
self.logger.error("createrepo failed.")
del fnames[:] # we're in the right place
# ====================================================================================
def rsync_sync(self, repo):
"""
Handle copying of rsync:// and rsync-over-ssh repos.
"""
repo_mirror = repo.mirror
if not repo.mirror_locally:
utils.die(self.logger,"rsync:// urls must be mirrored locally, yum cannot access them directly")
if repo.rpm_list != "" and repo.rpm_list != []:
self.logger.warning("--rpm-list is not supported for rsync'd repositories")
# FIXME: don't hardcode
dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
spacer = ""
if not repo.mirror.startswith("rsync://") and not repo.mirror.startswith("/"):
spacer = "-e ssh"
if not repo.mirror.endswith("/"):
repo.mirror = "%s/" % repo.mirror
# FIXME: wrapper for subprocess that logs to logger
cmd = "rsync -rltDv %s --delete --exclude-from=/etc/cobbler/rsync.exclude %s %s" % (spacer, repo.mirror, dest_path)
rc = utils.subprocess_call(self.logger, cmd)
if rc !=0:
utils.die(self.logger,"cobbler reposync failed")
os.path.walk(dest_path, self.createrepo_walker, repo)
self.create_local_file(dest_path, repo)
# ====================================================================================
def rhn_sync(self, repo):
"""
Handle mirroring of RHN repos.
"""
repo_mirror = repo.mirror
# FIXME? warn about not having yum-utils. We don't want to require it in the package because
# RHEL4 and RHEL5U0 don't have it.
if not os.path.exists("/usr/bin/reposync"):
utils.die(self.logger,"no /usr/bin/reposync found, please install yum-utils")
cmd = "" # command to run
has_rpm_list = False # flag indicating not to pull the whole repo
# detect cases that require special handling
if repo.rpm_list != "" and repo.rpm_list != []:
has_rpm_list = True
# create yum config file for use by reposync
# FIXME: don't hardcode
dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
temp_path = os.path.join(dest_path, ".origin")
if not os.path.isdir(temp_path):
# FIXME: there's a chance this might break the RHN D/L case
os.makedirs(temp_path)
# how we invoke yum-utils depends on whether this is RHN content or not.
# this is the somewhat more-complex RHN case.
# NOTE: this requires that you have entitlements for the server and you give the mirror as rhn://$channelname
if not repo.mirror_locally:
utils.die("rhn:// repos do not work with --mirror-locally=1")
if has_rpm_list:
self.logger.warning("warning: --rpm-list is not supported for RHN content")
rest = repo.mirror[6:] # everything after rhn://
cmd = "/usr/bin/reposync %s -r %s --download_path=%s" % (self.rflags, rest, "/var/www/cobbler/repo_mirror")
if repo.name != rest:
args = { "name" : repo.name, "rest" : rest }
utils.die(self.logger,"ERROR: repository %(name)s needs to be renamed %(rest)s as the name of the cobbler repository must match the name of the RHN channel" % args)
if repo.arch == "i386":
# counter-intuitive, but we want the newish kernels too
repo.arch = "i686"
if repo.arch != "":
cmd = "%s -a %s" % (cmd, repo.arch)
# now regardless of whether we're doing yumdownloader or reposync
# or whether the repo was http://, ftp://, or rhn://, execute all queued
# commands here. Any failure at any point stops the operation.
<|fim▁hole|> # Don't die if reposync fails, it is logged
# if rc !=0:
# utils.die(self.logger,"cobbler reposync failed")
# some more special case handling for RHN.
# create the config file now, because the directory didn't exist earlier
temp_file = self.create_local_file(temp_path, repo, output=False)
# now run createrepo to rebuild the index
if repo.mirror_locally:
os.path.walk(dest_path, self.createrepo_walker, repo)
# create the config file the hosts will use to access the repository.
self.create_local_file(dest_path, repo)
# ====================================================================================
def yum_sync(self, repo):
"""
Handle copying of http:// and ftp:// yum repos.
"""
repo_mirror = repo.mirror
# warn about not having yum-utils. We don't want to require it in the package because
# RHEL4 and RHEL5U0 don't have it.
if not os.path.exists("/usr/bin/reposync"):
utils.die(self.logger,"no /usr/bin/reposync found, please install yum-utils")
cmd = "" # command to run
has_rpm_list = False # flag indicating not to pull the whole repo
# detect cases that require special handling
if repo.rpm_list != "" and repo.rpm_list != []:
has_rpm_list = True
# create yum config file for use by reposync
dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
temp_path = os.path.join(dest_path, ".origin")
if not os.path.isdir(temp_path) and repo.mirror_locally:
# FIXME: there's a chance this might break the RHN D/L case
os.makedirs(temp_path)
# create the config file that yum will use for the copying
if repo.mirror_locally:
temp_file = self.create_local_file(temp_path, repo, output=False)
if not has_rpm_list and repo.mirror_locally:
# if we have not requested only certain RPMs, use reposync
cmd = "/usr/bin/reposync %s --config=%s --repoid=%s --download_path=%s" % (self.rflags, temp_file, repo.name, "/var/www/cobbler/repo_mirror")
if repo.arch != "":
if repo.arch == "x86":
repo.arch = "i386" # FIX potential arch errors
if repo.arch == "i386":
# counter-intuitive, but we want the newish kernels too
cmd = "%s -a i686" % (cmd)
else:
cmd = "%s -a %s" % (cmd, repo.arch)
elif repo.mirror_locally:
# create the output directory if it doesn't exist
if not os.path.exists(dest_path):
os.makedirs(dest_path)
use_source = ""
if repo.arch == "src":
use_source = "--source"
# older yumdownloader sometimes explodes on --resolvedeps
# if this happens to you, upgrade yum & yum-utils
extra_flags = self.settings.yumdownloader_flags
cmd = "/usr/bin/yumdownloader %s %s --disablerepo=* --enablerepo=%s -c %s --destdir=%s %s" % (extra_flags, use_source, repo.name, temp_file, dest_path, " ".join(repo.rpm_list))
# now regardless of whether we're doing yumdownloader or reposync
# or whether the repo was http://, ftp://, or rhn://, execute all queued
# commands here. Any failure at any point stops the operation.
if repo.mirror_locally:
rc = utils.subprocess_call(self.logger, cmd)
if rc !=0:
utils.die(self.logger,"cobbler reposync failed")
repodata_path = os.path.join(dest_path, "repodata")
if not os.path.exists("/usr/bin/wget"):
utils.die(self.logger,"no /usr/bin/wget found, please install wget")
# grab repomd.xml and use it to download any metadata we can use
cmd2 = "/usr/bin/wget -q %s/repodata/repomd.xml -O %s/repomd.xml" % (repo_mirror, temp_path)
rc = utils.subprocess_call(self.logger,cmd2)
if rc == 0:
# create our repodata directory now, as any extra metadata we're
# about to download probably lives there
if not os.path.isdir(repodata_path):
os.makedirs(repodata_path)
rmd = yum.repoMDObject.RepoMD('', "%s/repomd.xml" % (temp_path))
for mdtype in rmd.repoData.keys():
# don't download metadata files that are created by default
if mdtype not in ["primary", "primary_db", "filelists", "filelists_db", "other", "other_db"]:
mdfile = rmd.getData(mdtype).location[1]
cmd3 = "/usr/bin/wget -q %s/%s -O %s/%s" % (repo_mirror, mdfile, dest_path, mdfile)
utils.subprocess_call(self.logger,cmd3)
if rc !=0:
utils.die(self.logger,"wget failed")
# now run createrepo to rebuild the index
if repo.mirror_locally:
os.path.walk(dest_path, self.createrepo_walker, repo)
# create the config file the hosts will use to access the repository.
self.create_local_file(dest_path, repo)
# ====================================================================================
# def apt_sync(self, repo):
#
# """
# Handle copying of http:// and ftp:// debian repos.
# """
#
# repo_mirror = repo.mirror
#
# # warn about not having mirror program.
#
# mirror_program = "/usr/bin/debmirror"
# if not os.path.exists(mirror_program):
# utils.die(self.logger,"no %s found, please install it"%(mirror_program))
#
# cmd = "" # command to run
# has_rpm_list = False # flag indicating not to pull the whole repo
#
# # detect cases that require special handling
#
# if repo.rpm_list != "" and repo.rpm_list != []:
# utils.die(self.logger,"has_rpm_list not yet supported on apt repos")
#
# if not repo.arch:
# utils.die(self.logger,"Architecture is required for apt repositories")
#
# # built destination path for the repo
# dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)
#
# if repo.mirror_locally:
# mirror = repo.mirror.replace("@@suite@@",repo.os_version)
#
# idx = mirror.find("://")
# method = mirror[:idx]
# mirror = mirror[idx+3:]
#
# idx = mirror.find("/")
# host = mirror[:idx]
# mirror = mirror[idx+1:]
#
# idx = mirror.rfind("/dists/")
# suite = mirror[idx+7:]
# mirror = mirror[:idx]
#
# mirror_data = "--method=%s --host=%s --root=%s --dist=%s " % ( method , host , mirror , suite )
#
# # FIXME : flags should come from repo instead of being hardcoded
#
# rflags = "--passive --nocleanup"
# for x in repo.yumopts:
# if repo.yumopts[x]:
# rflags += " %s %s" % ( x , repo.yumopts[x] )
# else:
# rflags += " %s" % x
# cmd = "%s %s %s %s" % (mirror_program, rflags, mirror_data, dest_path)
# if repo.arch == "src":
# cmd = "%s --source" % cmd
# else:
# arch = repo.arch
# if arch == "x86":
# arch = "i386" # FIX potential arch errors
# if arch == "x86_64":
# arch = "amd64" # FIX potential arch errors
# cmd = "%s --nosource -a %s" % (cmd, arch)
#
# rc = utils.subprocess_call(self.logger, cmd)
# if rc !=0:
# utils.die(self.logger,"cobbler reposync failed")
# ==================================================================================
def create_local_file(self, dest_path, repo, output=True):
"""
Creates Yum config files for use by reposync
Two uses:
(A) output=True, Create local files that can be used with yum on provisioned clients to make use of this mirror.
        (B) output=False, Create a temporary file to feed into reposync/yumdownloader for mirroring
"""
# the output case will generate repo configuration files which are usable
# for the installed systems. They need to be made compatible with --server-override
# which means they are actually templates, which need to be rendered by a cobbler-sync
        # on a per-profile/system basis.
if output:
fname = os.path.join(dest_path,"config.repo")
else:
fname = os.path.join(dest_path, "%s.repo" % repo.name)
self.logger.debug("creating: %s" % fname)
if not os.path.exists(dest_path):
utils.mkdir(dest_path)
config_file = open(fname, "w+")
config_file.write("[%s]\n" % repo.name)
config_file.write("name=%s\n" % repo.name)
optenabled = False
optgpgcheck = False
if output:
if repo.mirror_locally:
line = "baseurl=http://${server}/cobbler/repo_mirror/%s\n" % (repo.name)
else:
mstr = repo.mirror
if mstr.startswith("/"):
mstr = "file://%s" % mstr
line = "baseurl=%s\n" % mstr
config_file.write(line)
# user may have options specific to certain yum plugins
# add them to the file
for x in repo.yumopts:
config_file.write("%s=%s\n" % (x, repo.yumopts[x]))
if x == "enabled":
optenabled = True
if x == "gpgcheck":
optgpgcheck = True
else:
mstr = repo.mirror
if mstr.startswith("/"):
mstr = "file://%s" % mstr
line = "baseurl=%s\n" % mstr
if self.settings.http_port not in (80, '80'):
http_server = "%s:%s" % (self.settings.server, self.settings.http_port)
else:
http_server = self.settings.server
line = line.replace("@@server@@",http_server)
config_file.write(line)
if not optenabled:
config_file.write("enabled=1\n")
config_file.write("priority=%s\n" % repo.priority)
# FIXME: potentially might want a way to turn this on/off on a per-repo basis
if not optgpgcheck:
config_file.write("gpgcheck=0\n")
config_file.close()
return fname
# ==================================================================================
def update_permissions(self, repo_path):
"""
Verifies that permissions and contexts after an rsync are as expected.
Sending proper rsync flags should prevent the need for this, though this is largely
a safeguard.
"""
# all_path = os.path.join(repo_path, "*")
cmd1 = "chown -R root:apache %s" % repo_path
utils.subprocess_call(self.logger, cmd1)
cmd2 = "chmod -R 755 %s" % repo_path
utils.subprocess_call(self.logger, cmd2)<|fim▁end|> | if repo.mirror_locally:
rc = utils.subprocess_call(self.logger, cmd) |
<|file_name|>test_manager.py<|end_file_name|><|fim▁begin|>import datetime
import queue
import multiprocessing
import pytest
from honcho.printer import Message
from honcho.manager import Manager
from honcho.manager import SYSTEM_PRINTER_NAME
HISTORIES = {
'one': {
'processes': {'foo': {}},
'messages': (('foo', 'start', {'pid': 123}),
('foo', 'line', b'hello, world!\n'),
('foo', 'stop', {'returncode': 0})),
},
'two': {
'processes': {'bar': {}, 'foo': {}},
'messages': (('foo', 'start', {'pid': 123}),
('bar', 'start', {'pid': 124}),
('foo', 'line', b'process one\n'),
('bar', 'line', b'process two\n'),
('foo', 'stop', {'returncode': 0}),
('bar', 'stop', {'returncode': 0})),
},
'returncode': {
'processes': {'bar': {}, 'foo': {}},
'messages': (('foo', 'start', {'pid': 123}),
('bar', 'start', {'pid': 124}),
('foo', 'stop', {'returncode': 456}),
('bar', 'stop', {'returncode': 321})),
},
'output_after_stop': {
'processes': {'bar': {}, 'foo': {}},
'messages': (('foo', 'start', {'pid': 123}),
('bar', 'start', {'pid': 124}),
('foo', 'line', b'hi from foo\n'),
('bar', 'line', b'hi from bar\n'),
('foo', 'stop', {'returncode': 0}),
('bar', 'line', b'fishmongers\n'),
('bar', 'line', b'butchers\n'),
('bar', 'stop', {'returncode': -15})),
},
}
class FakeClock(object):
def now(self):
return datetime.datetime(2012, 8, 11, 12, 42)
class FakeProcessManager(object):
def terminate(self, pid):
pass
def kill(self, pid):
pass
class FakeProcess(object):
def __init__(self, cmd, name=None, colour=None, quiet=None, env=None, cwd=None):
self.cmd = cmd
self.name = name
self.colour = colour
self.quiet = quiet
self.env = env
self.cwd = cwd
self._events = None
self._options = {}
def run(self, events=None, ignore_signals=False):
self._report('run', events_passed=events is not None)
def _report(self, type, **data):
if self._events is not None:
data.update({'type': type,
'name': self.name})
self._events.put(data)
<|fim▁hole|>class Harness(object):
def __init__(self, history, manager):
self.history = history
self.manager = manager
self.events_local = []
self._q = multiprocessing.Queue()
self._rc = multiprocessing.Value('i', -999)
def run(self, wait=True):
self.manager._process_ctor = self._process_ctor
for name, options in self.history['processes'].items():
self.manager.add_process(name,
options.get('command', 'test'),
options.get('quiet', False))
def _loop(rc):
self.manager.loop()
rc.value = self.manager.returncode
self._mproc = multiprocessing.Process(target=_loop, args=(self._rc,))
self._mproc.start()
for msg in self.history['messages']:
self.send_manager(*msg)
self._mproc.join()
@property
def manager_returncode(self):
if self._rc.value == -999:
return None
return self._rc.value
def send_manager(self, process_name, type, data, **kwargs):
self.manager.events.put(Message(type=type,
data=data,
time=datetime.datetime.now(),
name=process_name,
colour=None))
def fetch_events(self):
"""
Retrieve any pending events from the queue and put them on the local
event cache
"""
while 1:
try:
self.events_local.append(self._q.get(False))
except queue.Empty:
break
def find_events(self, name=None, type=None):
self.fetch_events()
results = []
for event in self.events_local:
if name is not None and event['name'] != name:
continue
if type is not None and event['type'] != type:
continue
results.append(event)
return results
def _process_ctor(self, *args, **kwargs):
options = self.history['processes'][kwargs['name']]
p = FakeProcess(*args, **kwargs)
p._events = self._q
p._options = options
return p
class FakePrinter(object):
def __init__(self, width=0):
self.width = width
self.lines_local = []
self._q = multiprocessing.Queue()
def write(self, message):
# Called in a remote thread, so just put the message on the queue.
self._q.put(message)
def fetch_lines(self):
"""
Retrieve any pending lines from the queue and put them on the local
line cache
"""
while 1:
try:
self.lines_local.append(self._q.get(False))
except queue.Empty:
break
def got_line(self, data):
return self.find_line(data) is not None
def find_line(self, data):
self.fetch_lines()
for line in self.lines_local:
if line.data == data:
return line
class TestManager(object):
@pytest.fixture(autouse=True)
def printer(self): # noqa
self.p = FakePrinter()
self.m = Manager(printer=self.p)
self.m._clock = FakeClock()
self.m._procmgr = FakeProcessManager()
def run_history(self, name, wait=True):
self.h = Harness(HISTORIES[name], self.m)
self.h.run(wait=wait)
def test_init_sets_default_printer_width(self):
assert self.p.width == len(SYSTEM_PRINTER_NAME)
def test_add_process_updates_printer_width(self):
self.m.add_process('interesting', 'ruby server.rb')
assert self.p.width == len('interesting')
def test_add_process_sets_name(self):
proc = self.m.add_process('foo', 'ruby server.rb')
assert proc.name == 'foo'
def test_add_process_sets_cmd(self):
proc = self.m.add_process('foo', 'ruby server.rb')
assert proc.cmd == 'ruby server.rb'
def test_add_process_sets_colour(self):
proc = self.m.add_process('foo', 'ruby server.rb')
assert proc.colour is not None
def test_add_process_sets_unique_colours(self):
p1 = self.m.add_process('foo', 'ruby server.rb')
p2 = self.m.add_process('bar', 'python server.py')
assert p1.colour != p2.colour
def test_add_process_sets_quiet(self):
proc = self.m.add_process('foo', 'ruby server.rb', quiet=True)
assert proc.quiet
def test_add_process_name_must_be_unique(self):
self.m.add_process('foo', 'ruby server.rb')
with pytest.raises(AssertionError):
self.m.add_process('foo', 'another command')
def test_add_process_sets_cwd(self):
proc = self.m.add_process('foo', 'ruby server.rb', cwd='foo-dir')
assert proc.cwd == 'foo-dir'
def test_loop_with_empty_manager_returns_immediately(self):
self.m.loop()
def test_loop_calls_process_run(self):
self.run_history('one')
evts = self.h.find_events(type='run')
assert len(evts) == 1
assert evts[0]['name'] == 'foo'
assert evts[0]['events_passed']
def test_printer_receives_messages_in_correct_order(self):
self.run_history('one')
self.p.fetch_lines()
assert self.p.lines_local[0].data == 'foo started (pid=123)\n'
assert self.p.lines_local[1].data == b'hello, world!\n'
assert self.p.lines_local[2].data == 'foo stopped (rc=0)\n'
def test_printer_receives_lines_multi_process(self):
self.run_history('two')
l1 = self.p.find_line(b'process one\n')
l2 = self.p.find_line(b'process two\n')
assert l1.name == 'foo'
assert l2.name == 'bar'
def test_returncode_set_by_first_exiting_process(self):
self.run_history('returncode')
assert self.h.manager_returncode == 456
def test_printer_receives_lines_after_stop(self):
self.run_history('output_after_stop')
assert self.p.got_line(b'fishmongers\n')
assert self.p.got_line(b'butchers\n')<|fim▁end|> | |
<|file_name|>course2.rs<|end_file_name|><|fim▁begin|>extern crate cemu_smm;
use cemu_smm::course2::*;
use std::fs::{read, read_dir};
use std::io;
use std::process::Command;
fn decrypt_test_assets() -> io::Result<()> {
let save_folders = vec![
"tests/assets/saves/smm2/save1",
"tests/assets/saves/smm2/save2",
];
for folder in save_folders {
for entry in read_dir(folder)? {
let entry = entry?;
let file_name = entry.file_name();
let file_name = file_name.to_str().unwrap();
if file_name.starts_with("course_") && file_name.ends_with(".bcd") {
let path = entry.path();
let out_path: Vec<&str> = path.to_str().unwrap().split('.').collect();
let out_path = out_path[0].to_owned() + ".decrypted";
let mut command = Command::new("./decryptor_linux");
command.arg(entry.path()).arg(out_path);
command.output().unwrap();
}
}
}
Ok(())
}
#[test]
fn course_decrypt() {
decrypt_test_assets().unwrap();
let save_folders = vec![
"tests/assets/saves/smm2/save1",
"tests/assets/saves/smm2/save2",
];
for folder in save_folders {
for entry in read_dir(folder).unwrap() {
let entry = entry.unwrap();
let file_name = entry.file_name();
let file_name = file_name.to_str().unwrap();
if file_name.starts_with("course_data_") && file_name.ends_with(".bcd") {
let path = entry.path();
let out_path: Vec<&str> = path.to_str().unwrap().split('.').collect();
let out_path = out_path[0].to_owned() + ".decrypted";
let expected = read(out_path).unwrap();
let decrypted = Course2::decrypt(read(path).unwrap());
// @simontime's implementation truncates non relevant bytes, which we won't do
assert_eq!(decrypted[0x10..decrypted.len() - 0x30], expected[..]);
}
}
}
}
#[test]
fn course_encrypt() {
let save_folders = vec![
"tests/assets/saves/smm2/save1",
"tests/assets/saves/smm2/save2",
];
for folder in save_folders {
for entry in read_dir(folder).unwrap() {
let entry = entry.unwrap();
let file_name = entry.file_name();
let file_name = file_name.to_str().unwrap();
if file_name.starts_with("course_data_") && file_name.ends_with(".bcd") {
let path = entry.path();
let expected = read(&path).unwrap();
let decrypted = Course2::decrypt(read(path).unwrap());
let encrypted = Course2::encrypt(decrypted);
assert_eq!(encrypted[..], expected[..]);
}
}
}
}
#[test]
fn course2_from_packed() -> Result<(), failure::Error> {
decrypt_test_assets().unwrap();
use std::io::Write;
use zip::ZipWriter;
let w = std::io::Cursor::new(Vec::new());
let mut zip = ZipWriter::new(w);
let options =
zip::write::FileOptions::default().compression_method(zip::CompressionMethod::Deflated);
let course_120 = include_bytes!("assets/saves/smm2/save1/course_data_120.bcd");
let course_thumb_120 = include_bytes!("assets/saves/smm2/save1/course_thumb_120.btl");
let course_121 = include_bytes!("assets/saves/smm2/save1/course_data_121.bcd");
let course_thumb_121 = include_bytes!("assets/saves/smm2/save1/course_thumb_121.btl");
zip.start_file("course_data_120.bcd", options.clone())?;
zip.write_all(course_120)?;
zip.start_file("course_thumb_120.btl", options.clone())?;
zip.write_all(course_thumb_120)?;
zip.start_file("course_data_121.bcd", options.clone())?;<|fim▁hole|> zip.start_file("course_thumb_121.btl", options.clone())?;
zip.write_all(course_thumb_121)?;
let zip_file = zip.finish()?.into_inner();
let res = Course2::from_packed(&zip_file[..])?;
assert_eq!(
res.get(0).unwrap().get_course_data(),
&Course2::decrypt(course_120.to_vec())
);
assert_eq!(
&res.get(0)
.unwrap()
.get_course_thumb()
.unwrap()
.get_encrypted()[..],
&course_thumb_120[..]
);
assert_eq!(
res.get(1).unwrap().get_course_data(),
&Course2::decrypt(course_121.to_vec())
);
assert_eq!(
&res.get(1)
.unwrap()
.get_course_thumb()
.unwrap()
.get_encrypted()[..],
&course_thumb_121[..]
);
Ok(())
}
#[test]
fn course2_from_packed_2() {
let save_files = vec![
"tests/assets/saves/smm2/save1.zip",
"tests/assets/saves/smm2/save2.zip",
"tests/assets/saves/smm2/save3.zip",
"tests/assets/saves/smm2/save4.zip",
"tests/assets/saves/smm2/save5.zip",
"tests/assets/saves/smm2/save6.zip",
];
for save in save_files {
let save = read(save).unwrap();
let courses = Course2::from_packed(&save).unwrap();
assert_eq!(courses.len(), 60);
for course in courses {
let header = course.get_course().get_header();
assert!(
(header.game_version as f32).log2() as u32 <= header.completion_version,
"testing game version {} against completion version {}",
header.game_version,
header.completion_version
);
}
}
}<|fim▁end|> | zip.write_all(course_121)?; |
<|file_name|>regions-escape-bound-fn.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your<|fim▁hole|>// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn with_int(f: |x: &int|) {
let x = 3;
f(&x);
}
fn main() {
let mut x: Option<&int> = None; //~ ERROR cannot infer
with_int(|y| x = Some(y));
}<|fim▁end|> | |
<|file_name|>control.cc<|end_file_name|><|fim▁begin|>#include "control.h"
Control::Control (int special_index) :
_special_index (special_index)
{
}
void Control::process_g (double delta_t)
{
}
void Control::process_c (double delta_t)
{
for (size_t i = 0; i < _control_outs.size(); ++i)
{
_control_outs[i] = *(_control_ins[0] + _special_index + i);
}
}
TrigControl::TrigControl (int special_index) :
_special_index (special_index)
{
}
void TrigControl::process_g (double delta_t)
{
}
void TrigControl::process_c (double delta_t)
{
// TODO make it work for synth-wide controls, not just local ones
for (size_t i = 0; i < _control_outs.size(); ++i)
{
_control_outs[i] = *(_control_ins[0] + _special_index + i);
*(_control_ins[0] + _special_index + i) = 0.f;<|fim▁hole|><|fim▁end|> | }
} |
<|file_name|>MainActivity.java<|end_file_name|><|fim▁begin|>package com.mdg.droiders.samagra.shush;
import android.Manifest;
import android.app.NotificationManager;
import android.content.ContentValues;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.pm.PackageManager;
import android.database.Cursor;
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
import android.provider.Settings;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.support.v4.app.ActivityCompat;
import android.support.v4.app.LoaderManager;
import android.support.v4.content.Loader;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.CheckBox;
import android.widget.CompoundButton;
import android.widget.Switch;
import android.widget.Toast;
import com.google.android.gms.common.ConnectionResult;
import com.google.android.gms.common.GooglePlayServicesNotAvailableException;
import com.google.android.gms.common.GooglePlayServicesRepairableException;
import com.google.android.gms.common.api.GoogleApiClient;
import com.google.android.gms.common.api.PendingResult;
import com.google.android.gms.common.api.ResultCallback;
import com.google.android.gms.location.LocationServices;
import com.google.android.gms.location.places.Place;
import com.google.android.gms.location.places.PlaceBuffer;
import com.google.android.gms.location.places.Places;
import com.google.android.gms.location.places.ui.PlacePicker;
import com.mdg.droiders.samagra.shush.data.PlacesContract;
import java.util.ArrayList;
import java.util.List;
public class MainActivity extends AppCompatActivity implements
GoogleApiClient.ConnectionCallbacks,
GoogleApiClient.OnConnectionFailedListener,
LoaderManager.LoaderCallbacks<Cursor>{
//constants
private static final String LOG_TAG = "MainActivity";
private static final int PERMISSIONS_REQUEST_FINE_LOCATION = 111;
private static final int PLACE_PICKER_REQUEST = 1;
//member variables
private PlaceListAdapter mAdapter;
private RecyclerView mRecyclerView;
private Button addPlaceButton;
private GoogleApiClient mClient;
private Geofencing mGeofencing;
private boolean mIsEnabled;
private CheckBox mRingerPermissionCheckBox;
/**
* Called when the activity is starting.
*
     * @param savedInstanceState Bundle that contains the data provided to onSaveInstanceState
*/
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
mRecyclerView = (RecyclerView) findViewById(R.id.places_list_recycler_view);
mRecyclerView.setLayoutManager(new LinearLayoutManager(this));
mAdapter = new PlaceListAdapter(this,null);
mRecyclerView.setAdapter(mAdapter);
mRingerPermissionCheckBox = (CheckBox) findViewById(R.id.ringer_permissions_checkbox);
mRingerPermissionCheckBox.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
onRingerPermissionsClicked();
}
});
Switch onOffSwitch = (Switch) findViewById(R.id.enable_switch);
mIsEnabled = getPreferences(MODE_PRIVATE).getBoolean(getString(R.string.setting_enabled),false);
onOffSwitch.setChecked(mIsEnabled);
onOffSwitch.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() {
@Override
public void onCheckedChanged(CompoundButton compoundButton, boolean isChecked) {
SharedPreferences.Editor editor = getPreferences(MODE_PRIVATE).edit();
editor.putBoolean(getString(R.string.setting_enabled),isChecked);
editor.commit();
if (isChecked) mGeofencing.registerAllGeofences();
else mGeofencing.unRegisterAllGeofences();
}
});
addPlaceButton = (Button) findViewById(R.id.add_location_button);
addPlaceButton.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
onAddPlaceButtonClicked();
}
});
mClient = new GoogleApiClient.Builder(this)
.addConnectionCallbacks(this)
.addOnConnectionFailedListener(this)
.addApi(LocationServices.API)
.addApi(Places.GEO_DATA_API)
.enableAutoManage(this,this)
.build();
mGeofencing = new Geofencing(mClient,this);
}
/**
* Button click event handler for the add place button.
*/
private void onAddPlaceButtonClicked() {
if (ActivityCompat.checkSelfPermission(this, Manifest.permission.ACCESS_FINE_LOCATION)!=PackageManager.PERMISSION_GRANTED){
Toast.makeText(this, getString(R.string.need_location_permission_message), Toast.LENGTH_SHORT).show();
return;
}
Toast.makeText(this, getString(R.string.location_permissions_granted_message), Toast.LENGTH_SHORT).show();
try {
PlacePicker.IntentBuilder builder = new PlacePicker.IntentBuilder();
Intent placePickerIntent = builder.build(this);
startActivityForResult(placePickerIntent,PLACE_PICKER_REQUEST);
} catch (GooglePlayServicesRepairableException e) {
e.printStackTrace();
} catch (GooglePlayServicesNotAvailableException e) {
e.printStackTrace();
}
}
/***
     * Called when the Place Picker Activity returns with a selected place (or after canceling)
*
* @param requestCode The request code passed when calling startActivityForResult
* @param resultCode The result code specified by the second activity
* @param data The Intent that carries the result data.
*/
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
if (requestCode==PLACE_PICKER_REQUEST&&resultCode==RESULT_OK){
Place place = PlacePicker.getPlace(this,data);
if (place==null){
Log.i(LOG_TAG,"No place selected");
return;
}
String placeName = place.getName().toString();
String placeAddress = place.getAddress().toString();
String placeId = place.getId();
ContentValues values = new ContentValues();
values.put(PlacesContract.PlaceEntry.COLUMN_PLACE_ID,placeId);
getContentResolver().insert(PlacesContract.PlaceEntry.CONTENT_URI,values);
refreshPlacesData();
}
}
@Override
protected void onResume() {
super.onResume();
//initialise location permissions checkbox
CheckBox locationPermissionsCheckBox = (CheckBox) findViewById(R.id.location_permission_checkbox);
if (ActivityCompat.checkSelfPermission(MainActivity.this, Manifest.permission.ACCESS_FINE_LOCATION)!= PackageManager.PERMISSION_GRANTED){
locationPermissionsCheckBox.setChecked(false);
}
else {
locationPermissionsCheckBox.setChecked(true);
locationPermissionsCheckBox.setEnabled(false);
}
NotificationManager notificationManager = (NotificationManager) getSystemService(NOTIFICATION_SERVICE);
if (Build.VERSION.SDK_INT>=24 && !notificationManager.isNotificationPolicyAccessGranted()){
mRingerPermissionCheckBox.setChecked(false);
}
else {
mRingerPermissionCheckBox.setChecked(true);
mRingerPermissionCheckBox.setEnabled(false);
}
}<|fim▁hole|> @Override
public Loader<Cursor> onCreateLoader(int id, Bundle args) {
return null;
}
@Override
public void onLoadFinished(Loader<Cursor> loader, Cursor data) {
}
@Override
public void onLoaderReset(Loader<Cursor> loader) {
}
/**
* Called when the google API client is successfully connected.
* @param bundle Bundle of data provided to the clients by google play services.
*/
@Override
public void onConnected(@Nullable Bundle bundle) {
Log.i(LOG_TAG,"Api connection successful");
Toast.makeText(this, "onConnected", Toast.LENGTH_SHORT).show();
refreshPlacesData();
}
/**
* Called when the google API client is suspended
* @param cause The reason for the disconnection. Defined by the constant CAUSE_*.
*/
@Override
public void onConnectionSuspended(int cause) {
Log.i(LOG_TAG,"API Client connection suspended.");
}
/**
     * Called when the Google API client failed to connect to Google Play Services.
     * @param connectionResult A ConnectionResult that can be used to resolve the error.
*/
@Override
public void onConnectionFailed(@NonNull ConnectionResult connectionResult) {
Log.i(LOG_TAG,"API Connection client suspended.");
Toast.makeText(this, "onConectionFailed", Toast.LENGTH_SHORT).show();
}
public void refreshPlacesData(){
Uri uri = PlacesContract.PlaceEntry.CONTENT_URI;
Cursor dataCursor = getContentResolver().query(uri,
null,
null,
null,null,null);
if (dataCursor==null||dataCursor.getCount()==0) return;
List<String> placeIds = new ArrayList<String>();
while (dataCursor.moveToNext()){
placeIds.add(dataCursor.getString(dataCursor.getColumnIndex(PlacesContract.PlaceEntry.COLUMN_PLACE_ID)));
}
PendingResult<PlaceBuffer> placeBufferPendingResult = Places.GeoDataApi.getPlaceById(mClient,
placeIds.toArray(new String[placeIds.size()]));
placeBufferPendingResult.setResultCallback(new ResultCallback<PlaceBuffer>() {
@Override
public void onResult(@NonNull PlaceBuffer places) {
mAdapter.swapPlaces(places);
mGeofencing.updateGeofencesList(places);
if (mIsEnabled) mGeofencing.registerAllGeofences();
}
});
}
private void onRingerPermissionsClicked(){
Intent intent = null;
if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.M) {
intent = new Intent(Settings.ACTION_NOTIFICATION_POLICY_ACCESS_SETTINGS);
}
startActivity(intent);
}
public void onLocationPermissionClicked (View view){
ActivityCompat.requestPermissions(this, new String[]{Manifest.permission.ACCESS_FINE_LOCATION},
PERMISSIONS_REQUEST_FINE_LOCATION);
}
}<|fim▁end|> | |
<|file_name|>lib-gode-rules.go<|end_file_name|><|fim▁begin|>// lib-gode-rules.go
package gode_rules
import (
"reflect"
"bytes"
"encoding/gob"
"encoding/json"
"fmt"
"log"
"strings"
"github.com/robertkrimen/otto"
)
type RuleEngine struct {
Rules []Rule
ActiveRules []Rule
IgnoreFactChanges bool
Otto *otto.Otto
}
type Rule map[string]interface{}
type Options map[string]interface{}
type Fact map[string]interface{}
type Filter map[string]interface{}
func quicksortRules(rules []Rule, lo int, hi int) {
if len(rules) == 2 && lo < hi {
if rules[0].higherThan(&rules[1]) {
rules[0], rules[1] = rules[1], rules[0]
}
return
}
if len(rules) > 2 && lo < hi {
p := partitionRules(rules, lo, hi)
quicksortRules(rules, lo, p - 1)
quicksortRules(rules, p + 1, hi)
}
}
func partitionRules(rules []Rule, lo int, hi int) int {
	pivot := rules[hi] // pivot on the last element of the current [lo, hi] sub-range
i := lo
for j := lo; j <= hi; j++ {
if rules[j].higherThan(&pivot) {
rules[j], rules[i] = rules[i], rules[j]
i += 1
}
}
rules[hi], rules[i] = rules[i], rules[hi]
return i
}
func (thisRule *Rule) higherThan(otherRule *Rule) bool {
a, ok_a := (*thisRule)["priority"]
b, ok_b := (*otherRule)["priority"]
if ok_a && ok_b {
a_i, ok_a := a.(int)
b_i, ok_b := b.(int)
if ok_a && ok_b {
return b_i < a_i
} else if ok_a {
return true
} else {
return false
}
} else if ok_a {
_, ok_a := a.(int)
if ok_a {
return true
} else {
return false
}
} else {
return false
}
}
func NewRuleEngine() *RuleEngine {
return NewRuleEngineFromRules(nil, nil)
}
func NewRuleEngineFromRule(rule Rule, options Options) *RuleEngine {
return NewRuleEngineFromRules([]Rule{ rule }, options)
}
func NewRuleEngineFromRules(rules []Rule, options Options) *RuleEngine {
R := new(RuleEngine)
R.Otto = otto.New()
R.Init(nil)
if rules != nil {
R.RegisterRules(rules)
}
if options != nil {
if value, ok := options["ignoreFactChanges"]; ok {
if ignoreFactChanges, ok := value.(bool); ok {
R.IgnoreFactChanges = ignoreFactChanges
} else {
R.IgnoreFactChanges = false
}
}
} else {
R.IgnoreFactChanges = false
}
return R
}
func (R *RuleEngine) Init(rules []Rule) {
R.Rules = []Rule{}
R.ActiveRules = []Rule{}
}
func (R *RuleEngine) RegisterRule(rule Rule) {
if rule != nil {
R.Rules = append(R.Rules, rule)
}
R.Sync()
}
func (R *RuleEngine) RegisterRules(rules []Rule) {
if rules != nil {
R.Rules = append(R.Rules, rules...)
}
R.Sync()
}
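// Sync rebuilds ActiveRules from Rules: rules without an "on" flag are switched on,
// only rules with "on" == true are kept, and the result is sorted so that
// higher-priority rules are evaluated first.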
func (R *RuleEngine) Sync() {
activeRules := []Rule{}
for _, rule := range R.Rules {
if _, ok := rule["on"]; !ok {
rule["on"] = true
}
if value, ok := rule["on"].(bool); ok && value {
activeRules = append(activeRules, rule)
}
}
quicksortRules(activeRules, 0, len(activeRules)-1)
R.ActiveRules = activeRules
}
func copyFact(fact Fact) *Fact {
var err error
factCopy := Fact{ }
buffer := new(bytes.Buffer)
encoder := gob.NewEncoder(buffer)
decoder := gob.NewDecoder(buffer)
if err = encoder.Encode(fact); err != nil {
log.Fatalln(err.Error())
}
if err = decoder.Decode(&factCopy); err != nil {
log.Fatalln(err.Error())
}
return &factCopy
}
func (thisFact *Fact) equals(thatFact *Fact) bool {
return reflect.DeepEqual(*thisFact, *thatFact)
}
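// Execute runs the active rules against fact inside the embedded Otto interpreter.
// Each rule's "condition" script sees the fact as F and must call WHEN(boolean);
// a matching rule's "consequence" script may call RESTART(), STOP() or NEXT() to
// control the run. When the run completes, extf is invoked with the final session.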
func (R *RuleEngine) Execute(fact *Fact, extf func(*Fact)) {
R.Otto.Set("LOG", func(call otto.FunctionCall) otto.Value {
		log.Println(call.Argument(0).String())
return otto.UndefinedValue()
})
complete := false
(*fact)["result"] = true
session := copyFact(*fact)
lastSession := copyFact(*fact)
index := 0
RESTART := func() {
index = 0
}
NEXT := func() {
if R.IgnoreFactChanges || lastSession.equals(session) {
index += 1
} else {
lastSession = copyFact(*session)
RESTART()
}
}
STOP := func() {
complete = true
RESTART()
}
WHEN := func(ruleIndex int, outcome bool) {
if outcome {
if val, ok := R.ActiveRules[ruleIndex]["consequence"]; ok {
if _consequence, ok := val.(string); ok {
var err error
if err = R.Otto.Set("RESTART", func(call otto.FunctionCall) otto.Value {
RESTART()
return otto.UndefinedValue()
}); err != nil {
log.Fatal(err.Error())
}
if err = R.Otto.Set("STOP", func(call otto.FunctionCall) otto.Value {
STOP()
return otto.UndefinedValue()
}); err != nil {
log.Fatal(err.Error())
}
if err = R.Otto.Set("NEXT", func(call otto.FunctionCall) otto.Value {
NEXT()
return otto.UndefinedValue()
}); err != nil {
log.Fatal(err.Error())
}
if _, err := R.Otto.Run(fmt.Sprintf("var F = %v;", toJSON(session))); err != nil {
log.Fatal(err.Error())
}
if _, err = R.Otto.Run(_consequence); err != nil {
log.Fatal(err.Error())
}
var (
oval otto.Value
ival interface{}
fval map[string]interface{}
f Fact
)
if oval, err = R.Otto.Get("F"); err != nil {
log.Fatal(err.Error())
}
if ival, err = oval.Export(); err != nil {
log.Fatal(err.Error())
}
if fval, ok = ival.(map[string]interface{}); !ok {
log.Fatal("WHEN: F is not a Fact")
}
f = Fact(fval)
session = &f
} else {
log.Fatal("WHEN: consequence not a string")
}
} else {
log.Fatal("WHEN: rule has no field 'consequence'")
}
} else {
NEXT()
}
}
for index < len(R.ActiveRules) {
if complete {
break
}
if value, ok := R.ActiveRules[index]["condition"]; ok {
if _condition, ok := value.(string); ok {
var err error
when := func(call otto.FunctionCall) otto.Value {
if len(call.ArgumentList) == 1 && call.Argument(0).IsBoolean() {
var b bool<|fim▁hole|> } else {
WHEN(index, b)
}
} else {
log.Fatal("Execute: WHEN called with wrong argument(s)")
}
return otto.UndefinedValue()
}
if err := R.Otto.Set("WHEN", when); err != nil {
log.Fatal(err.Error())
}
if _, err = R.Otto.Run(fmt.Sprintf("var F = %v;", toJSON(session))); err != nil {
log.Fatal(err.Error())
}
if _, err = R.Otto.Run(_condition); err != nil {
log.Fatal(err.Error())
}
var (
oval otto.Value
ival interface{}
fval map[string]interface{}
f Fact
)
if oval, err = R.Otto.Get("F"); err != nil {
log.Fatal(err.Error())
}
if ival, err = oval.Export(); err != nil {
log.Fatal(err.Error())
}
if fval, ok = ival.(map[string]interface{}); !ok {
log.Fatal("Execute: F is not a Fact")
}
f = Fact(fval)
session = &f
} else {
log.Fatal("Execute: condition not a string")
}
} else {
log.Fatal("Execute: rule has no field 'condition'")
}
}
extf(session)
}
func (R *RuleEngine) FindRules(filter Filter) []Rule {
if filter == nil {
return R.Rules
} else {
var filteredRules = []Rule{ }
RuleCheck:
		for _, rule := range R.Rules {
			for filterKey, filterValue := range filter {
				if elem, ok := rule[filterKey]; !ok || elem != filterValue {
					// Any non-matching filter key disqualifies this rule.
					continue RuleCheck
				}
			}
			// All filter keys matched; include the rule exactly once.
			filteredRules = append(filteredRules, rule)
		}
return filteredRules
}
}
func (R *RuleEngine) Turn(state string, filter Filter) {
var ruleState bool
if state == "on" || state == "ON" {
ruleState = true
} else {
ruleState = false
}
rules := R.FindRules(filter)
for _, rule := range rules {
rule["on"] = ruleState
}
R.Sync()
}
func (R *RuleEngine) Prioritize(priority int, filter Filter) {
rules := R.FindRules(filter)
for _, rule := range rules {
rule["priority"] = priority
}
R.Sync()
}
func (R *RuleEngine) ToJSON() string {
return toJSON(R.Rules)
}
func (R *RuleEngine) FromJSON(rules string) {
R.Init(nil)
json := fromJSON(rules)
var (
arr []interface{}
rule map[string]interface{}
ok bool
)
if arr, ok = json.([]interface{}); !ok {
if rule, ok = json.(map[string]interface{}); ok {
R.RegisterRule(Rule(rule))
return
} else {
log.Fatal("FromJSON: string is neither rule or rules")
}
}
newRules := make([]Rule, len(arr))
for i, val := range arr {
if rule, ok = val.(map[string]interface{}); !ok {
log.Fatal("FromJSON: array doesn't contain rules")
}
newRules[i] = Rule(rule)
}
R.RegisterRules(newRules)
}
func toJSON(v interface{}) string {
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
if err := enc.Encode(v); err != nil {
log.Fatalln(err.Error())
}
return string(buf.Bytes())
}
func fromJSON(j string) interface{} {
var v interface{}
dec := json.NewDecoder(strings.NewReader(j))
if err := dec.Decode(&v); err != nil {
log.Fatalln(err.Error())
}
return v
}<|fim▁end|> | if b, err = call.Argument(0).ToBoolean(); err != nil {
log.Fatal(err.Error()) |
<|file_name|>float.rs<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|> | pub fn f32_eq(f1: f32, f2: f32, epsilon: f32) -> bool {
let diff = (f1 - f2).abs();
diff < epsilon.abs()
} |
<|file_name|>auth.service.ts<|end_file_name|><|fim▁begin|>module op.common {
'use strict';
export interface IAuthService {
register: (user: IUser) => ng.IPromise<string>;
login: (email: string, password: string) => ng.IPromise<string>;
logout: () => void;
}
class AuthService implements IAuthService {
/* @ngInject */
constructor(public AUTH_URL: string,
public $window: ng.IWindowService,
public $http: ng.IHttpService,
public $q: ng.IQService,
public SessionService: op.common.ISessionService) {
}
register(user: IUser): ng.IPromise<string> {
var deferred: ng.IDeferred<string> = this.$q.defer();
var requestConfig: ng.IRequestConfig = {
method: 'POST',
url: this.AUTH_URL + '/register',
headers: {
'Content-Type': 'application/json'
},
data: user
};
this.$http(requestConfig)
.success((response: string) => {
deferred.resolve(response);
})
.error((response: string) => deferred.reject(response));
return deferred.promise;
}
login(email: string, password: string): ng.IPromise<string> {
var deferred: ng.IDeferred<string> = this.$q.defer();
var auth: string = this.$window.btoa(email + ':' + password);
var requestConfig: ng.IRequestConfig = {
method: 'POST',
url: this.AUTH_URL + '/login',
headers: {
'Authorization': 'Basic ' + auth
}
};
this.$http(requestConfig)
.success((response: IToken) => {
var token: string = response.token;
this.SessionService.setUser(response);
deferred.resolve(token);
})
.error((response: string) => deferred.reject(response));
return deferred.promise;
}
logout(): void {
this.SessionService.unsetUser();
}
}
  // register AuthService
angular.module('op.common')<|fim▁hole|>}<|fim▁end|> | .service('AuthService', AuthService); |
<|file_name|>test_resources.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#_____________________________________________________________________________
#
# This file is part of txrecaptcha, a Twisted reCAPTCHA client.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <[email protected]>
# Matthew Finkel 0x017DD169EA793BE2 <[email protected]>
# :copyright: (c) 2013-2015, Isis Lovecruft
# (c) 2013-2015, Matthew Finkel
# (c) 2013-2015, The Tor Project, Inc.
# :license: see LICENSE file for licensing information
#_____________________________________________________________________________
"""Unittests for the txrecaptcha.resources module."""
from __future__ import print_function
import logging
import ipaddr
import random
from BeautifulSoup import BeautifulSoup
from twisted.internet import reactor
from twisted.internet import task
from twisted.internet.error import AlreadyCalled
from twisted.internet.error import AlreadyCancelled
from twisted.trial import unittest
from twisted.web.resource import Resource
from twisted.web.test import requesthelper
from txrecaptcha import resources
# For additional logger output for debugging, comment out the following:
logging.disable(50)
# and then uncomment the following line:
#resources.logging.getLogger().setLevel(10)
class MockWebResource(Resource):<|fim▁hole|>
:type request: :api:`twisted.web.server.Request`
:param request: An incoming request.
"""
try:
template = resources.lookup.get_template('index.html')
rendered = template.render(strings,
rtl=rtl,
lang=langs[0])
except Exception as err:
rendered = resources.replaceErrorPage(err)
return rendered
class DummyRequest(requesthelper.DummyRequest):
"""Wrapper for :api:`twisted.test.requesthelper.DummyRequest` to add
redirect support.
"""
def __init__(self, *args, **kwargs):
requesthelper.DummyRequest.__init__(self, *args, **kwargs)
self.redirect = self._redirect(self)
def URLPath(self):
"""Fake the missing Request.URLPath too."""
return self.uri
def _redirect(self, request):
"""Stub method to add a redirect() method to DummyResponse."""
newRequest = type(request)
newRequest.uri = request.uri
return newRequest
class ReCaptchaProtectedResourceTests(unittest.TestCase):
"""Tests for :mod:`txrecaptcha.resources.ReCaptchaProtectedResource`."""
def setUp(self):
"""Create a :class:`MockWebResource` and protect it with a
:class:`ReCaptchaProtectedResource`.
"""
self.timeout = 10.0 # Can't take longer than that, right?
# Set up our resources to fake a minimal HTTP(S) server:
self.pagename = b'captcha.html'
self.root = Resource()
# (None, None) is the (distributor, scheduleInterval):
self.protectedResource = MockWebResource()
self.captchaResource = resources.ReCaptchaProtectedResource(
publicKey='23',
secretKey='42',
remoteIP='111.111.111.111',
useForwardedHeader=True,
protectedResource=self.protectedResource)
self.root.putChild(self.pagename, self.captchaResource)
# Set up the basic parts of our faked request:
self.request = DummyRequest([self.pagename])
def tearDown(self):
"""Cleanup method for removing timed out connections on the reactor.
This seems to be the solution for the dirty reactor due to
``DelayedCall``s which is mentioned at the beginning of this
file. There doesn't seem to be any documentation anywhere which
proposes this solution, although this seems to solve the problem.
"""
for delay in reactor.getDelayedCalls():
try:
delay.cancel()
except (AlreadyCalled, AlreadyCancelled):
pass
def test_renderDeferred_invalid(self):
""":meth:`_renderDeferred` should redirect a ``Request`` (after the
        CAPTCHA was NOT successfully solved) which results from a
``Deferred``'s callback.
"""
self.request.method = b'POST'
def testCB(request):
"""Check the ``Request`` returned from ``_renderDeferred``."""
self.assertIsInstance(request, DummyRequest)
soup = BeautifulSoup(b''.join(request.written)).find('meta')['http-equiv']
self.assertEqual(soup, 'refresh')
d = task.deferLater(reactor, 0, lambda x: x, (False, self.request))
d.addCallback(self.captchaResource._renderDeferred)
d.addCallback(testCB)
return d
def test_renderDeferred_valid(self):
""":meth:`_renderDeferred` should correctly render a ``Request`` (after
the CAPTCHA has been successfully solved) which results from a
``Deferred``'s callback.
"""
self.request.method = b'POST'
def testCB(request):
"""Check the ``Request`` returned from ``_renderDeferred``."""
self.assertIsInstance(request, DummyRequest)
html = b''.join(request.written)
self.assertSubstring('Sorry! Something went wrong with your request.',
html)
d = task.deferLater(reactor, 0, lambda x: x, (True, self.request))
d.addCallback(self.captchaResource._renderDeferred)
d.addCallback(testCB)
return d
def test_renderDeferred_nontuple(self):
""":meth:`_renderDeferred` should correctly render a ``Request`` (after
the CAPTCHA has been successfully solved) which results from a
``Deferred``'s callback.
"""
self.request.method = b'POST'
def testCB(request):
"""Check the ``Request`` returned from ``_renderDeferred``."""
self.assertIs(request, None)
d = task.deferLater(reactor, 0, lambda x: x, (self.request))
d.addCallback(self.captchaResource._renderDeferred)
d.addCallback(testCB)
return d
def test_checkSolution_blankFields(self):
""":meth:`txrecaptcha.resources.ReCaptchaProtectedResource.checkSolution`
should return a redirect if is the solution field is blank.
"""
self.request.method = b'POST'
self.request.addArg('captcha_challenge_field', '')
self.request.addArg('captcha_response_field', '')
self.assertEqual((False, self.request),
self.successResultOf(
self.captchaResource.checkSolution(self.request)))
def test_getRemoteIP_useRandomIP(self):
"""Check that removing our remoteip setting produces a random IP."""
self.captchaResource.remoteIP = None
ip = self.captchaResource.getRemoteIP()
realishIP = ipaddr.IPv4Address(ip).compressed
self.assertTrue(realishIP)
self.assertNotEquals(realishIP, '111.111.111.111')
def test_getRemoteIP_useConfiguredIP(self):
"""Check that our remoteip setting is used if configured."""
ip = self.captchaResource.getRemoteIP()
realishIP = ipaddr.IPv4Address(ip).compressed
self.assertTrue(realishIP)
self.assertEquals(realishIP, '111.111.111.111')
def test_render_GET_missingTemplate(self):
"""render_GET() with a missing template should raise an error and
return the result of replaceErrorPage().
"""
oldLookup = resources.lookup
try:
resources.lookup = None
self.request.method = b'GET'
page = self.captchaResource.render_GET(self.request)
errorPage = resources.replaceErrorPage(Exception('kablam'))
self.assertEqual(page, errorPage)
finally:
resources.lookup = oldLookup
def test_render_POST_blankFields(self):
"""render_POST() with a blank 'captcha_response_field' should return
a redirect to the CaptchaProtectedResource page.
"""
self.request.method = b'POST'
self.request.addArg('captcha_challenge_field', '')
self.request.addArg('captcha_response_field', '')
page = self.captchaResource.render_POST(self.request)
self.assertEqual(page, resources.server.NOT_DONE_YET)
def test_render_POST_wrongSolution(self):
"""render_POST() with a wrong 'captcha_response_field' should return
a redirect to the CaptchaProtectedResource page.
"""
expectedChallenge = '23232323232323232323'
expectedResponse = 'awefawefaefawefaewf'
self.request.method = b'POST'
self.request.addArg('captcha_challenge_field', expectedChallenge)
self.request.addArg('captcha_response_field', expectedResponse)
page = self.captchaResource.render_POST(self.request)
self.assertEqual(page, resources.server.NOT_DONE_YET)<|fim▁end|> | """A web resource for protecting."""
def render_GET(self, request):
"""Handles requests for the mock resource. |
<|file_name|>print_last_modif_date.js<|end_file_name|><|fim▁begin|>function print_last_modif_date(v) {
document.write("Last updated " + v.substr(7, 19));<|fim▁hole|><|fim▁end|> | } |
<|file_name|>factortools.py<|end_file_name|><|fim▁begin|>"""Polynomial factorization routines in characteristic zero. """
from __future__ import print_function, division
from sympy.polys.galoistools import (
gf_from_int_poly, gf_to_int_poly,
gf_lshift, gf_add_mul, gf_mul,
gf_div, gf_rem,
gf_gcdex,
gf_sqf_p,
gf_factor_sqf, gf_factor)
from sympy.polys.densebasic import (
dup_LC, dmp_LC, dmp_ground_LC,
dup_TC,
dup_convert, dmp_convert,
dup_degree, dmp_degree,
dmp_degree_in, dmp_degree_list,
dmp_from_dict,
dmp_zero_p,
dmp_one,
dmp_nest, dmp_raise,
dup_strip,
dmp_ground,
dup_inflate,
dmp_exclude, dmp_include,
dmp_inject, dmp_eject,
dup_terms_gcd, dmp_terms_gcd)
from sympy.polys.densearith import (
dup_neg, dmp_neg,
dup_add, dmp_add,
dup_sub, dmp_sub,
dup_mul, dmp_mul,
dup_sqr,
dmp_pow,
dup_div, dmp_div,
dup_quo, dmp_quo,
dmp_expand,
dmp_add_mul,
dup_sub_mul, dmp_sub_mul,
dup_lshift,
dup_max_norm, dmp_max_norm,
dup_l1_norm,
dup_mul_ground, dmp_mul_ground,
dup_quo_ground, dmp_quo_ground)
from sympy.polys.densetools import (
dup_clear_denoms, dmp_clear_denoms,
dup_trunc, dmp_ground_trunc,
dup_content,
dup_monic, dmp_ground_monic,
dup_primitive, dmp_ground_primitive,
dmp_eval_tail,
dmp_eval_in, dmp_diff_eval_in,
dmp_compose,
dup_shift, dup_mirror)
from sympy.polys.euclidtools import (
dmp_primitive,
dup_inner_gcd, dmp_inner_gcd)
from sympy.polys.sqfreetools import (
dup_sqf_p,
dup_sqf_norm, dmp_sqf_norm,
dup_sqf_part, dmp_sqf_part)
from sympy.polys.polyutils import _sort_factors
from sympy.polys.polyconfig import query
from sympy.polys.polyerrors import (
ExtraneousFactors, DomainError, CoercionFailed, EvaluationFailed)
from sympy.ntheory import nextprime, isprime, factorint
from sympy.utilities import subsets
from math import ceil as _ceil, log as _log
from sympy.core.compatibility import range
def dup_trial_division(f, factors, K):
"""
Determine multiplicities of factors for a univariate polynomial
using trial division.
"""
result = []
for factor in factors:
k = 0
while True:
q, r = dup_div(f, factor, K)
if not r:
f, k = q, k + 1
else:
break
result.append((factor, k))
return _sort_factors(result)
def dmp_trial_division(f, factors, u, K):
"""
Determine multiplicities of factors for a multivariate polynomial
using trial division.
"""
result = []
for factor in factors:
k = 0
while True:
q, r = dmp_div(f, factor, u, K)
if dmp_zero_p(r, u):
f, k = q, k + 1
else:
break
result.append((factor, k))
return _sort_factors(result)
def dup_zz_mignotte_bound(f, K):
"""Mignotte bound for univariate polynomials in `K[x]`. """
a = dup_max_norm(f, K)
b = abs(dup_LC(f, K))
n = dup_degree(f)
return K.sqrt(K(n + 1))*2**n*a*b
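# Illustrative check (not part of the original module): for f = x**3 + 2*x + 1 over ZZ
# we have a = 2, b = 1, n = 3, so the bound is sqrt(4)*2**3*2*1 = 32.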
def dmp_zz_mignotte_bound(f, u, K):
"""Mignotte bound for multivariate polynomials in `K[X]`. """
a = dmp_max_norm(f, u, K)
b = abs(dmp_ground_LC(f, u, K))
n = sum(dmp_degree_list(f, u))
return K.sqrt(K(n + 1))*2**n*a*b
def dup_zz_hensel_step(m, f, g, h, s, t, K):
"""
One step in Hensel lifting in `Z[x]`.
Given positive integer `m` and `Z[x]` polynomials `f`, `g`, `h`, `s`
and `t` such that::
f = g*h (mod m)
s*g + t*h = 1 (mod m)
lc(f) is not a zero divisor (mod m)
lc(h) = 1
deg(f) = deg(g) + deg(h)
deg(s) < deg(h)
deg(t) < deg(g)
returns polynomials `G`, `H`, `S` and `T`, such that::
f = G*H (mod m**2)
S*G + T*H = 1 (mod m**2)
References
==========
.. [1] [Gathen99]_
"""
M = m**2
e = dup_sub_mul(f, g, h, K)
e = dup_trunc(e, M, K)
q, r = dup_div(dup_mul(s, e, K), h, K)
q = dup_trunc(q, M, K)
r = dup_trunc(r, M, K)
u = dup_add(dup_mul(t, e, K), dup_mul(q, g, K), K)
G = dup_trunc(dup_add(g, u, K), M, K)
H = dup_trunc(dup_add(h, r, K), M, K)
u = dup_add(dup_mul(s, G, K), dup_mul(t, H, K), K)
b = dup_trunc(dup_sub(u, [K.one], K), M, K)
c, d = dup_div(dup_mul(s, b, K), H, K)
c = dup_trunc(c, M, K)
d = dup_trunc(d, M, K)
u = dup_add(dup_mul(t, b, K), dup_mul(c, G, K), K)
S = dup_trunc(dup_sub(s, d, K), M, K)
T = dup_trunc(dup_sub(t, u, K), M, K)
return G, H, S, T
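# Small sanity check (not from the original source): over ZZ with m = 5, f = x**2 - 1,
# g = x - 1, h = x + 1, s = 2, t = 3 (since 2*(x - 1) + 3*(x + 1) = 5*x + 1 == 1 mod 5),
# the step returns G = x - 1, H = x + 1, S = 12, T = 13, and indeed
# 12*(x - 1) + 13*(x + 1) = 25*x + 1 == 1 (mod 25).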
def dup_zz_hensel_lift(p, f, f_list, l, K):
"""
Multifactor Hensel lifting in `Z[x]`.
Given a prime `p`, polynomial `f` over `Z[x]` such that `lc(f)`
is a unit modulo `p`, monic pair-wise coprime polynomials `f_i`
over `Z[x]` satisfying::
f = lc(f) f_1 ... f_r (mod p)
and a positive integer `l`, returns a list of monic polynomials
`F_1`, `F_2`, ..., `F_r` satisfying::
f = lc(f) F_1 ... F_r (mod p**l)
F_i = f_i (mod p), i = 1..r
References
==========
.. [1] [Gathen99]_
"""
r = len(f_list)
lc = dup_LC(f, K)
if r == 1:
F = dup_mul_ground(f, K.gcdex(lc, p**l)[0], K)
return [ dup_trunc(F, p**l, K) ]
m = p
k = r // 2
d = int(_ceil(_log(l, 2)))
g = gf_from_int_poly([lc], p)
for f_i in f_list[:k]:
g = gf_mul(g, gf_from_int_poly(f_i, p), p, K)
h = gf_from_int_poly(f_list[k], p)
for f_i in f_list[k + 1:]:
h = gf_mul(h, gf_from_int_poly(f_i, p), p, K)
s, t, _ = gf_gcdex(g, h, p, K)
g = gf_to_int_poly(g, p)
h = gf_to_int_poly(h, p)
s = gf_to_int_poly(s, p)
t = gf_to_int_poly(t, p)
for _ in range(1, d + 1):
(g, h, s, t), m = dup_zz_hensel_step(m, f, g, h, s, t, K), m**2
return dup_zz_hensel_lift(p, g, f_list[:k], l, K) \
+ dup_zz_hensel_lift(p, h, f_list[k:], l, K)
def _test_pl(fc, q, pl):
if q > pl // 2:
q = q - pl
if not q:
return True
return fc % q == 0
def dup_zz_zassenhaus(f, K):
"""Factor primitive square-free polynomials in `Z[x]`. """
n = dup_degree(f)
if n == 1:
return [f]
fc = f[-1]
A = dup_max_norm(f, K)
b = dup_LC(f, K)
B = int(abs(K.sqrt(K(n + 1))*2**n*A*b))
C = int((n + 1)**(2*n)*A**(2*n - 1))
gamma = int(_ceil(2*_log(C, 2)))
bound = int(2*gamma*_log(gamma))
a = []
# choose a prime number `p` such that `f` be square free in Z_p
# if there are many factors in Z_p, choose among a few different `p`
# the one with fewer factors
for px in range(3, bound + 1):
if not isprime(px) or b % px == 0:
continue
px = K.convert(px)
F = gf_from_int_poly(f, px)
if not gf_sqf_p(F, px, K):
continue
fsqfx = gf_factor_sqf(F, px, K)[1]
a.append((px, fsqfx))
if len(fsqfx) < 15 or len(a) > 4:
break
p, fsqf = min(a, key=lambda x: len(x[1]))
l = int(_ceil(_log(2*B + 1, p)))
modular = [gf_to_int_poly(ff, p) for ff in fsqf]
g = dup_zz_hensel_lift(p, f, modular, l, K)
sorted_T = range(len(g))
T = set(sorted_T)
factors, s = [], 1
pl = p**l
while 2*s <= len(T):
for S in subsets(sorted_T, s):
# lift the constant coefficient of the product `G` of the factors
# in the subset `S`; if it is does not divide `fc`, `G` does
# not divide the input polynomial
if b == 1:
q = 1
for i in S:
q = q*g[i][-1]
q = q % pl
if not _test_pl(fc, q, pl):
continue
else:
G = [b]
for i in S:
G = dup_mul(G, g[i], K)
G = dup_trunc(G, pl, K)
G = dup_primitive(G, K)[1]
q = G[-1]
if q and fc % q != 0:
continue
H = [b]
S = set(S)
T_S = T - S
if b == 1:
G = [b]
for i in S:
G = dup_mul(G, g[i], K)
G = dup_trunc(G, pl, K)
for i in T_S:
H = dup_mul(H, g[i], K)
H = dup_trunc(H, pl, K)
G_norm = dup_l1_norm(G, K)
H_norm = dup_l1_norm(H, K)
if G_norm*H_norm <= B:
T = T_S
sorted_T = [i for i in sorted_T if i not in S]
G = dup_primitive(G, K)[1]
f = dup_primitive(H, K)[1]
factors.append(G)
b = dup_LC(f, K)
break
else:
s += 1
return factors + [f]
def dup_zz_irreducible_p(f, K):
"""Test irreducibility using Eisenstein's criterion. """
lc = dup_LC(f, K)
tc = dup_TC(f, K)
e_fc = dup_content(f[1:], K)
if e_fc:
e_ff = factorint(int(e_fc))
for p in e_ff.keys():
if (lc % p) and (tc % p**2):
return True
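# Illustrative example (not part of the original module): f = x**3 + 6*x + 3 is
# Eisenstein at p = 3 (the leading coefficient is not divisible by 3, all other
# coefficients are, and the trailing coefficient 3 is not divisible by 9), so the
# test reports it irreducible.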
def dup_cyclotomic_p(f, K, irreducible=False):
"""
Efficiently test if ``f`` is a cyclotomic polynomial.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1
>>> R.dup_cyclotomic_p(f)
False
>>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1
>>> R.dup_cyclotomic_p(g)
True
"""
if K.is_QQ:
try:
K0, K = K, K.get_ring()
f = dup_convert(f, K0, K)
except CoercionFailed:
return False
elif not K.is_ZZ:
return False
lc = dup_LC(f, K)
tc = dup_TC(f, K)
if lc != 1 or (tc != -1 and tc != 1):
return False
if not irreducible:
coeff, factors = dup_factor_list(f, K)
if coeff != K.one or factors != [(f, 1)]:
return False
n = dup_degree(f)
g, h = [], []
for i in range(n, -1, -2):
g.insert(0, f[i])
for i in range(n - 1, -1, -2):
h.insert(0, f[i])
g = dup_sqr(dup_strip(g), K)
h = dup_sqr(dup_strip(h), K)
F = dup_sub(g, dup_lshift(h, 1, K), K)
if K.is_negative(dup_LC(F, K)):
F = dup_neg(F, K)
if F == f:
return True
g = dup_mirror(f, K)
if K.is_negative(dup_LC(g, K)):
g = dup_neg(g, K)
if F == g and dup_cyclotomic_p(g, K):
return True
G = dup_sqf_part(F, K)
if dup_sqr(G, K) == F and dup_cyclotomic_p(G, K):
return True
return False
def dup_zz_cyclotomic_poly(n, K):
"""Efficiently generate n-th cyclotomic polynomial. """
h = [K.one, -K.one]
for p, k in factorint(n).items():
h = dup_quo(dup_inflate(h, p, K), h, K)
h = dup_inflate(h, p**(k - 1), K)
return h
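# For example (illustrative only): n = 4 gives x**2 + 1 and n = 6 gives x**2 - x + 1,
# i.e. [1, 0, 1] and [1, -1, 1] in dense coefficient form.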
def _dup_cyclotomic_decompose(n, K):
H = [[K.one, -K.one]]
for p, k in factorint(n).items():
Q = [ dup_quo(dup_inflate(h, p, K), h, K) for h in H ]
H.extend(Q)
for i in range(1, k):
Q = [ dup_inflate(q, p, K) for q in Q ]
H.extend(Q)
return H
def dup_zz_cyclotomic_factor(f, K):
"""
Efficiently factor polynomials `x**n - 1` and `x**n + 1` in `Z[x]`.
Given a univariate polynomial `f` in `Z[x]` returns a list of factors
of `f`, provided that `f` is in the form `x**n - 1` or `x**n + 1` for
`n >= 1`. Otherwise returns None.
Factorization is performed using cyclotomic decomposition of `f`,
    which makes this method much faster than any other direct factorization
approach (e.g. Zassenhaus's).
References
==========
.. [1] [Weisstein09]_
"""
lc_f, tc_f = dup_LC(f, K), dup_TC(f, K)
if dup_degree(f) <= 0:
return None
if lc_f != 1 or tc_f not in [-1, 1]:
return None
if any(bool(cf) for cf in f[1:-1]):
return None
n = dup_degree(f)
F = _dup_cyclotomic_decompose(n, K)
if not K.is_one(tc_f):
return F
else:
H = []
for h in _dup_cyclotomic_decompose(2*n, K):
if h not in F:
H.append(h)
return H
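# Illustrative behaviour (not from the original source): x**4 - 1 yields the factors
# [x - 1, x + 1, x**2 + 1], while x**4 + 1 yields just [x**4 + 1]; any input that is
# not of the form x**n - 1 or x**n + 1 makes the function return None.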
def dup_zz_factor_sqf(f, K):
"""Factor square-free (non-primitive) polynomials in `Z[x]`. """
cont, g = dup_primitive(f, K)
n = dup_degree(g)
if dup_LC(g, K) < 0:
cont, g = -cont, dup_neg(g, K)
if n <= 0:
return cont, []
elif n == 1:
return cont, [g]
if query('USE_IRREDUCIBLE_IN_FACTOR'):
if dup_zz_irreducible_p(g, K):
return cont, [g]
factors = None
if query('USE_CYCLOTOMIC_FACTOR'):
factors = dup_zz_cyclotomic_factor(g, K)
if factors is None:
factors = dup_zz_zassenhaus(g, K)
return cont, _sort_factors(factors, multiple=False)
def dup_zz_factor(f, K):
"""
Factor (non square-free) polynomials in `Z[x]`.
Given a univariate polynomial `f` in `Z[x]` computes its complete
factorization `f_1, ..., f_n` into irreducibles over integers::
f = content(f) f_1**k_1 ... f_n**k_n
The factorization is computed by reducing the input polynomial
into a primitive square-free polynomial and factoring it using
Zassenhaus algorithm. Trial division is used to recover the
multiplicities of factors.
The result is returned as a tuple consisting of::
    (content(f), [(f_1, k_1), ..., (f_n, k_n)])
Examples
========
Consider the polynomial `f = 2*x**4 - 2`::
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_zz_factor(2*x**4 - 2)
(2, [(x - 1, 1), (x + 1, 1), (x**2 + 1, 1)])
In result we got the following factorization::
f = 2 (x - 1) (x + 1) (x**2 + 1)
Note that this is a complete factorization over integers,
however over Gaussian integers we can factor the last term.
By default, polynomials `x**n - 1` and `x**n + 1` are factored
    using cyclotomic decomposition to speed up computations. To
    disable this behaviour set the ``USE_CYCLOTOMIC_FACTOR`` configuration option to False.
References
==========
.. [1] [Gathen99]_
"""
cont, g = dup_primitive(f, K)
n = dup_degree(g)
if dup_LC(g, K) < 0:
cont, g = -cont, dup_neg(g, K)
if n <= 0:
return cont, []
elif n == 1:
return cont, [(g, 1)]
if query('USE_IRREDUCIBLE_IN_FACTOR'):
if dup_zz_irreducible_p(g, K):
return cont, [(g, 1)]
g = dup_sqf_part(g, K)
H = None
if query('USE_CYCLOTOMIC_FACTOR'):
H = dup_zz_cyclotomic_factor(g, K)
if H is None:
H = dup_zz_zassenhaus(g, K)
factors = dup_trial_division(f, H, K)
return cont, factors
def dmp_zz_wang_non_divisors(E, cs, ct, K):
"""Wang/EEZ: Compute a set of valid divisors. """
result = [ cs*ct ]
for q in E:
q = abs(q)
for r in reversed(result):
while r != 1:
r = K.gcd(r, q)
q = q // r
if K.is_one(q):
return None
result.append(q)
return result[1:]
def dmp_zz_wang_test_points(f, T, ct, A, u, K):
"""Wang/EEZ: Test evaluation points for suitability. """
if not dmp_eval_tail(dmp_LC(f, K), A, u - 1, K):
raise EvaluationFailed('no luck')
g = dmp_eval_tail(f, A, u, K)
if not dup_sqf_p(g, K):
raise EvaluationFailed('no luck')
c, h = dup_primitive(g, K)
if K.is_negative(dup_LC(h, K)):
c, h = -c, dup_neg(h, K)
v = u - 1
E = [ dmp_eval_tail(t, A, v, K) for t, _ in T ]
D = dmp_zz_wang_non_divisors(E, c, ct, K)
if D is not None:
return c, h, E
else:
raise EvaluationFailed('no luck')
def dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K):
"""Wang/EEZ: Compute correct leading coefficients. """
C, J, v = [], [0]*len(E), u - 1
for h in H:
c = dmp_one(v, K)
d = dup_LC(h, K)*cs
for i in reversed(range(len(E))):
k, e, (t, _) = 0, E[i], T[i]
while not (d % e):
d, k = d//e, k + 1
if k != 0:
c, J[i] = dmp_mul(c, dmp_pow(t, k, v, K), v, K), 1
C.append(c)
if any(not j for j in J):
raise ExtraneousFactors # pragma: no cover
CC, HH = [], []
for c, h in zip(C, H):
d = dmp_eval_tail(c, A, v, K)
lc = dup_LC(h, K)
if K.is_one(cs):
cc = lc//d
else:
g = K.gcd(lc, d)
d, cc = d//g, lc//g
h, cs = dup_mul_ground(h, d, K), cs//d
c = dmp_mul_ground(c, cc, v, K)
CC.append(c)
HH.append(h)
if K.is_one(cs):
return f, HH, CC
CCC, HHH = [], []
for c, h in zip(CC, HH):
CCC.append(dmp_mul_ground(c, cs, v, K))
HHH.append(dmp_mul_ground(h, cs, 0, K))
f = dmp_mul_ground(f, cs**(len(H) - 1), u, K)
return f, HHH, CCC
def dup_zz_diophantine(F, m, p, K):
"""Wang/EEZ: Solve univariate Diophantine equations. """
if len(F) == 2:
a, b = F
f = gf_from_int_poly(a, p)
g = gf_from_int_poly(b, p)
s, t, G = gf_gcdex(g, f, p, K)
s = gf_lshift(s, m, K)
t = gf_lshift(t, m, K)
q, s = gf_div(s, f, p, K)
t = gf_add_mul(t, q, g, p, K)
s = gf_to_int_poly(s, p)
t = gf_to_int_poly(t, p)
result = [s, t]
else:
G = [F[-1]]
for f in reversed(F[1:-1]):
G.insert(0, dup_mul(f, G[0], K))
S, T = [], [[1]]
for f, g in zip(F, G):
t, s = dmp_zz_diophantine([g, f], T[-1], [], 0, p, 1, K)
T.append(t)
S.append(s)
result, S = [], S + [T[-1]]
for s, f in zip(S, F):
s = gf_from_int_poly(s, p)
f = gf_from_int_poly(f, p)
r = gf_rem(gf_lshift(s, m, K), f, p, K)
s = gf_to_int_poly(r, p)
result.append(s)
return result
def dmp_zz_diophantine(F, c, A, d, p, u, K):
"""Wang/EEZ: Solve multivariate Diophantine equations. """
if not A:
S = [ [] for _ in F ]
n = dup_degree(c)
for i, coeff in enumerate(c):
if not coeff:
continue
<|fim▁hole|>
for j, (s, t) in enumerate(zip(S, T)):
t = dup_mul_ground(t, coeff, K)
S[j] = dup_trunc(dup_add(s, t, K), p, K)
else:
n = len(A)
e = dmp_expand(F, u, K)
a, A = A[-1], A[:-1]
B, G = [], []
for f in F:
B.append(dmp_quo(e, f, u, K))
G.append(dmp_eval_in(f, a, n, u, K))
C = dmp_eval_in(c, a, n, u, K)
v = u - 1
S = dmp_zz_diophantine(G, C, A, d, p, v, K)
S = [ dmp_raise(s, 1, v, K) for s in S ]
for s, b in zip(S, B):
c = dmp_sub_mul(c, s, b, u, K)
c = dmp_ground_trunc(c, p, u, K)
m = dmp_nest([K.one, -a], n, K)
M = dmp_one(n, K)
for k in K.map(range(0, d)):
if dmp_zero_p(c, u):
break
M = dmp_mul(M, m, u, K)
C = dmp_diff_eval_in(c, k + 1, a, n, u, K)
if not dmp_zero_p(C, v):
C = dmp_quo_ground(C, K.factorial(k + 1), v, K)
T = dmp_zz_diophantine(G, C, A, d, p, v, K)
for i, t in enumerate(T):
T[i] = dmp_mul(dmp_raise(t, 1, v, K), M, u, K)
for i, (s, t) in enumerate(zip(S, T)):
S[i] = dmp_add(s, t, u, K)
for t, b in zip(T, B):
c = dmp_sub_mul(c, t, b, u, K)
c = dmp_ground_trunc(c, p, u, K)
S = [ dmp_ground_trunc(s, p, u, K) for s in S ]
return S
def dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K):
"""Wang/EEZ: Parallel Hensel lifting algorithm. """
S, n, v = [f], len(A), u - 1
H = list(H)
for i, a in enumerate(reversed(A[1:])):
s = dmp_eval_in(S[0], a, n - i, u - i, K)
S.insert(0, dmp_ground_trunc(s, p, v - i, K))
d = max(dmp_degree_list(f, u)[1:])
for j, s, a in zip(range(2, n + 2), S, A):
G, w = list(H), j - 1
I, J = A[:j - 2], A[j - 1:]
for i, (h, lc) in enumerate(zip(H, LC)):
lc = dmp_ground_trunc(dmp_eval_tail(lc, J, v, K), p, w - 1, K)
H[i] = [lc] + dmp_raise(h[1:], 1, w - 1, K)
m = dmp_nest([K.one, -a], w, K)
M = dmp_one(w, K)
c = dmp_sub(s, dmp_expand(H, w, K), w, K)
dj = dmp_degree_in(s, w, w)
for k in K.map(range(0, dj)):
if dmp_zero_p(c, w):
break
M = dmp_mul(M, m, w, K)
C = dmp_diff_eval_in(c, k + 1, a, w, w, K)
if not dmp_zero_p(C, w - 1):
C = dmp_quo_ground(C, K.factorial(k + 1), w - 1, K)
T = dmp_zz_diophantine(G, C, I, d, p, w - 1, K)
for i, (h, t) in enumerate(zip(H, T)):
h = dmp_add_mul(h, dmp_raise(t, 1, w - 1, K), M, w, K)
H[i] = dmp_ground_trunc(h, p, w, K)
h = dmp_sub(s, dmp_expand(H, w, K), w, K)
c = dmp_ground_trunc(h, p, w, K)
if dmp_expand(H, u, K) != f:
raise ExtraneousFactors # pragma: no cover
else:
return H
def dmp_zz_wang(f, u, K, mod=None, seed=None):
"""
Factor primitive square-free polynomials in `Z[X]`.
Given a multivariate polynomial `f` in `Z[x_1,...,x_n]`, which is
primitive and square-free in `x_1`, computes factorization of `f` into
irreducibles over integers.
The procedure is based on Wang's Enhanced Extended Zassenhaus
algorithm. The algorithm works by viewing `f` as a univariate polynomial
in `Z[x_2,...,x_n][x_1]`, for which an evaluation mapping is computed::
x_2 -> a_2, ..., x_n -> a_n
where `a_i`, for `i = 2, ..., n`, are carefully chosen integers. The
mapping is used to transform `f` into a univariate polynomial in `Z[x_1]`,
which can be factored efficiently using Zassenhaus algorithm. The last
step is to lift univariate factors to obtain true multivariate
factors. For this purpose a parallel Hensel lifting procedure is used.
The parameter ``seed`` is passed to _randint and can be used to seed randint
(when an integer) or (for testing purposes) can be a sequence of numbers.
References
==========
.. [1] [Wang78]_
.. [2] [Geddes92]_
"""
from sympy.utilities.randtest import _randint
randint = _randint(seed)
ct, T = dmp_zz_factor(dmp_LC(f, K), u - 1, K)
b = dmp_zz_mignotte_bound(f, u, K)
p = K(nextprime(b))
if mod is None:
if u == 1:
mod = 2
else:
mod = 1
history, configs, A, r = set([]), [], [K.zero]*u, None
try:
cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K)
_, H = dup_zz_factor_sqf(s, K)
r = len(H)
if r == 1:
return [f]
configs = [(s, cs, E, H, A)]
except EvaluationFailed:
pass
eez_num_configs = query('EEZ_NUMBER_OF_CONFIGS')
eez_num_tries = query('EEZ_NUMBER_OF_TRIES')
eez_mod_step = query('EEZ_MODULUS_STEP')
while len(configs) < eez_num_configs:
for _ in range(eez_num_tries):
A = [ K(randint(-mod, mod)) for _ in range(u) ]
if tuple(A) not in history:
history.add(tuple(A))
else:
continue
try:
cs, s, E = dmp_zz_wang_test_points(f, T, ct, A, u, K)
except EvaluationFailed:
continue
_, H = dup_zz_factor_sqf(s, K)
rr = len(H)
if r is not None:
if rr != r: # pragma: no cover
if rr < r:
configs, r = [], rr
else:
continue
else:
r = rr
if r == 1:
return [f]
configs.append((s, cs, E, H, A))
if len(configs) == eez_num_configs:
break
else:
mod += eez_mod_step
s_norm, s_arg, i = None, 0, 0
for s, _, _, _, _ in configs:
_s_norm = dup_max_norm(s, K)
if s_norm is not None:
if _s_norm < s_norm:
s_norm = _s_norm
s_arg = i
else:
s_norm = _s_norm
i += 1
_, cs, E, H, A = configs[s_arg]
orig_f = f
try:
f, H, LC = dmp_zz_wang_lead_coeffs(f, T, cs, E, H, A, u, K)
factors = dmp_zz_wang_hensel_lifting(f, H, LC, A, p, u, K)
except ExtraneousFactors: # pragma: no cover
if query('EEZ_RESTART_IF_NEEDED'):
return dmp_zz_wang(orig_f, u, K, mod + 1)
else:
raise ExtraneousFactors(
"we need to restart algorithm with better parameters")
result = []
for f in factors:
_, f = dmp_ground_primitive(f, u, K)
if K.is_negative(dmp_ground_LC(f, u, K)):
f = dmp_neg(f, u, K)
result.append(f)
return result
def dmp_zz_factor(f, u, K):
"""
Factor (non square-free) polynomials in `Z[X]`.
Given a multivariate polynomial `f` in `Z[x]` computes its complete
factorization `f_1, ..., f_n` into irreducibles over integers::
f = content(f) f_1**k_1 ... f_n**k_n
The factorization is computed by reducing the input polynomial
into a primitive square-free polynomial and factoring it using
Enhanced Extended Zassenhaus (EEZ) algorithm. Trial division
is used to recover the multiplicities of factors.
The result is returned as a tuple consisting of::
    (content(f), [(f_1, k_1), ..., (f_n, k_n)])
Consider polynomial `f = 2*(x**2 - y**2)`::
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> R.dmp_zz_factor(2*x**2 - 2*y**2)
(2, [(x - y, 1), (x + y, 1)])
In result we got the following factorization::
f = 2 (x - y) (x + y)
References
==========
.. [1] [Gathen99]_
"""
if not u:
return dup_zz_factor(f, K)
if dmp_zero_p(f, u):
return K.zero, []
cont, g = dmp_ground_primitive(f, u, K)
if dmp_ground_LC(g, u, K) < 0:
cont, g = -cont, dmp_neg(g, u, K)
if all(d <= 0 for d in dmp_degree_list(g, u)):
return cont, []
G, g = dmp_primitive(g, u, K)
factors = []
if dmp_degree(g, u) > 0:
g = dmp_sqf_part(g, u, K)
H = dmp_zz_wang(g, u, K)
factors = dmp_trial_division(f, H, u, K)
for g, k in dmp_zz_factor(G, u - 1, K)[1]:
factors.insert(0, ([g], k))
return cont, _sort_factors(factors)
def dup_ext_factor(f, K):
"""Factor univariate polynomials over algebraic number fields. """
n, lc = dup_degree(f), dup_LC(f, K)
f = dup_monic(f, K)
if n <= 0:
return lc, []
if n == 1:
return lc, [(f, 1)]
f, F = dup_sqf_part(f, K), f
s, g, r = dup_sqf_norm(f, K)
factors = dup_factor_list_include(r, K.dom)
if len(factors) == 1:
return lc, [(f, n//dup_degree(f))]
H = s*K.unit
for i, (factor, _) in enumerate(factors):
h = dup_convert(factor, K.dom, K)
h, _, g = dup_inner_gcd(h, g, K)
h = dup_shift(h, H, K)
factors[i] = h
factors = dup_trial_division(F, factors, K)
return lc, factors
def dmp_ext_factor(f, u, K):
"""Factor multivariate polynomials over algebraic number fields. """
if not u:
return dup_ext_factor(f, K)
lc = dmp_ground_LC(f, u, K)
f = dmp_ground_monic(f, u, K)
if all(d <= 0 for d in dmp_degree_list(f, u)):
return lc, []
f, F = dmp_sqf_part(f, u, K), f
s, g, r = dmp_sqf_norm(f, u, K)
factors = dmp_factor_list_include(r, u, K.dom)
if len(factors) == 1:
factors = [f]
else:
H = dmp_raise([K.one, s*K.unit], u, 0, K)
for i, (factor, _) in enumerate(factors):
h = dmp_convert(factor, u, K.dom, K)
h, _, g = dmp_inner_gcd(h, g, u, K)
h = dmp_compose(h, H, u, K)
factors[i] = h
return lc, dmp_trial_division(F, factors, u, K)
def dup_gf_factor(f, K):
"""Factor univariate polynomials over finite fields. """
f = dup_convert(f, K, K.dom)
coeff, factors = gf_factor(f, K.mod, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dup_convert(f, K.dom, K), k)
return K.convert(coeff, K.dom), factors
def dmp_gf_factor(f, u, K):
"""Factor multivariate polynomials over finite fields. """
raise NotImplementedError('multivariate polynomials over finite fields')
def dup_factor_list(f, K0):
"""Factor univariate polynomials into irreducibles in `K[x]`. """
j, f = dup_terms_gcd(f, K0)
cont, f = dup_primitive(f, K0)
if K0.is_FiniteField:
coeff, factors = dup_gf_factor(f, K0)
elif K0.is_Algebraic:
coeff, factors = dup_ext_factor(f, K0)
else:
if not K0.is_Exact:
K0_inexact, K0 = K0, K0.get_exact()
f = dup_convert(f, K0_inexact, K0)
else:
K0_inexact = None
if K0.is_Field:
K = K0.get_ring()
denom, f = dup_clear_denoms(f, K0, K)
f = dup_convert(f, K0, K)
else:
K = K0
if K.is_ZZ:
coeff, factors = dup_zz_factor(f, K)
elif K.is_Poly:
f, u = dmp_inject(f, 0, K)
coeff, factors = dmp_factor_list(f, u, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_eject(f, u, K), k)
coeff = K.convert(coeff, K.dom)
else: # pragma: no cover
raise DomainError('factorization not supported over %s' % K0)
if K0.is_Field:
for i, (f, k) in enumerate(factors):
factors[i] = (dup_convert(f, K, K0), k)
coeff = K0.convert(coeff, K)
coeff = K0.quo(coeff, denom)
if K0_inexact:
for i, (f, k) in enumerate(factors):
max_norm = dup_max_norm(f, K0)
f = dup_quo_ground(f, max_norm, K0)
f = dup_convert(f, K0, K0_inexact)
factors[i] = (f, k)
coeff = K0.mul(coeff, K0.pow(max_norm, k))
coeff = K0_inexact.convert(coeff, K0)
K0 = K0_inexact
if j:
factors.insert(0, ([K0.one, K0.zero], j))
return coeff*cont, _sort_factors(factors)
def dup_factor_list_include(f, K):
"""Factor univariate polynomials into irreducibles in `K[x]`. """
coeff, factors = dup_factor_list(f, K)
if not factors:
return [(dup_strip([coeff]), 1)]
else:
g = dup_mul_ground(factors[0][0], coeff, K)
return [(g, factors[0][1])] + factors[1:]
def dmp_factor_list(f, u, K0):
"""Factor multivariate polynomials into irreducibles in `K[X]`. """
if not u:
return dup_factor_list(f, K0)
J, f = dmp_terms_gcd(f, u, K0)
cont, f = dmp_ground_primitive(f, u, K0)
if K0.is_FiniteField: # pragma: no cover
coeff, factors = dmp_gf_factor(f, u, K0)
elif K0.is_Algebraic:
coeff, factors = dmp_ext_factor(f, u, K0)
else:
if not K0.is_Exact:
K0_inexact, K0 = K0, K0.get_exact()
f = dmp_convert(f, u, K0_inexact, K0)
else:
K0_inexact = None
if K0.is_Field:
K = K0.get_ring()
denom, f = dmp_clear_denoms(f, u, K0, K)
f = dmp_convert(f, u, K0, K)
else:
K = K0
if K.is_ZZ:
levels, f, v = dmp_exclude(f, u, K)
coeff, factors = dmp_zz_factor(f, v, K)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_include(f, levels, v, K), k)
elif K.is_Poly:
f, v = dmp_inject(f, u, K)
coeff, factors = dmp_factor_list(f, v, K.dom)
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_eject(f, v, K), k)
coeff = K.convert(coeff, K.dom)
else: # pragma: no cover
raise DomainError('factorization not supported over %s' % K0)
if K0.is_Field:
for i, (f, k) in enumerate(factors):
factors[i] = (dmp_convert(f, u, K, K0), k)
coeff = K0.convert(coeff, K)
coeff = K0.quo(coeff, denom)
if K0_inexact:
for i, (f, k) in enumerate(factors):
max_norm = dmp_max_norm(f, u, K0)
f = dmp_quo_ground(f, max_norm, u, K0)
f = dmp_convert(f, u, K0, K0_inexact)
factors[i] = (f, k)
coeff = K0.mul(coeff, K0.pow(max_norm, k))
coeff = K0_inexact.convert(coeff, K0)
K0 = K0_inexact
for i, j in enumerate(reversed(J)):
if not j:
continue
term = {(0,)*(u - i) + (1,) + (0,)*i: K0.one}
factors.insert(0, (dmp_from_dict(term, u, K0), j))
return coeff*cont, _sort_factors(factors)
def dmp_factor_list_include(f, u, K):
"""Factor multivariate polynomials into irreducibles in `K[X]`. """
if not u:
return dup_factor_list_include(f, K)
coeff, factors = dmp_factor_list(f, u, K)
if not factors:
return [(dmp_ground(coeff, u), 1)]
else:
g = dmp_mul_ground(factors[0][0], coeff, u, K)
return [(g, factors[0][1])] + factors[1:]
def dup_irreducible_p(f, K):
"""
Returns ``True`` if a univariate polynomial ``f`` has no factors
over its domain.
"""
return dmp_irreducible_p(f, 0, K)
def dmp_irreducible_p(f, u, K):
"""
Returns ``True`` if a multivariate polynomial ``f`` has no factors
over its domain.
"""
_, factors = dmp_factor_list(f, u, K)
if not factors:
return True
elif len(factors) > 1:
return False
else:
_, k = factors[0]
return k == 1<|fim▁end|> | T = dup_zz_diophantine(F, n - i, p, K) |
<|file_name|>unio.py<|end_file_name|><|fim▁begin|>import io
import os
import sys
import codecs
import contextlib
# We do not trust traditional unixes about having reliable file systems.
# In that case we know better than what the env says and declare this to
# be utf-8 always.
has_likely_buggy_unicode_filesystem = \
sys.platform.startswith('linux') or 'bsd' in sys.platform
def is_ascii_encoding(encoding):
"""Given an encoding this figures out if the encoding is actually ASCII
(which is something we don't actually want in most cases). This is
necessary because ASCII comes under many names such as ANSI_X3.4-1968.
"""
if encoding is None:
return False
try:
codec = codecs.lookup(encoding)
except LookupError:
return False
return codec.name == 'ascii'
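# For instance, codecs.lookup normalises aliases such as 'ANSI_X3.4-1968' and
# 'us-ascii' to the 'ascii' codec, so both are reported as ASCII here, while
# 'utf-8' is not.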
def get_filesystem_encoding():
"""Returns the filesystem encoding that should be used. Note that
this is different from the Python understanding of the filesystem
encoding which might be deeply flawed. Do not use this value against
Python's unicode APIs because it might be different.
    The concept of a filesystem encoding in general is not something
    you should rely on. As such, if you ever need to use this function
    for anything other than writing wrapper code, reconsider.
"""
if has_likely_buggy_unicode_filesystem:
return 'utf-8'
rv = sys.getfilesystemencoding()
if is_ascii_encoding(rv):
return 'utf-8'
return rv
def get_file_encoding(for_writing=False):
"""Returns the encoding for text file data. This is always the same
on all operating systems because this is the only thing that makes
sense when wanting to make data exchange feasible. This is utf-8 no
questions asked. The only simplification is that if a file is opened
    for reading then we allow utf-8-sig.
"""
if for_writing:
return 'utf-8'
return 'utf-8-sig'
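# In practice this means files opened for reading are decoded with 'utf-8-sig'
# (so an optional BOM is skipped transparently), while files opened for writing
# are always encoded as plain 'utf-8' without a BOM.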
def get_std_stream_encoding():
"""Returns the default stream encoding if not found."""
rv = sys.getdefaultencoding()
if is_ascii_encoding(rv):
return 'utf-8'
return rv
class BrokenEnvironment(Exception):
"""This error is raised on Python 3 if the system was malconfigured
beyond repair.
"""
class _NonClosingTextIOWrapper(io.TextIOWrapper):
"""Subclass of the wrapper that does not close the underlying file
in the destructor. This is necessary so that our wrapping of the
standard streams does not accidentally close the original file.
"""
def __del__(self):
pass
class _FixupStream(object):
"""The new io interface needs more from streams than streams
traditionally implement. As such this fixup stuff is necessary in
some circumstances.
"""
def __init__(self, stream):
self._stream = stream
def __getattr__(self, name):
return getattr(self._stream, name)
def readable(self):
x = getattr(self._stream, 'readable', None)
if x is not None:
return x
try:
self._stream.read(0)
except Exception:
return False
return True
def writable(self):
x = getattr(self._stream, 'writable', None)
if x is not None:
return x
try:
self._stream.write('')
except Exception:
try:
self._stream.write(b'')
except Exception:
return False
return True
def seekable(self):
x = getattr(self._stream, 'seekable', None)
if x is not None:
return x
try:
self._stream.seek(self._stream.tell())
except Exception:
return False
return True
PY2 = sys.version_info[0] == 2
if PY2:
import StringIO
text_type = unicode
TextIO = io.StringIO
BytesIO = io.BytesIO
NativeIO = StringIO.StringIO
def _make_text_stream(stream, encoding, errors):
if encoding is None:
encoding = get_std_stream_encoding()
if errors is None:
errors = 'replace'
return _NonClosingTextIOWrapper(_FixupStream(stream), encoding, errors)
def get_binary_stdin():
return sys.stdin
def get_binary_stdout():
return sys.stdout
def get_binary_stderr():
return sys.stderr
def get_binary_argv():
return list(sys.argv)
def get_text_stdin(encoding=None, errors=None):
return _make_text_stream(sys.stdin, encoding, errors)
def get_text_stdout(encoding=None, errors=None):
return _make_text_stream(sys.stdout, encoding, errors)
def get_text_stderr(encoding=None, errors=None):
return _make_text_stream(sys.stderr, encoding, errors)
@contextlib.contextmanager
def wrap_standard_stream(stream_type, stream):
if stream_type not in ('stdin', 'stdout', 'stderr'):
raise TypeError('Invalid stream %s' % stream_type)
old_stream = getattr(sys, stream_type)
setattr(sys, stream_type, stream)
try:
yield stream
finally:
setattr(sys, stream_type, old_stream)
@contextlib.contextmanager
def capture_stdout(and_stderr=False):
stream = StringIO.StringIO()
old_stdout = sys.stdout
old_stderr = sys.stderr<|fim▁hole|> try:
yield stream
finally:
sys.stdout = old_stdout
if and_stderr:
sys.stderr = old_stderr
binary_env = os.environ
else:
text_type = str
TextIO = io.StringIO
BytesIO = io.BytesIO
NativeIO = io.StringIO
def _is_binary_reader(stream, default=False):
try:
return isinstance(stream.read(0), bytes)
except Exception:
return default
# This happens in some cases where the stream was already
            # closed. In this case we assume the default.
def _is_binary_writer(stream, default=False):
try:
stream.write(b'')
except Exception:
try:
stream.write('')
return False
except Exception:
pass
return default
return True
def _find_binary_reader(stream):
# We need to figure out if the given stream is already binary.
        # This can happen because the official docs recommend detaching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
is_binary = _is_binary_reader(stream, False)
if is_binary:
return stream
buf = getattr(stream, 'buffer', None)
# Same situation here, this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_reader(buf, True):
return buf
def _find_binary_writer(stream):
# We need to figure out if the given stream is already binary.
        # This can happen because the official docs recommend detaching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
if _is_binary_writer(stream, False):
return stream
buf = getattr(stream, 'buffer', None)
# Same situation here, this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_reader(buf, True):
return buf
def _stream_is_misconfigured(stream):
"""A stream is misconfigured if it's encoding is ASCII."""
return is_ascii_encoding(getattr(stream, 'encoding', None))
def _wrap_stream_for_text(stream, encoding, errors):
if errors is None:
errors = 'replace'
if encoding is None:
encoding = get_std_stream_encoding()
return _NonClosingTextIOWrapper(_FixupStream(stream), encoding, errors)
def _is_compatible_text_stream(stream, encoding, errors):
stream_encoding = getattr(stream, 'encoding', None)
stream_errors = getattr(stream, 'errors', None)
# Perfect match.
if stream_encoding == encoding and stream_errors == errors:
return True
# Otherwise it's only a compatible stream if we did not ask for
# an encoding.
if encoding is None:
return stream_encoding is not None
return False
def _force_correct_text_reader(text_reader, encoding, errors):
if _is_binary_reader(text_reader, False):
binary_reader = text_reader
else:
# If there is no target encoding set we need to verify that the
# reader is actually not misconfigured.
if encoding is None and not _stream_is_misconfigured(text_reader):
return text_reader
if _is_compatible_text_stream(text_reader, encoding, errors):
return text_reader
# If the reader has no encoding we try to find the underlying
# binary reader for it. If that fails because the environment is
# misconfigured, we silently go with the same reader because this
# is too common to happen. In that case mojibake is better than
# exceptions.
binary_reader = _find_binary_reader(text_reader)
if binary_reader is None:
return text_reader
# At this point we default the errors to replace instead of strict
# because nobody handles those errors anyways and at this point
# we're so fundamentally fucked that nothing can repair it.
if errors is None:
errors = 'replace'
return _wrap_stream_for_text(binary_reader, encoding, errors)
def _force_correct_text_writer(text_writer, encoding, errors):
if _is_binary_writer(text_writer, False):
binary_writer = text_writer
else:
# If there is no target encoding set we need to verify that the
# writer is actually not misconfigured.
if encoding is None and not _stream_is_misconfigured(text_writer):
return text_writer
if _is_compatible_text_stream(text_writer, encoding, errors):
return text_writer
# If the writer has no encoding we try to find the underlying
# binary writer for it. If that fails because the environment is
# misconfigured, we silently go with the same writer because this
# is too common to happen. In that case mojibake is better than
# exceptions.
binary_writer = _find_binary_writer(text_writer)
if binary_writer is None:
return text_writer
# At this point we default the errors to replace instead of strict
# because nobody handles those errors anyways and at this point
# we're so fundamentally fucked that nothing can repair it.
if errors is None:
errors = 'replace'
return _wrap_stream_for_text(binary_writer, encoding, errors)
def get_binary_stdin():
reader = _find_binary_reader(sys.stdin)
if reader is None:
raise BrokenEnvironment('Was not able to determine binary '
'stream for sys.stdin.')
return reader
def get_binary_stdout():
writer = _find_binary_writer(sys.stdout)
if writer is None:
raise BrokenEnvironment('Was not able to determine binary '
'stream for sys.stdout.')
return writer
def get_binary_stderr():
writer = _find_binary_writer(sys.stderr)
if writer is None:
raise BrokenEnvironment('Was not able to determine binary '
'stream for sys.stderr.')
return writer
def get_text_stdin(encoding=None, errors=None):
return _force_correct_text_reader(sys.stdin, encoding, errors)
def get_text_stdout(encoding=None, errors=None):
return _force_correct_text_writer(sys.stdout, encoding, errors)
def get_text_stderr(encoding=None, errors=None):
return _force_correct_text_writer(sys.stderr, encoding, errors)
def get_binary_argv():
fs_enc = sys.getfilesystemencoding()
return [x.encode(fs_enc, 'surrogateescape') for x in sys.argv]
binary_env = os.environb
@contextlib.contextmanager
def wrap_standard_stream(stream_type, stream):
old_stream = getattr(sys, stream_type, None)
if stream_type == 'stdin':
if _is_binary_reader(stream):
raise TypeError('Standard input stream cannot be set to a '
'binary reader directly.')
if _find_binary_reader(stream) is None:
raise TypeError('Standard input stream needs to be backed '
'by a binary stream.')
elif stream_type in ('stdout', 'stderr'):
if _is_binary_writer(stream):
raise TypeError('Standard output stream cannot be set to a '
'binary writer directly.')
if _find_binary_writer(stream) is None:
raise TypeError('Standard output and error streams need '
                                'to be backed by binary streams.')
else:
raise TypeError('Invalid stream %s' % stream_type)
setattr(sys, stream_type, stream)
try:
yield old_stream
finally:
setattr(sys, stream_type, old_stream)
class _CapturedStream(object):
"""A helper that flushes before getvalue() to fix a few oddities
on Python 3.
"""
def __init__(self, stream):
self._stream = stream
def __getattr__(self, name):
return getattr(self._stream, name)
def getvalue(self):
self._stream.flush()
return self._stream.buffer.getvalue()
def __repr__(self):
return repr(self._stream)
@contextlib.contextmanager
def capture_stdout(and_stderr=False):
"""Captures stdout and yields the new bytes stream that backs it.
It also wraps it in a fake object that flushes on getting the
underlying value.
"""
ll_stream = io.BytesIO()
stream = _NonClosingTextIOWrapper(ll_stream, sys.stdout.encoding,
sys.stdout.errors)
old_stdout = sys.stdout
sys.stdout = stream
if and_stderr:
old_stderr = sys.stderr
sys.stderr = stream
try:
yield _CapturedStream(stream)
finally:
stream.flush()
sys.stdout = old_stdout
if and_stderr:
sys.stderr = old_stderr
def _fixup_path(path):
if has_likely_buggy_unicode_filesystem \
and isinstance(path, text_type):
if PY2:
path = path.encode(get_filesystem_encoding())
else:
path = path.encode(get_filesystem_encoding(),
'surrogateescape')
return path
def open(filename, mode='r', encoding=None, errors=None):
"""Opens a file either in text or binary mode. The encoding for the
file is automatically detected.
"""
filename = _fixup_path(filename)
if 'b' not in mode:
encoding = get_file_encoding('w' in mode)
if encoding is not None:
return io.open(filename, mode, encoding=encoding, errors=errors)
return io.open(filename, mode)<|fim▁end|> | sys.stdout = stream
if and_stderr:
sys.stderr = stream |
<|file_name|>git.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: git
author:
- "Ansible Core Team"
- "Michael DeHaan"
version_added: "0.0.1"
short_description: Deploy software (or files) from git checkouts
description:
- Manage I(git) checkouts of repositories to deploy files or software.
options:
repo:
description:
- git, SSH, or HTTP(S) protocol address of the git repository.
type: str
required: true
aliases: [ name ]
dest:
description:
- The path of where the repository should be checked out. This
is equivalent to C(git clone [repo_url] [directory]). The repository
named in I(repo) is not appended to this path and the destination directory must be empty. This
parameter is required, unless I(clone) is set to C(no).
type: path
required: true
version:
description:
- What version of the repository to check out. This can be
the literal string C(HEAD), a branch name, a tag name.
It can also be a I(SHA-1) hash, in which case I(refspec) needs
to be specified if the given revision is not already available.
type: str
default: "HEAD"
accept_hostkey:
description:
- If C(yes), ensure that "-o StrictHostKeyChecking=no" is
present as an ssh option.
type: bool
default: 'no'
version_added: "1.5"
accept_newhostkey:
description:
- As of OpenSSH 7.5, "-o StrictHostKeyChecking=accept-new" can be
used, which is safer and will only accept host keys that are
not present or are the same. If C(yes), ensure that
"-o StrictHostKeyChecking=accept-new" is present as an ssh option.
type: bool
default: 'no'
version_added: "2.12"
ssh_opts:
description:
- Creates a wrapper script and exports the path as GIT_SSH
which git then automatically uses to override ssh arguments.
An example value could be "-o StrictHostKeyChecking=no"
(although this particular option is better set by
I(accept_hostkey)).
type: str
version_added: "1.5"
key_file:
description:
- Specify an optional private key file path, on the target host, to use for the checkout.
type: path
version_added: "1.5"
reference:
description:
- Reference repository (see "git clone --reference ...").
version_added: "1.4"
remote:
description:
- Name of the remote.
type: str
default: "origin"
refspec:
description:
- Add an additional refspec to be fetched.
If version is set to a I(SHA-1) not reachable from any branch
or tag, this option may be necessary to specify the ref containing
the I(SHA-1).
Uses the same syntax as the C(git fetch) command.
An example value could be "refs/meta/config".
type: str
version_added: "1.9"
force:
description:
- If C(yes), any modified files in the working
repository will be discarded. Prior to 0.7, this was always
'yes' and could not be disabled. Prior to 1.9, the default was
`yes`.
type: bool
default: 'no'
version_added: "0.7"
depth:
description:
- Create a shallow clone with a history truncated to the specified
number of revisions. The minimum possible value is C(1), otherwise
ignored. Needs I(git>=1.9.1) to work correctly.
type: int
version_added: "1.2"
clone:
description:
- If C(no), do not clone the repository even if it does not exist locally.
type: bool
default: 'yes'
version_added: "1.9"
update:
description:
- If C(no), do not retrieve new revisions from the origin repository.
- Operations like archive will work on the existing (old) repository and might
not respond to changes to the options version or remote.
type: bool
default: 'yes'
version_added: "1.2"
executable:
description:
- Path to git executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
type: path
version_added: "1.4"
bare:
description:
- If C(yes), repository will be created as a bare repo, otherwise
it will be a standard repo with a workspace.
type: bool
default: 'no'
version_added: "1.4"
umask:
description:
- The umask to set before doing any checkouts, or any other
repository maintenance.
type: raw
version_added: "2.2"
recursive:
description:
- If C(no), repository will be cloned without the --recursive
option, skipping sub-modules.
type: bool
default: 'yes'
version_added: "1.6"
single_branch:
description:
- Clone only the history leading to the tip of the specified I(branch).<|fim▁hole|> default: 'no'
version_added: '2.11'
track_submodules:
description:
- If C(yes), submodules will track the latest commit on their
master branch (or other branch specified in .gitmodules). If
C(no), submodules will be kept at the revision specified by the
main project. This is equivalent to specifying the --remote flag
to git submodule update.
type: bool
default: 'no'
version_added: "1.8"
verify_commit:
description:
- If C(yes), when cloning or checking out a I(version) verify the
signature of a GPG signed commit. This requires git version>=2.1.0
to be installed. The commit MUST be signed and the public key MUST
be present in the GPG keyring.
type: bool
default: 'no'
version_added: "2.0"
archive:
description:
- Specify archive file path with extension. If specified, creates an
archive file of the specified format containing the tree structure
for the source tree.
Allowed archive formats ["zip", "tar.gz", "tar", "tgz"].
- This will clone and perform git archive from local directory as not
all git servers support git archive.
type: path
version_added: "2.4"
archive_prefix:
description:
- Specify a prefix to add to each file path in archive. Requires I(archive) to be specified.
version_added: "2.10"
type: str
separate_git_dir:
description:
- The path to place the cloned repository. If specified, Git repository
can be separated from working tree.
type: path
version_added: "2.7"
gpg_whitelist:
description:
- A list of trusted GPG fingerprints to compare to the fingerprint of the
GPG-signed commit.
- Only used when I(verify_commit=yes).
- Use of this feature requires Git 2.6+ due to its reliance on git's C(--raw) flag to C(verify-commit) and C(verify-tag).
type: list
elements: str
default: []
version_added: "2.9"
requirements:
- git>=1.7.1 (the command line tool)
notes:
- "If the task seems to be hanging, first verify remote host is in C(known_hosts).
SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
one solution is to use the option accept_hostkey. Another solution is to
add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts."
- Supports C(check_mode).
'''
EXAMPLES = '''
- name: Git checkout
ansible.builtin.git:
repo: 'https://foosball.example.org/path/to/repo.git'
dest: /srv/checkout
version: release-0.22
- name: Read-write git checkout from github
ansible.builtin.git:
repo: [email protected]:mylogin/hello.git
dest: /home/mylogin/hello
- name: Just ensuring the repo checkout exists
ansible.builtin.git:
repo: 'https://foosball.example.org/path/to/repo.git'
dest: /srv/checkout
update: no
- name: Just get information about the repository whether or not it has already been cloned locally
ansible.builtin.git:
repo: 'https://foosball.example.org/path/to/repo.git'
dest: /srv/checkout
clone: no
update: no
- name: Checkout a github repo and use refspec to fetch all pull requests
ansible.builtin.git:
repo: https://github.com/ansible/ansible-examples.git
dest: /src/ansible-examples
refspec: '+refs/pull/*:refs/heads/*'
- name: Create git archive from repo
ansible.builtin.git:
repo: https://github.com/ansible/ansible-examples.git
dest: /src/ansible-examples
archive: /tmp/ansible-examples.zip
- name: Clone a repo with separate git directory
ansible.builtin.git:
repo: https://github.com/ansible/ansible-examples.git
dest: /src/ansible-examples
separate_git_dir: /src/ansible-examples.git
- name: Example clone of a single branch
ansible.builtin.git:
single_branch: yes
branch: master
- name: Avoid hanging when http(s) password is missing
ansible.builtin.git:
repo: https://github.com/ansible/could-be-a-private-repo
dest: /src/from-private-repo
environment:
GIT_TERMINAL_PROMPT: 0 # reports "terminal prompts disabled" on missing password
# or GIT_ASKPASS: /bin/true # for git before version 2.3.0, reports "Authentication failed" on missing password
'''
RETURN = '''
after:
description: Last commit revision of the repository retrieved during the update.
returned: success
type: str
sample: 4c020102a9cd6fe908c9a4a326a38f972f63a903
before:
description: Commit revision before the repository was updated, "null" for new repository.
returned: success
type: str
sample: 67c04ebe40a003bda0efb34eacfb93b0cafdf628
remote_url_changed:
description: True if the remote URL was changed, otherwise False.
returned: success
type: bool
sample: True
warnings:
description: List of warnings if requested features were not available due to a too old git version.
returned: error
type: str
sample: git version is too old to fully support the depth argument. Falling back to full checkouts.
git_dir_now:
description: Contains the new path of .git directory if it is changed.
returned: success
type: str
sample: /path/to/new/git/dir
git_dir_before:
description: Contains the original path of .git directory if it is changed.
returned: success
type: str
sample: /path/to/old/git/dir
'''
import filecmp
import os
import re
import shlex
import stat
import sys
import shutil
import tempfile
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import b, string_types
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common.process import get_bin_path
def relocate_repo(module, result, repo_dir, old_repo_dir, worktree_dir):
if os.path.exists(repo_dir):
module.fail_json(msg='Separate-git-dir path %s already exists.' % repo_dir)
if worktree_dir:
dot_git_file_path = os.path.join(worktree_dir, '.git')
try:
shutil.move(old_repo_dir, repo_dir)
with open(dot_git_file_path, 'w') as dot_git_file:
dot_git_file.write('gitdir: %s' % repo_dir)
result['git_dir_before'] = old_repo_dir
result['git_dir_now'] = repo_dir
except (IOError, OSError) as err:
# if we already moved the .git dir, roll it back
if os.path.exists(repo_dir):
shutil.move(repo_dir, old_repo_dir)
module.fail_json(msg=u'Unable to move git dir. %s' % to_text(err))
def head_splitter(headfile, remote, module=None, fail_on_error=False):
'''Extract the head reference'''
# https://github.com/ansible/ansible-modules-core/pull/907
res = None
if os.path.exists(headfile):
rawdata = None
try:
f = open(headfile, 'r')
rawdata = f.readline()
f.close()
except Exception:
if fail_on_error and module:
module.fail_json(msg="Unable to read %s" % headfile)
if rawdata:
try:
rawdata = rawdata.replace('refs/remotes/%s' % remote, '', 1)
refparts = rawdata.split(' ')
newref = refparts[-1]
nrefparts = newref.split('/', 2)
res = nrefparts[-1].rstrip('\n')
except Exception:
if fail_on_error and module:
module.fail_json(msg="Unable to split head from '%s'" % rawdata)
return res
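# A minimal sketch of the parsing head_splitter() performs; the temporary
# HEAD file and the 'origin/main' reference below are made-up examples.
def _demo_head_splitter():
    fd, headfile = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as f:
        f.write('ref: refs/remotes/origin/main\n')
    branch = head_splitter(headfile, 'origin')  # -> 'main'
    os.remove(headfile)
    return branch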
def unfrackgitpath(path):
if path is None:
return None
# copied from ansible.utils.path
return os.path.normpath(os.path.realpath(os.path.expanduser(os.path.expandvars(path))))
def get_submodule_update_params(module, git_path, cwd):
# or: git submodule [--quiet] update [--init] [-N|--no-fetch]
# [-f|--force] [--rebase] [--reference <repository>] [--merge]
# [--recursive] [--] [<path>...]
params = []
# parse the usage text printed by `git submodule update --help` to discover the valid params
cmd = "%s submodule update --help" % (git_path)
rc, stdout, stderr = module.run_command(cmd, cwd=cwd)
lines = stderr.split('\n')
update_line = None
for line in lines:
if 'git submodule [--quiet] update ' in line:
update_line = line
if update_line:
update_line = update_line.replace('[', '')
update_line = update_line.replace(']', '')
update_line = update_line.replace('|', ' ')
parts = shlex.split(update_line)
for part in parts:
if part.startswith('--'):
part = part.replace('--', '')
params.append(part)
return params
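# A minimal sketch of how the scraped usage line becomes a parameter list;
# the usage string below is a shortened, made-up example.
def _demo_submodule_params():
    line = '   or: git submodule [--quiet] update [--init] [-N|--no-fetch] [--remote]'
    line = line.replace('[', '').replace(']', '').replace('|', ' ')
    return [p.replace('--', '') for p in shlex.split(line) if p.startswith('--')]
    # -> ['quiet', 'init', 'no-fetch', 'remote']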
def write_ssh_wrapper(module_tmpdir):
try:
# make sure we have full permission to the module_dir, which
# may not be the case if we're sudo'ing to a non-root user
if os.access(module_tmpdir, os.W_OK | os.R_OK | os.X_OK):
fd, wrapper_path = tempfile.mkstemp(prefix=module_tmpdir + '/')
else:
raise OSError
except (IOError, OSError):
fd, wrapper_path = tempfile.mkstemp()
fh = os.fdopen(fd, 'w+b')
template = b("""#!/bin/sh
if [ -z "$GIT_SSH_OPTS" ]; then
BASEOPTS=""
else
BASEOPTS=$GIT_SSH_OPTS
fi
# Let ssh fail rather than prompt
BASEOPTS="$BASEOPTS -o BatchMode=yes"
if [ -z "$GIT_KEY" ]; then
ssh $BASEOPTS "$@"
else
ssh -i "$GIT_KEY" -o IdentitiesOnly=yes $BASEOPTS "$@"
fi
""")
fh.write(template)
fh.close()
st = os.stat(wrapper_path)
os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC)
return wrapper_path
def set_git_ssh(ssh_wrapper, key_file, ssh_opts):
if os.environ.get("GIT_SSH"):
del os.environ["GIT_SSH"]
os.environ["GIT_SSH"] = ssh_wrapper
if os.environ.get("GIT_KEY"):
del os.environ["GIT_KEY"]
if key_file:
os.environ["GIT_KEY"] = key_file
if os.environ.get("GIT_SSH_OPTS"):
del os.environ["GIT_SSH_OPTS"]
if ssh_opts:
os.environ["GIT_SSH_OPTS"] = ssh_opts
def get_version(module, git_path, dest, ref="HEAD"):
''' samples the version of the git repo '''
cmd = "%s rev-parse %s" % (git_path, ref)
rc, stdout, stderr = module.run_command(cmd, cwd=dest)
sha = to_native(stdout).rstrip('\n')
return sha
def ssh_supports_acceptnewhostkey(module):
try:
ssh_path = get_bin_path('ssh')
except ValueError as err:
module.fail_json(
msg='Remote host is missing ssh command, so you cannot '
'use the accept_newhostkey option.', details=to_text(err))
supports_acceptnewhostkey = True
cmd = [ssh_path, '-o', 'StrictHostKeyChecking=accept-new', '-V']
rc, stdout, stderr = module.run_command(cmd)
if rc != 0:
supports_acceptnewhostkey = False
return supports_acceptnewhostkey
def get_submodule_versions(git_path, module, dest, version='HEAD'):
cmd = [git_path, 'submodule', 'foreach', git_path, 'rev-parse', version]
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(
msg='Unable to determine hashes of submodules',
stdout=out,
stderr=err,
rc=rc)
submodules = {}
subm_name = None
for line in out.splitlines():
if line.startswith("Entering '"):
subm_name = line[10:-1]
elif len(line.strip()) == 40:
if subm_name is None:
module.fail_json(msg='Unable to match submodule hash to a submodule name: %s' % line.strip())
submodules[subm_name] = line.strip()
subm_name = None
else:
module.fail_json(msg='Unable to parse submodule hash line: %s' % line.strip())
if subm_name is not None:
module.fail_json(msg='Unable to find hash for submodule: %s' % subm_name)
return submodules
def clone(git_path, module, repo, dest, remote, depth, version, bare,
reference, refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_whitelist, single_branch):
''' makes a new git repo if it does not already exist '''
dest_dirname = os.path.dirname(dest)
try:
os.makedirs(dest_dirname)
except Exception:
pass
cmd = [git_path, 'clone']
if bare:
cmd.append('--bare')
else:
cmd.extend(['--origin', remote])
is_branch_or_tag = is_remote_branch(git_path, module, dest, repo, version) or is_remote_tag(git_path, module, dest, repo, version)
if depth:
if version == 'HEAD' or refspec:
cmd.extend(['--depth', str(depth)])
elif is_branch_or_tag:
cmd.extend(['--depth', str(depth)])
cmd.extend(['--branch', version])
else:
# only use depth if the remote object is branch or tag (i.e. fetchable)
module.warn("Ignoring depth argument. "
"Shallow clones are only available for "
"HEAD, branches, tags or in combination with refspec.")
if reference:
cmd.extend(['--reference', str(reference)])
if single_branch:
if git_version_used is None:
module.fail_json(msg='Cannot find git executable at %s' % git_path)
if git_version_used < LooseVersion('1.7.10'):
module.warn("git version '%s' is too old to use 'single-branch'. Ignoring." % git_version_used)
else:
cmd.append("--single-branch")
if is_branch_or_tag:
cmd.extend(['--branch', version])
needs_separate_git_dir_fallback = False
if separate_git_dir:
if git_version_used is None:
module.fail_json(msg='Cannot find git executable at %s' % git_path)
if git_version_used < LooseVersion('1.7.5'):
# git before 1.7.5 doesn't have separate-git-dir argument, do fallback
needs_separate_git_dir_fallback = True
else:
cmd.append('--separate-git-dir=%s' % separate_git_dir)
cmd.extend([repo, dest])
module.run_command(cmd, check_rc=True, cwd=dest_dirname)
if needs_separate_git_dir_fallback:
relocate_repo(module, result, separate_git_dir, os.path.join(dest, ".git"), dest)
if bare and remote != 'origin':
module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest)
if refspec:
cmd = [git_path, 'fetch']
if depth:
cmd.extend(['--depth', str(depth)])
cmd.extend([remote, refspec])
module.run_command(cmd, check_rc=True, cwd=dest)
if verify_commit:
verify_commit_sign(git_path, module, dest, version, gpg_whitelist)
def has_local_mods(module, git_path, dest, bare):
if bare:
return False
cmd = "%s status --porcelain" % (git_path)
rc, stdout, stderr = module.run_command(cmd, cwd=dest)
lines = stdout.splitlines()
lines = list(filter(lambda c: not re.search('^\\?\\?.*$', c), lines))
return len(lines) > 0
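# A minimal sketch of the porcelain filter above: untracked files ('??') are
# ignored, only tracked modifications count. The sample status lines are
# fabricated.
def _demo_porcelain_filter():
    sample = [' M README.md', '?? scratch.txt', 'A  new_file.py']
    kept = [l for l in sample if not re.search('^\\?\\?.*$', l)]
    return kept  # -> [' M README.md', 'A  new_file.py']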
def reset(git_path, module, dest):
'''
Resets the index and working tree to HEAD.
Discards any changes to tracked files in working
tree since that commit.
'''
cmd = "%s reset --hard HEAD" % (git_path,)
return module.run_command(cmd, check_rc=True, cwd=dest)
def get_diff(module, git_path, dest, repo, remote, depth, bare, before, after):
''' Return the difference between 2 versions '''
if before is None:
return {'prepared': '>> Newly checked out %s' % after}
elif before != after:
# Ensure we have the object we are referring to during git diff !
git_version_used = git_version(git_path, module)
fetch(git_path, module, repo, dest, after, remote, depth, bare, '', git_version_used)
cmd = '%s diff %s %s' % (git_path, before, after)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc == 0 and out:
return {'prepared': out}
elif rc == 0:
return {'prepared': '>> No visual differences between %s and %s' % (before, after)}
elif err:
return {'prepared': '>> Failed to get proper diff between %s and %s:\n>> %s' % (before, after, err)}
else:
return {'prepared': '>> Failed to get proper diff between %s and %s' % (before, after)}
return {}
def get_remote_head(git_path, module, dest, version, remote, bare):
cloning = False
cwd = None
tag = False
if remote == module.params['repo']:
cloning = True
elif remote == 'file://' + os.path.expanduser(module.params['repo']):
cloning = True
else:
cwd = dest
if version == 'HEAD':
if cloning:
# cloning the repo, just get the remote's HEAD version
cmd = '%s ls-remote %s -h HEAD' % (git_path, remote)
else:
head_branch = get_head_branch(git_path, module, dest, remote, bare)
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, head_branch)
elif is_remote_branch(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
elif is_remote_tag(git_path, module, dest, remote, version):
tag = True
cmd = '%s ls-remote %s -t refs/tags/%s*' % (git_path, remote, version)
else:
# appears to be a SHA-1; return it as-is since we cannot
# check for a specific SHA-1 on the remote
return version
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd)
if len(out) < 1:
module.fail_json(msg="Could not determine remote revision for %s" % version, stdout=out, stderr=err, rc=rc)
out = to_native(out)
if tag:
# Find the dereferenced tag if this is an annotated tag.
for tag in out.split('\n'):
if tag.endswith(version + '^{}'):
out = tag
break
elif tag.endswith(version):
out = tag
rev = out.split()[0]
return rev
def is_remote_tag(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version)
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if to_native(version, errors='surrogate_or_strict') in out:
return True
else:
return False
def get_branches(git_path, module, dest):
branches = []
cmd = '%s branch --no-color -a' % (git_path,)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Could not determine branch data - received %s" % out, stdout=out, stderr=err)
for line in out.split('\n'):
if line.strip():
branches.append(line.strip())
return branches
def get_annotated_tags(git_path, module, dest):
tags = []
cmd = [git_path, 'for-each-ref', 'refs/tags/', '--format', '%(objecttype):%(refname:short)']
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Could not determine tag data - received %s" % out, stdout=out, stderr=err)
for line in to_native(out).split('\n'):
if line.strip():
tagtype, tagname = line.strip().split(':')
if tagtype == 'tag':
tags.append(tagname)
return tags
def is_remote_branch(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if to_native(version, errors='surrogate_or_strict') in out:
return True
else:
return False
def is_local_branch(git_path, module, dest, branch):
branches = get_branches(git_path, module, dest)
lbranch = '%s' % branch
if lbranch in branches:
return True
elif '* %s' % branch in branches:
return True
else:
return False
def is_not_a_branch(git_path, module, dest):
branches = get_branches(git_path, module, dest)
for branch in branches:
if branch.startswith('* ') and ('no branch' in branch or 'detached from' in branch or 'detached at' in branch):
return True
return False
def get_repo_path(dest, bare):
if bare:
repo_path = dest
else:
repo_path = os.path.join(dest, '.git')
# Check if .git is a file. If it is, the repository lives in an external directory relative to the working copy (e.g. we are in a
# submodule structure).
if os.path.isfile(repo_path):
with open(repo_path, 'r') as gitfile:
data = gitfile.read()
ref_prefix, gitdir = data.rstrip().split('gitdir: ', 1)
if ref_prefix:
raise ValueError('.git file has invalid git dir reference format')
# There is a possibility the .git file to have an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path.split('.git')[0], gitdir)
if not os.path.isdir(repo_path):
raise ValueError('%s is not a directory' % repo_path)
return repo_path
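# Illustrative note: for a working tree whose .git is a file, e.g. one created
# with `git clone --separate-git-dir /srv/checkout.git ...`, that file holds a
# single line such as
#     gitdir: /srv/checkout.git
# and get_repo_path() resolves that path instead of <dest>/.git. The path
# shown is a made-up example.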
def get_head_branch(git_path, module, dest, remote, bare=False):
'''
Determine what branch HEAD is associated with. This is partly
taken from lib/ansible/utils/__init__.py. It finds the correct
path to .git/HEAD and reads from that file the branch that HEAD is
associated with. In the case of a detached HEAD, this will look
up the branch in .git/refs/remotes/<remote>/HEAD.
'''
try:
repo_path = get_repo_path(dest, bare)
except (IOError, ValueError) as err:
# No repo path found
"""``.git`` file does not have a valid format for detached Git dir."""
module.fail_json(
msg='Current repo does not have a valid reference to a '
'separate Git dir or it refers to an invalid path',
details=to_text(err),
)
# Read .git/HEAD for the name of the branch.
# If we're in a detached HEAD state, look up the branch associated with
# the remote HEAD in .git/refs/remotes/<remote>/HEAD
headfile = os.path.join(repo_path, "HEAD")
if is_not_a_branch(git_path, module, dest):
headfile = os.path.join(repo_path, 'refs', 'remotes', remote, 'HEAD')
branch = head_splitter(headfile, remote, module=module, fail_on_error=True)
return branch
def get_remote_url(git_path, module, dest, remote):
'''Return URL of remote source for repo.'''
command = [git_path, 'ls-remote', '--get-url', remote]
(rc, out, err) = module.run_command(command, cwd=dest)
if rc != 0:
# There was an issue getting remote URL, most likely
# command is not available in this version of Git.
return None
return to_native(out).rstrip('\n')
def set_remote_url(git_path, module, repo, dest, remote):
''' updates repo from remote sources '''
# Return if remote URL isn't changing.
remote_url = get_remote_url(git_path, module, dest, remote)
if remote_url == repo or unfrackgitpath(remote_url) == unfrackgitpath(repo):
return False
command = [git_path, 'remote', 'set-url', remote, repo]
(rc, out, err) = module.run_command(command, cwd=dest)
if rc != 0:
label = "set a new url %s for %s" % (repo, remote)
module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))
# Return False if remote_url is None to maintain previous behavior
# for Git versions prior to 1.7.5 that lack required functionality.
return remote_url is not None
def fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used, force=False):
''' updates repo from remote sources '''
set_remote_url(git_path, module, repo, dest, remote)
commands = []
fetch_str = 'download remote objects and refs'
fetch_cmd = [git_path, 'fetch']
refspecs = []
if depth:
# try to find the minimal set of refs we need to fetch to get a
# successful checkout
currenthead = get_head_branch(git_path, module, dest, remote)
if refspec:
refspecs.append(refspec)
elif version == 'HEAD':
refspecs.append(currenthead)
elif is_remote_branch(git_path, module, dest, repo, version):
if currenthead != version:
# this workaround is only needed for older git versions
# 1.8.3 is broken, 1.9.x works
# ensure that remote branch is available as both local and remote ref
refspecs.append('+refs/heads/%s:refs/heads/%s' % (version, version))
refspecs.append('+refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version))
elif is_remote_tag(git_path, module, dest, repo, version):
refspecs.append('+refs/tags/' + version + ':refs/tags/' + version)
if refspecs:
# if refspecs is empty, i.e. version is neither heads nor tags
# assume it is a version hash
# fall back to a full clone, otherwise we might not be able to checkout
# version
fetch_cmd.extend(['--depth', str(depth)])
if not depth or not refspecs:
# don't try to be minimalistic but do a full clone
# also do this if depth is given, but version is something that can't be fetched directly
if bare:
refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*']
else:
# ensure all tags are fetched
if git_version_used >= LooseVersion('1.9'):
fetch_cmd.append('--tags')
else:
# old git versions have a bug in --tags that prevents updating existing tags
commands.append((fetch_str, fetch_cmd + [remote]))
refspecs = ['+refs/tags/*:refs/tags/*']
if refspec:
refspecs.append(refspec)
if force:
fetch_cmd.append('--force')
fetch_cmd.extend([remote])
commands.append((fetch_str, fetch_cmd + refspecs))
for (label, command) in commands:
(rc, out, err) = module.run_command(command, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to %s: %s %s" % (label, out, err), cmd=command)
def submodules_fetch(git_path, module, remote, track_submodules, dest):
changed = False
if not os.path.exists(os.path.join(dest, '.gitmodules')):
# no submodules
return changed
gitmodules_file = open(os.path.join(dest, '.gitmodules'), 'r')
for line in gitmodules_file:
# Check for new submodules
if not changed and line.strip().startswith('path'):
path = line.split('=', 1)[1].strip()
# Check that dest/path/.git exists
if not os.path.exists(os.path.join(dest, path, '.git')):
changed = True
# Check for updates to existing modules
if not changed:
# Fetch updates
begin = get_submodule_versions(git_path, module, dest)
cmd = [git_path, 'submodule', 'foreach', git_path, 'fetch']
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to fetch submodules: %s" % out + err)
if track_submodules:
# Compare against submodule HEAD
# FIXME: determine this from .gitmodules
version = 'master'
after = get_submodule_versions(git_path, module, dest, '%s/%s' % (remote, version))
if begin != after:
changed = True
else:
# Compare against the superproject's expectation
cmd = [git_path, 'submodule', 'status']
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if rc != 0:
module.fail_json(msg='Failed to retrieve submodule status: %s' % out + err)
for line in out.splitlines():
if line[0] != ' ':
changed = True
break
return changed
def submodule_update(git_path, module, dest, track_submodules, force=False):
''' init and update any submodules '''
# get the valid submodule params
params = get_submodule_update_params(module, git_path, dest)
# skip submodule commands if .gitmodules is not present
if not os.path.exists(os.path.join(dest, '.gitmodules')):
return (0, '', '')
cmd = [git_path, 'submodule', 'sync']
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if 'remote' in params and track_submodules:
cmd = [git_path, 'submodule', 'update', '--init', '--recursive', '--remote']
else:
cmd = [git_path, 'submodule', 'update', '--init', '--recursive']
if force:
cmd.append('--force')
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to init/update submodules: %s" % out + err)
return (rc, out, err)
def set_remote_branch(git_path, module, dest, remote, version, depth):
"""set refs for the remote branch version
This assumes the branch does not yet exist locally and is therefore also not checked out.
Can't use git remote set-branches, as it is not available in git 1.7.1 (centos6)
"""
branchref = "+refs/heads/%s:refs/heads/%s" % (version, version)
branchref += ' +refs/heads/%s:refs/remotes/%s/%s' % (version, remote, version)
cmd = "%s fetch --depth=%s %s %s" % (git_path, depth, remote, branchref)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to fetch branch from remote: %s" % version, stdout=out, stderr=err, rc=rc)
def switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_whitelist):
cmd = ''
if version == 'HEAD':
branch = get_head_branch(git_path, module, dest, remote)
(rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to checkout branch %s" % branch,
stdout=out, stderr=err, rc=rc)
cmd = "%s reset --hard %s/%s --" % (git_path, remote, branch)
else:
# FIXME check for local_branch first, should have been fetched already
if is_remote_branch(git_path, module, dest, remote, version):
if depth and not is_local_branch(git_path, module, dest, version):
# git clone --depth implies --single-branch, which makes
# the checkout fail if the version changes
# fetch the remote branch, to be able to check it out next
set_remote_branch(git_path, module, dest, remote, version, depth)
if not is_local_branch(git_path, module, dest, version):
cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version)
else:
(rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to checkout branch %s" % version, stdout=out, stderr=err, rc=rc)
cmd = "%s reset --hard %s/%s" % (git_path, remote, version)
else:
cmd = "%s checkout --force %s" % (git_path, version)
(rc, out1, err1) = module.run_command(cmd, cwd=dest)
if rc != 0:
if version != 'HEAD':
module.fail_json(msg="Failed to checkout %s" % (version),
stdout=out1, stderr=err1, rc=rc, cmd=cmd)
else:
module.fail_json(msg="Failed to checkout branch %s" % (branch),
stdout=out1, stderr=err1, rc=rc, cmd=cmd)
if verify_commit:
verify_commit_sign(git_path, module, dest, version, gpg_whitelist)
return (rc, out1, err1)
def verify_commit_sign(git_path, module, dest, version, gpg_whitelist):
if version in get_annotated_tags(git_path, module, dest):
git_sub = "verify-tag"
else:
git_sub = "verify-commit"
cmd = "%s %s %s" % (git_path, git_sub, version)
if gpg_whitelist:
cmd += " --raw"
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version, stdout=out, stderr=err, rc=rc)
if gpg_whitelist:
fingerprint = get_gpg_fingerprint(err)
if fingerprint not in gpg_whitelist:
module.fail_json(msg='The gpg_whitelist does not include the public key "%s" for this commit' % fingerprint, stdout=out, stderr=err, rc=rc)
return (rc, out, err)
def get_gpg_fingerprint(output):
"""Return a fingerprint of the primary key.
Ref:
https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;hb=HEAD#l482
"""
for line in output.splitlines():
data = line.split()
if data[1] != 'VALIDSIG':
continue
# if signed with a subkey, this contains the primary key fingerprint
data_id = 11 if len(data) == 11 else 2
return data[data_id]
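# A minimal sketch of the gpg status line this parser expects; the fingerprint
# values are fabricated and only illustrate the field positions.
def _demo_gpg_fingerprint():
    status = ('[GNUPG:] VALIDSIG 1B2C3D4E5F60718293A4B5C6D7E8F900AABBCCDD '
              '2023-01-01 1672531200 0 4 0 1 8 00 '
              'FFEEDDCCBBAA99887766554433221100AABBCCDD')
    # data[1] == 'VALIDSIG' and len(data) != 11, so field index 2 (the
    # signature fingerprint) is returned.
    return get_gpg_fingerprint(status)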
def git_version(git_path, module):
"""return the installed version of git"""
cmd = "%s --version" % git_path
(rc, out, err) = module.run_command(cmd)
if rc != 0:
# one could fail_json here, but the version info is not that important,
# so let's try to fail only on actual git commands
return None
rematch = re.search('git version (.*)$', to_native(out))
if not rematch:
return None
return LooseVersion(rematch.groups()[0])
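# A minimal sketch of how the parsed version feeds the feature gates in this
# module; the version string is an example value.
def _demo_version_gate():
    out = 'git version 2.39.2'
    match = re.search('git version (.*)$', out)
    parsed = LooseVersion(match.groups()[0])   # LooseVersion('2.39.2')
    return parsed >= LooseVersion('1.9.1')     # depth handling needs >= 1.9.1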
def git_archive(git_path, module, dest, archive, archive_fmt, archive_prefix, version):
""" Create git archive in given source directory """
cmd = [git_path, 'archive', '--format', archive_fmt, '--output', archive, version]
if archive_prefix is not None:
cmd.insert(-1, '--prefix')
cmd.insert(-1, archive_prefix)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to perform archive operation",
details="Git archive command failed to create "
"archive %s using %s directory."
"Error: %s" % (archive, dest, err))
return rc, out, err
def create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result):
""" Helper function for creating archive using git_archive """
all_archive_fmt = {'.zip': 'zip', '.gz': 'tar.gz', '.tar': 'tar',
'.tgz': 'tgz'}
_, archive_ext = os.path.splitext(archive)
archive_fmt = all_archive_fmt.get(archive_ext, None)
if archive_fmt is None:
module.fail_json(msg="Unable to get file extension from "
"archive file name : %s" % archive,
details="Please specify archive as filename with "
"extension. File extension can be one "
"of ['tar', 'tar.gz', 'zip', 'tgz']")
repo_name = repo.split("/")[-1].replace(".git", "")
if os.path.exists(archive):
# If git archive file exists, then compare it with new git archive file.
# if match, do nothing
# if does not match, then replace existing with temp archive file.
tempdir = tempfile.mkdtemp()
new_archive_dest = os.path.join(tempdir, repo_name)
new_archive = new_archive_dest + '.' + archive_fmt
git_archive(git_path, module, dest, new_archive, archive_fmt, archive_prefix, version)
# filecmp is supposed to be more efficient than an md5sum checksum
if filecmp.cmp(new_archive, archive):
result.update(changed=False)
# Cleanup before exiting
try:
shutil.rmtree(tempdir)
except OSError:
pass
else:
try:
shutil.move(new_archive, archive)
shutil.rmtree(tempdir)
result.update(changed=True)
except OSError as e:
module.fail_json(msg="Failed to move %s to %s" %
(new_archive, archive),
details=u"Error occurred while moving : %s"
% to_text(e))
else:
# Perform archive from local directory
git_archive(git_path, module, dest, archive, archive_fmt, archive_prefix, version)
result.update(changed=True)
# ===========================================
def main():
module = AnsibleModule(
argument_spec=dict(
dest=dict(type='path'),
repo=dict(required=True, aliases=['name']),
version=dict(default='HEAD'),
remote=dict(default='origin'),
refspec=dict(default=None),
reference=dict(default=None),
force=dict(default='no', type='bool'),
depth=dict(default=None, type='int'),
clone=dict(default='yes', type='bool'),
update=dict(default='yes', type='bool'),
verify_commit=dict(default='no', type='bool'),
gpg_whitelist=dict(default=[], type='list', elements='str'),
accept_hostkey=dict(default='no', type='bool'),
accept_newhostkey=dict(default='no', type='bool'),
key_file=dict(default=None, type='path', required=False),
ssh_opts=dict(default=None, required=False),
executable=dict(default=None, type='path'),
bare=dict(default='no', type='bool'),
recursive=dict(default='yes', type='bool'),
single_branch=dict(default=False, type='bool'),
track_submodules=dict(default='no', type='bool'),
umask=dict(default=None, type='raw'),
archive=dict(type='path'),
archive_prefix=dict(),
separate_git_dir=dict(type='path'),
),
mutually_exclusive=[('separate_git_dir', 'bare'), ('accept_hostkey', 'accept_newhostkey')],
required_by={'archive_prefix': ['archive']},
supports_check_mode=True
)
dest = module.params['dest']
repo = module.params['repo']
version = module.params['version']
remote = module.params['remote']
refspec = module.params['refspec']
force = module.params['force']
depth = module.params['depth']
update = module.params['update']
allow_clone = module.params['clone']
bare = module.params['bare']
verify_commit = module.params['verify_commit']
gpg_whitelist = module.params['gpg_whitelist']
reference = module.params['reference']
single_branch = module.params['single_branch']
git_path = module.params['executable'] or module.get_bin_path('git', True)
key_file = module.params['key_file']
ssh_opts = module.params['ssh_opts']
umask = module.params['umask']
archive = module.params['archive']
archive_prefix = module.params['archive_prefix']
separate_git_dir = module.params['separate_git_dir']
result = dict(changed=False, warnings=list())
if module.params['accept_hostkey']:
if ssh_opts is not None:
if ("-o StrictHostKeyChecking=no" not in ssh_opts) and ("-o StrictHostKeyChecking=accept-new" not in ssh_opts):
ssh_opts += " -o StrictHostKeyChecking=no"
else:
ssh_opts = "-o StrictHostKeyChecking=no"
if module.params['accept_newhostkey']:
if not ssh_supports_acceptnewhostkey(module):
module.warn("Your ssh client does not support accept_newhostkey option, therefore it cannot be used.")
else:
if ssh_opts is not None:
if ("-o StrictHostKeyChecking=no" not in ssh_opts) and ("-o StrictHostKeyChecking=accept-new" not in ssh_opts):
ssh_opts += " -o StrictHostKeyChecking=accept-new"
else:
ssh_opts = "-o StrictHostKeyChecking=accept-new"
# evaluate and set the umask before doing anything else
if umask is not None:
if not isinstance(umask, string_types):
module.fail_json(msg="umask must be defined as a quoted octal integer")
try:
umask = int(umask, 8)
except Exception:
module.fail_json(msg="umask must be an octal integer",
details=str(sys.exc_info()[1]))
os.umask(umask)
# Certain features such as depth require a file:/// protocol for path based urls
# so force a protocol here ...
if os.path.expanduser(repo).startswith('/'):
repo = 'file://' + os.path.expanduser(repo)
# We screenscrape a huge amount of git commands so use C locale anytime we
# call run_command()
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
if separate_git_dir:
separate_git_dir = os.path.realpath(separate_git_dir)
gitconfig = None
if not dest and allow_clone:
module.fail_json(msg="the destination directory must be specified unless clone=no")
elif dest:
dest = os.path.abspath(dest)
try:
repo_path = get_repo_path(dest, bare)
if separate_git_dir and os.path.exists(repo_path) and separate_git_dir != repo_path:
result['changed'] = True
if not module.check_mode:
relocate_repo(module, result, separate_git_dir, repo_path, dest)
repo_path = separate_git_dir
except (IOError, ValueError) as err:
# No repo path found
"""``.git`` file does not have a valid format for detached Git dir."""
module.fail_json(
msg='Current repo does not have a valid reference to a '
'separate Git dir or it refers to an invalid path',
details=to_text(err),
)
gitconfig = os.path.join(repo_path, 'config')
# create a wrapper script and export
# GIT_SSH=<path> as an environment variable
# for git to use the wrapper script
ssh_wrapper = write_ssh_wrapper(module.tmpdir)
set_git_ssh(ssh_wrapper, key_file, ssh_opts)
module.add_cleanup_file(path=ssh_wrapper)
git_version_used = git_version(git_path, module)
if depth is not None and git_version_used < LooseVersion('1.9.1'):
module.warn("git version is too old to fully support the depth argument. Falling back to full checkouts.")
depth = None
recursive = module.params['recursive']
track_submodules = module.params['track_submodules']
result.update(before=None)
local_mods = False
if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone):
# if there is no git configuration, do a clone operation unless:
# * the user requested no clone (they just want info)
# * we're doing a check mode test
# In those cases we do an ls-remote
if module.check_mode or not allow_clone:
remote_head = get_remote_head(git_path, module, dest, version, repo, bare)
result.update(changed=True, after=remote_head)
if module._diff:
diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
if diff:
result['diff'] = diff
module.exit_json(**result)
# there's no git config, so clone
clone(git_path, module, repo, dest, remote, depth, version, bare, reference,
refspec, git_version_used, verify_commit, separate_git_dir, result, gpg_whitelist, single_branch)
elif not update:
# Just return having found a repo already in the dest path
# this does no checking that the repo is the actual repo
# requested.
result['before'] = get_version(module, git_path, dest)
result.update(after=result['before'])
if archive:
# Git archive is not supported by all git servers, so
# we will first clone and perform git archive from local directory
if module.check_mode:
result.update(changed=True)
module.exit_json(**result)
create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result)
module.exit_json(**result)
else:
# else do a pull
local_mods = has_local_mods(module, git_path, dest, bare)
result['before'] = get_version(module, git_path, dest)
if local_mods:
# failure should happen regardless of check mode
if not force:
module.fail_json(msg="Local modifications exist in repository (force=no).", **result)
# if force and in non-check mode, do a reset
if not module.check_mode:
reset(git_path, module, dest)
result.update(changed=True, msg='Local modifications exist.')
# exit if already at desired sha version
if module.check_mode:
remote_url = get_remote_url(git_path, module, dest, remote)
remote_url_changed = remote_url and remote_url != repo and unfrackgitpath(remote_url) != unfrackgitpath(repo)
else:
remote_url_changed = set_remote_url(git_path, module, repo, dest, remote)
result.update(remote_url_changed=remote_url_changed)
if module.check_mode:
remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
result.update(changed=(result['before'] != remote_head or remote_url_changed), after=remote_head)
# FIXME: This diff should fail since the new remote_head is not fetched yet?!
if module._diff:
diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
if diff:
result['diff'] = diff
module.exit_json(**result)
else:
fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used, force=force)
result['after'] = get_version(module, git_path, dest)
# switch to version specified regardless of whether
# we got new revisions from the repository
if not bare:
switch_version(git_path, module, dest, remote, version, verify_commit, depth, gpg_whitelist)
# Deal with submodules
submodules_updated = False
if recursive and not bare:
submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest)
if submodules_updated:
result.update(submodules_changed=submodules_updated)
if module.check_mode:
result.update(changed=True, after=remote_head)
module.exit_json(**result)
# Switch to version specified
submodule_update(git_path, module, dest, track_submodules, force=force)
# determine if we changed anything
result['after'] = get_version(module, git_path, dest)
if result['before'] != result['after'] or local_mods or submodules_updated or remote_url_changed:
result.update(changed=True)
if module._diff:
diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
if diff:
result['diff'] = diff
if archive:
# Git archive is not supported by all git servers, so
# we will first clone and perform git archive from local directory
if module.check_mode:
result.update(changed=True)
module.exit_json(**result)
create_archive(git_path, module, dest, archive, archive_prefix, version, repo, result)
module.exit_json(**result)
if __name__ == '__main__':
main()<|fim▁end|> | type: bool |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from os import path
from .taskqueue import TaskQueueClient
__all__ = ['TaskQueueClient']
<|fim▁hole|>with open(path.join(path.dirname(__file__), 'version.txt')) as fp:
__version__ = fp.read().strip()<|fim▁end|> | |
<|file_name|>repack_database.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Repacking Instaseis databases.
Requires click, netCDF4, numpy, and scipy.
:copyright:
Lion Krischer ([email protected]), 2016
Simon Stähler ([email protected]), 2016
:license:
GNU Lesser General Public License, Version 3 [non-commercial/academic use]
(http://www.gnu.org/copyleft/lgpl.html)
"""
import os
import click
import numpy as np
def maybe_encode(string, encoding='ascii'):
try:
return string.encode(encoding)
except AttributeError:
return string
except UnicodeEncodeError:
return string
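# A minimal sketch of maybe_encode()'s behaviour (values are examples only):
def _demo_maybe_encode():
    assert maybe_encode('anisotropic') == b'anisotropic'
    assert maybe_encode(b'anisotropic') == b'anisotropic'  # bytes: AttributeError path
    assert maybe_encode(u'Stähler') == u'Stähler'          # non-ascii: returned unchanged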
def unroll_and_merge_netcdf4(filenames, output_folder):
"""
Completely unroll and merge both files.
"""
import netCDF4
from scipy.spatial import cKDTree
# Find MZZ, MXX_P_MYY, MXZ_MYZ, MXY_MXX_M_MYY directories
if len(filenames) == 4:
filenames = [os.path.normpath(_i) for _i in filenames]
mzz = [_i for _i in filenames if "MZZ" in _i]
mxx = [_i for _i in filenames if "MXX_P_MYY" in _i]
mxz = [_i for _i in filenames if "MXZ_MYZ" in _i]
mxy = [_i for _i in filenames if "MXY_MXX_M_MYY" in _i]
assert len(mzz) == 1
assert len(mxx) == 1
assert len(mxz) == 1
assert len(mxy) == 1
mzz = mzz[0]
mxx = mxx[0]
mxz = mxz[0]
mxy = mxy[0]
assert os.path.exists(mzz)
assert os.path.exists(mxx)
assert os.path.exists(mxz)
assert os.path.exists(mxy)
f_in_1 = netCDF4.Dataset(mzz, 'r')
f_in_2 = netCDF4.Dataset(mxx, 'r')
f_in_3 = netCDF4.Dataset(mxz, 'r')
f_in_4 = netCDF4.Dataset(mxy, 'r')
elif len(filenames) == 2:
pz = [_i for _i in filenames if "PZ" in _i]
px = [_i for _i in filenames if "PX" in _i]
assert len(pz) == 1
assert len(px) == 1
pz = pz[0]
px = px[0]
assert os.path.exists(pz)
assert os.path.exists(px)
f_in_1 = netCDF4.Dataset(pz, 'r')
f_in_2 = netCDF4.Dataset(px, 'r')
else:
print('Wrong number of simulations: ', len(filenames))
assert False
output_filename = os.path.join(output_folder, "merged_instaseis_db.nc4")
assert not os.path.exists(output_filename)
# Get sorting order
r = np.array([f_in_1.groups['Mesh'].variables['mp_mesh_Z'][:],
f_in_1.groups['Mesh'].variables['mp_mesh_S'][:]]).transpose()
ctree = cKDTree(r)
inds = ctree.indices
try:
f_out = netCDF4.Dataset(output_filename, 'w', format='NETCDF4')
# Copy attributes from the vertical file.
for name in f_in_1.ncattrs():
value = getattr(f_in_1, name)
print(name, value)
setattr(f_out, name, maybe_encode(value))
f_out.setncattr('nsim', len(filenames))
for name, dimension in f_in_1.dimensions.items():
if not dimension.isunlimited():
f_out.createDimension(name, len(dimension))
else:
f_out.createDimension(name, None)
# Create Mesh group and copy mesh variables
f_out.createGroup('Mesh')
for name, dimension in f_in_1['Mesh'].dimensions.items():
if not dimension.isunlimited():
f_out['Mesh'].createDimension(name, len(dimension))
else:
f_out['Mesh'].createDimension(name, None)
for name, variable in f_in_1['Mesh'].variables.items():
f_out['Mesh'].createVariable(name, variable.datatype,
variable.dimensions)
if ('elements',) == variable.dimensions:
print('Resorting %s' % name)
f_out['Mesh'].variables[name][:] = \
f_in_1['Mesh'].variables[name][inds]
elif name == 'sem_mesh':
print('Resorting first dim of %s' % name)
f_out['Mesh'].variables[name][:, :, :] = \
f_in_1['Mesh'].variables[name][inds, :, :]
elif name == 'fem_mesh':
print('Resorting first dim of %s' % name)
f_out['Mesh'].variables[name][:, :] = \
f_in_1['Mesh'].variables[name][inds, :]
else:
f_out['Mesh'].variables[name][:] = \
f_in_1['Mesh'].variables[name][:]
# Copy source time function variables
for name, variable in f_in_1['Snapshots'].variables.items():
if name in ['stf_dump', 'stf_d_dump']:
f_out.createVariable(name, variable.datatype,
variable.dimensions)
f_out.variables[name][:] = f_in_1['Snapshots'].variables[name][:]
# Create a new array but this time in 5D. The first dimension
# is the element number, the second the merged displacement
# variable, the third and fourth the GLL points in both
# directions, and the last the time axis.
ndumps = f_in_1.getncattr("number of strain dumps")
number_of_elements = f_in_1.getncattr("nelem_kwf_global")
npol = f_in_1.getncattr("npol")
# Get datasets and the dtype.
if len(filenames) == 2:
meshes = [
f_in_1["Snapshots"]["disp_s"], # PZ
f_in_1["Snapshots"]["disp_z"],
f_in_2["Snapshots"]["disp_s"], # PX
f_in_2["Snapshots"]["disp_p"],
f_in_2["Snapshots"]["disp_z"]]
elif len(filenames) == 4:
meshes = [
f_in_1["Snapshots"]["disp_s"], # MZZ
f_in_1["Snapshots"]["disp_z"],
f_in_2["Snapshots"]["disp_s"], # MXX + MYY
f_in_2["Snapshots"]["disp_z"],
f_in_3["Snapshots"]["disp_s"], # MXZ / MYZ
f_in_3["Snapshots"]["disp_p"],
f_in_3["Snapshots"]["disp_z"],
f_in_4["Snapshots"]["disp_s"], # MXY / MXX - MYY
f_in_4["Snapshots"]["disp_p"],
f_in_4["Snapshots"]["disp_z"]]
dtype = meshes[0].dtype
nvars = len(meshes)
dim_elements = f_out.createDimension('elements', number_of_elements)
dim_ipol = f_out.createDimension('ipol', npol + 1)
dim_jpol = f_out.createDimension('jpol', npol + 1)
dim_nvars = f_out.createDimension('variables', nvars)
dim_snaps = f_out.dimensions['snapshots']
ds_o = f_out.createVariable(varname="merged_snapshots",
dimensions=(dim_elements.name,
dim_nvars.name,
dim_jpol.name,
dim_ipol.name,
dim_snaps.name),
datatype=dtype, contiguous=True)
# Old order (Instaseis):
# dimensions=(dim_elements.name,
# dim_snaps.name,
# dim_ipol.name,
# dim_jpol.name,
# dim_nvars.name),
utemp = np.zeros((nvars, npol + 1, npol + 1, ndumps),
dtype=dtype)
# Now it becomes more interesting and very slow.
sem_mesh = f_in_1["Mesh"]["sem_mesh"]
with click.progressbar(range(number_of_elements),
length=number_of_elements,
label="\t ") as gll_idxs:
for gll_idx in gll_idxs:
gll_point_ids = sem_mesh[inds[gll_idx]]
# Load displacement from all GLL points.
for ivar, var in enumerate(meshes):
# The list of ids we have is unique but not sorted.
ids = gll_point_ids.flatten()
s_ids = np.sort(ids)
temp = var[:, s_ids]
for ipol in range(npol + 1):
for jpol in range(npol + 1):
idx = ipol * (npol + 1) + jpol
# ndumps, ipol, jpol, nvar (Fortran notation)
utemp[ivar, ipol, jpol, :] = \
temp[:, np.argwhere(s_ids == ids[idx])[0][0]]
ds_o[gll_idx] = utemp
finally:
try:<|fim▁hole|> except:
pass
try:
f_in_2.close()
except:
pass
try:
f_in_3.close()
except:
pass
try:
f_in_4.close()
except:
pass
try:
f_out.close()
except:
pass
@click.command()
@click.argument("input_folder", type=click.Path(exists=True,
file_okay=False,
dir_okay=True))
@click.argument("output_folder", type=click.Path(exists=False))
def repack_database(input_folder, output_folder):
found_filenames = []
for root, _, filenames in os.walk(input_folder, followlinks=True):
for filename in filenames:
if filename != "ordered_output.nc4":
continue
found_filenames.append(os.path.join(root, filename))
assert found_filenames, "No files named `ordered_output.nc4` found."
os.makedirs(output_folder)
# The unrolled merge completely unrolls everything, deduplicates the GLL
# points, and merges the netCDF files into one big file.
unroll_and_merge_netcdf4(filenames=found_filenames,
output_folder=output_folder)
if __name__ == "__main__":
repack_database()<|fim▁end|> | f_in_1.close() |
<|file_name|>forocoches.py<|end_file_name|><|fim▁begin|>################################################################################
#
# Copyright 2015-2020 Félix Brezo and Yaiza Rubio
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
__author__ = "Felix Brezo, Yaiza Rubio <[email protected]>"
__version__ = "2.0"
from osrframework.utils.platforms import Platform
class Forocoches(Platform):
"""A <Platform> object for Forocoches"""
def __init__(self):
self.platformName = "Forocoches"
self.tags = ["opinions", "activism"]
# Add the URL for enumeration below
#self.urlEnumeration = "http://www.forocoches.com/foro/member.php?u=" + "<HERE_GOES_THE_USER_ID>"
########################
# Defining valid modes #
########################
self.isValidMode = {}
self.isValidMode["phonefy"] = False
self.isValidMode["usufy"] = True
self.isValidMode["searchfy"] = False
######################################
# Search URL for the different modes #
######################################
# Strings with the URL for each and every mode
self.url = {}
#self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>"
self.url["usufy"] = "http://www.forocoches.com/foro/member.php?username=" + "<usufy>"
#self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>"
######################################
# Whether the user needs credentials #
######################################
self.needsCredentials = {}
#self.needsCredentials["phonefy"] = False
self.needsCredentials["usufy"] = False
#self.needsCredentials["searchfy"] = False
#################
# Valid queries #
#################
# Regular expressions that a query must match to be considered valid
self.validQuery = {}
# The regular expression '.+' will match any query.
#self.validQuery["phonefy"] = ".*"
self.validQuery["usufy"] = ".+"
#self.validQuery["searchfy"] = ".*"
###################
# Not_found clues #
###################
# Strings that will imply that the queried user does not exist
self.notFoundText = {}
#self.notFoundText["phonefy"] = []
self.notFoundText["usufy"] = ["main error message"]
#self.notFoundText["searchfy"] = []
#########################
# Fields to be searched #
#########################
self.fieldsRegExp = {}
# Definition of regular expressions to be searched in phonefy mode
#self.fieldsRegExp["phonefy"] = {}
# Example of fields:
#self.fieldsRegExp["phonefy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in usufy mode
self.fieldsRegExp["usufy"] = {}
# Example of fields:<|fim▁hole|> #self.fieldsRegExp["usufy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in searchfy mode
#self.fieldsRegExp["searchfy"] = {}
# Example of fields:
#self.fieldsRegExp["searchfy"]["i3visio.location"] = ""
################
# Fields found #
################
# This attribute will be feeded when running the program.
self.foundFields = {}<|fim▁end|> | |
<|file_name|>ospf.py<|end_file_name|><|fim▁begin|># Copyright (C) 2013 Cisco Systems Inc.
# All rights reserved
import ipaddress
import re
from .feature import Feature
from .vrf import VRF
from .cisco_secret import CiscoSecret
from .nxcli import *
import nxos_utils
class OSPFSession(Feature):
'''
Use this class to configure the OSPF feature.
'''
def __init__(self, Instance=None, vrf='default'):
self._name = 'ospf'
if Instance == None:
raise ValueError, 'Instance Name is Null.'
self.Instance = Instance
self.OSPFInterface.Instance = self.Instance
self.set_vrf(vrf)
def _run_cfg_at_router_ospf_level(self, cmds):
'''
Run the given list of commands at router ospf config level.
Arguments:
cmds: List of commands to be configured at router ospf config level.
Returns: True on success
'''
string = 'router ospf %s' % self.Instance
if self.vrf.get_name() != 'default':
if not self.vrf.exists():
self.vrf.create()
string += ' ; vrf %s' % self.vrf.get_name()
string += ' ; %s' % cmds
return NXCLI._run_cfg(string)
def set_Instance(self, Instance):
self.Instance = Instance
def set_vrf(self, vrf):
'''
Set the VRF (Virtual Routing and Forwarding) context for subsequent API
calls on this OSPFSession object. Any configuration done on this
OSPFSession object will be applied to this VRF.
Arguments:
vrf: VRF name (string) or the VRF ID (int).
Returns: Nothing
'''
if type(vrf) in [int, str]:
self.vrf = VRF(vrf)
elif isinstance(vrf, VRF):
self.vrf = vrf
else:
raise ValueError, ('Invalid argument type for vrf, acceptable types'
' are VRF, int and str')
def start(self):
'''
Arguments: None
Returns: True on success
'''
if self.is_shutdown():
return self._run_cfg_at_router_ospf_level('no shutdown')
if not self.is_enabled():
self.enable()
return self._run_cfg_at_router_ospf_level('')
def shutdown(self):
'''
Shutdown the OSPF routing process. All existing OSPF configurations will
be preserved.
Arguments: None
Returns: True on success
'''
return self._run_cfg_at_router_ospf_level('shutdown')
def is_shutdown(self):
'''
Check if the OSPF routing process is shutdown.
Arguments: None
Returns:
True if the OSPF process is enabled and shutdown.
False if the OSPF process is running or if OSPF is not enabled.
'''
if self.is_enabled():
output = nxos_utils.cli_ex('show run ospf | include shutdown')
rows = output.split('\n')
for row in rows:
if row.strip() == 'shutdown':
return True
return False
def cfg_router_id(self, router_id, **kwargs):
'''
Specify the IP address to use as router-id. To remove this
configuration set the optional 'no' argument to True.
Arguments:
router_id: A string in dotted quad format ('A.B.C.D') representing
the IP Address of the router.
Optional Arguments:
no: A boolean, set to True to remove the router-id.
Returns: True on success
'''
cmd = NXCLI._add_no_if_present(NXCLI._read_arg(router_id, 'router_id',
'router-id %', {str: 'socket.inet_aton(router_id)'}), kwargs)
return self._run_cfg_at_router_ospf_level(cmd)
def cfg_distance(self, dist, **kwargs):
'''
Configure administrative distance for this OSPFv2 instance.
To set the distances back to the default set the optional 'no' argument
to True.
Arguments:
dist: Distance for ospf routes, an integer ranging from 1 to
255.
Optional Arguments:
no: Set to True to set distances back to the default values.
Returns: True on success
'''
cmd = NXCLI._add_no_if_present('distance', kwargs)
cmd += NXCLI._read_arg(dist, 'dist', ' %', {int:
'dist >= 1 and dist <= 255'})
return self._run_cfg_at_router_ospf_level(cmd)
def log_adjacency_changes(self, **kwargs):
'''
Log a message for neighbor up/down event. To disable this behavior set
the optional 'no' argument to True.
Optional Arguments:
no: A boolean, set to True to disable this feature.
Returns: True on success
'''
cmd = NXCLI._add_no_if_present('log-adjacency-changes', kwargs)
return self._run_cfg_at_router_ospf_level(cmd)
<|fim▁hole|> '''
Configures the maximum number of parallel routes that the OSPF
can support. To restore the default number of parallel
routes, set the optional 'no' argument to True.
Arguments:
max: Maximum number of parallel routes that an IP routing protocol
installs in a routing table. The range is from 1 to 64
Optional Arguments:
no: Set to True to restore the default number of parallel routes
Returns: True on success
'''
cmd = NXCLI._add_no_if_present(NXCLI._read_arg(max, 'max',
'maximum-paths %', {int: 'max >= 1 and max <= 64'}), kwargs)
return self._run_cfg_at_router_ospf_level(cmd)
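    # ------------------------------------------------------------------
    # Illustrative usage sketch (added for clarity; not part of the original
    # Cisco module, and the surrounding NX-OS environment is assumed).
    # Only names defined in this file are used:
    #
    #   ospf = OSPFSession(Instance='100', vrf='default')
    #   ospf.start()                    # enable the feature / clear shutdown
    #   ospf.cfg_router_id('10.0.0.1')
    #   ospf.cfg_maximum_paths(8)
    #   intf = OSPFInterface('Ethernet1/1', '0.0.0.0')
    #   intf.add()                      # 'ip router ospf 100 area 0.0.0.0'
    #   # (OSPFSession.__init__ attaches the Instance that add() relies on)
    # ------------------------------------------------------------------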
class OSPFInterface(object):
def __init__(self, if_name, area, **kwargs):
self.if_name = NXCLI._read_arg(if_name, 'if_name', ' %', {str:None})
self.area = NXCLI._read_arg(area, 'area', ' %', {str:None})
def _run_cfg_at_interface_level(self, cmds):
'''
Run the given list of commands at interface config level.
Arguments:
cmds: List of commands to be configured at interface level.
Returns: True on success
'''
string = 'interface %s' % self.if_name
string += ' ; %s' % cmds
# print string
return NXCLI._run_cfg(string)
def add(self):
'''
Add this interface to OSPFv2 instance and area.
Arguments:
None.
Optional Arguments:
None.
Returns: True on success
'''
cmd = 'ip router ospf %s area %s' % (self.Instance,self.area)
return self._run_cfg_at_interface_level(cmd)
def cfg_ospf_cost(self, ospf_cost=60, **kwargs):
'''
Configure OSPFv2 cost for this interface.
Arguments:
            ospf_cost: ip ospf cost (int). Acceptable Range 1 to 65535.
Optional Arguments:
no: A boolean, set to True to remove the ip ospf config.
Returns: True on success
'''
cmd = NXCLI._add_no_if_present(NXCLI._read_arg(ospf_cost, 'ospf_cost',
'ip ospf cost %',
{int: 'ospf_cost >= 0 and ospf_cost <= 65535'}), kwargs)
return self._run_cfg_at_interface_level(cmd)
def cfg_hello_interval(self, hello_interval=60, **kwargs):
'''
Configure OSPFv2 hello interval for this interface.
Arguments:
            hello_interval: ip ospf hello interval in seconds (int). Acceptable Range 1 to 65535.
Optional Arguments:
no: A boolean, set to True to remove the ip ospf hello interval config.
Returns: True on success
'''
cmd = NXCLI._add_no_if_present(NXCLI._read_arg(hello_interval, 'hello_interval',
'ip ospf hello-interval %',
{int: 'hello_interval >= 1 and hello_interval <= 65535'}), kwargs)
return self._run_cfg_at_interface_level(cmd)
def cfg_dead_interval(self, dead_interval=60, **kwargs):
'''
Configure OSPFv2 dead interval for this interface.
Arguments:
            dead_interval: ip ospf dead interval in seconds (int). Acceptable Range 1 to 65535.
Optional Arguments:
no: A boolean, set to True to remove the ip ospf dead interval config.
Returns: True on success
'''
cmd = NXCLI._add_no_if_present(NXCLI._read_arg(dead_interval, 'dead_interval',
'ip ospf dead-interval %',
{int: 'dead_interval >= 1 and dead_interval <= 65535'}), kwargs)
return self._run_cfg_at_interface_level(cmd)
def cfg_ospf_priority(self, ospf_priority=60, **kwargs):
'''
Configure OSPFv2 priority for this interface. Priority is used to determine
DR election in area.
Arguments:
            ospf_priority: ip ospf priority (int). Acceptable Range 0 to 255.
Optional Arguments:
no: A boolean, set to True to remove the ip ospf priority config.
Returns: True on success
'''
cmd = NXCLI._add_no_if_present(NXCLI._read_arg(ospf_priority, 'ospf_priority',
'ip ospf priority %',
{int: 'ospf_priority >= 0 and ospf_priority <= 255'}), kwargs)
return self._run_cfg_at_interface_level(cmd)
def cfg_mtu_ignore(self, **kwargs):
'''
Configure OSPFv2 to ignore any IP MTU mismatch with a neighbor.
Arguments: None
Optional Arguments:
no: A boolean, set to True to remove the ip ospf mtu-ignore
config.
Returns: True on success
'''
        cmd = NXCLI._add_no_if_present('ip ospf mtu-ignore', kwargs)
        return self._run_cfg_at_interface_level(cmd)
def cfg_passive_interface(self, **kwargs):
'''
        Suppress OSPF routing updates on this interface.
Arguments: None
Optional Arguments:
no: A boolean, set to True to remove the ip ospf passive-interface
config.
Returns: True on success
'''
cmd = NXCLI._add_no_if_present('ip ospf passive-interface', kwargs)
return self._run_cfg_at_interface_level(cmd)
def shutdown(self, **kwargs):
'''
Shutdown the OSPF on this interface. All existing OSPF
configurations will be preserved.
Arguments: None
Optional Arguments:
no: A boolean, set to True to remove the ip ospf shutdown config.
Returns: True on success
'''
cmd = NXCLI._add_no_if_present('ip ospf shutdown', kwargs)
return self._run_cfg_at_interface_level(cmd)<|fim▁end|> | def cfg_maximum_paths(self, max, **kwargs): |
<|file_name|>dhcpclientscapy.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# vim:ts=4:sw=4:expandtab 2
# -*- coding: utf-8 -*-
'''
Based on https://github.com/mortnerDHCPv4v6
'''
# TODO:
# * refactor
# * read conf from dhclient.conf
# * daemonize
# * requests in loop
# * send renew according to renew time
# * implement release
# * implement nak case
# FIXME:
# * build package with most common BOOTP/DHCP options
__author__ = "duy <duy at rhizoma dot tk>"
__copyright__ = "GPL v3"
from scapy.all import *
import random
import string  # used by randomHostname() below; not guaranteed to come in via scapy's wildcard import
import ipaddr
from time import sleep
import subprocess
import argparse
# for debugging
#CLIENT_PORT= 8001
#SERVER_PORT= 8000
CLIENT_PORT= 68
SERVER_PORT= 67
BROADCAST_ADDR = '255.255.255.255'
META_ADDR = '0.0.0.0'
BROADCAST_MAC = 'ff:ff:ff:ff:ff:ff'
MAX_DHCP_LEASE = 1500
LEASE_TIME = 43200 # 7776000
# "subnet_mask", "router", "name_server", "domain"
PARAM_REQ_LIST = '\x01\x03\x06\x0fw\xfc'# \x1c3
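# Rough decode of PARAM_REQ_LIST above (assumed from the standard DHCP option
# numbers): \x01 subnet mask, \x03 router, \x06 name server, \x0f domain name,
# \x77 (119) domain search, \xfc (252) proxy auto-config / WPAD.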
INIT_STATE = 0
BOUND_STATE = 1
RENEW_STATE = 2
REBIND_STATE = 3
REBOOT_STATE = 4
TIMEOUT_STATE = 5
RENEW_TIME_ON_LEASE = 1.0/2
REBIND_TIME_ON_LEASE = 7.0/8
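# Worked example (sketch): with LEASE_TIME = 43200 s (12 h) the client renews
# (T1) after 43200 * 1.0/2 = 21600 s and falls back to rebinding (T2) after
# 43200 * 7.0/8 = 37800 s, matching the usual DHCP timer split.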
class Limits:
XID_MIN = 1
XID_MAX = 900000000
def randomHostname(length=8, charset=None):
charset = charset or string.ascii_uppercase + string.digits
return ''.join(random.choice(charset) for x in range(length))
def genXid():
return random.randint(Limits.XID_MIN, Limits.XID_MAX)
class DHCPv4Client(object):
def __init__(self, iface, server_port=None, client_port=None, server_ip=None,
server_mac=None, hostname=None):
self.iface = iface
self.state = INIT_STATE
self.renew_time = 0
self.rebind_time = 0
self.server_port = server_port or SERVER_PORT
self.client_port = client_port or CLIENT_PORT
self.server_ip = server_ip or BROADCAST_ADDR
self.server_mac = server_mac or BROADCAST_MAC
self.client_ip = META_ADDR
_, client_mac = get_if_raw_hwaddr(self.iface)
self.client_mac = client_mac
self.hostname = hostname or randomHostname()
self.client_xid = genXid()
# FIXME: when server xid is used?
self.server_xid = None
self.server_id = None
self.response_server_ip = None
self.response_server_mac = None
self.client_ip_offered = None
self.subnet_mask = None
self.offered_ip = None
self.lease_time = None
self.router = None
self.name_server = None
self.domain = None
self.options = []
self.callbacks = {}
self.history = []
def __str__(self):
return self.__repr__()
def __repr__(self):
return """DHCPv4 Client
    Interface: %s
Verbosity: %s
Client Configuration: | Server
-------------------------------------|------------------------------
IP = %-20s %-20s
HWAddr = %-20s %-20s
Hostname = %-20s
MASK = %-20s
xID = %-20s %-20s
DHCP Specific
--------------------
serverID = %-20s
Options = %-20s
Registered Callbacks
--------------------
%s
History
--------------------
%s
""" % (conf.iface, conf.verb,
self.client_ip,
self.server_ip,
self.client_mac,
self.server_mac,
self.hostname,
self.subnet_mask,
self.client_xid,
self.server_xid,
self.server_id,
repr(self.options),
self.callbacks,
self.history)
def register_callback(self, hook, func):
self.callbacks[hook] = func
def exec_callback(self, hook, args):
self.track_history("Hook:" + str(hook))
if self.callbacks.has_key(hook):
self.callbacks[hook]()
def track_history(self, name=None):
from inspect import stack
name = name or stack()[1][3]
self.history.append(name)
def genDiscover(self):
dhcp_discover = (
Ether(src=str2mac(self.client_mac), dst=self.server_mac) /
IP(src=self.client_ip, dst=self.server_ip) /
UDP(sport=self.client_port, dport=self.server_port) /
BOOTP(chaddr=[self.client_mac], xid=self.client_xid) /
DHCP(options=[
("message-type", "discover"),
("param_req_list", PARAM_REQ_LIST),
("max_dhcp_size", MAX_DHCP_LEASE),
("client_id", self.client_mac),
("lease_time", LEASE_TIME),
("hostname", self.hostname),
"end"
])
)
return dhcp_discover
def genRequest(self):
dhcp_req = (
Ether(src=str2mac(self.client_mac), dst=self.server_mac) /
IP(src=self.client_ip, dst=self.server_ip) /
UDP(sport=self.client_port, dport=self.server_port) /
BOOTP(chaddr=[self.client_mac], xid=self.client_xid) /
DHCP(options=[
("message-type", "request"),
("param_req_list", PARAM_REQ_LIST),
("max_dhcp_size", MAX_DHCP_LEASE),
("client_id", self.client_mac),
("requested_addr", self.client_ip_offered), # obtained from discover
("server_id", self.server_id), # obtained from discover
("hostname", self.hostname),
"end"
])
)
return dhcp_req
def genRelease(self):
dhcp_release = (
Ether(src=str2mac(self.client_mac), dst=self.server_mac) /
IP(src=self.client_ip, dst=self.server_ip) /
UDP(sport=self.client_port, dport=self.server_port) /
BOOTP(chaddr=[self.client_mac], xid=self.client_xid) /
DHCP(options=[
("message-type", "release"),
("server_id", self.server_id), # obtained from discover
("client_id", self.client_mac),
"end"
])
)
return dhcp_release
def parseOffer(self, packet):
print 'Parsing offer'
print packet.show()
self.response_server_ip = packet[IP].src
self.response_server_mac = packet[Ether].src
self.server_id = packet[BOOTP].siaddr
#FIXME: xid has to match the initial xid
# packet[BOOTP].xid
# FIXME: chaddr has to match client_mac
# str2mac(packet[BOOTP].chaddr)
# FIXME: check if yiaddr match current client ip or requested ip
self.client_ip_offered = packet[BOOTP].yiaddr
for option in packet[DHCP].options:
if type(option) == tuple:
if option[0] == 'subnet_mask':
self.subnet_mask = option[1]
if option[0] == 'router':
self.router = option[1]
if option[0] == 'domain':
self.domain = option[1]
if option[0] == 'name_server':
self.name_server = option[1]
if option[0] == 'lease_time':
self.lease_time = option[1]
def parseACK(self, packet):
print "Parsing ACK"
print packet.show()
# FIXME: check these fields match current ones?
#self.response_server_ip = packet[IP].src
#self.response_server_mac = packet[Ether].src
#self.server_id = packet[BOOTP].siaddr
#FIXME: xid has to match the initial xid
# packet[BOOTP].xid
# FIXME: chaddr has to match client_mac
# str2mac(packet[BOOTP].chaddr)
# FIXME: check if yiaddr match current client ip or requested ip
self.client_ip_offered = packet[BOOTP].yiaddr
#FIXME: check these options match offered ones?
for option in packet[DHCP].options:
if type(option) == tuple:
if option[0] == 'subnet_mask':
self.subnet_mask = option[1]
if option[0] == 'router':
self.router = option[1]
if option[0] == 'domain':
self.domain = option[1]
if option[0] == 'name_server':
self.name_server = option[1]
if option[0] == 'lease_time':
self.lease_time = option[1]
def isOffer(self, packet):
if DHCP in packet and DHCPTypes[packet[DHCP].options[0][1]] == 'offer':
return True
return False
def isNAK(self, packet):
if DHCP in packet and DHCPTypes[packet[DHCP].options[0][1]] == 'nak':
return True
return False
def isACK(self, packet):
if DHCP in packet and DHCPTypes[packet[DHCP].options[0][1]] == 'ack':
return True
return False
def sendDiscover(self):
packet = self.genDiscover()
self.track_history()
print packet.show()<|fim▁hole|> sendp(packet)
print "Sent discover"
def sendRequest(self):
packet = self.genRequest()
self.track_history()
print packet.show()
sendp(packet)
print "Sent request"
def setAddr(self):
print "Setting address"
#FIXME: subprocess.call to really set ip, route, nameserver
set_ip = "ip addr add local %s netmask %s dev %s" % \
(self.client_ip_offered, self.subnet_mask, self.iface)
set_route = "route add default gw %s" % self.router
#FIXME: set nameserver with resolvconf if installed
print set_ip
print set_route
#subprocess.call([set_ip])
#subprocess.call([set_route])
def handleResponse(self, packet):
print "Handling response"
if self.isOffer(packet):
print "Offer detected"
self.parseOffer(packet)
self.sendRequest()
if self.isACK(packet):
print "ACK detected"
self.parseACK(packet)
self.setAddr()
self.state = BOUND_STATE
self.renew_time = self.lease_time * RENEW_TIME_ON_LEASE
self.rebind_time = self.lease_time * REBIND_TIME_ON_LEASE
print "Sleeping for % secs." % self.renew_time
sleep(self.renew_time)
self.state = RENEW_STATE
self.sendRequest()
if self.isNAK(packet):
print "NAK detected"
# FIXME: implement
def getResponse(self, timeout=3, tries=1):
#FIXME: server_port is src and client_port is dst
sniff(filter="udp and (port %s or %s)" % \
(self.server_port, self.client_port),
prn=self.handleResponse, store=0, iface=conf.iface)
def main():
# FIXME: add support for several ifaces
parser = argparse.ArgumentParser()
parser.add_argument('interface', nargs='?',
help='interface to configure with DHCP' )
args = parser.parse_args()
if not args.interface:
args.interface = 'wlan0'
conf.iface = args.interface
conf.checkIPaddr = False
conf.verb = False
c = DHCPv4Client(args.interface)
    #FIXME: if interface has already an address, send request with that address
# instead of discover?
c.sendDiscover()
c.getResponse()
print c
if __name__ == "__main__":
main()<|fim▁end|> | |
<|file_name|>viewpop.js<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2015 by Rafael Angel Aznar Aparici (rafaaznar at gmail dot com)
*
* sisane: The stunning micro-library that helps you to develop easily
* AJAX web applications by using Angular.js 1.x & sisane-server
* sisane is distributed under the MIT License (MIT)
* Sources at https://github.com/rafaelaznar/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
'use strict';
moduloEpisodio.controller('EpisodioViewpopController', ['$scope', '$routeParams', 'serverService', 'episodioService', '$location', '$uibModalInstance', 'id',
function ($scope, $routeParams, serverService, episodioService, $location, $uibModalInstance, id) {
$scope.fields = episodioService.getFields();
$scope.obtitle = episodioService.getObTitle();
$scope.icon = episodioService.getIcon();
$scope.ob = episodioService.getTitle();
$scope.title = "Vista de " + $scope.obtitle;
$scope.id = id;
$scope.status = null;
$scope.debugging = serverService.debugging();
serverService.promise_getOne($scope.ob, $scope.id).then(function (response) {
if (response.status == 200) {
if (response.data.status == 200) {
$scope.status = null;
$scope.bean = response.data.message;
var filter = "and,id_medico,equa," + $scope.bean.obj_medico.id;
serverService.promise_getPage("usuario", 1, 1, filter).then(function (data) {
if (data.data.message.length > 0)
$scope.medico = data.data.message[0];
});
} else {
$scope.status = "Error en la recepción de datos del servidor";
}
} else {
$scope.status = "Error en la recepción de datos del servidor";
}
}).catch(function (data) {
$scope.status = "Error en la recepción de datos del servidor";
});
$scope.cancel = function () {<|fim▁hole|> }
}]);<|fim▁end|> | $uibModalInstance.dismiss('cancel'); |
<|file_name|>test16.py<|end_file_name|><|fim▁begin|>from shutit_module import ShutItModule
<|fim▁hole|> def build(self, shutit):
shutit.login()
shutit.login(command='bash')
shutit.send('ls',note='We are listing files')
shutit.logout()
shutit.logout()
return True
def module():
return test16(
'shutit.test16.test16.test16', 210790650.002938762,
description='',
maintainer='',
depends=['shutit.tk.setup']
)<|fim▁end|> | class test16(ShutItModule):
|
<|file_name|>editorFunctions.js<|end_file_name|><|fim▁begin|><|fim▁hole|>function editorOn(divid){
$('#'+divid).parent().parent().find(' >*:last-child img').css('visibility','hidden');
borderstyle = $('#'+divid).parent().parent().css('border');
$('#'+divid).parent().parent().css('border','')
}
function editorOff(divid){
$('#'+divid).parent().parent().find(' >*:last-child img').css('visibility','');
$('#'+divid).parent().parent().css('border',borderstyle);
}<|fim▁end|> | // JScript File
var borderstyle |
<|file_name|>MainActivity.java<|end_file_name|><|fim▁begin|>package com.twopillar.jiba.activity;
import android.graphics.Color;
import android.graphics.drawable.Drawable;
import android.os.Bundle;
import android.support.v4.app.FragmentTabHost;
import android.view.KeyEvent;
import android.view.View;
import android.widget.RadioButton;
import android.widget.RadioGroup;
import android.widget.RadioGroup.OnCheckedChangeListener;
import android.widget.Toast;
import com.twopillar.jiba.R;
import com.twopillar.jiba.fragment.ActionFragment;
import com.twopillar.jiba.fragment.BBSFragment;
import com.twopillar.jiba.fragment.MeFragment;
import com.twopillar.jiba.fragment.PlanFragment;
public class MainActivity extends BaseActivity{
private FragmentTabHost mTabHost;
private RadioGroup radioGroup;
    private RadioButton plan;// plan tab (计划)
    private RadioButton action;// actions tab (动作)
    private RadioButton bbs;// forum tab (论坛)
    private RadioButton me;// "me" tab (我)
private long exitTime = 0;
String tabs[] = {"Tab1","Tab2","Tab3","Tab4"};
Class cls[] = {PlanFragment.class,ActionFragment.class,BBSFragment.class,MeFragment.class};
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.main_fragment_tabs);
initView();
}
private void initView() {
plan = (RadioButton)findViewById(R.id.plan);
action = (RadioButton)findViewById(R.id.action);
bbs = (RadioButton)findViewById(R.id.bbs);
me = (RadioButton)findViewById(R.id.me);
mTabHost = (FragmentTabHost)this.findViewById(android.R.id.tabhost);
mTabHost.setup(this, getSupportFragmentManager(), R.id.realtabcontent);
mTabHost.getTabWidget().setVisibility(View.GONE);
for(int i=0;i<tabs.length;i++){
mTabHost.addTab(mTabHost.newTabSpec(tabs[i]).setIndicator(tabs[i]),cls[i], null);
}
radioGroup = (RadioGroup) findViewById(R.id.main_radiogroup);
radioGroup.setOnCheckedChangeListener(new OnCheckedChangeListener() {
@Override
public void onCheckedChanged(RadioGroup group, int checkedId) {
switch(checkedId){
case R.id.plan:
changeSeletedButton(0);
mTabHost.setCurrentTabByTag(tabs[0]);
break;
case R.id.action:
changeSeletedButton(1);
mTabHost.setCurrentTabByTag(tabs[1]);
break;
case R.id.bbs:
changeSeletedButton(2);
mTabHost.setCurrentTabByTag(tabs[2]);
break;
case R.id.me:
changeSeletedButton(3);
mTabHost.setCurrentTabByTag(tabs[3]);
break;
}
}
});
((RadioButton)radioGroup.getChildAt(0)).toggle();
}
private void changeSeletedButton(int type) {
Drawable drawable = null;
switch (type) {
case 0:
drawable = getResources().getDrawable(R.drawable.icon_plan_selected);
drawable.setBounds(0, 0, drawable.getMinimumWidth(), drawable.getMinimumHeight());
plan.setCompoundDrawables(null, drawable, null, null);
plan.setTextColor(Color.parseColor("#FFDA44"));
drawable = getResources().getDrawable(R.drawable.icon_action);
drawable.setBounds(0, 0, drawable.getMinimumWidth(), drawable.getMinimumHeight());
action.setCompoundDrawables(null, drawable, null, null);
action.setTextColor(Color.parseColor("#595959"));
drawable = getResources().getDrawable(R.drawable.icon_bbs);
drawable.setBounds(0, 0, drawable.getMinimumWidth(), drawable.getMinimumHeight());
bbs.setCompoundDrawables(null, drawable, null, null);
bbs.setTextColor(Color.parseColor("#595959"));
drawable = getResources().getDrawable(R.drawable.icon_me);
drawable.setBounds(0, 0, drawable.getMinimumWidth(), drawable.getMinimumHeight());
me.setCompoundDrawables(null, drawable, null, null);
me.setTextColor(Color.parseColor("#595959"));
break;
case 1:
drawable = getResources().getDrawable(R.drawable.icon_action_selected);
drawable.setBounds(0, 0, drawable.getMinimumWidth(), drawable.getMinimumHeight());
action.setCompoundDrawables(null, drawable, null, null);
action.setTextColor(Color.parseColor("#FFDA44"));
drawable = getResources().getDrawable(R.drawable.icon_plan);
drawable.setBounds(0, 0, drawable.getMinimumWidth(), drawable.getMinimumHeight());
plan.setCompoundDrawables(null, drawable, null, null);
plan.setTextColor(Color.parseColor("#595959"));
drawable = getResources().getDrawable(R.drawable.icon_bbs);
drawable.setBounds(0, 0, drawable.getMinimumWidth(), drawable.getMinimumHeight());
bbs.setCompoundDrawables(null, drawable, null, null);
bbs.setTextColor(Color.parseColor("#595959"));
drawable = getResources().getDrawable(R.drawable.icon_me);
drawable.setBounds(0, 0, drawable.getMinimumWidth(), drawable.getMinimumHeight());
me.setCompoundDrawables(null, drawable, null, null);
me.setTextColor(Color.parseColor("#595959"));
break;
case 2:
drawable = getResources().getDrawable(R.drawable.icon_bbs_selected);
drawable.setBounds(0, 0, drawable.getMinimumWidth(), drawable.getMinimumHeight());
bbs.setCompoundDrawables(null, drawable, null, null);
bbs.setTextColor(Color.parseColor("#FFDA44"));
drawable = getResources().getDrawable(R.drawable.icon_plan);
drawable.setBounds(0, 0, drawable.getMinimumWidth(), drawable.getMinimumHeight());
plan.setCompoundDrawables(null, drawable, null, null);
plan.setTextColor(Color.parseColor("#595959"));
drawable = getResources().getDrawable(R.drawable.icon_action);
drawable.setBounds(0, 0, drawable.getMinimumWidth(), drawable.getMinimumHeight());
action.setCompoundDrawables(null, drawable, null, null);
action.setTextColor(Color.parseColor("#595959"));
drawable = getResources().getDrawable(R.drawable.icon_me);
drawable.setBounds(0, 0, drawable.getMinimumWidth(), drawable.getMinimumHeight());
me.setCompoundDrawables(null, drawable, null, null);
me.setTextColor(Color.parseColor("#595959"));
break;
case 3:
drawable = getResources().getDrawable(R.drawable.icon_me_selected);
drawable.setBounds(0, 0, drawable.getMinimumWidth(), drawable.getMinimumHeight());
me.setCompoundDrawables(null, drawable, null, null);
me.setTextColor(Color.parseColor("#FFDA44"));
drawable = getResources().getDrawable(R.drawable.icon_plan);
drawable.setBounds(0, 0, drawable.getMinimumWidth(), drawable.getMinimumHeight());
plan.setCompoundDrawables(null, drawable, null, null);
plan.setTextColor(Color.parseColor("#595959"));
drawable = getResources().getDrawable(R.drawable.icon_action);
drawable.setBounds(0, 0, drawable.getMinimumWidth(), drawable.getMinimumHeight());
action.setCompoundDrawables(null, drawable, null, null);
action.setTextColor(Color.parseColor("#595959"));
drawable = getResources().getDrawable(R.drawable.icon_bbs);
drawable.setBounds(0, 0, drawable.getMinimumWidth(), drawable.getMinimumHeight());
bbs.setCompoundDrawables(null, drawable, null, null);
bbs.setTextColor(Color.parseColor("#595959"));
break;
default:
break;
}
}<|fim▁hole|> @Override
public boolean onKeyDown(int keyCode, KeyEvent event) {
if (keyCode == KeyEvent.KEYCODE_BACK && event.getRepeatCount() == 0) {
if (getSupportFragmentManager().getBackStackEntryCount() > 0) {
getSupportFragmentManager().popBackStack();
} else {
ExitApp();
}
}
return false;
}
    // Press the back key twice within 2 seconds to exit the app (返回键双击退出APP)
public void ExitApp() {
if ((System.currentTimeMillis() - exitTime) > 2000) {
Toast.makeText(this, "再按一次退出程序", Toast.LENGTH_SHORT).show();
exitTime = System.currentTimeMillis();
} else {
finish();
}
}
}<|fim▁end|> | |
<|file_name|>containerpush.ts<|end_file_name|><|fim▁begin|>"use strict";
import * as fs from "fs";
import * as tl from "vsts-task-lib/task";
import ContainerConnection from "docker-common/containerconnection";
import * as sourceUtils from "docker-common/sourceutils";
import * as imageUtils from "docker-common/containerimageutils";
import * as utils from "./utils";
function dockerPush(connection: ContainerConnection, image: string, imageDigestFile?: string, useMultiImageMode?: boolean): any {
var command = connection.createCommand();
command.arg("push");
command.arg(image);
if (!imageDigestFile) {
return connection.execCommand(command);
}
var output = "";
command.on("stdout", data => {
output += data;
});
return connection.execCommand(command).then(() => {
// Parse the output to find the repository digest
var imageDigest = output.match(/^[^:]*: digest: ([^ ]*) size: \d*$/m)[1];
if (imageDigest) {
let baseImageName = imageUtils.imageNameWithoutTag(image);
let formattedDigestValue = baseImageName + "@" + imageDigest;
if (useMultiImageMode) {
// If we're pushing multiple images, we need to append all the digest values. Each one is contained on its own line.
fs.appendFileSync(imageDigestFile, formattedDigestValue + "\r\n");
} else {
fs.writeFileSync(imageDigestFile, formattedDigestValue);
}
}
});
}
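// Example of the `docker push` stdout line the regex above is meant to match
// (illustrative; the digest value is made up):
//   latest: digest: sha256:0d0dc1... size: 1576
// which yields imageDigest === "sha256:0d0dc1..." and is recorded in the
// digest file as "<image-without-tag>@sha256:0d0dc1...".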
export function run(connection: ContainerConnection): any {
let action = tl.getInput("action", true);
let imageNames;
let useMultiImageMode = action === "Push images";
if (useMultiImageMode) {
imageNames = utils.getImageNames();
} else {
imageNames = [imageUtils.getImageName()];
}
let imageMappings = utils.getImageMappings(connection, imageNames);
let imageDigestFile: string = null;
if (tl.filePathSupplied("imageDigestFile")) {
imageDigestFile = tl.getPathInput("imageDigestFile");
}
let firstImageMapping = imageMappings.shift();
let pushedSourceImages = [firstImageMapping.sourceImageName];
let promise = dockerPush(connection, firstImageMapping.targetImageName, imageDigestFile, useMultiImageMode);
imageMappings.forEach(imageMapping => {
        // If we've already pushed a tagged version of this source image, then we don't want to write the digest info to the file since it would be a duplicate.
if (pushedSourceImages.indexOf(imageMapping.sourceImageName) >= 0) {
promise = promise.then(() => dockerPush(connection, imageMapping.targetImageName));
} else {
pushedSourceImages.push(imageMapping.sourceImageName);
promise = promise.then(() => dockerPush(connection, imageMapping.targetImageName, imageDigestFile, useMultiImageMode));
}
});
return promise;<|fim▁hole|><|fim▁end|> | } |
<|file_name|>DefaultNewDatasourceConnectorViewImpl.java<|end_file_name|><|fim▁begin|>/*******************************************************************************
* Copyright (c) 2012-2017 Codenvy, S.A.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Codenvy, S.A. - initial API and implementation
*******************************************************************************/
package org.eclipse.che.datasource.ide.newDatasource.connector;
import com.google.gwt.event.dom.client.ChangeEvent;
import com.google.gwt.event.dom.client.ClickEvent;
import com.google.gwt.event.dom.client.KeyPressEvent;
import com.google.gwt.event.dom.client.KeyUpEvent;
import com.google.gwt.event.logical.shared.ValueChangeEvent;
import com.google.gwt.resources.client.ImageResource;
import com.google.gwt.uibinder.client.UiBinder;
import com.google.gwt.uibinder.client.UiField;
import com.google.gwt.uibinder.client.UiHandler;
import com.google.gwt.user.client.ui.Button;
import com.google.gwt.user.client.ui.CheckBox;
import com.google.gwt.user.client.ui.Composite;
import com.google.gwt.user.client.ui.Label;
import com.google.gwt.user.client.ui.ListBox;
import com.google.gwt.user.client.ui.RadioButton;
import com.google.gwt.user.client.ui.TextBox;
import com.google.gwt.user.client.ui.Widget;
import com.google.inject.Inject;
import org.eclipse.che.datasource.ide.DatasourceUiResources;
import javax.annotation.Nullable;
public class DefaultNewDatasourceConnectorViewImpl extends Composite implements DefaultNewDatasourceConnectorView {
interface NewDatasourceViewImplUiBinder extends UiBinder<Widget, DefaultNewDatasourceConnectorViewImpl> {
}
@UiField
Label configureTitleCaption;
@UiField
TextBox hostField;
@UiField
TextBox portField;
@UiField
TextBox dbName;
@UiField
TextBox usernameField;
@UiField
TextBox passwordField;
@UiField
Button testConnectionButton;
@UiField
Label testConnectionErrorMessage;
@UiField
RadioButton radioUserPref;
@UiField
RadioButton radioProject;
@UiField
ListBox projectsList;
@UiField
CheckBox useSSL;
@UiField
CheckBox verifyServerCertificate;
@UiField
DatasourceUiResources datasourceUiResources;
@UiField
Label testConnectionText;
private ActionDelegate delegate;
protected String encryptedPassword;
protected boolean passwordFieldIsDirty = false;
private Long runnerProcessId;
@Inject
public DefaultNewDatasourceConnectorViewImpl(NewDatasourceViewImplUiBinder uiBinder) {
initWidget(uiBinder.createAndBindUi(this));
hostField.setText("localhost");
radioUserPref.setValue(true);
radioProject.setEnabled(false);
projectsList.setEnabled(false);
projectsList.setWidth("100px");
configureTitleCaption.setText("Settings");
}
@Override
public void setDelegate(DefaultNewDatasourceConnectorView.ActionDelegate delegate) {
this.delegate = delegate;
}
@Override
public void setImage(@Nullable ImageResource image) {
}
@Override
public void setDatasourceName(@Nullable String dsName) {
}
@Override
public String getDatabaseName() {
return dbName.getText();
}
@UiHandler("dbName")
public void onDatabaseNameFieldChanged(KeyUpEvent event) {
delegate.databaseNameChanged(dbName.getText());
}
@Override
public String getHostname() {
return hostField.getText();
}
@UiHandler("hostField")
public void onHostNameFieldChanged(KeyUpEvent event) {
delegate.hostNameChanged(hostField.getText());
}
@Override
public int getPort() {
return Integer.parseInt(portField.getText());
}
@Override
public String getUsername() {
return usernameField.getText();
}
@UiHandler("usernameField")
public void onUserNameFieldChanged(KeyUpEvent event) {
delegate.userNameChanged(usernameField.getText());
}
@Override
public String getPassword() {
return passwordField.getText();
}
@UiHandler("passwordField")
public void onPasswordNameFieldChanged(KeyUpEvent event) {
delegate.passwordChanged(passwordField.getText());
delegate.onClickTestConnectionButton();
}
@Override
public String getEncryptedPassword() {
return encryptedPassword;
}
@Override
public void setPort(int port) {
portField.setText(Integer.toString(port));
}
@UiHandler("portField")
public void onPortFieldChanged(KeyPressEvent event) {
if (!Character.isDigit(event.getCharCode())) {
portField.cancelKey();
}
delegate.portChanged(Integer.parseInt(portField.getText()));
}
@Override
public boolean getUseSSL() {
if (useSSL.getValue() != null) {
return useSSL.getValue();
} else {
return false;
}
}
@Override
public boolean getVerifyServerCertificate() {
if (verifyServerCertificate.getValue() != null) {
return verifyServerCertificate.getValue();<|fim▁hole|> } else {
return false;
}
}
@Override
public void setDatabaseName(final String databaseName) {
dbName.setValue(databaseName);
}
@Override
public void setHostName(final String hostName) {
hostField.setValue(hostName);
}
@Override
public void setUseSSL(final boolean useSSL) {
this.useSSL.setValue(useSSL);
}
@UiHandler({"useSSL"})
void onUseSSLChanged(ValueChangeEvent<Boolean> event) {
delegate.useSSLChanged(event.getValue());
}
@Override
public void setVerifyServerCertificate(final boolean verifyServerCertificate) {
this.verifyServerCertificate.setValue(verifyServerCertificate);
}
@UiHandler({"verifyServerCertificate"})
void onVerifyServerCertificateChanged(ValueChangeEvent<Boolean> event) {
delegate.verifyServerCertificateChanged(event.getValue());
}
@Override
public void setUsername(final String username) {
usernameField.setValue(username);
}
@Override
public void setPassword(final String password) {
passwordField.setValue(password);
}
@UiHandler("testConnectionButton")
void handleClick(ClickEvent e) {
delegate.onClickTestConnectionButton();
}
@UiHandler("testConnectionText")
void handleTextClick(ClickEvent e) {
delegate.onClickTestConnectionButton();
}
@Override
public void onTestConnectionSuccess() {
// turn button green
testConnectionButton.setStyleName(datasourceUiResources.datasourceUiCSS().datasourceWizardTestConnectionOK());
// clear error messages
testConnectionErrorMessage.setText("Connection Established Successfully!");
}
@Override
public void onTestConnectionFailure(String errorMessage) {
// turn test button red
testConnectionButton.setStyleName(datasourceUiResources.datasourceUiCSS().datasourceWizardTestConnectionKO());
// set message
testConnectionErrorMessage.setText(errorMessage);
}
@Override
public void setEncryptedPassword(String encryptedPassword, boolean resetPasswordField) {
this.encryptedPassword = encryptedPassword;
passwordFieldIsDirty = false;
if (resetPasswordField) {
passwordField.setText("");
}
}
@UiHandler("passwordField")
public void handlePasswordFieldChanges(ChangeEvent event) {
passwordFieldIsDirty = true;
}
@Override
public boolean isPasswordFieldDirty() {
return passwordFieldIsDirty;
}
@Override
public Long getRunnerProcessId() {
return runnerProcessId;
}
@Override
public void setRunnerProcessId(Long runnerProcessId) {
this.runnerProcessId = runnerProcessId;
}
}<|fim▁end|> | |
<|file_name|>ecooputil.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
###############################################################################
#
#
# Project: ECOOP, sponsored by The National Science Foundation
# Purpose: this code is part of the Cyberinfrastructure developed for the ECOOP project
# http://tw.rpi.edu/web/project/ECOOP
# from the TWC - Tetherless World Constellation
# at RPI - Rensselaer Polytechnic Institute
# founded by NSF
#
# Author: Massimo Di Stefano , [email protected] -
# http://tw.rpi.edu/web/person/MassimoDiStefano
#
###############################################################################
# Copyright (c) 2008-2014 Tetherless World Constellation at Rensselaer Polytechnic Institute
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
from zipfile import ZipFile, ZIP_DEFLATED
from contextlib import closing
import paramiko
import qrcode
from IPython.core.display import HTML, Image
from IPython.display import display, Javascript
import envoy
from datetime import datetime
class shareUtil():
def zipdir(self, basedir, archivename, rm='no'):
"""
utility function to zip a single file or a directory
usage : zipdir(input, output)
@param basedir: input file or directory
@param archivename: output file.zip
@param rm: [yes, no], remove source file (optional, default=no)
"""
assert os.path.isdir(basedir)
with closing(ZipFile(archivename, "w", ZIP_DEFLATED)) as z:
for root, dirs, files in os.walk(basedir):
#NOTE: ignore empty directories
for fn in files:
#print fn
absfn = os.path.join(root, fn)
zfn = absfn[len(basedir) + len(os.sep):] #XXX: relative path
z.write(absfn, zfn)
if rm != 'no':
instruction = 'rm -rf %s' % basedir
os.system(instruction)
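    # Illustrative sketch (not part of the original class): typical use from an
    # IPython notebook; the paths and SFTP credentials below are placeholders.
    #
    #   util = shareUtil()
    #   util.zipdir('results', 'results.zip')
    #   util.uploadfile(username='epi', password='...', hostname='myserver',
    #                   inputfile='results.zip',
    #                   outputfile='/var/www/esr/results.zip', link=True)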
def uploadfile(self, username='epi', password='epi', hostname='localhost', port=22,
inputfile=None, outputfile=None, link=False, apacheroot='/var/www/', zip=False, qr=False):
'''
        utility to upload a file to a remote server using the SFTP protocol
usage : uploadfile(inputfile, outputfile)
@rtype : str
@param username: str - username on remote server
@param password: str - password to access remote server
@param hostname: str - hostname of remote server (default: localhost)
@param port: port number on remote server (default: 22)
        @param inputfile: str - local path to the file to be uploaded
@param outputfile: remote path to the file to upload
        @param link: boolean [True, False] default False, print a link to download the file
(remote path needs to be in a web available directory)
@param apacheroot: path to apache root default to '/var/www/' required if link == True
        @param zip: boolean default False, zip the output
        @param qr: boolean default False, return qrcode as image
@return: link to uploaded file if link=True or qr image if qr=True & link=True, none if link is set to false
'''
if zip:
#print 'add zipfile'
zipfile = str(inputfile + '.zip')
self.zipdir(inputfile, zipfile)
inputfile = zipfile
#paramiko.util.log_to_file('/var/www/esr/paramiko.log')
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname, username=username, password=password)
transport = paramiko.Transport((hostname, port))
transport.connect(username=username, password=password)
sftp = paramiko.SFTPClient.from_transport(transport)
parts = outputfile.split('/')
for n in range(2, len(parts)):
path = '/'.join(parts[:n])
#print 'Path:', path,
sys.stdout.flush()
try:
s = sftp.stat(path)
#print 'mode =', oct(s.st_mode)
except IOError as e:
#print e
#print 'adding dir: ', path
sftp.mkdir(path)
try:
sftp.put(remotepath=outputfile, localpath=inputfile)
sftp.close()
transport.close()
print 'file uploaded'
if qr:
if link:
pass
if not link:
                    print 'WARNING: qrcode not generated, set the option link to True'
if link:
filelink = outputfile.replace(apacheroot, '')
link = 'http://' + os.path.normpath(hostname + '/' + filelink)
raw_html = '<a href="%s" target="_blank">ESR results</a>' % link
print 'results are now available for download at : ', link
image = None
if qr:
imagefile = parts[-1].split('.')[0] + '.jpeg'
qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=10, border=4)
qr.add_data(link)
qr.make(fit=True)
img = qr.make_image()
img.save(imagefile, "JPEG")
print 'alive'
image = Image(imagefile)
return image
if not qr:
return HTML(raw_html)
except IOError:
print "Error: can\'t find file or read data check if input file exist and or remote location is writable"
def gistit(self, filename, jist='/usr/local/bin/jist', type='notebook'):
'''
use the jist utility to paste a txt file on github as gist and return a link to it
usage : gistit(notebookfile)
@param filename: str - path to the a text file or notebook file (.json)
@param jist: str - path to the executable jist (default=/usr/local/bin/jist)
@param type: str - notebook, text
@return: return a link to gist if type=text, link to nbviewer if type=notebook
'''
try:
with open(filename):
link = None
jist = self.which(jist)
if jist:
try:
r = envoy.run('%s -p %s' % (jist, filename))
if type == 'notebook':
link = r.std_out.replace('\n', '').replace('https://gist.github.com',
'http://nbviewer.ipython.org')
if type == 'text':
link = r.std_out.replace('\n', '')
return link
except:
print "can't generate gist, check if jist works bycommand line with: jist -p filename"
if not jist:
print 'cannot find jist utility, check if it is in your path'
except IOError:
print 'input file %s not found' % filename
def get_id(self, suffix, makedir=True):
'''
generate a directory based on the suffix and a time stamp
output looks like : suffix_Thursday_26_September_2013_06_28_49_PM
usage: getID(suffix)
@param suffix: str - suffix for the directory to be generated,
@return: str - directory name
'''
ID = suffix + '_' + str(datetime.now().utcnow().strftime("%A_%d_%B_%Y_%I_%M_%S_%p"))
if makedir:
self.ensure_dir(ID)<|fim▁hole|> '''
make a directory on the file system if it does not exist
usage: ensure_dir(dir)
@param dir: str - path to a directory existent on the local filesystem
@return: None
'''
if not os.path.exists(dir):
os.makedirs(dir)
def save_notebook(self, ID, notebookname, web=None, notebookdir=None):
"""
        Save the notebook file as HTML and/or as a gist
@param ID: directory name where to store the saved notebook
@param notebookname: name of the notebook
@param web:
@param notebookdir:
"""
if not notebookdir:
notebookdir = os.getcwd()
display(Javascript("IPython.notebook.save_notebook()"))
notebookfile = os.path.join(notebookdir, notebookname)
savedir = os.path.join(os.getcwd(), ID)
command1 = 'cp %s %s' % (notebookfile, savedir)
newnotebook = os.path.join(savedir, notebookname)
command2 = 'ipython nbconvert %s' % newnotebook
os.system(command1)
os.system(command2)
if web:
try:
self.gistit(notebookfile)
except IOError:
print "can't genrate a gist"
def which(self, program):
"""
        Check if a program exists and return its full path
@param program: executable name or path to executable
@return: full path to executable
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
        else:
            for path in os.environ["PATH"].split(os.pathsep):
                path = path.strip('"')
                exe_file = os.path.join(path, program)
                if is_exe(exe_file):
                    return exe_file
return None
def getTime(self):
now = datetime.now()
return now<|fim▁end|> | print 'session data directory : ID', ID
return ID
def ensure_dir(self, dir): |
<|file_name|>playit.py<|end_file_name|><|fim▁begin|>import subprocess
from pkg_resources import resource_filename
<|fim▁hole|> """
filepath = resource_filename(__name__, 'sound/' + file)
subprocess.Popen(["paplay", filepath])<|fim▁end|> | def playit(file):
"""
Function used to play a sound file |
<|file_name|>0013_formpage_button_name.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-25 15:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0012_auto_20160519_1740'),
]<|fim▁hole|>
operations = [
migrations.AddField(
model_name='formpage',
name='button_name',
field=models.CharField(default='Submit', max_length=500, verbose_name='Button name'),
),
]<|fim▁end|> | |
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>use std::path::PathBuf;
use std::{env, fs};
fn main() {
let current_dir = PathBuf::from(&env::current_dir().unwrap());
let out_dir = PathBuf::from(&env::var_os("OUT_DIR").unwrap());
//println!("cargo:rustc-flags=-L {:?} -l mosquitto", out_dir.display());<|fim▁hole|><|fim▁end|> | //println!("cargo:rustc-link-search=native={}", out_dir.display());
} |
<|file_name|>Trait.js<|end_file_name|><|fim▁begin|>/**
* Trait class
<|fim▁hole|> this.traits = [methods];
var extraTraits = methods.$traits;
if (extraTraits) {
if (typeof extraTraits === "string") {
extraTraits = extraTraits.replace(/ /g, '').split(',');
}
for (var i = 0, c = extraTraits.length; i < c; i++) {
this.use(allTraits[extraTraits[i]]);
}
}
}
Trait.prototype = {
constructor: Trait,
use: function (trait) {
if (trait) {
this.traits = this.traits.concat(trait.traits);
}
return this;
},
useBy: function (obj) {
for (var i = 0, c = this.traits.length; i < c; i++) {
var methods = this.traits[i];
for (var prop in methods) {
if (prop !== '$traits' && !obj[prop] && methods.hasOwnProperty(prop)) {
obj[prop] = methods[prop];
}
}
}
}
};
module.exports = Trait;<|fim▁end|> | */
function Trait(methods, allTraits) {
allTraits = allTraits || [];
|
<|file_name|>utils.py<|end_file_name|><|fim▁begin|># Copyright (C) 2011-2012 Patrick Totzke <[email protected]>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
import os
import email
import tempfile
import re
from email.header import Header
import email.charset as charset
charset.add_charset('utf-8', charset.QP, charset.QP, 'utf-8')
from email.iterators import typed_subpart_iterator
import logging
import mailcap
from cStringIO import StringIO
import alot.crypto as crypto
import alot.helper as helper
from alot.errors import GPGProblem
from alot.settings import settings
from alot.helper import string_sanitize
from alot.helper import string_decode
from alot.helper import parse_mailcap_nametemplate
from alot.helper import split_commandstring
X_SIGNATURE_VALID_HEADER = 'X-Alot-OpenPGP-Signature-Valid'
X_SIGNATURE_MESSAGE_HEADER = 'X-Alot-OpenPGP-Signature-Message'
def add_signature_headers(mail, sigs, error_msg):
'''Add pseudo headers to the mail indicating whether the signature
verification was successful.
:param mail: :class:`email.message.Message` the message to entitle
:param sigs: list of :class:`gpgme.Signature`
:param error_msg: `str` containing an error message, the empty
string indicating no error
'''
sig_from = ''
if len(sigs) == 0:
error_msg = error_msg or 'no signature found'
else:
try:
sig_from = crypto.get_key(sigs[0].fpr).uids[0].uid
except:
sig_from = sigs[0].fpr
mail.add_header(
X_SIGNATURE_VALID_HEADER,
'False' if error_msg else 'True',
)
mail.add_header(
X_SIGNATURE_MESSAGE_HEADER,
u'Invalid: {0}'.format(error_msg)
if error_msg else
u'Valid: {0}'.format(sig_from),
)
def get_params(mail, failobj=list(), header='content-type', unquote=True):
'''Get Content-Type parameters as dict.
RFC 2045 specifies that parameter names are case-insensitive, so
we normalize them here.
:param mail: :class:`email.message.Message`
:param failobj: object to return if no such header is found
:param header: the header to search for parameters, default
:param unquote: unquote the values
:returns: a `dict` containing the parameters
'''
return {k.lower(): v for k, v in mail.get_params(failobj, header, unquote)}
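# Illustrative example: for a part whose header is
#   Content-Type: text/plain; CHARSET="utf-8"
# this returns {'text/plain': '', 'charset': 'utf-8'} -- note the lower-cased
# parameter name.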
def message_from_file(handle):
'''Reads a mail from the given file-like object and returns an email
object, very much like email.message_from_file. In addition to
that OpenPGP encrypted data is detected and decrypted. If this
succeeds, any mime messages found in the recovered plaintext
message are added to the returned message object.
:param handle: a file-like object
:returns: :class:`email.message.Message` possibly augmented with
decrypted data
'''
m = email.message_from_file(handle)
# make sure noone smuggles a token in (data from m is untrusted)
del m[X_SIGNATURE_VALID_HEADER]
del m[X_SIGNATURE_MESSAGE_HEADER]
p = get_params(m)
app_pgp_sig = 'application/pgp-signature'
app_pgp_enc = 'application/pgp-encrypted'
# handle OpenPGP signed data
if (m.is_multipart() and
m.get_content_subtype() == 'signed' and
p.get('protocol', None) == app_pgp_sig):
# RFC 3156 is quite strict:
# * exactly two messages
# * the second is of type 'application/pgp-signature'
# * the second contains the detached signature
malformed = False
if len(m.get_payload()) != 2:
malformed = u'expected exactly two messages, got {0}'.format(
len(m.get_payload()))
ct = m.get_payload(1).get_content_type()
if ct != app_pgp_sig:
malformed = u'expected Content-Type: {0}, got: {1}'.format(
app_pgp_sig, ct)
# TODO: RFC 3156 says the alg has to be lower case, but I've
# seen a message with 'PGP-'. maybe we should be more
# permissive here, or maybe not, this is crypto stuff...
if not p.get('micalg', 'nothing').startswith('pgp-'):
malformed = u'expected micalg=pgp-..., got: {0}'.format(
p.get('micalg', 'nothing'))
sigs = []
if not malformed:
try:
sigs = crypto.verify_detached(m.get_payload(0).as_string(),
m.get_payload(1).get_payload())
except GPGProblem as e:
malformed = unicode(e)
add_signature_headers(m, sigs, malformed)
# handle OpenPGP encrypted data
elif (m.is_multipart() and
m.get_content_subtype() == 'encrypted' and
p.get('protocol', None) == app_pgp_enc and
'Version: 1' in m.get_payload(0).get_payload()):
# RFC 3156 is quite strict:
# * exactly two messages
# * the first is of type 'application/pgp-encrypted'
# * the first contains 'Version: 1'
# * the second is of type 'application/octet-stream'
# * the second contains the encrypted and possibly signed data
malformed = False
ct = m.get_payload(0).get_content_type()
if ct != app_pgp_enc:
malformed = u'expected Content-Type: {0}, got: {1}'.format(
app_pgp_enc, ct)
want = 'application/octet-stream'
ct = m.get_payload(1).get_content_type()
if ct != want:
malformed = u'expected Content-Type: {0}, got: {1}'.format(want,
ct)
if not malformed:
try:
sigs, d = crypto.decrypt_verify(m.get_payload(1).get_payload())
except GPGProblem as e:
# signature verification failures end up here too if
# the combined method is used, currently this prevents
# the interpretation of the recovered plain text
# mail. maybe that's a feature.
malformed = unicode(e)
else:
# parse decrypted message
n = message_from_string(d)
# add the decrypted message to m. note that n contains
# all the attachments, no need to walk over n here.
m.attach(n)
# add any defects found
m.defects.extend(n.defects)
# there are two methods for both signed and encrypted
# data, one is called 'RFC 1847 Encapsulation' by
# RFC 3156, and one is the 'Combined method'.
if len(sigs) == 0:
# 'RFC 1847 Encapsulation', the signature is a
# detached signature found in the recovered mime
# message of type multipart/signed.
if X_SIGNATURE_VALID_HEADER in n:
for k in (X_SIGNATURE_VALID_HEADER,
X_SIGNATURE_MESSAGE_HEADER):
m[k] = n[k]
else:
# an encrypted message without signatures
# should arouse some suspicion, better warn
# the user
add_signature_headers(m, [], 'no signature found')
else:
# 'Combined method', the signatures are returned
# by the decrypt_verify function.
# note that if we reached this point, we know the
# signatures are valid. if they were not valid,
# the else block of the current try would not have
# been executed
add_signature_headers(m, sigs, '')
if malformed:
msg = u'Malformed OpenPGP message: {0}'.format(malformed)
content = email.message_from_string(msg.encode('utf-8'))
content.set_charset('utf-8')
m.attach(content)
return m
def message_from_string(s):
'''Reads a mail from the given string. This is the equivalent of
:func:`email.message_from_string` which does nothing but to wrap
the given string in a StringIO object and to call
:func:`email.message_from_file`.
Please refer to the documentation of :func:`message_from_file` for
details.
'''
return message_from_file(StringIO(s))
def extract_headers(mail, headers=None):
"""
returns subset of this messages headers as human-readable format:
all header values are decoded, the resulting string has
one line "KEY: VALUE" for each requested header present in the mail.
:param mail: the mail to use
:type mail: :class:`email.Message`
:param headers: headers to extract
:type headers: list of str
"""
headertext = u''
if headers is None:
headers = mail.keys()
for key in headers:
value = u''
if key in mail:
value = decode_header(mail.get(key, ''))
headertext += '%s: %s\n' % (key, value)
return headertext
def extract_body(mail, types=None):
"""
returns a body text string for given mail.
If types is `None`, `text/*` is used:
The exact preferred type is specified by the prefer_plaintext config option
which defaults to text/html.
:param mail: the mail to use
:type mail: :class:`email.Message`
:param types: mime content types to use for body string
:type types: list of str
"""
preferred = 'text/plain' if settings.get(
'prefer_plaintext') else 'text/html'
has_preferred = False
# see if the mail has our preferred type
if types is None:
has_preferred = list(typed_subpart_iterator(
mail, *preferred.split('/')))
body_parts = []
for part in mail.walk():
ctype = part.get_content_type()
if types is not None:
if ctype not in types:
continue
cd = part.get('Content-Disposition', '')
if cd.startswith('attachment'):
continue
# if the mail has our preferred type, we only keep this type
# note that if types != None, has_preferred always stays False
if has_preferred and ctype != preferred:
continue
enc = part.get_content_charset() or 'ascii'
raw_payload = part.get_payload(decode=True)
if ctype in ['text/plain', 'text/']:
raw_payload = string_decode(raw_payload, enc)
body_parts.append(string_sanitize(raw_payload))
else:
# get mime handler
key = 'copiousoutput'
handler, entry = settings.mailcap_find_match(ctype, key=key)
tempfile_name = None
stdin = None
if entry:
handler_raw_commandstring = entry['view']
# in case the mailcap defined command contains no '%s',
# we pipe the files content to the handling command via stdin
if '%s' in handler_raw_commandstring:
# open tempfile, respect mailcaps nametemplate
nametemplate = entry.get('nametemplate', '%s')
prefix, suffix = parse_mailcap_nametemplate(nametemplate)
tmpfile = tempfile.NamedTemporaryFile(delete=False,
prefix=prefix,
suffix=suffix)
# write payload to tmpfile
tmpfile.write(raw_payload)
tmpfile.close()
tempfile_name = tmpfile.name
else:
stdin = raw_payload
# read parameter, create handler command<|fim▁hole|> # create and call external command
cmd = mailcap.subst(entry['view'], ctype,
filename=tempfile_name, plist=parms)
logging.debug('command: %s' % cmd)
logging.debug('parms: %s' % str(parms))
cmdlist = split_commandstring(cmd)
# call handler
rendered_payload, errmsg, retval = helper.call_cmd(
cmdlist, stdin=stdin)
# remove tempfile
if tempfile_name:
os.unlink(tempfile_name)
if rendered_payload: # handler had output
body_parts.append(string_sanitize(rendered_payload))
return u'\n\n'.join(body_parts)
def decode_header(header, normalize=False):
"""
decode a header value to a unicode string
values are usually a mixture of different substrings
encoded in quoted printable using different encodings.
This turns it into a single unicode string
:param header: the header value
:type header: str
:param normalize: replace trailing spaces after newlines
:type normalize: bool
:rtype: unicode
"""
# If the value isn't ascii as RFC2822 prescribes,
# we just return the unicode bytestring as is
value = string_decode(header) # convert to unicode
try:
value = value.encode('ascii')
except UnicodeEncodeError:
return value
# some mailers send out incorrectly escaped headers
# and double quote the escaped realname part again. remove those
# RFC: 2047
regex = r'"(=\?.+?\?.+?\?[^ ?]+\?=)"'
value = re.sub(regex, r'\1', value)
logging.debug("unquoted header: |%s|", value)
# otherwise we interpret RFC2822 encoding escape sequences
valuelist = email.header.decode_header(value)
decoded_list = []
for v, enc in valuelist:
v = string_decode(v, enc)
decoded_list.append(string_sanitize(v))
value = u' '.join(decoded_list)
if normalize:
value = re.sub(r'\n\s+', r' ', value)
return value
def encode_header(key, value):
"""
encodes a unicode string as a valid header value
:param key: the header field this value will be stored in
:type key: str
:param value: the value to be encoded
:type value: unicode
"""
# handle list of "realname <email>" entries separately
if key.lower() in ['from', 'to', 'cc', 'bcc']:
rawentries = value.split(',')
encodedentries = []
for entry in rawentries:
m = re.search('\s*(.*)\s+<(.*\@.*\.\w*)>\s*$', entry)
if m: # If a realname part is contained
name, address = m.groups()
# try to encode as ascii, if that fails, revert to utf-8
# name must be a unicode string here
namepart = Header(name)
# append address part encoded as ascii
entry = '%s <%s>' % (namepart.encode(), address)
encodedentries.append(entry)
value = Header(', '.join(encodedentries))
else:
value = Header(value)
return value
def is_subdir_of(subpath, superpath):
# make both absolute
superpath = os.path.realpath(superpath)
subpath = os.path.realpath(subpath)
# return true, if the common prefix of both is equal to directory
# e.g. /a/b/c/d.rst and directory is /a/b, the common prefix is /a/b
return os.path.commonprefix([subpath, superpath]) == superpath<|fim▁end|> | parms = tuple(map('='.join, part.get_params()))
|
<|file_name|>ipc.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::time;
use crate::time::ProfilerCategory;
use crate::time::ProfilerChan;
use ipc_channel::ipc;
use serde::{Deserialize, Serialize};
use std::io::Error;
pub struct IpcReceiver<T>
where
T: for<'de> Deserialize<'de> + Serialize,
{
ipc_receiver: ipc::IpcReceiver<T>,
time_profile_chan: ProfilerChan,
}
impl<T> IpcReceiver<T>
where
T: for<'de> Deserialize<'de> + Serialize,
{<|fim▁hole|> self.time_profile_chan.clone(),
move || self.ipc_receiver.recv(),
)
}
pub fn try_recv(&self) -> Result<T, bincode::Error> {
self.ipc_receiver.try_recv()
}
pub fn to_opaque(self) -> ipc::OpaqueIpcReceiver {
self.ipc_receiver.to_opaque()
}
}
pub fn channel<T>(
time_profile_chan: ProfilerChan,
) -> Result<(ipc::IpcSender<T>, IpcReceiver<T>), Error>
where
T: for<'de> Deserialize<'de> + Serialize,
{
let (ipc_sender, ipc_receiver) = ipc::channel()?;
let profiled_ipc_receiver = IpcReceiver {
ipc_receiver,
time_profile_chan,
};
Ok((ipc_sender, profiled_ipc_receiver))
}
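// Rough usage sketch (the message type and profiler handle are assumed, not
// part of this module):
//   let (tx, rx) = channel::<LayoutMsg>(time_profiler_chan.clone())?;
//   tx.send(msg)?;
//   let reply = rx.recv()?; // timed under ProfilerCategory::IpcReceiver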
pub struct IpcBytesReceiver {
ipc_bytes_receiver: ipc::IpcBytesReceiver,
time_profile_chan: ProfilerChan,
}
impl IpcBytesReceiver {
pub fn recv(&self) -> Result<Vec<u8>, bincode::Error> {
time::profile(
ProfilerCategory::IpcBytesReceiver,
None,
self.time_profile_chan.clone(),
move || self.ipc_bytes_receiver.recv(),
)
}
}
pub fn bytes_channel(
time_profile_chan: ProfilerChan,
) -> Result<(ipc::IpcBytesSender, IpcBytesReceiver), Error> {
let (ipc_bytes_sender, ipc_bytes_receiver) = ipc::bytes_channel()?;
let profiled_ipc_bytes_receiver = IpcBytesReceiver {
ipc_bytes_receiver,
time_profile_chan,
};
Ok((ipc_bytes_sender, profiled_ipc_bytes_receiver))
}<|fim▁end|> | pub fn recv(&self) -> Result<T, bincode::Error> {
time::profile(
ProfilerCategory::IpcReceiver,
None, |
<|file_name|>001_initial_db.py<|end_file_name|><|fim▁begin|>def migrate_up(manager):
raise Exception('migrate.py should be migrating directly to schema 51 '
'instead of running migration 1...')<|fim▁hole|>
def migrate_down(manager):
manager.execute_script(DROP_DB_SQL)
DROP_DB_SQL = """\
DROP TABLE IF EXISTS `acl_groups`;
DROP TABLE IF EXISTS `acl_groups_hosts`;
DROP TABLE IF EXISTS `acl_groups_users`;
DROP TABLE IF EXISTS `autotests`;
DROP TABLE IF EXISTS `host_queue_entries`;
DROP TABLE IF EXISTS `hosts`;
DROP TABLE IF EXISTS `hosts_labels`;
DROP TABLE IF EXISTS `ineligible_host_queues`;
DROP TABLE IF EXISTS `jobs`;
DROP TABLE IF EXISTS `labels`;
DROP TABLE IF EXISTS `users`;
"""<|fim▁end|> | |
<|file_name|>user_team_mgmt.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# This example shows the different aspects of user/team management.
#
import sys
from sdcclient import SdcClient
#
# Parse arguments
#
if len(sys.argv) != 4:
print(('usage: %s <sysdig-token> team-name user-name' % sys.argv[0]))
print('You can find your token at https://app.sysdigcloud.com/#/settings/user')
sys.exit(1)
sdc_token = sys.argv[1]
#
# Instantiate the SDC client
#
sdclient = SdcClient(sdc_token, sdc_url='https://app.sysdigcloud.com')
team_name = sys.argv[2]
user_name = sys.argv[3]
print(('Trying to invite a user:', user_name))
ok, res = sdclient.create_user_invite(user_name)
if not ok:
if res == 'user ' + user_name + ' already exists':
print(('User creation failed because', user_name, 'already exists. Continuing.'))
else:
print(('User creation failed:', res, '. Exiting.'))
sys.exit(1)
else:
print('User creation succeeded')
# Possible failures on Team creation might include having reached the
# max limit on Teams for this customer account or if the Team by that
# name already exists. Since a previous successful run of this test
# would have deleted the Team by the same name, and we need to be able
# to configure Teams for this test to pass, we'll treat both types of
# error as a genuine fail of the test.
print(('Now trying to create a team with name:', team_name))
ok, res = sdclient.create_team(team_name)
if not ok:
print(('Team creation failed:', res, '. Exiting.'))
sys.exit(1)
else:
print(('Team creation succeeded.', res))
print(('Now trying to find team with name:', team_name))
ok, res = sdclient.get_team(team_name)
if not ok:
print(('Could not get team info:', res, '. Exiting.'))
sys.exit(1)
else:
print('Team fetch succeeded')
print(('Now trying to edit team:', team_name))
memberships = {
'[email protected]': 'ROLE_TEAM_MANAGER',
'[email protected]': 'ROLE_TEAM_READ'
}
ok, res = sdclient.edit_team(team_name, description='Nextgen2', memberships=memberships)
if not ok:
print(('Could not edit team:', res, '. Exiting.'))
sys.exit(1)
else:
print('Edited team to change description and add users')
print(('Now trying to edit user:', user_name))
ok, res = sdclient.edit_user(user_name, firstName='Just', lastName='Edited3', systemRole='ROLE_CUSTOMER')
if not ok:
print(('Could not edit user:', res, '. Exiting.'))
sys.exit(1)
else:
print('Edit user succeeded')
print(('Now trying to delete the team:', team_name))
ok, res = sdclient.delete_team(team_name)
if not ok:
print(('Could not delete team:', res, '. Exiting.'))
sys.exit(1)
else:<|fim▁hole|> print('Delete team succeeded')
sys.exit(0)<|fim▁end|> | |
<|file_name|>test_uniform.py<|end_file_name|><|fim▁begin|>import unittest
from chainer import cuda
from chainer import initializers
from chainer import testing
from chainer.testing import attr
import numpy
@testing.parameterize(*testing.product({
'target': [
initializers.Uniform,
initializers.LeCunUniform,
initializers.HeUniform,
initializers.GlorotUniform,
],
'shape': [(2, 3), (2, 3, 4)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestUniform(unittest.TestCase):
scale = 0.1
def check_initializer(self, w):
initializer = self.target(scale=self.scale)
initializer(w)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
def test_initializer_cpu(self):
w = numpy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
@attr.gpu
def test_initializer_gpu(self):
w = cuda.cupy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
def check_shaped_initializer(self, xp):
initializer = self.target(scale=self.scale, dtype=self.dtype)
w = initializers.generate_array(initializer, self.shape, xp)
self.assertIs(cuda.get_array_module(w), xp)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
def test_shaped_initializer_cpu(self):
self.check_shaped_initializer(numpy)
<|fim▁hole|> def test_shaped_initializer_gpu(self):
self.check_shaped_initializer(cuda.cupy)
testing.run_module(__name__, __file__)<|fim▁end|> | @attr.gpu |
<|file_name|>regress-122076.js<|end_file_name|><|fim▁begin|>/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
*
* Date: 12 Feb 2002
* SUMMARY: Don't crash on invalid regexp literals / \\/ /
*
* See http://bugzilla.mozilla.org/show_bug.cgi?id=122076
* The function checkURL() below sometimes caused a compile-time error:
*
* SyntaxError: unterminated parenthetical (:
*
* However, sometimes it would cause a crash instead. The presence of
* other functions below is merely fodder to help provoke the crash.
* The constant |STRESS| is number of times we'll try to crash on this.
*
*/
//-----------------------------------------------------------------------------
var BUGNUMBER = 122076;
var summary = "Don't crash on invalid regexp literals / \\/ /";
var STRESS = 10;
var sEval = '';
printBugNumber(BUGNUMBER);
printStatus(summary);
sEval += 'function checkDate()'
sEval += '{'
sEval += 'return (this.value.search(/^[012]?\d\/[0123]?\d\/[0]\d$/) != -1);'
sEval += '}'
sEval += 'function checkDNSName()'
sEval += '{'
sEval += ' return (this.value.search(/^([\w\-]+\.)+([\w\-]{2,3})$/) != -1);'
sEval += '}'
sEval += 'function checkEmail()'
sEval += '{'
sEval += ' return (this.value.search(/^([\w\-]+\.)*[\w\-]+@([\w\-]+\.)+([\w\-]{2,3})$/) != -1);'
sEval += '}'
sEval += 'function checkHostOrIP()'
sEval += '{'
sEval += ' if (this.value.search(/^([\w\-]+\.)+([\w\-]{2,3})$/) == -1)'
sEval += ' return (this.value.search(/^[1-2]?\d{1,2}\.[1-2]?\d{1,2}\.[1-2]?\d{1,2}\.[1-2]?\d{1,2}$/) != -1);'
sEval += ' else'<|fim▁hole|>
sEval += 'function checkIPAddress()'
sEval += '{'
sEval += ' return (this.value.search(/^[1-2]?\d{1,2}\.[1-2]?\d{1,2}\.[1-2]?\d{1,2}\.[1-2]?\d{1,2}$/) != -1);'
sEval += '}'
sEval += 'function checkURL()'
sEval += '{'
sEval += ' return (this.value.search(/^(((https?)|(ftp)):\/\/([\-\w]+\.)+\w{2,4}(\/[%\-\w]+(\.\w{2,})?)*(([\w\-\.\?\\/\*\$+@&#;`~=%!]*)(\.\w{2,})?)*\/?)$/) != -1);'
sEval += '}'
for (var i=0; i<STRESS; i++)
{
try
{
eval(sEval);
}
catch(e)
{
}
}
reportCompare('No Crash', 'No Crash', '');<|fim▁end|> | sEval += ' return true;'
sEval += '}' |
<|file_name|>model.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model classes which are used to communicate between parts of implementation.
These model classes are describing mapreduce, its current state and
communication messages. They are either stored in the datastore or
serialized to/from json and passed around with other means.
"""
# Disable "Invalid method name"
# pylint: disable=g-bad-name
__all__ = ["MapreduceState",
"MapperSpec",
"MapreduceControl",
"MapreduceSpec",
"ShardState",
"CountersMap",
"TransientShardState",
"QuerySpec",
"HugeTask"]
import cgi
import datetime
import urllib
import zlib
from mapreduce.lib.graphy.backends import google_chart_api
from mapreduce.lib import simplejson
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import db
from mapreduce import context
from mapreduce import hooks
from mapreduce import json_util
from mapreduce import util
# pylint: disable=protected-access
# Special datastore kinds for MR.
_MAP_REDUCE_KINDS = ("_AE_MR_MapreduceControl",
"_AE_MR_MapreduceState",
"_AE_MR_ShardState",
"_AE_MR_TaskPayload")
class _HugeTaskPayload(db.Model):
"""Model object to store task payload."""
payload = db.BlobProperty()
@classmethod
def kind(cls):
"""Returns entity kind."""
return "_AE_MR_TaskPayload"
class HugeTask(object):
"""HugeTask is a taskqueue.Task-like class that can store big payloads.
Payloads are stored either in the task payload itself or in the datastore.
Task handlers should inherit from base_handler.HugeTaskHandler class.
"""
PAYLOAD_PARAM = "__payload"
PAYLOAD_KEY_PARAM = "__payload_key"
# Leave some wiggle room for headers and other fields.
MAX_TASK_PAYLOAD = taskqueue.MAX_PUSH_TASK_SIZE_BYTES - 1024
MAX_DB_PAYLOAD = datastore_rpc.BaseConnection.MAX_RPC_BYTES
PAYLOAD_VERSION_HEADER = "AE-MR-Payload-Version"
# Update version when payload handling is changed
# in a backward incompatible way.
PAYLOAD_VERSION = "1"
def __init__(self,
url,
params,
name=None,
eta=None,
countdown=None,
parent=None,
headers=None):
"""Init.
Args:
url: task url in str.
params: a dict from str to str.
name: task name.
eta: task eta.
countdown: task countdown.
parent: parent entity of huge task's payload.
headers: a dict of headers for the task.
Raises:
ValueError: when payload is too big even for datastore, or parent is
not specified when payload is stored in datastore.
"""
self.url = url
self.name = name
self.eta = eta
self.countdown = countdown
self._headers = {
"Content-Type": "application/octet-stream",
self.PAYLOAD_VERSION_HEADER: self.PAYLOAD_VERSION
}
if headers:
self._headers.update(headers)
# TODO(user): Find a more space efficient way than urlencoding.
payload_str = urllib.urlencode(params)
compressed_payload = ""
if len(payload_str) > self.MAX_TASK_PAYLOAD:
compressed_payload = zlib.compress(payload_str)
# Payload is small. Don't bother with anything.
if not compressed_payload:
self._payload = payload_str
# Compressed payload is small. Don't bother with datastore.
elif len(compressed_payload) < self.MAX_TASK_PAYLOAD:
self._payload = self.PAYLOAD_PARAM + compressed_payload
elif len(compressed_payload) > self.MAX_DB_PAYLOAD:
raise ValueError(
"Payload from %s to big to be stored in database: %s" %
(self.name, len(compressed_payload)))
# Store payload in the datastore.
else:
if not parent:
raise ValueError("Huge tasks should specify parent entity.")
payload_entity = _HugeTaskPayload(payload=compressed_payload,
parent=parent)
payload_key = payload_entity.put()
self._payload = self.PAYLOAD_KEY_PARAM + str(payload_key)
def add(self, queue_name, transactional=False):
"""Add task to the queue."""
task = self.to_task()
task.add(queue_name, transactional)
def to_task(self):
"""Convert to a taskqueue task."""
# Never pass params to taskqueue.Task. Use payload instead. Otherwise,
# it's up to a particular taskqueue implementation to generate
# payload from params. It could blow up payload size over limit.
return taskqueue.Task(
url=self.url,
payload=self._payload,
name=self.name,
eta=self.eta,
countdown=self.countdown,
headers=self._headers)
@classmethod
def decode_payload(cls, request):
"""Decode task payload.
HugeTask controls its own payload entirely including urlencoding.
It doesn't depend on any particular web framework.
Args:
request: a webapp Request instance.
Returns:
A dict of str to str. The same as the params argument to __init__.
Raises:
DeprecationWarning: When task payload constructed from an older
incompatible version of mapreduce.
"""
# TODO(user): Pass mr_id into headers. Otherwise when payload decoding
# failed, we can't abort a mr.
if request.headers.get(cls.PAYLOAD_VERSION_HEADER) != cls.PAYLOAD_VERSION:
raise DeprecationWarning(
"Task is generated by an older incompatible version of mapreduce. "
"Please kill this job manually")
return cls._decode_payload(request.body)
@classmethod
def _decode_payload(cls, body):
compressed_payload_str = None
if body.startswith(cls.PAYLOAD_KEY_PARAM):
payload_key = body[len(cls.PAYLOAD_KEY_PARAM):]
payload_entity = _HugeTaskPayload.get(payload_key)
compressed_payload_str = payload_entity.payload
elif body.startswith(cls.PAYLOAD_PARAM):
compressed_payload_str = body[len(cls.PAYLOAD_PARAM):]
if compressed_payload_str:
payload_str = zlib.decompress(compressed_payload_str)
else:
payload_str = body
result = {}
for (name, value) in cgi.parse_qs(payload_str).items():
if len(value) == 1:
result[name] = value[0]
else:
result[name] = value
return result
class CountersMap(json_util.JsonMixin):
"""Maintains map from counter name to counter value.
The class is used to provide basic arithmetic on counter values (bulk
add/remove), increment individual values and store/load data from json.
"""
def __init__(self, initial_map=None):
"""Constructor.
Args:
initial_map: initial counter values map from counter name (string) to
counter value (int).
"""
if initial_map:
self.counters = initial_map
else:
self.counters = {}
def __repr__(self):
"""Compute string representation."""
return "mapreduce.model.CountersMap(%r)" % self.counters
def get(self, counter_name):
"""Get current counter value.
Args:
counter_name: counter name as string.
Returns:
current counter value as int. 0 if counter was not set.
"""
return self.counters.get(counter_name, 0)
def increment(self, counter_name, delta):
"""Increment counter value.
Args:
counter_name: counter name as String.
delta: increment delta as Integer.
Returns:
new counter value.
"""
current_value = self.counters.get(counter_name, 0)
new_value = current_value + delta
self.counters[counter_name] = new_value
return new_value
def add_map(self, counters_map):
"""Add all counters from the map.
For each counter in the passed map, adds its value to the counter in this
map.
Args:
counters_map: CounterMap instance to add.
"""
for counter_name in counters_map.counters:
self.increment(counter_name, counters_map.counters[counter_name])
def sub_map(self, counters_map):
"""Subtracts all counters from the map.
For each counter in the passed map, subtracts its value to the counter in
this map.
Args:
counters_map: CounterMap instance to subtract.
"""
for counter_name in counters_map.counters:
self.increment(counter_name, -counters_map.counters[counter_name])
def clear(self):
"""Clear all values."""
self.counters = {}
def to_json(self):
"""Serializes all the data in this map into json form.
Returns:
json-compatible data representation.
"""
return {"counters": self.counters}
@classmethod
def from_json(cls, json):
"""Create new CountersMap from the json data structure, encoded by to_json.
Args:
json: json representation of CountersMap .
Returns:
an instance of CountersMap with all data deserialized from json.
"""
counters_map = cls()
counters_map.counters = json["counters"]
return counters_map
def to_dict(self):
"""Convert to dictionary.
Returns:
a dictionary with counter name as key and counter values as value.
"""
return self.counters
class MapperSpec(json_util.JsonMixin):
"""Contains a specification for the mapper phase of the mapreduce.
MapperSpec instance can be changed only during mapreduce starting process,
and it remains immutable for the rest of mapreduce execution. MapperSpec is
passed as a payload to all mapreduce tasks in JSON encoding as part of
MapreduceSpec.
Specifying mapper handlers:
* '<module_name>.<class_name>' - __call__ method of class instance will be
called
* '<module_name>.<function_name>' - function will be called.
* '<module_name>.<class_name>.<method_name>' - class will be instantiated
and method called.
"""
def __init__(self,
handler_spec,
input_reader_spec,
params,
shard_count,
output_writer_spec=None):
"""Creates a new MapperSpec.
Args:
handler_spec: handler specification as string (see class doc for
details).
input_reader_spec: The class name of the input reader to use.
params: Dictionary of additional parameters for the mapper.
shard_count: number of shards to process in parallel.
Properties:
handler_spec: name of handler class/function to use.
input_reader_spec: The class name of the input reader to use.
params: Dictionary of additional parameters for the mapper.
shard_count: number of shards to process in parallel.
output_writer_spec: The class name of the output writer to use.
"""
self.handler_spec = handler_spec
self.input_reader_spec = input_reader_spec
self.output_writer_spec = output_writer_spec
self.shard_count = int(shard_count)
self.params = params
def get_handler(self):
"""Get mapper handler instance.
This always creates a new instance of the handler. If the handler is a
callable instance, MR only wants to create a new instance at the
beginning of a shard or shard retry. The pickled callable instance
should be accessed from TransientShardState.
Returns:
handler instance as callable.
"""
return util.handler_for_name(self.handler_spec)
handler = property(get_handler)
def input_reader_class(self):
"""Get input reader class.
Returns:
input reader class object.
"""
return util.for_name(self.input_reader_spec)
def output_writer_class(self):
"""Get output writer class.
Returns:
output writer class object.
"""
return self.output_writer_spec and util.for_name(self.output_writer_spec)
def to_json(self):
"""Serializes this MapperSpec into a json-izable object."""
result = {
"mapper_handler_spec": self.handler_spec,
"mapper_input_reader": self.input_reader_spec,
"mapper_params": self.params,
"mapper_shard_count": self.shard_count
}
if self.output_writer_spec:
result["mapper_output_writer"] = self.output_writer_spec
return result
def __str__(self):
return "MapperSpec(%s, %s, %s, %s)" % (
self.handler_spec, self.input_reader_spec, self.params,
self.shard_count)
@classmethod
def from_json(cls, json):
"""Creates MapperSpec from a dict-like object."""
return cls(json["mapper_handler_spec"],
json["mapper_input_reader"],
json["mapper_params"],
json["mapper_shard_count"],
json.get("mapper_output_writer")
)<|fim▁hole|> def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.to_json() == other.to_json()
class MapreduceSpec(json_util.JsonMixin):
"""Contains a specification for the whole mapreduce.
MapreduceSpec instance can be changed only during mapreduce starting process,
and it remains immutable for the rest of mapreduce execution. MapreduceSpec is
passed as a payload to all mapreduce tasks in json encoding.
"""
# Url to call when mapreduce finishes its execution.
PARAM_DONE_CALLBACK = "done_callback"
# Queue to use to call done callback
PARAM_DONE_CALLBACK_QUEUE = "done_callback_queue"
def __init__(self,
name,
mapreduce_id,
mapper_spec,
params={},
hooks_class_name=None):
"""Create new MapreduceSpec.
Args:
name: The name of this mapreduce job type.
mapreduce_id: ID of the mapreduce.
mapper_spec: JSON-encoded string containing a MapperSpec.
params: dictionary of additional mapreduce parameters.
hooks_class_name: The fully qualified name of the hooks class to use.
Properties:
name: The name of this mapreduce job type.
mapreduce_id: unique id of this mapreduce as string.
mapper: This MapreduceSpec's instance of MapperSpec.
params: dictionary of additional mapreduce parameters.
hooks_class_name: The fully qualified name of the hooks class to use.
"""
self.name = name
self.mapreduce_id = mapreduce_id
self.mapper = MapperSpec.from_json(mapper_spec)
self.params = params
self.hooks_class_name = hooks_class_name
self.__hooks = None
self.get_hooks() # Fail fast on an invalid hook class.
def get_hooks(self):
"""Returns a hooks.Hooks class or None if no hooks class has been set."""
if self.__hooks is None and self.hooks_class_name is not None:
hooks_class = util.for_name(self.hooks_class_name)
if not isinstance(hooks_class, type):
raise ValueError("hooks_class_name must refer to a class, got %s" %
type(hooks_class).__name__)
if not issubclass(hooks_class, hooks.Hooks):
raise ValueError(
"hooks_class_name must refer to a hooks.Hooks subclass")
self.__hooks = hooks_class(self)
return self.__hooks
def to_json(self):
"""Serializes all data in this mapreduce spec into json form.
Returns:
data in json format.
"""
mapper_spec = self.mapper.to_json()
return {
"name": self.name,
"mapreduce_id": self.mapreduce_id,
"mapper_spec": mapper_spec,
"params": self.params,
"hooks_class_name": self.hooks_class_name,
}
@classmethod
def from_json(cls, json):
"""Create new MapreduceSpec from the json, encoded by to_json.
Args:
json: json representation of MapreduceSpec.
Returns:
an instance of MapreduceSpec with all data deserialized from json.
"""
mapreduce_spec = cls(json["name"],
json["mapreduce_id"],
json["mapper_spec"],
json.get("params"),
json.get("hooks_class_name"))
return mapreduce_spec
def __str__(self):
return str(self.to_json())
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.to_json() == other.to_json()
@classmethod
def _get_mapreduce_spec(cls, mr_id):
"""Get Mapreduce spec from mr id."""
key = 'GAE-MR-spec: %s' % mr_id
spec_json = memcache.get(key)
if spec_json:
return cls.from_json(spec_json)
state = MapreduceState.get_by_job_id(mr_id)
spec = state.mapreduce_spec
spec_json = spec.to_json()
memcache.set(key, spec_json)
return spec
class MapreduceState(db.Model):
"""Holds accumulated state of mapreduce execution.
MapreduceState is stored in datastore with a key name equal to the
mapreduce ID. Only controller tasks can write to MapreduceState.
Properties:
mapreduce_spec: cached deserialized MapreduceSpec instance. read-only
active: if this MR is still running.
last_poll_time: last time controller job has polled this mapreduce.
counters_map: shard's counters map as CountersMap. Mirrors
counters_map_json.
chart_url: last computed mapreduce status chart url. This chart displays the
progress of all the shards the best way it can.
sparkline_url: last computed mapreduce status chart url in small format.
result_status: If not None, the final status of the job.
active_shards: How many shards are still processing. This starts as 0,
then set by KickOffJob handler to be the actual number of input
readers after input splitting, and is updated by Controller task
as shards finish.
start_time: When the job started.
writer_state: Json property to be used by writer to store its state.
This is filled when single output per job. Will be deprecated.
Use OutputWriter.get_filenames instead.
"""
RESULT_SUCCESS = "success"
RESULT_FAILED = "failed"
RESULT_ABORTED = "aborted"
_RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])
# Functional properties.
# TODO(user): Replace mapreduce_spec with job_config.
mapreduce_spec = json_util.JsonProperty(MapreduceSpec, indexed=False)
active = db.BooleanProperty(default=True, indexed=False)
last_poll_time = db.DateTimeProperty(required=True)
counters_map = json_util.JsonProperty(
CountersMap, default=CountersMap(), indexed=False)
app_id = db.StringProperty(required=False, indexed=True)
writer_state = json_util.JsonProperty(dict, indexed=False)
active_shards = db.IntegerProperty(default=0, indexed=False)
failed_shards = db.IntegerProperty(default=0, indexed=False)
aborted_shards = db.IntegerProperty(default=0, indexed=False)
result_status = db.StringProperty(required=False, choices=_RESULTS)
# For UI purposes only.
chart_url = db.TextProperty(default="")
chart_width = db.IntegerProperty(default=300, indexed=False)
sparkline_url = db.TextProperty(default="")
start_time = db.DateTimeProperty(auto_now_add=True)
@classmethod
def kind(cls):
"""Returns entity kind."""
return "_AE_MR_MapreduceState"
@classmethod
def get_key_by_job_id(cls, mapreduce_id):
"""Retrieves the Key for a Job.
Args:
mapreduce_id: The job to retrieve.
Returns:
Datastore Key that can be used to fetch the MapreduceState.
"""
return db.Key.from_path(cls.kind(), str(mapreduce_id))
@classmethod
def get_by_job_id(cls, mapreduce_id):
"""Retrieves the instance of state for a Job.
Args:
mapreduce_id: The mapreduce job to retrieve.
Returns:
instance of MapreduceState for passed id.
"""
return db.get(cls.get_key_by_job_id(mapreduce_id))
def set_processed_counts(self, shards_processed):
"""Updates a chart url to display processed count for each shard.
Args:
shards_processed: list of integers with number of processed entities in
each shard
"""
chart = google_chart_api.BarChart(shards_processed)
shard_count = len(shards_processed)
if shards_processed:
# Only 16 labels on the whole chart.
stride_length = max(1, shard_count / 16)
chart.bottom.labels = []
for x in xrange(shard_count):
if (x % stride_length == 0 or
x == shard_count - 1):
chart.bottom.labels.append(x)
else:
chart.bottom.labels.append("")
chart.left.labels = ["0", str(max(shards_processed))]
chart.left.min = 0
self.chart_width = min(700, max(300, shard_count * 20))
self.chart_url = chart.display.Url(self.chart_width, 200)
def get_processed(self):
"""Number of processed entities.
Returns:
The total number of processed entities as int.
"""
return self.counters_map.get(context.COUNTER_MAPPER_CALLS)
processed = property(get_processed)
@staticmethod
def create_new(mapreduce_id=None,
gettime=datetime.datetime.now):
"""Create a new MapreduceState.
Args:
mapreduce_id: Mapreduce id as string.
gettime: Used for testing.
"""
if not mapreduce_id:
mapreduce_id = MapreduceState.new_mapreduce_id()
state = MapreduceState(key_name=mapreduce_id,
last_poll_time=gettime())
state.set_processed_counts([])
return state
@staticmethod
def new_mapreduce_id():
"""Generate new mapreduce id."""
return util._get_descending_key()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.properties() == other.properties()
class TransientShardState(object):
"""A shard's states that are kept in task payload.
TransientShardState holds two types of states:
1. Some states just don't need to be saved to datastore. e.g.
serialized input reader and output writer instances.
2. Some states are duplicated from datastore, e.g. slice_id, shard_id.
These are used to validate the task.
"""
def __init__(self,
base_path,
mapreduce_spec,
shard_id,
slice_id,
input_reader,
initial_input_reader,
output_writer=None,
retries=0,
handler=None):
"""Init.
Args:
base_path: base path of this mapreduce job. Deprecated.
mapreduce_spec: an instance of MapReduceSpec.
shard_id: shard id.
slice_id: slice id. When enqueuing task for the next slice, this number
is incremented by 1.
input_reader: input reader instance for this shard.
initial_input_reader: the input reader instance before any iteration.
Used by shard retry.
output_writer: output writer instance for this shard, if exists.
retries: the number of retries of the current shard. Used to drop
tasks from old retries.
handler: map/reduce handler.
"""
self.base_path = base_path
self.mapreduce_spec = mapreduce_spec
self.shard_id = shard_id
self.slice_id = slice_id
self.input_reader = input_reader
self.initial_input_reader = initial_input_reader
self.output_writer = output_writer
self.retries = retries
self.handler = handler
self._input_reader_json = self.input_reader.to_json()
def reset_for_retry(self, output_writer):
"""Reset self for shard retry.
Args:
output_writer: new output writer that contains new output files.
"""
self.input_reader = self.initial_input_reader
self.slice_id = 0
self.retries += 1
self.output_writer = output_writer
self.handler = self.mapreduce_spec.mapper.handler
def advance_for_next_slice(self, recovery_slice=False):
"""Advance relavent states for next slice.
Args:
recovery_slice: True if this slice is running recovery logic.
See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
for more info.
"""
if recovery_slice:
self.slice_id += 2
# Restore input reader to the beginning of the slice.
self.input_reader = self.input_reader.from_json(self._input_reader_json)
else:
self.slice_id += 1
def to_dict(self):
"""Convert state to dictionary to save in task payload."""
result = {"mapreduce_spec": self.mapreduce_spec.to_json_str(),
"shard_id": self.shard_id,
"slice_id": str(self.slice_id),
"input_reader_state": self.input_reader.to_json_str(),
"initial_input_reader_state":
self.initial_input_reader.to_json_str(),
"retries": str(self.retries)}
if self.output_writer:
result["output_writer_state"] = self.output_writer.to_json_str()
serialized_handler = util.try_serialize_handler(self.handler)
if serialized_handler:
result["serialized_handler"] = serialized_handler
return result
@classmethod
def from_request(cls, request):
"""Create new TransientShardState from webapp request."""
mapreduce_spec = MapreduceSpec.from_json_str(request.get("mapreduce_spec"))
mapper_spec = mapreduce_spec.mapper
input_reader_spec_dict = simplejson.loads(request.get("input_reader_state"),
cls=json_util.JsonDecoder)
input_reader = mapper_spec.input_reader_class().from_json(
input_reader_spec_dict)
initial_input_reader_spec_dict = simplejson.loads(
request.get("initial_input_reader_state"), cls=json_util.JsonDecoder)
initial_input_reader = mapper_spec.input_reader_class().from_json(
initial_input_reader_spec_dict)
output_writer = None
if mapper_spec.output_writer_class():
output_writer = mapper_spec.output_writer_class().from_json(
simplejson.loads(request.get("output_writer_state", "{}"),
cls=json_util.JsonDecoder))
assert isinstance(output_writer, mapper_spec.output_writer_class()), (
"%s.from_json returned an instance of wrong class: %s" % (
mapper_spec.output_writer_class(),
output_writer.__class__))
handler = util.try_deserialize_handler(request.get("serialized_handler"))
if not handler:
handler = mapreduce_spec.mapper.handler
return cls(mapreduce_spec.params["base_path"],
mapreduce_spec,
str(request.get("shard_id")),
int(request.get("slice_id")),
input_reader,
initial_input_reader,
output_writer=output_writer,
retries=int(request.get("retries")),
handler=handler)
class ShardState(db.Model):
"""Single shard execution state.
The shard state is stored in the datastore and is later aggregated by
controller task. ShardState key_name is equal to shard_id.
Shard state contains critical state to ensure the correctness of
shard execution. It is the single source of truth about a shard's
progress. For example:
1. A slice is allowed to run only if its payload matches shard state's
expectation.
2. A slice is considered running only if it has acquired the shard's lock.
3. A slice is considered done only if it has successfully committed shard
state to db.
Properties about the shard:
active: if we have this shard still running as boolean.
counters_map: shard's counters map as CountersMap. All counters yielded
within mapreduce are stored here.
mapreduce_id: unique id of the mapreduce.
shard_id: unique id of this shard as string.
shard_number: ordered number for this shard.
retries: the number of times this shard has been retried.
result_status: If not None, the final status of this shard.
update_time: The last time this shard state was updated.
shard_description: A string description of the work this shard will do.
last_work_item: A string description of the last work item processed.
writer_state: writer state for this shard. The shard's output writer
instance can save in-memory output references to this field in its
"finalize" method.
Properties about slice management:
slice_id: slice id of current executing slice. A slice's task
will not run unless its slice_id matches this. Initial
value is 0. By the end of slice execution, this number is
incremented by 1.
slice_start_time: a slice updates this to now at the beginning of
execution. If the transaction succeeds, the current task holds
a lease of slice duration + some grace period. During this time, no
other task with the same slice_id will execute. Upon slice failure,
the task should try to unset this value to allow retries to carry on
ASAP.
slice_request_id: the request id that holds/held the lease. When lease has
expired, new request needs to verify that said request has indeed
ended according to logs API. Do this only when lease has expired
because logs API is expensive. This field should always be set/unset
with slice_start_time. It is possible Logs API doesn't log a request
at all or doesn't log the end of a request. So a new request can
proceed after a long conservative timeout.
slice_retries: the number of times a slice has been retried due to
processing data when lock is held. Taskqueue/datastore errors
related to slice/shard management are not counted. This count is
only a lower bound and is used to determined when to fail a slice
completely.
acquired_once: whether the lock for this slice has been acquired at
least once. When this is True, duplicates in outputs are possible.
"""
RESULT_SUCCESS = "success"
RESULT_FAILED = "failed"
# Shard can be in aborted state when user issued abort, or controller
# issued abort because some other shard failed.
RESULT_ABORTED = "aborted"
_RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])
# Maximum number of shard states to hold in memory at any time.
_MAX_STATES_IN_MEMORY = 10
# Functional properties.
mapreduce_id = db.StringProperty(required=True)
active = db.BooleanProperty(default=True, indexed=False)
counters_map = json_util.JsonProperty(
CountersMap, default=CountersMap(), indexed=False)
result_status = db.StringProperty(choices=_RESULTS, indexed=False)
retries = db.IntegerProperty(default=0, indexed=False)
writer_state = json_util.JsonProperty(dict, indexed=False)
slice_id = db.IntegerProperty(default=0, indexed=False)
slice_start_time = db.DateTimeProperty(indexed=False)
slice_request_id = db.ByteStringProperty(indexed=False)
slice_retries = db.IntegerProperty(default=0, indexed=False)
acquired_once = db.BooleanProperty(default=False, indexed=False)
# For UI purposes only.
update_time = db.DateTimeProperty(auto_now=True, indexed=False)
shard_description = db.TextProperty(default="")
last_work_item = db.TextProperty(default="")
def __str__(self):
kv = {"active": self.active,
"slice_id": self.slice_id,
"last_work_item": self.last_work_item,
"update_time": self.update_time}
if self.result_status:
kv["result_status"] = self.result_status
if self.retries:
kv["retries"] = self.retries
if self.slice_start_time:
kv["slice_start_time"] = self.slice_start_time
if self.slice_retries:
kv["slice_retries"] = self.slice_retries
if self.slice_request_id:
kv["slice_request_id"] = self.slice_request_id
if self.acquired_once:
kv["acquired_once"] = self.acquired_once
keys = kv.keys()
keys.sort()
result = "ShardState is {"
for k in keys:
result += k + ":" + str(kv[k]) + ","
result += "}"
return result
def reset_for_retry(self):
"""Reset self for shard retry."""
self.retries += 1
self.last_work_item = ""
self.active = True
self.result_status = None
self.counters_map = CountersMap()
self.slice_id = 0
self.slice_start_time = None
self.slice_request_id = None
self.slice_retries = 0
self.acquired_once = False
def advance_for_next_slice(self, recovery_slice=False):
"""Advance self for next slice.
Args:
recovery_slice: True if this slice is running recovery logic.
See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
for more info.
"""
self.slice_start_time = None
self.slice_request_id = None
self.slice_retries = 0
self.acquired_once = False
if recovery_slice:
self.slice_id += 2
else:
self.slice_id += 1
def set_for_failure(self):
self.active = False
self.result_status = self.RESULT_FAILED
def set_for_abort(self):
self.active = False
self.result_status = self.RESULT_ABORTED
def set_for_success(self):
self.active = False
self.result_status = self.RESULT_SUCCESS
self.slice_start_time = None
self.slice_request_id = None
self.slice_retries = 0
self.acquired_once = False
def copy_from(self, other_state):
"""Copy data from another shard state entity to self."""
for prop in self.properties().values():
setattr(self, prop.name, getattr(other_state, prop.name))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.properties() == other.properties()
def get_shard_number(self):
"""Gets the shard number from the key name."""
return int(self.key().name().split("-")[-1])
shard_number = property(get_shard_number)
def get_shard_id(self):
"""Returns the shard ID."""
return self.key().name()
shard_id = property(get_shard_id)
@classmethod
def kind(cls):
"""Returns entity kind."""
return "_AE_MR_ShardState"
@classmethod
def shard_id_from_number(cls, mapreduce_id, shard_number):
"""Get shard id by mapreduce id and shard number.
Args:
mapreduce_id: mapreduce id as string.
shard_number: shard number to compute id for as int.
Returns:
shard id as string.
"""
return "%s-%d" % (mapreduce_id, shard_number)
@classmethod
def get_key_by_shard_id(cls, shard_id):
"""Retrieves the Key for this ShardState.
Args:
shard_id: The shard ID to fetch.
Returns:
The Datastore key to use to retrieve this ShardState.
"""
return db.Key.from_path(cls.kind(), shard_id)
@classmethod
def get_by_shard_id(cls, shard_id):
"""Get shard state from datastore by shard_id.
Args:
shard_id: shard id as string.
Returns:
ShardState for given shard id or None if it's not found.
"""
return cls.get_by_key_name(shard_id)
@classmethod
def find_by_mapreduce_state(cls, mapreduce_state):
"""Find all shard states for given mapreduce.
Deprecated. Use find_all_by_mapreduce_state.
This will be removed after 1.8.9 release.
Args:
mapreduce_state: MapreduceState instance
Returns:
A list of ShardStates.
"""
return list(cls.find_all_by_mapreduce_state(mapreduce_state))
@classmethod
def find_all_by_mapreduce_state(cls, mapreduce_state):
"""Find all shard states for given mapreduce.
Never runs within a transaction since it may touch >5 entity groups (one
for each shard).
Args:
mapreduce_state: MapreduceState instance
Yields:
shard states sorted by shard id.
"""
keys = cls.calculate_keys_by_mapreduce_state(mapreduce_state)
i = 0
while i < len(keys):
@db.non_transactional
def no_tx_get(i):
return db.get(keys[i:i+cls._MAX_STATES_IN_MEMORY])
# We need a separate non-transactional function so that non-transactional
# datastore reads can be mixed into this generator.
states = no_tx_get(i)
for s in states:
i += 1
if s is not None:
yield s
@classmethod
def calculate_keys_by_mapreduce_state(cls, mapreduce_state):
"""Calculate all shard states keys for given mapreduce.
Args:
mapreduce_state: MapreduceState instance
Returns:
A list of keys for shard states, sorted by shard id.
The corresponding shard states may not exist.
"""
if mapreduce_state is None:
return []
keys = []
for i in range(mapreduce_state.mapreduce_spec.mapper.shard_count):
shard_id = cls.shard_id_from_number(mapreduce_state.key().name(), i)
keys.append(cls.get_key_by_shard_id(shard_id))
return keys
@classmethod
def create_new(cls, mapreduce_id, shard_number):
"""Create new shard state.
Args:
mapreduce_id: unique mapreduce id as string.
shard_number: shard number for which to create shard state.
Returns:
new instance of ShardState ready to put into datastore.
"""
shard_id = cls.shard_id_from_number(mapreduce_id, shard_number)
state = cls(key_name=shard_id,
mapreduce_id=mapreduce_id)
return state
class MapreduceControl(db.Model):
"""Datastore entity used to control mapreduce job execution.
Only one command may be sent to jobs at a time.
Properties:
command: The command to send to the job.
"""
ABORT = "abort"
_COMMANDS = frozenset([ABORT])
_KEY_NAME = "command"
command = db.TextProperty(choices=_COMMANDS, required=True)
@classmethod
def kind(cls):
"""Returns entity kind."""
return "_AE_MR_MapreduceControl"
@classmethod
def get_key_by_job_id(cls, mapreduce_id):
"""Retrieves the Key for a mapreduce ID.
Args:
mapreduce_id: The job to fetch.
Returns:
Datastore Key for the command for the given job ID.
"""
return db.Key.from_path(cls.kind(), "%s:%s" % (mapreduce_id, cls._KEY_NAME))
@classmethod
def abort(cls, mapreduce_id, **kwargs):
"""Causes a job to abort.
Args:
mapreduce_id: The job to abort. Not verified as a valid job.
"""
cls(key_name="%s:%s" % (mapreduce_id, cls._KEY_NAME),
command=cls.ABORT).put(**kwargs)
class QuerySpec(object):
"""Encapsulates everything about a query needed by DatastoreInputReader."""
DEFAULT_BATCH_SIZE = 50
def __init__(self,
entity_kind,
keys_only=None,
filters=None,
batch_size=None,
model_class_path=None,
app=None,
ns=None):
self.entity_kind = entity_kind
self.keys_only = keys_only or False
self.filters = filters or None
self.batch_size = batch_size or self.DEFAULT_BATCH_SIZE
self.model_class_path = model_class_path
self.app = app
self.ns = ns
def to_json(self):
return {"entity_kind": self.entity_kind,
"keys_only": self.keys_only,
"filters": self.filters,
"batch_size": self.batch_size,
"model_class_path": self.model_class_path,
"app": self.app,
"ns": self.ns}
@classmethod
def from_json(cls, json):
return cls(json["entity_kind"],
json["keys_only"],
json["filters"],
json["batch_size"],
json["model_class_path"],
json["app"],
json["ns"])<|fim▁end|> | |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>"""Test config for channels"""
import pytest<|fim▁hole|>
@pytest.fixture(autouse=True)
def mock_search_tasks(mocker):
"""Patch the helpers so they don't fire celery tasks"""
return mocker.patch("channels.api.search_task_helpers")<|fim▁end|> | |
<|file_name|>RoleService.java<|end_file_name|><|fim▁begin|>package pe.com.ccpl.siconc.web.service;
<|fim▁hole|>
public Role getRole(int id);
}<|fim▁end|> | import pe.com.ccpl.siconc.web.model.Role;
public interface RoleService { |
<|file_name|>fai.go<|end_file_name|><|fim▁begin|>// Copyright ©2013 The bíogo Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package fai implements FAI fasta sequence file index handling.
package fai
import (
"bufio"
"bytes"
"encoding/csv"
"errors"
"fmt"
"io"
"sort"
"strconv"
)
const (
nameField = iota
lengthField
startField
basesField
bytesField
)
var ErrNonUnique = errors.New("non-unique record name")
// Index is an FAI index.
type Index map[string]Record
// NewIndex returns a new Index constructed from the FASTA sequence
// in the provided io.Reader.
func NewIndex(fasta io.Reader) (Index, error) {
sc := bufio.NewScanner(fasta)
sc.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := bytes.IndexByte(data, '\n'); i >= 0 {
return i + 1, data[:i+1], nil
}
if atEOF {
return len(data), data, nil
}
return 0, nil, nil
})
idx := make(Index)
var (
rec Record
offset int64
wantDescLine bool
)
for sc.Scan() {
b := bytes.TrimSpace(sc.Bytes())
if len(b) == 0 {
continue
}
if b[0] == '>' {
if rec.Name != "" {
idx[rec.Name] = rec
rec = Record{}
}
lenID := bytes.IndexAny(b, " \t")
if lenID < 0 {
rec.Name = string(b[1:])
} else {
rec.Name = string(b[1:lenID])
}
if _, exists := idx[rec.Name]; exists {
return nil, fmt.Errorf("fai: duplicate sequence identifier %s at %d", rec.Name, offset)
}
rec.Start = offset + int64(len(sc.Bytes()))
wantDescLine = false
} else {
if wantDescLine {
return nil, fmt.Errorf("fai: unexpected short line before offset %d", offset)
}
switch {
case rec.BytesPerLine == 0:
rec.BytesPerLine = len(sc.Bytes())
case len(sc.Bytes()) > rec.BytesPerLine:
return nil, fmt.Errorf("fai: unexpected long line at offset %d", offset)
case len(sc.Bytes()) < rec.BytesPerLine:
wantDescLine = true
}
switch {
case len(b) == 0:
// Do nothing.
case rec.BasesPerLine == 0:
rec.BasesPerLine = len(b)
case len(b) > rec.BasesPerLine:
return nil, fmt.Errorf("fai: unexpected long line at offset %d", offset)
case len(b) < rec.BasesPerLine:
wantDescLine = true
}
rec.Length += len(b)
}
offset += int64(len(sc.Bytes()))
}
if rec.Name != "" {
idx[rec.Name] = rec
rec = Record{}
}
return idx, sc.Err()
}
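// Rough usage sketch (the file and sequence names are illustrative only):
//
//	f, err := os.Open("genome.fa")
//	// handle err
//	idx, err := NewIndex(f)
//	// handle err
//	rec := idx["chr1"]
//	_ = rec.Position(0) // byte offset of the first base of chr1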
// Record is a single FAI index record.
type Record struct {
// Name is the name of the sequence.
Name string
// Length is the length of the sequence.
Length int
// Start is the starting seek offset of
// the sequence.
Start int64
// BasesPerLine is the number of sequence
// bases per line.
BasesPerLine int
// BytesPerLine is the number of bytes
// used to represent each line.
BytesPerLine int
}
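// For reference, WriteTo below emits one tab-separated line per Record with
// exactly these five fields, e.g. (illustrative values only):
//
//	chr1	248956422	112	60	61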
// Position returns the seek offset of the sequence position p for the
// given Record.
func (r Record) Position(p int) int64 {
if p < 0 || r.Length <= p {
panic("fai: index out of range")
}
return r.position(p)
}
func (r Record) position(p int) int64 {
return r.Start + int64(p/r.BasesPerLine*r.BytesPerLine+p%r.BasesPerLine)
}
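// Worked example (illustrative numbers): with Start=7, BasesPerLine=60 and
// BytesPerLine=61 (60 bases plus a newline), position(130) = 7 + 2*61 + 10 = 139.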
// endOfLineOffset returns the number of bytes until the end of the line
// holding position p.
func (r Record) endOfLineOffset(p int) int {
if p/r.BasesPerLine == r.Length/r.BasesPerLine {
return r.Length - p
}
return r.BasesPerLine - p%r.BasesPerLine
}
func mustAtoi(fields []string, index, line int) int {
i, err := strconv.ParseInt(fields[index], 10, 0)
if err != nil {
panic(parseError(line, index, err))
}
return int(i)
}
func mustAtoi64(fields []string, index, line int) int64 {
i, err := strconv.ParseInt(fields[index], 10, 64)
if err != nil {
panic(parseError(line, index, err))
}
return i
}
// ReadFrom returns an Index from the stream provided by an io.Reader or an error. If the input
// contains non-unique records the error is a csv.ParseError identifying the second non-unique
// record.
func ReadFrom(r io.Reader) (idx Index, err error) {
tr := csv.NewReader(r)
tr.Comma = '\t'
tr.FieldsPerRecord = 5
defer func() {
r := recover()
if r != nil {
e, ok := r.(error)
if !ok {
panic(r)
}
if _, ok = r.(*csv.ParseError); !ok {
panic(r)
}
err = e
idx = nil
}
}()
for line := 1; ; line++ {
rec, err := tr.Read()
if err == io.EOF {
return idx, nil
}
if err != nil {
return nil, err
}
if idx == nil {
idx = make(Index)
} else if _, exists := idx[rec[nameField]]; exists {
return nil, parseError(line, 0, ErrNonUnique)
}
idx[rec[nameField]] = Record{
Name: rec[nameField],
Length: mustAtoi(rec, lengthField, line),
Start: mustAtoi64(rec, startField, line),
BasesPerLine: mustAtoi(rec, basesField, line),
BytesPerLine: mustAtoi(rec, bytesField, line),
}
}
}<|fim▁hole|>
func parseError(line, column int, err error) *csv.ParseError {
return &csv.ParseError{
StartLine: line,
Line: line,
Column: column,
Err: err,
}
}
// WriteTo writes the given index to w in order of ascending start position.
func WriteTo(w io.Writer, idx Index) error {
recs := make([]Record, 0, len(idx))
for _, r := range idx {
recs = append(recs, r)
}
sort.Sort(byStart(recs))
for _, r := range recs {
_, err := fmt.Fprintf(w, "%s\t%d\t%d\t%d\t%d\n", r.Name, r.Length, r.Start, r.BasesPerLine, r.BytesPerLine)
if err != nil {
return err
}
}
return nil
}
type byStart []Record
func (r byStart) Len() int { return len(r) }
func (r byStart) Less(i, j int) bool { return r[i].Start < r[j].Start }
func (r byStart) Swap(i, j int) { r[i], r[j] = r[j], r[i] }<|fim▁end|> | |
<|file_name|>korw.rs<|end_file_name|><|fim▁begin|>use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn korw_1() {
run_test(&Instruction { mnemonic: Mnemonic::KORW, operand1: Some(Direct(K7)), operand2: Some(Direct(K6)), operand3: Some(Direct(K1)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 204, 69, 249], OperandSize::Dword)
}
fn korw_2() {<|fim▁hole|><|fim▁end|> | run_test(&Instruction { mnemonic: Mnemonic::KORW, operand1: Some(Direct(K7)), operand2: Some(Direct(K3)), operand3: Some(Direct(K2)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[197, 228, 69, 250], OperandSize::Qword)
} |
<|file_name|>PathState.cpp<|end_file_name|><|fim▁begin|>/* Copyright 2015 ETH Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include "Mutex.h"
#include "PathState.h"
#include "Utils.h"
/* calculate the cubic root of x using a table lookup followed by one
* Newton-Raphson iteration.
* Avg err ~= 0.195%
*
* Taken (and slightly modified) from Linux TCP Cubic implementation
*/
static uint32_t cubeRoot(uint64_t a)
{
uint32_t x, b, shift;
uint64_t c;
/*
* cbrt(x) MSB values for x MSB values in [0..63].
* Precomputed then refined by hand - Willy Tarreau
*
* For x in [0..63],
* v = cbrt(x << 18) - 1
* cbrt(x) = (v[x] + 10) >> 6
*/
static const uint8_t v[] = {
/* 0x00 */ 0, 54, 54, 54, 118, 118, 118, 118,
/* 0x08 */ 123, 129, 134, 138, 143, 147, 151, 156,
/* 0x10 */ 157, 161, 164, 168, 170, 173, 176, 179,
/* 0x18 */ 181, 185, 187, 190, 192, 194, 197, 199,
/* 0x20 */ 200, 202, 204, 206, 209, 211, 213, 215,
/* 0x28 */ 217, 219, 221, 222, 224, 225, 227, 229,
/* 0x30 */ 231, 232, 234, 236, 237, 239, 240, 242,
/* 0x38 */ 244, 245, 246, 248, 250, 251, 252, 254,
};
/* Probably not the fastest way but works without using asm */
b = 0;
c = a;
while (c >>= 1)
b++;
b++;
if (b < 7) {
/* a in [0..63] */
return ((uint32_t)v[(uint32_t)a] + 35) >> 6;
}
b = ((b * 84) >> 8) - 1;
shift = (a >> (b * 3));
x = ((uint32_t)(((uint32_t)v[shift] + 10) << b)) >> 6;
/*
 * Newton-Raphson iteration:
 *   x_{k+1} = ( 2 * x_k + a / x_k^2 ) / 3
 */
x = 2 * x + (uint32_t)(a / ((uint64_t)x * (uint64_t)(x - 1)));
x = ((x * 341) >> 10);
return x;
}
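// Illustrative check of the approximation above: cubeRoot(1000) should come
// out as 10 and cubeRoot(27000) as 30, consistent with the ~0.195% average
// error quoted in the header comment.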
PathState::PathState(int rtt, int mtu)
: mPathIndex(-1),
mMTU(mtu),
mSRTT(rtt),
mLastRTT(rtt),
mSendWindow(2),
mCongestionWindow(1),
mWindow(1),
mInFlight(0),
mCurrentBurst(0),
mInLoss(false),
mTotalSent(0),
mTotalAcked(0),
mLastTotalAcked(0),
mTotalLost(0),
mAverageLossInterval(0)
{
mVAR = rtt >> VAR_SHIFT;
mRTO = mSRTT + (mVAR << 2);
mSRTT = 0; // initial RTT estimate used for RTO only
memset(mLossBursts, 0, sizeof(mLossBursts));
Mutex mMutex;
}
PathState::~PathState()
{
}
void PathState::setIndex(int index)
{
mPathIndex = index;
}
void PathState::setRemoteWindow(uint32_t sendWindow)
{
mSendWindow = sendWindow;
DEBUG("send window set to %d\n", mSendWindow);
}
int PathState::timeUntilReady()
{
return 0;
}
int PathState::bandwidth()
{
if (mSRTT == 0)
return 0;
return mWindow * mMTU / mSRTT * 1000000;
}
int PathState::estimatedRTT() EXCLUDES(mMutex)
{
int ret;
mMutex.Lock();
ret = mSRTT;
mMutex.Unlock();
return ret;
}
int PathState::getRTO() EXCLUDES(mMutex)
{
int ret;
mMutex.Lock();
ret = mRTO;
mMutex.Unlock();
return ret;
}
int PathState::packetsInFlight() EXCLUDES(mMutex)
{
int ret;
mMutex.Lock();
ret = mInFlight;
mMutex.Unlock();
return ret;
}
double PathState::getLossRate() EXCLUDES(mMutex)
{
mMutex.Lock();
uint64_t currentInterval = mTotalAcked - mLastTotalAcked;
if (currentInterval > mAverageLossInterval) {
if (currentInterval > 2 * mAverageLossInterval)
calculateLoss(ALI_HISTORY_DISCOUNTING);
else
calculateLoss(ALI_FROM_INTERVAL_0);
}
mMutex.Unlock();
if (mAverageLossInterval == 0)
return 0.0;
return 1.0 / mAverageLossInterval;
}
void PathState::addLoss(uint64_t packetNum) EXCLUDES(mMutex)
{
mTotalLost++;
mMutex.Lock();
mInFlight--;
mMutex.Unlock();
mCurrentBurst++;
mInLoss = true;
if (mCurrentBurst == SSP_MAX_LOSS_BURST) {
mLossBursts[SSP_MAX_LOSS_BURST - 1]++;
mCurrentBurst = 0;
mInLoss = false;
}
}
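// addRTTSample() below mirrors the classic Jacobson/Karels estimator used by
// TCP (cf. RFC 6298): mSRTT and mVAR are exponentially weighted moving
// averages with gains 1/2^ERR_SHIFT and 1/2^VAR_SHIFT respectively, and the
// retransmission timeout is SRTT + 4*VAR, clamped to SSP_MAX_RTO.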
void PathState::addRTTSample(int rtt, uint64_t packetNum) EXCLUDES(mMutex)
{
mTotalAcked++;
mMutex.Lock();
mInFlight--;
DEBUG("path %d: receive ack: %d packets now in flight\n", mPathIndex, mInFlight);
if (rtt > 0) {
mLastRTT = rtt;
if (mSRTT == 0) {
mSRTT = rtt;
mVAR = rtt >> 1;
} else {
int err = rtt - mSRTT;
mSRTT += err >> ERR_SHIFT;
err = err >= 0 ? err : -err;
mVAR += (err - mVAR) >> VAR_SHIFT;
}
mRTO = mSRTT + (mVAR << 2);
if (mRTO > SSP_MAX_RTO)
mRTO = SSP_MAX_RTO;
DEBUG("path %d: RTT sample %d us, sRTT = %d us, RTO = %d us\n", mPathIndex, rtt, mSRTT, mRTO);
}
if (mInLoss) {
mLossBursts[mCurrentBurst]++;
mCurrentBurst = 0;
mInLoss = false;
}
mMutex.Unlock();
}
void PathState::addRetransmit() EXCLUDES(mMutex)
{
mMutex.Lock();
mLossIntervals.push_front(mTotalAcked - mLastTotalAcked);
if (mLossIntervals.size() > MAX_LOSS_INTERVALS)
mLossIntervals.pop_back();
DEBUG("loss on path %d: new loss interval = %ld, %d/%d in flight\n",
mPathIndex, mTotalAcked - mLastTotalAcked, mInFlight, mWindow);
mLastTotalAcked = mTotalAcked;
calculateLoss(ALI_FROM_INTERVAL_1);
mMutex.Unlock();
}
void PathState::handleSend(uint64_t packetNum) EXCLUDES(mMutex)
{
mMutex.Lock();
mInFlight++;
mTotalSent++;
DEBUG("path %d: send: %d/%d packets now in flight\n", mPathIndex, mInFlight, mWindow);
mMutex.Unlock();
}
void PathState::handleTimeout() EXCLUDES(mMutex)
{
mMutex.Lock();
mRTO = mRTO << 1;
if (mRTO > SSP_MAX_RTO)
mRTO = SSP_MAX_RTO;
DEBUG("timeout: new rto = %d\n", mRTO);
mMutex.Unlock();
}
void PathState::handleDupAck()
{
}
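// calculateLoss() below appears to implement the TFRC-style weighted average
// loss interval (cf. RFC 5348): the most recent half of the recorded intervals
// get full weight, older ones are weighted down linearly, and
// ALI_HISTORY_DISCOUNTING additionally discounts history when the current
// loss-free interval is already much longer than the average.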
void PathState::calculateLoss(ALIType type)
{
if (mLossIntervals.empty())
return;
uint64_t currentInterval = mTotalAcked - mLastTotalAcked;
size_t i;
list<uint64_t>::iterator it;
size_t n = mLossIntervals.size();
double ws = 0.0;
double w = 0.0;
double d[MAX_LOSS_INTERVALS + 1];
double di = 2.0 * mAverageLossInterval / currentInterval;
double wi;
DEBUG("calculate average loss interval (%d), currentInterval = %ld\n", type, currentInterval);
if (di < 0.5)
di = 0.5;
for (i = 0; i <= MAX_LOSS_INTERVALS; i++)
d[i] = 1.0;
switch (type) {
case ALI_HISTORY_DISCOUNTING:
for (i = 1; i < MAX_LOSS_INTERVALS; i++)
d[i] = di;
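// no break: after discounting the older weights, fall through to include the current interval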
case ALI_FROM_INTERVAL_0:
mLossIntervals.push_front(currentInterval);
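// no break: fall through to the weighted-sum loop below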
case ALI_FROM_INTERVAL_1:
for (it = mLossIntervals.begin(), i = 1; it != mLossIntervals.end() && i < MAX_LOSS_INTERVALS; it++, i++) {
if (i <= n / 2) {
ws += d[i - 1] * (*it);
w += d[i - 1];
} else {
wi = 1 - (i - n / 2.0) / (n / 2.0 + 1);
ws += d[i - 1] * wi * (*it);
w += d[i - 1] * wi;
}
}
break;
default:
break;
}
if (type != ALI_FROM_INTERVAL_1)
mLossIntervals.pop_front();
mAverageLossInterval = ws / w;
DEBUG("average loss interval = %ld\n", mAverageLossInterval);
}
bool PathState::isWindowBased()
{
return false;
}
int PathState::window()
{
return 0;
}
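// Rough fit of a two-state (Gilbert-style) loss model from the burst-length histogram:
// p ~= P(an ACKed packet is followed by the start of a loss burst),
// q ~= P(the loss burst ends after a given loss).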
int PathState::profileLoss()
{
double p, q;
int m = 0;
for (int i = 1; i < SSP_MAX_LOSS_BURST; i++)
m += mLossBursts[i];
p = (double)m / mTotalAcked;
int mi1 = 0, mi2 = 0;
for (int i = 2; i < SSP_MAX_LOSS_BURST; i++)
mi2 += mLossBursts[i] * (i - 1);
for (int i = 1; i < SSP_MAX_LOSS_BURST; i++)
mi1 += mLossBursts[i] * i;
q = 1 - (double)mi2 / mi1;
printf("p = %f, q = %f\n", p, q);
return 0;
}
// CBR
CBRPathState::CBRPathState(int rtt, int mtu)
: PathState(rtt, mtu),
mSendInterval(SSP_SEND_INTERVAL)
{
memset(&mLastSendTime, 0, sizeof(mLastSendTime));
}
int CBRPathState::timeUntilReady()
{
if (mLastSendTime.tv_sec == 0)
return 0;
struct timeval current;
gettimeofday(&current, NULL);
DEBUG("%ld us since last send\n", elapsedTime(&mLastSendTime, &current));
int time = mSendInterval - elapsedTime(&mLastSendTime, &current);
if (time < 0)
time = 0;
return time;
}
int CBRPathState::bandwidth()
{
return mMTU / mSendInterval * 1000000;
}
void CBRPathState::handleSend(uint64_t packetNum)
{
PathState::handleSend(packetNum);
gettimeofday(&mLastSendTime, NULL);
}
// PCC
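// Rate-based controller in the spirit of PCC (Performance-oriented Congestion Control):
// sending is paced by mSendInterval, utility is measured over randomized monitor intervals
// of roughly 1.7-2.2 RTTs, and a start/decision/adjust state machine moves the rate in
// whichever direction the measured utility favours.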
PCCPathState::PCCPathState(int rtt, int mtu)
: CBRPathState(rtt, mtu),
mLastSendInterval(SSP_SEND_INTERVAL),
mMonitorRTT(0.0),
mMonitorReceived(0),
mMonitorLost(0),
mMonitoring(false),
mUtility(0.0),
mCurrentTrial(0),
mAdjustCount(0),
mDirection(0),
mState(PCC_START)
{
memset(&mMonitorStartTime, 0, sizeof(mMonitorStartTime));
memset(&mMonitorEndTime, 0, sizeof(mMonitorEndTime));
memset(mTrialResults, 0, sizeof(mTrialResults));
memset(mTrialIntervals, 0, sizeof(mTrialIntervals));
// mMonitorMutex (member) is default-constructed; no local Mutex is needed here.
}
int PCCPathState::timeUntilReady()
{
int currentInterval = mSendInterval;
if (mState == PCC_DECISION)
mSendInterval = mTrialIntervals[mCurrentTrial];
int res = CBRPathState::timeUntilReady();
mSendInterval = currentInterval;
return res;
}
void PCCPathState::handleSend(uint64_t packetNum) EXCLUDES(mMonitorMutex)
{
struct timeval t;
gettimeofday(&t, NULL);
CBRPathState::handleSend(packetNum);
if (!mMonitoring) {
DEBUG("%ld.%06ld: current state = %d, begin monitoring\n", t.tv_sec, t.tv_usec, mState);
mMonitorStartTime = t;
srand(t.tv_usec);
double x = (double)rand() / RAND_MAX; // 0 ~ 1.0
x /= 2.0; // 0 ~ 0.5
x += 1.7; // 1.7 ~ 2.2
mMonitorDuration = x * mSRTT;
if (mMonitorDuration < PCC_MIN_PACKETS * mSendInterval)
mMonitorDuration = PCC_MIN_PACKETS * mSendInterval;
mMonitorRTT = 0;
mMonitorReceived = 0;
mMonitorLost = 0;
mMonitoring = true;
}
if (mMonitoring) {
if (elapsedTime(&mMonitorStartTime, &t) < mMonitorDuration ) {
mMonitorMutex.Lock();
mMonitoredPackets.insert(packetNum);
mMonitorMutex.Unlock();
}
}
}
void PCCPathState::addRTTSample(int rtt, uint64_t packetNum) EXCLUDES(mMonitorMutex)
{
PathState::addRTTSample(rtt, packetNum);
if (mMonitoring) {
bool found = false;
mMonitorMutex.Lock();
found = mMonitoredPackets.find(packetNum) != mMonitoredPackets.end();
mMonitorMutex.Unlock();
if (found) {
mMonitorReceived++;
mMonitorRTT += rtt;
DEBUG("current state = %d: got ack %ld\n", mState, packetNum);
}
}
struct timeval t;
gettimeofday(&t, NULL);
if (elapsedTime(&mMonitorStartTime, &t) >= mMonitorDuration + mSRTT)
handleMonitorEnd();
}
void PCCPathState::addLoss(uint64_t packetNum)
{
PathState::addLoss(packetNum);
struct timeval t;
gettimeofday(&t, NULL);
if (elapsedTime(&mMonitorStartTime, &t) >= mMonitorDuration + mSRTT)
handleMonitorEnd();
}
void PCCPathState::handleMonitorEnd() EXCLUDES(mMonitorMutex)
{
if (!mMonitoring)
return;
mMonitorMutex.Lock();
gettimeofday(&mMonitorEndTime, NULL);
DEBUG("%ld.%06ld: monitor end\n", mMonitorEndTime.tv_sec, mMonitorEndTime.tv_usec);
long monitorTime = elapsedTime(&mMonitorStartTime, &mMonitorEndTime);
if (mMonitorReceived == 0) {
mMonitorRTT = SSP_MAX_RTO;
} else {
mMonitorRTT /= mMonitorReceived;
}
DEBUG("%lu packets sent during this interval, %lu received\n", mMonitoredPackets.size(), mMonitorReceived);
mMonitorLost = mMonitoredPackets.size() - mMonitorReceived;
double u = utility(mMonitorReceived, mMonitorLost, monitorTime / 1000000.0, mMonitorRTT);
DEBUG("utility %f\n", u);
if (mState == PCC_DECISION) {
DEBUG("decision phase, trial %d\n", mCurrentTrial);
mTrialResults[mCurrentTrial++] = u;
if (mCurrentTrial == PCC_TRIALS) {
int direction = 0;
for (int i = 0; i < PCC_TRIALS - 1; i += 2) {
if (mTrialIntervals[i] < mSendInterval) {
// i: shorter period, i + 1: longer period
if (mTrialResults[i] > mTrialResults[i + 1])
direction--;
else if (mTrialResults[i] < mTrialResults[i + 1])
direction++;
} else {
// i: longer period, i + 1: shorter period
if (mTrialResults[i] > mTrialResults[i + 1])
direction++;
else if (mTrialResults[i] < mTrialResults[i + 1])
direction--;
}
}
if (direction == 0) {
DEBUG("inconclusive, do over with larger deltas\n");
mAdjustCount++;
if (mAdjustCount > PCC_MAX_ADJUST_COUNT)
mAdjustCount = PCC_MAX_ADJUST_COUNT;
startDecision();
} else {
mDirection = direction / 2; // direction = +-2, mDirection = +-1
mState = PCC_ADJUST;
mLastSendInterval = mSendInterval;
mSendInterval += mSendInterval * mDirection * mAdjustCount * PCC_ADJUST_RATE;
DEBUG("switched to adjust phase, direction = %d with %d us period\n", mDirection, mSendInterval);
}
}
} else if (mState == PCC_ADJUST) {
if (u >= mUtility) {
mAdjustCount++;
if (mAdjustCount > PCC_MAX_ADJUST_COUNT)
mAdjustCount = PCC_MAX_ADJUST_COUNT;
mLastSendInterval = mSendInterval;
mSendInterval += mSendInterval * mDirection * mAdjustCount * PCC_ADJUST_RATE;
DEBUG("utility increased, keep going in direction %d with %d us period\n", mDirection, mSendInterval);
} else {
mSendInterval = mLastSendInterval;
mAdjustCount = 1;
DEBUG("utility decreased, drop back to decision phase with %d us period\n", mSendInterval);
startDecision();
}
mUtility = u;
} else if (mState == PCC_START) {
if (u >= mUtility) {
mLastSendInterval = mSendInterval;
mSendInterval /= 2;
DEBUG("utility increased, double speed: %d us period\n", mSendInterval);
} else {
mSendInterval = mLastSendInterval;
mAdjustCount = 1;
DEBUG("utility decreased, drop down to decision phase with %d us period\n", mSendInterval);
startDecision();
}
mUtility = u;
}
if (mSendInterval > SSP_MAX_SEND_INTERVAL)
mSendInterval = SSP_MAX_SEND_INTERVAL;
mMonitoredPackets.clear();
mMonitoring = false;
if (mMonitorReceived == 0)
mSendInterval *= 2;
mMonitorMutex.Unlock();
}
void PCCPathState::startDecision()
{
srand(time(NULL));
for (int i = 0; i < PCC_TRIALS - 1; i += 2) {
int delta = (rand() % 2) * 2 - 1;
delta *= mAdjustCount * PCC_ADJUST_RATE * mSendInterval;
mTrialIntervals[i] = mSendInterval + delta;
mTrialIntervals[i + 1] = mSendInterval - delta;
}
mCurrentTrial = 0;
mState = PCC_DECISION;
}
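// Utility of a monitor interval: throughput scaled by a sigmoid penalty that kicks in sharply
// once the loss rate exceeds about 5%, minus the loss rate itself -- close to the loss-based
// utility function proposed in the PCC paper.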
double PCCPathState::utility(int received, int lost, double time, double rtt)
{
DEBUG("%d %d %f %f\n", received, lost, time, rtt);
//utility = ((t-l)/time*(1-1/(1+exp(-100*(l/t-0.05))))-1*l/time);
//utility = ((t-l)/time*(1-1/(1+exp(-100*(l/t-0.05))))* (1-1/(1+exp(-10*(1-previous_rtt/rtt)))) -1*l/time)/rtt*1000;
double lossRate = (received + lost) > 0 ? (double)lost / (received + lost) : 0.0;
return received / time * (1 - 1 / (1 + exp(-100 * (lossRate - 0.05)))) - lost / time;
}
// TCP Reno
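// Window-based controller following TCP Reno: slow start until ssthresh, additive increase in
// congestion avoidance, fast retransmit/recovery on loss (ssthresh = cwnd/2, cwnd = ssthresh + 3,
// inflated per duplicate ACK), and cwnd reset to 1 on timeout.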
RenoPathState::RenoPathState(int rtt, int mtu)
: PathState(rtt, mtu),
mState(TCP_STATE_START),
mThreshold(-1),
mDupAckCount(0),
mAckCount(0)
{
}
int RenoPathState::timeUntilReady()
{
if (mInFlight < mWindow) {
DEBUG("path %d: room in window (%d/%d), send right away\n", mPathIndex, mInFlight, mWindow);
return 0;
} else {
DEBUG("path %d: window full, wait about 1 RTT (%d us)\n", mPathIndex, mSRTT);
return mSRTT ? mSRTT : mRTO;
}
}
void RenoPathState::handleTimeout()
{
PathState::handleTimeout();
mState = TCP_STATE_TIMEOUT;
mCongestionWindow = 1;
DEBUG("path %d: timeout: congestion window set to 1\n", mPathIndex);
}
void RenoPathState::handleDupAck()
{
mDupAckCount++;
if (mDupAckCount > SSP_FR_THRESHOLD && mState == TCP_STATE_FAST_RETRANSMIT) {
mCongestionWindow++;
mWindow = mCongestionWindow > mSendWindow ? mSendWindow : mCongestionWindow;
DEBUG("path %d: duplicate ack received: window set to %d (%d/%d)\n", mPathIndex, mWindow, mCongestionWindow, mSendWindow);
}
}
void RenoPathState::addRTTSample(int rtt, uint64_t packetNum)
{
PathState::addRTTSample(rtt, packetNum);
mDupAckCount = 0;
mAckCount++;
switch (mState) {
case TCP_STATE_START:
case TCP_STATE_TIMEOUT:
DEBUG("path %d: slow start: %d -> %d\n", mPathIndex, mCongestionWindow, mCongestionWindow + 1);
mCongestionWindow++;
if (mCongestionWindow == mThreshold) {
DEBUG("path %d: reached threshold: %d\n", mPathIndex, mThreshold);
mState = TCP_STATE_NORMAL;
}
break;
case TCP_STATE_FAST_RETRANSMIT:
mState = TCP_STATE_NORMAL;
mCongestionWindow = mThreshold;
break;
case TCP_STATE_NORMAL:
if (mAckCount == mCongestionWindow) {
DEBUG("path %d: congestion avoidance: %d -> %d\n", mPathIndex, mCongestionWindow, mCongestionWindow + 1);
mAckCount = 0;
mCongestionWindow++;
}
break;
default:
break;
}
mWindow = mCongestionWindow > mSendWindow ? mSendWindow : mCongestionWindow;
DEBUG("path %d: ack received: window set to %d (%d/%d)\n", mPathIndex, mWindow, mCongestionWindow, mSendWindow);
}
void RenoPathState::addRetransmit()
{
PathState::addRetransmit();
mThreshold = mWindow >> 1;
if (mThreshold < 2)
mThreshold = 2;
mAckCount = 0;
if (mState != TCP_STATE_TIMEOUT && mState != TCP_STATE_FAST_RETRANSMIT) {
mState = TCP_STATE_FAST_RETRANSMIT;
mCongestionWindow = mThreshold + 3;
}
mWindow = mCongestionWindow > mSendWindow ? mSendWindow : mCongestionWindow;
DEBUG("path %d: packet loss: window set to %d (%d/%d)\n", mPathIndex, mWindow, mCongestionWindow, mSendWindow);
}
bool RenoPathState::isWindowBased()
{
return true;
}
int RenoPathState::window()
{
return mWindow;
}
// TCP CUBIC
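// Window-based controller following CUBIC (cf. RFC 8312): on loss the window is cut by BETA and
// the previous maximum is remembered (with fast-convergence scaling); in congestion avoidance,
// update() grows the window along target = origin + C*(t - K)^3 with K = cbrt((Wmax - cwnd)/C),
// and doTCPFriendly() keeps growth at least as fast as a standard AIMD TCP.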
CUBICPathState::CUBICPathState(int rtt, int mtu)
: PathState(rtt, mtu),
mThreshold(-1),
mTimeout(false)
{
reset();
}
int CUBICPathState::timeUntilReady() EXCLUDES(mMutex)
{
mMutex.Lock();
if (mInFlight < mWindow) {
DEBUG("path %d: room in window (%d/%d), send right away\n", mPathIndex, mInFlight, mWindow);
mMutex.Unlock();
return 0;
} else {
DEBUG("path %d: window full (%d/%d), wait about 1 RTT (%d us)\n", mPathIndex, mInFlight, mWindow, mSRTT);
mMutex.Unlock();
return mSRTT ? mSRTT : mRTO;
}
}
void CUBICPathState::addRTTSample(int rtt, uint64_t packetNum)
{
PathState::addRTTSample(rtt, packetNum);
if (rtt == 0)
return;
mTimeout = false;
if (mMinDelay == 0 || mMinDelay > rtt)
mMinDelay = rtt;
mAckCount++;
int thresh = mThreshold > 0 ? mThreshold : CUBIC_SSTHRESH;
if (mCongestionWindow < thresh) {
mCongestionWindow++;
DEBUG("path %d: slow start, increase to %d\n", mPathIndex, mCongestionWindow);
} else {
update();
DEBUG("path %d: congestion avoidance (%d/%d)\n", mPathIndex, mWindowCount, mCount);
if (mWindowCount > mCount) {
mCongestionWindow++;
DEBUG("path %d: increase window to %d\n", mPathIndex, mCongestionWindow);
mWindowCount = 0;
} else {
mWindowCount++;
}
}
mWindow = mCongestionWindow < mSendWindow ? mCongestionWindow : mSendWindow;
DEBUG("path %d: ack received: window set to %d (%d|%d)\n", mPathIndex, mWindow, mCongestionWindow, mSendWindow);
}
void CUBICPathState::addRetransmit() EXCLUDES(mMutex)
{
PathState::addRetransmit();
mEpochStart = 0;
if (mCongestionWindow < mMaxWindow)
mMaxWindow = mCongestionWindow * (2 - BETA) / 2;
else
mMaxWindow = mCongestionWindow;
mCongestionWindow *= (1 - BETA);
if (mCongestionWindow < 1)
mCongestionWindow = 1;
mThreshold = mCongestionWindow;
if (mTimeout)
mCongestionWindow = 1;
mMutex.Lock();
mWindow = mCongestionWindow < mSendWindow ? mCongestionWindow : mSendWindow;
mMutex.Unlock();
DEBUG("path %d: packet loss: window set to %d (last max window %d)\n", mPathIndex, mWindow, mMaxWindow);
}
void CUBICPathState::handleSend(uint64_t packetNum)
{
PathState::handleSend(packetNum);
}
void CUBICPathState::handleTimeout()
{
PathState::handleTimeout();
mTimeout = true;
mThreshold = (1 - BETA) * mCongestionWindow;
reset();
DEBUG("path %d: timeout: congestion window dropped to 1\n", mPathIndex);
}
void CUBICPathState::reset()
{
mWindowCount = 0;
mAckCount = 0;
mMinDelay = 0;
mMaxWindow = 0;
mTCPWindow = 0;
mOrigin = 0;
mCount = 0;
mK = 0;
mEpochStart = 0;
}
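// Estimate of the window a standard AIMD TCP flow would have reached over the same period; if it
// exceeds the cubic window, mCount is lowered so that growth is at least TCP-friendly.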
void CUBICPathState::doTCPFriendly()
{
mTCPWindow += 3 * BETA / (2 - BETA) * mAckCount / mCongestionWindow;
mAckCount = 0;
if (mTCPWindow > mCongestionWindow) {
if (mCount > mCongestionWindow / (mTCPWindow - mCongestionWindow))
mCount = mCongestionWindow / (mTCPWindow - mCongestionWindow);
}
}
void CUBICPathState::update()
{
time_t timestamp = time(NULL);
if (mEpochStart == 0) {
mEpochStart = timestamp;
if (mCongestionWindow < mMaxWindow) {
mK = cubeRoot((mMaxWindow - mCongestionWindow) / C);
mOrigin = mMaxWindow;
} else {
mK = 0;
mOrigin = mCongestionWindow;
}
mAckCount = 1;
mTCPWindow = mCongestionWindow;
}
int t = timestamp + mMinDelay / 1000000 - mEpochStart;
int x = t - mK;<|fim▁hole|> mCount = 100 * mCongestionWindow;
doTCPFriendly();
}
bool CUBICPathState::isWindowBased()
{
return true;
}
int CUBICPathState::window()
{
return mWindow;
}<|fim▁end|> | int target = mOrigin + C * x * x * x;
if (target > mCongestionWindow)
mCount = mCongestionWindow / (target - mCongestionWindow);
else |
<|file_name|>simple_request.py<|end_file_name|><|fim▁begin|>__author__ = 'thorwhalen'
import requests
from serialize.khan_logger import KhanLogger
import logging
class SimpleRequest(object):
def __init__(self, log_file_name=None, log_level=logging.INFO):
full_log_path_and_name = KhanLogger.default_log_path_with_unique_name(log_file_name)
self.logger = KhanLogger(file_path_and_name=full_log_path_and_name, level=log_level)
def slurp(self, url):
r = requests.get(url, timeout=30.0)
if not r.ok:
self.logger.log(level=logging.WARN, simple_request="HTTP Error: {} for url {}".format(r.status_code, url))
else:
self.logger.log(level=logging.INFO, simple_request="Slurped url {}".format(url))<|fim▁hole|> return r.text
if __name__ == '__main__':
sr = SimpleRequest()<|fim▁end|> | |
<|file_name|>test.py<|end_file_name|><|fim▁begin|>__author__ = 'swhite'
"""
This package contains the test modules for the repository app of the ReFlow project,
organized by test type (unit, integration, etc.)
To run all the tests in the repository app, using the manage.py command:
"python manage.py test repository".
Notes:
- add new test constants in the constants module<|fim▁hole|>
from unit_tests import *
from integration_tests import *<|fim▁end|> | """
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright (c) 2015 Holger Nahrstaedt
# See COPYING for license details.
"""
wavelet toolbox
"""
from __future__ import division, print_function, absolute_import
from .dwt import *
from .utility import *
from ._pyyawt import *
from .dwt1d import *
from .dwt2d import *
from .dwt3d import *
from .cowt import *
from .cwt import *
from .swt import *
from .denoising import *
from pyyawt.version import version as __version__
<|fim▁hole|>__all__ = [s for s in dir() if not s.startswith('_')]
test = Tester().test<|fim▁end|> | from numpy.testing import Tester
|
<|file_name|>IEditor.cpp<|end_file_name|><|fim▁begin|>#include <XEEditor/IEditor.hpp>
//#include <EditorI/SelectionRectangle.h>
//#include "E:/Projekte/Src Game/_Engine/XEngine/TestXE/XETUI/GUI.h"
//#include <EditorI/TFViewport.h>
#include <XEEditor/Editor.hpp>
namespace EI {
EI::Editor* gEditor = nullptr;
//LNK2005 error Because you enabled STATIC_STD_LIBS (which is not recommended unless you know what you do), you must set the same option in your project, which is /MTd.
//http://en.sfml-dev.org/forums/index.php?topic=7071.0
//struct ImplData
//{
// Ogre::Root* pRoot;
// Ogre::SceneManager* pSceneManager;
// Ogre::Camera* pCamera;
// Ogre::Viewport* pViewport;
// Ogre::RenderWindow* pRenderWindow;
// Ogre::RenderSystem* pRenderSystem;<|fim▁hole|> // Ogre::Entity* pEntity;
// // CameraNodes* pCameraNodes;
// // AnimationSystem* pAnimationSystem;
// SelectionRectangle* pSelectionRect;
// Ogre::PlaneBoundedVolumeListSceneQuery* pVolumeQuery;
// bool mouseBoneSelectionModeEnabled;
// // std::vector< BoneInfo > boneInfoList;
// // std::vector< BoneDisplayInfo > boneDisplayInfoList;
// bool boneSelectionChangedSinceLastCall;
// ImplData()
// : pRoot(NULL)
// , pSceneManager(NULL)
// , pCamera(NULL)
// , pViewport(NULL)
// , pRenderWindow(NULL)
// , pRenderSystem(NULL)
// , pSceneNode(NULL)
// , pEntity(NULL)
// // , pCameraNodes(NULL)
// // , pAnimationSystem(NULL)
// , pSelectionRect(NULL)
// , pVolumeQuery(NULL)
// , mouseBoneSelectionModeEnabled(false)
// , boneSelectionChangedSinceLastCall(true)
// {
// }
//};
//void CreateResourceGroups()
//{
// Ogre::ResourceGroupManager::getSingleton().addResourceLocation("TecnoFreak.data", "Zip", "TecnoFreak");
//}
//XE::XEngine* engine;
//XE::OgreConsole* console;
//EI::Editor* gEditor = nullptr;
void getStates(char* statesinfo)
{
//std::cout << "getStates";
// statesinfo->size = 9;
//Ogre::String test("hmmm");
// statesinfo = "xxx";
strncpy(statesinfo, "xxx\0", 5);
std::cout << "getStates:" << statesinfo;
//char temp[51];
//if (strlen(word)>50)
// return false;
// statesinfo[0] = test.c_str();
// statesinfo->buffer[1] = "1";
// for (int i = 0; i < statesinfo->size; i++)
// statesinfo->buffer[i] = "h";
//Ogre::String str(statesinfo->buffer);
//Ogre::String* states = new Ogre::String[5];
//str = "asdasd";
//states[0] = "State1";
// states[1] = "State2";
//states[2] = "State3";
//states[3] = "State4";
//statesinfo->buffer = states->c_str();
}
unsigned char* command(const char* command, unsigned char* data, int len)
{
std::cout << "command";
if (gEditor)
return gEditor->consoleCmd(command, data, len);
return 0;
}
bool moveToState(const char* stateName)
{
std::cout << "moveToState";
if (gEditor)
{
gEditor->moveToState(stateName);
return true;
}
return false;
}
void renderTargetSize(const char* rtName, Ogre::Real x, Ogre::Real y)
{
gEditor->renderTargetSize(rtName, x, y);
}
int pushEvent(sfEvent pushEvent)
{
//std::cout << "pushEvent";
// return pushEvent.key.code;
int test = 0;
if (gEditor)
{
test = gEditor->pushEvent(pushEvent);
return test;
}
/*if (pushEvent.key.code == 23)
return true;
else*/
return -1;
}
void* renderOnceTexturePtr(const char* stateName, int width, int height)
{
void* bbSurface = nullptr;
if (gEditor)
{
if (gEditor->getEngine()->running())
{
gEditor->getEngine()->update();
//Ogre::WindowEventUtilities::messagePump();
}
}
return bbSurface;
}
//LPVOID stateInit(sf::WindowHandle hwnd, const char* stateName)
void* stateInit(const char* stateName, int width, int height)
{
std::cout << "initState";
void* bbSurface = nullptr;
if (!gEditor)
{
gEditor = new EI::Editor();
bbSurface = gEditor->InitState(stateName, width, height);
std::cout << "initState OK";
/*bool running = true;
while (running)
{
EI::stateUpdate();
}*/
return bbSurface;
}
return bbSurface;
// std::thread t1(hello);
// t1.join();
//Ogre::ResourceGroupManager::getSingleton().initialiseAllResourceGroups();
//XE::LogManager::getSingleton().logMessage("TecnoFreak: Initialising scene manger");
//g_data.pSceneManager = g_data.pRoot->createSceneManager(Ogre::ST_GENERIC);
//g_data.pCamera = g_data.pSceneManager->createCamera("Default");
//g_data.pCamera->setNearClipDistance(0.1f);
//g_data.pCamera->setAutoAspectRatio(true);
//g_data.pViewport = g_data.pRenderWindow->addViewport(g_data.pCamera);
//g_data.pViewport->setBackgroundColour(Ogre::ColourValue::Red);
//Ogre::SceneNode* pRootSceneNode = g_data.pSceneManager->getRootSceneNode();
//// g_data.pCameraNodes = new CameraNodes(pRootSceneNode, g_data.pCamera);
//g_data.pSelectionRect = new SelectionRectangle("Selection SelectionRectangle");
//pRootSceneNode->attachObject(g_data.pSelectionRect);
//g_data.pVolumeQuery = g_data.pSceneManager->createPlaneBoundedVolumeQuery(Ogre::PlaneBoundedVolumeList());
//XE::LogManager::getSingleton().logMessage("TecnoFreak: Finished initialising Ogre viewport");
}
bool stateUpdate()
{
if (gEditor)
{
if (gEditor->getEngine()->running())
{
gEditor->getEngine()->update();
//Ogre::WindowEventUtilities::messagePump();
return true;
}
}
return false;
//if (g_data.pRoot == NULL)
//{
// return;
//}
//if (g_data.pRenderWindow == NULL)
//{
// return;
//}
///*if (g_data.pCameraNodes == NULL)
//{
//return;
//}*/
//static Ogre::Timer s_frameTimer;
//const unsigned long elapsedMilliseconds = s_frameTimer.getMilliseconds();
//const float elapsedSeconds = static_cast< float >(elapsedMilliseconds) / 1000.f;
//s_frameTimer.reset();
//g_data.pRenderWindow->windowMovedOrResized();
//g_data.pRoot->renderOneFrame();
//g_data.pCameraNodes->Update();
//UpdateBoneColours();
//if (g_data.pAnimationSystem != NULL)
//{
// g_data.pAnimationSystem->update(elapsedSeconds);
//}
}
void quit()
{
if (gEditor)
{
gEditor->getEngine()->quit();
delete gEditor;
}
//ImplData tmp = g_data;
//g_data = ImplData();
////delete tmp.pAnimationSystem;
////delete tmp.pCameraNodes;
//delete tmp.pSelectionRect;
//delete tmp.pRoot;
}
//sf::WindowHandle
// bool startState(sf::WindowHandle hwnd)
// {
// // sf::WindowHandle winHandle = reinterpret_cast<sf::WindowHandle>(hwnd);
//
// sf::Window* window = new sf::Window(hwnd);
//
// // Create the window
// //sfWindow* window = new sfWindow;
// //window->This.create(videoMode, title, style, params);
//
// //sf::Window mWindow(sf::VideoMode(800, 600), "My window");
//
// unsigned long winHandle = reinterpret_cast<unsigned long>(window->getSystemHandle());
//
//
//
//
// // initialise root
// Ogre::NameValuePairList misc;
// misc["externalWindowHandle"] = Ogre::StringConverter::toString(winHandle);
// // misc["externalGLContext"] = XE::StringConverter::toString(winGlContext);
// // misc["externalGLControl"] = Ogre::String("True");
////#else
////misc["currentGLContext"] = String("True");
////#endif
//
//// XE::RenderWindow *renderWindow = root->createRenderWindow("Main", 820, 440, false, &misc);
//
// return true;
// }
} // ns EI<|fim▁end|> |
// Ogre::SceneNode* pSceneNode; |
<|file_name|>command_line.py<|end_file_name|><|fim▁begin|>import argparse
import docker
import logging
import os
import docket
logger = logging.getLogger('docket')
logging.basicConfig()
parser = argparse.ArgumentParser(description='')
parser.add_argument('-t --tag', dest='tag', help='tag for final image')
parser.add_argument('--verbose', dest='verbose', action='store_true', help='verbose output', default=False)
parser.add_argument('--no-cache', dest='no_cache', action='store_true', help='Do not use cache when building the image', default=False)
parser.add_argument('buildpath', nargs='*')
args = parser.parse_args()
if args.verbose:<|fim▁hole|>
cert_path = os.environ.get('DOCKER_CERT_PATH', '')
tls_verify = os.environ.get('DOCKER_TLS_VERIFY', '0')
base_url = os.environ.get('DOCKER_HOST', 'tcp://127.0.0.1:2375')
base_url = base_url.replace('tcp:', 'https:')
tls_config = None
if cert_path:
tls_config = docker.tls.TLSConfig(verify=tls_verify,
client_cert=(os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem')),
ca_cert=os.path.join(cert_path, 'ca.pem')
)
client = docker.Client(base_url=base_url, version='1.15', timeout=10, tls=tls_config)
tag = args.tag or None
buildpath = args.buildpath[0]
def main():
docket.build(client=client, tag=tag, buildpath=buildpath, no_cache=args.no_cache)
exit()
if __name__ == '__main__':
main()<|fim▁end|> | logger.setLevel(logging.DEBUG) |
<|file_name|>insn_assert_lt.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# AWL simulator - instructions
#
# Copyright 2012-2014 Michael Buesch <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
from awlsim.core.instructions.main import * #@nocy
from awlsim.core.operators import *
#from awlsim.core.instructions.main cimport * #@cy
class AwlInsn_ASSERT_LT(AwlInsn): #+cdef
__slots__ = ()
def __init__(self, cpu, rawInsn):
AwlInsn.__init__(self, cpu, AwlInsn.TYPE_ASSERT_LT, rawInsn)
self.assertOpCount(2)
def run(self):
#@cy cdef S7StatusWord s
s = self.cpu.statusWord
val0 = self.cpu.fetch(self.ops[0])
val1 = self.cpu.fetch(self.ops[1])
if not (val0 < val1):
raise AwlSimError("Assertion failed")<|fim▁hole|><|fim▁end|> | s.NER = 0 |
<|file_name|>facetrack.py<|end_file_name|><|fim▁begin|><|fim▁hole|># Released under the BSD license. See LICENSE file for details.
"""
This program basically does face detection an blurs the face out.
"""
print __doc__
from SimpleCV import Camera, Display, HaarCascade
# Initialize the camera
cam = Camera()
# Create the display to show the image
display = Display()
# Haar Cascade face detection, only faces
haarcascade = HaarCascade("face")
# Loop forever
while display.isNotDone():
# Get image, flip it so it looks mirrored, scale to speed things up
img = cam.getImage().flipHorizontal().scale(0.5)
# Load in trained face file
faces = img.findHaarFeatures(haarcascade)
# Pixelize the detected face
if faces:
bb = faces[-1].boundingBox()
img = img.pixelize(10, region=(bb[0], bb[1], bb[2], bb[3]))
# Display the image
img.save(display)<|fim▁end|> | #!/usr/bin/env python
# |
<|file_name|>production.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use sendgrid to send emails
- Use MEMCACHIER on Heroku
'''
from configurations import values
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
# TODO: Fix this where even if in Dev this class is called.
pass
from .common import Common
class Production(Common):
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
# END INSTALLED_APPS
# SECRET KEY
SECRET_KEY = values.SecretValue()
# END SECRET KEY
# django-secure
INSTALLED_APPS += ("djangosecure", )
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
SECURE_FRAME_DENY = values.BooleanValue(True)
SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
SESSION_COOKIE_SECURE = values.BooleanValue(False)
SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
SECURE_SSL_REDIRECT = values.BooleanValue(True)
# end django-secure
# SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts<|fim▁hole|>
# STORAGE CONFIGURATION
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = values.SecretValue()
AWS_SECRET_ACCESS_KEY = values.SecretValue()
AWS_STORAGE_BUCKET_NAME = values.SecretValue()
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# see: https://github.com/antonagestam/collectfast
AWS_PRELOAD_METADATA = True
INSTALLED_APPS += ('collectfast', )
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
AWS_HEADERS = {
'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY)
}
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# END STORAGE CONFIGURATION
# EMAIL
DEFAULT_FROM_EMAIL = values.Value('Blog API <[email protected]>')
EMAIL_HOST = values.Value('smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
EMAIL_SUBJECT_PREFIX = values.Value('[Blog API] ', environ_name="EMAIL_SUBJECT_PREFIX")
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
# END EMAIL
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
# END TEMPLATE CONFIGURATION
# CACHING
# Only do this here because thanks to django-pylibmc-sasl and pylibmc
# memcacheify is painful to install on windows.
try:
# See: https://github.com/rdegges/django-heroku-memcacheify
from memcacheify import memcacheify
CACHES = memcacheify()
except ImportError:
CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
# END CACHING
# Your production stuff: Below this line define 3rd party libary settings<|fim▁end|> | ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", ) |
<|file_name|>abc.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
'''
:author: Patrick Lauer
This class holds the Artificial Bee Colony(ABC) algorithm, based on Karaboga (2007):
D. Karaboga, AN IDEA BASED ON HONEY BEE SWARM FOR NUMERICAL OPTIMIZATION,TECHNICAL REPORT-TR06, Erciyes University, Engineering Faculty, Computer Engineering Department 2005.
D. Karaboga, B. Basturk, A powerful and Efficient Algorithm for Numerical Function Optimization: Artificial Bee Colony (ABC) Algorithm, Journal of Global Optimization, Volume:39, Issue:3,pp:459-171, November 2007,ISSN:0925-5001 , doi: 10.1007/s10898-007-9149-x
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from . import _algorithm
import spotpy
import numpy as np
import time
import random
import itertools
class abc(_algorithm):
'''
Implements the ABC algorithm from Karaboga (2007).
Input
----------
spot_setup: class
model: function
Should be callable with a parameter combination of the parameter-function
and return a list of simulation results (same length as the evaluation list)
parameter: function
When called, it should return a random parameter combination. Which can
be e.g. uniform or Gaussian
objectivefunction: function
Should return the objectivefunction value for a given list of model simulations and
observations.
evaluation: function
Should return the true values as returned by the model.
dbname: str
* Name of the database where parameter, objectivefunction value and simulation results will be saved.
dbformat: str
* ram: fast suited for short sampling time. no file will be created and results are saved in an array.
* csv: A csv file will be created, which you can import afterwards.
parallel: str
* seq: Sequentiel sampling (default): Normal iterations on one core of your cpu.
* mpc: Multi processing: Iterations on all available cores on your cpu (recommended for windows os).
* mpi: Message Passing Interface: Parallel computing on cluster pcs (recommended for unix os).
save_sim: boolean
*True: Simulation results will be saved
*False: Simulation results will not be saved
'''
def __init__(self, spot_setup, dbname=None, dbformat=None, parallel='seq',save_sim=True):
_algorithm.__init__(self,spot_setup, dbname=dbname, dbformat=dbformat, parallel=parallel,save_sim=save_sim)
def simulate(self,id_params_tuple):
id,params = id_params_tuple
simulations=self.model(params)
return id,params,simulations
def sample(self,repetitions,eb=48,a=(1/10),peps=0.0001,ownlimit=False,limit=24):
"""
Parameters
----------
repetitions: int
maximum number of function evaluations allowed during optimization
eb: int
number of employed bees (half of population size)
a: float
mutation factor
peps: float
Convergence criterium
ownlimit: boolean
determines whether a user-defined limit is set or not
limit: int
sets the limit
"""
#Initialize the Progress bar
starttime = time.time()
intervaltime = starttime
#Initialize ABC parameters:
randompar=self.parameter()['random']
self.nopt=randompar.size
random.seed()
if ownlimit == True:
self.limit=limit
else:
self.limit=eb
lb,ub=self.parameter()['minbound'],self.parameter()['maxbound']
#Initialization
work=[]
#Calculate the objective function
param_generator = ((rep,list(self.parameter()['random'])) for rep in range(eb))
for rep,randompar,simulations in self.repeat(param_generator):
#Calculate fitness
like = self.objectivefunction(evaluation = self.evaluation, simulation = simulations)
self.status(rep,like,randompar)
#Save everything in the database
self.datawriter.save(like,randompar,simulations=simulations)<|fim▁hole|> work.append([like,randompar,like,randompar,c,p])#(fit_x,x,fit_v,v,limit,normalized fitness)
#Progress bar
acttime=time.time()
#get str showing approximate timeleft to end of simulation in H, M, S
timestr = time.strftime("%H:%M:%S", time.gmtime(round(((acttime-starttime)/
(rep + 1))*(repetitions-(rep + 1 )))))
#Refresh progressbar every second
if acttime-intervaltime>=2:
text='%i of %i (best like=%g) est. time remaining: %s' % (rep,repetitions,
self.status.objectivefunction,timestr)
print(text)
intervaltime=time.time()
icall=0
gnrng=1e100
while icall<repetitions and gnrng>peps: #and criter_change>pcento:
psum=0
#Employed bee phase
#Generate new input parameters
for i,val in enumerate(work):
k=i
while k==i: k=random.randint(0,(eb-1))
j=random.randint(0,(self.nopt-1))
work[i][3][j]=work[i][1][j]+random.uniform(-a,a)*(work[i][1][j]-work[k][1][j])
if work[i][3][j]<lb[j]: work[i][3][j]=lb[j]
if work[i][3][j]>ub[j]: work[i][3][j]=ub[j]
'''
#Scout bee phase
if work[i][4] >= self.limit:
work[i][3]=self.parameter()['random']
work[i][4]=0
'''
#Calculate the objective function
param_generator = ((rep,work[rep][3]) for rep in range(eb))
for rep,randompar,simulations in self.repeat(param_generator):
#Calculate fitness
clike = self.objectivefunction(evaluation = self.evaluation, simulation = simulations)
if clike > work[rep][0]:
work[rep][1]=work[rep][3]
work[rep][0]=clike
work[rep][4]=0
else:
work[rep][4]=work[rep][4]+1
self.status(rep,work[rep][0],work[rep][1])
self.datawriter.save(clike,work[rep][3],simulations=simulations,chains=icall)
icall += 1
#Probability distribution for roulette wheel selection
bn=[]
for i,val in enumerate(work):
psum=psum+(1/work[i][0])
for i,val in enumerate(work):
work[i][5]=((1/work[i][0])/psum)
bn.append(work[i][5])
bounds = np.cumsum(bn)
#Onlooker bee phase
#Roulette wheel selection
for i,val in enumerate(work):
pn=random.uniform(0,1)
k=i
while k==i:
k=random.randint(0,eb-1)
for t,vol in enumerate(bounds):
if bounds[t]-pn>=0:
z=t
break
j=random.randint(0,(self.nopt-1))
#Generate new input parameters
work[i][3][j]=work[z][1][j]+random.uniform(-a,a)*(work[z][1][j]-work[k][1][j])
if work[i][3][j]<lb[j]: work[i][3][j]=lb[j]
if work[i][3][j]>ub[j]: work[i][3][j]=ub[j]
#Calculate the objective function
param_generator = ((rep,work[rep][3]) for rep in range(eb))
for rep,randompar,simulations in self.repeat(param_generator):
#Calculate fitness
clike = self.objectivefunction(evaluation = self.evaluation, simulation = simulations)
if clike > work[rep][0]:
work[rep][1]=work[rep][3]
work[rep][0]=clike
work[rep][4]=0
else:
work[rep][4]=work[rep][4]+1
self.status(rep,work[rep][0],work[rep][1])
self.datawriter.save(clike,work[rep][3],simulations=simulations,chains=icall)
icall += 1
#Scout bee phase
for i,val in enumerate(work):
if work[i][4] >= self.limit:
work[i][1]=self.parameter()['random']
work[i][4]=0
t,work[i][0],simulations=self.simulate((icall,work[i][1]))
clike = self.objectivefunction(evaluation = self.evaluation, simulation = simulations)
self.datawriter.save(clike,work[rep][3],simulations=simulations,chains=icall)
work[i][0]=clike
icall += 1
gnrng=-self.status.objectivefunction
text='%i of %i (best like=%g) est. time remaining: %s' % (icall,repetitions,self.status.objectivefunction,timestr)
print(text)
if icall >= repetitions:
print('*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT')
print('ON THE MAXIMUM NUMBER OF TRIALS ')
print(repetitions)
print('HAS BEEN EXCEEDED.')
if gnrng < peps:
print('THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE')
print('Best parameter set:')
print(self.status.params)
text='Duration:'+str(round((acttime-starttime),2))+' s'
print(-self.status.objectivefunction)
print(icall)
try:
self.datawriter.finalize()
except AttributeError: #Happens if no database was assigned
pass<|fim▁end|> | c=0
p=0 |
<|file_name|>test_config_scope.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# coding=utf-8
import pytest
import sacred.optional as opt
from sacred.config.config_scope import (
ConfigScope,
dedent_function_body,
dedent_line,
get_function_body,
is_empty_or_comment,
)
from sacred.config.custom_containers import DogmaticDict, DogmaticList
@pytest.fixture
def conf_scope():
@ConfigScope
def cfg():
# description for a
a = 1
# description for b and c
b, c = 2.0, True
# d and dd are both strings
d = dd = "string"
e = [1, 2, 3] # inline description for e
f = {"a": "b", "c": "d"}
composit1 = a + b
# pylint: this comment is filtered out
composit2 = f["c"] + "ada"
func1 = lambda: 23
deriv = func1()
def func2(a):
return "Nothing to report" + a
some_type = int
cfg()
return cfg
def test_result_of_config_scope_is_dict(conf_scope):
cfg = conf_scope()
assert isinstance(cfg, dict)
def test_result_of_config_scope_contains_keys(conf_scope):
cfg = conf_scope()
assert set(cfg.keys()) == {
"a",
"b",
"c",
"d",
"dd",
"e",
"f",
"composit1",
"composit2",
"deriv",
"func1",
"func2",
"some_type",
}
assert cfg["a"] == 1
assert cfg["b"] == 2.0
assert cfg["c"]
assert cfg["d"] == "string"
assert cfg["dd"] == "string"
assert cfg["e"] == [1, 2, 3]
assert cfg["f"] == {"a": "b", "c": "d"}
assert cfg["composit1"] == 3.0
assert cfg["composit2"] == "dada"
assert cfg["func1"]() == 23
assert cfg["func2"](", sir!") == "Nothing to report, sir!"
assert cfg["some_type"] == int
assert cfg["deriv"] == 23
def test_fixing_values(conf_scope):
cfg = conf_scope({"a": 100})
assert cfg["a"] == 100
assert cfg["composit1"] == 102.0
def test_fixing_nested_dicts(conf_scope):
cfg = conf_scope({"f": {"c": "t"}})
assert cfg["f"]["a"] == "b"
assert cfg["f"]["c"] == "t"
assert cfg["composit2"] == "tada"
def test_adding_values(conf_scope):
cfg = conf_scope({"g": 23, "h": {"i": 10}})
assert cfg["g"] == 23
assert cfg["h"] == {"i": 10}
assert cfg.added == {"g", "h", "h.i"}
def test_typechange(conf_scope):
cfg = conf_scope({"a": "bar", "b": "foo", "c": 1})
assert cfg.typechanged == {
"a": (int, type("bar")),
"b": (float, type("foo")),
"c": (bool, int),
}
def test_nested_typechange(conf_scope):
cfg = conf_scope({"f": {"a": 10}})
assert cfg.typechanged == {"f.a": (type("a"), int)}
def test_config_docs(conf_scope):
cfg = conf_scope()
assert cfg.docs == {
"a": "description for a",
"b": "description for b and c",
"c": "description for b and c",
"d": "d and dd are both strings",
"dd": "d and dd are both strings",
"e": "inline description for e",
"seed": "the random seed for this experiment",
}
def is_dogmatic(a):
if isinstance(a, (DogmaticDict, DogmaticList)):
return True
elif isinstance(a, dict):
return any(is_dogmatic(v) for v in a.values())
elif isinstance(a, (list, tuple)):
return any(is_dogmatic(v) for v in a)
def test_conf_scope_is_not_dogmatic(conf_scope):
assert not is_dogmatic(conf_scope({"e": [1, 1, 1]}))
@pytest.mark.skipif(not opt.has_numpy, reason="requires numpy")
def test_conf_scope_handles_numpy_bools():
@ConfigScope
def conf_scope():
a = opt.np.bool_(1)
cfg = conf_scope()
assert "a" in cfg
assert cfg["a"]
def test_conf_scope_can_access_preset():
@ConfigScope
def conf_scope(a):
answer = 2 * a
cfg = conf_scope(preset={"a": 21})
assert cfg["answer"] == 42
def test_conf_scope_contains_presets():
@ConfigScope
def conf_scope(a):
answer = 2 * a
cfg = conf_scope(preset={"a": 21, "unrelated": True})
assert set(cfg.keys()) == {"a", "answer", "unrelated"}
assert cfg["a"] == 21
assert cfg["answer"] == 42
assert cfg["unrelated"] is True
def test_conf_scope_cannot_access_undeclared_presets():
@ConfigScope
def conf_scope():
answer = 2 * a
with pytest.raises(NameError):
conf_scope(preset={"a": 21})
def test_conf_scope_can_access_fallback():
@ConfigScope
def conf_scope(a):
answer = 2 * a
cfg = conf_scope(fallback={"a": 21})
assert cfg["answer"] == 42
def test_conf_scope_does_not_contain_fallback():
@ConfigScope
def conf_scope(a):
answer = 2 * a
cfg = conf_scope(fallback={"a": 21, "b": 10})
assert set(cfg.keys()) == {"answer"}
def test_conf_scope_cannot_access_undeclared_fallback():
@ConfigScope
def conf_scope():
answer = 2 * a
with pytest.raises(NameError):
conf_scope(fallback={"a": 21})
def test_conf_scope_can_access_fallback_and_preset():
@ConfigScope
def conf_scope(a, b):
answer = a + b
cfg = conf_scope(preset={"b": 40}, fallback={"a": 2})
assert cfg["answer"] == 42
def test_conf_raises_for_unaccessible_arguments():
@ConfigScope
def conf_scope(a, b, c):<|fim▁hole|> with pytest.raises(KeyError):
conf_scope(preset={"a": 1}, fallback={"b": 2})
def test_can_access_globals_from_original_scope():
from .enclosed_config_scope import cfg as conf_scope
cfg = conf_scope()
assert set(cfg.keys()) == {"answer"}
assert cfg["answer"] == 42
SEVEN = 7
def test_cannot_access_globals_from_calling_scope():
from .enclosed_config_scope import cfg2 as conf_scope
with pytest.raises(NameError):
conf_scope() # would require SEVEN
def test_fixed_subentry_of_preset():
@ConfigScope
def conf_scope():
pass
cfg = conf_scope(preset={"d": {"a": 1, "b": 2}}, fixed={"d": {"a": 10}})
assert set(cfg.keys()) == {"d"}
assert set(cfg["d"].keys()) == {"a", "b"}
assert cfg["d"]["a"] == 10
assert cfg["d"]["b"] == 2
# fmt: off
@pytest.mark.parametrize("line,indent,expected", [
(' a=5', ' ', 'a=5'),
(' a=5', ' ', 'a=5'),
('a=5', ' ', 'a=5'),
(' a=5', ' ', ' a=5'),
(' a=5', '', ' a=5'),
(' a=5', '\t', ' a=5'),
(' a=5', ' ', 'a=5'),
(' a=5', ' \t', ' a=5')
])
def test_dedent_line(line, indent, expected):
assert dedent_line(line, indent) == expected
@pytest.mark.parametrize("line,expected", [
('', True),
(' ', True),
('\n', True),
(' \n', True),
(' \t \n', True),
('#comment', True),
(' #comment', True),
(' a=5 # not comment', False),
('a=5', False),
('"""', False),
("'''", False)
])
def test_is_empty_or_comment(line, expected):
assert is_empty_or_comment(line) == expected
def evil_indentation_func(a,
b,
c, d): # test comment
# Lets do the most evil things with indentation
# 1
# 2
# ran
""" and also in the docstring
atrne
uiaeue
utdr
"""
alpha = 0.1
d = ('even', 'more',
'evilness')
wat = """ multi
line
strings
"""
# another comment
# this one is ok
# madness
foo=12
def subfunc():
return 23
body = '''# Lets do the most evil things with indentation
# 1
# 2
# ran
""" and also in the docstring
atrne
uiaeue
utdr
"""
alpha = 0.1
d = ('even', 'more',
'evilness')
wat = """ multi
line
strings
"""
# another comment
# this one is ok
# madness
foo=12
def subfunc():
return 23
'''
dedented_body = '''# Lets do the most evil things with indentation
# 1
# 2
# ran
""" and also in the docstring
atrne
uiaeue
utdr
"""
alpha = 0.1
d = ('even', 'more',
'evilness')
wat = """ multi
line
strings
"""
# another comment
# this one is ok
# madness
foo=12
def subfunc():
return 23
'''
# fmt: on
def test_dedent_body():
assert dedent_function_body(body) == dedented_body
def test_get_function_body():
func_body, line_offset = get_function_body(evil_indentation_func)
assert func_body == body
def test_config_scope_can_deal_with_indentation_madness():
# assert_no_raise:
ConfigScope(evil_indentation_func)<|fim▁end|> | answer = 42
|
<|file_name|>test_stacking.py<|end_file_name|><|fim▁begin|>"""Test the stacking classifier and regressor."""
# Authors: Guillaume Lemaitre <[email protected]>
# License: BSD 3 clause
import pytest
import numpy as np
import scipy.sparse as sparse
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.base import RegressorMixin
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
from sklearn.datasets import load_iris
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import make_regression
from sklearn.datasets import make_classification
from sklearn.dummy import DummyClassifier
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.svm import LinearSVC
from sklearn.svm import LinearSVR
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import scale
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import StackingRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn.utils._mocking import CheckingClassifier
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_allclose_dense_sparse
from sklearn.utils._testing import ignore_warnings
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_no_attributes_set_in_init
X_diabetes, y_diabetes = load_diabetes(return_X_y=True)
X_iris, y_iris = load_iris(return_X_y=True)
@pytest.mark.parametrize(
"cv", [3, StratifiedKFold(n_splits=3, shuffle=True, random_state=42)]
)
@pytest.mark.parametrize(
"final_estimator", [None, RandomForestClassifier(random_state=42)]
)
@pytest.mark.parametrize("passthrough", [False, True])
def test_stacking_classifier_iris(cv, final_estimator, passthrough):
# prescale the data to avoid convergence warning without using a pipeline
# for later assert
X_train, X_test, y_train, y_test = train_test_split(
scale(X_iris), y_iris, stratify=y_iris, random_state=42
)
estimators = [('lr', LogisticRegression()), ('svc', LinearSVC())]
clf = StackingClassifier(
estimators=estimators, final_estimator=final_estimator, cv=cv,
passthrough=passthrough
)
clf.fit(X_train, y_train)
clf.predict(X_test)
clf.predict_proba(X_test)
assert clf.score(X_test, y_test) > 0.8
X_trans = clf.transform(X_test)
expected_column_count = 10 if passthrough else 6
assert X_trans.shape[1] == expected_column_count
if passthrough:
assert_allclose(X_test, X_trans[:, -4:])
clf.set_params(lr='drop')
clf.fit(X_train, y_train)
clf.predict(X_test)
clf.predict_proba(X_test)
if final_estimator is None:
# LogisticRegression has decision_function method
clf.decision_function(X_test)
X_trans = clf.transform(X_test)
expected_column_count_drop = 7 if passthrough else 3
assert X_trans.shape[1] == expected_column_count_drop
if passthrough:
assert_allclose(X_test, X_trans[:, -4:])
def test_stacking_classifier_drop_column_binary_classification():
# check that a column is dropped in binary classification
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, _ = train_test_split(
scale(X), y, stratify=y, random_state=42
)
# both classifiers implement 'predict_proba' and will both drop one column
estimators = [('lr', LogisticRegression()),
('rf', RandomForestClassifier(random_state=42))]
clf = StackingClassifier(estimators=estimators, cv=3)
clf.fit(X_train, y_train)
X_trans = clf.transform(X_test)
assert X_trans.shape[1] == 2
# LinearSVC does not implement 'predict_proba' and will not drop one column
estimators = [('lr', LogisticRegression()), ('svc', LinearSVC())]
clf.set_params(estimators=estimators)
clf.fit(X_train, y_train)
X_trans = clf.transform(X_test)
assert X_trans.shape[1] == 2
def test_stacking_classifier_drop_estimator():
# prescale the data to avoid convergence warning without using a pipeline
# for later assert
X_train, X_test, y_train, _ = train_test_split(
scale(X_iris), y_iris, stratify=y_iris, random_state=42
)
estimators = [('lr', 'drop'), ('svc', LinearSVC(random_state=0))]
rf = RandomForestClassifier(n_estimators=10, random_state=42)
clf = StackingClassifier(
estimators=[('svc', LinearSVC(random_state=0))],
final_estimator=rf, cv=5
)
clf_drop = StackingClassifier(
estimators=estimators, final_estimator=rf, cv=5
)
clf.fit(X_train, y_train)
clf_drop.fit(X_train, y_train)
assert_allclose(clf.predict(X_test), clf_drop.predict(X_test))
assert_allclose(clf.predict_proba(X_test), clf_drop.predict_proba(X_test))
assert_allclose(clf.transform(X_test), clf_drop.transform(X_test))
def test_stacking_regressor_drop_estimator():
# prescale the data to avoid convergence warning without using a pipeline
# for later assert
X_train, X_test, y_train, _ = train_test_split(
scale(X_diabetes), y_diabetes, random_state=42
)
estimators = [('lr', 'drop'), ('svr', LinearSVR(random_state=0))]
rf = RandomForestRegressor(n_estimators=10, random_state=42)
reg = StackingRegressor(
estimators=[('svr', LinearSVR(random_state=0))],
final_estimator=rf, cv=5
)
reg_drop = StackingRegressor(
estimators=estimators, final_estimator=rf, cv=5
)
reg.fit(X_train, y_train)
reg_drop.fit(X_train, y_train)
assert_allclose(reg.predict(X_test), reg_drop.predict(X_test))
assert_allclose(reg.transform(X_test), reg_drop.transform(X_test))
@pytest.mark.parametrize(
"cv", [3, KFold(n_splits=3, shuffle=True, random_state=42)]
)
@pytest.mark.parametrize(
"final_estimator, predict_params",
[(None, {}),
(RandomForestRegressor(random_state=42), {}),
(DummyRegressor(), {'return_std': True})]
)
@pytest.mark.parametrize("passthrough", [False, True])
def test_stacking_regressor_diabetes(cv, final_estimator, predict_params,
passthrough):
# prescale the data to avoid convergence warning without using a pipeline
# for later assert
X_train, X_test, y_train, _ = train_test_split(
scale(X_diabetes), y_diabetes, random_state=42
)
estimators = [('lr', LinearRegression()), ('svr', LinearSVR())]
reg = StackingRegressor(
estimators=estimators, final_estimator=final_estimator, cv=cv,
passthrough=passthrough
)
reg.fit(X_train, y_train)
result = reg.predict(X_test, **predict_params)
expected_result_length = 2 if predict_params else 1
if predict_params:
assert len(result) == expected_result_length
X_trans = reg.transform(X_test)
expected_column_count = 12 if passthrough else 2
assert X_trans.shape[1] == expected_column_count
if passthrough:
assert_allclose(X_test, X_trans[:, -10:])
reg.set_params(lr='drop')
reg.fit(X_train, y_train)
reg.predict(X_test)
X_trans = reg.transform(X_test)
expected_column_count_drop = 11 if passthrough else 1
assert X_trans.shape[1] == expected_column_count_drop
if passthrough:
assert_allclose(X_test, X_trans[:, -10:])
@pytest.mark.parametrize('fmt', ['csc', 'csr', 'coo'])
def test_stacking_regressor_sparse_passthrough(fmt):
# Check passthrough behavior on a sparse X matrix
X_train, X_test, y_train, _ = train_test_split(
sparse.coo_matrix(scale(X_diabetes)).asformat(fmt),
y_diabetes, random_state=42
)
estimators = [('lr', LinearRegression()), ('svr', LinearSVR())]
rf = RandomForestRegressor(n_estimators=10, random_state=42)
clf = StackingRegressor(
estimators=estimators, final_estimator=rf, cv=5, passthrough=True
)
clf.fit(X_train, y_train)
X_trans = clf.transform(X_test)
assert_allclose_dense_sparse(X_test, X_trans[:, -10:])
assert sparse.issparse(X_trans)
assert X_test.format == X_trans.format
@pytest.mark.parametrize('fmt', ['csc', 'csr', 'coo'])
def test_stacking_classifier_sparse_passthrough(fmt):
# Check passthrough behavior on a sparse X matrix
X_train, X_test, y_train, _ = train_test_split(
sparse.coo_matrix(scale(X_iris)).asformat(fmt),
y_iris, random_state=42
)
estimators = [('lr', LogisticRegression()), ('svc', LinearSVC())]
rf = RandomForestClassifier(n_estimators=10, random_state=42)
clf = StackingClassifier(
estimators=estimators, final_estimator=rf, cv=5, passthrough=True
)
clf.fit(X_train, y_train)
X_trans = clf.transform(X_test)
assert_allclose_dense_sparse(X_test, X_trans[:, -4:])
assert sparse.issparse(X_trans)
assert X_test.format == X_trans.format
def test_stacking_classifier_drop_binary_prob():
# check that classifier will drop one of the probability column for
# binary classification problem
# Select only the 2 first classes
X_, y_ = scale(X_iris[:100]), y_iris[:100]
estimators = [
('lr', LogisticRegression()), ('rf', RandomForestClassifier())
]
clf = StackingClassifier(estimators=estimators)
clf.fit(X_, y_)
X_meta = clf.transform(X_)
assert X_meta.shape[1] == 2
class NoWeightRegressor(RegressorMixin, BaseEstimator):
def fit(self, X, y):
self.reg = DummyRegressor()
return self.reg.fit(X, y)
def predict(self, X):
return np.ones(X.shape[0])
class NoWeightClassifier(ClassifierMixin, BaseEstimator):
def fit(self, X, y):
self.clf = DummyClassifier(strategy='stratified')
return self.clf.fit(X, y)
@pytest.mark.parametrize(
"y, params, type_err, msg_err",
[(y_iris,
{'estimators': None},
ValueError, "Invalid 'estimators' attribute,"),
(y_iris,
{'estimators': []},
ValueError, "Invalid 'estimators' attribute,"),
(y_iris,
{'estimators': [('lr', LogisticRegression()),
('svm', SVC(max_iter=5e4))],
'stack_method': 'predict_proba'},
ValueError, 'does not implement the method predict_proba'),
(y_iris,
{'estimators': [('lr', LogisticRegression()),
('cor', NoWeightClassifier())]},
TypeError, 'does not support sample weight'),
(y_iris,
{'estimators': [('lr', LogisticRegression()),
('cor', LinearSVC(max_iter=5e4))],
'final_estimator': NoWeightClassifier()},
TypeError, 'does not support sample weight')]
)
def test_stacking_classifier_error(y, params, type_err, msg_err):
with pytest.raises(type_err, match=msg_err):
clf = StackingClassifier(**params, cv=3)
clf.fit(
scale(X_iris), y, sample_weight=np.ones(X_iris.shape[0])
)
@pytest.mark.parametrize(
"y, params, type_err, msg_err",
[(y_diabetes,
{'estimators': None},
ValueError, "Invalid 'estimators' attribute,"),
(y_diabetes,
{'estimators': []},
ValueError, "Invalid 'estimators' attribute,"),
(y_diabetes,
{'estimators': [('lr', LinearRegression()),
('cor', NoWeightRegressor())]},
TypeError, 'does not support sample weight'),
(y_diabetes,
{'estimators': [('lr', LinearRegression()),
('cor', LinearSVR())],
'final_estimator': NoWeightRegressor()},
TypeError, 'does not support sample weight')]
)
def test_stacking_regressor_error(y, params, type_err, msg_err):
with pytest.raises(type_err, match=msg_err):
reg = StackingRegressor(**params, cv=3)
reg.fit(
scale(X_diabetes), y, sample_weight=np.ones(X_diabetes.shape[0])
)
@pytest.mark.parametrize(
"estimator, X, y",
[(StackingClassifier(
estimators=[('lr', LogisticRegression(random_state=0)),
('svm', LinearSVC(random_state=0))]),
X_iris[:100], y_iris[:100]), # keep only classes 0 and 1
(StackingRegressor(
estimators=[('lr', LinearRegression()),
('svm', LinearSVR(random_state=0))]),
X_diabetes, y_diabetes)],
ids=['StackingClassifier', 'StackingRegressor']
)
def test_stacking_randomness(estimator, X, y):
# checking that fixing the random state of the CV will lead to the same
# results
estimator_full = clone(estimator)
estimator_full.set_params(
cv=KFold(shuffle=True, random_state=np.random.RandomState(0))
)
estimator_drop = clone(estimator)
estimator_drop.set_params(lr='drop')
estimator_drop.set_params(
cv=KFold(shuffle=True, random_state=np.random.RandomState(0))
)
assert_allclose(
estimator_full.fit(X, y).transform(X)[:, 1:],
estimator_drop.fit(X, y).transform(X)
)
# These warnings are raised due to _BaseComposition
@pytest.mark.filterwarnings("ignore:TypeError occurred during set_params")
@pytest.mark.filterwarnings("ignore:Estimator's parameters changed after")
@pytest.mark.parametrize(
"estimator",
[StackingClassifier(
estimators=[('lr', LogisticRegression(random_state=0)),
('tree', DecisionTreeClassifier(random_state=0))]),
StackingRegressor(
estimators=[('lr', LinearRegression()),
('tree', DecisionTreeRegressor(random_state=0))])],<|fim▁hole|> ids=['StackingClassifier', 'StackingRegressor']
)
def test_check_estimators_stacking_estimator(estimator):
check_estimator(estimator)
check_no_attributes_set_in_init(estimator.__class__.__name__, estimator)
def test_stacking_classifier_stratify_default():
# check that we stratify the classes for the default CV
clf = StackingClassifier(
estimators=[('lr', LogisticRegression(max_iter=1e4)),
('svm', LinearSVC(max_iter=1e4))]
)
# since iris is not shuffled, a simple k-fold would not contain the
# 3 classes during training
clf.fit(X_iris, y_iris)
@pytest.mark.parametrize(
"stacker, X, y",
[(StackingClassifier(
estimators=[('lr', LogisticRegression()),
('svm', LinearSVC(random_state=42))],
final_estimator=LogisticRegression(),
cv=KFold(shuffle=True, random_state=42)),
*load_breast_cancer(return_X_y=True)),
(StackingRegressor(
estimators=[('lr', LinearRegression()),
('svm', LinearSVR(random_state=42))],
final_estimator=LinearRegression(),
cv=KFold(shuffle=True, random_state=42)),
X_diabetes, y_diabetes)],
ids=['StackingClassifier', 'StackingRegressor']
)
def test_stacking_with_sample_weight(stacker, X, y):
    # check that sample weights have an influence on the fitting
    # note: ConvergenceWarnings are caught since we are not worried about
    # convergence here
n_half_samples = len(y) // 2
total_sample_weight = np.array(
[0.1] * n_half_samples + [0.9] * (len(y) - n_half_samples)
)
X_train, X_test, y_train, _, sample_weight_train, _ = train_test_split(
X, y, total_sample_weight, random_state=42
)
with ignore_warnings(category=ConvergenceWarning):
stacker.fit(X_train, y_train)
y_pred_no_weight = stacker.predict(X_test)
with ignore_warnings(category=ConvergenceWarning):
stacker.fit(X_train, y_train, sample_weight=np.ones(y_train.shape))
y_pred_unit_weight = stacker.predict(X_test)
assert_allclose(y_pred_no_weight, y_pred_unit_weight)
with ignore_warnings(category=ConvergenceWarning):
stacker.fit(X_train, y_train, sample_weight=sample_weight_train)
y_pred_biased = stacker.predict(X_test)
assert np.abs(y_pred_no_weight - y_pred_biased).sum() > 0
def test_stacking_classifier_sample_weight_fit_param():
# check sample_weight is passed to all invocations of fit
stacker = StackingClassifier(
estimators=[
('lr', CheckingClassifier(expected_fit_params=['sample_weight']))
],
final_estimator=CheckingClassifier(
expected_fit_params=['sample_weight']
)
)
stacker.fit(X_iris, y_iris, sample_weight=np.ones(X_iris.shape[0]))
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
@pytest.mark.parametrize(
"stacker, X, y",
[(StackingClassifier(
estimators=[('lr', LogisticRegression()),
('svm', LinearSVC(random_state=42))],
final_estimator=LogisticRegression()),
*load_breast_cancer(return_X_y=True)),
(StackingRegressor(
estimators=[('lr', LinearRegression()),
('svm', LinearSVR(random_state=42))],
final_estimator=LinearRegression()),
X_diabetes, y_diabetes)],
ids=['StackingClassifier', 'StackingRegressor']
)
def test_stacking_cv_influence(stacker, X, y):
# check that the stacking affects the fit of the final estimator but not
# the fit of the base estimators
    # note: ConvergenceWarnings are caught since we are not worried about
    # convergence here
stacker_cv_3 = clone(stacker)
stacker_cv_5 = clone(stacker)
stacker_cv_3.set_params(cv=3)
stacker_cv_5.set_params(cv=5)
stacker_cv_3.fit(X, y)
stacker_cv_5.fit(X, y)
# the base estimators should be identical
for est_cv_3, est_cv_5 in zip(stacker_cv_3.estimators_,
stacker_cv_5.estimators_):
assert_allclose(est_cv_3.coef_, est_cv_5.coef_)
# the final estimator should be different
with pytest.raises(AssertionError, match='Not equal'):
assert_allclose(stacker_cv_3.final_estimator_.coef_,
stacker_cv_5.final_estimator_.coef_)
@pytest.mark.parametrize("make_dataset, Stacking, Estimator", [
(make_classification, StackingClassifier, LogisticRegression),
(make_regression, StackingRegressor, LinearRegression)
])
def test_stacking_without_n_features_in(make_dataset, Stacking, Estimator):
# Stacking supports estimators without `n_features_in_`. Regression test
# for #17353
class MyEstimator(Estimator):
"""Estimator without n_features_in_"""
def fit(self, X, y):
super().fit(X, y)
del self.n_features_in_
X, y = make_dataset(random_state=0, n_samples=100)
stacker = Stacking(estimators=[('lr', MyEstimator())])
msg = f"{Stacking.__name__} object has no attribute n_features_in_"
with pytest.raises(AttributeError, match=msg):
stacker.n_features_in_
# Does not raise
stacker.fit(X, y)
msg = "'MyEstimator' object has no attribute 'n_features_in_'"
with pytest.raises(AttributeError, match=msg):
stacker.n_features_in_<|fim▁end|> | |
<|file_name|>restoration_status.rs<|end_file_name|><|fim▁begin|>// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Restoration status type definition
/// Statuses for restorations.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
#[cfg_attr(feature = "ipc", binary)]
pub enum RestorationStatus {
/// No restoration.
Inactive,
/// Ongoing restoration.
Ongoing {
/// Total number of state chunks.
state_chunks: u32,
/// Total number of block chunks.
block_chunks: u32,
/// Number of state chunks completed.
state_chunks_done: u32,
/// Number of block chunks completed.
block_chunks_done: u32,
},
/// Failed restoration.
Failed,<|fim▁hole|><|fim▁end|> | } |
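// Usage sketch for the enum above (illustrative; not part of the original file).
// It shows how a caller might turn an `Ongoing` status into a progress ratio;
// the formula is the editor's assumption, the field names come from the enum.
fn restoration_progress(status: &RestorationStatus) -> Option<f32> {
	match *status {
		RestorationStatus::Ongoing { state_chunks, block_chunks, state_chunks_done, block_chunks_done } => {
			let total = (state_chunks + block_chunks) as f32;
			if total == 0.0 {
				None
			} else {
				Some((state_chunks_done + block_chunks_done) as f32 / total)
			}
		}
		_ => None,
	}
}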
<|file_name|>wordcount.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
from __future__ import print_function
from collections import Counter
from operator import itemgetter
import os
_path = os.path.abspath(os.path.dirname(__file__))
SOURCE = os.path.join(_path, 'poems_for_wordcount.txt')
DESTINATION = os.path.join(_path, 'poem_words_out.txt')
def sort_word_counts(word_dict):
# first sort to get k by alpha
sorted_by_key = sorted(word_dict.items(), key=itemgetter(0))
# then reverse sort on number of occurrences (v) to get list in desc order
return sorted(sorted_by_key, key=itemgetter(1), reverse=1)
def main():
    with open(SOURCE, 'r') as source, open(DESTINATION, 'w') as destination:
word_counts = Counter(source.read().lower().split())
for item in sort_word_counts(word_counts):
print("{} {}".format(*item), file=destination)
def test_sort_word_counts():
word_list = 'you watch the brown fox jumped over the fence'.split()
word_counts = Counter(word_list)
sorted_list = sort_word_counts(word_counts)
assert sorted_list[0][0] == 'the'
assert sorted_list[1][0] == 'brown'
assert sorted_list[-1][0] == 'you'
def test_output():
main()
    output = open(DESTINATION, 'r').readlines()
word, count = output[0].split()
assert len(output) == 3518
assert word == 'the'
assert int(count) == 1085
<|fim▁hole|>
if __name__ == '__main__':
main()<|fim▁end|> | |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import url<|fim▁hole|>app_name = "producto"
urlpatterns = [
url(r'list-productos/$', list_products, name='list_products'),
]<|fim▁end|> |
from .views import list_products
|
<|file_name|>sidebar.js<|end_file_name|><|fim▁begin|>'use strict'
const React = require('react');
import {Sidebar} from 'react-semantify';
<|fim▁hole|> <a className="item">
Our company
</a>
<a className="item">
Dashboard
</a>
<a className="item">
My recipes
</a>
<a href="/logout" className="item right floated">
Logout
</a>
</Sidebar>
)
}
});
module.exports = UISidebar;<|fim▁end|> | const UISidebar = React.createClass({
render : function() {
return (
<Sidebar className="ui top sidebar menu push scale down" init={true}> |
<|file_name|>integration.test.ts<|end_file_name|><|fim▁begin|>// tslint:disable:deprecation
import { Integration } from '@sentry/types';
import { getIntegrationsToSetup } from '../../src/integration';
/** JSDoc */
class MockIntegration implements Integration {
public constructor(name: string) {
this.name = name;
}
public name: string;
public setupOnce(): void {
// noop
}
}
describe('getIntegrationsToSetup', () => {
it('works with empty array', () => {
const integrations = getIntegrationsToSetup({
integrations: [],
});
expect(integrations.map(i => i.name)).toEqual([]);
});
it('works with single item', () => {
const integrations = getIntegrationsToSetup({
integrations: [new MockIntegration('foo')],
});
expect(integrations.map(i => i.name)).toEqual(['foo']);
});
it('works with multiple items', () => {
const integrations = getIntegrationsToSetup({
integrations: [new MockIntegration('foo'), new MockIntegration('bar')],
});
expect(integrations.map(i => i.name)).toEqual(['foo', 'bar']);
});
it('filter duplicated items', () => {
const integrations = getIntegrationsToSetup({
integrations: [new MockIntegration('foo'), new MockIntegration('foo'), new MockIntegration('bar')],
});
expect(integrations.map(i => i.name)).toEqual(['foo', 'bar']);
});
it('filter duplicated items and always let first win', () => {
const first = new MockIntegration('foo');
(first as any).order = 'first';
const second = new MockIntegration('foo');
(second as any).order = 'second';
const integrations = getIntegrationsToSetup({
integrations: [first, second, new MockIntegration('bar')],
});
expect(integrations.map(i => i.name)).toEqual(['foo', 'bar']);
expect((integrations[0] as any).order).toEqual('first');
});
it('work with empty defaults', () => {
const integrations = getIntegrationsToSetup({
defaultIntegrations: [],
});
expect(integrations.map(i => i.name)).toEqual([]);
});
it('work with single defaults', () => {
const integrations = getIntegrationsToSetup({
defaultIntegrations: [new MockIntegration('foo')],
});
expect(integrations.map(i => i.name)).toEqual(['foo']);
});
it('work with multiple defaults', () => {
const integrations = getIntegrationsToSetup({
defaultIntegrations: [new MockIntegration('foo'), new MockIntegration('bar')],
});
expect(integrations.map(i => i.name)).toEqual(['foo', 'bar']);
});
it('work with user integrations and defaults and pick defaults first', () => {
const integrations = getIntegrationsToSetup({
defaultIntegrations: [new MockIntegration('foo')],
integrations: [new MockIntegration('bar')],
});
expect(integrations.map(i => i.name)).toEqual(['foo', 'bar']);
});
it('work with user integrations and defaults and filter duplicates', () => {
const integrations = getIntegrationsToSetup({
defaultIntegrations: [new MockIntegration('foo'), new MockIntegration('foo')],
integrations: [new MockIntegration('bar'), new MockIntegration('bar')],
});
expect(integrations.map(i => i.name)).toEqual(['foo', 'bar']);
});
it('user integrations override defaults', () => {<|fim▁hole|> const firstUser = new MockIntegration('foo');
(firstUser as any).order = 'firstUser';
const secondUser = new MockIntegration('bar');
(secondUser as any).order = 'secondUser';
const integrations = getIntegrationsToSetup({
defaultIntegrations: [firstDefault, secondDefault],
integrations: [firstUser, secondUser],
});
expect(integrations.map(i => i.name)).toEqual(['foo', 'bar']);
expect((integrations[0] as any).order).toEqual('firstUser');
expect((integrations[1] as any).order).toEqual('secondUser');
});
it('always moves Debug integration to the end of the list', () => {
let integrations = getIntegrationsToSetup({
defaultIntegrations: [new MockIntegration('Debug'), new MockIntegration('foo')],
integrations: [new MockIntegration('bar')],
});
expect(integrations.map(i => i.name)).toEqual(['foo', 'bar', 'Debug']);
integrations = getIntegrationsToSetup({
defaultIntegrations: [new MockIntegration('foo')],
integrations: [new MockIntegration('Debug'), new MockIntegration('bar')],
});
expect(integrations.map(i => i.name)).toEqual(['foo', 'bar', 'Debug']);
integrations = getIntegrationsToSetup({
defaultIntegrations: [new MockIntegration('Debug')],
integrations: [new MockIntegration('foo')],
});
expect(integrations.map(i => i.name)).toEqual(['foo', 'Debug']);
});
});<|fim▁end|> | const firstDefault = new MockIntegration('foo');
(firstDefault as any).order = 'firstDefault';
const secondDefault = new MockIntegration('bar');
(secondDefault as any).order = 'secondDefault'; |
<|file_name|>common.js<|end_file_name|><|fim▁begin|>$.fn.onEnterKey =
function( closure ) {<|fim▁hole|>
if (code == 13) {
closure();
return false;
}
} );
}
String.prototype.trunc = String.prototype.trunc ||
function(n){
return this.length>n ? this.substr(0,n-1)+'…' : this;
};<|fim▁end|> | $(this).keypress(
function( event ) {
var code = event.keyCode ? event.keyCode : event.which; |
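// Usage sketch for the helpers above (illustrative; not part of the original file).
// The '#search' element and the handler body are hypothetical.
$(function() {
    $('#search').onEnterKey(function() {
        var query = $('#search').val().trunc(40); // ellipsize overly long input
        console.log('searching for', query);
    });
});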
<|file_name|>util.ts<|end_file_name|><|fim▁begin|>import * as $ from 'jquery';
import { Timestamp } from 'index';
export const endlessWaiting = new Promise<void>(() => {});
export function waitDOMContentLoaded(): Promise<void> {
return new Promise(resolve => {
switch (document.readyState) {
case 'interactive': case 'complete': { resolve(); break; }
default: {
window.addEventListener('DOMContentLoaded', () => resolve());
} break;
}
});
};
export function waitForSelector(selector: string): Promise<void> {
return new Promise(resolve => {
const i = setInterval(() => {
if ($(selector).length > 0) {
clearInterval(i);
resolve();
}
}, 100);
});
}
export function wait(ms: number): Promise<void> {
return new Promise(resolve => setTimeout(resolve, ms));
}
export function clearStyles(element: HTMLElement | Node) {
if ('jquery' in element) {
    throw new Error('`clearStyles` only accepts a DOM element as its argument.');
}
const ele = element as HTMLElement;
try {<|fim▁hole|> for (const image of Array.from(ele.querySelectorAll('img'))) {
image.removeAttribute('width');
image.removeAttribute('height');
image.removeAttribute('border');
}
for (const table of Array.from(ele.getElementsByTagName('table'))) {
table.removeAttribute('width');
table.removeAttribute('height');
}
} catch (e) {
    console.error('Failed to clean up styles.');
    console.error('The code that handles `article.content` is the likely suspect.');
    console.error('Element being cleaned:', ele);
    console.error('Error:', e);
}
return ele;
}
export function getQueryParam(param: string, search: string = location.search): string {
const searchParams = new URLSearchParams(search);
return searchParams.get(param) || '';
}
/**
 * Parses a string containing the created and last-modified dates and returns a Timestamp object.
 * This function assumes the created date appears before the modified date in the input string.
*/
export function parseTimestamp(text: string): Timestamp {
const timestamp: Timestamp = {};
const dateRegex = /(\d{4})[\.\s/\-]*(\d{1,2})[\.\s/\-]*(\d{1,2})\s*(?:(\d{1,2})[:\s]+(\d{1,2})(?:[:\s]+(\d{1,2}))?)/g;
const [createdText, lastModifiedText] = matchAll(text, dateRegex);
if (createdText) timestamp.created = parseTimestamp.getDate(createdText);
if (lastModifiedText) timestamp.lastModified = parseTimestamp.getDate(lastModifiedText);
return timestamp;
}
parseTimestamp.getDate = (fragments: string[]): Date => {
const [, year, month, date, hour, minute, second] = fragments;
const dateText = [year.padStart(4, '0'), month.padStart(2, '0'), date.padStart(2, '0')].join('-');
if (!hour) return new Date(dateText);
if (!second) return new Date(`${dateText}T${[hour, minute].map(t => t.padStart(2, '0')).join(':')}`);
return new Date(`${dateText}T${[hour, minute, second].map(t => t.padStart(2, '0')).join(':')}`);
};
export function matchAll(text: string, regex: RegExp) {
const result = [];
let match;
do {
match = regex.exec(text);
if (match) result.push(match);
} while (match);
return result;
}<|fim▁end|> | for (const child of Array.from(ele.querySelectorAll('*[style]'))) {
child.removeAttribute('style');
} |
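// Usage sketch for parseTimestamp above (illustrative; not part of the original file).
// The labels in the input string are hypothetical; only the two date/time groups matter,
// and per the documented assumption the first one becomes `created`.
const stamp = parseTimestamp('Created 2019.03.01 12:30 / Modified 2019.03.02 09:05');
console.log(stamp.created, stamp.lastModified); // -> two Date objects, created first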
<|file_name|>OidcSettings.ts<|end_file_name|><|fim▁begin|>import {Type} from "@tsed/core";
import {JwksKeyParameters} from "@tsed/jwks";
import {Configuration} from "oidc-provider";
import {OidcAccountsMethods} from "./OidcAccountsMethods";
export interface OidcSettings extends Configuration {
/**
* Path on which the oidc-provider instance is mounted.
*/
path?: string;
/**
* Issuer URI. By default Ts.ED create issuer with http://localhost:${httpPort}
*/
issuer?: string;
/**
* Path to store jwks keys.
*/
jwksPath?: string;
/**
* Generate jwks from given certificates
*/<|fim▁hole|> certificates?: JwksKeyParameters[];
/**
* Secure keys.
*/
secureKey?: string[];
/**
* Enable proxy.
*/
proxy?: boolean;
/**
* Injectable service to manage accounts.
*/
Accounts?: Type<OidcAccountsMethods>;
/**
* Injectable service to manage clients.
*/
// Clients?: Type<OidcClientsMethods>;
}
declare global {
namespace TsED {
interface Configuration {
oidc: OidcSettings;
}
}
}<|fim▁end|> | |
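// Usage sketch for the interface above (illustrative; not part of the original file).
// The paths, issuer and secure keys are placeholders, `Accounts` is assumed to be a
// user-provided class implementing OidcAccountsMethods, and the inherited oidc-provider
// options are assumed to be optional.
const exampleOidcSettings: OidcSettings = {
  path: "/oidc",
  issuer: "http://localhost:8083",
  jwksPath: "./keys/jwks.json",
  secureKey: ["cookie-secret-1", "cookie-secret-2"],
  proxy: false,
  // Accounts: AccountsService,
};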
<|file_name|>FATSuite.java<|end_file_name|><|fim▁begin|>/*******************************************************************************
* Copyright (c) 2017, 2020 IBM Corporation and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* IBM Corporation - initial API and implementation
*******************************************************************************/
package com.ibm.ws.transaction.test;
import org.junit.ClassRule;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
import org.junit.runners.Suite.SuiteClasses;
import com.ibm.ws.transaction.test.tests.EJBNewTxDBRoSTest;<|fim▁hole|>import componenttest.rules.repeater.FeatureReplacementAction;
import componenttest.rules.repeater.JakartaEE9Action;
import componenttest.rules.repeater.RepeatTests;
@RunWith(Suite.class)
@SuiteClasses({
EJBNewTxTest.class,
EJBNewTxDBTest.class,
EJBNewTxRoSTest.class,
EJBNewTxDBRoSTest.class
})
public class FATSuite {
// Using the RepeatTests @ClassRule will cause all tests to be run twice.
// First without any modifications, then again with all features upgraded to their EE8 equivalents.
@ClassRule
public static RepeatTests r = RepeatTests.withoutModification()
.andWith(FeatureReplacementAction.EE8_FEATURES())
.andWith(new JakartaEE9Action());
}<|fim▁end|> | import com.ibm.ws.transaction.test.tests.EJBNewTxDBTest;
import com.ibm.ws.transaction.test.tests.EJBNewTxRoSTest;
import com.ibm.ws.transaction.test.tests.EJBNewTxTest;
|
<|file_name|>raft.go<|end_file_name|><|fim▁begin|>package raft
import (
"github.com/iketheadore/raft/comm"
"github.com/iketheadore/raft/logic"
)
type Raft struct {
localServ logic.Server
listener comm.Listener
sender comm.Sender
logic *logic.Logic
}
func New(addr string) *Raft {
r := &Raft{localServ: logic.Server{Addr: addr, Role: logic.Follower}, listener: comm.NewListener(addr)}
r.listener.Run()
r.logic = logic.New(r.localServ)<|fim▁hole|> r.logic.Subscribe(r.listener)
return r
}
func (r *Raft) Connect(addr string) error {
return r.logic.Connect(logic.Server{Addr: addr, Role: logic.Follower})
}
func (r *Raft) Run() {
r.logic.Run()
}
func (r *Raft) ReplicateCmd(cmd comm.Command) {
r.logic.ReplicateCmd(cmd)
}<|fim▁end|> | |
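// Usage sketch for the API above (illustrative; not part of the original file).
// The addresses are placeholders; building a comm.Command is omitted because its
// definition lives in the comm package.
func exampleUsage() {
	node := New("127.0.0.1:9001") // listen locally and set up the logic layer
	if err := node.Connect("127.0.0.1:9002"); err != nil {
		// handle the join failure (log, retry, ...)
	}
	node.Run()
	// node.ReplicateCmd(cmd) // replicate a command once a comm.Command value is built
}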
<|file_name|>UserService.java<|end_file_name|><|fim▁begin|>package com.lynx.service;
import java.util.Collection;
import java.util.Optional;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.Sort;
import org.springframework.stereotype.Service;
import com.lynx.domain.User;
import com.lynx.repository.UserRepository;
import lombok.extern.slf4j.Slf4j;
@Slf4j
@Service
public class UserService {
@Autowired
private UserRepository userRepository;
public Optional<User> getUserById(String id) {
log.debug("Getting user={}", id);
return Optional.ofNullable(userRepository.findOne(id));
}
public Optional<User> getUserByEmail(String email) {
log.debug("Getting user by email={}", email.replaceFirst("@.*", "@***"));
return userRepository.findOneByEmail(email);
}
public Collection<User> getAllUsers() {
log.debug("Getting all users");
return userRepository.findAll(new Sort("email"));
}
public User create(User user) {<|fim▁hole|>
}<|fim▁end|> | return userRepository.save(user);
} |
<|file_name|>demo_mp_async.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import freenect
import signal
import matplotlib.pyplot as mp
from misc.demo import frame_convert
mp.ion()
image_rgb = None
image_depth = None
keep_running = True
def display_depth(dev, data, timestamp):
global image_depth
data = frame_convert.pretty_depth(data)
mp.gray()
mp.figure(1)
if image_depth:
image_depth.set_data(data)
else:
image_depth = mp.imshow(data, interpolation='nearest', animated=True)
mp.draw()
def display_rgb(dev, data, timestamp):
global image_rgb
mp.figure(2)
if image_rgb:
image_rgb.set_data(data)
else:
image_rgb = mp.imshow(data, interpolation='nearest', animated=True)
mp.draw()<|fim▁hole|> if not keep_running:
raise freenect.Kill
def handler(signum, frame):
global keep_running
keep_running = False
print('Press Ctrl-C in terminal to stop')
signal.signal(signal.SIGINT, handler)
freenect.runloop(depth=display_depth,
video=display_rgb,
body=body)<|fim▁end|> |
def body(*args): |
<|file_name|>eventlet_backdoor.py<|end_file_name|><|fim▁begin|># Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import copy
import errno
import gc
import logging
import os
import pprint
import socket
import sys
import traceback
import eventlet.backdoor
import greenlet
from oslo_config import cfg
from neutron.openstack.common._i18n import _LI
help_for_backdoor_port = (
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
"in listening on a random tcp port number; <port> results in listening "
"on the specified port number (and not enabling backdoor if that port "
"is in use); and <start>:<end> results in listening on the smallest "
"unused port number within the specified range of port numbers. The "
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
def list_opts():
"""Entry point for oslo-config-generator.
"""
return [(None, copy.deepcopy(eventlet_backdoor_opts))]
class EventletBackdoorConfigValueError(Exception):
def __init__(self, port_range, help_msg, ex):
msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
'%(help)s' %
{'range': port_range, 'ex': ex, 'help': help_msg})
super(EventletBackdoorConfigValueError, self).__init__(msg)
self.port_range = port_range
def _dont_use_this():
print("Don't use this, just disconnect instead")
def _find_objects(t):
return [o for o in gc.get_objects() if isinstance(o, t)]
def _print_greenthreads():
for i, gt in enumerate(_find_objects(greenlet.greenlet)):
print(i, gt)
traceback.print_stack(gt.gr_frame)
print()
def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print(threadId)
traceback.print_stack(stack)
print()
def _parse_port_range(port_range):
if ':' not in port_range:<|fim▁hole|> try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
raise EventletBackdoorConfigValueError(port_range, ex,
help_for_backdoor_port)
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
def initialize_if_enabled():
backdoor_locals = {
'exit': _dont_use_this, # So we don't exit the entire process
'quit': _dont_use_this, # So we don't exit the entire process
'fo': _find_objects,
'pgt': _print_greenthreads,
'pnt': _print_nativethreads,
}
if CONF.backdoor_port is None:
return None
start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
# NOTE(johannes): The standard sys.displayhook will print the value of
# the last expression and set it to __builtin__._, which overwrites
# the __builtin__._ that gettext sets. Let's switch to using pprint
# since it won't interact poorly with gettext, and it's easier to
# read the output too.
def displayhook(val):
if val is not None:
pprint.pprint(val)
sys.displayhook = displayhook
sock = _listen('localhost', start_port, end_port, eventlet.listen)
# In the case of backdoor port being zero, a port number is assigned by
# listen(). In any case, pull the port number out here.
port = sock.getsockname()[1]
LOG.info(
_LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
{'port': port, 'pid': os.getpid()}
)
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
locals=backdoor_locals)
return port<|fim▁end|> | start, end = port_range, port_range
else:
start, end = port_range.split(':', 1) |
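# Illustrative sketch (not part of the original file): how the three accepted
# backdoor_port forms described in help_for_backdoor_port map through
# _parse_port_range before _listen() walks the range.
assert _parse_port_range("0") == (0, 0)                 # bind a random free port
assert _parse_port_range("4444") == (4444, 4444)        # bind exactly 4444 or give up
assert _parse_port_range("8000:9000") == (8000, 9000)   # smallest unused port in range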
<|file_name|>test_compositor_host_ozone.cc<|end_file_name|><|fim▁begin|>// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/compositor/test/test_compositor_host.h"
#include "base/basictypes.h"
#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/thread_task_runner_handle.h"
#include "ui/compositor/compositor.h"
#include "ui/gfx/geometry/rect.h"
namespace ui {
class TestCompositorHostOzone : public TestCompositorHost {
public:
TestCompositorHostOzone(const gfx::Rect& bounds,
ui::ContextFactory* context_factory);
~TestCompositorHostOzone() override;
private:
// Overridden from TestCompositorHost:
void Show() override;
ui::Compositor* GetCompositor() override;
gfx::Rect bounds_;
<|fim▁hole|>
scoped_ptr<ui::Compositor> compositor_;
DISALLOW_COPY_AND_ASSIGN(TestCompositorHostOzone);
};
TestCompositorHostOzone::TestCompositorHostOzone(
const gfx::Rect& bounds,
ui::ContextFactory* context_factory)
: bounds_(bounds),
context_factory_(context_factory) {}
TestCompositorHostOzone::~TestCompositorHostOzone() {}
void TestCompositorHostOzone::Show() {
// Ozone should rightly have a backing native framebuffer
  // An in-memory array drawn into by OSMesa is a reasonable
  // facsimile of a dumb framebuffer at present.
// GLSurface will allocate the array so long as it is provided
// with a non-0 widget.
// TODO(rjkroege): Use a "real" ozone widget when it is
// available: http://crbug.com/255128
compositor_.reset(new ui::Compositor(1,
context_factory_,
base::ThreadTaskRunnerHandle::Get()));
compositor_->SetScaleAndSize(1.0f, bounds_.size());
}
ui::Compositor* TestCompositorHostOzone::GetCompositor() {
return compositor_.get();
}
// static
TestCompositorHost* TestCompositorHost::Create(
const gfx::Rect& bounds,
ui::ContextFactory* context_factory) {
return new TestCompositorHostOzone(bounds, context_factory);
}
} // namespace ui<|fim▁end|> | ui::ContextFactory* context_factory_; |
<|file_name|>admin.py<|end_file_name|><|fim▁begin|>from django.contrib import admin
from django.contrib.auth.models import Group as AuthGroup
from sigma_core.models.user import User
from sigma_core.models.group import Group
from sigma_core.models.group_member import GroupMember
from sigma_core.models.group_field import GroupField
from sigma_core.models.group_field_value import GroupFieldValue
from sigma_core.models.group_invitation import GroupInvitation
from sigma_core.models.participation import Participation
from sigma_core.models.publication import Publication
from sigma_core.models.event import Event
from sigma_core.models.shared_publication import SharedPublication
admin.site.unregister(AuthGroup)
from sigma_core.models.acknowledgment import Acknowledgment
from sigma_core.models.acknowledgment_invitation import AcknowledgmentInvitation
admin.site.register(Acknowledgment)
admin.site.register(AcknowledgmentInvitation)
admin.site.register(GroupMember)
#admin.site.register(GroupInvitation)
#admin.site.register(SharedPublication)
#admin.site.register(Participation)
admin.site.register(GroupField)
admin.site.register(GroupFieldValue)
class ParticipationInline(admin.TabularInline):
model = Participation
extra = 0
class EventAdmin(admin.ModelAdmin):
list_display = ['name', 'date_start', 'date_end', 'place_name']
list_filter = ['date_start', 'date_end']
search_fields = ['name', 'place_name']
inlines = [ParticipationInline]
admin.site.register(Event, EventAdmin)
class SharedInline(admin.TabularInline):
model = SharedPublication
extra = 0
class PublicationAdmin(admin.ModelAdmin):
inlines = [SharedInline]
list_display = ['title', 'group', 'author', 'related_event', 'internal']
list_filter = ['group', 'author', 'internal']
admin.site.register(Publication, PublicationAdmin)
class GroupsInline(admin.TabularInline):
model = GroupMember
extra = 0
class InvitationsInline(admin.TabularInline):
model = GroupInvitation<|fim▁hole|>class UserAdmin(admin.ModelAdmin):
list_display = ['firstname', 'lastname', 'email', 'is_active', 'is_superuser']
list_filter = ['is_active', 'is_superuser']
search_fields = ['firstname', 'lastname', 'email']
inlines = [GroupsInline, InvitationsInline]
admin.site.register(User, UserAdmin)
class MembersInline(admin.TabularInline):
model = GroupMember
extra = 0
class ParentsInline(admin.TabularInline):
model = Acknowledgment
extra = 0
fk_name = "acknowledged"
class ChildrenInline(admin.TabularInline):
model = Acknowledgment
extra = 0
fk_name = "acknowledged_by"
class GroupAdmin(admin.ModelAdmin):
list_display = ['name', 'is_protected', 'can_anyone_ask', 'need_validation_to_join', 'members_visibility', 'group_visibility']
list_filter = ['is_protected', 'can_anyone_ask', 'need_validation_to_join']
search_fields = ['name', 'description']
inlines = [MembersInline, InvitationsInline, ParentsInline, ChildrenInline]
admin.site.register(Group, GroupAdmin)
from sigma_core.models.tag import Tag
from sigma_core.models.like import Like
from sigma_core.models.comment import Comment
admin.site.register(Tag)
admin.site.register(Like)
admin.site.register(Comment)<|fim▁end|> | extra = 0
|
<|file_name|>DecafError.java<|end_file_name|><|fim▁begin|>package stage2;
public class DecafError {
int numErrors;
DecafError(){
}
public static String errorPos(Position p){
return "(L: " + p.startLine +
", Col: " + p.startCol +
") -- (L: " + p.endLine +
", Col: " + p.endCol +
")";
}
	public void error(String s, Position p) {
		numErrors++;  // record the error so haveErrors() reflects it
		System.out.println("Error found at location "+
				errorPos(p) + ":\n"+s);
	}
public boolean haveErrors() {
return (numErrors>0);<|fim▁hole|> }
}<|fim▁end|> | |
<|file_name|>redirect.rs<|end_file_name|><|fim▁begin|>use response::{Response, Responder};
use http::hyper::header;
use http::Status;
/// An empty redirect response to a given URL.
///
/// This type simplifies returning a redirect response to the client.
#[derive(Debug)]
pub struct Redirect(Status, String);
impl Redirect {
/// Construct a temporary "see other" (303) redirect response. This is the
/// typical response when redirecting a user to another page. This type of
/// redirect indicates that the client should look elsewhere, but always via
/// a `GET` request, for a given resource.
///
/// # Examples
///
/// ```rust
/// use rocket::response::Redirect;
///<|fim▁hole|> }
/// Construct a "temporary" (307) redirect response. This response instructs
/// the client to reissue the current request to a different URL,
/// maintaining the contents of the request identically. This means that,
/// for example, a `POST` request will be resent, contents included, to the
/// requested URL.
///
/// # Examples
///
/// ```rust
/// use rocket::response::Redirect;
///
/// let redirect = Redirect::temporary("/other_url");
/// ```
pub fn temporary(uri: &str) -> Redirect {
Redirect(Status::TemporaryRedirect, String::from(uri))
}
/// Construct a "permanent" (308) redirect response. This redirect must only
/// be used for permanent redirects as it is cached by clients. This
/// response instructs the client to reissue requests for the current URL to
/// a different URL, now and in the future, maintaining the contents of the
/// request identically. This means that, for example, a `POST` request will
/// be resent, contents included, to the requested URL.
///
/// # Examples
///
/// ```rust
/// use rocket::response::Redirect;
///
/// let redirect = Redirect::permanent("/other_url");
/// ```
pub fn permanent(uri: &str) -> Redirect {
Redirect(Status::PermanentRedirect, String::from(uri))
}
/// Construct a temporary "found" (302) redirect response. This response
/// instructs the client to reissue the current request to a different URL,
/// ideally maintaining the contents of the request identically.
/// Unfortunately, different clients may respond differently to this type of
/// redirect, so `303` or `307` redirects, which disambiguate, are
/// preferred.
///
/// # Examples
///
/// ```rust
/// use rocket::response::Redirect;
///
/// let redirect = Redirect::found("/other_url");
/// ```
pub fn found(uri: &str) -> Redirect {
Redirect(Status::Found, String::from(uri))
}
/// Construct a permanent "moved" (301) redirect response. This response
/// should only be used for permanent redirects as it can be cached by
/// browsers. Because different clients may respond differently to this type
/// of redirect, a `308` redirect, which disambiguates, is preferred.
///
/// # Examples
///
/// ```rust
/// use rocket::response::Redirect;
///
/// let redirect = Redirect::moved("/other_url");
/// ```
pub fn moved(uri: &str) -> Redirect {
Redirect(Status::MovedPermanently, String::from(uri))
}
}
/// Constructs a response with the appropriate status code and the given URL in
/// the `Location` header field. The body of the response is empty. This
/// responder does not fail.
impl Responder<'static> for Redirect {
fn respond(self) -> Result<Response<'static>, Status> {
Response::build()
.status(self.0)
.header(header::Location(self.1))
.ok()
}
}<|fim▁end|> | /// let redirect = Redirect::to("/other_url");
/// ```
pub fn to(uri: &str) -> Redirect {
Redirect(Status::SeeOther, String::from(uri)) |
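// Usage sketch for the constructors above (illustrative; not part of the original file).
// In a Rocket application this would normally be a route handler (e.g. annotated with a
// `#[get(...)]` attribute); the target path is a placeholder.
fn moved_handler() -> Redirect {
    Redirect::permanent("/new-location")
}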
<|file_name|>parse_ctf.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
#
# Copyright (c) 2020 Intel Corporation.
#
# SPDX-License-Identifier: Apache-2.0
"""
Script to parse CTF data and print to the screen in a custom and colorful
format.
Generate trace using samples/subsys/tracing for example:
west build -b qemu_x86 samples/subsys/tracing -t run \
-- -DCONF_FILE=prj_uart_ctf.conf
mkdir ctf
cp build/channel0_0 ctf/
cp subsys/tracing/ctf/tsdl/metadata ctf/
./scripts/tracing/parse_ctf.py -t ctf
"""
import sys
import datetime
from colorama import Fore
import argparse
try:
import bt2
except ImportError:
sys.exit("Missing dependency: You need to install python bindings of babletrace.")
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-t", "--trace",
required=True,
help="tracing data (directory with metadata and trace file)")
args = parser.parse_args()
return args
def main():
args = parse_args()
msg_it = bt2.TraceCollectionMessageIterator(args.trace)
last_event_ns_from_origin = None
timeline = []
def get_thread(name):
for t in timeline:
if t.get('name', None) == name and t.get('in', 0 ) != 0 and not t.get('out', None):
return t<|fim▁hole|>
if not isinstance(msg, bt2._EventMessageConst):
continue
ns_from_origin = msg.default_clock_snapshot.ns_from_origin
event = msg.event
# Compute the time difference since the last event message.
diff_s = 0
if last_event_ns_from_origin is not None:
diff_s = (ns_from_origin - last_event_ns_from_origin) / 1e9
dt = datetime.datetime.fromtimestamp(ns_from_origin / 1e9)
if event.name in [
'thread_switched_out',
'thread_switched_in',
'thread_pending',
'thread_ready',
'thread_resume',
'thread_suspend',
'thread_create',
'thread_abort'
]:
cpu = event.payload_field.get("cpu", None)
thread_id = event.payload_field.get("thread_id", None)
thread_name = event.payload_field.get("name", None)
th = {}
if event.name in ['thread_switched_out', 'thread_switched_in'] and cpu is not None:
cpu_string = f"(cpu: {cpu})"
else:
cpu_string = ""
if thread_name:
print(f"{dt} (+{diff_s:.6f} s): {event.name}: {thread_name} {cpu_string}")
elif thread_id:
print(f"{dt} (+{diff_s:.6f} s): {event.name}: {thread_id} {cpu_string}")
else:
print(f"{dt} (+{diff_s:.6f} s): {event.name}")
if event.name in ['thread_switched_out', 'thread_switched_in']:
if thread_name:
th = get_thread(thread_name)
if not th:
th['name'] = thread_name
else:
th = get_thread(thread_id)
if not th:
th['name'] = thread_id
if event.name in ['thread_switched_out']:
th['out'] = ns_from_origin
tin = th.get('in', None)
tout = th.get('out', None)
if tout is not None and tin is not None:
diff = (tout - tin)
th['runtime'] = diff
elif event.name in ['thread_switched_in']:
th['in'] = ns_from_origin
timeline.append(th)
elif event.name in ['thread_info']:
stack_size = event.payload_field['stack_size']
print(f"{dt} (+{diff_s:.6f} s): {event.name} (Stack size: {stack_size})")
elif event.name in ['start_call', 'end_call']:
if event.payload_field['id'] == 39:
c = Fore.GREEN
elif event.payload_field['id'] in [37, 38]:
c = Fore.CYAN
else:
c = Fore.YELLOW
print(c + f"{dt} (+{diff_s:.6f} s): {event.name} {event.payload_field['id']}" + Fore.RESET)
elif event.name in ['semaphore_init', 'semaphore_take', 'semaphore_give']:
c = Fore.CYAN
print(c + f"{dt} (+{diff_s:.6f} s): {event.name} ({event.payload_field['id']})" + Fore.RESET)
elif event.name in ['mutex_init', 'mutex_take', 'mutex_give']:
c = Fore.MAGENTA
print(c + f"{dt} (+{diff_s:.6f} s): {event.name} ({event.payload_field['id']})" + Fore.RESET)
else:
print(f"{dt} (+{diff_s:.6f} s): {event.name}")
last_event_ns_from_origin = ns_from_origin
if __name__=="__main__":
main()<|fim▁end|> | return {}
for msg in msg_it: |
<|file_name|>epw2wea.py<|end_file_name|><|fim▁begin|># coding=utf-8
from ._commandbase import RadianceCommand
from ..datatype import RadiancePath
import os
class Epw2wea(RadianceCommand):
"""epw2wea transforms an EnergyPlus weather data (.epw) file into
the DAYSIM weather file format, for use with the RADIANCE gendaymtx
program.
Attributes:
epw_file: Filepath of the epw file that is to be converted into wea
format.
Usage:
        from honeybee_plus.radiance.command.epw2wea import Epw2wea
        # create an epw2wea command.
        epwWea = Epw2wea(epw_file='c:/ladybug/test.epw')
"""
_epw_file = RadiancePath('_epw_file',
descriptive_name='Epw weather data file',
relative_path=None, check_exists=False)
output_wea_file = RadiancePath('output_wea_file',
descriptive_name='Output wea file',
relative_path=None, check_exists=False)
def __init__(self, epw_file=None, output_wea_file=None):
RadianceCommand.__init__(self)
self.epw_file = epw_file
"""The path of the epw file that is to be converted to a wea file."""
self.output_wea_file = output_wea_file
"""The path of the output wea file. Note that this path will be created
if not specified by the user."""
@property
def epw_file(self):
return self._epw_file
@epw_file.setter
def epw_file(self, value):
"""The path of the epw file that is to be converted to a wea file."""
if value:
self._epw_file = value
if not self.output_wea_file._value:
self.output_wea_file = os.path.splitext(value)[0] + '.wea'
else:
self._epw_file = None
def to_rad_string(self, relative_path=False):
"""Return full radiance command as string"""
rad_string = "%s %s %s" % (
'"%s"' % os.path.join(self.radbin_path, 'epw2wea'),
self.epw_file.to_rad_string(),
self.output_wea_file.to_rad_string())
# self.check_input_files(rad_string)<|fim▁hole|> def input_files(self):
"""Return input files specified by user."""
return self.epw_file.normpath,<|fim▁end|> | return rad_string
@property |
<|file_name|>api_test.go<|end_file_name|><|fim▁begin|>// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package juju_test
import (
"context"
"crypto/tls"
"fmt"
"net"
"time"
"github.com/juju/errors"
"github.com/juju/names/v4"
"github.com/juju/testing"
jc "github.com/juju/testing/checkers"
gc "gopkg.in/check.v1"
"gopkg.in/macaroon.v2"
"github.com/juju/juju/api"
apitesting "github.com/juju/juju/api/testing"
"github.com/juju/juju/apiserver/params"
"github.com/juju/juju/core/model"
"github.com/juju/juju/core/network"
sstesting "github.com/juju/juju/environs/simplestreams/testing"
envtesting "github.com/juju/juju/environs/testing"
"github.com/juju/juju/juju"
"github.com/juju/juju/juju/keys"
"github.com/juju/juju/jujuclient"
"github.com/juju/juju/jujuclient/jujuclienttesting"
"github.com/juju/juju/provider/dummy"
"github.com/juju/juju/rpc/jsoncodec"
coretesting "github.com/juju/juju/testing"
"github.com/juju/juju/version"
)
type NewAPIClientSuite struct {
coretesting.FakeJujuXDGDataHomeSuite
testing.MgoSuite
envtesting.ToolsFixture
}
var fakeUUID = "df136476-12e9-11e4-8a70-b2227cce2b54"
var _ = gc.Suite(&NewAPIClientSuite{})
func (s *NewAPIClientSuite) SetUpSuite(c *gc.C) {
s.FakeJujuXDGDataHomeSuite.SetUpSuite(c)
s.MgoSuite.SetUpSuite(c)
s.PatchValue(&keys.JujuPublicKey, sstesting.SignedMetadataPublicKey)
}
func (s *NewAPIClientSuite) TearDownSuite(c *gc.C) {
s.MgoSuite.TearDownSuite(c)
s.FakeJujuXDGDataHomeSuite.TearDownSuite(c)
}
func (s *NewAPIClientSuite) SetUpTest(c *gc.C) {
s.ToolsFixture.SetUpTest(c)
s.FakeJujuXDGDataHomeSuite.SetUpTest(c)
s.MgoSuite.SetUpTest(c)
s.PatchValue(&dummy.LogDir, c.MkDir())
}
func (s *NewAPIClientSuite) TearDownTest(c *gc.C) {
dummy.Reset(c)
s.ToolsFixture.TearDownTest(c)
s.MgoSuite.TearDownTest(c)
s.FakeJujuXDGDataHomeSuite.TearDownTest(c)
}
func (s *NewAPIClientSuite) TestWithBootstrapConfig(c *gc.C) {
store := newClientStore(c, "noconfig")
called := 0
expectState := mockedAPIState(mockedHostPort | mockedModelTag)
apiOpen := func(apiInfo *api.Info, opts api.DialOpts) (api.Connection, error) {
checkCommonAPIInfoAttrs(c, apiInfo, opts)
c.Check(apiInfo.ModelTag, gc.Equals, names.NewModelTag(fakeUUID))
called++
return expectState, nil
}
st, err := newAPIConnectionFromNames(c, "noconfig", "admin/admin", store, apiOpen)
c.Assert(err, jc.ErrorIsNil)
c.Assert(st, gc.Equals, expectState)
c.Assert(called, gc.Equals, 1)
// The addresses should have been updated.
c.Assert(
store.Controllers["noconfig"].APIEndpoints,
jc.DeepEquals,
[]string{"0.1.2.3:1234", "[2001:db8::1]:1234"},
)
c.Assert(
store.Controllers["noconfig"].AgentVersion,
gc.Equals,
"1.2.3",
)
controllerBefore, err := store.ControllerByName("noconfig")
c.Assert(err, jc.ErrorIsNil)
// If APIHostPorts or agent version haven't changed, then the store won't be updated.
stubStore := jujuclienttesting.WrapClientStore(store)
st, err = newAPIConnectionFromNames(c, "noconfig", "admin/admin", stubStore, apiOpen)
c.Assert(err, jc.ErrorIsNil)<|fim▁hole|> c.Assert(st, gc.Equals, expectState)
c.Assert(called, gc.Equals, 2)
stubStore.CheckCallNames(c, "AccountDetails", "ModelByName", "ControllerByName", "AccountDetails", "UpdateAccount")
controllerAfter, err := store.ControllerByName("noconfig")
c.Assert(err, jc.ErrorIsNil)
c.Assert(controllerBefore, gc.DeepEquals, controllerAfter)
}
func (s *NewAPIClientSuite) TestUpdatesLastKnownAccess(c *gc.C) {
store := newClientStore(c, "noconfig")
called := 0
expectState := mockedAPIState(mockedHostPort | mockedModelTag)
apiOpen := func(apiInfo *api.Info, opts api.DialOpts) (api.Connection, error) {
checkCommonAPIInfoAttrs(c, apiInfo, opts)
c.Check(apiInfo.ModelTag, gc.Equals, names.NewModelTag(fakeUUID))
called++
return expectState, nil
}
stubStore := jujuclienttesting.WrapClientStore(store)
st, err := newAPIConnectionFromNames(c, "noconfig", "admin/admin", stubStore, apiOpen)
c.Assert(err, jc.ErrorIsNil)
c.Assert(st, gc.Equals, expectState)
c.Assert(called, gc.Equals, 1)
stubStore.CheckCallNames(c, "AccountDetails", "ModelByName", "ControllerByName", "UpdateController", "AccountDetails", "UpdateAccount")
c.Assert(
store.Accounts["noconfig"],
jc.DeepEquals,
jujuclient.AccountDetails{User: "admin", Password: "hunter2", LastKnownAccess: "superuser"},
)
}
func (s *NewAPIClientSuite) TestUpdatesPublicDNSName(c *gc.C) {
apiOpen := func(apiInfo *api.Info, opts api.DialOpts) (api.Connection, error) {
conn := mockedAPIState(noFlags)
conn.publicDNSName = "somewhere.invalid"
conn.addr = "0.1.2.3:1234"
return conn, nil
}
store := newClientStore(c, "controllername")
_, err := newAPIConnectionFromNames(c, "controllername", "", store, apiOpen)
c.Assert(err, jc.ErrorIsNil)
c.Assert(store.Controllers["controllername"].PublicDNSName, gc.Equals, "somewhere.invalid")
}
func (s *NewAPIClientSuite) TestWithInfoNoAddresses(c *gc.C) {
store := newClientStore(c, "noconfig")
err := store.UpdateController("noconfig", jujuclient.ControllerDetails{
ControllerUUID: fakeUUID,
CACert: "certificate",
})
c.Assert(err, jc.ErrorIsNil)
st, err := newAPIConnectionFromNames(c, "noconfig", "", store, panicAPIOpen)
c.Assert(err, jc.Satisfies, juju.IsNoAddressesError)
c.Assert(st, gc.IsNil)
}
func (s *NewAPIClientSuite) TestWithMacaroons(c *gc.C) {
store := newClientStore(c, "withmac")
mac, err := apitesting.NewMacaroon("id")
c.Assert(err, jc.ErrorIsNil)
err = store.UpdateAccount("withmac", jujuclient.AccountDetails{
User: "admin",
Password: "",
Macaroons: []macaroon.Slice{{mac}},
})
c.Assert(err, jc.ErrorIsNil)
ad, err := store.AccountDetails("withmac")
c.Assert(err, jc.ErrorIsNil)
info, _, err := juju.ConnectionInfo(juju.NewAPIConnectionParams{
ControllerName: "withmac",
Store: store,
AccountDetails: ad,
})
c.Assert(err, jc.ErrorIsNil)
c.Assert(info.Macaroons, gc.DeepEquals, []macaroon.Slice{{mac}})
}
func (s *NewAPIClientSuite) TestWithRedirect(c *gc.C) {
store := newClientStore(c, "ctl")
err := store.UpdateController("ctl", jujuclient.ControllerDetails{
ControllerUUID: fakeUUID,
CACert: "certificate",
APIEndpoints: []string{"0.1.2.3:5678"},
})
c.Assert(err, jc.ErrorIsNil)
controllerBefore, err := store.ControllerByName("ctl")
c.Assert(err, jc.ErrorIsNil)
redirHPs := []network.MachineHostPorts{{
network.MachineHostPort{MachineAddress: network.NewMachineAddress("0.0.9.9"), NetPort: network.NetPort(1234)},
network.MachineHostPort{MachineAddress: network.NewMachineAddress("0.0.9.10"), NetPort: network.NetPort(1235)},
}}
openCount := 0
redirOpen := func(apiInfo *api.Info, opts api.DialOpts) (api.Connection, error) {
c.Check(apiInfo.ModelTag.Id(), gc.Equals, fakeUUID)
openCount++
switch openCount {
case 1:
c.Check(apiInfo.Addrs, jc.DeepEquals, []string{"0.1.2.3:5678"})
c.Check(apiInfo.CACert, gc.Equals, "certificate")
return nil, errors.Trace(&api.RedirectError{
Servers: redirHPs,
CACert: "alternative CA cert",
FollowRedirect: true,
})
case 2:
c.Check(apiInfo.Addrs, jc.DeepEquals, network.CollapseToHostPorts(redirHPs).Strings())
c.Check(apiInfo.CACert, gc.Equals, "alternative CA cert")
st := mockedAPIState(noFlags)
st.apiHostPorts = redirHPs
st.modelTag = fakeUUID
return st, nil
}
c.Errorf("OpenAPI called too many times")
return nil, fmt.Errorf("OpenAPI called too many times")
}
st0, err := newAPIConnectionFromNames(c, "ctl", "admin/admin", store, redirOpen)
c.Assert(err, jc.ErrorIsNil)
c.Assert(openCount, gc.Equals, 2)
st := st0.(*mockAPIState)
c.Assert(st.modelTag, gc.Equals, fakeUUID)
// Check that the addresses of the original controller
// have not been changed.
controllerAfter, err := store.ControllerByName("ctl")
c.Assert(err, jc.ErrorIsNil)
c.Assert(controllerBefore, gc.DeepEquals, controllerAfter)
}
func (s *NewAPIClientSuite) TestWithInfoAPIOpenError(c *gc.C) {
jujuClient := newClientStore(c, "noconfig")
apiOpen := func(apiInfo *api.Info, opts api.DialOpts) (api.Connection, error) {
return nil, errors.Errorf("an error")
}
st, err := newAPIConnectionFromNames(c, "noconfig", "", jujuClient, apiOpen)
// We expect to get the error from apiOpen, because it is not
// fatal to have no bootstrap config.
c.Assert(err, gc.ErrorMatches, "an error")
c.Assert(st, gc.IsNil)
}
func (s *NewAPIClientSuite) TestDialedAddressIsCached(c *gc.C) {
store := jujuclient.NewMemStore()
err := store.AddController("foo", jujuclient.ControllerDetails{
ControllerUUID: fakeUUID,
APIEndpoints: []string{
"example1:1111",
"example2:2222",
},
})
c.Assert(err, jc.ErrorIsNil)
dialed := make(chan string, 10)
start := make(chan struct{})
// Wait for both dials to complete, so we
// know their addresses are cached.
go func() {
addrs := make(map[string]bool)
for len(addrs) < 2 {
addrs[<-dialed] = true
}
// Allow the dials to complete.
close(start)
}()
conn, err := juju.NewAPIConnection(juju.NewAPIConnectionParams{
Store: store,
ControllerName: "foo",
DialOpts: api.DialOpts{
DialWebsocket: func(ctx context.Context, urlStr string, tlsConfig *tls.Config, ipAddr string) (jsoncodec.JSONConn, error) {
apiConn := testRootAPI{
serverAddrs: params.FromProviderHostsPorts([]network.ProviderHostPorts{{
network.ProviderHostPort{ProviderAddress: network.NewProviderAddress("example3"), NetPort: 3333},
network.ProviderHostPort{ProviderAddress: network.NewProviderAddress("example4"), NetPort: 4444},
}}),
}
dialed <- ipAddr
<-start
if ipAddr != "0.1.1.2:1111" {
return nil, errors.New("fail")
}
return jsoncodec.NetJSONConn(apitesting.FakeAPIServer(apiConn)), nil
},
IPAddrResolver: apitesting.IPAddrResolverMap{
"example1": {"0.1.1.1", "0.1.1.2"},
"example2": {"0.2.2.2"},
},
},
AccountDetails: new(jujuclient.AccountDetails),
})
c.Assert(err, jc.ErrorIsNil)
defer conn.Close()
details, err := store.ControllerByName("foo")
c.Assert(err, jc.ErrorIsNil)
// The cache should contain both results. The IP address
// that was successfully dialed should be at the start of its
// slice.
c.Assert(details.DNSCache, jc.DeepEquals, map[string][]string{
"example1": {"0.1.1.2", "0.1.1.1"},
"example2": {"0.2.2.2"},
})
// The API addresses should have all the returned server addresses
// there as well as the one we actually succeeded in dialing.
// The successfully dialed address should be at the start.
c.Assert(details.APIEndpoints, jc.DeepEquals, []string{
"example1:1111",
"example3:3333",
"example4:4444",
})
}
func (s *NewAPIClientSuite) TestWithExistingDNSCache(c *gc.C) {
store := jujuclient.NewMemStore()
err := store.AddController("foo", jujuclient.ControllerDetails{
ControllerUUID: fakeUUID,
APIEndpoints: []string{
"example1:1111",
"example3:3333",
"example4:4444",
},
DNSCache: map[string][]string{
"example1": {"0.1.1.2", "0.1.1.1"},
"example2": {"0.2.2.2"},
},
})
c.Assert(err, jc.ErrorIsNil)
start := make(chan struct{})
conn, err := juju.NewAPIConnection(juju.NewAPIConnectionParams{
Store: store,
ControllerName: "foo",
DialOpts: api.DialOpts{
DialWebsocket: func(ctx context.Context, urlStr string, tlsConfig *tls.Config, ipAddr string) (jsoncodec.JSONConn, error) {
apiConn := testRootAPI{
serverAddrs: params.FromProviderHostsPorts([]network.ProviderHostPorts{{
network.ProviderHostPort{ProviderAddress: network.NewProviderAddress("example3"), NetPort: 3333},
network.ProviderHostPort{ProviderAddress: network.NewProviderAddress("example5"), NetPort: 5555},
}}),
}
c.Logf("Dial: %q requested", ipAddr)
if ipAddr != "0.1.1.2:1111" {
// It's not the blessed IP address - block indefinitely
// until we're called upon to start.
select {
case <-start:
case <-time.After(testing.LongWait):
c.Fatalf("timeout while waiting for start dialing %v", ipAddr)
}
return nil, errors.New("fail")
}
// We're trying to connect to the blessed IP address.
// Succeed immediately.
return jsoncodec.NetJSONConn(apitesting.FakeAPIServer(apiConn)), nil
},
IPAddrResolver: ipAddrResolverFunc(func(ctx context.Context, host string) ([]net.IPAddr, error) {
c.Logf("Resolve: %q requested", host)
// We shouldn't block here, because IP Address lookups are done blocking in the main loop.
return nil, errors.New("no DNS available")
}),
},
AccountDetails: new(jujuclient.AccountDetails),
})
c.Assert(err, jc.ErrorIsNil)
defer conn.Close()
close(start)
details, err := store.ControllerByName("foo")
c.Assert(err, jc.ErrorIsNil)
// The DNS cache should not have changed.
c.Assert(details.DNSCache, jc.DeepEquals, map[string][]string{
"example1": {"0.1.1.2", "0.1.1.1"},
"example2": {"0.2.2.2"},
})
// The API addresses should have all the returned server addresses
// there as well as the one we actually succeeded in dialing.
// The successfully dialed address should be still at the start.
c.Assert(details.APIEndpoints, jc.DeepEquals, []string{
"example1:1111",
"example3:3333",
"example5:5555",
})
}
func (s *NewAPIClientSuite) TestEndpointFiltering(c *gc.C) {
store := jujuclient.NewMemStore()
err := store.AddController("foo", jujuclient.ControllerDetails{
ControllerUUID: fakeUUID,
APIEndpoints: []string{
"example1:1111",
},
DNSCache: map[string][]string{
"example1": {"0.1.1.1"},
},
})
c.Assert(err, jc.ErrorIsNil)
serverAddrs := params.FromProviderHostsPorts([]network.ProviderHostPorts{{
network.ProviderHostPort{ProviderAddress: network.NewProviderAddress("0.1.2.3"), NetPort: 1234},
network.ProviderHostPort{ProviderAddress: network.NewProviderAddress("2001:db8::1"), NetPort: 1234},
network.ProviderHostPort{ProviderAddress: network.NewProviderAddress("10.0.0.1"), NetPort: 1234},
network.ProviderHostPort{ProviderAddress: network.NewProviderAddress("127.0.0.1"), NetPort: 1234},
network.ProviderHostPort{ProviderAddress: network.NewProviderAddress("169.254.1.1"), NetPort: 1234},
//Duplicate
network.ProviderHostPort{ProviderAddress: network.NewProviderAddress("0.1.2.3"), NetPort: 1234},
//Duplicate host, same IP.
network.ProviderHostPort{ProviderAddress: network.NewProviderAddress("0.1.2.3"), NetPort: 1235},
}})
conn, err := juju.NewAPIConnection(juju.NewAPIConnectionParams{
Store: store,
ControllerName: "foo",
DialOpts: api.DialOpts{
DialWebsocket: func(ctx context.Context, urlStr string, tlsConfig *tls.Config, ipAddr string) (jsoncodec.JSONConn, error) {
apiConn := testRootAPI{
serverAddrs: serverAddrs,
}
return jsoncodec.NetJSONConn(apitesting.FakeAPIServer(apiConn)), nil
},
IPAddrResolver: ipAddrResolverFunc(func(ctx context.Context, host string) ([]net.IPAddr, error) {
return nil, errors.New("no DNS available")
}),
},
AccountDetails: new(jujuclient.AccountDetails),
})
c.Assert(err, jc.ErrorIsNil)
defer conn.Close()
details, err := store.ControllerByName("foo")
c.Assert(err, jc.ErrorIsNil)
// The API addresses should have filtered out duplicates
// and unusable addresses.
c.Assert(details.APIEndpoints, jc.DeepEquals, []string{
"example1:1111",
"0.1.2.3:1234",
"[2001:db8::1]:1234",
"10.0.0.1:1234",
"0.1.2.3:1235",
})
}
var moveToFrontTests = []struct {
item string
items []string
expect []string
}{{
item: "x",
items: []string{"y", "x"},
expect: []string{"x", "y"},
}, {
item: "z",
items: []string{"y", "x"},
expect: []string{"y", "x"},
}, {
item: "y",
items: []string{"y", "x"},
expect: []string{"y", "x"},
}, {
item: "x",
items: []string{"y", "x", "z"},
expect: []string{"x", "y", "z"},
}, {
item: "d",
items: []string{"a", "b", "c", "d", "e", "f"},
expect: []string{"d", "a", "b", "c", "e", "f"},
}}
func (s *NewAPIClientSuite) TestMoveToFront(c *gc.C) {
for i, test := range moveToFrontTests {
c.Logf("test %d: moveToFront %q %v", i, test.item, test.items)
juju.MoveToFront(test.item, test.items)
c.Assert(test.items, jc.DeepEquals, test.expect)
}
}
type testRootAPI struct {
serverAddrs [][]params.HostPort
}
func (r testRootAPI) Admin(id string) (testAdminAPI, error) {
return testAdminAPI{r: r}, nil
}
type testAdminAPI struct {
r testRootAPI
}
func (a testAdminAPI) Login(req params.LoginRequest) params.LoginResult {
return params.LoginResult{
ControllerTag: names.NewControllerTag(fakeUUID).String(),
Servers: a.r.serverAddrs,
ServerVersion: version.Current.String(),
}
}
func checkCommonAPIInfoAttrs(c *gc.C, apiInfo *api.Info, opts api.DialOpts) {
opts.DNSCache = nil
c.Check(apiInfo.Tag, gc.Equals, names.NewUserTag("admin"))
c.Check(apiInfo.CACert, gc.Equals, "certificate")
c.Check(apiInfo.Password, gc.Equals, "hunter2")
c.Check(opts, gc.DeepEquals, api.DefaultDialOpts())
}
// newClientStore returns a client store that contains information
// based on the given controller name and info.
func newClientStore(c *gc.C, controllerName string) *jujuclient.MemStore {
store := jujuclient.NewMemStore()
err := store.AddController(controllerName, jujuclient.ControllerDetails{
ControllerUUID: fakeUUID,
CACert: "certificate",
APIEndpoints: []string{"0.1.2.3:5678"},
})
c.Assert(err, jc.ErrorIsNil)
err = store.UpdateModel(controllerName, "admin/admin", jujuclient.ModelDetails{
ModelUUID: fakeUUID, ModelType: model.IAAS,
})
c.Assert(err, jc.ErrorIsNil)
// Models belong to accounts, so we must have an account even
// if "creds" is not initialised. If it is, it may overwrite
// this one.
err = store.UpdateAccount(controllerName, jujuclient.AccountDetails{
User: "admin",
Password: "hunter2",
})
c.Assert(err, jc.ErrorIsNil)
return store
}
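// newAPIConnectionFromNames builds NewAPIConnectionParams for the named
// controller and model, filling in account and model details from the store
// when they exist, and opens the API connection.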
func newAPIConnectionFromNames(
c *gc.C,
controller, model string,
store jujuclient.ClientStore,
apiOpen api.OpenFunc,
) (api.Connection, error) {
args := juju.NewAPIConnectionParams{
Store: store,
ControllerName: controller,
DialOpts: api.DefaultDialOpts(),
OpenAPI: apiOpen,
}
accountDetails, err := store.AccountDetails(controller)
if !errors.IsNotFound(err) {
c.Assert(err, jc.ErrorIsNil)
args.AccountDetails = accountDetails
}
if model != "" {
modelDetails, err := store.ModelByName(controller, model)
c.Assert(err, jc.ErrorIsNil)
args.ModelUUID = modelDetails.ModelUUID
}
return juju.NewAPIConnection(args)
}
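// ipAddrResolverFunc adapts a plain function to the resolver interface
// expected by api.DialOpts.IPAddrResolver.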
type ipAddrResolverFunc func(ctx context.Context, host string) ([]net.IPAddr, error)
func (f ipAddrResolverFunc) LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) {
return f(ctx, host)
}
// File: GradeofDifferentation.java
//#############################################################################
//# #
//# Copyright (C) <2015> <IMS MAXIMS> #
//# #
//# This program is free software: you can redistribute it and/or modify #
//# it under the terms of the GNU Affero General Public License as #
//# published by the Free Software Foundation, either version 3 of the #
//# License, or (at your option) any later version. #
//# #
//# This program is distributed in the hope that it will be useful, #
//# but WITHOUT ANY WARRANTY; without even the implied warranty of #
//# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
//# GNU Affero General Public License for more details. #
//# #
//# You should have received a copy of the GNU Affero General Public License #
//# along with this program. If not, see <http://www.gnu.org/licenses/>. #
//# #
//# IMS MAXIMS provides absolutely NO GUARANTEE OF THE CLINICAL SAFETY of #
//# this program. Users of this software do so entirely at their own risk. #
//# IMS MAXIMS only ensures the Clinical Safety of unaltered run-time #
//# software that it builds, deploys and maintains. #
//# #
//#############################################################################
//#EOH
// This code was generated by Barbara Worwood using IMS Development Environment (version 1.80 build 5589.25814)
// Copyright (C) 1995-2015 IMS MAXIMS. All rights reserved.
// WARNING: DO NOT MODIFY the content of this file
package ims.oncology.vo.lookups;
import ims.framework.cn.data.TreeNode;
import java.util.ArrayList;
import ims.framework.utils.Image;
import ims.framework.utils.Color;
public class GradeofDifferentation extends ims.vo.LookupInstVo implements TreeNode
{
private static final long serialVersionUID = 1L;
public GradeofDifferentation()
{
super();
}
public GradeofDifferentation(int id)
{
super(id, "", true);
}
public GradeofDifferentation(int id, String text, boolean active)
{
super(id, text, active, null, null, null);
}
public GradeofDifferentation(int id, String text, boolean active, GradeofDifferentation parent, Image image)
{
super(id, text, active, parent, image);
}
public GradeofDifferentation(int id, String text, boolean active, GradeofDifferentation parent, Image image, Color color)
{
super(id, text, active, parent, image, color);
}
public GradeofDifferentation(int id, String text, boolean active, GradeofDifferentation parent, Image image, Color color, int order)
{
super(id, text, active, parent, image, color, order);
}
public static GradeofDifferentation buildLookup(ims.vo.LookupInstanceBean bean)
{
return new GradeofDifferentation(bean.getId(), bean.getText(), bean.isActive());
}
public String toString()
{
if(getText() != null)
return getText();
return "";
}
public TreeNode getParentNode()
{
return (GradeofDifferentation)super.getParentInstance();
}
public GradeofDifferentation getParent()
{
return (GradeofDifferentation)super.getParentInstance();
}
public void setParent(GradeofDifferentation parent)
{
super.setParentInstance(parent);
}
public TreeNode[] getChildren()
{
ArrayList children = super.getChildInstances();
GradeofDifferentation[] typedChildren = new GradeofDifferentation[children.size()];
for (int i = 0; i < children.size(); i++)
{
typedChildren[i] = (GradeofDifferentation)children.get(i);
}
return typedChildren;
}
public int addChild(TreeNode child)
{
if (child instanceof GradeofDifferentation)
{
super.addChild((GradeofDifferentation)child);
}
return super.getChildInstances().size();
}
public int removeChild(TreeNode child)
{
if (child instanceof GradeofDifferentation)
{
super.removeChild((GradeofDifferentation)child);
}
return super.getChildInstances().size();
}
public Image getExpandedImage()
{
return super.getImage();
}
public Image getCollapsedImage()
{
return super.getImage();
}
public static ims.framework.IItemCollection getNegativeInstancesAsIItemCollection()
{
GradeofDifferentationCollection result = new GradeofDifferentationCollection();
return result;
}
public static GradeofDifferentation[] getNegativeInstances()
{
return new GradeofDifferentation[] {};
}
public static String[] getNegativeInstanceNames()
{
return new String[] {};
}
public static GradeofDifferentation getNegativeInstance(String name)
{
if(name == null)
return null;
// No negative instances found
return null;
}
public static GradeofDifferentation getNegativeInstance(Integer id)
{
if(id == null)
return null;
// No negative instances found
return null;
}
public int getTypeId()
{
return TYPE_ID;
}
public static final int TYPE_ID = 1251032;
}
// File: authoring.spec.ts
describe('authoring', () => {
var GUID = 'urn:tag:superdesk-1';
var USER = 'user:1';
var ITEM: any = {guid: GUID};
beforeEach(window.module(($provide) => {
$provide.constant('lodash', _);
}));
beforeEach(window.module('angular-embed'));
beforeEach(window.module('superdesk.apps.publish'));
beforeEach(window.module('superdesk.core.preferences'));
beforeEach(window.module('superdesk.apps.archive'));
beforeEach(window.module('superdesk.apps.authoring'));
beforeEach(window.module('superdesk.core.auth'));
beforeEach(window.module('superdesk.apps.workspace.content'));
beforeEach(window.module('superdesk.mocks'));
beforeEach(window.module('superdesk.core.privileges'));
beforeEach(window.module('superdesk.apps.desks'));
beforeEach(window.module('superdesk.templates-cache'));
beforeEach(window.module('superdesk.apps.vocabularies'));
beforeEach(window.module('superdesk.apps.searchProviders'));
beforeEach(window.module('superdesk.core.editor3'));
beforeEach(window.module('superdesk.apps.editor2'));
beforeEach(inject(($window) => {
$window.onbeforeunload = angular.noop;
}));
beforeEach(inject((preferencesService, desks, $q) => {
spyOn(preferencesService, 'get').and.returnValue($q.when({items: ['urn:tag:superdesk-1']}));
spyOn(preferencesService, 'update').and.returnValue($q.when({}));
spyOn(preferencesService, 'getPrivileges').and.returnValue($q.when({}));
spyOn(desks, 'fetchCurrentUserDesks').and.returnValue($q.when([]));
}));
beforeEach(inject(($route) => {
$route.current = {params: {_id: GUID}};
}));
beforeEach(inject((session) => {
session.start({_id: 'sess'}, {_id: USER});
expect(session.identity._id).toBe(USER);
}));
it('can open an item',
inject((superdesk, api, lock, autosave, $injector, $q, $rootScope) => {
var _item,
lockedItem = angular.extend({_locked: false}, ITEM);
spyOn(api, 'find').and.returnValue($q.when(ITEM));
spyOn(lock, 'lock').and.returnValue($q.when(lockedItem));
spyOn(autosave, 'open').and.returnValue($q.when(lockedItem));
$injector.invoke(superdesk.activity('authoring').resolve.item).then((resolvedItem) => {
_item = resolvedItem;
});
$rootScope.$digest();
expect(api.find).toHaveBeenCalledWith('archive', GUID, jasmine.any(Object));
expect(lock.lock).toHaveBeenCalledWith(ITEM, false, undefined);
expect(autosave.open).toHaveBeenCalledWith(lockedItem);
expect(_item.guid).toBe(GUID);
}));
it('does lock item only once',
inject((superdesk, api, lock, autosave, session, $injector, $q, $rootScope) => {
var lockedItem: any = ITEM;
lockedItem.lock_user = USER;
lockedItem.lock_session = session.sessionId;
spyOn(api, 'find').and.returnValue($q.when(lockedItem));
$injector.invoke(superdesk.activity('authoring').resolve.item);
$rootScope.$digest();
expect(ITEM._locked).toBe(true);
}));
it('unlocks a locked item and locks by current user',
inject((authoring, lock, $rootScope, $timeout, api, $q, $location) => {
spyOn(api, 'save').and.returnValue($q.when({}));
spyOn(lock, 'unlock').and.returnValue($q.when({}));
var lockedItem = {guid: GUID, _id: GUID, _locked: true, lock_user: 'user:5', task: 'desk:1'};
var $scope = startAuthoring(lockedItem, 'edit');
$rootScope.$digest();
$scope.unlock();
$timeout.flush(5000);
$rootScope.$digest();
expect($location.path()).toBe('/authoring/' + $scope.item._id);
}));
it('can autosave and save an item', inject((api, $q, $timeout, $rootScope) => {
var $scope = startAuthoring({guid: GUID, _id: GUID, task: 'desk:1', _locked: true, _editable: true},
'edit'),
headline = 'test headline';
expect($scope.dirty).toBe(false);
expect($scope.item.guid).toBe(GUID);
spyOn(api, 'save').and.returnValue($q.when({headline: 'foo'}));
// edit
$scope.item.headline = headline;
$scope.autosave($scope.item);
expect($scope.dirty).toBe(true);
// autosave
$timeout.flush(5000);
expect(api.save).toHaveBeenCalled();
expect($scope.item.headline).toBe(headline);
// save
$scope.save();
$rootScope.$digest();
expect($scope.dirty).toBe(false);
expect(api.save).toHaveBeenCalled();
}));
it('can use a previously created autosave', inject(() => {
var $scope = startAuthoring({_autosave: {headline: 'test'}}, 'edit');
expect($scope.item._autosave.headline).toBe('test');
expect($scope.item.headline).toBe('test');
}));
it('can save while item is being autosaved', inject(($rootScope, $timeout, $q, api) => {
var $scope = startAuthoring({headline: 'test', task: 'desk:1'}, 'edit');
$scope.item.body_html = 'test';
$rootScope.$digest();
$timeout.flush(1000);
spyOn(api, 'save').and.returnValue($q.when({}));
$scope.save();
$rootScope.$digest();
$timeout.flush(5000);
expect($scope.item._autosave).toBeNull();
}));
it('can close item after save work confirm', inject(($rootScope, $q, $location, authoring, reloadService) => {
startAuthoring({headline: 'test'}, 'edit');
$location.search('item', 'foo');
$location.search('action', 'edit');
$rootScope.$digest();
spyOn(authoring, 'saveWorkConfirmation').and.returnValue($q.when());
spyOn(reloadService, 'forceReload');
$rootScope.$broadcast('savework', 'test');
$rootScope.$digest();
expect($location.search().item).toBe(undefined);
expect($location.search().action).toBe(undefined);
expect(reloadService.forceReload).toHaveBeenCalled();
}));
it('can populate content metadata for undo', inject(($rootScope) => {
var orig = {headline: 'foo'};
var scope = startAuthoring(orig, 'edit');
expect(scope.origItem.headline).toBe('foo');
expect(scope.item.headline).toBe('foo');
expect(scope.item.slugline).toBe('');
scope.$apply(() => {
scope.origItem.headline = 'bar';
scope.origItem.slugline = 'slug';
});
expect(scope.item.headline).toBe('foo');
expect(scope.item.slugline).toBe('');
}));
it('confirm the associated media not called',
inject((api, $q, $rootScope, config, confirm) => {
let item = {
_id: 'test',
headline: 'headline',
};
let rewriteOf = {
_id: 'rewriteOf',
headline: 'rewrite',
associations: {
featuremedia: {
},
},
};
let defered = $q.defer();
config.features = {
editFeaturedImage: 1,
};
spyOn(api, 'find').and.returnValue($q.when({rewriteOf}));
spyOn(confirm, 'confirmFeatureMedia').and.returnValue(defered.promise);
let scope = startAuthoring(item, 'edit');
scope.publish();
$rootScope.$digest();
expect(confirm.confirmFeatureMedia).not.toHaveBeenCalled();
expect(api.find).not.toHaveBeenCalledWith('archive', 'rewriteOf');
}));
it('confirm the associated media not called if not rewrite_of',
inject((api, $q, $rootScope, config, confirm) => {
let item = {
_id: 'test',
headline: 'headline',
};
let rewriteOf = {
_id: 'rewriteOf',
headline: 'rewrite',
associations: {
featuremedia: {
},
},
};
let defered = $q.defer();
config.features = {
editFeaturedImage: 1,
confirmMediaOnUpdate: 1,
};
spyOn(api, 'find').and.returnValue($q.when({rewriteOf}));
spyOn(confirm, 'confirmFeatureMedia').and.returnValue(defered.promise);
let scope = startAuthoring(item, 'edit');
scope.publish();
$rootScope.$digest();
expect(confirm.confirmFeatureMedia).not.toHaveBeenCalled();
expect(api.find).not.toHaveBeenCalledWith('archive', 'rewriteOf');
}));
it('confirm the associated media called if rewrite_of but no associated media on edited item',
inject((api, $q, $rootScope, config, confirm, authoring) => {
let item = {
_id: 'test',
headline: 'headline',
rewrite_of: 'rewriteOf',
};
let rewriteOf = {
_id: 'rewriteOf',
headline: 'rewrite',
associations: {
featuremedia: {
},
},
};
let defered = $q.defer();
config.features = {
editFeaturedImage: 1,
confirmMediaOnUpdate: 1,
};
spyOn(api, 'find').and.returnValue($q.when(rewriteOf));
spyOn(confirm, 'confirmFeatureMedia').and.returnValue(defered.promise);
spyOn(authoring, 'autosave').and.returnValue(item);
spyOn(authoring, 'publish').and.returnValue(item);
let scope = startAuthoring(item, 'edit');
scope.publish();
$rootScope.$digest();
expect(api.find).toHaveBeenCalledWith('archive', 'rewriteOf');
expect(confirm.confirmFeatureMedia).toHaveBeenCalledWith(rewriteOf);
defered.resolve(rewriteOf);
$rootScope.$digest();
expect(authoring.autosave).toHaveBeenCalled();
expect(authoring.publish).not.toHaveBeenCalled();
}));
it('confirm the associated media but do not use the associated media',
inject((api, $q, $rootScope, config, confirm, authoring) => {
let item = {
_id: 'test',
rewrite_of: 'rewriteOf',
};
let rewriteOf = {
_id: 'rewriteOf',
associations: {
featuremedia: {
test: 'test',
},
},
};
let defered = $q.defer();
config.features = {
editFeaturedImage: 1,
confirmMediaOnUpdate: 1,
};
spyOn(api, 'find').and.returnValue($q.when(rewriteOf));
spyOn(confirm, 'confirmFeatureMedia').and.returnValue(defered.promise);
spyOn(authoring, 'autosave').and.returnValue({});
spyOn(authoring, 'publish').and.returnValue({});
let scope = startAuthoring(item, 'edit');
scope.publish();
$rootScope.$digest();
expect(api.find).toHaveBeenCalledWith('archive', 'rewriteOf');
expect(confirm.confirmFeatureMedia).toHaveBeenCalledWith(rewriteOf);
defered.resolve({});
$rootScope.$digest();
expect(authoring.publish).toHaveBeenCalled();
expect(authoring.autosave).not.toHaveBeenCalled();
}));
it('can reject publishing on error', inject((api, $q, $rootScope, authoring, lock) => {
let success = jasmine.createSpy('success');
let error = jasmine.createSpy('error');
spyOn(api, 'update').and.returnValue($q.reject('err'));
spyOn(lock, 'unlock').and.returnValue();
authoring.publish({}, {}).then(success, error);
$rootScope.$digest();
expect(api.update).toHaveBeenCalled();
expect(lock.unlock).not.toHaveBeenCalled();
expect(success).not.toHaveBeenCalled();
expect(error).toHaveBeenCalledWith('err');
}));
it('can continue publishing on unlock error', inject((api, $q, $rootScope, authoring, lock) => {
let success = jasmine.createSpy('success');
let error = jasmine.createSpy('error');
let item = {};
spyOn(api, 'update').and.returnValue($q.when(item));
spyOn(lock, 'unlock').and.returnValue($q.reject({}));
authoring.publish({}, {}).then(success, error);
$rootScope.$digest();
expect(lock.unlock).toHaveBeenCalledWith(item);
expect(success).toHaveBeenCalledWith(item);
expect(error).not.toHaveBeenCalled();
}));
/**
* Start authoring ctrl for given item.
*
* @param {object} item
* @param {string} action
* @returns {object}
*/
function startAuthoring(item, action) {
var $scope;
inject(($rootScope, $controller, superdesk, $compile) => {
$scope = $rootScope.$new();
$controller(superdesk.activity('authoring').controller, {
$scope: $scope,
item: item,
action: action,
});
$compile(angular.element('<div sd-authoring-workspace><div sd-authoring></div></div>'))($scope);
});
return $scope;
}
describe('authoring service', () => {
var confirmDefer;
beforeEach(inject((confirm, lock, $q) => {
confirmDefer = $q.defer();
spyOn(confirm, 'confirm').and.returnValue(confirmDefer.promise);
spyOn(confirm, 'confirmPublish').and.returnValue(confirmDefer.promise);
spyOn(confirm, 'confirmSaveWork').and.returnValue(confirmDefer.promise);
spyOn(confirm, 'confirmFeatureMedia').and.returnValue(confirmDefer.promise);
spyOn(lock, 'unlock').and.returnValue($q.when());
}));
it('can check if an item is editable', inject((authoring, session) => {
expect(authoring.isEditable({})).toBe(false);
expect(authoring.isEditable({lock_user: session.identity._id, lock_session: session.sessionId}))
.toBe(true);
}));
it('can close a read-only item', inject((authoring, confirm, lock, $rootScope) => {
var done = jasmine.createSpy('done');
authoring.close({}).then(done);
$rootScope.$digest();
expect(confirm.confirm).not.toHaveBeenCalled();
expect(lock.unlock).not.toHaveBeenCalled();
expect(done).toHaveBeenCalled();
}));
it('can unlock on close editable item without changes made',
inject((authoring, confirm, lock, $rootScope) => {
expect(authoring.isEditable(ITEM)).toBe(true);
authoring.close(ITEM, false);
$rootScope.$digest();
expect(confirm.confirm).not.toHaveBeenCalled();
expect(lock.unlock).toHaveBeenCalled();
}));
it('confirms if an item is dirty and saves',
inject((authoring, confirm, lock, $q, $rootScope) => {
var edit = Object.create(ITEM);
edit.headline = 'test';
authoring.close(edit, ITEM, true);
$rootScope.$digest();
expect(confirm.confirm).toHaveBeenCalled();
expect(lock.unlock).not.toHaveBeenCalled();
spyOn(authoring, 'save').and.returnValue($q.when());
confirmDefer.resolve();
$rootScope.$digest();
expect(authoring.save).toHaveBeenCalledWith(ITEM, edit);
expect(lock.unlock).toHaveBeenCalled();
}));
it('confirms if an item is dirty on opening new or existing item and not unlocking on save',
inject((authoring, confirm, lock, $q, $rootScope) => {
var edit = Object.create(ITEM);
edit.headline = 'test';
authoring.close(edit, ITEM, true, true);
$rootScope.$digest();
expect(confirm.confirm).toHaveBeenCalled();
expect(lock.unlock).not.toHaveBeenCalled();
spyOn(authoring, 'save').and.returnValue($q.when());
confirmDefer.resolve();
$rootScope.$digest();
expect(authoring.save).toHaveBeenCalledWith(ITEM, edit);
expect(lock.unlock).not.toHaveBeenCalled();
}));
it('can unlock an item', inject((authoring, session, confirm, autosave) => {
var item = {lock_user: session.identity._id, lock_session: session.sessionId};
expect(authoring.isEditable(item)).toBe(true);
spyOn(confirm, 'unlock');
spyOn(autosave, 'stop');
authoring.unlock(item);
expect(authoring.isEditable(item)).toBe(false);
expect(confirm.unlock).toHaveBeenCalled();
expect(autosave.stop).toHaveBeenCalled();
}));
it('can publish items', inject((authoring, api, $q) => {
var item = {_id: 1, state: 'submitted'};
spyOn(api, 'update').and.returnValue($q.when());
authoring.publish(item);
expect(api.update).toHaveBeenCalledWith('archive_publish', item, {});
}));
it('confirms if an item is dirty and saves and publish',
inject((authoring, api, confirm, lock, $q, $rootScope) => {
var edit = Object.create(ITEM);
_.extend(edit, {
_id: 1,
headline: 'test',
lock_user: 'user:1',
state: 'submitted',
});
authoring.publishConfirmation(ITEM, edit, true, 'publish');
$rootScope.$digest();
expect(confirm.confirmPublish).toHaveBeenCalled();
expect(lock.unlock).not.toHaveBeenCalled();
spyOn(api, 'update').and.returnValue($q.when(_.extend({}, edit, {})));
authoring.publish(edit);
$rootScope.$digest();
expect(api.update).toHaveBeenCalledWith('archive_publish', edit, {});
expect(lock.unlock).toHaveBeenCalled();
}));
it('confirms if an item is dirty and save work in personal',
inject((authoring, api, confirm, lock, $q, $rootScope) => {
var edit = Object.create(ITEM);
_.extend(edit, {
task: {desk: null, stage: null, user: 1},
type: 'text',
version: 1,
});
authoring.saveWorkConfirmation(ITEM, edit, true, 'User is disabled');
$rootScope.$digest();
expect(confirm.confirmSaveWork).toHaveBeenCalled();
spyOn(api, 'save').and.returnValue($q.when(_.extend({}, edit, {})));
authoring.saveWork(edit);
$rootScope.$digest();
expect(api.save).toHaveBeenCalledWith('archive', {}, edit);
}));
it('close the published dirty item without confirmation',
inject((authoring, api, confirm, lock, autosave, $q, $rootScope) => {
var publishedItem = Object.create(ITEM);
publishedItem.state = 'published';
var edit = Object.create(publishedItem);
edit.headline = 'test';
spyOn(authoring, 'isEditable').and.returnValue(true);
spyOn(autosave, 'drop').and.returnValue($q.when({}));
authoring.close(edit, publishedItem, true, false);
$rootScope.$digest();
expect(confirm.confirm).not.toHaveBeenCalled();
expect(lock.unlock).toHaveBeenCalled();
expect(autosave.drop).toHaveBeenCalled();
}));
it('close the corrected dirty item without confirmation',
inject((authoring, api, confirm, lock, autosave, $q, $rootScope) => {
var publishedItem = Object.create(ITEM);
publishedItem.state = 'corrected';
var edit = Object.create(publishedItem);
edit.headline = 'test';
spyOn(authoring, 'isEditable').and.returnValue(true);
spyOn(autosave, 'drop').and.returnValue($q.when({}));
authoring.close(edit, publishedItem, true, false);
$rootScope.$digest();
expect(confirm.confirm).not.toHaveBeenCalled();
expect(lock.unlock).toHaveBeenCalled();
expect(autosave.drop).toHaveBeenCalled();
}));
it('can validate schedule', inject((authoring) => {
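// A schedule in the past fails with a 'future' error (it must be in the
// future); a far-future schedule validates cleanly.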
var errors = authoring.validateSchedule('2010-10-10', '08:10:10', '2010-10-10T08:10:10', 'Europe/Prague');
expect(errors).toBeTruthy();
expect(errors.future).toBeTruthy();
errors = authoring.validateSchedule('2099-10-10', '11:32:21', '2099-10-10T08:10:10', 'Europe/Prague');
expect(errors).toBeFalsy();
}));
it('can validate schedule for pre utc timezone', inject((authoring, moment) => {
// utc - 1h and matching server tz format
var timestamp = moment.utc()
.subtract(1, 'hours')
.format()
.replace('+00:00', '+0000');
expect(authoring.validateSchedule(
timestamp.slice(0, 10),
timestamp.slice(11, 19),
timestamp,
'America/Toronto', // anything before utc
)).toBeFalsy();
}));
it('updates orig item on save',
inject((authoring, $rootScope, $httpBackend, api, $q, urls) => {
var item = {headline: 'foo'};
var orig: any = {_links: {self: {href: 'archive/foo'}}};
spyOn(urls, 'item').and.returnValue($q.when(orig._links.self.href));
$httpBackend.expectPATCH(orig._links.self.href, item)
.respond(200, {_etag: 'new', _current_version: 2});
authoring.save(orig, item);
$rootScope.$digest();
$httpBackend.flush();
expect(orig._etag).toBe('new');
expect(orig._current_version).toBe(2);
}));
});
describe('media identifer generator service', () => {
it('generates media field identifer', inject((mediaIdGenerator) => {
expect(mediaIdGenerator.getFieldVersionName('media1')).toBe('media1');
expect(mediaIdGenerator.getFieldVersionName('media1', 1)).toBe('media1--1');
expect(mediaIdGenerator.getFieldParts('media1')).toEqual(['media1', null]);
expect(mediaIdGenerator.getFieldParts('media1--1')).toEqual(['media1', 1]);
}));
});
describe('carousel directive', () => {
it('initializes the current related item identifer', inject(($rootScope, $compile) => {
let scope = $rootScope.$new();
let elem = $compile('<div sd-item-carousel data-item="item" data-items="items"></div>')(scope);
scope.$digest();
let iscope = elem.isolateScope();
scope.item = {guid: 'item1', associations: {'media1--1': {guid: 'foo', type: 'picture'}}};
scope.items = [{fieldId: 'media1--1', 'media1--1': {guid: 'foo', type: 'picture'}}];
scope.$digest();
expect(iscope.rel).toBe(null);
scope.item = {guid: 'item1', associations: {'media1--1': null}};
scope.items = [{fieldId: 'media1--1', 'media1--1': null}];
scope.$digest();
expect(iscope.rel).toBe('media1--1');
}));
});
});
describe('Item Crops directive', () => {
beforeEach(window.module('superdesk.apps.publish'));
beforeEach(window.module('superdesk.apps.authoring'));
beforeEach(window.module('superdesk.mocks'));
beforeEach(window.module('superdesk.templates-cache'));
beforeEach(window.module('superdesk.apps.vocabularies'));
beforeEach(window.module('superdesk.apps.searchProviders'));
beforeEach(window.module('superdesk.core.editor3'));
beforeEach(window.module('superdesk.apps.editor2'));
it('showCrops return true if image renditions are present',
inject(($rootScope, $compile, $q, metadata, vocabularies) => {
var metaInit = $q.defer();
metadata.values = {
crop_sizes: [
{name: '4-3'}, {name: '16-9'},
],
};
spyOn(metadata, 'initialize').and.returnValue(metaInit.promise);
spyOn(vocabularies, 'getAllActiveVocabularies').and.returnValue($q.when([]));
let scope = $rootScope.$new();
scope.item = {
type: 'picture',
renditions: {
},
};
var elem = $compile('<div sd-item-crops data-item="item"></div>')(scope);
metaInit.resolve();
scope.$digest();
let iScope = elem.isolateScope();
expect(iScope.showCrops()).not.toBe(true);
scope.item = {
type: 'picture',
renditions: {
'4-3': {
},
},
};
scope.$digest();
expect(iScope.showCrops()).toBe(true);
}),
);
});
describe('autosave', () => {
beforeEach(window.module('superdesk.apps.authoring'));
beforeEach(window.module('superdesk.mocks'));
beforeEach(window.module('superdesk.templates-cache'));
beforeEach(window.module('superdesk.apps.searchProviders'));
it('can fetch an autosave for item locked by user and is editable',
inject((autosave, api, $q, $rootScope) => {
spyOn(api, 'find').and.returnValue($q.when({}));
autosave.open({_locked: true, _editable: true, _id: 1});
$rootScope.$digest();
expect(api.find).toHaveBeenCalledWith('archive_autosave', 1);
}));
it('will skip autosave fetch when item is locked by user but not editable',
inject((autosave, api, $q, $rootScope) => {
spyOn(api, 'find').and.returnValue($q.when({}));
autosave.open({_locked: false, _editable: false, _id: 1});
$rootScope.$digest();
expect(api.find).not.toHaveBeenCalled();
}));
it('will skip autosave fetch when item is locked by another user',
inject((autosave, api, $rootScope) => {
spyOn(api, 'find');
autosave.open({_locked: true});
$rootScope.$digest();
expect(api.find).not.toHaveBeenCalled();
}));
it('can create an autosave', inject((autosave, api, $q, $timeout, $rootScope) => {
var orig: any = {_id: 1, _etag: 'x', _locked: true, _editable: true};
var item = Object.create(orig);
item.headline = 'test';
spyOn(api, 'save').and.returnValue($q.when({_id: 2}));
autosave.save(item, orig);
$rootScope.$digest();
expect(api.save).not.toHaveBeenCalled();
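// The autosave is debounced: nothing is saved until the pending timeout fires.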
$timeout.flush(5000);
expect(api.save).toHaveBeenCalledWith('archive_autosave', {}, {_id: 1, headline: 'test'});
expect(orig._autosave._id).toBe(2);
expect(item.headline).toBe('test');
expect(orig.headline).not.toBe('test');
}));
it('can save multiple items', inject((autosave, api, $q, $timeout, $rootScope) => {
var item1 = {_id: 1, _etag: '1', _locked: true, _editable: true},
item2 = {_id: 2, _etag: '2', _locked: true, _editable: true};
spyOn(api, 'save').and.returnValue($q.when({}));
autosave.save(_.create(item1), item1);
$timeout.flush(1500);
autosave.save(_.create(item2), item2);
$timeout.flush(2500);
expect(api.save).toHaveBeenCalled();
expect(api.save.calls.count()).toBe(1);
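// Only the first item's autosave has fired so far; flushing the remaining
// timeout saves the second item as well.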
$timeout.flush(5000);
expect(api.save.calls.count()).toBe(2);
}));
});
describe('lock service', () => {
beforeEach(window.module('superdesk.apps.authoring'));
beforeEach(window.module('superdesk.mocks'));
beforeEach(window.module('superdesk.templates-cache'));
beforeEach(window.module('superdesk.apps.searchProviders'));
var user = {_id: 'user'};
var sess = {_id: 'sess'};
var anotherUser = {_id: 'another_user'};
beforeEach(inject((session) => {
session.start(sess, user);
}));
it('can test if item is locked', inject((lock) => {
expect(lock.isLocked({})).toBe(false);
expect(lock.isLocked({lock_user: '1'})).toBe(true);
}));
it('can detect lock by same user and different session', inject((lock) => {
expect(lock.isLocked({lock_user: 'user'})).toBe(true);
expect(lock.isLocked({lock_user: 'user', lock_session: 'other_sess'})).toBe(true);
}));
it('can use lock_user dict', inject((lock, session) => {
expect(lock.isLocked({lock_user: {_id: 'user'}})).toBe(true);
expect(lock.isLocked({lock_user: {_id: 'user'}, lock_session: session.sessionId})).toBe(false);
}));
it('can unlock the item if user has unlock privileges', inject((lock, privileges, $rootScope) => {
privileges.setUserPrivileges({unlock: 1});
$rootScope.$digest();
// testing if the user can unlock its own content.
expect(lock.can_unlock({lock_user: user._id})).toBe(true);
expect(lock.can_unlock({lock_user: user._id, lock_session: 'another_session'})).toBe(true);
expect(lock.can_unlock({lock_user: anotherUser._id, lock_session: 'another_session'})).toBe(1);
}));
it('can unlock the item if user has no unlock privileges', inject((lock, privileges, $rootScope) => {
privileges.setUserPrivileges({unlock: 0});
$rootScope.$digest();
// testing if the user can unlock its own content.
expect(lock.can_unlock({lock_user: user._id})).toBe(true);
expect(lock.can_unlock({lock_user: user._id, lock_session: 'another_session'})).toBe(true);
expect(lock.can_unlock({lock_user: anotherUser._id, lock_session: 'another_session'})).toBe(0);
}));
it('can unlock own draft but not items of other users', inject((lock, privileges, $rootScope) => {
privileges.setUserPrivileges({unlock: 1});
$rootScope.$digest();
// testing if the user can unlock its own content.
expect(lock.can_unlock({lock_user: user._id, state: 'draft'})).toBe(true);
expect(lock.can_unlock({lock_user: user._id, state: 'draft', lock_session: 'another_session'})).toBe(true);
var item = {lock_user: anotherUser._id, state: 'draft', lock_session: 'another_session'};
expect(lock.can_unlock(item)).toBe(false);
}));
});
describe('authoring actions', () => {
var userDesks = [{_id: 'desk1'}, {_id: 'desk2'}];
/**
* Assert the actions
*
* @param {Object} actions : actions to be asserted.
* @param {string[]} keys : keys to be truthy.
*/
function allowedActions(actions, keys) {
_.forOwn(actions, (value, key) => {
if (_.includes(keys, key)) {
expect(value).toBeTruthy();
} else {
expect(value).toBeFalsy();
}
});
}
beforeEach(window.module('superdesk.apps.authoring'));
beforeEach(window.module('superdesk.mocks'));
beforeEach(window.module('superdesk.apps.desks'));
beforeEach(window.module('superdesk.templates-cache'));
beforeEach(window.module('superdesk.apps.searchProviders'));
beforeEach(inject((desks, $q) => {
spyOn(desks, 'fetchCurrentUserDesks').and.returnValue($q.when(userDesks));
}));
it('can perform actions if the item is located on the personal workspace',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'draft',
flags: {marked_for_not_publication: false},
type: 'text',
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['save', 'edit', 'copy', 'spike', 'multi_edit', 'export', 'set_label']);
}));
it('can perform actions if the item is located on the desk',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'submitted',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 1,
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
mark_for_desks: true,
unlock: true,
publish: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['save', 'edit', 'duplicate', 'spike', 're_write',
'mark_item_for_highlight', 'mark_item_for_desks',
'package_item', 'multi_edit', 'publish', 'add_to_current', 'export', 'set_label']);
}));
it('cannot perform publish if the item is marked for not publication',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'submitted',
flags: {marked_for_not_publication: true},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 1,
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['save', 'edit', 'duplicate', 'spike', 're_write',
'mark_item_for_highlight', 'package_item', 'multi_edit', 'add_to_current',
'export', 'set_label']);
}));
it('cannot perform publish if the item is highlight package',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'submitted',
type: 'composite',
highlight: 1,
task: {
desk: 'desk1',
},
_current_version: 1,
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['save', 'edit', 'duplicate', 'spike',
'package_item', 'multi_edit', 'add_to_current', 'set_label']);
}));
it('cannot publish if user does not have publish privileges on the desk',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'submitted',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 1,
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: false,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['save', 'edit', 'duplicate', 'spike', 're_write',
'mark_item_for_highlight', 'package_item', 'multi_edit', 'add_to_current',
'export', 'set_label']);
}));
it('can only view the item if the user does not have desk membership',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'submitted',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk3',
},
_current_version: 2,
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
mark_for_desks: false,
unlock: true,
archive: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['view', 're_write', 'export', 'set_label']);
}));
it('can only view the item if the item is killed',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'killed',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['view', 'export', 'set_label']);
}));
it('can only view the item if the item is recalled',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'recalled',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['view', 'export', 'set_label']);
}));
it('cannot create an update for a rewritten story',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'published',
type: 'text',
rewritten_by: 1,
task: {
desk: 'desk1',
},
};
var userPrivileges = {
archive: true,
rewrite: true,
unlock: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['view', 'package_item', 'multi_edit', 'add_to_current',
'resend', 'export', 'set_label']);
}));
it('can only view or unmark item if the item is spiked',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'spiked',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions,
['view', 'unspike', 'export', 'mark_item_for_desks', 'mark_item_for_highlight', 'set_label']);
}));
it('Can perform correction or kill or takedown on published item',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'published',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 10,
archive_item: {
_id: 'test',
state: 'published',
marked_for_not_publication: false,
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 10,
},
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
correct: true,
kill: true,
takedown: true,
archive_broadcast: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['duplicate', 'view', 'add_to_current',
'mark_item_for_highlight', 'package_item', 'multi_edit', 'correct', 'takedown', 'kill', 're_write',
'create_broadcast', 'resend', 'export', 'set_label']);
}));
it('Can perform resend on rewritten item',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item: any = {
_id: 'test',
state: 'published',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 10,
archive_item: {
_id: 'test',
state: 'published',
marked_for_not_publication: false,
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 10,
},
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
correct: true,
kill: true,
archive_broadcast: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['duplicate', 'view', 'add_to_current',
'mark_item_for_highlight', 'package_item', 'multi_edit', 'correct', 'kill', 're_write',
'create_broadcast', 'resend', 'export', 'set_label']);
item.archive_item.rewritten_by = 'abc';
itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['duplicate', 'view', 'add_to_current', 'mark_item_for_highlight',
'package_item', 'multi_edit', 'correct', 'kill', 'create_broadcast', 'resend', 'export',
'set_label']);
}));
it('Cannot perform correction or kill or takedown on published item without privileges',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'published',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 10,
archive_item: {
_id: 'test',
state: 'published',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 10,
},
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
correct: false,
kill: false,
takedown: false,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['duplicate', 'view', 'add_to_current',
'mark_item_for_highlight', 'package_item', 'multi_edit', 're_write', 'resend',
'export', 'set_label']);
}));
it('Can only view if the item is not the current version',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'published',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
last_published_version: false,
_current_version: 8,
archive_item: {
_id: 'test',
state: 'published',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 10,
},
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
correct: true,
kill: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['view', 'export', 'set_label']);
}));
it('Can only view, duplicate and deschedule if the item is scheduled',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'scheduled',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 8,
archive_item: {
_id: 'test',
state: 'scheduled',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 8,
},
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
correct: true,
kill: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['view', 'duplicate', 'deschedule', 'export', 'set_label']);
}));
it('Cannot send item if the version is zero',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'in_progress',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 0,
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
mark_for_desks: true,
unlock: true,
publish: true,
correct: true,
kill: true,
package_item: false,
move: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['save', 'edit', 'duplicate', 'spike', 'add_to_current',
'mark_item_for_highlight', 'package_item', 'multi_edit', 'publish', 'export',
'mark_item_for_desks', 're_write', 'set_label']);
}));
it('Can edit if the version is zero',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'in_progress',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 0,
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
correct: true,
kill: true,
package_item: false,
move: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['save', 'edit', 'duplicate', 'spike', 'add_to_current',
'mark_item_for_highlight', 'package_item', 'multi_edit', 'publish', 'export',
're_write', 'set_label']);
}));
it('Cannot send item if the user has no move privileges',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'in_progress',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 1,
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
correct: true,
kill: true,
package_item: false,
move: false,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['save', 'edit', 'duplicate', 'spike', 'add_to_current',
're_write', 'mark_item_for_highlight', 'package_item', 'multi_edit', 'publish',
'export', 'set_label']);
}));
it('Can send item if the version is greater than zero',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'in_progress',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 1,
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
correct: true,
kill: true,
package_item: false,
move: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['save', 'edit', 'duplicate', 'spike', 'add_to_current',
're_write', 'mark_item_for_highlight', 'package_item', 'multi_edit', 'publish',
'send', 'export', 'set_label']);
}));
it('Can edit an embargoed item.',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'in_progress',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 1,
embargo: Date(),
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
correct: true,
kill: true,
package_item: true,
move: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['save', 'edit', 'duplicate', 'spike', 'add_to_current',
'mark_item_for_highlight', 'multi_edit', 'publish', 'send', 'export', 'set_label']);
}));
it('Can edit a scheduled item.',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'in_progress',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 1,
publish_schedule: Date(),
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
correct: true,
kill: true,
package_item: true,
move: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['save', 'edit', 'duplicate', 'spike', 'add_to_current',
'mark_item_for_highlight', 'multi_edit', 'publish', 'send', 'export', 're_write',
'set_label']);
}));
it('Can rewrite and package a scheduled item after its publish schedule has passed.',
inject((privileges, desks, authoring, $q, $rootScope) => {
var pastTimestamp = new Date();
pastTimestamp.setHours(pastTimestamp.getHours() - 1);
var item = {
_id: 'test',
state: 'published',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 2,
publish_schedule: pastTimestamp,
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
correct: true,
kill: true,
package_item: true,
move: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['correct', 'kill', 'duplicate', 'add_to_current', 're_write',
'view', 'package_item', 'mark_item_for_highlight', 'multi_edit', 'resend', 'export',
'set_label']);
}));
it('Create broadcast icon is available for text item.',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'published',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 10,
genre: [],
archive_item: {
_id: 'test',
state: 'published',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 10,
genre: [],
},
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
correct: true,
kill: true,
archive_broadcast: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['duplicate', 're_write', 'mark_item_for_highlight', 'multi_edit',
'correct', 'kill', 'package_item', 'view', 'create_broadcast', 'add_to_current', 'resend',
'export', 'set_label']);
}));
it('Create broadcast icon is available for text item with genre Article.',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'published',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 10,
genre: [{name: 'Article', value: 'Article'}],
archive_item: {
_id: 'test',
state: 'published',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 10,
genre: [{name: 'Article', value: 'Article'}],
},
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
correct: true,
kill: true,
archive_broadcast: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['duplicate', 're_write', 'mark_item_for_highlight', 'multi_edit',
'correct', 'kill', 'package_item', 'view', 'create_broadcast', 'add_to_current', 'resend',
'export', 'set_label']);
}));
it('Create broadcast icon is not available for broadcast item',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'published',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 10,
genre: [
{name: 'Interview', value: 'Interview'},
{name: 'Broadcast Script', value: 'Broadcast Script'},
],
archive_item: {
_id: 'test',
state: 'published',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 10,
genre: [
{name: 'Interview', value: 'Interview'},
{name: 'Broadcast Script', value: 'Broadcast Script'},
],
},
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
correct: true,
kill: true,
archive_broadcast: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['duplicate', 'mark_item_for_highlight', 'multi_edit',
'correct', 'kill', 'package_item', 'view', 'add_to_current', 'resend', 'export',
're_write', 'set_label']);
}));
it('Export action is available for text item.',
inject((privileges, authoring, $rootScope) => {
var item = {
_id: 'test',
state: 'in_progress',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
};
var userPrivileges = {
mark_item: false,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['re_write', 'save', 'edit', 'package_item',
'multi_edit', 'add_to_current', 'export', 'set_label']);
}));
it('Export action is not available for non-text item.',
inject((privileges, authoring, $rootScope) => {
var item = {
_id: 'test',
state: 'in_progress',
flags: {marked_for_not_publication: false},
type: 'composite',
task: {
desk: 'desk1',
},
};
var userPrivileges = {
mark_item: false,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['save', 'edit', 'package_item', 'multi_edit', 'add_to_current',
'set_label']);
}));
it('rewrite is not allowed if re-written item exists.',
inject((privileges, desks, authoring, $q, $rootScope) => {
var item = {
_id: 'test',
state: 'published',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 10,
rewritten_by: '123',
genre: [
{name: 'Interview', value: 'Interview'},
],
archive_item: {
_id: 'test',
state: 'published',
flags: {marked_for_not_publication: false},
type: 'text',
task: {
desk: 'desk1',
},
_current_version: 10,
rewritten_by: '123',
genre: [
{name: 'Interview', value: 'Interview'},
],
},
};
var userPrivileges = {
duplicate: true,
mark_item: false,
spike: true,
unspike: true,
mark_for_highlights: true,
unlock: true,
publish: true,
correct: true,
kill: true,
archive_broadcast: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
var itemActions = authoring.itemActions(item);
allowedActions(itemActions, ['duplicate', 'mark_item_for_highlight', 'multi_edit',
'create_broadcast', 'correct', 'kill', 'package_item', 'view', 'add_to_current',
'resend', 'export', 'set_label']);
}));
it('cannot mark or highlight if the item is not a text item',
inject((privileges, desks, authoring, $q, $rootScope) => {
let item = {
_id: 'test',
type: 'text',
task: {
desk: 'desk1',
},
};
let userPrivileges = {
mark_for_highlights: true,
mark_for_desks: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
let itemActions = authoring.itemActions(item);
expect(itemActions.mark_item_for_desks).toBeTruthy();
expect(itemActions.mark_item_for_highlight).toBeTruthy();
item.type = 'picture';
itemActions = authoring.itemActions(item);
expect(itemActions.mark_item_for_desks).toBeFalsy();
expect(itemActions.mark_item_for_highlight).toBeFalsy();
}));
});
describe('authoring workspace', () => {
var item, lockedItem;
beforeEach(() => {
item = {_id: 'foo', type: 'text'};
lockedItem = {_id: item._id, _editable: true};
});
beforeEach(window.module('superdesk.apps.authoring'));
beforeEach(window.module('superdesk.apps.searchProviders'));
beforeEach(inject(($q, authoring) => {
spyOn(authoring, 'open').and.returnValue($q.when(lockedItem));
}));
it('can edit item', inject((superdeskFlags, authoringWorkspace, $rootScope) => {
expect(superdeskFlags.flags.authoring).toBeFalsy();
authoringWorkspace.edit(item);
$rootScope.$apply();
expect(authoringWorkspace.item).toBe(lockedItem);
expect(authoringWorkspace.action).toBe('edit');
expect(authoringWorkspace.getItem()).toBe(lockedItem);
expect(authoringWorkspace.getAction()).toBe('edit');
expect(superdeskFlags.flags.authoring).toBeTruthy();
authoringWorkspace.close(true);
expect(authoringWorkspace.item).toBe(null);
expect(authoringWorkspace.getItem()).toBe(null);
expect(superdeskFlags.flags.authoring).toBeFalsy();
}));
it('can open item in readonly mode', inject((superdeskFlags, authoringWorkspace, $rootScope,
authoring, $q) => {
lockedItem._editable = false;
authoringWorkspace.view(item);
$rootScope.$apply();
expect(authoringWorkspace.item).toBe(lockedItem);
expect(authoringWorkspace.action).toBe('view');
expect(superdeskFlags.flags.authoring).toBe(true);
lockedItem._editable = true;
}));
it('can kill an item', inject((authoringWorkspace, $rootScope) => {
authoringWorkspace.kill(item);
$rootScope.$apply();
expect(authoringWorkspace.item).toBe(lockedItem);
expect(authoringWorkspace.action).toBe('kill');
}));
it('can handle edit.item activity', inject((superdesk, authoringWorkspace, $rootScope) => {
superdesk.intent('edit', 'item', item);
$rootScope.$digest();
expect(authoringWorkspace.item).toBe(lockedItem);
expect(authoringWorkspace.action).toBe('edit');
}));
it('can open an item for edit or readonly', inject((authoringWorkspace, authoring, send, $q, $rootScope) => {
item.state = 'draft';
authoringWorkspace.open(item);
expect(authoring.open).toHaveBeenCalledWith(item._id, false, null, 'edit');
item.state = 'published';
authoringWorkspace.open(item);
expect(authoring.open).toHaveBeenCalledWith(item._id, true, null, 'view');
var archived = {_id: 'bar'};
spyOn(send, 'one').and.returnValue($q.when(archived));
item._type = 'ingest';
authoringWorkspace.open(item);
expect(send.one).toHaveBeenCalledWith(item);
$rootScope.$digest();
expect(authoring.open).toHaveBeenCalledWith(archived._id, false, null, 'edit');
}));
describe('init', () => {
it('can open item from $location for editing', inject((api, $location, $rootScope, $injector) => {
$location.search('item', item._id);
$location.search('action', 'edit');
$rootScope.$digest();
var authoringWorkspace = $injector.get('authoringWorkspace');
$rootScope.$digest();
expect(authoringWorkspace.item).toBe(lockedItem);
expect(authoringWorkspace.action).toBe('edit');
}));
it('can open item from $location for viewing', inject(($location, $rootScope, $injector) => {
$location.search('item', 'bar');
$location.search('action', 'view');
$rootScope.$digest();
var authoringWorkspace = $injector.get('authoringWorkspace');
$rootScope.$digest();
expect(authoringWorkspace.item).toBe(lockedItem);
expect(authoringWorkspace.action).toBe('view');
}));
});
});
describe('authoring container directive', () => {
beforeEach(window.module('superdesk.apps.authoring'));
beforeEach(window.module('superdesk.templates-cache'));
beforeEach(window.module('superdesk.apps.searchProviders'));
beforeEach(inject(($templateCache) => {
// avoid loading of authoring
$templateCache.put('scripts/apps/authoring/views/authoring-container.html', '<div></div>');
}));
var item, lockedItem, scope, elem, iscope;
beforeEach(inject(($compile, $rootScope, $q, authoring) => {
item = {_id: 'foo'};
lockedItem = {_id: item._id, _editable: true};
spyOn(authoring, 'open').and.returnValue($q.when(lockedItem));
scope = $rootScope.$new();
elem = $compile('<div sd-authoring-container></div>')(scope);
scope.$digest();
iscope = elem.isolateScope();
}));
it('handles edit', inject((authoringWorkspace, $rootScope) => {
authoringWorkspace.edit(item);
$rootScope.$digest();
// the workspace item is reset to null in the first digest cycle, before the new item is set
expect(iscope.authoring.item).toBe(null);
$rootScope.$digest();
expect(iscope.authoring.item).toBe(lockedItem);
expect(iscope.authoring.action).toBe('edit');
expect(iscope.authoring.state.opened).toBe(true);
authoringWorkspace.close(true);
$rootScope.$digest();
expect(iscope.authoring.item).toBe(null);
expect(iscope.authoring.state.opened).toBe(false);
}));
it('handles view', inject((authoringWorkspace, $rootScope) => {
lockedItem._editable = false;
authoringWorkspace.view(item);
$rootScope.$digest();
$rootScope.$digest();
expect(iscope.authoring.item).toBe(lockedItem);
expect(iscope.authoring.action).toBe('view');
expect(iscope.authoring.state.opened).toBe(true);
lockedItem._editable = true;
}));
it('handles kill', inject((authoringWorkspace, $rootScope) => {
authoringWorkspace.kill(item);
$rootScope.$digest();
$rootScope.$digest();
expect(iscope.authoring.item).toBe(lockedItem);
expect(iscope.authoring.action).toBe('kill');
}));
it('handles correct', inject((authoringWorkspace, $rootScope) => {
authoringWorkspace.correct(item);
$rootScope.$digest();
$rootScope.$digest();
expect(iscope.authoring.item).toBe(lockedItem);
expect(iscope.authoring.action).toBe('correct');
}));
describe('authoring embed directive', () => {
beforeEach(inject(($templateCache) => {
$templateCache.put('scripts/apps/authoring/views/authoring.html', '<div></div>');
}));
it('applies kill template',
inject((authoringWorkspace, $rootScope, api, $compile, $q) => {
authoringWorkspace.kill(item);
$rootScope.$digest();
$rootScope.$digest();
expect(iscope.authoring.item).toBe(lockedItem);
expect(iscope.authoring.action).toBe('kill');
spyOn(api, 'save').and.returnValue($q.when({}));
var elemEmbed = $compile('<div sd-authoring-embedded data-item="authoring.item"' +
' data-action="authoring.action"></div>')(iscope);
iscope.$digest();
var iscopeEmbed = elemEmbed.isolateScope();
expect(iscopeEmbed.action).toBe('kill');
expect(api.save)
.toHaveBeenCalledWith('content_templates_apply', {}, {
template_name: 'kill',
item: {_id: 'foo'},
}, {});
}));
});
});
describe('authoring themes', () => {
beforeEach(window.module('superdesk.core.preferences'));
beforeEach(window.module('superdesk.apps.authoring'));
beforeEach(window.module('superdesk.apps.searchProviders'));
beforeEach(inject(($q, preferencesService) => {
spyOn(preferencesService, 'get').and.returnValue($q.when({'editor:theme': ['theme:proofreadTheme']}));
}));
var normalTheme = {
cssClass: '',
label: 'Default',
key: 'default',
},
darkTheme = {
cssClass: 'dark-theme-mono',
label: 'Dark monospace',
key: 'dark-mono',
};
it('can define normal theme', inject((authThemes) => {
spyOn(authThemes, 'save');
authThemes.save('theme', normalTheme);
expect(authThemes.save).toHaveBeenCalledWith('theme', normalTheme);
}));
it('can define proofread theme', inject((authThemes) => {
spyOn(authThemes, 'save');
authThemes.save('proofreadTheme', darkTheme);
expect(authThemes.save).toHaveBeenCalledWith('proofreadTheme', darkTheme);
}));
it('can get normal theme', inject((authThemes, $rootScope) => {
var theme = null;
authThemes.get('theme').then((_theme) => {
theme = _theme;
});
$rootScope.$digest();
expect(theme).not.toBe(null);
}));
it('can get proofread theme', inject((authThemes, $rootScope) => {
var proofreadTheme = null;
<|fim▁hole|> authThemes.get('proofreadTheme').then((_theme) => {
proofreadTheme = _theme;
});
$rootScope.$digest();
expect(proofreadTheme).not.toBe(null);
}));
});
describe('send item directive', () => {
beforeEach(window.module(($provide) => {
$provide.constant('config', {
server: {url: undefined},
iframely: {key: '123'},
editor: {},
features: {onlyEditor3: false},
});
}));
beforeEach(window.module('superdesk.core.editor3'));
beforeEach(window.module('superdesk.apps.editor2'));
beforeEach(window.module('superdesk.core.preferences'));
beforeEach(window.module('superdesk.apps.authoring'));
beforeEach(window.module('superdesk.templates-cache'));
beforeEach(window.module('superdesk.core.api'));
beforeEach(window.module('superdesk.apps.vocabularies'));
beforeEach(window.module('superdesk.apps.searchProviders'));
beforeEach(window.module('superdesk.apps.extension-points'));
beforeEach(inject(($templateCache) => {
$templateCache.put('scripts/apps/authoring/views/send-item.html', '');
}));
it('can hide embargo if user does not have the privilege',
inject(($compile, $rootScope, privileges) => {
var scope, elem, iscope;
scope = $rootScope.$new();
scope.item = {
_id: 'foo',
type: 'text',
state: 'in-progress',
};
var userPrivileges = {
embargo: false,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
scope.action = 'edit';
elem = $compile('<div sd-send-item data-item="item" data-mode="authoring" ' +
'data-action="action"></div>')(scope);
scope.$digest();
iscope = elem.isolateScope();
expect(iscope.showPublishSchedule()).toBe(true);
expect(iscope.showEmbargo()).toBe(false);
}));
it('can show embargo and publish schedule for text item',
inject(($compile, $rootScope, privileges) => {
var scope, elem, iscope;
scope = $rootScope.$new();
scope.item = {
_id: 'foo',
type: 'text',
state: 'in-progress',
};
var userPrivileges = {
embargo: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
scope.action = 'edit';
elem = $compile('<div sd-send-item data-item="item" data-mode="authoring" ' +
'data-action="action"></div>')(scope);
scope.$digest();
iscope = elem.isolateScope();
expect(iscope.showPublishSchedule()).toBe(true);
expect(iscope.showEmbargo()).toBe(true);
}));
it('can show embargo date',
inject(($compile, $rootScope, privileges) => {
var scope, elem, iscope;
scope = $rootScope.$new();
scope.item = {
_id: 'foo',
type: 'text',
state: 'in-progress',
embargo_date: Date(),
};
var userPrivileges = {
embargo: true,
};
privileges.setUserPrivileges(userPrivileges);
$rootScope.$digest();
scope.action = 'edit';
elem = $compile('<div sd-send-item data-item="item" data-mode="authoring" ' +
'data-action="action"></div>')(scope);
scope.$digest();
iscope = elem.isolateScope();
expect(iscope.showPublishSchedule()).toBe(false);
expect(iscope.showEmbargo()).toBe(true);
}));
it('can show published schedule date',
inject(($compile, $rootScope) => {
var scope, elem, iscope;
scope = $rootScope.$new();
scope.item = {
_id: 'foo',
type: 'text',
state: 'in-progress',
publish_schedule_date: Date(),
};
scope.action = 'edit';
elem = $compile('<div sd-send-item data-item="item" data-mode="authoring" ' +
'data-action="action"></div>')(scope);
scope.$digest();
iscope = elem.isolateScope();
expect(iscope.showPublishSchedule()).toBe(true);
expect(iscope.showEmbargo()).toBe(false);
}));
it('can get last destination desk and stage',
inject(($compile, $rootScope, preferencesService, $q) => {
var scope, elem, iscope;
scope = $rootScope.$new();
scope.item = {
_id: '123456',
type: 'text',
};
var destination = {desk: '123', stage: '456'};
spyOn(preferencesService, 'get').and.returnValue($q.when(destination));
scope.action = 'edit';
elem = $compile('<div sd-send-item data-item="item" data-mode="authoring" ' +
'data-action="action"></div>')(scope);
scope.$digest();
iscope = elem.isolateScope();
iscope.destination_last = null;
preferencesService.get().then((prefs) => {
iscope.destination_last = {
desk: prefs.desk,
stage: prefs.stage,
};
});
iscope.$digest();
expect(iscope.destination_last.desk).toEqual('123');
expect(iscope.destination_last.stage).toEqual('456');
}));
it('can show send and publish button',
inject(($compile, $rootScope, config) => {
var scope, elem, iscope;
scope = $rootScope.$new();
scope.item = {
_id: 'foo',
type: 'text',
state: 'in-progress',
task: {
desk: '123',
stage: '456',
},
_current_version: 1,
};
scope.action = 'edit';
elem = $compile('<div sd-send-item data-item="item" data-orig="item" data-mode="authoring" ' +
'data-action="action"></div>')(scope);
scope.$digest();
iscope = elem.isolateScope();
expect(iscope.canSendAndPublish()).toBeFalsy();
config.ui = {sendAndPublish: 1};
expect(iscope.canSendAndPublish()).toBeFalsy();
iscope.selectedDesk = {_id: '123'};
iscope.selectedStage = {_id: '456'};
expect(iscope.canSendAndPublish()).toBeFalsy();
iscope.selectedDesk = {_id: '123'};
iscope.selectedStage = {_id: '4566'};
iscope.itemActions = {publish: 1};
expect(iscope.canSendAndPublish()).toBeFalsy();
iscope.selectedDesk = {_id: '1234'};
iscope.selectedStage = {_id: '456'};
expect(iscope.canSendAndPublish()).toBeTruthy();
}));
describe('Send And Publish', () => {
var scope, iScope, elem, publish;
var movedItem = {
_id: 'foo',
type: 'text',
state: 'in-progress',
task: {
desk: 'New Desk',
stage: 'New Stage',
},
_current_version: 2,
_etag: '1111',
_locked: true,
};
var selectedDesk = {
_id: 'New Desk', name: 'new desk',
};
var selectedStage = {
_id: 'New Stage', name: 'new stage',
};
beforeEach(inject(($q, $compile, $rootScope, api, editor) => {
spyOn(api, 'find').and.returnValue($q.when({}));
spyOn(api, 'save').and.returnValue($q.when({task: {desk: 'new', stage: 'new'}}));
scope = $rootScope.$new();
scope.item = {
_id: 'foo',
type: 'text',
state: 'in-progress',
task: {
desk: '123',
stage: '456',
},
_current_version: 1,
_etag: '123',
};
scope.action = 'edit';
scope.publish = function() {
return publish;
};
elem = $compile('<div sd-send-item data-item="item" data-orig="item" data-mode="authoring" ' +
'data-action="action" data-publish="publish()"></div>')(scope);
scope.$digest();
iScope = elem.isolateScope();
iScope.beforeSend = function() {
return $q.when({});
};
}));
it('can send and publish item to different desk', inject((authoring, $q, authoringWorkspace) => {
publish = true; // publish succeeds
iScope.selectedDesk = selectedDesk;
iScope.selectedStage = selectedStage;
spyOn(authoring, 'open').and.returnValue($q.when(movedItem));
spyOn(authoringWorkspace, 'close').and.returnValue($q.when(true));
expect(iScope.orig.task.desk).toBe('123');
expect(iScope.orig.task.stage).toBe('456');
expect(iScope.orig._etag).toBe('123');
iScope.sendAndPublish();
iScope.$digest();
expect(authoring.open).toHaveBeenCalledWith('foo', false);
expect(authoringWorkspace.close).toHaveBeenCalledWith(false);
expect(iScope.orig.task.desk).toBe(selectedDesk._id);
expect(iScope.orig.task.stage).toBe(selectedStage._id);
expect(iScope.orig._locked).toBe(true);
expect(iScope.orig._etag).toBe('1111');
}));
it('can send and publish item to different desk when publish fails',
inject((authoring, $q, authoringWorkspace, notify) => {
publish = false; // publish fails
iScope.selectedDesk = selectedDesk;
iScope.selectedStage = selectedStage;
spyOn(authoring, 'open').and.returnValue($q.when(movedItem));
spyOn(authoringWorkspace, 'close').and.returnValue($q.when(true));
expect(iScope.orig.task.desk).toBe('123');
expect(iScope.orig.task.stage).toBe('456');
expect(iScope.orig._etag).toBe('123');
iScope.sendAndPublish();
iScope.$digest();
expect(authoring.open).toHaveBeenCalledWith('foo', false);
expect(authoringWorkspace.close).not.toHaveBeenCalledWith(false);
expect(iScope.orig.task.desk).toBe(selectedDesk._id);
expect(iScope.orig.task.stage).toBe(selectedStage._id);
expect(iScope.orig._locked).toBe(true);
expect(iScope.orig._etag).toBe('1111');
}));
it('can send and publish item to different desk but locking failed',
inject((authoring, $q, authoringWorkspace, notify) => {
publish = true; // publish succeeds
movedItem._locked = false; // locking failed.
iScope.selectedDesk = selectedDesk;
iScope.selectedStage = selectedStage;
spyOn(authoring, 'open').and.returnValue($q.when(movedItem));
spyOn(authoringWorkspace, 'close').and.returnValue($q.when(true));
spyOn(notify, 'error');
expect(iScope.orig.task.desk).toBe('123');
expect(iScope.orig.task.stage).toBe('456');
expect(iScope.orig._etag).toBe('123');
iScope.sendAndPublish();
iScope.$digest();
expect(authoring.open).toHaveBeenCalledWith('foo', false);
expect(authoringWorkspace.close).not.toHaveBeenCalledWith(false);
expect(iScope.orig.task.desk).toBe(selectedDesk._id);
expect(iScope.orig.task.stage).toBe(selectedStage._id);
expect(iScope.orig._locked).toBe(false);
expect(iScope.orig._etag).toBe('1111');
expect(notify.error).toHaveBeenCalledWith('Failed to send and publish.');
}));
});
});<|fim▁end|> | |
<|file_name|>monitors_test.py<|end_file_name|><|fim▁begin|># Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
import mock
from openhtf import plugs
from openhtf.core import base_plugs
from openhtf.core import monitors
from six.moves import queue
class EmptyPlug(base_plugs.BasePlug):
pass
class TestMonitors(unittest.TestCase):
def setUp(self):
super(TestMonitors, self).setUp()
self.test_state = mock.MagicMock(execution_uid='01234567890')
def provide_plugs(plug_map):
return {name: cls() for name, cls in plug_map}
self.test_state.plug_manager.provide_plugs = provide_plugs
def test_basics(self):
# Use a queue to ensure that we got at least 1 complete response. An Event
# would cause a race condition (we'd need 2 Events), so a Queue is easier.
q = queue.Queue()<|fim▁hole|> q.put(1)
return 1
@monitors.monitors('meas', monitor_func, poll_interval_ms=100)
def phase(test):
del test # Unused.
while q.qsize() < 2:
time.sleep(0.1)
phase(self.test_state)
name, first_meas, _ = self.test_state.mock_calls[0]
# For some reason, self.test_state.test_api differs between what monitors.py
# gets and what the monitor-phase/monitored-phase get in 1/100 runs. As a
# result, we have to use test_state.mock_calls directly and just assert the
# name is correct.
assert name == 'test_api.measurements.meas.__setitem__'
# Measurement time is at the end of the monitor func, which can take
# upwards of 100 milliseconds depending on how busy the infrastructure is,
# so we only check that it's less than a second.
self.assertLessEqual(
first_meas[0], 100, msg='At time 0, there should be a call made.')
self.assertEqual(
1, first_meas[1], msg="And it should be the monitor func's return val")
def testPlugs(self):
q = queue.Queue()
@plugs.plug(empty=EmptyPlug)
def monitor(test, empty):
del test # Unused.
del empty # Unused.
q.put(2)
return 2
@monitors.monitors('meas', monitor, poll_interval_ms=100)
def phase(test):
del test # Unused.
while q.qsize() < 2:
time.sleep(0.1)
phase(self.test_state)
name, first_meas, _ = self.test_state.mock_calls[0]
assert name == 'test_api.measurements.meas.__setitem__'
# Measurement time is at the end of the monitor func, which can take
# upwards of 100 milliseconds depending on how busy the infrastructure is,
# so we only check that it's less than a second.
self.assertLessEqual(
first_meas[0], 100, msg='At time 0, there should be a call made.')
self.assertEqual(
2, first_meas[1], msg="And it should be the monitor func's return val")<|fim▁end|> |
def monitor_func(test):
del test # Unused. |
<|file_name|>PlaceholderImage.js<|end_file_name|><|fim▁begin|>import cx from 'clsx'
import PropTypes from 'prop-types'
import React from 'react'
import { customPropTypes, getElementType, getUnhandledProps, useKeyOnly } from '../../lib'
/**
* A placeholder can contain an image.
*/
function PlaceholderImage(props) {
const { className, square, rectangular } = props
const classes = cx(
useKeyOnly(square, 'square'),
useKeyOnly(rectangular, 'rectangular'),
'image',
className,
)
const rest = getUnhandledProps(PlaceholderImage, props)
const ElementType = getElementType(PlaceholderImage, props)
return <ElementType {...rest} className={classes} />
}
PlaceholderImage.propTypes = {
/** An element type to render as (string or function). */
as: PropTypes.elementType,
/** Additional classes. */
className: PropTypes.string,
/** An image can modify size correctly with responsive styles. */
square: customPropTypes.every([customPropTypes.disallow(['rectangular']), PropTypes.bool]),
/** An image can modify size correctly with responsive styles. */
rectangular: customPropTypes.every([customPropTypes.disallow(['square']), PropTypes.bool]),<|fim▁hole|><|fim▁end|> | }
export default PlaceholderImage |
<|file_name|>test_views.py<|end_file_name|><|fim▁begin|>import re
from django.core.urlresolvers import reverse
def test_view_with_scss_file(client, precompiled):
"""
Test view that renders *SCSS file* that *imports SCSS file from another Django app*.
:param client: ``pytest-django`` fixture: Django test client
:param precompiled: custom fixture that asserts pre-compiled content
"""
response = client.get(reverse('scss-file'))
assert response.status_code == 200
assert precompiled('app/layout.scss', 'css').strip() == \
'.title {\n font: bold 30px Arial, sans-serif;\n}'
def test_view_with_inline_scss(client):
"""
Test view that renders *inline SCSS* that *imports SCSS file from another Django app*.
:param client: ``pytest-django`` fixture: Django test client
"""
response = client.get(reverse('scss-inline'))
assert response.status_code == 200
assert re.search(
r'<style type="text/css">.title \{\n\s*font: bold 30px Arial, sans-serif;\n\}\s*</style>',
response.content.decode('utf8')
)
def test_view_with_es6_file(client, precompiled):
"""
Test view that renders *ES6 file* into *ES5 file*.
:param client: ``pytest-django`` fixture: Django test client
:param precompiled: custom fixture that asserts pre-compiled content
"""
response = client.get(reverse('es6-file'))
assert response.status_code == 200
assert precompiled('app/scripts.js', 'js') == (
'(function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=='
'"function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f='
'new Error("Cannot find module \'"+o+"\'");throw f.code="MODULE_NOT_FOUND",f}'
'var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];'
'return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=='
'"function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:['
'function(require,module,exports){\n'
'\'use strict\';\n'
'\n'
'var _framework = require(\'base/framework\');\n'
'\n'
'var _framework2 = _interopRequireDefault(_framework);\n'
'\n'
'function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : '
'{ default: obj }; }\n'
'\n'
'new _framework2.default();\n'
'new _framework2.default(\'1.0.1\');\n'
'\n'
'},{"base/framework":2}],2:[function(require,module,exports){\n'
'\'use strict\';\n'
'\n'
'Object.defineProperty(exports, "__esModule", {\n'
' value: true\n'
'});\n'
'\n'
'function _classCallCheck(instance, Constructor) {'
' if (!(instance instanceof Constructor)) {'
' throw new TypeError("Cannot call a class as a function"); } }\n'
'\n'
'var version = exports.version = \'1.0\';\n'
'\n'<|fim▁hole|> '};\n'
'\n'
'exports.default = _class;\n'
'\n'
'},{}]},{},[1]);\n'
)
def test_view_with_inline_es6(client):
"""
Test view that renders *inline ES6* into *inline ES5*.
:param client: ``pytest-django`` fixture: Django test client
"""
response = client.get(reverse('es6-inline'))
assert response.status_code == 200
assert b'"use strict";\n' \
b'\n' \
b'var square = function square(x) {\n' \
b' return x * x;\n' \
b'};\n'\
b'console.log("Square of 2:", square(2));' in response.content<|fim▁end|> | 'var _class = function _class(customVersion) {\n'
' _classCallCheck(this, _class);\n'
'\n'
' console.log(\'Framework v\' + (customVersion || version) + \' initialized\');\n' |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""Hello World API implemented using Google Cloud Endpoints.
Defined here are the ProtoRPC messages needed to define Schemas for methods
as well as those methods defined in an API.
"""
import endpoints
from protorpc import messages, message_types, remote # TODO remove messages and message types when possible
from google.appengine.ext import ndb
from libs.endpoints_proto_datastore.ndb import EndpointsModel
WEB_CLIENT_ID = '' # TODO make this secure
ANDROID_CLIENT_ID = '' # TODO figure out android at some point
IOS_CLIENT_ID = '' # probably not, unless I get some help
ANDROID_AUDIENCE = WEB_CLIENT_ID
class Note(EndpointsModel):
title = ndb.StringProperty()
content = ndb.StringProperty()
date_created = ndb.DateTimeProperty(auto_now_add=True)
owner = ndb.UserProperty()
"""
class NoteMessage(messages.Message):
title = messages.StringField(1)
content = messages.StringField(2)
# Hotness
hotness = messages.IntegerField(3, default=0)
# Color
color = messages.BytesField(4, default='#ffffff')
# Attach
# Due Date
due_date = message_types.DateTimeField(5)
# Reminder
reminders = message_types.DateTimeField(6, repeated=True)
# Task : is a note
# Note
sub_notes = messages.MessageField('Note', 7, repeated=True)
# Image : is a file
# File
# A URL to a file. The file can probably be served by the datastore.
files = messages.BytesField(8, repeated=True)
#dsid = messages.BytesField(12, required=True)
#date_created = message_types.DateTimeField(9, required=True)
#date_updated = message_types.DateTimeField(10, required=True)
#date_accessed = message_types.DateTimeField(11, required=True)
class NoteCollectionMessage(messages.Message):
items = messages.MessageField(Note, 1, repeated=True)
FOO_NOTES = NoteCollection(items=[
Note(#dsid='esgihsel',
title='my first note!',
content='this is my very first note'),
Note(#dsid='3f2o02hg',
title='my second note!',
content='i have more notes'),
Note(#dsid='0evwhfwf',
title='my third note!',
content='',
color=None,<|fim▁hole|> title='my first note'),
])
"""
@endpoints.api(name='helloendpoints', version='v1',
allowed_client_ids=[WEB_CLIENT_ID, ANDROID_CLIENT_ID,
endpoints.API_EXPLORER_CLIENT_ID],
audiences=[ANDROID_AUDIENCE],
scopes=[endpoints.EMAIL_SCOPE])
class EndpointsTestAPI(remote.Service):
"""This is a test for Endpoints, a learning experience."""
#@endpoints.method(message_types.VoidMessage, NoteCollection,
# path='notes', http_method='GET',
# name='notes.list')
#def notes_list(self, request):
# return FOO_NOTES
@Note.method(user_required=True,
path='note', http_method='PUT', name='note.add')
def note_add(self, note):
note.owner = endpoints.get_current_user()
note.put()
return note
@Note.query_method(user_required=True,
query_fields=('limit', 'order', 'pageToken'),
path='notes', http_method='GET', name='notes.list')
def notes_list(self, query):
return query.filter(Note.owner == endpoints.get_current_user())
application = endpoints.api_server([EndpointsTestAPI])<|fim▁end|> | hotness=2),
Note(#dsid='rkbn31ha', |
<|file_name|>server.js<|end_file_name|><|fim▁begin|>angular.module('qmsk.e2.server', [
'qmsk.e2',
'qmsk.e2.console',
'qmsk.e2.web',
'ngResource',
'ngRoute',
'jsonFormatter',
'ui.bootstrap',
])
.config(function($routeProvider) {
$routeProvider
.when('/main', {
templateUrl: '/static/qmsk.e2/server/main.html',
controller: 'MainCtrl',
reloadOnSearch: false,
})
.when('/sources', {
templateUrl: '/static/qmsk.e2/server/sources.html',
controller: 'SourcesCtrl',
})
.when('/screens', {
templateUrl: '/static/qmsk.e2/server/screens.html',
controller: 'ScreensCtrl',
})
.when('/auxes', {
templateUrl: '/static/qmsk.e2/server/auxes.html',
controller: 'AuxesCtrl',
})
.when('/presets', {
templateUrl: '/static/qmsk.e2/server/presets.html',
controller: 'PresetsCtrl',
reloadOnSearch: false,
})
.when('/system', {
templateUrl: '/static/qmsk.e2/server/system.html',
controller: 'SystemCtrl',
})
.otherwise({
redirectTo: '/main',
});
})
.factory('Preset', function($resource) {
return $resource('/api/presets/:id', { }, {
get: {
method: 'GET',
url: '/api/presets',
},
all: {
method: 'GET',
isArray: true,
},
query: {
method: 'GET',
isArray: false,
},
activate: {
method: 'POST',
url: '/api/presets',
},
}, {stripTrailingSlashes: false});
})
.filter('dimensions', function() {
return function(dimensions) {
if (dimensions && dimensions.width && dimensions.height) {
return dimensions.width + "x" + dimensions.height;
} else {
return null;
}
};
})
.directive('e2Source', function() {
return {
restrict: 'AE',
scope: {
source: '=source',
input: '=input',
detail: '=detail',
},
templateUrl: '/static/qmsk.e2/server/source.html',
};
})
.controller('MainCtrl', function($scope, $location) {
$scope.sources = [];
$scope.$watch('state.System', function(system) {
// compute a merged state mapping sources to their active destinations
// TODO: aux destinations
$scope.sources = $.map(system.SrcMgr.SourceCol, function(source, sourceID){
var item = {
id: sourceID,
type: source.SrcType,
name: source.Name,
source: source,
active: false,
preview: [],
program: [],
};
if (source.SrcType == "input") {
item.input = system.SrcMgr.InputCfgCol[source.InputCfgIndex];
}
$.each(system.DestMgr.ScreenDestCol, function(screenID, screen) {
var output = {
type: "screen",
id: screenID,
name: screen.Name,
active: screen.IsActive > 0,
};
$.each(screen.LayerCollection, function(layerID, layer) {
if (layer.PgmMode > 0 && layer.LastSrcIdx == sourceID) {
output.program = true;
}
if (layer.PvwMode > 0 && layer.LastSrcIdx == sourceID) {
output.preview = true;
}
});
if (output.program) {
item.program.push(output);
}
if (output.preview) {
item.preview.push(output);
}
if (output.active && output.preview) {
item.active = true;
}
});
return item;
});
});
$scope.selectOrder = function(order) {
$scope.order = order;
$scope.orderBy = function(){
switch (order) {
case 'source':
return ['-type', 'name'];
case 'preview':
return ['preview_screens', 'program_screens'];
case 'program':
return ['program_screens', 'preview_screens'];
default:
return [];
}
}();
$location.search('order', order || null);
};
$scope.selectOrder($location.search().order || 'source');
})
.controller('SourcesCtrl', function($scope) {
})
.controller('ScreensCtrl', function($scope) {
})
.controller('AuxesCtrl', function($scope) {
})
.controller('PresetsCtrl', function($scope, Preset, $location, Console) {
// size
$scope.displaySize = $location.search().size || 'normal';
$scope.$watch('displaySize', function(displaySize) {
$location.search('size', displaySize);
});
// collapsing
$scope.showGroup = $location.search().group || null;
$scope.collapseGroups = {};
$scope.selectGroup = function(groupID) {
$scope.collapseGroups = {};
if (groupID != $scope.showGroup) {
$scope.showGroup = groupID;
$location.search('group', groupID);
}
}
$scope.clearGroup = function() {
$scope.collapseGroups = {};
$scope.showGroup = null;
$location.search('group', null);
};
$scope.toggleGroup = function(groupID) {
$scope.collapseGroups[groupID] = !$scope.collapseGroups[groupID];
};
// grouping
$scope.groupBy = $location.search().groupBy || 'sno';
$scope.$watch('groupBy', function(groupBy) {
$location.search('groupBy', groupBy);
});
function groupBySno(presets) {
var groups = { };
$.each(presets, function(id, preset) {
var groupID = preset.presetSno.Group;
var groupIndex = preset.presetSno.Index;
preset = $.extend({groupIndex: groupIndex}, preset);
// group it
var group = groups[groupID];
if (!group) {
group = groups[groupID] = {
id: groupID,
name: groupID,
presets: []
};
}
group.presets.push(preset);
});
return $.map(groups, function(group, id){
return group;
});
};
function groupByConsole(presets) {
var groups = { };
$.each($scope.state.System.ConsoleLayoutMgr.ConsoleLayout.PresetBusColl, function(buttonID, button) {
var groupID = Math.floor(button.id / 12); // rows of 12 keys
var group = groups[groupID];
var preset = presets[button.ConsoleButtonTypeIndex];
if (button.ConsoleButtonType != 'preset' || !preset) {
return;
}
// copy with groupIndex, since the same preset can be included multiple times
preset = $.extend({ groupIndex: button.id }, preset);
if (!group) {
group = groups[groupID] = {
id: groupID+1,
name: "Preset PG" + (groupID+1),
presets: []
};
}
group.presets.push(preset);
});
return $.map(groups, function(group) {
return group;
});
}
$scope.groups = [];
$scope.$watchGroup(['groupBy', 'state.System'], function() {
var presets = $scope.state.System.PresetMgr.Preset;
var groups;
if ($scope.groupBy == 'sno') {
groups = groupBySno(presets);
} else if ($scope.groupBy == 'console') {
groups = groupByConsole(presets);
} else {
groups = [{
id: 0,
name: "",
presets: $.map(presets, function(preset){
return $.extend({groupIndex: preset.id}, preset);
}),
}];
}
Console.log("Refresh presets: presets=" + Object.keys(presets).length + ", groupBy=" + $scope.groupBy + ", groups=" + groups.length);
$scope.groups = groups;
});
// active preset on server; reset while changing...
$scope.activePresetID = null;
$scope.$watch('state.System', function(system) {
$scope.activePresetID = system.PresetMgr.LastRecall
});
// select preset for preview
$scope.previewPreset = null
$scope.select = function(preset) {
$scope.activePresetID = null;
Console.log("Recall preset " + preset.id + ": " + preset.name);
Preset.activate({id: preset.id},
function success(r) {
$scope.previewPreset = preset;
},
function error(e) {
}
);
};
// take preset for program
$scope.autoTake = $location.search().autotake || false;
$scope.$watch('autoTake', function(autoTake) {
$location.search('autotake', autoTake ? true : null);
});
$scope.programPreset = null;
$scope.take = function(preset) {
if (preset) {
} else if ($scope.previewPreset) {
preset = $scope.previewPreset;
} else {
return;
}
Console.log("Take preset " + preset.id + ": " + preset.name);
$scope.activePresetID = null;<|fim▁hole|> function error(e) {
}
);
};
// preview -> program
$scope.cut = function() {
Console.log("Cut")
Preset.activate({cut: true});
};
$scope.autotrans = function() {
Console.log("AutoTrans")
Preset.activate({autotrans: 0});
};
})
.controller('SystemCtrl', function($scope) {
})
;<|fim▁end|> | Preset.activate({id: preset.id, live: true},
function success(r) {
$scope.programPreset = preset;
}, |
<|file_name|>all.py<|end_file_name|><|fim▁begin|># Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software<|fim▁hole|># limitations under the License.
"""Starter script for all nova services.
This script attempts to start all the nova services in one process. Each
service is started in its own greenthread. Please note that exceptions and
sys.exit() on the starting of a service are logged and the script will
continue attempting to launch the rest of the services.
"""
import sys
from oslo.config import cfg
from nova import config
from nova.objectstore import s3server
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import service
from nova import utils
from nova.vnc import xvp_proxy
CONF = cfg.CONF
CONF.import_opt('manager', 'nova.conductor.api', group='conductor')
CONF.import_opt('topic', 'nova.conductor.api', group='conductor')
CONF.import_opt('enabled_apis', 'nova.service')
CONF.import_opt('enabled_ssl_apis', 'nova.service')
def main():
config.parse_args(sys.argv)
logging.setup("nova")
LOG = logging.getLogger('nova.all')
utils.monkey_patch()
launcher = service.process_launcher()
# nova-api
for api in CONF.enabled_apis:
try:
should_use_ssl = api in CONF.enabled_ssl_apis
server = service.WSGIService(api, use_ssl=should_use_ssl)
launcher.launch_service(server, workers=server.workers or 1)
except (Exception, SystemExit):
LOG.exception(_('Failed to load %s') % '%s-api' % api)
for mod in [s3server, xvp_proxy]:
try:
launcher.launch_service(mod.get_wsgi_server())
except (Exception, SystemExit):
LOG.exception(_('Failed to load %s') % mod.__name__)
for binary in ['nova-compute', 'nova-network', 'nova-scheduler',
'nova-cert', 'nova-conductor', 'nova-kvmha']:
# FIXME(sirp): Most service configs are defined in nova/service.py, but
# conductor has set a new precedent of storing these configs
# nova/<service>/api.py.
#
# We should update the existing services to use this new approach so we
# don't have to treat conductor differently here.
if binary == 'nova-conductor':
topic = CONF.conductor.topic
manager = CONF.conductor.manager
else:
topic = None
manager = None
try:
launcher.launch_service(service.Service.create(binary=binary,
topic=topic,
manager=manager))
except (Exception, SystemExit):
LOG.exception(_('Failed to load %s'), binary)
launcher.wait()<|fim▁end|> | # distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and |
<|file_name|>task.model.js<|end_file_name|><|fim▁begin|>'use strict';
var mongoose = require('mongoose');
var Schema = mongoose.Schema;
var TaskSchema = new Schema({
name: String,
description: String,
point: Number,
task_users: [{ type: Schema.Types.ObjectId, ref: "TaskUser"}],<|fim▁hole|>});
module.exports = mongoose.model('Task', TaskSchema);<|fim▁end|> | _week: { type: Schema.Types.ObjectId, ref: "Week"} |
<|file_name|>tutorial_quanconv_cifar10.py<|end_file_name|><|fim▁begin|>#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
- 1. This model has 1,068,298 paramters and quantization compression strategy(weight:8 bits, active: 8 bits here, you can change the setting),
after 705 epoches' training with GPU, test accurcy of 84.0% was found.
- 2. For simplified CNN layers see "Convolutional layer (Simplified)"
in read the docs website.
- 3. Data augmentation without TFRecord see `tutorial_image_preprocess.py` !!
Links
-------
.. paper:https://arxiv.org/abs/1712.05877
Note
------
The optimizers between official code and this code are different.
Description
-----------
The images are processed as follows:
.. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.
.. They are approximately whitened to make the model insensitive to dynamic range.
For training, we additionally apply a series of random distortions to
artificially increase the data set size:
.. Randomly flip the image from left to right.
.. Randomly distort the image brightness.
.. Randomly distort the image contrast.
Speed Up
--------
Reading images from disk and distorting them can use a non-trivial amount
of processing time. To prevent these operations from slowing down training,
we run them inside 16 separate threads which continuously fill a TensorFlow queue.
"""
import multiprocessing
import time
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import (Dense, Flatten, Input, MaxPool2d, QuanConv2dWithBN, QuanDense)
from tensorlayer.models import Model
tl.logging.set_verbosity(tl.logging.DEBUG)
# Download data, and convert to TFRecord format, see ```tutorial_tfrecord.py```
# prepare cifar10 data
X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
def model(input_shape, n_classes, bitW, bitA):
in_net = Input(shape=input_shape, name='input')
net = QuanConv2dWithBN(64, (5, 5), (1, 1), act='relu', padding='SAME', bitW=bitW, bitA=bitA, name='qcnnbn1')(in_net)
net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net)
net = QuanConv2dWithBN(64, (5, 5), (1, 1), padding='SAME', act='relu', bitW=bitW, bitA=bitA, name='qcnnbn2')(net)
net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net)
net = Flatten(name='flatten')(net)
net = QuanDense(384, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd1relu')(net)
net = QuanDense(192, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd2relu')(net)
net = Dense(n_classes, act=None, name='output')(net)
net = Model(inputs=in_net, outputs=net, name='dorefanet')
return net
# training settings
bitW = 8
bitA = 8
net = model([None, 24, 24, 3], n_classes=10, bitW=bitW, bitA=bitA)
batch_size = 128
n_epoch = 50000
learning_rate = 0.0001
print_freq = 5
n_step_epoch = int(len(y_train) / batch_size)
n_step = n_epoch * n_step_epoch
shuffle_buffer_size = 128
optimizer = tf.optimizers.Adam(learning_rate)
cost = tl.cost.cross_entropy
def generator_train():
inputs = X_train
targets = y_train
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
for _input, _target in zip(inputs, targets):
# yield _input.encode('utf-8'), _target.encode('utf-8')
yield _input, _target
def generator_test():
inputs = X_test
targets = y_test
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
for _input, _target in zip(inputs, targets):
# yield _input.encode('utf-8'), _target.encode('utf-8')
yield _input, _target
def _map_fn_train(img, target):
# 1. Randomly crop a [height, width] section of the image.
img = tf.image.random_crop(img, [24, 24, 3])
# 2. Randomly flip the image horizontally.
img = tf.image.random_flip_left_right(img)
# 3. Randomly change brightness.
img = tf.image.random_brightness(img, max_delta=63)
# 4. Randomly change contrast.
img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
# 5. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
target = tf.reshape(target, ())
return img, target
def _map_fn_test(img, target):
# 1. Crop the central [height, width] of the image.
img = tf.image.resize_with_pad(img, 24, 24)
# 2. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
img = tf.reshape(img, (24, 24, 3))
target = tf.reshape(target, ())
return img, target
def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
with tf.GradientTape() as tape:
y_pred = network(X_batch)
_loss = cost(y_pred, y_batch)
grad = tape.gradient(_loss, network.trainable_weights)
train_op.apply_gradients(zip(grad, network.trainable_weights))
if acc is not None:
_acc = acc(y_pred, y_batch)
return _loss, _acc
else:
return _loss, None
def accuracy(_logits, y_batch):
return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
<|fim▁hole|>
# dataset API and augmentation
train_ds = tf.data.Dataset.from_generator(
generator_train, output_types=(tf.float32, tf.int32)
) # , output_shapes=((24, 24, 3), (1)))
# train_ds = train_ds.repeat(n_epoch)
train_ds = train_ds.shuffle(shuffle_buffer_size)
train_ds = train_ds.prefetch(buffer_size=4096)
train_ds = train_ds.batch(batch_size)
train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count())
# value = train_ds.make_one_shot_iterator().get_next()
test_ds = tf.data.Dataset.from_generator(
generator_test, output_types=(tf.float32, tf.int32)
) # , output_shapes=((24, 24, 3), (1)))
# test_ds = test_ds.shuffle(shuffle_buffer_size)
# test_ds = test_ds.repeat(n_epoch)
test_ds = test_ds.prefetch(buffer_size=4096)
test_ds = test_ds.batch(batch_size)
test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count())
# value_test = test_ds.make_one_shot_iterator().get_next()
for epoch in range(n_epoch):
start_time = time.time()
train_loss, train_acc, n_iter = 0, 0, 0
net.train()
for X_batch, y_batch in train_ds:
_loss, acc = _train_step(net, X_batch, y_batch, cost=cost, train_op=optimizer, acc=accuracy)
train_loss += _loss
train_acc += acc
n_iter += 1
# use training and evaluation sets to evaluate the model every print_freq epoch
if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
print(" train loss: {}".format(train_loss / n_iter))
print(" train acc: {}".format(train_acc / n_iter))
net.eval()
val_loss, val_acc, n_val_iter = 0, 0, 0
for X_batch, y_batch in test_ds:
_logits = net(X_batch) # is_train=False, disable dropout
val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
n_val_iter += 1
print(" val loss: {}".format(val_loss / n_val_iter))
print(" val acc: {}".format(val_acc / n_val_iter))
# use testing data to evaluate the model
net.eval()
test_loss, test_acc, n_iter = 0, 0, 0
for X_batch, y_batch in test_ds:
_logits = net(X_batch)
test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss')
test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
n_iter += 1
print(" test loss: {}".format(test_loss / n_iter))
print(" test acc: {}".format(test_acc / n_iter))<|fim▁end|> | |
<|file_name|>grpc.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.kms_v1.types import resources
from google.cloud.kms_v1.types import service
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from .base import KeyManagementServiceTransport, DEFAULT_CLIENT_INFO
class KeyManagementServiceGrpcTransport(KeyManagementServiceTransport):
"""gRPC backend transport for KeyManagementService.
Google Cloud Key Management Service
Manages cryptographic keys and operations using those keys.
Implements a REST model with the following objects:
- [KeyRing][google.cloud.kms.v1.KeyRing]
- [CryptoKey][google.cloud.kms.v1.CryptoKey]
- [CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]
- [ImportJob][google.cloud.kms.v1.ImportJob]
If you are using manual gRPC libraries, see `Using gRPC with Cloud
KMS <https://cloud.google.com/kms/docs/grpc>`__.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "cloudkms.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:<|fim▁hole|> cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "cloudkms.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def list_key_rings(
self,
) -> Callable[[service.ListKeyRingsRequest], service.ListKeyRingsResponse]:
r"""Return a callable for the list key rings method over gRPC.
Lists [KeyRings][google.cloud.kms.v1.KeyRing].
Returns:
Callable[[~.ListKeyRingsRequest],
~.ListKeyRingsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_key_rings" not in self._stubs:
self._stubs["list_key_rings"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/ListKeyRings",
request_serializer=service.ListKeyRingsRequest.serialize,
response_deserializer=service.ListKeyRingsResponse.deserialize,
)
return self._stubs["list_key_rings"]
@property
def list_crypto_keys(
self,
) -> Callable[[service.ListCryptoKeysRequest], service.ListCryptoKeysResponse]:
r"""Return a callable for the list crypto keys method over gRPC.
Lists [CryptoKeys][google.cloud.kms.v1.CryptoKey].
Returns:
Callable[[~.ListCryptoKeysRequest],
~.ListCryptoKeysResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_crypto_keys" not in self._stubs:
self._stubs["list_crypto_keys"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/ListCryptoKeys",
request_serializer=service.ListCryptoKeysRequest.serialize,
response_deserializer=service.ListCryptoKeysResponse.deserialize,
)
return self._stubs["list_crypto_keys"]
@property
def list_crypto_key_versions(
self,
) -> Callable[
[service.ListCryptoKeyVersionsRequest], service.ListCryptoKeyVersionsResponse
]:
r"""Return a callable for the list crypto key versions method over gRPC.
Lists [CryptoKeyVersions][google.cloud.kms.v1.CryptoKeyVersion].
Returns:
Callable[[~.ListCryptoKeyVersionsRequest],
~.ListCryptoKeyVersionsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_crypto_key_versions" not in self._stubs:
self._stubs["list_crypto_key_versions"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/ListCryptoKeyVersions",
request_serializer=service.ListCryptoKeyVersionsRequest.serialize,
response_deserializer=service.ListCryptoKeyVersionsResponse.deserialize,
)
return self._stubs["list_crypto_key_versions"]
@property
def list_import_jobs(
self,
) -> Callable[[service.ListImportJobsRequest], service.ListImportJobsResponse]:
r"""Return a callable for the list import jobs method over gRPC.
Lists [ImportJobs][google.cloud.kms.v1.ImportJob].
Returns:
Callable[[~.ListImportJobsRequest],
~.ListImportJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_import_jobs" not in self._stubs:
self._stubs["list_import_jobs"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/ListImportJobs",
request_serializer=service.ListImportJobsRequest.serialize,
response_deserializer=service.ListImportJobsResponse.deserialize,
)
return self._stubs["list_import_jobs"]
@property
def get_key_ring(self) -> Callable[[service.GetKeyRingRequest], resources.KeyRing]:
r"""Return a callable for the get key ring method over gRPC.
Returns metadata for a given
[KeyRing][google.cloud.kms.v1.KeyRing].
Returns:
Callable[[~.GetKeyRingRequest],
~.KeyRing]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_key_ring" not in self._stubs:
self._stubs["get_key_ring"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/GetKeyRing",
request_serializer=service.GetKeyRingRequest.serialize,
response_deserializer=resources.KeyRing.deserialize,
)
return self._stubs["get_key_ring"]
@property
def get_crypto_key(
self,
) -> Callable[[service.GetCryptoKeyRequest], resources.CryptoKey]:
r"""Return a callable for the get crypto key method over gRPC.
Returns metadata for a given
[CryptoKey][google.cloud.kms.v1.CryptoKey], as well as its
[primary][google.cloud.kms.v1.CryptoKey.primary]
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion].
Returns:
Callable[[~.GetCryptoKeyRequest],
~.CryptoKey]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_crypto_key" not in self._stubs:
self._stubs["get_crypto_key"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/GetCryptoKey",
request_serializer=service.GetCryptoKeyRequest.serialize,
response_deserializer=resources.CryptoKey.deserialize,
)
return self._stubs["get_crypto_key"]
@property
def get_crypto_key_version(
self,
) -> Callable[[service.GetCryptoKeyVersionRequest], resources.CryptoKeyVersion]:
r"""Return a callable for the get crypto key version method over gRPC.
Returns metadata for a given
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion].
Returns:
Callable[[~.GetCryptoKeyVersionRequest],
~.CryptoKeyVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_crypto_key_version" not in self._stubs:
self._stubs["get_crypto_key_version"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/GetCryptoKeyVersion",
request_serializer=service.GetCryptoKeyVersionRequest.serialize,
response_deserializer=resources.CryptoKeyVersion.deserialize,
)
return self._stubs["get_crypto_key_version"]
@property
def get_public_key(
self,
) -> Callable[[service.GetPublicKeyRequest], resources.PublicKey]:
r"""Return a callable for the get public key method over gRPC.
Returns the public key for the given
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]. The
[CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must
be
[ASYMMETRIC_SIGN][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_SIGN]
or
[ASYMMETRIC_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ASYMMETRIC_DECRYPT].
Returns:
Callable[[~.GetPublicKeyRequest],
~.PublicKey]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_public_key" not in self._stubs:
self._stubs["get_public_key"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/GetPublicKey",
request_serializer=service.GetPublicKeyRequest.serialize,
response_deserializer=resources.PublicKey.deserialize,
)
return self._stubs["get_public_key"]
@property
def get_import_job(
self,
) -> Callable[[service.GetImportJobRequest], resources.ImportJob]:
r"""Return a callable for the get import job method over gRPC.
Returns metadata for a given
[ImportJob][google.cloud.kms.v1.ImportJob].
Returns:
Callable[[~.GetImportJobRequest],
~.ImportJob]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_import_job" not in self._stubs:
self._stubs["get_import_job"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/GetImportJob",
request_serializer=service.GetImportJobRequest.serialize,
response_deserializer=resources.ImportJob.deserialize,
)
return self._stubs["get_import_job"]
@property
def create_key_ring(
self,
) -> Callable[[service.CreateKeyRingRequest], resources.KeyRing]:
r"""Return a callable for the create key ring method over gRPC.
Create a new [KeyRing][google.cloud.kms.v1.KeyRing] in a given
Project and Location.
Returns:
Callable[[~.CreateKeyRingRequest],
~.KeyRing]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_key_ring" not in self._stubs:
self._stubs["create_key_ring"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/CreateKeyRing",
request_serializer=service.CreateKeyRingRequest.serialize,
response_deserializer=resources.KeyRing.deserialize,
)
return self._stubs["create_key_ring"]
@property
def create_crypto_key(
self,
) -> Callable[[service.CreateCryptoKeyRequest], resources.CryptoKey]:
r"""Return a callable for the create crypto key method over gRPC.
Create a new [CryptoKey][google.cloud.kms.v1.CryptoKey] within a
[KeyRing][google.cloud.kms.v1.KeyRing].
[CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] and
[CryptoKey.version_template.algorithm][google.cloud.kms.v1.CryptoKeyVersionTemplate.algorithm]
are required.
Returns:
Callable[[~.CreateCryptoKeyRequest],
~.CryptoKey]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_crypto_key" not in self._stubs:
self._stubs["create_crypto_key"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/CreateCryptoKey",
request_serializer=service.CreateCryptoKeyRequest.serialize,
response_deserializer=resources.CryptoKey.deserialize,
)
return self._stubs["create_crypto_key"]
@property
def create_crypto_key_version(
self,
) -> Callable[[service.CreateCryptoKeyVersionRequest], resources.CryptoKeyVersion]:
r"""Return a callable for the create crypto key version method over gRPC.
Create a new
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in a
[CryptoKey][google.cloud.kms.v1.CryptoKey].
The server will assign the next sequential id. If unset,
[state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set
to
[ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED].
Returns:
Callable[[~.CreateCryptoKeyVersionRequest],
~.CryptoKeyVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_crypto_key_version" not in self._stubs:
self._stubs["create_crypto_key_version"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/CreateCryptoKeyVersion",
request_serializer=service.CreateCryptoKeyVersionRequest.serialize,
response_deserializer=resources.CryptoKeyVersion.deserialize,
)
return self._stubs["create_crypto_key_version"]
@property
def import_crypto_key_version(
self,
) -> Callable[[service.ImportCryptoKeyVersionRequest], resources.CryptoKeyVersion]:
r"""Return a callable for the import crypto key version method over gRPC.
Import wrapped key material into a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion].
All requests must specify a
[CryptoKey][google.cloud.kms.v1.CryptoKey]. If a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] is
additionally specified in the request, key material will be
reimported into that version. Otherwise, a new version will be
created, and will be assigned the next sequential id within the
[CryptoKey][google.cloud.kms.v1.CryptoKey].
Returns:
Callable[[~.ImportCryptoKeyVersionRequest],
~.CryptoKeyVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "import_crypto_key_version" not in self._stubs:
self._stubs["import_crypto_key_version"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/ImportCryptoKeyVersion",
request_serializer=service.ImportCryptoKeyVersionRequest.serialize,
response_deserializer=resources.CryptoKeyVersion.deserialize,
)
return self._stubs["import_crypto_key_version"]
@property
def create_import_job(
self,
) -> Callable[[service.CreateImportJobRequest], resources.ImportJob]:
r"""Return a callable for the create import job method over gRPC.
Create a new [ImportJob][google.cloud.kms.v1.ImportJob] within a
[KeyRing][google.cloud.kms.v1.KeyRing].
[ImportJob.import_method][google.cloud.kms.v1.ImportJob.import_method]
is required.
Returns:
Callable[[~.CreateImportJobRequest],
~.ImportJob]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_import_job" not in self._stubs:
self._stubs["create_import_job"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/CreateImportJob",
request_serializer=service.CreateImportJobRequest.serialize,
response_deserializer=resources.ImportJob.deserialize,
)
return self._stubs["create_import_job"]
@property
def update_crypto_key(
self,
) -> Callable[[service.UpdateCryptoKeyRequest], resources.CryptoKey]:
r"""Return a callable for the update crypto key method over gRPC.
Update a [CryptoKey][google.cloud.kms.v1.CryptoKey].
Returns:
Callable[[~.UpdateCryptoKeyRequest],
~.CryptoKey]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_crypto_key" not in self._stubs:
self._stubs["update_crypto_key"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKey",
request_serializer=service.UpdateCryptoKeyRequest.serialize,
response_deserializer=resources.CryptoKey.deserialize,
)
return self._stubs["update_crypto_key"]
@property
def update_crypto_key_version(
self,
) -> Callable[[service.UpdateCryptoKeyVersionRequest], resources.CryptoKeyVersion]:
r"""Return a callable for the update crypto key version method over gRPC.
Update a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion]'s
metadata.
[state][google.cloud.kms.v1.CryptoKeyVersion.state] may be
changed between
[ENABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.ENABLED]
and
[DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED]
using this method. See
[DestroyCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.DestroyCryptoKeyVersion]
and
[RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion]
to move between other states.
Returns:
Callable[[~.UpdateCryptoKeyVersionRequest],
~.CryptoKeyVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_crypto_key_version" not in self._stubs:
self._stubs["update_crypto_key_version"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKeyVersion",
request_serializer=service.UpdateCryptoKeyVersionRequest.serialize,
response_deserializer=resources.CryptoKeyVersion.deserialize,
)
return self._stubs["update_crypto_key_version"]
@property
def update_crypto_key_primary_version(
self,
) -> Callable[[service.UpdateCryptoKeyPrimaryVersionRequest], resources.CryptoKey]:
r"""Return a callable for the update crypto key primary
version method over gRPC.
Update the version of a
[CryptoKey][google.cloud.kms.v1.CryptoKey] that will be used in
[Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt].
Returns an error if called on a key whose purpose is not
[ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT].
Returns:
Callable[[~.UpdateCryptoKeyPrimaryVersionRequest],
~.CryptoKey]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_crypto_key_primary_version" not in self._stubs:
self._stubs[
"update_crypto_key_primary_version"
] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/UpdateCryptoKeyPrimaryVersion",
request_serializer=service.UpdateCryptoKeyPrimaryVersionRequest.serialize,
response_deserializer=resources.CryptoKey.deserialize,
)
return self._stubs["update_crypto_key_primary_version"]
@property
def destroy_crypto_key_version(
self,
) -> Callable[[service.DestroyCryptoKeyVersionRequest], resources.CryptoKeyVersion]:
r"""Return a callable for the destroy crypto key version method over gRPC.
Schedule a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] for
destruction.
Upon calling this method,
[CryptoKeyVersion.state][google.cloud.kms.v1.CryptoKeyVersion.state]
will be set to
[DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED],
and
[destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time]
will be set to the time
[destroy_scheduled_duration][google.cloud.kms.v1.CryptoKey.destroy_scheduled_duration]
in the future. At that time, the
[state][google.cloud.kms.v1.CryptoKeyVersion.state] will
automatically change to
[DESTROYED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROYED],
and the key material will be irrevocably destroyed.
Before the
[destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time]
is reached,
[RestoreCryptoKeyVersion][google.cloud.kms.v1.KeyManagementService.RestoreCryptoKeyVersion]
may be called to reverse the process.
Returns:
Callable[[~.DestroyCryptoKeyVersionRequest],
~.CryptoKeyVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "destroy_crypto_key_version" not in self._stubs:
self._stubs["destroy_crypto_key_version"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/DestroyCryptoKeyVersion",
request_serializer=service.DestroyCryptoKeyVersionRequest.serialize,
response_deserializer=resources.CryptoKeyVersion.deserialize,
)
return self._stubs["destroy_crypto_key_version"]
@property
def restore_crypto_key_version(
self,
) -> Callable[[service.RestoreCryptoKeyVersionRequest], resources.CryptoKeyVersion]:
r"""Return a callable for the restore crypto key version method over gRPC.
Restore a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] in the
[DESTROY_SCHEDULED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DESTROY_SCHEDULED]
state.
Upon restoration of the CryptoKeyVersion,
[state][google.cloud.kms.v1.CryptoKeyVersion.state] will be set
to
[DISABLED][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionState.DISABLED],
and
[destroy_time][google.cloud.kms.v1.CryptoKeyVersion.destroy_time]
will be cleared.
Returns:
Callable[[~.RestoreCryptoKeyVersionRequest],
~.CryptoKeyVersion]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "restore_crypto_key_version" not in self._stubs:
self._stubs["restore_crypto_key_version"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/RestoreCryptoKeyVersion",
request_serializer=service.RestoreCryptoKeyVersionRequest.serialize,
response_deserializer=resources.CryptoKeyVersion.deserialize,
)
return self._stubs["restore_crypto_key_version"]
@property
def encrypt(self) -> Callable[[service.EncryptRequest], service.EncryptResponse]:
r"""Return a callable for the encrypt method over gRPC.
Encrypts data, so that it can only be recovered by a call to
[Decrypt][google.cloud.kms.v1.KeyManagementService.Decrypt]. The
[CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must
be
[ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT].
Returns:
Callable[[~.EncryptRequest],
~.EncryptResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "encrypt" not in self._stubs:
self._stubs["encrypt"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/Encrypt",
request_serializer=service.EncryptRequest.serialize,
response_deserializer=service.EncryptResponse.deserialize,
)
return self._stubs["encrypt"]
@property
def decrypt(self) -> Callable[[service.DecryptRequest], service.DecryptResponse]:
r"""Return a callable for the decrypt method over gRPC.
Decrypts data that was protected by
[Encrypt][google.cloud.kms.v1.KeyManagementService.Encrypt]. The
[CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] must
be
[ENCRYPT_DECRYPT][google.cloud.kms.v1.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT].
Returns:
Callable[[~.DecryptRequest],
~.DecryptResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "decrypt" not in self._stubs:
self._stubs["decrypt"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/Decrypt",
request_serializer=service.DecryptRequest.serialize,
response_deserializer=service.DecryptResponse.deserialize,
)
return self._stubs["decrypt"]
@property
def asymmetric_sign(
self,
) -> Callable[[service.AsymmetricSignRequest], service.AsymmetricSignResponse]:
r"""Return a callable for the asymmetric sign method over gRPC.
Signs data using a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with
[CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose]
ASYMMETRIC_SIGN, producing a signature that can be verified with
the public key retrieved from
[GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey].
Returns:
Callable[[~.AsymmetricSignRequest],
~.AsymmetricSignResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "asymmetric_sign" not in self._stubs:
self._stubs["asymmetric_sign"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/AsymmetricSign",
request_serializer=service.AsymmetricSignRequest.serialize,
response_deserializer=service.AsymmetricSignResponse.deserialize,
)
return self._stubs["asymmetric_sign"]
@property
def asymmetric_decrypt(
self,
) -> Callable[
[service.AsymmetricDecryptRequest], service.AsymmetricDecryptResponse
]:
r"""Return a callable for the asymmetric decrypt method over gRPC.
Decrypts data that was encrypted with a public key retrieved
from
[GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]
corresponding to a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with
[CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose]
ASYMMETRIC_DECRYPT.
Returns:
Callable[[~.AsymmetricDecryptRequest],
~.AsymmetricDecryptResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "asymmetric_decrypt" not in self._stubs:
self._stubs["asymmetric_decrypt"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/AsymmetricDecrypt",
request_serializer=service.AsymmetricDecryptRequest.serialize,
response_deserializer=service.AsymmetricDecryptResponse.deserialize,
)
return self._stubs["asymmetric_decrypt"]
@property
def mac_sign(self) -> Callable[[service.MacSignRequest], service.MacSignResponse]:
r"""Return a callable for the mac sign method over gRPC.
Signs data using a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with
[CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] MAC,
producing a tag that can be verified by another source with the
same key.
Returns:
Callable[[~.MacSignRequest],
~.MacSignResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mac_sign" not in self._stubs:
self._stubs["mac_sign"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/MacSign",
request_serializer=service.MacSignRequest.serialize,
response_deserializer=service.MacSignResponse.deserialize,
)
return self._stubs["mac_sign"]
@property
def mac_verify(
self,
) -> Callable[[service.MacVerifyRequest], service.MacVerifyResponse]:
r"""Return a callable for the mac verify method over gRPC.
Verifies MAC tag using a
[CryptoKeyVersion][google.cloud.kms.v1.CryptoKeyVersion] with
[CryptoKey.purpose][google.cloud.kms.v1.CryptoKey.purpose] MAC,
and returns a response that indicates whether or not the
verification was successful.
Returns:
Callable[[~.MacVerifyRequest],
~.MacVerifyResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mac_verify" not in self._stubs:
self._stubs["mac_verify"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/MacVerify",
request_serializer=service.MacVerifyRequest.serialize,
response_deserializer=service.MacVerifyResponse.deserialize,
)
return self._stubs["mac_verify"]
@property
def generate_random_bytes(
self,
) -> Callable[
[service.GenerateRandomBytesRequest], service.GenerateRandomBytesResponse
]:
r"""Return a callable for the generate random bytes method over gRPC.
Generate random bytes using the Cloud KMS randomness
source in the provided location.
Returns:
Callable[[~.GenerateRandomBytesRequest],
~.GenerateRandomBytesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "generate_random_bytes" not in self._stubs:
self._stubs["generate_random_bytes"] = self.grpc_channel.unary_unary(
"/google.cloud.kms.v1.KeyManagementService/GenerateRandomBytes",
request_serializer=service.GenerateRandomBytesRequest.serialize,
response_deserializer=service.GenerateRandomBytesResponse.deserialize,
)
return self._stubs["generate_random_bytes"]
@property
def set_iam_policy(
self,
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the IAM access control policy on the specified
function. Replaces any existing policy.
Returns:
Callable[[~.SetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
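# Note (added annotation): unlike the KMS stubs above, these IAM request and
# response types are plain protobuf messages from iam_policy_pb2 / policy_pb2,
# so SerializeToString / FromString are passed here instead of the proto-plus
# serialize / deserialize helpers used for the KMS service types.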
if "set_iam_policy" not in self._stubs:
self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
"/google.iam.v1.IAMPolicy/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def get_iam_policy(
self,
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the IAM access control policy for a function.
Returns an empty policy if the function exists and does
not have a policy set.
Returns:
Callable[[~.GetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
"/google.iam.v1.IAMPolicy/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
iam_policy_pb2.TestIamPermissionsResponse,
]:
r"""Return a callable for the test iam permissions method over gRPC.
Tests the specified permissions against the IAM access control
policy for a function. If the function does not exist, this will
return an empty set of permissions, not a NOT_FOUND error.
Returns:
Callable[[~.TestIamPermissionsRequest],
~.TestIamPermissionsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
"/google.iam.v1.IAMPolicy/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
def close(self):
self.grpc_channel.close()
__all__ = ("KeyManagementServiceGrpcTransport",)<|fim▁end|> |