| prompt (large_string, lengths 70 to 991k) | completion (large_string, lengths 0 to 1.02k) |
|---|---|
<|file_name|>mlp.py<|end_file_name|><|fim▁begin|>import sys
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import f1_score,accuracy_score, recall_score, precision_score
import scipy
from random import shuffle
def load_dataset(filename):
f = open(filename)
x = []
y = []
for line in f:
v = line.rstrip('\n').split(',')
vf = [float(i) for i in v[:-1]]
x.append(vf)
y.append(float(v[-1]))
return x,y
def inductor(x,y):
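# small MLP: two hidden layers (20 and 8 units); lbfgs tends to work well on small datasets; fixed random_state for reproducibility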
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(20, 8), max_iter=1000,random_state=1)
clf.fit(x,y)
return clf
if __name__ == '__main__':
fname = sys.argv[1]
print("loading data ..")
x,y = load_dataset(fname)
x = np.array(x)
y = np.array(y)
n = len(x)
kf = StratifiedKFold(n_splits=3, shuffle=True)
for train_index, test_index in kf.split(x,y):
shuffle(train_index)
shuffle(test_index)
xtrain = x[train_index]
ytrain = y[train_index]
xtest = x[test_index]
ytest = y[test_index]
print("training ...")
clf = inductor(xtrain,ytrain)
print("predicting ...")
ypred = clf.predict(xtest)
print "(accuracy : %4.3f) "%(accuracy_score(ytest,ypred))<|fim▁hole|> print "(precision : %4.3f) "%(precision_score(ytest,ypred,average='weighted'))<|fim▁end|> | print "(f1 : %4.3f) "%(f1_score(ytest,ypred, average='weighted'))
print "(recall : %4.3f) "%(recall_score(ytest,ypred,average='weighted')) |
<|file_name|>GatewayDetails.java<|end_file_name|><|fim▁begin|>package bp.details;
import javax.swing.JLabel;
import javax.swing.JSpinner;
import javax.swing.SpinnerModel;
import javax.swing.SpinnerNumberModel;
import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener;
import bp.model.data.Gateway;
import bp.model.util.BPKeyWords;
import bp.model.util.Controller;
public class GatewayDetails extends ElementDetails {
/**
*
*/
private static final long serialVersionUID = -2243209273015769935L;
public static final String MIN_INPUT = "Minimal input:";
private Gateway gateway = (Gateway) getElement();
private JLabel minInputLb;
private JSpinner minInputSp;
public GatewayDetails(Gateway element) {
super(element);
}
@Override
protected void initComponents() {
super.initComponents();
this.minInputLb = new JLabel(MIN_INPUT);
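// spinner model: initial value 0, minimum 0, maximum Integer.MAX_VALUE, step size 1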
final SpinnerModel sm = new SpinnerNumberModel(0, 0, Integer.MAX_VALUE, 1);
this.minInputSp = new JSpinner(sm);
// Set the texts if available
gateway = (Gateway) getElement();
if (gateway.getMinInput() != null)
<|fim▁hole|>
@Override
protected void layoutComponents() {
super.layoutComponents();
createAdvanced();
getAdvanced().add(this.minInputLb);
getAdvanced().add(this.minInputSp);
}
@Override
protected void addActions() {
super.addActions();
this.minInputSp.addChangeListener(new ChangeListener() {
@Override
public void stateChanged(final ChangeEvent arg0) {
GatewayDetails.this.gateway.updateMinInput((Integer) GatewayDetails.this.minInputSp.getValue(),
Controller.DETAILS);
}
});
}
@Override
protected void dataAttributeChanged(final BPKeyWords keyWord, final Object value) {
super.dataAttributeChanged(keyWord, value);
if (value != null) {
if (keyWord == BPKeyWords.MIN_INPUT) {
this.minInputSp.setValue(value);
}
}
}
}<|fim▁end|> | minInputSp.setValue(gateway.getMinInput());
}
|
<|file_name|>solver.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Solver object
# Copyright (C) 2011-2012, Tomi Leppänen (aka Tomin)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#<|fim▁hole|># along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Solver object"""
class Solver():
"""Solver object
This object solves sudokus. It can be used with tools to create sudoku
solver application or combined with Runner object to make life easier.
See Runner object in sudoku.runner for more information about it.
"""
def __init__(self, sudoku):
"""Constructor
The sudoku parameter is a list created by parse_sudoku in sudoku.tools.
"""
self.sudoku = sudoku
self.done = False # if Solver should be stopped
self.good = False # if sudoku is completed
self.split_mode = False # if split mode is on or not :)
self.split_numbers = 10
self.split_request = False # if split is requested or not
def __str__(self):
s = None
for row in self.sudoku:
for col in row:
if s == None:
s = str(col)
else:
s = s+","+str(col)
return s
def get_grid(self,row,col):
"""checks which grid is being procecced"""
return [int((row+3)/3),int((col+3)/3)]
def isgood_final(self):
"""Checks if sudoku is completed correctly
Use only for completed sudokus
"""
for a in range(0,9):
suma = 0
sumb = 0
for b in range(0,9):
suma = suma+self.sudoku[a][b]
sumb = sumb+self.sudoku[b][a]
if suma != 45 or sumb != 45:
return False
for r in range(1,4):
for c in range(1,4):
sumc = 0
for r_n in range(r*3-3,r*3):
for c_n in range(c*3-3,c*3):
sumc = sumc+self.sudoku[r_n][c_n]
if sumc != 45:
return False
return True
def isgood(self):
"""Checks if a partial (or complete) sudoku is correct
This is slower than isgood_final
"""
for a in range(0,9):
numbersa = []
numbersb = []
for b in range(0,9):
if self.sudoku[a][b] != "":
try:
numbersa.index(self.sudoku[a][b])
except ValueError:
numbersa.append(self.sudoku[a][b])
else:
return False
if self.sudoku[b][a] != "":
try:
numbersb.index(self.sudoku[b][a])
except ValueError:
numbersb.append(self.sudoku[b][a])
else:
return False
for r in range(1,4):
for c in range(1,4):
numbersc = []
for r_n in range(r*3-3,r*3):
for c_n in range(c*3-3,c*3):
if self.sudoku[r_n][c_n] != "":
try:
numbersc.index(self.sudoku[r_n][c_n])
except ValueError:
numbersc.append(self.sudoku[r_n][c_n])
else:
return False
return True
def isready(self):
"""Checks if all cells are filled"""
for row in self.sudoku:
try:
row.index("")
except ValueError:
pass
else:
return False
return True
def get_numbers(self,row,col):
"""Returns numbers that can be filled into a cell"""
numbers = []
numbers.append(self.sudoku[row][col])
numbers = list(range(1,10))
for i in range(0,9):
try:
numbers.remove(self.sudoku[row][i])
except ValueError:
pass
try:
numbers.remove(self.sudoku[i][col])
except ValueError:
pass
x,y = self.get_grid(row,col)
for r in range(int(x*3-3),int(x*3)):
for c in range(int(y*3-3),int(y*3)):
if self.sudoku[r][c] != "":
try:
numbers.remove(self.sudoku[r][c])
except ValueError:
pass
return numbers
def run(self):
"""Solves the sudoku
This solves some of the sudoku and should be called until the sudoku
is ready. The status can be monitored using the Solver object's good, done
and split_request attributes. Returns False if something is wrong,
otherwise returns True.
"""
changed = False
if self.isready():
if self.isgood_final():
self.done = True
self.good = True
return True
else:
self.done = True
self.good = False
return False
for row in range(0,9):
for col in range(0,9):
if self.sudoku[row][col] == "":
numbers = self.get_numbers(row,col)
if len(numbers) == 1:
changed = True # changed!
self.sudoku[row][col] = numbers[0]
elif len(numbers) == 0: # got into deadlock
self.done = True
self.good = False
return False
elif self.split_mode != False and len(numbers) >= 2:
changed = True # changed!
if self.split_mode == 1 and \
len(numbers) < self.split_numbers:
self.split_numbers = len(numbers)
elif self.split_mode == 2 and \
len(numbers) == self.split_numbers:
# prepare for splitting
self.numbers = numbers
self.row = row
self.col = col
self.done = True
self.good = False
self.split_request = True
return True
if self.split_mode == 1:
self.split_mode = 2
if changed == False: # if nothing has been solved in this round
if self.isgood():
self.split_mode = 1 # turns split mode on
else: # give up if sudoku is faulty
self.done = True
self.good = False
return False
return True<|fim▁end|> | # You should have received a copy of the GNU General Public License |
<|file_name|>FileRecord.ts<|end_file_name|><|fim▁begin|>/*
fTelnet: An HTML5 WebSocket client
Copyright (C) Rick Parrish, R&M Software
This file is part of fTelnet.
fTelnet is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or any later version.
fTelnet is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with fTelnet. If not, see <http://www.gnu.org/licenses/>.
*/
class FileRecord {
private _Data: ByteArray = new ByteArray();
private _Name: string = '';
private _Size: number = 0;
<|fim▁hole|> this._Size = size;
}
public get data(): ByteArray {
return this._Data;
}
public get name(): string {
return this._Name;
}
public get size(): number {
return this._Size;
}
}<|fim▁end|> | constructor(name: string, size: number) {
this._Name = name; |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/*!
This crate provides a number of conversion traits with more specific semantics than those provided by `as` or `From`/`Into`.
The goal with the traits provided here is to be more specific about what generic code can rely on, as well as provide reasonably self-describing alternatives to the standard `From`/`Into` traits. For example, although `T: From<U>` might be satisfied, it imposes no restrictions on the *kind* of conversion being implemented. As such, the traits in this crate try to be very specific about what conversions are allowed. This makes them less generally applicable, but more useful where they *do* apply.
In addition, `From`/`Into` requires all conversions to succeed or panic. All conversion traits in this crate define an associated error type, allowing code to react to failed conversions as appropriate.
# API Stability Notice
The API of this crate is still not entirely decided. In particular, errors may change in the future to carry the value that failed to convert (allowing it to be recovered).
# Overview
The following traits are used to define various conversion semantics:
- [`ApproxFrom`](./trait.ApproxFrom.html)/[`ApproxInto`](./trait.ApproxInto.html) - approximate conversions, with selectable approximation scheme (see [`ApproxScheme`](./trait.ApproxScheme.html)).
- [`TryFrom`](./trait.TryFrom.html)/[`TryInto`](./trait.TryInto.html) - general, potentially failing value conversions.
- [`ValueFrom`](./trait.ValueFrom.html)/[`ValueInto`](./trait.ValueInto.html) - exact, value-preserving conversions.
These extension methods are provided to help with some common cases:
- [`ApproxWith::approx`](./trait.ApproxWith.html#method.approx) - calls `ApproxInto::approx_into` with the `DefaultApprox` scheme.
- [`ApproxWith::approx_with<S>`](./trait.ApproxWith.html#method.approx_with) - calls `ApproxInto::approx_into` with the `S` approximation scheme.
- [`UnwrapOk::unwrap_ok`](./errors/trait.UnwrapOk.html#tymethod.unwrap_ok) - unwraps results from conversions that cannot fail.
- [`UnwrapOrInf::unwrap_or_inf`](./errors/trait.UnwrapOrInf.html#tymethod.unwrap_or_inf) - saturates to ±∞ on failure.
- [`UnwrapOrInvalid::unwrap_or_invalid`](./errors/trait.UnwrapOrInvalid.html#tymethod.unwrap_or_invalid) - substitutes the target type's "invalid" sentinel value on failure.
- [`UnwrapOrSaturate::unwrap_or_saturate`](./errors/trait.UnwrapOrSaturate.html#tymethod.unwrap_or_saturate) - saturates to the maximum or minimum value of the target type on failure.
A macro is provided to assist in implementing conversions:
- [`TryFrom!`](./macros/index.html#tryfrom!) - derives an implementation of [`TryFrom`](./trait.TryFrom.html).
If you are implementing your own types, you may also be interested in the traits contained in the [`misc`](./misc/index.html) module.
## Provided Implementations
The crate provides several blanket implementations:
- `*From<A> for A` (all types can be converted from and into themselves).
- `*Into<Dst> for Src where Dst: *From<Src>` (`*From` implementations imply a matching `*Into` implementation).
Conversions for the builtin numeric (integer and floating point) types are provided. In general, `ValueFrom` conversions exist for all pairs except for float → integer (since such a conversion is generally unlikely to *exactly* succeed) and `f64 → f32` (for the same reason). `ApproxFrom` conversions with the `DefaultApprox` scheme exist between all pairs. `ApproxFrom` with the `Wrapping` scheme exist between integers.
## Errors
A number of error types are defined in the [`errors`](./errors/index.html) module. Generally, conversions use whichever error type most *narrowly* defines the kinds of failures that can occur. For example:
- `ValueFrom<u8> for u16` cannot possibly fail, and as such it uses `NoError`.
- `ValueFrom<i8> for u16` can *only* fail with an underflow, thus it uses the `Underflow` type.
- `ValueFrom<i32> for u16` can underflow *or* overflow, hence it uses `RangeError`.
- Finally, `ApproxFrom<f32> for u16` can underflow, overflow, or attempt to convert NaN; `FloatError` covers those three cases.
Because there are *numerous* error types, the `GeneralError` enum is provided. `From<E> for GeneralError` exists for each error type `E` defined by this crate (even for `NoError`!), allowing errors to be translated automatically by `try!`. In fact, all errors can be "expanded" to *all* more general forms (*e.g.* `NoError` → `Underflow`, `Overflow` → `RangeError` → `FloatError`).
The reason for not just using `GeneralError` in the first place is to statically reduce the number of potential error cases you need to deal with. It also allows the `Unwrap*` extension traits to be defined *without* the possibility for runtime failure (*e.g.* you cannot use `unwrap_or_saturate` with a `FloatError`, because what do you do if the error is `NotANumber`; saturate to max or to min? Or panic?).
# Examples
```
# extern crate conv;
# use conv::*;
# fn main() {
// This *cannot* fail, so we can use `unwrap_ok` to discard the `Result`.
assert_eq!(u8::value_from(0u8).unwrap_ok(), 0u8);
// This *can* fail. Specifically, it can underflow.
assert_eq!(u8::value_from(0i8), Ok(0u8));
assert_eq!(u8::value_from(-1i8), Err(Underflow));
// This can underflow *and* overflow; hence the change to `RangeError`.
assert_eq!(u8::value_from(-1i16), Err(RangeError::Underflow));
assert_eq!(u8::value_from(0i16), Ok(0u8));
assert_eq!(u8::value_from(256i16), Err(RangeError::Overflow));
// We can use the extension traits to simplify this a little.
assert_eq!(u8::value_from(-1i16).unwrap_or_saturate(), 0u8);
assert_eq!(u8::value_from(0i16).unwrap_or_saturate(), 0u8);
assert_eq!(u8::value_from(256i16).unwrap_or_saturate(), 255u8);
// Obviously, all integers can be "approximated" using the default scheme (it
// doesn't *do* anything), but they can *also* be approximated with the
// `Wrapping` scheme.
assert_eq!(
<u8 as ApproxFrom<_, DefaultApprox>>::approx_from(400u16),
Err(Overflow));
assert_eq!(
<u8 as ApproxFrom<_, Wrapping>>::approx_from(400u16),
Ok(144u8));
// This is rather inconvenient; as such, provided the return type can be
// inferred, you can use `ApproxWith::approx` (for the default scheme) and
// `ApproxWith::approx_with`.
assert_eq!(400u16.approx(), Err::<u8, _>(Overflow));
assert_eq!(400u16.approx_with::<Wrapping>(), Ok::<u8, _>(144u8));
// Integer -> float conversions *can* fail due to limited precision.
// Once the continuous range of exactly representable integers is exceeded, the
// provided implementations fail with over/underflow errors.
assert_eq!(f32::value_from(16_777_216i32), Ok(16_777_216.0f32));
assert_eq!(f32::value_from(16_777_217i32), Err(RangeError::Overflow));
// Float -> integer conversions have to be done using approximations. Although
// exact conversions are *possible*, "advertising" this with an implementation
// is misleading.
//
// Note that `DefaultApprox` for float -> integer uses whatever rounding
// mode is currently active (*i.e.* whatever `as` would do).
assert_eq!(41.0f32.approx(), Ok(41u8));
assert_eq!(41.3f32.approx(), Ok(41u8));
assert_eq!(41.5f32.approx(), Ok(41u8));
assert_eq!(41.8f32.approx(), Ok(41u8));
assert_eq!(42.0f32.approx(), Ok(42u8));
assert_eq!(255.0f32.approx(), Ok(255u8));
assert_eq!(256.0f32.approx(), Err::<u8, _>(FloatError::Overflow));
// If you really don't care about the specific kind of error, you can just rely
// on automatic conversion to `GeneralError`.
fn too_many_errors() -> Result<(), GeneralError> {
assert_eq!({let r: u8 = try!(0u8.value_into()); r}, 0u8);
assert_eq!({let r: u8 = try!(0i8.value_into()); r}, 0u8);
assert_eq!({let r: u8 = try!(0i16.value_into()); r}, 0u8);
assert_eq!({let r: u8 = try!(0.0f32.approx()); r}, 0u8);
Ok(())
}
# let _ = too_many_errors();
# }
```
*/
#![deny(missing_docs)]
// Exported macros.
pub mod macros;
pub use errors::{
NoError, GeneralError, Unrepresentable,
Underflow, Overflow,
FloatError, RangeError,
UnwrapOk, UnwrapOrInf, UnwrapOrInvalid, UnwrapOrSaturate,
};
/**
Publicly re-exports the most generally useful set of items.
Usage of the prelude should be considered **unstable**. Although items will likely *not* be removed without bumping the major version, new items *may* be added, which could potentially cause name conflicts in user code.
*/
pub mod prelude {
pub use super::{
ApproxFrom, ApproxInto, ApproxWith,
ValueFrom, ValueInto,
UnwrapOk, UnwrapOrInf, UnwrapOrInvalid, UnwrapOrSaturate,
};
}
macro_rules! as_item {
($($i:item)*) => {$($i)*};
}
macro_rules! item_for_each {
(
$( ($($arg:tt)*) ),* $(,)* => { $($exp:tt)* }
) => {
macro_rules! body {
$($exp)*
}
$(
body! { $($arg)* }
)*
};
}
pub mod errors;
pub mod misc;
mod impls;
/**
This trait is used to perform a conversion that is permitted to approximate the result, but *not* to wrap or saturate the result to fit into the destination type's representable range.
# Details
All implementations of this trait must provide a conversion that can be separated into two logical steps: an approximation transform, and a representation transform.
The "approximation transform" step involves transforming the input value into an approximately equivalent value which is supported by the target type *without* taking the target type's representable range into account. For example, this might involve rounding or truncating a floating point value to an integer, or reducing the accuracy of a floating point value.
The "representation transform" step *exactly* rewrites the value from the source type's binary representation into the destination type's binary representation. This step *may not* transform the value in any way. If the result of the approximation is not representable, the conversion *must* fail.
The major reason for this formulation is to exactly define what happens when converting between floating point and integer types. Often, it is unclear what happens to floating point values beyond the range of the target integer type. Do they saturate, wrap, or cause a failure?
With this formulation, it is well-defined: if a floating point value is outside the representable range, the conversion fails. This allows users to distinguish between approximation and range violation, and act accordingly.
*/
pub trait ApproxFrom<Src, Scheme=DefaultApprox> where Scheme: ApproxScheme {
/// The error type produced by a failed conversion.
type Err;
/// Convert the given value into an approximately equivalent representation.
fn approx_from(src: Src) -> Result<Self, Self::Err>;
}
impl<Src, Scheme> ApproxFrom<Src, Scheme> for Src where Scheme: ApproxScheme {
type Err = NoError;
fn approx_from(src: Src) -> Result<Self, Self::Err> {
Ok(src)
}
}
/**
This is the dual of `ApproxFrom`; see that trait for information.
*/
pub trait ApproxInto<Dst, Scheme=DefaultApprox> where Scheme: ApproxScheme {
/// The error type produced by a failed conversion.
type Err;
/// Convert the subject into an approximately equivalent representation.
fn approx_into(self) -> Result<Dst, Self::Err>;
}
impl<Dst, Src, Scheme> ApproxInto<Dst, Scheme> for Src
where
Dst: ApproxFrom<Src, Scheme>,
Scheme: ApproxScheme,
{
type Err = Dst::Err;
fn approx_into(self) -> Result<Dst, Self::Err> {
ApproxFrom::approx_from(self)
}
}
/**
This extension trait exists to simplify using approximation implementations.
If there is more than one `ApproxFrom` implementation for a given type, a simple call to `approx_into` may not be uniquely resolvable. Due to the position of the scheme parameter (on the trait itself), it is cumbersome to specify which scheme you wanted.
Hence this trait.
> **Note**: There appears to be a bug in `rustdoc`'s output. This trait is implemented *for all* types.
*/
pub trait ApproxWith<Dst> {
/// Approximate the subject with the default scheme.
fn approx(self) -> Result<Dst, Self::Err>
where Self: Sized + ApproxInto<Dst> {
self.approx_into()
}
/// Approximate the subject with a specific scheme.
fn approx_with<Scheme=DefaultApprox>(self) -> Result<Dst, Self::Err>
where
Self: Sized + ApproxInto<Dst, Scheme>,
Scheme: ApproxScheme,
{
self.approx_into()
}
}
impl<T, Dst> ApproxWith<Dst> for T {}
/**
This trait is used to mark approximation scheme types.
*/
pub trait ApproxScheme {}
/**
The "default" approximation scheme. This scheme does whatever would generally be expected of a lossy conversion, assuming no additional context or instruction is given.
This is a double-edged sword: it has the loosest semantics, but is far more likely to exist than more complicated approximation schemes.
*/
pub enum DefaultApprox {}
impl ApproxScheme for DefaultApprox {}
/**
This scheme is used to convert a value by "wrapping" it into a narrower range.
In abstract, this can be viewed as the opposite of rounding: rather than preserving the most significant bits of a value, it preserves the *least* significant bits of a value.
*/
pub enum Wrapping {}
impl ApproxScheme for Wrapping {}
// TODO: RoundToNearest, RoundToPosInf, RoundToNegInf, RoundToZero
/**
This trait is used to perform a conversion between different semantic types which might fail.
# Details
Typically, this should be used in cases where you are converting between values whose ranges and/or representations only partially overlap. That the conversion may fail should be a reasonably expected outcome. A standard example of this is converting from integers to enums of unitary variants.
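For illustration, a hand-written implementation for such an enum might look like the sketch below (the `Color` type is hypothetical, and treating `Unrepresentable` as a single-field tuple struct is an assumption here; the `TryFrom!` macro mentioned above can derive this kind of impl):
```ignore
enum Color { Red = 0, Green = 1, Blue = 2 }

impl TryFrom<u8> for Color {
    type Err = Unrepresentable<u8>;
    fn try_from(src: u8) -> Result<Self, Self::Err> {
        match src {
            0 => Ok(Color::Red),
            1 => Ok(Color::Green),
            2 => Ok(Color::Blue),
            // any other value cannot be represented as a Color
            _ => Err(Unrepresentable(src)),
        }
    }
}
```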
*/
pub trait TryFrom<Src> {
/// The error type produced by a failed conversion.
type Err;
/// Convert the given value into the subject type.
fn try_from(src: Src) -> Result<Self, Self::Err>;
}
impl<Src> TryFrom<Src> for Src {
type Err = NoError;
fn try_from(src: Src) -> Result<Self, Self::Err> {
Ok(src)
}
}
/**
This is the dual of `TryFrom`; see that trait for information.
*/
pub trait TryInto<Dst> {
/// The error type produced by a failed conversion.
type Err;
/// Convert the subject into the destination type.
fn try_into(self) -> Result<Dst, Self::Err>;
}
impl<Src, Dst> TryInto<Dst> for Src where Dst: TryFrom<Src> {
type Err = Dst::Err;
fn try_into(self) -> Result<Dst, Self::Err> {
TryFrom::try_from(self)
}
}
/**
This trait is used to perform an exact, value-preserving conversion.
# Details
Implementations of this trait should be reflexive, associative and commutative (in the absence of conversion errors). That is, all possible cycles of `ValueFrom` conversions (for which each "step" has a defined implementation) should produce the same result, with a given value either being "round-tripped" exactly, or an error being produced.
*/
pub trait ValueFrom<Src> {
/// The error type produced by a failed conversion.
type Err;
/// Convert the given value into an exactly equivalent representation.
fn value_from(src: Src) -> Result<Self, Self::Err>;
}
impl<Src> ValueFrom<Src> for Src {<|fim▁hole|> fn value_from(src: Src) -> Result<Self, Self::Err> {
Ok(src)
}
}
/**
This is the dual of `ValueFrom`; see that trait for information.
*/
pub trait ValueInto<Dst> {
/// The error type produced by a failed conversion.
type Err;
/// Convert the subject into an exactly equivalent representation.
fn value_into(self) -> Result<Dst, Self::Err>;
}
impl<Src, Dst> ValueInto<Dst> for Src where Dst: ValueFrom<Src> {
type Err = Dst::Err;
fn value_into(self) -> Result<Dst, Self::Err> {
ValueFrom::value_from(self)
}
}<|fim▁end|> | type Err = NoError; |
<|file_name|>country.rs<|end_file_name|><|fim▁begin|>use std::collections::BTreeMap;
use std::f64;
use super::country_page_data::CountryPageData;
use super::event::Event;
use super::event_trunc::{EventTrunc, EventTruncRenderable};
use super::year::Year;
use super::year_page_data::YearPageData;
/// `Country` contains full country data
#[derive(Debug, Clone, RustcEncodable)]
pub struct Country {
pub events: Vec<Event>,
pub link: String,
pub name: String,
pub num_events: i32,
pub num_fatalities: i32,
}
impl Country {
pub fn new(t_link: String, t_name: String, num_eve: i32, num_fat: i32) -> Country {
Country {
events: Vec::new(),
link: t_link,
name: t_name,
num_events: num_eve,
num_fatalities: num_fat,
}
}
/// Create a blank `Country` from just the name
pub fn from_name(t_name: String) -> Country {
Country {
events: Vec::new(),
link: t_name.clone().replace(" ", ""),
name: t_name,
num_events: 0i32,
num_fatalities: 0i32,
}
}
pub fn to_page_data(&self) -> CountryPageData {
let mut t_years: BTreeMap<i32, Year> = BTreeMap::new();
for event in &self.events {
let c_year = t_years.entry(event.year).or_insert(Year::new(event.year));
c_year.events += 1;
c_year.fatalities += event.fatalities;
}
let mut year_vec: Vec<Year> = Vec::new();
for elem in t_years.values() {<|fim▁hole|> let f_eve: f64 = f64::from(elem.events);
let f_fat: f64 = f64::from(elem.fatalities);
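// epd = events per day (events / 365); fpe = fatalities per event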
let t_epd: f64 = f_eve / 365.0f64;
let t_fpe: f64 = f_fat / f_eve;
elem.epd = format!("{:.2}", t_epd);
elem.fpe = format!("{:.2}", t_fpe);
}
let mut t_vec = year_vec.clone();
t_vec.reverse();
CountryPageData {
found: true,
name: self.name.clone(),
link: self.link.clone(),
total_eve: t_vec.len() as i32,
years: t_vec,
}
}
pub fn to_year_data(&self, inp_year: i32) -> YearPageData {
let mut t_eve: Vec<EventTruncRenderable> = Vec::new();
for elem in &self.events {
if elem.year == inp_year {
t_eve.push(EventTruncRenderable::from_event(elem));
}
}
YearPageData {
eve_vec: t_eve,
name: self.name.clone(),
year: inp_year,
}
}
}<|fim▁end|> | year_vec.push(elem.clone());
}
for elem in &mut year_vec { |
<|file_name|>nav.component.ts<|end_file_name|><|fim▁begin|>import {Component, OnInit} from '@angular/core';
import {LegacyAppRouter} from '../../core/legacy-app-router';
import {AuthService} from 'app/auth/auth.service';
import {LoggedInUser} from '../../auth/logged-in-user';
@Component({
selector: 'tb-nav',
templateUrl: './nav.component.html',
styleUrls: ['./nav.component.scss']
})
export class NavComponent implements OnInit {
title: String = 'Taskboard';
loggedInUser: LoggedInUser;
constructor(
private legacyAppRouter: LegacyAppRouter,
private authService: AuthService
) {}
<|fim▁hole|> this.authService.getLoggedInUser().subscribe(user => this.loggedInUser = user);
}
logout() {
this.authService.logout();
}
backToHome() {
this.legacyAppRouter.goToHome();
}
}<|fim▁end|> | ngOnInit() { |
<|file_name|>fcm.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
blank_datafile = '/home/kyleb/Dropbox/UCSF/cas9/FCS/150916-3.1/kyleb/150916-rfp-cas9/96 Well - Flat bottom_002/Specimen_001_F1_F01_046.fcs'
script_output_dir = 'script_output'
sample_directory = '/home/kyleb/Dropbox/UCSF/cas9/FCS/150916-3.1/kyleb/150916-rfp-cas9/96 Well - Flat bottom_002'
rows_in_plate = 'ABCDEFGH'
cols_in_plate = list(range(1, 13))
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from FlowCytometryTools import FCMeasurement, PolyGate, ThresholdGate, CompositeGate
import os, FlowCytometryTools
import pylab as P
import numpy as np
import scipy
import scipy.optimize
import scipy.stats
use_multiprocessing = True
if use_multiprocessing:
import multiprocessing as mp
class PlatePos:
def __init__ (self, plate_position_str):
self.row = plate_position_str[0]
assert( self.row in rows_in_plate )
self.col = int(plate_position_str[1:])
# Returns the next position on the plate
@property
def next_pos(self):
if self.row_index == len(rows_in_plate)-1:
if self.col == cols_in_plate[-1]:
return None
if self.col == cols_in_plate[-1]:
next_pos_row = rows_in_plate[ self.row_index+1 ]
next_pos_col = 1
else:
next_pos_row = self.row
next_pos_col = self.col + 1
return PlatePos( '%s%d' % (next_pos_row, next_pos_col) )
@property
def row_index(self):
return rows_in_plate.index(self.row)
def __repr__(self):
return '%s%02d' % (self.row, self.col)
def __lt__ (self, other):
if self.row == other.row:
return self.col < other.col
else:
return self.row < other.row
def __hash__(self):
return hash( str(self) )
def __eq__(self, other):
return self.row == other.row and self.col == other.col
def __ne__(self, other):
return not self.__eq__(other)
class PlateInfo:
def __init__ (self, name, value, new_positions):
self.name = name
if value == None:
self.value = np.nan
else:
self.value = value
self.positions = []
if isinstance(new_positions, list):
for new_position_range in new_positions:
self.add_position_range(new_position_range)
elif isinstance(new_positions, str):
self.add_position_range(new_positions)
else:
raise Exception('Input new positions must be a list or string')
def add_position_range(self, pos_range):
if '-' in pos_range:
first_pos_str, second_pos_str = pos_range.split('-')
first_pos = PlatePos(first_pos_str)
second_pos = PlatePos(second_pos_str)
first_pos_char_index = rows_in_plate.index(first_pos.row)
second_pos_char_index = rows_in_plate.index(second_pos.row)
for char_index in range(first_pos_char_index, second_pos_char_index + 1):
row = rows_in_plate[char_index]
for col in range(first_pos.col, second_pos.col + 1):
self.add_position( '%s%d' % (row, col) )
else:
self.add_position(pos_range)
def add_position(self, pos_str):
pos = PlatePos(pos_str)
if pos not in self.positions:
self.positions.append(pos)
self.positions.sort()
@property
def position_set(self):
return_set = set()
for pos in self.positions:
return_set.add(pos)
return return_set
def __repr__(self):
return str( self.positions )
class Plate:
def __init__ (self, plate_info_list, sample_dir=None, verbose=False, name=None):
self.name = name
self.info_dict = {}
self.samples = {}
self.sample_dir = sample_dir
for plate_info in plate_info_list:
if plate_info.name not in self.info_dict:
self.info_dict[plate_info.name] = {}
assert( plate_info.value not in self.info_dict[plate_info.name] )
self.info_dict[plate_info.name][plate_info.value] = plate_info
if sample_dir != None:
self.load_fcs_dir(sample_dir, verbose=verbose)
def __repr__(self):
return str(self.info_dict)
@property
def all_position_set(self):
s = set()
for name in self.info_dict:
for value in self.info_dict[name]:
s = s.union(self.info_dict[name][value].position_set)
return s
def get_by_well(self, well_pos):
search_pos = PlatePos(well_pos)
for pos in self.all_position_set:
if pos == search_pos:
return self.samples[pos]
def parameter_values(self, parameter_name):
return sorted( self.info_dict[parameter_name].keys() )
def well_set(self, parameter_name, parameter_value=np.nan):
if parameter_name not in self.info_dict or parameter_value not in self.info_dict[parameter_name]:
return set()
else:
return self.info_dict[parameter_name][parameter_value].position_set
def single_well_from_set(self, well_set):
well_list = list(well_set)
assert( len(well_list) == 1 )
return self.samples[well_list[0]]
@property
def experimental_parameters(self):
experimental_parameters = []
for parameter_name in list(self.info_dict.keys()):
if 'blank' not in parameter_name.lower():
if len(self.info_dict[parameter_name]) == 1 and np.nan in self.info_dict[parameter_name]:
experimental_parameters.append(parameter_name)
return experimental_parameters
def gate(self, gate):
if use_multiprocessing:
pool = mp.Pool()
for pos in self.samples:
pool.apply_async(gate_data, (pos, self.samples[pos], gate), callback=self.set_gate)
pool.close()
pool.join()
else:
for pos in self.samples:
self.samples[pos] = self.samples[pos].gate(gate)
def gate_sample(self, gate, pos):
self.samples[pos] = self.samples[pos].gate(gate)
def set_gate(self, tup):
pos, fcs_data = tup
self.samples[pos] = fcs_data
def load_fcs_dir(self, sample_directory, verbose=False):
fcs_files = find_fcs_files(sample_directory)
for plate_pos, filepath in fcs_files:
assert(plate_pos not in self.samples)
self.samples[plate_pos] = FCMeasurement(ID=str(plate_pos), datafile=filepath)
if verbose:
print('Loaded %d FCS files from directory %s' % (len(fcs_files), sample_directory))
def gate_data(pos, fcs_data, gate):
return (pos, fcs_data.gate(gate))
class FCSFile:
def __init__ (self, filepath, plate_position_str):
self.filepath = filepath
self.plate_position_obj = PlatePos(plate_position_str)
@property
def plate_position(self):
return str( self.plate_position_obj )
@property
def plate_row(self):
return self.plate_position_obj.row
@property
def plate_col(self):
return self.plate_position_obj.col
def __lt__ (self, other):
return self.plate_position < other.plate_position
def __repr__(self):
return self.plate_position
def find_fcs_files(sample_directory):
fcs_files = []
for filename in os.listdir(sample_directory):
if filename.endswith('.fcs'):
full_filename = os.path.join(sample_directory, filename)
fcs_files.append( (PlatePos(filename.split('_')[2]), full_filename) )
fcs_files.sort()
return fcs_files
def ticks_format(value, index):
"""
get the value and returns the value as:
integer: [0,99]
1 digit float: [0.1, 0.99]
n*10^m: otherwise
To have all the numbers of the same size, they are all returned as latex strings
http://stackoverflow.com/questions/17165435/matplotlib-show-labels-for-minor-ticks-also
"""
exp = np.floor(np.log10(value))
base = value/10**exp
if exp == 0 or exp == 1:
return '${0:d}$'.format(int(value))
if exp == -1:
return '${0:.1f}$'.format(value)
else:
return '${0:d}\\times10^{{{1:d}}}$'.format(int(base), int(exp))
def output_medians_and_sums():
fsc_gate = ThresholdGate(10000.0, 'FSC-A', region='above')
ssc_gate = ThresholdGate(9000.0, 'SSC-A', region='above')
fsc_ssc_gate = CompositeGate(fsc_gate, 'and', ssc_gate)
# Load blank data
blank_sample = FCMeasurement(ID='blank', datafile=blank_datafile).gate(fsc_gate)
fcs_files = find_fcs_files(sample_directory)
channel_medians = {channel_name : {} for channel_name in blank_sample.channel_names}
channel_sums = {channel_name : {} for channel_name in blank_sample.channel_names}
for plate_pos, filepath in fcs_files:
sample = FCMeasurement(ID='sample', datafile=filepath).gate(fsc_gate)
for channel_name in sample.channel_names:
if plate_pos.row not in channel_medians[channel_name]:
channel_medians[channel_name][plate_pos.row] = {}
channel_sums[channel_name][plate_pos.row] = {}
assert( plate_pos.col not in channel_medians[channel_name][plate_pos.row] )
channel_medians[channel_name][plate_pos.row][plate_pos.col] = sample.data[channel_name].median()
channel_sums[channel_name][plate_pos.row][plate_pos.col] = np.sum(sample.data[channel_name])
# if channel_name in ['B-A', 'A-A']:
# print filename, channel_name
# sample.plot(channel_name, bins=100, alpha=0.9, color='green');
# blank_sample.plot(channel_name, bins=100, alpha=0.9, color='blue');
# P.grid(True)
# P.show() # <-- Uncomment when running as a script.
if not os.path.isdir(script_output_dir):
os.makedirs(script_output_dir)
rows = [char for char in 'ABCDEFGH']
cols = list(range(1, 13))
for channel, data_type in [(channel_medians, 'medians'), (channel_sums, 'sums')]:
for channel_name in channel:
filename = os.path.join(script_output_dir, '%s_%s.csv' % (channel_name, data_type))
with open(filename, 'w') as f:
for col in cols:
for row in rows:
if row in channel[channel_name] and col in channel[channel_name][row]:
f.write('%.2f,' % channel[channel_name][row][col])
else:
f.write('NA,')
f.write('\n')
def points_above_line(x_data, y_data, m, b):
# Calculate y-intercepts for all points given slope m
comp_bs = np.subtract(y_data, np.multiply(x_data, m))
# Return number of points whose y intercept is above passed in b
return np.count_nonzero(comp_bs > b)
def find_perpendicular_gating_line(x_data, y_data, threshold):
# Returns the line parameters which give you a certain percentage (threshold) of population
# above the line
x_data = np.sort( x_data )
y_data = np.sort( y_data )
x_max = np.amax(x_data)
y_max = np.amax(y_data)
# y = mx + b
m, b, r, p, stderr = scipy.stats.linregress(x_data, y_data)
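# gate along the direction perpendicular to the regression fit (negative reciprocal slope); the intercept is optimized below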
inv_m = -1.0 / m
inv_b = np.median( y_data )
percent_above_line = points_above_line(x_data, y_data, inv_m, inv_b) / float(len(x_data))
desired_points_above_line = int(threshold * len(x_data))
def obj_helper(calc_b):
return abs(points_above_line(x_data, y_data, inv_m, calc_b) - desired_points_above_line)
res = scipy.optimize.minimize(obj_helper, inv_b, method='nelder-mead', options={'disp': False, 'maxiter': 1000})
inv_b = res.x[0]
return (inv_m, inv_b)
def mean_confidence_interval(data, confidence=0.95):
a = 1.0*np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t._ppf((1+confidence)/2., n-1)
return m, m-h, m+h
def make_gating_fig(plate_list, gate_val, gate_name, fig_dir, fast_run = False, blank_samples=[], plot_one_sample=False):
gating_fig = plt.figure(figsize=(len(plate_list)*9, 11), dpi=600)
gated_plates_for_return = []
gating_axes = []
mean_diffs = {}
for plate_num, exp in enumerate(plate_list):
nonblank_samples = list(exp.all_position_set)
if len(gating_axes) >= 1:
ax = gating_fig.add_subplot(1, len(plate_list), plate_num+1, sharey=gating_axes[0])
else:
ax = gating_fig.add_subplot(1, len(plate_list), plate_num+1)
gating_axes.append(ax)
ax.set_title(exp.name)
if gate_name.startswith('fsc'):
gate = ThresholdGate(gate_val, 'FSC-A', region='above')
elif gate_name.startswith('poly'):
all_exp_data_fsc = []
all_exp_data_ssc = []
for i, nonblank_sample in enumerate(nonblank_samples):
all_exp_data_fsc.append( exp.samples[nonblank_sample].data['FSC-A'] )
all_exp_data_ssc.append( exp.samples[nonblank_sample].data['SSC-A'] )
if not fast_run:
exp.samples[nonblank_sample].plot(['FSC-A', 'SSC-A'], kind='scatter', color=np.random.rand(3,1), s=1, alpha=0.1, ax=ax)
gate_m, gate_b = find_perpendicular_gating_line( np.concatenate(all_exp_data_fsc), np.concatenate(all_exp_data_ssc), gate_val)
fsc_ssc_axis_limits = (-50000, 100000)
x_max = np.amax(np.concatenate(all_exp_data_fsc))
x_min = np.amin(np.concatenate(all_exp_data_fsc))
y_max = np.amax(np.concatenate(all_exp_data_ssc))
y_min = np.amin(np.concatenate(all_exp_data_ssc))
ax.set_ylim(fsc_ssc_axis_limits)
ax.set_xlim(fsc_ssc_axis_limits)
fudge = 1.0
polygon_xs = [x_min-fudge, x_min-fudge, (y_min-gate_b)/float(gate_m), x_max+fudge, x_max+fudge]
polygon_ys = [y_max+fudge, gate_m*x_min+gate_b, y_min-fudge, y_min-fudge, y_max+fudge]
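# polygon spanning the data extent on the side above the gating line y = gate_m*x + gate_b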
gate = PolyGate(np.array([[x,y] for x, y in zip(polygon_xs, polygon_ys)]), ['FSC-A', 'SSC-A'], region='in', name='polygate')
if plot_one_sample and len(nonblank_samples) > 0:
exp.samples[nonblank_samples[0]].plot(['FSC-A', 'SSC-A'], kind='scatter', color='green', s=1, alpha=0.1, ax=ax, gates=[gate])
for i, blank_sample in enumerate(blank_samples):
if i == 0:
exp.samples[blank_sample].plot(['FSC-A', 'SSC-A'], kind='scatter', color='red', s=2, alpha=1.0/float(len(blank_samples)), gates=[gate], label='Blank media', ax=ax)
else:
if not fast_run:
exp.samples[blank_sample].plot(['FSC-A', 'SSC-A'], kind='scatter', color='red', s=2, alpha=1.0/float(len(blank_samples)), gates=[gate], ax=ax)
exp.gate(gate)
gated_plates_for_return.append(exp)
ax.grid(True)
if len(blank_samples) > 0:
ax.legend()
gating_fig.savefig(os.path.join(fig_dir, 'gates.png'))
gating_fig.clf()
plt.close(gating_fig)
del gating_fig
return gated_plates_for_return
def make_individual_gating_fig(exp, gate_val, gate_name, fig_dir, fast_run = False, florescence_channel = None, title=None, tight_layout = True):
gated_plates_for_return = []
mean_diffs = {}
nonblank_samples = sorted(list(exp.all_position_set))
samples_per_row = 3
if florescence_channel:
plots_per_sample = 2
else:
plots_per_sample = 1
figs_per_row = samples_per_row * plots_per_sample
num_fig_rows = 1 + ( len(nonblank_samples) - 1 ) // samples_per_row
num_fig_cols = min(samples_per_row * plots_per_sample, len(nonblank_samples) * plots_per_sample)
gating_fig = plt.figure(figsize=(8.2*num_fig_cols, num_fig_rows*5.5), dpi=600)
if title:
plt.title('%s - %s' % (title, exp.name), fontsize=20)
else:
plt.title(exp.name, fontsize=20)
current_fig_row = 1
current_fig_col = 1
current_fig_count = 1
for sample_num, nonblank_sample in enumerate(nonblank_samples):
#### FSC/SSC plot ####
ax = gating_fig.add_subplot(num_fig_rows, num_fig_cols, current_fig_count)
if current_fig_col >= figs_per_row:
current_fig_col = 1
current_fig_row += 1
else:
current_fig_col += 1
current_fig_count += 1<|fim▁hole|> ax.set_title(str(nonblank_sample))
if gate_name.startswith('fsc'):
gate = ThresholdGate(gate_val, 'FSC-A', region='above')
elif gate_name.startswith('poly'):
fsc_data = exp.samples[nonblank_sample].data['FSC-A']
ssc_data = exp.samples[nonblank_sample].data['SSC-A']
gate_m, gate_b = find_perpendicular_gating_line( exp.samples[nonblank_sample].data['FSC-A'], exp.samples[nonblank_sample].data['SSC-A'], gate_val)
fsc_ssc_axis_limits = (-50000, 100000)
x_max = np.amax(fsc_data)
x_min = np.amin(fsc_data)
y_max = np.amax(ssc_data)
y_min = np.amin(ssc_data)
ax.set_ylim(fsc_ssc_axis_limits)
ax.set_xlim(fsc_ssc_axis_limits)
fudge = 1.0
polygon_xs = [x_min-fudge, x_min-fudge, (y_min-gate_b)/float(gate_m), x_max+fudge, x_max+fudge]
polygon_ys = [y_max+fudge, gate_m*x_min+gate_b, y_min-fudge, y_min-fudge, y_max+fudge]
gate = PolyGate(np.array([[x,y] for x, y in zip(polygon_xs, polygon_ys)]), ['FSC-A', 'SSC-A'], region='in', name='polygate')
if not fast_run:
exp.samples[nonblank_sample].plot(['FSC-A', 'SSC-A'], kind='scatter', color=(0.0, 0.0, 1.0), s=1, alpha=0.05, ax=ax, gates=[gate])
ax.grid(True)
#### Gate sample ####
exp.gate_sample(gate, nonblank_sample)
#### Florescence/Time plot ####
if florescence_channel:
ax = gating_fig.add_subplot(num_fig_rows, num_fig_cols, current_fig_count)
current_fig_count += 1
ax.set_title(str(nonblank_sample))
exp.samples[nonblank_sample].plot(['Time', florescence_channel], kind='scatter', color=(1.0, 0.0, 0.0), s=1, alpha=0.05, ax=ax,)
# #### Singlet plot ####
# ax = gating_fig.add_subplot(num_fig_rows, num_fig_cols, current_fig_count)
# current_fig_count += 1
# ax.set_title(str(nonblank_sample))
# print exp.samples[nonblank_sample].channel_names
# exp.samples[nonblank_sample].plot(['FSC-H', 'FSC-W'], kind='scatter', color=(0.0, 0.0, 1.0), s=1, alpha=0.05, ax=ax,)
if tight_layout:
gating_fig.tight_layout()
gating_fig.savefig(os.path.join(fig_dir, 'gates-%s.png' % exp.name))
gating_fig.clf()
plt.close(gating_fig)
del gating_fig
return exp
if __name__ == '__main__':
output_medians_and_sums()<|fim▁end|> | |
<|file_name|>3_2.cpp<|end_file_name|><|fim▁begin|>/*
How would you design a stack which, in addition to push and pop, also has a function min
which returns the minimum element? Push, pop and min should all operate in O(1) time.
*/
#include <stdio.h>
#include <map>
using namespace std;
#define N 500
typedef struct Stack
{
int top;
int min;
int value[N];
map<int, int> minTr;
}Stack;
void init(Stack& s)
{
s.top = 0;
s.min = 1 << 30;
}
void push(Stack& s, int val)
{
if(s.top >= N)
{
printf("overflow!\n");
return;
}
s.value[s.top] = val;
if(val < s.min)
{<|fim▁hole|> }
s.top++;
}
int pop(Stack& s)
{
if(s.top <= 0)
{
printf("Stack is empty!\n");
return 0;
}
s.top--;
int e = s.value[s.top];
if(e == s.min)
{
int ind = s.minTr.rbegin()->first;
if(ind == s.top)
{
s.minTr.erase(s.top);
if(s.minTr.empty())
s.min = 1 << 30;
else
s.min = s.minTr.rbegin()->second;
}
}
return e;
}
int minEle(Stack s)
{
if(s.top == 0)
{
printf("Stack is empty!\n");
return 0;
}
return s.min;
}
void createStack(Stack& s, int *a, int n)
{
for (int i = 0; i < n; ++i)
{
push(s, a[i]);
//printf("%d %d\n", a[i], minEle(s));
}
}
void popEmpty(Stack s)
{
//printf("hello\n");
while(s.top > 0)
{
int e = pop(s);
printf("%d %d\n", e, s.min);
}
}
int main()
{
int a[9] = {3, 4, 5, 2, 6, 8, 1, 1, 4};
Stack s;
init(s);
createStack(s, a, 9);
popEmpty(s);
return 0;
}<|fim▁end|> | s.minTr.insert(pair<int, int>(s.top, val));
s.min = val; |
<|file_name|>run_command.rs<|end_file_name|><|fim▁begin|>extern crate std;
extern crate hostname;
extern crate glob;
use std::str;
use std::ffi::OsString; // Probably want OsStr in a few places
use std::path::Path;
use std::process::Command;
use std::fs;
use state::ShellState;
impl ShellState {
pub fn run_command(&self, command: &str, args: std::str::SplitWhitespace) {
// Very crude glob support
let mut expanded_args = Vec::new();
for arg in args {
if !arg.contains('*') {
expanded_args.push(OsString::from(arg));
continue;
}
let mut pattern = self.variables.get("PWD").unwrap().clone();
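// NOTE: OsString::push concatenates with no path separator, so this assumes PWD ends with one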
pattern.push(arg);
match glob::glob(pattern.to_str().unwrap()) {
Ok(result_iter) => {
for entry in result_iter.filter_map(|e| e.ok()) {
expanded_args.push(entry.as_os_str().to_owned());
}
}
Err(..) => expanded_args.push(OsString::from(arg)),
}
}
if command == "ls" || command == "grep" {
expanded_args.push(OsString::from("--color=auto"));
}
if Path::new(command).is_file() {
match Command::new(Path::new(command))
.args(expanded_args)
.current_dir(self.variables.get("PWD").unwrap().clone())
.spawn() {
Ok(mut child) => {
child.wait().unwrap();
()
} // This should be an unwrap_or_else
Err(_) => println!("command failed to launch: {}", command),
};
return;
}
let path = self.variables.get("PATH").unwrap().clone();
for entries in path.into_string()
.unwrap()
.split(':')
.map(|dir| fs::read_dir(Path::new(dir)))
.filter_map(|e| e.ok())<|fim▁hole|> {
// loop over the iterator of every directory in PATH that's possible to read
for dir_entry in entries
.filter_map(|e| e.ok()) // Only entries that are ok
.filter(|e| &e.file_name() == command)
{
// Check if entry filename matches
match Command::new(dir_entry.path())
.args(expanded_args)
.current_dir(self.variables.get("PWD").unwrap().clone())
.spawn() {
Ok(mut child) => {
child.wait().unwrap();
()
} // This should be an unwrap_or_else
Err(_) => println!("command failed to launch: {}", command),
};
return;
}
}
println!("command not found: {}", command);
}
}<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software<|fim▁hole|># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common constants for DFP scripts."""
__author__ = '[email protected] (Jeff Sham)'<|fim▁end|> | # distributed under the License is distributed on an "AS IS" BASIS, |
<|file_name|>Modal.js<|end_file_name|><|fim▁begin|>import './Modal.scss'
import pugTpl from './Modal.pug'
import mixin from '../../mixin'
import alert from '@vue2do/component/module/Modal/alert'
import confirm from '@vue2do/component/module/Modal/confirm'
export default {
name: 'PageCompModal',<|fim▁hole|>
mixins: [mixin],
data() {
return {
testName: 'test'
}
},
methods: {
simple() {
this.$refs.simple.show()
},
alert() {
alert({
message: '这是一个警告弹窗',
theme: this.typeTheme,
ui: this.typeUI
})
},
confirm() {
confirm({
message: '这是一个确认弹窗',
title: '测试确认弹出',
theme: 'danger',
ui: 'bootstrap'
})
},
showFullPop() {
this.$refs.fullPop.show()
},
hideFullPop() {
this.$refs.fullPop.hide()
},
showPureModal() {
this.$refs.pureModal.show()
},
hidePureModal() {
this.$refs.pureModal.hide()
}
}
}<|fim▁end|> |
template: pugTpl(), |
<|file_name|>test_mini_project7.py<|end_file_name|><|fim▁begin|># unit tests for Mini-project 7 (The Fifteen Puzzle), by k., 08/02/2014
import unittest
from mini_project7 import Puzzle
class TestFunctions(unittest.TestCase):
def setUp(self):
pass
def test_lower_row_invariant(self):
state = Puzzle(4, 4, [[4, 2, 3, 7], [8, 5, 6, 10], [9, 1, 0, 11], [12, 13, 14, 15]])
self.assertTrue(state.lower_row_invariant(2, 2))
self.assertIs(type(state.lower_row_invariant(2, 2)), bool)
state = Puzzle(4, 4, [[4, 2, 3, 7], [8, 5, 6, 10], [9, 1, 11, 0], [12, 13, 14, 15]])
self.assertFalse(state.lower_row_invariant(2, 2))
state = Puzzle(4, 4, [[4, 2, 3, 7], [8, 5, 6, 10], [9, 0, 1, 11], [12, 13, 14, 15]])
self.assertFalse(state.lower_row_invariant(2, 2))
state = Puzzle(4, 4, [[4, 2, 3, 7], [8, 5, 6, 10], [9, 1, 0, 12], [11, 13, 14, 15]])
self.assertFalse(state.lower_row_invariant(2, 2))
state = Puzzle(4, 4, [[4, 2, 3, 7], [8, 5, 6, 1], [9, 0, 10, 11], [12, 13, 14, 15]])
self.assertTrue(state.lower_row_invariant(2, 1))
state = Puzzle(4, 4, [[4, 2, 3, 7], [8, 5, 6, 1], [9, 0, 10, 11], [13, 12, 14, 15]])
self.assertFalse(state.lower_row_invariant(2, 1))
state = Puzzle(3, 3, [[8, 7, 6], [5, 4, 3], [2, 1, 0]])
self.assertTrue(state.lower_row_invariant(2, 2))
state = Puzzle(3, 3, [[2, 3, 4], [1, 0, 5], [6, 7, 8]])
self.assertTrue(state.lower_row_invariant(1, 1))
state = Puzzle(3, 3, [[2, 3, 4], [5, 0, 1], [6, 7, 8]])
self.assertFalse(state.lower_row_invariant(1, 1))
state = Puzzle(3, 5, [[13, 1, 2, 3, 11], [5, 6, 7, 8, 10], [11, 12, 4, 0, 14]])
self.assertTrue(state.lower_row_invariant(2, 3))
state = Puzzle(4, 4, [[1, 2, 3, 7], [5, 0, 6, 4], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertFalse(state.lower_row_invariant(1, 1))
def test_solve_interior_tile(self):
state = Puzzle(4, 4, [[4, 13, 1, 3], [5, 10, 2, 7], [8, 12, 6, 11], [9, 0, 14, 15]])
self.assertIs(type(state.solve_interior_tile(3, 1)), str)
state = Puzzle(4, 4, [[4, 13, 1, 3], [5, 10, 2, 7], [8, 12, 6, 11], [9, 0, 14, 15]])
self.assertEqual(state.solve_interior_tile(3, 1), 'uuulddrulddruld')
state = Puzzle(4, 4, [[1, 2, 3, 7], [5, 4, 9, 6], [8, 0, 10, 11], [12, 13, 14, 15]])
self.assertEqual(state.solve_interior_tile(2, 1), 'urullddruld')
state = Puzzle(3, 3, [[8, 7, 6], [5, 4, 3], [2, 1, 0]])
self.assertEqual(state.solve_interior_tile(2, 2) , 'uulldrruldrulddruld')
state = Puzzle(3, 3, [[1, 2, 3], [4, 5, 6], [7, 0, 8]])
self.assertEqual(state.solve_interior_tile(2, 1) , 'l')
state = Puzzle(4, 4, [[1, 2, 3, 4], [5, 6, 10, 7], [8, 9, 0, 11], [12, 13, 14, 15]])
self.assertEqual(state.solve_interior_tile(2, 2) , 'uld')
state = Puzzle(3, 5, [[13, 2, 3, 4, 5], [6, 7, 8, 9, 11], [10, 12, 1, 0, 14]])
self.assertEqual(state.solve_interior_tile(2, 3) , 'uullldrruldrruldrulddruld')<|fim▁hole|> state = Puzzle(4, 4, [[1, 2, 3, 4], [5, 6, 7, 9], [8, 0, 10, 11], [12, 13, 14, 15]])
self.assertEqual(state.solve_interior_tile(2, 1) , 'urrulldrullddruld')
state = Puzzle(3, 3, [[1, 2, 3], [4, 5, 7], [6, 0, 8]])
self.assertEqual(state.solve_interior_tile(2, 1) , 'urullddruld')
state = Puzzle(4, 5, [[15, 16, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [1, 0, 17, 18, 19]])
self.assertEqual(state.solve_interior_tile(3, 1), 'uuulddrulddruld')
def test_solve_col0_tile(self):
state = Puzzle(3, 3, [[1, 2, 3], [6, 4, 5], [0, 7, 8]])
self.assertIs(type(state.solve_col0_tile(2)), str)
state = Puzzle(3, 3, [[1, 2, 3], [6, 4, 5], [0, 7, 8]])
self.assertEqual(state.solve_col0_tile(2), 'urr')
state = Puzzle(3, 3, [[2, 3, 6], [1, 4, 5], [0, 7, 8]])
self.assertEqual(state.solve_col0_tile(2), 'ururdlludruldruldrdlurdluurddlurr')
state = Puzzle(3, 3, [[2, 6, 1], [3, 4, 5], [0, 7, 8]])
self.assertEqual(state.solve_col0_tile(2), 'uruldruldrdlurdluurddlurr')
state = Puzzle(3, 3, [[6, 2, 1], [3, 4, 5], [0, 7, 8]])
self.assertEqual(state.solve_col0_tile(2), 'uruldruldruldrdlurdluurddlurr')
state = Puzzle(3, 5, [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [0, 11, 12, 13, 14]])
self.assertEqual(state.solve_col0_tile(2), 'urrrrulldrulldrulldruldrdlurdluurddlurrrr')
state = Puzzle(3, 5, [[10, 2, 3, 4, 5], [6, 7, 8, 9, 1], [0, 11, 12, 13, 14]])
self.assertEqual(state.solve_col0_tile(2), 'uruldruldruldrdlurdluurddlurrrr')
state = Puzzle(3, 5, [[1, 2, 10, 4, 5], [6, 7, 8, 9, 3], [0, 11, 12, 13, 14]])
self.assertEqual(state.solve_col0_tile(2), 'ururdlludruldruldrdlurdluurddlurrrr')
def test_invariant_row0(self):
state = Puzzle(3, 3, [[2, 0, 1], [3, 4, 5], [6, 7, 8]])
self.assertFalse(state.row0_invariant(1))
self.assertIs(type(state.row0_invariant(1)), bool)
state = Puzzle(4, 4, [[1, 0, 3, 2], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertFalse(state.row0_invariant(1))
state = Puzzle(3, 3, [[1, 0, 2], [3, 4, 5], [6, 7, 8]])
self.assertTrue(state.row0_invariant(1))
state = Puzzle(4, 4, [[1, 0, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertTrue(state.row0_invariant(1))
state = Puzzle(3, 5, [[1, 2, 3, 4, 0], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]])
self.assertTrue(state.row0_invariant(4))
state = Puzzle(3, 5, [[2, 4, 1, 0, 3], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]])
self.assertFalse(state.row0_invariant(3))
state = Puzzle(4, 4, [[4, 2, 0, 3], [5, 1, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertTrue(state.row0_invariant(2))
# from the grader
state = Puzzle(4, 5, [[15, 16, 0, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [1, 2, 17, 18, 19]])
self.assertFalse(state.row0_invariant(2))
def test_invariant_row1(self):
state = Puzzle(3, 3, [[2, 3, 4], [1, 0, 5], [6, 7, 8]])
self.assertTrue(state.row1_invariant(1))
self.assertIs(type(state.row1_invariant(1)), bool)
state = Puzzle(3, 3, [[4, 3, 2], [1, 0, 5], [6, 7, 8]])
self.assertTrue(state.row1_invariant(1))
state = Puzzle(3, 3, [[2, 3, 4], [5, 1, 0], [6, 7, 8]])
self.assertTrue(state.row1_invariant(2))
state = Puzzle(4, 4, [[1, 3, 4, 2], [0, 6, 5, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertFalse(state.row1_invariant(0))
state = Puzzle(3, 5, [[1, 2, 3, 4, 5], [8, 9, 0, 6, 7], [10, 11, 12, 13, 14]])
self.assertFalse(state.row1_invariant(2))
state = Puzzle(3, 5, [[1, 5, 2, 3, 4], [7, 6, 0, 8, 9], [10, 11, 12, 13, 14]])
self.assertTrue(state.row1_invariant(2))
state = Puzzle(3, 5, [[1, 2, 3, 4, 5], [6, 7, 8, 9, 0], [10, 11, 12, 13, 14]])
self.assertTrue(state.row1_invariant(4))
state = Puzzle(4, 4, [[4, 6, 1, 3], [5, 2, 0, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertTrue(state.row1_invariant(2))
# from the grader
state = Puzzle(3, 3, [[4, 3, 2], [1, 0, 5], [6, 7, 8]])
self.assertFalse(state.row1_invariant(0))
self.assertIs(type(state.row1_invariant(1)), bool)
state = Puzzle(4, 5, [[15, 6, 5, 3, 4], [2, 1, 0, 8, 9], [10, 11, 12, 13, 14], [7, 16, 17, 18, 19]])
self.assertFalse(state.row1_invariant(2))
def test_solve_row0(self):
state = Puzzle(3, 3, [[1, 2, 0], [3, 4, 5], [6, 7, 8]])
self.assertEqual(state.solve_row0_tile(2), 'ld')
state = Puzzle(4, 4, [[2, 4, 5, 0], [3, 6, 1, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertEqual(state.solve_row0_tile(3), 'ldllurrdlurdlurrdluldrruld')
state = Puzzle(4, 4, [[1, 3, 5, 0], [2, 6, 4, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertEqual(state.solve_row0_tile(3), 'lduldruldurdlurrdluldrruld')
state = Puzzle(4, 5, [[1, 5, 6, 0, 4], [7, 2, 3, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19]])
self.assertEqual(state.solve_row0_tile(3), 'lduldurdlurrdluldrruld')
def test_solve_row1(self):
state = Puzzle(3, 3, [[2, 5, 4], [1, 3, 0], [6, 7, 8]])
self.assertEqual(state.solve_row1_tile(2), 'uldruldur')
self.assertIs(type(state.solve_row1_tile(2)), str)
state = Puzzle(3, 3, [[1, 4, 2], [3, 5, 0], [6, 7, 8]])
self.assertEqual(state.solve_row1_tile(2), 'lur')
state = Puzzle(3, 5, [[1, 2, 7, 3, 4], [6, 5, 0, 8, 9], [10, 11, 12, 13, 14]])
self.assertEqual(state.solve_row1_tile(2), 'uldur')
state = puzzle = Puzzle(4, 4, [[1, 2, 6, 3], [7, 4, 5, 0], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertEqual(state.solve_row1_tile(3), 'lllurrdlurrdlur')
state = Puzzle(4, 4, [[1, 7, 4, 2], [3, 5, 6, 0], [8, 9, 10, 11], [12, 13, 14, 15]])
self.assertEqual(state.solve_row1_tile(3), 'ulldrruldruldur')
state = Puzzle(3, 5, [[1, 7, 2, 3, 4], [6, 5, 0, 8, 9], [10, 11, 12, 13, 14]])
self.assertEqual(state.solve_row1_tile(2), 'uldruldur')
state = Puzzle(3, 5, [[1, 2, 3, 4, 5], [6, 7, 8, 9, 0], [10, 11, 12, 13, 14]])
self.assertEqual(state.solve_row1_tile(4), 'lur')
def test_two_by_two(self):
state = Puzzle(3, 3, [[4, 3, 2], [1, 0, 5], [6, 7, 8]])
self.assertEqual(state.solve_2x2(), 'uldrul')
self.assertIs(type(state.solve_2x2()), str)
state = Puzzle(3, 5, [[5, 1, 2, 3, 4], [6, 0, 7, 8, 9], [10, 11, 12, 13, 14]])
self.assertEqual(state.solve_2x2(), 'ulrdlu')
state = Puzzle(2, 2, [[3, 2], [1, 0]])
self.assertEqual(state.solve_2x2(), 'uldrul')
state = Puzzle(2, 2, [[1, 3], [2, 0]])
self.assertEqual(state.solve_2x2(), 'ul')
state = Puzzle(2, 2, [[0, 1], [2, 3]])
self.assertEqual(state.solve_2x2(), '')
def test_finale(self):
state = Puzzle(4, 5, [[15, 16, 0, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [1, 2, 17, 18, 19]])
self.assertEqual(state.solve_puzzle(), 'rrdddulduldulduuulddrulddrulduruulddruldruldrdlurdluurddlurrrrulduldulduldurlruldrdlurdluurddlurrrruldurlduldurlduldurlduldurdlurrdluldrrulduldrul')
state = Puzzle(4, 4, [[14, 12, 8, 5], [0, 2, 15, 6], [4, 13, 7, 9], [10, 11, 3, 1]])
self.assertEqual(state.solve_puzzle(), 'rrrdduullurrdldrulddrulduuulldrruldrulddrulddrulduurullddrulddrulduruuldrulddruldruldrdlurdluurddlurrrllurrdlllurrdluulddruldururdlludruldruldrdlurdluurddlurrrulldrruldruldurldlurdlurrdluldrruldlurldulrdlu')
state = Puzzle(4,4,[[2,11,12,13],[9,4,6,1],[5,7,8,3],[10,0,14,15]])
self.assertEqual(state.solve_puzzle(), 'rrlluuurrdllurdlludrulddrulddruldururullddruldruldrdlurdluurddlurrruldruldllurrdluulddruldurrulldruldrdlurdluurddlurrrlllurrdlurrdlurldulldrruldruldurlduldurdlurrdluldrrulduldrul')
# let's run it in IDLE
if __name__ == '__main__':
unittest.main(exit=False)<|fim▁end|> | |
<|file_name|>import_odbc.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
#
# Daniel Reis
# 2011
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
<|fim▁hole|>import sys
from datetime import datetime
from openerp.osv import orm, fields
import logging
_logger = logging.getLogger(__name__)
_loglvl = _logger.getEffectiveLevel()
SEP = '|'
class import_odbc_dbtable(orm.Model):
_name = "import.odbc.dbtable"
_description = 'Import Table Data'
_order = 'exec_order'
_columns = {
'name': fields.char('Datasource name', required=True, size=64),
'enabled': fields.boolean('Execution enabled'),
'dbsource_id': fields.many2one('base.external.dbsource', 'Database source', required=True),
'sql_source': fields.text('SQL', required=True, help='Column names must be valid "import_data" columns.'),
'model_target': fields.many2one('ir.model', 'Target object'),
'noupdate': fields.boolean('No updates', help="Only create new records; disable updates to existing records."),
'exec_order': fields.integer('Execution order', help="Defines the order to perform the import"),
'last_sync': fields.datetime('Last sync date',
help="Datetime for the last succesfull sync."
"\nLater changes on the source may not be replicated on the destination"),
'start_run': fields.datetime('Time started', readonly=True),
'last_run': fields.datetime('Time ended', readonly=True),
'last_record_count': fields.integer('Last record count', readonly=True),
'last_error_count': fields.integer('Last error count', readonly=True),
'last_warn_count': fields.integer('Last warning count', readonly=True),
'last_log': fields.text('Last run log', readonly=True),
'ignore_rel_errors': fields.boolean('Ignore relationship errors',
help="On error try to reimport rows ignoring relationships."),
'raise_import_errors': fields.boolean('Raise import errors',
help="Import errors not handled, intended for debugging purposes."
"\nAlso forces debug messages to be written to the server log."),
}
_defaults = {
'enabled': True,
'exec_order': 10,
}
def _import_data(self, cr, uid, flds, data, model_obj, table_obj, log):
"""Import data and returns error msg or empty string"""
def find_m2o(field_list):
""""Find index of first column with a one2many field"""
for i, x in enumerate(field_list):
                if len(x) > 3 and (x[-3:] == ':id' or x[-3:] == '/id'):
return i
return -1
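        # Illustrative (hypothetical) example of the convention handled by the helper
        # above: find_m2o(['name', 'partner_id/id', 'code']) would return 1, because
        # "partner_id/id" is an external-id (many2one) column in import_data terms,
        # while plain columns never match.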
def append_to_log(log, level, obj_id='', msg='', rel_id=''):
if '_id_' in obj_id:
obj_id = '.'.join(obj_id.split('_')[:-2]) + ': ' + obj_id.split('_')[-1]
if ': .' in msg and not rel_id:
rel_id = msg[msg.find(': .')+3:]
if '_id_' in rel_id:
rel_id = '.'.join(rel_id.split('_')[:-2]) + ': ' + rel_id.split('_')[-1]
msg = msg[:msg.find(': .')]
log['last_log'].append('%s|%s\t|%s\t|%s' % (level.ljust(5), obj_id, rel_id, msg))
_logger.debug(data)
cols = list(flds) # copy to avoid side effects
errmsg = str()
if table_obj.raise_import_errors:
model_obj.import_data(cr, uid, cols, [data], noupdate=table_obj.noupdate)
else:
try:
model_obj.import_data(cr, uid, cols, [data], noupdate=table_obj.noupdate)
except:
errmsg = str(sys.exc_info()[1])
if errmsg and not table_obj.ignore_rel_errors:
#Fail
append_to_log(log, 'ERROR', data, errmsg)
log['last_error_count'] += 1
return False
if errmsg and table_obj.ignore_rel_errors:
#Warn and retry ignoring many2one fields...
append_to_log(log, 'WARN', data, errmsg)
log['last_warn_count'] += 1
#Try ignoring each many2one (tip: in the SQL sentence select more problematic FKs first)
i = find_m2o(cols)
if i >= 0:
#Try again without the [i] column
del cols[i]
del data[i]
self._import_data(cr, uid, cols, data, model_obj, table_obj, log)
else:
#Fail
append_to_log(log, 'ERROR', data, 'Removed all m2o keys and still fails.')
log['last_error_count'] += 1
return False
return True
def import_run(self, cr, uid, ids=None, context=None):
db_model = self.pool.get('base.external.dbsource')
actions = self.read(cr, uid, ids, ['id', 'exec_order'])
actions.sort(key=lambda x: (x['exec_order'], x['id']))
#Consider each dbtable:
for action_ref in actions:
obj = self.browse(cr, uid, action_ref['id'])
if not obj.enabled:
continue # skip
_logger.setLevel(obj.raise_import_errors and logging.DEBUG or _loglvl)
_logger.debug('Importing %s...' % obj.name)
#now() microseconds are stripped to avoid problem with SQL smalldate
#TODO: convert UTC Now to local timezone
#http://stackoverflow.com/questions/4770297/python-convert-utc-datetime-string-to-local-datetime
model_name = obj.model_target.model
model_obj = self.pool.get(model_name)
xml_prefix = model_name.replace('.', '_') + "_id_"
log = {'start_run': datetime.now().replace(microsecond=0),
'last_run': None,
'last_record_count': 0,
'last_error_count': 0,
'last_warn_count': 0,
'last_log': list()}
self.write(cr, uid, [obj.id], log)
#Prepare SQL sentence; replace "%s" with the last_sync date
if obj.last_sync:
sync = datetime.strptime(obj.last_sync, "%Y-%m-%d %H:%M:%S")
else:
                sync = datetime(1900, 1, 1, 0, 0, 0)
params = {'sync': sync}
res = db_model.execute(cr, uid, [obj.dbsource_id.id],
obj.sql_source, params, metadata=True)
#Exclude columns titled "None"; add (xml_)"id" column
cidx = [i for i, x in enumerate(res['cols']) if x.upper() != 'NONE']
cols = [x for i, x in enumerate(res['cols']) if x.upper() != 'NONE'] + ['id']
#Import each row:
for row in res['rows']:
#Build data row; import only columns present in the "cols" list
data = list()
for i in cidx:
#TODO: Handle imported datetimes properly - convert from localtime to UTC!
v = row[i]
if isinstance(v, str):
v = v.strip()
data.append(v)
data.append(xml_prefix + str(row[0]).strip())
#Import the row; on error, write line to the log
log['last_record_count'] += 1
self._import_data(cr, uid, cols, data, model_obj, obj, log)
if log['last_record_count'] % 500 == 0:
_logger.info('...%s rows processed...' % (log['last_record_count']))
#Finished importing all rows
#If no errors, write new sync date
if not (log['last_error_count'] or log['last_warn_count']):
log['last_sync'] = log['start_run']
level = logging.DEBUG
if log['last_warn_count']:
level = logging.WARN
if log['last_error_count']:
level = logging.ERROR
_logger.log(level, 'Imported %s , %d rows, %d errors, %d warnings.' % (
model_name, log['last_record_count'], log['last_error_count'],
log['last_warn_count']))
#Write run log, either if the table import is active or inactive
if log['last_log']:
log['last_log'].insert(0, 'LEVEL|== Line == |== Relationship ==|== Message ==')
log.update({'last_log': '\n'.join(log['last_log'])})
log.update({'last_run': datetime.now().replace(microsecond=0)})
self.write(cr, uid, [obj.id], log)
#Finished
_logger.debug('Import job FINISHED.')
return True
def import_schedule(self, cr, uid, ids, context=None):
cron_obj = self.pool.get('ir.cron')
new_create_id = cron_obj.create(cr, uid, {
'name': 'Import ODBC tables',
'interval_type': 'hours',
'interval_number': 1,
'numbercall': -1,
'model': 'import.odbc.dbtable',
'function': 'import_run',
'doall': False,
'active': True
})
return {
'name': 'Import ODBC tables',
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'ir.cron',
'res_id': new_create_id,
'type': 'ir.actions.act_window',
}
#EOF<|fim▁end|> | |
<|file_name|>return-list-item.component.ts<|end_file_name|><|fim▁begin|>import { Component, OnInit } from '@angular/core';
@Component({
selector: 'app-return-list-item',<|fim▁hole|> styleUrls: ['./return-list-item.component.css']
})
export class ReturnListItemComponent implements OnInit {
constructor() {}
ngOnInit() {}
}<|fim▁end|> | templateUrl: './return-list-item.component.html', |
<|file_name|>sparse_matrix_vector_07.cc<|end_file_name|><|fim▁begin|>// ---------------------------------------------------------------------
//
// Copyright (C) 2004 - 2015 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------
// check SparseMatrix::residual
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <iostream>
#include <vector>
void test (TrilinosWrappers::MPI::Vector &v,
TrilinosWrappers::MPI::Vector &w,
TrilinosWrappers::MPI::Vector &x)
{
TrilinosWrappers::SparseMatrix m(v.size(),v.size(),v.size());
for (unsigned int i=0; i<m.m(); ++i)
for (unsigned int j=0; j<m.m(); ++j)
m.set (i,j, i+2*j);
<|fim▁hole|> v(i) = i;
w(i) = i+1;
}
m.compress (VectorOperation::insert);
v.compress (VectorOperation::insert);
w.compress (VectorOperation::insert);
// x=w-Mv
const double s = m.residual (x, v, w);
// make sure we get the expected result
for (unsigned int i=0; i<v.size(); ++i)
{
AssertThrow (v(i) == i, ExcInternalError());
AssertThrow (w(i) == i+1, ExcInternalError());
double result = i+1;
for (unsigned int j=0; j<m.m(); ++j)
result -= (i+2*j)*j;
AssertThrow (x(i) == result, ExcInternalError());
}
AssertThrow (s == x.l2_norm(), ExcInternalError());
deallog << "OK" << std::endl;
}
int main (int argc, char **argv)
{
initlog();
Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, testing_max_num_threads());
try
{
{
TrilinosWrappers::MPI::Vector v;
v.reinit(complete_index_set(100), MPI_COMM_WORLD);
TrilinosWrappers::MPI::Vector w;
w.reinit(complete_index_set(100), MPI_COMM_WORLD);
TrilinosWrappers::MPI::Vector x;
x.reinit(complete_index_set(100), MPI_COMM_WORLD);
test (v,w,x);
}
}
catch (std::exception &exc)
{
std::cerr << std::endl << std::endl
<< "----------------------------------------------------"
<< std::endl;
std::cerr << "Exception on processing: " << std::endl
<< exc.what() << std::endl
<< "Aborting!" << std::endl
<< "----------------------------------------------------"
<< std::endl;
return 1;
}
catch (...)
{
std::cerr << std::endl << std::endl
<< "----------------------------------------------------"
<< std::endl;
std::cerr << "Unknown exception!" << std::endl
<< "Aborting!" << std::endl
<< "----------------------------------------------------"
<< std::endl;
return 1;
};
}<|fim▁end|> | for (unsigned int i=0; i<v.size(); ++i)
{ |
<|file_name|>raster.rs<|end_file_name|><|fim▁begin|>//! Path rasterization.
use crate::{gpu::GpuVertex, Result, P2};
use lyon_path::Builder;
use lyon_tessellation::{
BuffersBuilder, FillAttributes, FillOptions, FillTessellator, LineJoin, StrokeAttributes,
StrokeOptions, StrokeTessellator, VertexBuffers,
};
use palette::LinSrgba;
/// The method by which the rasterizer will rasterize the vector path.
#[derive(Debug, Clone, Copy)]
pub enum Method {
/// In fill method, the rasterizer will treat all the area inside the path as part of the
/// raster area. In this method, paths are automatically closed by assuming an edge from the
/// last to the first vertex.
Fill,
/// In stroke method, the rasterizer will treat the area immediately adjacent the path within
/// the given width as part of the rastered area. In this method, paths are left open
/// and no edge between the last and first vertex is assumed.
Stroke(f32),
}
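// A small usage sketch (hypothetical caller; building the lyon `Builder` and the
// color is elided here):
//
//   let (vertices, indices) = raster_path(builder, Method::Stroke(2.0), color)?;
//
// `Method::Fill` closes the path implicitly, while `Method::Stroke(width)` keeps
// it open and only rasters a band of the given width around it.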
pub fn raster_path(
builder: Builder,<|fim▁hole|> color: LinSrgba,
) -> Result<(Vec<GpuVertex>, Vec<u32>)> {
match method {
Method::Fill => {
let ctor = |v: P2, _: FillAttributes| -> P2 { v };
let mut buffers: VertexBuffers<P2, u32> = VertexBuffers::new();
let mut buffers_builder = BuffersBuilder::new(&mut buffers, ctor);
let mut tessellator = FillTessellator::new();
let result = tessellator.tessellate_path(
&builder.build(),
&FillOptions::default().with_tolerance(0.05),
&mut buffers_builder,
);
match result {
Ok(_) => {}
Err(e) => panic!("Tessellation failed: {:?}", e),
}
Ok((
buffers
.vertices
.into_iter()
.map(|v| GpuVertex {
vpos: [v.x, v.y],
vcol: [
color.color.red,
color.color.green,
color.color.blue,
color.alpha,
],
})
.collect(),
buffers.indices,
))
}
Method::Stroke(width) => {
let ctor = |v: P2, _: StrokeAttributes| -> P2 { v };
let mut buffers: VertexBuffers<P2, u32> = VertexBuffers::new();
let mut buffers_builder = BuffersBuilder::new(&mut buffers, ctor);
let mut tessellator = StrokeTessellator::new();
tessellator
.tessellate_path(
&builder.build(),
&StrokeOptions::default()
.with_line_join(LineJoin::MiterClip)
.with_line_width(width)
.with_tolerance(0.05),
&mut buffers_builder,
)
.expect("TODO: wrap error");
Ok((
buffers
.vertices
.into_iter()
.map(|v| GpuVertex {
vpos: [v.x, v.y],
vcol: [
color.color.red,
color.color.green,
color.color.blue,
color.alpha,
],
})
.collect(),
buffers.indices,
))
}
}
}<|fim▁end|> | method: Method, |
<|file_name|>edit_channel.rs<|end_file_name|><|fim▁begin|>use internal::prelude::*;
/// A builder to edit a [`GuildChannel`] for use via [`GuildChannel::edit`]
///
/// Defaults are not directly provided by the builder itself.
///
/// # Examples
///
/// Edit a channel, providing a new name and topic:
///
/// ```rust,ignore
/// // assuming a channel has already been bound
/// if let Err(why) = channel::edit(|c| c.name("new name").topic("a test topic")) {
/// // properly handle the error
/// }
/// ```
///
/// [`GuildChannel`]: ../model/struct.GuildChannel.html
/// [`GuildChannel::edit`]: ../model/struct.GuildChannel.html#method.edit
#[derive(Clone, Debug, Default)]
pub struct EditChannel(pub JsonMap);
impl EditChannel {
/// The bitrate of the channel in bits.
///
/// This is for [voice] channels only.
///
/// [voice]: ../model/enum.ChannelType.html#variant.Voice
pub fn bitrate(mut self, bitrate: u64) -> Self {
self.0.insert(<|fim▁hole|> "bitrate".to_owned(),
Value::Number(Number::from(bitrate)),
);
self
}
/// The name of the channel.
///
/// Must be between 2 and 100 characters long.
pub fn name(mut self, name: &str) -> Self {
self.0.insert(
"name".to_owned(),
Value::String(name.to_owned()),
);
self
}
/// The position of the channel in the channel list.
pub fn position(mut self, position: u64) -> Self {
self.0.insert(
"position".to_owned(),
Value::Number(Number::from(position)),
);
self
}
/// The topic of the channel. Can be empty.
///
/// Must be between 0 and 1024 characters long.
///
/// This is for [text] channels only.
///
/// [text]: ../model/enum.ChannelType.html#variant.Text
pub fn topic(mut self, topic: &str) -> Self {
self.0.insert(
"topic".to_owned(),
Value::String(topic.to_owned()),
);
self
}
/// The number of users that may be in the channel simultaneously.
///
/// This is for [voice] channels only.
///
/// [voice]: ../model/enum.ChannelType.html#variant.Voice
pub fn user_limit(mut self, user_limit: u64) -> Self {
self.0.insert(
"user_limit".to_owned(),
Value::Number(Number::from(user_limit)),
);
self
}
}<|fim▁end|> | |
<|file_name|>transformer_benchmark.py<|end_file_name|><|fim▁begin|># Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes Transformer w/Keras benchmark and accuracy tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import flags
import tensorflow as tf
from official.benchmark import benchmark_wrappers
from official.benchmark import owner_utils
from official.benchmark.perfzero_benchmark import PerfZeroBenchmark
from official.nlp.transformer import misc
from official.nlp.transformer import transformer_main as transformer_main
from official.utils.flags import core as flags_core
TRANSFORMER_EN2DE_DATA_DIR_NAME = 'wmt32k-en2de-official'
EN2DE_2014_BLEU_DATA_DIR_NAME = 'newstest2014'
FLAGS = flags.FLAGS
TMP_DIR = os.getenv('TMPDIR')
class TransformerBenchmark(PerfZeroBenchmark):
"""Methods common to executing transformer w/keras tests.
Code under test for the Transformer Keras models report the same data and
require the same FLAG setup.
"""
def __init__(self, output_dir=None, default_flags=None, root_data_dir=None,
flag_methods=None, tpu=None):
root_data_dir = root_data_dir if root_data_dir else ''
self.train_data_dir = os.path.join(root_data_dir,
TRANSFORMER_EN2DE_DATA_DIR_NAME)
self.vocab_file = os.path.join(root_data_dir,
TRANSFORMER_EN2DE_DATA_DIR_NAME,
'vocab.ende.32768')
self.bleu_source = os.path.join(root_data_dir,
EN2DE_2014_BLEU_DATA_DIR_NAME,
'newstest2014.en')
self.bleu_ref = os.path.join(root_data_dir,
EN2DE_2014_BLEU_DATA_DIR_NAME,
'newstest2014.de')
if default_flags is None:
default_flags = {}
default_flags['data_dir'] = self.train_data_dir
default_flags['vocab_file'] = self.vocab_file
super(TransformerBenchmark, self).__init__(
output_dir=output_dir,
default_flags=default_flags,
flag_methods=flag_methods,
tpu=tpu)
@benchmark_wrappers.enable_runtime_flags
def _run_and_report_benchmark(self,
bleu_max=None,
bleu_min=None,
log_steps=None,
total_batch_size=None,
warmup=1):
"""Report benchmark results by writing to local protobuf file.
Args:
bleu_max: highest passing level for bleu score.
bleu_min: lowest passing level for bleu score.
log_steps: How often the log was created for stats['step_timestamp_log'].
total_batch_size: Global batch-size.
warmup: number of entries in stats['step_timestamp_log'] to ignore.
"""
start_time_sec = time.time()
task = transformer_main.TransformerTask(FLAGS)
stats = task.train()
wall_time_sec = time.time() - start_time_sec
metrics = []
if 'bleu_uncased' in stats:
if 'bleu_uncased_history' in stats:
bleu_uncased_best = max(stats['bleu_uncased_history'],
key=lambda x: x[1])
metrics.append({'name': 'bleu_uncased',
'value': bleu_uncased_best[1],
'min_value': bleu_min,
'max_value': bleu_max})
metrics.append({'name': 'bleu_best_score_iteration',
'value': bleu_uncased_best[0]})
metrics.append({'name': 'bleu_uncased_last',
'value': stats['bleu_uncased']})
else:
metrics.append({'name': 'bleu_uncased',
'value': stats['bleu_uncased'],<|fim▁hole|> len(stats['step_timestamp_log']) > warmup + 1):
# first entry in the time_log is start of step 1. The rest of the
# entries are the end of each step recorded
time_log = stats['step_timestamp_log']
elapsed = time_log[-1].timestamp - time_log[warmup].timestamp
num_examples = (
total_batch_size * log_steps * (len(time_log) - warmup - 1))
examples_per_sec = num_examples / elapsed
metrics.append({'name': 'exp_per_second',
'value': examples_per_sec})
if 'avg_exp_per_second' in stats:
metrics.append({'name': 'avg_exp_per_second',
'value': stats['avg_exp_per_second']})
if 'step_timestamp_log' in stats:
time_log = stats['step_timestamp_log']
metrics.append({'name': 'startup_time',
'value': time_log[0].timestamp - start_time_sec})
flags_str = flags_core.get_nondefault_flags_as_str()
self.report_benchmark(iters=-1, wall_time=wall_time_sec, metrics=metrics,
extras={'flags': flags_str})
class TransformerBaseKerasAccuracy(TransformerBenchmark):
"""Benchmark accuracy tests for Transformer Base model w/ Keras."""
def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
"""Benchmark accuracy tests for Transformer Base model w/ Keras.
Args:
output_dir: directory where to output e.g. log files
root_data_dir: directory under which to look for dataset
**kwargs: arbitrary named arguments. This is needed to make the
constructor forward compatible in case PerfZero provides more
named arguments before updating the constructor.
"""
flag_methods = [misc.define_transformer_flags]
super(TransformerBaseKerasAccuracy, self).__init__(
output_dir=output_dir, root_data_dir=root_data_dir,
flag_methods=flag_methods)
def benchmark_1_gpu(self):
"""Benchmark 1 gpu.
The paper uses 8 GPUs and a much larger effective batch size, this is will
not converge to the 27.3 BLEU (uncased) SOTA.
"""
self._setup()
FLAGS.num_gpus = 1
FLAGS.data_dir = self.train_data_dir
FLAGS.vocab_file = self.vocab_file
# Sets values directly to avoid validation check.
FLAGS['bleu_source'].value = self.bleu_source
FLAGS['bleu_ref'].value = self.bleu_ref
FLAGS.param_set = 'base'
FLAGS.batch_size = 2048
FLAGS.train_steps = 1000
FLAGS.steps_between_evals = 500
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
# These bleu scores are based on test runs after at this limited
# number of steps and batch size after verifying SOTA at 8xV100s.
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps,
bleu_min=25.3,
bleu_max=26)
def benchmark_1_gpu_static_batch(self):
"""Benchmark 1 gpu with static_batch.
The paper uses 8 GPUs and a much larger effective batch size, this is will
not converge to the 27.3 BLEU (uncased) SOTA.
"""
self._setup()
FLAGS.num_gpus = 1
FLAGS.data_dir = self.train_data_dir
FLAGS.vocab_file = self.vocab_file
# Sets values directly to avoid validation check.
FLAGS['bleu_source'].value = self.bleu_source
FLAGS['bleu_ref'].value = self.bleu_ref
FLAGS.param_set = 'base'
FLAGS.batch_size = 4096
FLAGS.train_steps = 100000
FLAGS.steps_between_evals = 5000
FLAGS.static_batch = True
FLAGS.max_length = 64
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_static_batch')
# These bleu scores are based on test runs after at this limited
# number of steps and batch size after verifying SOTA at 8xV100s.
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps,
bleu_min=25.3,
bleu_max=26)
def benchmark_8_gpu(self):
"""Benchmark 8 gpu.
Should converge to 27.3 BLEU (uncased). This has not been confirmed yet.
"""
self._setup()
FLAGS.num_gpus = 8
FLAGS.data_dir = self.train_data_dir
FLAGS.vocab_file = self.vocab_file
# Sets values directly to avoid validation check.
FLAGS['bleu_source'].value = self.bleu_source
FLAGS['bleu_ref'].value = self.bleu_ref
FLAGS.param_set = 'base'
FLAGS.batch_size = 4096*8
FLAGS.train_steps = 100000
FLAGS.steps_between_evals = 20000
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps,
bleu_min=27,
bleu_max=28)
def benchmark_8_gpu_static_batch(self):
"""Benchmark 8 gpu.
Should converge to 27.3 BLEU (uncased). This has not been confirmed yet.
"""
self._setup()
FLAGS.num_gpus = 8
FLAGS.data_dir = self.train_data_dir
FLAGS.vocab_file = self.vocab_file
# Sets values directly to avoid validation check.
FLAGS['bleu_source'].value = self.bleu_source
FLAGS['bleu_ref'].value = self.bleu_ref
FLAGS.param_set = 'base'
FLAGS.batch_size = 4096*8
FLAGS.train_steps = 100000
FLAGS.static_batch = True
FLAGS.max_length = 64
FLAGS.steps_between_evals = 5000
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_static_batch')
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps,
bleu_min=27,
bleu_max=28)
class TransformerBigKerasAccuracy(TransformerBenchmark):
"""Benchmark accuracy tests for Transformer Big model w/ Keras."""
def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
"""Benchmark accuracy tests for Transformer Big model w/ Keras.
Args:
output_dir: directory where to output e.g. log files
root_data_dir: directory under which to look for dataset
**kwargs: arbitrary named arguments. This is needed to make the
constructor forward compatible in case PerfZero provides more
named arguments before updating the constructor.
"""
flag_methods = [misc.define_transformer_flags]
super(TransformerBigKerasAccuracy, self).__init__(
output_dir=output_dir, root_data_dir=root_data_dir,
flag_methods=flag_methods)
def benchmark_8_gpu(self):
"""Benchmark 8 gpu.
Over 6 runs with eval every 20K steps the average highest value was 28.195
(bleu uncased). 28.424 was the highest and 27.96 the lowest. The values are
the highest value seen during a run and occurred at a median of iteration 9.
Iterations are not epochs, an iteration is a number of steps between evals.
"""
self._setup()
FLAGS.num_gpus = 8
FLAGS.data_dir = self.train_data_dir
FLAGS.vocab_file = self.vocab_file
# Sets values directly to avoid validation check.
FLAGS['bleu_source'].value = self.bleu_source
FLAGS['bleu_ref'].value = self.bleu_ref
FLAGS.param_set = 'big'
FLAGS.batch_size = 3072*8
FLAGS.train_steps = 20000 * 12
FLAGS.steps_between_evals = 20000
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps,
bleu_min=27.9,
bleu_max=29.2)
def benchmark_8_gpu_static_batch(self):
"""Benchmark 8 gpu.
    Should converge to 28.4 BLEU (uncased). This has not been verified yet.
"""
self._setup()
FLAGS.num_gpus = 8
FLAGS.data_dir = self.train_data_dir
FLAGS.vocab_file = self.vocab_file
# Sets values directly to avoid validation check.
FLAGS['bleu_source'].value = self.bleu_source
FLAGS['bleu_ref'].value = self.bleu_ref
FLAGS.param_set = 'big'
FLAGS.batch_size = 3072*8
FLAGS.static_batch = True
FLAGS.max_length = 64
FLAGS.train_steps = 20000 * 12
FLAGS.steps_between_evals = 20000
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_static_batch')
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps,
bleu_min=28,
bleu_max=29.2)
def benchmark_8_gpu_fp16(self):
"""Benchmark 8 gpu with dynamic batch and fp16.
Over 6 runs with eval every 20K steps the average highest value was 28.247
(bleu uncased). 28.424 was the highest and 28.09 the lowest. The values are
the highest value seen during a run and occurred at a median of iteration
11. While this could be interpreted as worse than FP32, if looking at the
first iteration at which 28 is passed FP16 performs equal and possibly
better. Although not part of the initial test runs, the highest value
recorded with the arguments below was 28.9 at iteration 12. Iterations are
not epochs, an iteration is a number of steps between evals.
"""
self._setup()
FLAGS.num_gpus = 8
FLAGS.dtype = 'fp16'
FLAGS.data_dir = self.train_data_dir
FLAGS.vocab_file = self.vocab_file
# Sets values directly to avoid validation check.
FLAGS['bleu_source'].value = self.bleu_source
FLAGS['bleu_ref'].value = self.bleu_ref
FLAGS.param_set = 'big'
FLAGS.batch_size = 3072*8
FLAGS.train_steps = 20000 * 12
FLAGS.steps_between_evals = 20000
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps,
bleu_min=28,
bleu_max=29.2)
def benchmark_8_gpu_fp16_amp(self):
"""Benchmark 8 gpu with dynamic batch and fp16 with automatic mixed precision.
    Should converge to 28.4 BLEU (uncased). This has not been verified yet.
"""
self._setup()
FLAGS.num_gpus = 8
FLAGS.dtype = 'fp16'
FLAGS.fp16_implementation = 'graph_rewrite'
FLAGS.data_dir = self.train_data_dir
FLAGS.vocab_file = self.vocab_file
# Sets values directly to avoid validation check.
FLAGS['bleu_source'].value = self.bleu_source
FLAGS['bleu_ref'].value = self.bleu_ref
FLAGS.param_set = 'big'
FLAGS.batch_size = 3072*8
FLAGS.train_steps = 20000 * 12
FLAGS.steps_between_evals = 20000
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_amp')
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps,
bleu_min=28,
bleu_max=29)
def benchmark_8_gpu_static_batch_fp16(self):
"""Benchmark 8 gpu with static batch and fp16.
    Should converge to 28.4 BLEU (uncased). This has not been verified yet.
"""
self._setup()
FLAGS.num_gpus = 8
FLAGS.dtype = 'fp16'
FLAGS.data_dir = self.train_data_dir
FLAGS.vocab_file = self.vocab_file
# Sets values directly to avoid validation check.
FLAGS['bleu_source'].value = self.bleu_source
FLAGS['bleu_ref'].value = self.bleu_ref
FLAGS.param_set = 'big'
FLAGS.batch_size = 3072*8
FLAGS.static_batch = True
FLAGS.max_length = 64
FLAGS.train_steps = 400000
FLAGS.steps_between_evals = 20000
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_static_batch_fp16')
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps,
bleu_min=28,
bleu_max=29.2)
def benchmark_xla_8_gpu_static_batch_fp16(self):
"""Benchmark 8 gpu with static batch, XLA, and FP16.
    Should converge to 28.4 BLEU (uncased). This has not been verified yet.
"""
self._setup()
FLAGS.num_gpus = 8
FLAGS.dtype = 'fp16'
FLAGS.enable_xla = True
FLAGS.data_dir = self.train_data_dir
FLAGS.vocab_file = self.vocab_file
# Sets values directly to avoid validation check.
FLAGS['bleu_source'].value = self.bleu_source
FLAGS['bleu_ref'].value = self.bleu_ref
FLAGS.param_set = 'big'
FLAGS.batch_size = 3072*8
FLAGS.static_batch = True
FLAGS.max_length = 64
FLAGS.train_steps = 400000
FLAGS.steps_between_evals = 20000
FLAGS.model_dir = self._get_model_dir(
'benchmark_xla_8_gpu_static_batch_fp16')
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps,
bleu_min=28,
bleu_max=29.2)
class TransformerKerasBenchmark(TransformerBenchmark):
"""Benchmarks for Transformer (Base and Big) using Keras."""
def __init__(self, output_dir=None, default_flags=None,
root_data_dir=None, batch_per_gpu=4096, tpu=None):
"""Initialize.
Args:
output_dir: Based directory for saving artifacts, e.g. checkpoints.
default_flags: default flags to use for all tests.
root_data_dir: root directory for data, e.g. training.
batch_per_gpu: batch size to use per gpu.
tpu: Target TPU to use.
"""
flag_methods = [misc.define_transformer_flags]
self.batch_per_gpu = batch_per_gpu
super(TransformerKerasBenchmark, self).__init__(
output_dir=output_dir,
default_flags=default_flags,
root_data_dir=root_data_dir,
flag_methods=flag_methods,
tpu=tpu)
def benchmark_1_gpu_no_dist_strat(self):
"""Benchmark 1 gpu without distribution strategy."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.distribution_strategy = 'off'
FLAGS.batch_size = self.batch_per_gpu
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_1_gpu_no_dist_strat_static_batch(self):
"""Benchmark 1 gpu without distribution strategy with static batch."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.distribution_strategy = 'off'
FLAGS.batch_size = self.batch_per_gpu
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_ds_sb')
FLAGS.static_batch = True
FLAGS.max_length = 64
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_1_gpu(self):
"""Benchmark 1 gpu."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.batch_size = self.batch_per_gpu
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_1_gpu_fp16(self):
"""Benchmark 1 gpu FP16."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.batch_size = self.batch_per_gpu
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16')
FLAGS.dtype = 'fp16'
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_xla_1_gpu(self):
"""Benchmark 1 gpu w/xla."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.batch_size = self.batch_per_gpu
FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu')
FLAGS.enable_xla = True
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_xla_1_gpu_fp16(self):
"""Benchmark 1 gpu w/xla and FP16."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.batch_size = self.batch_per_gpu
FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16')
FLAGS.enable_xla = True
FLAGS.dtype = 'fp16'
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_1_gpu_static_batch(self):
"""Benchmark 1 gpu with static batch."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.batch_size = self.batch_per_gpu
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_static_batch')
FLAGS.static_batch = True
FLAGS.max_length = 64
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_xla_1_gpu_static_batch(self):
"""Benchmark 1 gpu with static batch w/xla."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.batch_size = self.batch_per_gpu
FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_static_batch')
FLAGS.static_batch = True
FLAGS.max_length = 64
FLAGS.enable_xla = True
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_1_gpu_static_batch_fp16(self):
"""Benchmark 1 gpu with static batch FP16."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.batch_size = self.batch_per_gpu
FLAGS.model_dir = self._get_model_dir(
'benchmark_1_gpu_static_batch_fp16')
FLAGS.static_batch = True
FLAGS.max_length = 64
FLAGS.dtype = 'fp16'
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_xla_1_gpu_static_batch_fp16(self):
"""Benchmark 1 gpu with static batch w/xla and FP16."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.batch_size = self.batch_per_gpu
FLAGS.model_dir = self._get_model_dir(
'benchmark_xla_1_gpu_static_batch_fp16')
FLAGS.static_batch = True
FLAGS.max_length = 64
FLAGS.enable_xla = True
FLAGS.dtype = 'fp16'
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_8_gpu(self):
"""Benchmark 8 gpu."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.batch_size = self.batch_per_gpu * 8
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_8_gpu_fp16(self):
"""Benchmark 8 gpu FP16."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.dtype = 'fp16'
FLAGS.batch_size = self.batch_per_gpu * 8
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_xla_8_gpu(self):
"""Benchmark 8 gpu w/xla."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.enable_xla = True
FLAGS.batch_size = self.batch_per_gpu * 8
FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu')
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_xla_8_gpu_fp16(self):
"""Benchmark 8 gpu w/xla and FP16."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.enable_xla = True
FLAGS.dtype = 'fp16'
FLAGS.batch_size = self.batch_per_gpu * 8
FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_8_gpu_static_batch(self):
"""Benchmark 8 gpu with static batch."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.batch_size = self.batch_per_gpu * 8
FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_static_batch')
FLAGS.static_batch = True
FLAGS.max_length = 64
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_8_gpu_static_batch_fp16(self):
"""Benchmark 8 gpu with static batch FP16."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.dtype = 'fp16'
FLAGS.batch_size = self.batch_per_gpu * 8
FLAGS.model_dir = self._get_model_dir(
'benchmark_8_gpu_static_batch_fp16')
FLAGS.static_batch = True
FLAGS.max_length = 64
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_xla_8_gpu_static_batch(self):
"""Benchmark 8 gpu with static batch w/xla."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.enable_xla = True
FLAGS.batch_size = self.batch_per_gpu * 8
FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_static_batch')
FLAGS.static_batch = True
FLAGS.max_length = 64
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_xla_8_gpu_static_batch_fp16(self):
"""Benchmark 8 gpu with static batch w/xla and FP16."""
self._setup()
FLAGS.num_gpus = 8
FLAGS.enable_xla = True
FLAGS.dtype = 'fp16'
FLAGS.batch_size = self.batch_per_gpu * 8
FLAGS.model_dir = self._get_model_dir(
'benchmark_xla_8_gpu_static_batch_fp16')
FLAGS.static_batch = True
FLAGS.max_length = 64
self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
class TransformerBaseKerasBenchmarkReal(TransformerKerasBenchmark):
"""Transformer based version real data benchmark tests."""
def __init__(self, output_dir=TMP_DIR, root_data_dir=TMP_DIR, **kwargs):
def_flags = {}
def_flags['param_set'] = 'base'
def_flags['train_steps'] = 50
def_flags['log_steps'] = 10
super(TransformerBaseKerasBenchmarkReal, self).__init__(
output_dir=output_dir, default_flags=def_flags,
root_data_dir=root_data_dir, batch_per_gpu=4096)
class TransformerBigKerasBenchmarkReal(TransformerKerasBenchmark):
"""Transformer based version real data benchmark tests."""
def __init__(self, output_dir=TMP_DIR, root_data_dir=TMP_DIR,
tpu=None, **kwargs):
def_flags = {}
def_flags['param_set'] = 'big'
def_flags['train_steps'] = 50
def_flags['log_steps'] = 10
super(TransformerBigKerasBenchmarkReal, self).__init__(
output_dir=output_dir, default_flags=def_flags,
root_data_dir=root_data_dir, batch_per_gpu=3072,
tpu=tpu)
def benchmark_2x2_tpu(self):
"""Port of former snaggletooth transformer_big model on 2x2."""
self._setup()
FLAGS.model_dir = self._get_model_dir('benchmark_2x2_tpu')
FLAGS.train_steps = 300
FLAGS.log_steps = 150
FLAGS.steps_between_evals = 150
FLAGS.distribution_strategy = 'tpu'
FLAGS.static_batch = True
FLAGS.use_ctl = True
FLAGS.batch_size = 6144
FLAGS.max_length = 64
FLAGS.decode_batch_size = 32
FLAGS.decode_max_length = 97
FLAGS.padded_decode = True
FLAGS.enable_checkpointing = False
self._run_and_report_benchmark(
total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_4x4_tpu(self):
"""Port of former GCP transformer_big model on 4x4."""
self._setup()
FLAGS.model_dir = self._get_model_dir('benchmark_4x4_tpu')
FLAGS.train_steps = 300
FLAGS.log_steps = 150
FLAGS.steps_between_evals = 150
FLAGS.distribution_strategy = 'tpu'
FLAGS.static_batch = True
FLAGS.use_ctl = True
FLAGS.batch_size = 24576
FLAGS.max_length = 64
FLAGS.decode_batch_size = 32
FLAGS.decode_max_length = 97
FLAGS.padded_decode = True
FLAGS.enable_checkpointing = False
self._run_and_report_benchmark(
total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
@owner_utils.Owner('tf-graph-compiler')
def benchmark_4x4_tpu_mlir(self):
"""Run transformer_big model on 4x4 with the MLIR Bridge enabled."""
self._setup()
FLAGS.model_dir = self._get_model_dir('benchmark_4x4_tpu')
FLAGS.train_steps = 300
FLAGS.log_steps = 150
FLAGS.steps_between_evals = 150
FLAGS.distribution_strategy = 'tpu'
FLAGS.static_batch = True
FLAGS.use_ctl = True
FLAGS.batch_size = 24576
FLAGS.max_length = 64
FLAGS.decode_batch_size = 32
FLAGS.decode_max_length = 97
FLAGS.padded_decode = True
FLAGS.enable_checkpointing = False
tf.config.experimental.enable_mlir_bridge()
self._run_and_report_benchmark(
total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
if __name__ == '__main__':
tf.test.main()<|fim▁end|> | 'min_value': bleu_min,
'max_value': bleu_max})
if (warmup and 'step_timestamp_log' in stats and |
<|file_name|>WidthSizable.ts<|end_file_name|><|fim▁begin|>// Copyright 2021-2022, University of Colorado Boulder
/**
* Provides a minimum and preferred width. The minimum width is set by the component, so that layout containers could
* know how "small" the component can be made. The preferred width is set by the layout container, and the component
* should adjust its size so that it takes up that width.
*
* @author Jonathan Olson <[email protected]>
*/
import TinyProperty from '../../../axon/js/TinyProperty.js';
import memoize from '../../../phet-core/js/memoize.js';
import { scenery, Node } from '../imports.js';
import Constructor from '../../../phet-core/js/types/Constructor.js';
const WIDTH_SIZABLE_OPTION_KEYS = [
'preferredWidth',
'minimumWidth'
];
type WidthSizableSelfOptions = {
preferredWidth?: number | null,
minimumWidth?: number | null
};
const WidthSizable = memoize( <SuperType extends Constructor>( type: SuperType ) => {
const clazz = class extends type {
preferredWidthProperty: TinyProperty<number | null>;
minimumWidthProperty: TinyProperty<number | null>;
constructor( ...args: any[] ) {
super( ...args );
this.preferredWidthProperty = new TinyProperty<number | null>( null );
this.minimumWidthProperty = new TinyProperty<number | null>( null );
}
get preferredWidth(): number | null {
return this.preferredWidthProperty.value;
}
set preferredWidth( value: number | null ) {
assert && assert( value === null || ( typeof value === 'number' && isFinite( value ) && value >= 0 ),
'preferredWidth should be null or a non-negative finite number' );
this.preferredWidthProperty.value = value;
}
get minimumWidth(): number | null {<|fim▁hole|> set minimumWidth( value: number | null ) {
assert && assert( value === null || ( typeof value === 'number' && isFinite( value ) ) );
this.minimumWidthProperty.value = value;
}
// Detection flag for this trait
get widthSizable(): boolean { return true; }
};
// If we're extending into a Node type, include option keys
// TODO: This is ugly, we'll need to mutate after construction, no?
if ( type.prototype._mutatorKeys ) {
clazz.prototype._mutatorKeys = type.prototype._mutatorKeys.concat( WIDTH_SIZABLE_OPTION_KEYS );
}
return clazz;
} );
// Some typescript gymnastics to provide a user-defined type guard that treats something as widthSizable
const wrapper = () => WidthSizable( Node );
type WidthSizableNode = InstanceType<ReturnType<typeof wrapper>>;
const isWidthSizable = ( node: Node ): node is WidthSizableNode => {
return node.widthSizable;
};
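// A minimal usage sketch (MyBox is a hypothetical subclass, not part of this file):
//
//   class MyBox extends WidthSizable( Node ) { /* ... */ }
//   const box = new MyBox();
//   box.minimumWidth = 50;    // reported by the component itself
//   box.preferredWidth = 120; // requested by a layout container
//   isWidthSizable( box );    // => true, so preferredWidthProperty can be linked for relayout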
scenery.register( 'WidthSizable', WidthSizable );
export default WidthSizable;
export { isWidthSizable };
export type { WidthSizableNode, WidthSizableSelfOptions };<|fim▁end|> | return this.minimumWidthProperty.value;
}
|
<|file_name|>test_share_types_negative.py<|end_file_name|><|fim▁begin|># Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import test # noqa
from tempest_lib.common.utils import data_utils # noqa
from tempest_lib import exceptions as lib_exc # noqa
from manila_tempest_tests import clients_share as clients
from manila_tempest_tests.tests.api import base
class ShareTypesAdminNegativeTest(base.BaseSharesAdminTest):
def _create_share_type(self):
name = data_utils.rand_name("unique_st_name")
extra_specs = self.add_required_extra_specs_to_dict({"key": "value"})
return self.create_share_type(name, extra_specs=extra_specs)
@classmethod
def resource_setup(cls):
super(ShareTypesAdminNegativeTest, cls).resource_setup()
cls.member_shares_client = clients.Manager().shares_client
@test.attr(type=["gate", "smoke", ])
def test_create_share_with_nonexistent_share_type(self):<|fim▁hole|> @test.attr(type=["gate", "smoke", ])
def test_create_share_type_with_empty_name(self):
self.assertRaises(lib_exc.BadRequest, self.create_share_type, '')
@test.attr(type=["gate", "smoke", ])
def test_create_share_type_with_too_big_name(self):
self.assertRaises(lib_exc.BadRequest,
self.create_share_type,
"x" * 256)
@test.attr(type=["gate", "smoke", ])
def test_get_share_type_by_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.get_share_type,
data_utils.rand_name("fake"))
@test.attr(type=["gate", "smoke", ])
def test_try_delete_share_type_by_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.delete_share_type,
data_utils.rand_name("fake"))
@test.attr(type=["gate", "smoke", ])
def test_try_create_duplicate_of_share_type(self):
st = self._create_share_type()
self.assertRaises(lib_exc.Conflict,
self.create_share_type,
st["share_type"]["name"],
extra_specs=self.add_required_extra_specs_to_dict())
@test.attr(type=["gate", "smoke", ])
def test_add_share_type_allowed_for_public(self):
st = self._create_share_type()
self.assertRaises(lib_exc.Conflict,
self.shares_client.add_access_to_share_type,
st["share_type"]["id"],
self.shares_client.tenant_id)
@test.attr(type=["gate", "smoke", ])
def test_remove_share_type_allowed_for_public(self):
st = self._create_share_type()
self.assertRaises(lib_exc.Conflict,
self.shares_client.remove_access_from_share_type,
st["share_type"]["id"],
self.shares_client.tenant_id)
@test.attr(type=["gate", "smoke", ])
def test_add_share_type_by_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.add_access_to_share_type,
data_utils.rand_name("fake"),
self.shares_client.tenant_id)
@test.attr(type=["gate", "smoke", ])
def test_remove_share_type_by_nonexistent_id(self):
self.assertRaises(lib_exc.NotFound,
self.shares_client.remove_access_from_share_type,
data_utils.rand_name("fake"),
self.shares_client.tenant_id)<|fim▁end|> | self.assertRaises(lib_exc.NotFound,
self.create_share,
share_type_id=data_utils.rand_name("fake"))
|
<|file_name|>postprocess.py<|end_file_name|><|fim▁begin|># <hr>Calculates the tangents and bitangents for the imported meshes.
#
# Does nothing if a mesh does not have normals. You might want this post
# processing step to be executed if you plan to use tangent space calculations
# such as normal mapping applied to the meshes. There's a config setting,
# <tt>#AI_CONFIG_PP_CT_MAX_SMOOTHING_ANGLE<tt>, which allows you to specify
# a maximum smoothing angle for the algorithm. However, usually you'll
# want to leave it at the default value.
#
aiProcess_CalcTangentSpace = 0x1
## <hr>Identifies and joins identical vertex data sets within all
# imported meshes.
#
# After this step is run, each mesh contains unique vertices,
# so a vertex may be used by multiple faces. You usually want
# to use this post processing step. If your application deals with
# indexed geometry, this step is compulsory or you'll just waste rendering
# time. <b>If this flag is not specified<b>, no vertices are referenced by
# more than one face and <b>no index buffer is required<b> for rendering.
#
aiProcess_JoinIdenticalVertices = 0x2
## <hr>Converts all the imported data to a left-handed coordinate space.
#
# By default the data is returned in a right-handed coordinate space (which
# OpenGL prefers). In this space, +X points to the right,
# +Z points towards the viewer, and +Y points upwards. In the DirectX
# coordinate space +X points to the right, +Y points upwards, and +Z points
# away from the viewer.
#
# You'll probably want to consider this flag if you use Direct3D for
# rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this
# setting and bundles all conversions typically required for D3D-based
# applications.
#
aiProcess_MakeLeftHanded = 0x4
## <hr>Triangulates all faces of all meshes.
#
# By default the imported mesh data might contain faces with more than 3
# indices. For rendering you'll usually want all faces to be triangles.
# This post processing step splits up faces with more than 3 indices into
# triangles. Line and point primitives are #not# modified! If you want
# 'triangles only' with no other kinds of primitives, try the following
# solution:
# <ul>
# <li>Specify both #aiProcess_Triangulate and #aiProcess_SortByPType <li>
# <li>Ignore all point and line meshes when you process assimp's output<li>
# <ul>
#
aiProcess_Triangulate = 0x8
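# A small usage sketch (assumes the pyassimp "load" helper and its "processing"
# keyword; the file name is only an example):
#
#   from pyassimp import load, release
#   scene = load('model.dae', processing=aiProcess_Triangulate | aiProcess_SortByPType)
#   ...  # every mesh in scene.meshes is now triangle-only
#   release(scene)
#
# Combining the two flags as recommended above moves stray point/line primitives
# into separate meshes that can simply be skipped.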
## <hr>Removes some parts of the data structure (animations, materials,
# light sources, cameras, textures, vertex components).
#
# The components to be removed are specified in a separate
# configuration option, <tt>#AI_CONFIG_PP_RVC_FLAGS<tt>. This is quite useful
# if you don't need all parts of the output structure. Vertex colors
# are rarely used today for example... Calling this step to remove unneeded
# data from the pipeline as early as possible results in increased
# performance and a more optimized output data structure.
# This step is also useful if you want to force Assimp to recompute
# normals or tangents. The corresponding steps don't recompute them if
# they're already there (loaded from the source asset). By using this
# step you can make sure they are NOT there.
#
# This flag is a poor one, mainly because its purpose is usually
# misunderstood. Consider the following case: a 3D model has been exported
# from a CAD app, and it has per-face vertex colors. Vertex positions can't be
# shared, thus the #aiProcess_JoinIdenticalVertices step fails to
# optimize the data because of these nasty little vertex colors.
# Most apps don't even process them, so it's all for nothing. By using
# this step, unneeded components are excluded as early as possible
# thus opening more room for internal optimizations.
#
aiProcess_RemoveComponent = 0x10
## <hr>Generates normals for all faces of all meshes.
#
# This is ignored if normals are already there at the time this flag
# is evaluated. Model importers try to load them from the source file, so
# they're usually already there. Face normals are shared between all points
# of a single face, so a single point can have multiple normals, which
# forces the library to duplicate vertices in some cases.
# #aiProcess_JoinIdenticalVertices is #senseless# then.
#
# This flag may not be specified together with #aiProcess_GenSmoothNormals.
#
aiProcess_GenNormals = 0x20
## <hr>Generates smooth normals for all vertices in the mesh.
#
# This is ignored if normals are already there at the time this flag
# is evaluated. Model importers try to load them from the source file, so
# they're usually already there.
#
# This flag may not be specified together with
# #aiProcess_GenNormals. There's a configuration option,
# <tt>#AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE<tt> which allows you to specify
# an angle maximum for the normal smoothing algorithm. Normals exceeding
# this limit are not smoothed, resulting in a 'hard' seam between two faces.
# Using a decent angle here (e.g. 80 degrees) results in very good visual
# appearance.
#
aiProcess_GenSmoothNormals = 0x40
## <hr>Splits large meshes into smaller sub-meshes.
#
# This is quite useful for real-time rendering, where the number of triangles
# which can be maximally processed in a single draw-call is limited
# by the video driverhardware. The maximum vertex buffer is usually limited
# too. Both requirements can be met with this step: you may specify both a
# triangle and vertex limit for a single mesh.
#
# The split limits can (and should!) be set through the
# <tt>#AI_CONFIG_PP_SLM_VERTEX_LIMIT<tt> and <tt>#AI_CONFIG_PP_SLM_TRIANGLE_LIMIT<tt>
# settings. The default values are <tt>#AI_SLM_DEFAULT_MAX_VERTICES<tt> and
# <tt>#AI_SLM_DEFAULT_MAX_TRIANGLES<tt>.
#
# Note that splitting is generally a time-consuming task, but only if there's
# something to split. The use of this step is recommended for most users.
#
aiProcess_SplitLargeMeshes = 0x80
## <hr>Removes the node graph and pre-transforms all vertices with
# the local transformation matrices of their nodes.
#
# The output scene still contains nodes, however there is only a
# root node with children, each one referencing only one mesh,
# and each mesh referencing one material. For rendering, you can
# simply render all meshes in order - you don't need to pay
# attention to local transformations and the node hierarchy.
# Animations are removed during this step.
# This step is intended for applications without a scenegraph.
# The step CAN cause some problems: if e.g. a mesh of the asset
# contains normals and another, using the same material index, does not,
# they will be brought together, but the first meshes's part of
# the normal list is zeroed. However, these artifacts are rare.
# @note The <tt>#AI_CONFIG_PP_PTV_NORMALIZE<tt> configuration property
# can be set to normalize the scene's spatial dimension to the -1...1
# range.
#
aiProcess_PreTransformVertices = 0x100
## <hr>Limits the number of bones simultaneously affecting a single vertex
# to a maximum value.
#
# If any vertex is affected by more than the maximum number of bones, the least
# important vertex weights are removed and the remaining vertex weights are
# renormalized so that the weights still sum up to 1.
# The default bone weight limit is 4 (defined as <tt>#AI_LMW_MAX_WEIGHTS<tt> in
# config.h), but you can use the <tt>#AI_CONFIG_PP_LBW_MAX_WEIGHTS<tt> setting to
# supply your own limit to the post processing step.
#
# If you intend to perform the skinning in hardware, this post processing
# step might be of interest to you.
#
aiProcess_LimitBoneWeights = 0x200
## <hr>Validates the imported scene data structure.
# This makes sure that all indices are valid, all animations and
# bones are linked correctly, all material references are correct .. etc.
#
# It is recommended that you capture Assimp's log output if you use this flag,
# so you can easily find out what's wrong if a file fails the
# validation. The validator is quite strict and will find #all#
# inconsistencies in the data structure... It is recommended that plugin
# developers use it to debug their loaders. There are two types of
# validation failures:
# <ul>
# <li>Error: There's something wrong with the imported data. Further
# postprocessing is not possible and the data is not usable at all.
# The import fails. #Importer::GetErrorString() or #aiGetErrorString()
# carry the error message around.<li>
# <li>Warning: There are some minor issues (e.g. 1000000 animation
# keyframes with the same time), but further postprocessing and use
# of the data structure is still safe. Warning details are written
# to the log file, <tt>#AI_SCENE_FLAGS_VALIDATION_WARNING<tt> is set
# in #aiScene::mFlags<li>
# <ul>
#
# This post-processing step is not time-consuming. Its use is not
# compulsory, but recommended.
#
aiProcess_ValidateDataStructure = 0x400
## <hr>Reorders triangles for better vertex cache locality.
#
# The step tries to improve the ACMR (average post-transform vertex cache
# miss ratio) for all meshes. The implementation runs in O(n) and is
# roughly based on the 'tipsify' algorithm (see <a href="
# http://www.cs.princeton.edu/gfx/pubs/Sander_2007_TR/tipsy.pdf">this
# paper</a>).
#
# If you intend to render huge models in hardware, this step might
# be of interest to you. The <tt>#AI_CONFIG_PP_ICL_PTCACHE_SIZE<tt>config
# setting can be used to fine-tune the cache optimization.
#
aiProcess_ImproveCacheLocality = 0x800
## <hr>Searches for redundant/unreferenced materials and removes them.
#
# This is especially useful in combination with the
# #aiProcess_PretransformVertices and #aiProcess_OptimizeMeshes flags.
# Both join small meshes with equal characteristics, but they can't do
# their work if two meshes have different materials. Because several
# material settings are lost during Assimp's import filters,
# (and because many exporters don't check for redundant materials), huge
# models often have materials which are are defined several times with<|fim▁hole|># a surface are ignored in all comparisons (e.g. the material name).
# So, if you're passing additional information through the
# content pipeline (probably using #magic# material names), don't
# specify this flag. Alternatively take a look at the
# <tt>#AI_CONFIG_PP_RRM_EXCLUDE_LIST<tt> setting.
#
aiProcess_RemoveRedundantMaterials = 0x1000
## <hr>This step tries to determine which meshes have normal vectors
# that are facing inwards and inverts them.
#
# The algorithm is simple but effective:
# the bounding box of all vertices + their normals is compared against
# the volume of the bounding box of all vertices without their normals.
# This works well for most objects, problems might occur with planar
# surfaces. However, the step tries to filter such cases.
# The step inverts all in-facing normals. Generally it is recommended
# to enable this step, although the result is not always correct.
#
aiProcess_FixInfacingNormals = 0x2000
## <hr>This step splits meshes with more than one primitive type in
# homogeneous sub-meshes.
#
# The step is executed after the triangulation step. After the step
# returns, just one bit is set in aiMesh::mPrimitiveTypes. This is
# especially useful for real-time rendering where point and line
# primitives are often ignored or rendered separately.
# You can use the <tt>#AI_CONFIG_PP_SBP_REMOVE<tt> option to specify which
# primitive types you need. This can be used to easily exclude
# lines and points, which are rarely used, from the import.
#
aiProcess_SortByPType = 0x8000
## <hr>This step searches all meshes for degenerate primitives and
# converts them to proper lines or points.
#
# A face is 'degenerate' if one or more of its points are identical.
# To have the degenerate stuff not only detected and collapsed but
# removed, try one of the following procedures:
# <br><b>1.<b> (if you support lines and points for rendering but don't
# want the degenerates)<br>
# <ul>
# <li>Specify the #aiProcess_FindDegenerates flag.
# <li>
# <li>Set the <tt>AI_CONFIG_PP_FD_REMOVE<tt> option to 1. This will
# cause the step to remove degenerate triangles from the import
# as soon as they're detected. They won't pass any further
# pipeline steps.
# <li>
# <ul>
# <br><b>2.<b>(if you don't support lines and points at all)<br>
# <ul>
# <li>Specify the #aiProcess_FindDegenerates flag.
# <li>
# <li>Specify the #aiProcess_SortByPType flag. This moves line and
# point primitives to separate meshes.
# <li>
# <li>Set the <tt>AI_CONFIG_PP_SBP_REMOVE<tt> option to
# @code aiPrimitiveType_POINTS | aiPrimitiveType_LINES
# @endcode to cause SortByPType to reject point
# and line meshes from the scene.
# <li>
# <ul>
# @note Degenerate polygons are not necessarily evil and that's why
# they're not removed by default. There are several file formats which
# don't support lines or points, and some exporters bypass the
# format specification and write them as degenerate triangles instead.
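#
# As an illustrative sketch only (the pyassimp helper API shown and the file
# name are assumptions, not taken from this documentation), requesting these
# flags from Python could look roughly like:
# @code
# from pyassimp import load, postprocess
# scene = load('model.dae',
#              processing=postprocess.aiProcess_FindDegenerates
#              | postprocess.aiProcess_SortByPType)
# @endcode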
#
aiProcess_FindDegenerates = 0x10000
## <hr>This step searches all meshes for invalid data, such as zeroed
# normal vectors or invalid UV coords and removes/fixes them. This is
# intended to get rid of some common exporter errors.
#
# This is especially useful for normals. If they are invalid, and
# the step recognizes this, they will be removed and can later
# be recomputed, i.e. by the #aiProcess_GenSmoothNormals flag.<br>
# The step will also remove meshes that are infinitely small and reduce
# animation tracks consisting of hundreds of redundant keys to a single
# key. The <tt>AI_CONFIG_PP_FID_ANIM_ACCURACY<tt> config property decides
# the accuracy of the check for duplicate animation tracks.
#
aiProcess_FindInvalidData = 0x20000
## <hr>This step converts non-UV mappings (such as spherical or
# cylindrical mapping) to proper texture coordinate channels.
#
# Most applications will support UV mapping only, so you will
# probably want to specify this step in every case. Note that Assimp is not
# always able to match the original mapping implementation of the
# 3D app which produced a model perfectly. It's always better to let the
# modelling app compute the UV channels - 3ds max, Maya, Blender,
# LightWave, and Modo do this for example.
#
# @note If this step is not requested, you'll need to process the
# <tt>#AI_MATKEY_MAPPING<tt> material property in order to display all assets
# properly.
#
aiProcess_GenUVCoords = 0x40000
## <hr>This step applies per-texture UV transformations and bakes
# them into stand-alone texture coordinate channels.
#
# UV transformations are specified per-texture - see the
# <tt>#AI_MATKEY_UVTRANSFORM<tt> material key for more information.
# This step processes all textures with
# transformed input UV coordinates and generates a new (pre-transformed) UV channel
# which replaces the old channel. Most applications won't support UV
# transformations, so you will probably want to specify this step.
#
# @note UV transformations are usually implemented in real-time apps by
# transforming texture coordinates at vertex shader stage with a 3x3
# (homogenous) transformation matrix.
#
aiProcess_TransformUVCoords = 0x80000
## <hr>This step searches for duplicate meshes and replaces them
# with references to the first mesh.
#
# This step takes a while, so don't use it if speed is a concern.
# Its main purpose is to workaround the fact that many export
# file formats don't support instanced meshes, so exporters need to
# duplicate meshes. This step removes the duplicates again. Please
# note that Assimp does not currently support per-node material
# assignment to meshes, which means that identical meshes with
# different materials are currently #not# joined, although this is
# planned for future versions.
#
aiProcess_FindInstances = 0x100000
## <hr>A postprocessing step to reduce the number of meshes.
#
# This will, in fact, reduce the number of draw calls.
#
# This is a very effective optimization and is recommended to be used
# together with #aiProcess_OptimizeGraph, if possible. The flag is fully
# compatible with both #aiProcess_SplitLargeMeshes and #aiProcess_SortByPType.
#
aiProcess_OptimizeMeshes = 0x200000
## <hr>A postprocessing step to optimize the scene hierarchy.
#
# Nodes without animations, bones, lights or cameras assigned are
# collapsed and joined.
#
# Node names can be lost during this step. If you use special 'tag nodes'
# to pass additional information through your content pipeline, use the
# <tt>#AI_CONFIG_PP_OG_EXCLUDE_LIST<tt> setting to specify a list of node
# names you want to be kept. Nodes matching one of the names in this list won't
# be touched or modified.
#
# Use this flag with caution. Most simple files will be collapsed to a
# single node, so complex hierarchies are usually completely lost. This is not
# useful for editor environments, but probably a very effective
# optimization if you just want to get the model data, convert it to your
# own format, and render it as fast as possible.
#
# This flag is designed to be used with #aiProcess_OptimizeMeshes for best
# results.
#
# @note 'Crappy' scenes with thousands of extremely small meshes packed
# in deeply nested nodes exist for almost all file formats.
# #aiProcess_OptimizeMeshes in combination with #aiProcess_OptimizeGraph
# usually fixes them all and makes them renderable.
#
aiProcess_OptimizeGraph = 0x400000
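# Illustrative note (a sketch, not part of the original docs): since the flags
# are plain bit masks, the two scene optimizations are typically requested
# together, e.g.
# @code
# flags = aiProcess_OptimizeMeshes | aiProcess_OptimizeGraph
# @endcode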
## <hr>This step flips all UV coordinates along the y-axis and adjusts
# material settings and bitangents accordingly.
#
# <b>Output UV coordinate system:<b>
# @code
# 0y|0y ---------- 1x|0y
# | |
# | |
# | |
# 0x|1y ---------- 1x|1y
# @endcode
#
# You'll probably want to consider this flag if you use Direct3D for
# rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this
# setting and bundles all conversions typically required for D3D-based
# applications.
#
aiProcess_FlipUVs = 0x800000
## <hr>This step adjusts the output face winding order to be CW.
#
# The default face winding order is counter clockwise (CCW).
#
# <b>Output face order:<b>
# @code
# x2
#
# x0
# x1
# @endcode
#
aiProcess_FlipWindingOrder = 0x1000000
## <hr>This step splits meshes with many bones into sub-meshes so that each
# sub-mesh has fewer or as many bones as a given limit.
#
aiProcess_SplitByBoneCount = 0x2000000
## <hr>This step removes bones losslessly or according to some threshold.
#
# In some cases (i.e. formats that require it) exporters are forced to
# assign dummy bone weights to otherwise static meshes assigned to
# animated meshes. Full, weight-based skinning is expensive while
# animating nodes is extremely cheap, so this step is offered to clean up
# the data in that regard.
#
# Use <tt>#AI_CONFIG_PP_DB_THRESHOLD<tt> to control this.
# Use <tt>#AI_CONFIG_PP_DB_ALL_OR_NONE<tt> if you want bones removed if and
# only if all bones within the scene qualify for removal.
#
aiProcess_Debone = 0x4000000
aiProcess_GenEntityMeshes = 0x100000
aiProcess_OptimizeAnimations = 0x200000
aiProcess_FixTexturePaths = 0x200000
## @def aiProcess_ConvertToLeftHanded
# @brief Shortcut flag for Direct3D-based applications.
#
# Supersedes the #aiProcess_MakeLeftHanded and #aiProcess_FlipUVs and
# #aiProcess_FlipWindingOrder flags.
# The output data matches Direct3D's conventions: left-handed geometry, upper-left
# origin for UV coordinates and finally clockwise face order, suitable for CCW culling.
#
# @deprecated
#
aiProcess_ConvertToLeftHanded = ( \
aiProcess_MakeLeftHanded | \
aiProcess_FlipUVs | \
aiProcess_FlipWindingOrder | \
0 )
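# Illustrative example (an assumption, not from the original docs): because the
# value is an ordinary bit mask, it composes with any other flag or preset
# defined in this module, e.g.
# @code
# d3d_flags = aiProcess_ConvertToLeftHanded | aiProcessPreset_TargetRealtime_Fast
# @endcode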
## @def aiProcessPreset_TargetRealtimeUse_Fast
# @brief Default postprocess configuration optimizing the data for real-time rendering.
#
# Applications would want to use this preset to load models on end-user PCs,
# maybe for direct use in game.
#
# If you're using DirectX, don't forget to combine this value with
# the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
# in your application apply the #aiProcess_TransformUVCoords step, too.
# @note Please take the time to read the docs for the steps enabled by this preset.
# Some of them offer further configurable properties, while some of them might not be of
# use for you so it might be better to not specify them.
#
aiProcessPreset_TargetRealtime_Fast = ( \
aiProcess_CalcTangentSpace | \
aiProcess_GenNormals | \
aiProcess_JoinIdenticalVertices | \
aiProcess_Triangulate | \
aiProcess_GenUVCoords | \
aiProcess_SortByPType | \
0 )
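# Since the presets are ordinary integers, membership of a single step can be
# checked with a bitwise AND; a small sketch using the names defined in this
# module:
# @code
# if aiProcessPreset_TargetRealtime_Fast & aiProcess_Triangulate:
#     pass  # triangulation is part of the preset
# @endcode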
## @def aiProcessPreset_TargetRealtime_Quality
# @brief Default postprocess configuration optimizing the data for real-time rendering.
#
# Unlike #aiProcessPreset_TargetRealtime_Fast, this configuration
# performs some extra optimizations to improve rendering speed and
# to minimize memory usage. It could be a good choice for a level editor
# environment where import speed is not so important.
#
# If you're using DirectX, don't forget to combine this value with
# the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
# in your application apply the #aiProcess_TransformUVCoords step, too.
# @note Please take the time to read the docs for the steps enabled by this preset.
# Some of them offer further configurable properties, while some of them might not be
# of use for you so it might be better to not specify them.
#
aiProcessPreset_TargetRealtime_Quality = ( \
aiProcess_CalcTangentSpace | \
aiProcess_GenSmoothNormals | \
aiProcess_JoinIdenticalVertices | \
aiProcess_ImproveCacheLocality | \
aiProcess_LimitBoneWeights | \
aiProcess_RemoveRedundantMaterials | \
aiProcess_SplitLargeMeshes | \
aiProcess_Triangulate | \
aiProcess_GenUVCoords | \
aiProcess_SortByPType | \
aiProcess_FindDegenerates | \
aiProcess_FindInvalidData | \
0 )
## @def aiProcessPreset_TargetRealtime_MaxQuality
# @brief Default postprocess configuration optimizing the data for real-time rendering.
#
# This preset enables almost every optimization step to achieve perfectly
# optimized data. It's your choice for level editor environments where import speed
# is not important.
#
# If you're using DirectX, don't forget to combine this value with
# the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
# in your application, apply the #aiProcess_TransformUVCoords step, too.
# @note Please take the time to read the docs for the steps enabled by this preset.
# Some of them offer further configurable properties, while some of them might not be
# of use for you so it might be better to not specify them.
#
aiProcessPreset_TargetRealtime_MaxQuality = ( \
aiProcessPreset_TargetRealtime_Quality | \
aiProcess_FindInstances | \
aiProcess_ValidateDataStructure | \
aiProcess_OptimizeMeshes | \
0 )<|fim▁end|> | # exactly the same settings.
#
# Several material settings not contributing to the final appearance of |
<|file_name|>auth.py<|end_file_name|><|fim▁begin|>"""Flask Blueprint adding login functionality to our app. Note that we expect
gluten model and db config to be handled elsewhere
"""
import sys
import traceback
from functools import partial, wraps
from flask import redirect, request, flash, session, abort, g, url_for
from flask.globals import LocalProxy, _lookup_app_object
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
from flask_dance.consumer import (
OAuth2ConsumerBlueprint,
oauth_authorized,
oauth_error
)
from gludb.utils import now_field
from .utils import app_logger
from .models import User
def set_user_session(user_id=None):
if not user_id:
user_id = ''
session['user_id'] = user_id
def get_user():
"""Return current user"""
user_id = session.get('user_id', '')
if not user_id:
return None # Not logged in
return User.find_one(user_id)
def require_login(func):
"""Simple decorator helper for requiring login on functions decorated with
flask route: make sure that it's LAST in the decorator list so that the
flask magic happens (see voice_testing for an example).
Important: we are assuming the blueprint endpoint auth.login exists
"""
@wraps(func)
def wrapper(*args, **kwrds):
try:
user = get_user()
if user:
setattr(g, 'user', user)
return func(*args, **kwrds)
else:
url = url_for('auth.login', redir=request.url)
return redirect(url)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
log = app_logger()
log.warning("Unexpected error: %s", exc_value)
log.error(''.join(traceback.format_exception(
exc_type, exc_value, exc_traceback
)))
return abort(500)
return wrapper
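# A minimal usage sketch (the blueprint, route and attribute access below are
# hypothetical, not part of this module): the flask route decorator goes first
# so that it registers the wrapped view produced by require_login.
#
#     @some_blueprint.route('/dashboard')
#     @require_login
#     def dashboard():
#         # g.user is set by require_login before the view body runs
#         return 'Hello ' + g.user.name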
# Make the google blueprint (taken from their contrib code)
auth = OAuth2ConsumerBlueprint(
"auth",
__name__,
client_id=None, # Handled via app config
client_secret=None, # Handled via app config
scope=["profile", "email"],
base_url="https://www.googleapis.com/",
authorization_url="https://accounts.google.com/o/oauth2/auth",
token_url="https://accounts.google.com/o/oauth2/token",
redirect_url=None,
redirect_to=None,
login_url=None,
authorized_url=None,
authorization_url_params={},
session_class=None,
backend=None,
)
auth.from_config["client_id"] = "GOOGLE_OAUTH_CLIENT_ID"
auth.from_config["client_secret"] = "GOOGLE_OAUTH_CLIENT_SECRET"
@auth.before_app_request
def set_applocal_session():
ctx = stack.top
ctx.google_oauth = auth.session
google_api = LocalProxy(partial(_lookup_app_object, "google_oauth"))
def login_fail(msg):
flash(msg, category="error")
app_logger().error(msg)
return False
# create/login local user on successful OAuth login
@oauth_authorized.connect
def log_in_event(blueprint, token):
set_user_session() # Clear previous session
if not token:
return login_fail("Failed to log in")
resp = blueprint.session.get("/oauth2/v1/userinfo")
if not resp.ok:
return login_fail("Failed to login user!")
data = resp.json()
email = data.get('email', '')
if not email:
return login_fail("Google failed to supply an email address")
users = User.find_by_index('idx_email', email)
if users:
user = users[0]
else:
user = User(email=email)
# Update the user info and save the session info
user.name = data.get('name', email)
user.photo = data.get('picture', '/static/anonymous_person.png')
user.logins.append(now_field())
user.save()
set_user_session(user.id)
app_logger().info("Logged in user id %s, email %s" % (user.id, user.email))
# notify on OAuth provider error
@oauth_error.connect
def github_error(blueprint, error, error_description=None, error_uri=None):
login_fail("OAuth login failure: [%s] %s (uri=%s)" % (
error, error_description, error_uri
))
@auth.route('/logout')<|fim▁hole|> redir_url = request.args.get("redir", None)
if not redir_url:
redir_url = '/'
return redirect(redir_url)<|fim▁end|> | def logout():
set_user_session() |
<|file_name|>ImprovementCardServiceSpec.ts<|end_file_name|><|fim▁begin|>// Copyright 2019 The Oppia Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Unit tests for the ImprovementCardService.
*/
// TODO(#7222): Remove the following block of unnnecessary imports once
// ImprovementCardService.ts is upgraded to Angular 8.
import { AngularNameService } from
'pages/exploration-editor-page/services/angular-name.service.ts';
import { AnswerClassificationResultObjectFactory } from
'domain/classifier/AnswerClassificationResultObjectFactory.ts';
import { ClassifierObjectFactory } from
'domain/classifier/ClassifierObjectFactory.ts';
import { EditabilityService } from 'services/EditabilityService.ts';
import { ExplorationDraftObjectFactory } from
'domain/exploration/ExplorationDraftObjectFactory.ts';
import { FeedbackThreadObjectFactory } from
'domain/feedback_thread/FeedbackThreadObjectFactory.ts';
import { ImprovementActionButtonObjectFactory } from
'domain/statistics/ImprovementActionButtonObjectFactory.ts';
import { PlaythroughIssueObjectFactory } from
'domain/statistics/PlaythroughIssueObjectFactory.ts';
import { ParamChangeObjectFactory } from
'domain/exploration/ParamChangeObjectFactory.ts';
import { RuleObjectFactory } from 'domain/exploration/RuleObjectFactory.ts';
/* eslint-disable max-len */
import { SolutionValidityService } from
'pages/exploration-editor-page/editor-tab/services/solution-validity.service.ts';
/* eslint-enable max-len */
import { SuggestionModalService } from 'services/SuggestionModalService.ts';
import { SuggestionObjectFactory } from
'domain/suggestion/SuggestionObjectFactory.ts';
/* eslint-disable max-len */
import { ThreadStatusDisplayService } from
'pages/exploration-editor-page/feedback-tab/services/thread-status-display.service.ts';
/* eslint-enable max-len */
import { UserInfoObjectFactory } from 'domain/user/UserInfoObjectFactory.ts';
import { WrittenTranslationObjectFactory } from
'domain/exploration/WrittenTranslationObjectFactory.ts';
import { VoiceoverObjectFactory } from
'domain/exploration/VoiceoverObjectFactory.ts';
// ^^^ This block is to be removed.
require('domain/statistics/FeedbackImprovementCardObjectFactory.ts');
require('domain/statistics/PlaythroughImprovementCardObjectFactory.ts');
require('domain/statistics/SuggestionImprovementCardObjectFactory.ts');
require('services/ImprovementCardService.ts');
describe('ImprovementCardService', function() {
var $q = null;
var $rootScope = null;
var ImprovementCardService = null;
var FeedbackImprovementCardObjectFactory = null;
var PlaythroughImprovementCardObjectFactory = null;
var SuggestionImprovementCardObjectFactory = null;
beforeEach(angular.mock.module('oppia'));
beforeEach(angular.mock.module('oppia', function($provide) {
$provide.value('AngularNameService', new AngularNameService());
$provide.value(
'AnswerClassificationResultObjectFactory',
new AnswerClassificationResultObjectFactory());
$provide.value('ClassifierObjectFactory', new ClassifierObjectFactory());
$provide.value('EditabilityService', new EditabilityService());
$provide.value(
'ExplorationDraftObjectFactory', new ExplorationDraftObjectFactory());
$provide.value(
'FeedbackThreadObjectFactory', new FeedbackThreadObjectFactory());
$provide.value(
'ImprovementActionButtonObjectFactory',
new ImprovementActionButtonObjectFactory());
$provide.value(
'PlaythroughIssueObjectFactory', new PlaythroughIssueObjectFactory());
$provide.value(
'ParamChangeObjectFactory', new ParamChangeObjectFactory());
$provide.value('RuleObjectFactory', new RuleObjectFactory());
$provide.value('SolutionValidityService', new SolutionValidityService());
$provide.value('SuggestionModalService', new SuggestionModalService());
$provide.value('SuggestionObjectFactory', new SuggestionObjectFactory());
$provide.value(
'ThreadStatusDisplayService', new ThreadStatusDisplayService());
$provide.value('UserInfoObjectFactory', new UserInfoObjectFactory());
$provide.value('VoiceoverObjectFactory', new VoiceoverObjectFactory());
$provide.value(
'WrittenTranslationObjectFactory',
new WrittenTranslationObjectFactory());
}));
beforeEach(angular.mock.inject(function(
_$q_, _$rootScope_, _ImprovementCardService_,
_FeedbackImprovementCardObjectFactory_,
_PlaythroughImprovementCardObjectFactory_,
_SuggestionImprovementCardObjectFactory_) {
$q = _$q_;
$rootScope = _$rootScope_;
ImprovementCardService = _ImprovementCardService_;
FeedbackImprovementCardObjectFactory =
_FeedbackImprovementCardObjectFactory_;
PlaythroughImprovementCardObjectFactory =
_PlaythroughImprovementCardObjectFactory_;
SuggestionImprovementCardObjectFactory =
_SuggestionImprovementCardObjectFactory_;
this.expectedFactories = [
FeedbackImprovementCardObjectFactory,
PlaythroughImprovementCardObjectFactory,
SuggestionImprovementCardObjectFactory,
];
}));
describe('.getImprovementCardObjectFactoryRegistry', function() {
it('contains all known improvement card object factories', function() {
var actualFactories =
ImprovementCardService.getImprovementCardObjectFactoryRegistry();
// The registry should not be modifiable.
expect(Object.isFrozen(actualFactories)).toBe(true);
// Ordering isn't important, so allow the checks to be flexible.
expect(actualFactories.length).toEqual(this.expectedFactories.length);
this.expectedFactories.forEach(function(expectedFactory) {
expect(actualFactories).toContain(expectedFactory);
});
});
});
describe('.fetchCards', function() {
// Each individual factory should test their own fetchCards function.
describe('from factories which all return empty cards', function() {
beforeEach(function() {
this.expectedFactories.forEach(function(factory) {
spyOn(factory, 'fetchCards').and.callFake(function() {
return $q.resolve([]);
});
});
});<|fim▁hole|> it('returns an empty list', function(done) {
var onSuccess = function(cards) {
expect(cards).toEqual([]);
done();
};
var onFailure = function(error) {
done.fail(error);
};
ImprovementCardService.fetchCards().then(onSuccess, onFailure);
// $q Promises need to be forcibly resolved through a JavaScript digest,
// which is what $apply helps kick-start.
$rootScope.$apply();
});
});
});
});<|fim▁end|> | |
<|file_name|>ws_iris_AdaBoostClassifier_mysql_code_gen.py<|end_file_name|><|fim▁begin|>from sklearn2sql_heroku.tests.classification import generic as class_gen
<|fim▁hole|><|fim▁end|> |
class_gen.test_model("AdaBoostClassifier" , "iris" , "mysql") |
<|file_name|>create_secret_tls.go<|end_file_name|><|fim▁begin|>/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package create
import (
"context"
"crypto/tls"
"fmt"
"io/ioutil"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/cli-runtime/pkg/resource"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/scheme"
"k8s.io/kubectl/pkg/util"
"k8s.io/kubectl/pkg/util/hash"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
)
var (
secretForTLSLong = templates.LongDesc(i18n.T(`
Create a TLS secret from the given public/private key pair.
The public/private key pair must exist before hand. The public key certificate must be .PEM encoded and match
the given private key.`))
secretForTLSExample = templates.Examples(i18n.T(`
# Create a new TLS secret named tls-secret with the given key pair:
kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/tls.key`))
)
// CreateSecretTLSOptions holds the options for 'create secret tls' sub command
type CreateSecretTLSOptions struct {
// PrintFlags holds options necessary for obtaining a printer
PrintFlags *genericclioptions.PrintFlags
PrintObj func(obj runtime.Object) error
// Name is the name of this TLS secret.
Name string
// Key is the path to the user's private key.
Key string
// Cert is the path to the user's public key certificate.
Cert string
// AppendHash; if true, derive a hash from the Secret and append it to the name
AppendHash bool
FieldManager string
CreateAnnotation bool
Namespace string
EnforceNamespace bool
Client corev1client.CoreV1Interface
DryRunStrategy cmdutil.DryRunStrategy
DryRunVerifier *resource.DryRunVerifier
genericclioptions.IOStreams
}
// NewSecretTLSOptions creates a new *CreateSecretTLSOptions with default value
func NewSecretTLSOptions(ioStrems genericclioptions.IOStreams) *CreateSecretTLSOptions {
return &CreateSecretTLSOptions{
PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme),
IOStreams: ioStrems,
}
}
// NewCmdCreateSecretTLS is a macro command for creating secrets to work with TLS client or server
func NewCmdCreateSecretTLS(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command {
o := NewSecretTLSOptions(ioStreams)
cmd := &cobra.Command{
Use: "tls NAME --cert=path/to/cert/file --key=path/to/key/file [--dry-run=server|client|none]",
DisableFlagsInUseLine: true,
Short: i18n.T("Create a TLS secret"),
Long: secretForTLSLong,
Example: secretForTLSExample,
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(o.Complete(f, cmd, args))
cmdutil.CheckErr(o.Validate())
cmdutil.CheckErr(o.Run())
},
}
o.PrintFlags.AddFlags(cmd)
cmdutil.AddApplyAnnotationFlags(cmd)
cmdutil.AddValidateFlags(cmd)
cmdutil.AddDryRunFlag(cmd)
cmd.Flags().StringVar(&o.Cert, "cert", o.Cert, i18n.T("Path to PEM encoded public key certificate."))
cmd.Flags().StringVar(&o.Key, "key", o.Key, i18n.T("Path to private key associated with given certificate."))
cmd.Flags().BoolVar(&o.AppendHash, "append-hash", o.AppendHash, "Append a hash of the secret to its name.")
cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create")
return cmd
}
// Complete loads data from the command line environment
func (o *CreateSecretTLSOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
var err error
o.Name, err = NameFromCommandArgs(cmd, args)
if err != nil {
return err
}
restConfig, err := f.ToRESTConfig()
if err != nil {
return err
}
o.Client, err = corev1client.NewForConfig(restConfig)
if err != nil {
return err
}
o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag)
o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd)
if err != nil {
return err
}
dynamicClient, err := f.DynamicClient()
if err != nil {
return err
}
discoveryClient, err := f.ToDiscoveryClient()
if err != nil {
return err
}
o.DryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient)
o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
if err != nil {
return nil
}
cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy)
printer, err := o.PrintFlags.ToPrinter()
if err != nil {
return nil
}
o.PrintObj = func(obj runtime.Object) error {
return printer.PrintObj(obj, o.Out)<|fim▁hole|>
// Validate checks if CreateSecretTLSOptions hass sufficient value to run
func (o *CreateSecretTLSOptions) Validate() error {
// TODO: This is not strictly necessary. We can generate a self signed cert
// if no key/cert is given. The only requirement is that we either get both
// or none. See test/e2e/ingress_utils for self signed cert generation.
if len(o.Key) == 0 || len(o.Cert) == 0 {
return fmt.Errorf("key and cert must be specified")
}
return nil
}
// Run calls createSecretTLS which will create secretTLS based on CreateSecretTLSOptions
// and makes an API call to the server
func (o *CreateSecretTLSOptions) Run() error {
secretTLS, err := o.createSecretTLS()
if err != nil {
return err
}
err = util.CreateOrUpdateAnnotation(o.CreateAnnotation, secretTLS, scheme.DefaultJSONEncoder())
if err != nil {
return err
}
if o.DryRunStrategy != cmdutil.DryRunClient {
createOptions := metav1.CreateOptions{}
if o.FieldManager != "" {
createOptions.FieldManager = o.FieldManager
}
if o.DryRunStrategy == cmdutil.DryRunServer {
err := o.DryRunVerifier.HasSupport(secretTLS.GroupVersionKind())
if err != nil {
return err
}
createOptions.DryRun = []string{metav1.DryRunAll}
}
secretTLS, err = o.Client.Secrets(o.Namespace).Create(context.TODO(), secretTLS, createOptions)
if err != nil {
return fmt.Errorf("failed to create secret %v", err)
}
}
return o.PrintObj(secretTLS)
}
// createSecretTLS fills in key value pair from the information given in
// CreateSecretTLSOptions into *corev1.Secret
func (o *CreateSecretTLSOptions) createSecretTLS() (*corev1.Secret, error) {
namespace := ""
if o.EnforceNamespace {
namespace = o.Namespace
}
tlsCert, err := readFile(o.Cert)
if err != nil {
return nil, err
}
tlsKey, err := readFile(o.Key)
if err != nil {
return nil, err
}
if _, err := tls.X509KeyPair(tlsCert, tlsKey); err != nil {
return nil, err
}
// TODO: Add more validation.
// 1. If the certificate contains intermediates, it is a valid chain.
// 2. Format etc.
secretTLS := newSecretObj(o.Name, namespace, corev1.SecretTypeTLS)
secretTLS.Data[corev1.TLSCertKey] = []byte(tlsCert)
secretTLS.Data[corev1.TLSPrivateKeyKey] = []byte(tlsKey)
if o.AppendHash {
hash, err := hash.SecretHash(secretTLS)
if err != nil {
return nil, err
}
secretTLS.Name = fmt.Sprintf("%s-%s", secretTLS.Name, hash)
}
return secretTLS, nil
}
// readFile just reads a file into a byte array.
func readFile(file string) ([]byte, error) {
b, err := ioutil.ReadFile(file)
if err != nil {
return []byte{}, fmt.Errorf("Cannot read file %v, %v", file, err)
}
return b, nil
}<|fim▁end|> | }
return nil
} |
<|file_name|>settings.py<|end_file_name|><|fim▁begin|>import os
# Django settings for mysite project.
DEBUG = True
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
SITE_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(SITE_ROOT, 'db.sqlite3'), # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
DATE_INPUT_FORMATS = ('%d/%m/%Y')
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Zurich'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '05=^qgbhg3!6-dzb6#&2j^jmh-2fgc%22!z_!w*&8iy_m$2*$*'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mysite.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(SITE_ROOT, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages'
],
'debug': DEBUG,
},
},
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'polls'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}<|fim▁hole|> 'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}<|fim▁end|> | },
'handlers': {
'mail_admins': {
'level': 'ERROR', |
<|file_name|>global.js<|end_file_name|><|fim▁begin|>$(function() {
(function() {
$('.j_toClass').each(function(index, el) {
var $it = $(this);
var targetTo = $it.attr('data-target');
var thisTo = $it.attr('data-this');<|fim▁hole|> var $target = $(targetId);
var _fn = {
on: function() {
$target.addClass(targetTo);
$it.addClass(thisTo);
},
off: function() {
$target.removeClass(targetTo);
$it.removeClass(thisTo);
}
};
targetTo = targetTo && targetTo !== '' ? targetTo : 'on';
thisTo = thisTo && thisTo !== '' ? thisTo : 'on';
$it.on('click', function(e) {
e.preventDefault;
}).on('mouseenter', function() {
_fn.on();
return false;
}).on('mouseleave', function() {
_fn.off();
return false;
});
});
})();
$('.j-tab').on('click','a',function(e){
e.preventDefault();
var $it=$(this);
var targetId=$it.attr('href');
var $target=$(targetId);
$it.addClass('on').siblings('.on').removeClass('on');
$target.addClass('on').siblings('.on').removeClass('on');
$target.find('img[data-src]').each(function(index, el) {
var $it=$(this);
var src=$it.attr('data-src');
$it.attr('src',src).removeAttr('data-src');
});
});
//弹出框
$('body').on('click','.modal-close, .modal .j-close',function(e){
e.preventDefault();
var $it=$(this);
var $moldal=$it.parents('.modal');
$it.parents('.modal').removeClass('on');
}).on('click','.j-modal',function(e){
e.preventDefault();
var $it=$(this);
var targetId=$it.attr('href');
var $target=$(targetId);
$target.addClass('on');
});
});<|fim▁end|> | var targetId = $it.attr('href'); |
<|file_name|>syntaxEnvironment.js<|end_file_name|><|fim▁begin|>/**
* syntaxEnvironment.js
* Andrea Tino - 2015
*/
/**
* Main collection point for types to be rendered.
*/
module.exports = function() {
var tsClass = require('./class.js');
var tsInterface = require('./interface.js');
var tsEnum = require('./enum.js');
// Configuration: { classIds = [], interfaceIds = [], enumIds = [] }
var config = null;
// Associative arrays: id -> Typescript mapper (ts[Class|Interface|Enum])
var classes = {};
var interfaces = {};
var enums = {};
return {
/**
* Initializes the module.
* _config: Configuration:
* { classIds = [], interfaceIds = [], enumIds = [] }
*/
initialize: function(_config) {
if (!_config) {
throw 'Error: Configuration cannot be null or undefined!';
}
config = _config;
},
/**
* Builds TypeScript classes.
*/
buildClasses: function() {
},
/**
* Builds TypeScript interfaces.<|fim▁hole|>
/**
* Build enums.
*/
buildEnums: function() {
}
};
};<|fim▁end|> | */
buildInterfaces: function() {
}, |
<|file_name|>construct.cpp<|end_file_name|><|fim▁begin|>// Copyright Louis Dionne 2013-2017
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)<|fim▁hole|>
template <int i>
struct x { };
int main() {
constexpr hana::basic_tuple<> empty{}; (void)empty;
constexpr hana::basic_tuple<int, float> xs{1, 2.3f};
constexpr auto ys = hana::basic_tuple<int, float>{1, 2.3f};
constexpr auto copy = ys; (void)copy;
}<|fim▁end|> |
#include <boost/hana/basic_tuple.hpp>
namespace hana = boost::hana;
|
<|file_name|>rng.js<|end_file_name|><|fim▁begin|>// Random number generator - requires a PRNG backend, e.g. prng4.js
// For best results, put code like
// <body onClick='rng_seed_time();' onKeyPress='rng_seed_time();'>
// in your main HTML document.
var rng_state;
var rng_pool;
var rng_pptr;
// Mix in a 32-bit integer into the pool
function rng_seed_int(x) {
rng_pool[rng_pptr++] ^= x & 255;
rng_pool[rng_pptr++] ^= (x >> 8) & 255;
rng_pool[rng_pptr++] ^= (x >> 16) & 255;
rng_pool[rng_pptr++] ^= (x >> 24) & 255;
if(rng_pptr >= rng_psize) rng_pptr -= rng_psize;
}
// Mix in the current time (w/milliseconds) into the pool
function rng_seed_time() {
rng_seed_int(new Date().getTime());
}
// Initialize the pool with junk if needed.
if(rng_pool == null) {
rng_pool = new Array();
rng_pptr = 0;
var t;
if(window.crypto && window.crypto.getRandomValues) {
// Use webcrypto if available<|fim▁hole|> rng_pool[rng_pptr++] = ua[t];
}
if(navigator.appName == "Netscape" && navigator.appVersion < "5" && window.crypto) {
// Extract entropy (256 bits) from NS4 RNG if available
var z = window.crypto.random(32);
for(t = 0; t < z.length; ++t)
rng_pool[rng_pptr++] = z.charCodeAt(t) & 255;
}
while(rng_pptr < rng_psize) { // extract some randomness from Math.random()
t = Math.floor(65536 * Math.random());
rng_pool[rng_pptr++] = t >>> 8;
rng_pool[rng_pptr++] = t & 255;
}
rng_pptr = 0;
rng_seed_time();
//rng_seed_int(window.screenX);
//rng_seed_int(window.screenY);
}
function rng_get_byte() {
if(rng_state == null) {
rng_seed_time();
rng_state = prng_newstate();
rng_state.init(rng_pool);
for(rng_pptr = 0; rng_pptr < rng_pool.length; ++rng_pptr)
rng_pool[rng_pptr] = 0;
rng_pptr = 0;
//rng_pool = null;
}
// TODO: allow reseeding after first request
return rng_state.next();
}
function rng_get_bytes(ba) {
var i;
for(i = 0; i < ba.length; ++i) ba[i] = rng_get_byte();
}
function SecureRandom() {}
SecureRandom.prototype.nextBytes = rng_get_bytes;<|fim▁end|> | var ua = new Uint8Array(32);
window.crypto.getRandomValues(ua);
for(t = 0; t < 32; ++t) |
<|file_name|>store.js<|end_file_name|><|fim▁begin|>import { createStore } from '@utils/store.utils';
import placeholderImage from '../images/placeholder.jpeg';
import { getPhotoUrl, getPrefetchedPhotoForDisplay } from './api';
import { getLocalPhotoPath, getRandomLocalPhoto } from './photos.local';
import Settings from './settings';
export const getStateObject = (force = false) => {
const fetchFromServer = Settings.fetchFromServer;
const newPhotoDuration = Settings.newPhotoDuration;
let photoUrl;
let placeholderPhotoUrl;
let photoMeta;
let placeholderPhotoMeta;<|fim▁hole|> if (fetchFromServer) {
photoMeta = getPrefetchedPhotoForDisplay(force ? 0 : newPhotoDuration);
photoUrl = getPhotoUrl(photoMeta);
}
// or a locally stored photo
if (!photoUrl) {
photoMeta = getRandomLocalPhoto();
photoUrl = getLocalPhotoPath(photoMeta);
}
// or a fallback placeholder photo
if (!photoUrl) {
photoMeta = null;
photoUrl = placeholderImage;
}
// get a random image as placeholder
// to handle offline network scenarios
placeholderPhotoMeta = getRandomLocalPhoto();
placeholderPhotoUrl = getLocalPhotoPath(placeholderPhotoMeta);
return {
fetchFromServer,
photoUrl,
photoMeta,
placeholderPhotoUrl,
placeholderPhotoMeta,
newPhotoDuration,
};
};
export default createStore();<|fim▁end|> |
// if allowed to fetch from server
// begin with assuming we get a
// prefetched photo from the api |
<|file_name|>erase_regions.rs<|end_file_name|><|fim▁begin|>use crate::mir;
use crate::ty::fold::{TypeFoldable, TypeFolder};
use crate::ty::{self, Ty, TyCtxt, TypeFlags};
pub(super) fn provide(providers: &mut ty::query::Providers) {
*providers = ty::query::Providers { erase_regions_ty, ..*providers };
}
fn erase_regions_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
// N.B., use `super_fold_with` here. If we used `fold_with`, it
// could invoke the `erase_regions_ty` query recursively.
ty.super_fold_with(&mut RegionEraserVisitor { tcx }).into_ok()
}
impl<'tcx> TyCtxt<'tcx> {
/// Returns an equivalent value with all free regions removed (note
/// that late-bound regions remain, because they are important for
/// subtyping, but they are anonymized and normalized as well)..
pub fn erase_regions<T>(self, value: T) -> T
where
T: TypeFoldable<'tcx>,
{
// If there's nothing to erase avoid performing the query at all
if !value
.has_type_flags(TypeFlags::HAS_RE_LATE_BOUND | TypeFlags::HAS_POTENTIAL_FREE_REGIONS)
{
return value;
}
debug!("erase_regions({:?})", value);
let value1 = value.fold_with(&mut RegionEraserVisitor { tcx: self }).into_ok();
debug!("erase_regions = {:?}", value1);
value1
}
}
struct RegionEraserVisitor<'tcx> {
tcx: TyCtxt<'tcx>,
}
impl TypeFolder<'tcx> for RegionEraserVisitor<'tcx> {
fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
self.tcx
}
fn fold_ty(&mut self, ty: Ty<'tcx>) -> Result<Ty<'tcx>, Self::Error> {
if ty.needs_infer() { ty.super_fold_with(self) } else { Ok(self.tcx.erase_regions_ty(ty)) }
}
fn fold_binder<T>(&mut self, t: ty::Binder<'tcx, T>) -> Result<ty::Binder<'tcx, T>, Self::Error><|fim▁hole|> let u = self.tcx.anonymize_late_bound_regions(t);
u.super_fold_with(self)
}
fn fold_region(&mut self, r: ty::Region<'tcx>) -> Result<ty::Region<'tcx>, Self::Error> {
// because late-bound regions affect subtyping, we can't
// erase the bound/free distinction, but we can replace
// all free regions with 'erased.
//
// Note that we *CAN* replace early-bound regions -- the
// type system never "sees" those, they get substituted
// away. In codegen, they will always be erased to 'erased
// whenever a substitution occurs.
match *r {
ty::ReLateBound(..) => Ok(r),
_ => Ok(self.tcx.lifetimes.re_erased),
}
}
fn fold_mir_const(
&mut self,
c: mir::ConstantKind<'tcx>,
) -> Result<mir::ConstantKind<'tcx>, Self::Error> {
c.super_fold_with(self)
}
}<|fim▁end|> | where
T: TypeFoldable<'tcx>,
{ |
<|file_name|>pool.go<|end_file_name|><|fim▁begin|>package pipeline
import (
"runtime"
"sync/atomic"
"time"
"github.com/Jeffail/benthos/v3/internal/component"
iprocessor "github.com/Jeffail/benthos/v3/internal/component/processor"
"github.com/Jeffail/benthos/v3/internal/log"
"github.com/Jeffail/benthos/v3/internal/message"
"github.com/Jeffail/benthos/v3/internal/shutdown"
)
// Pool is a pool of pipelines. Each pipeline reads from a shared transaction
// channel. Inputs remain coupled to their outputs as they propagate the
// response channel in the transaction.
type Pool struct {
running uint32
workers []iprocessor.Pipeline
log log.Modular
messagesIn <-chan message.Transaction
messagesOut chan message.Transaction
closeChan chan struct{}
closed chan struct{}
}
func newPoolV2(threads int, log log.Modular, msgProcessors ...iprocessor.V1) (*Pool, error) {
if threads <= 0 {
threads = runtime.NumCPU()
}
p := &Pool{
running: 1,
workers: make([]iprocessor.Pipeline, threads),
log: log,
messagesOut: make(chan message.Transaction),
closeChan: make(chan struct{}),
closed: make(chan struct{}),
}
for i := range p.workers {
p.workers[i] = NewProcessor(msgProcessors...)
}
return p, nil
}
//------------------------------------------------------------------------------
// loop is the processing loop of this pipeline.
func (p *Pool) loop() {
defer func() {
atomic.StoreUint32(&p.running, 0)
// Signal all workers to close.
for _, worker := range p.workers {
worker.CloseAsync()
}
// Wait for all workers to be closed before closing our response and
// messages channels as the workers may still have access to them.
for _, worker := range p.workers {
_ = worker.WaitForClose(shutdown.MaximumShutdownWait())
}
close(p.messagesOut)
close(p.closed)
}()
internalMessages := make(chan message.Transaction)
remainingWorkers := int64(len(p.workers))
for _, worker := range p.workers {
if err := worker.Consume(p.messagesIn); err != nil {
p.log.Errorf("Failed to start pipeline worker: %v\n", err)
atomic.AddInt64(&remainingWorkers, -1)
continue
}
go func(w iprocessor.Pipeline) {
defer func() {
if atomic.AddInt64(&remainingWorkers, -1) == 0 {
close(internalMessages)
}
}()
for {
var t message.Transaction
var open bool
select {
case t, open = <-w.TransactionChan():
if !open {
return
}<|fim▁hole|> return
}
select {
case internalMessages <- t:
case <-p.closeChan:
return
}
}
}(worker)
}
for atomic.LoadUint32(&p.running) == 1 && atomic.LoadInt64(&remainingWorkers) > 0 {
select {
case t, open := <-internalMessages:
if !open {
return
}
select {
case p.messagesOut <- t:
case <-p.closeChan:
return
}
case <-p.closeChan:
return
}
}
}
//------------------------------------------------------------------------------
// Consume assigns a messages channel for the pipeline to read.
func (p *Pool) Consume(msgs <-chan message.Transaction) error {
if p.messagesIn != nil {
return component.ErrAlreadyStarted
}
p.messagesIn = msgs
go p.loop()
return nil
}
// TransactionChan returns the channel used for consuming messages from this
// pipeline.
func (p *Pool) TransactionChan() <-chan message.Transaction {
return p.messagesOut
}
// CloseAsync shuts down the pipeline and stops processing messages.
func (p *Pool) CloseAsync() {
if atomic.CompareAndSwapUint32(&p.running, 1, 0) {
close(p.closeChan)
}
}
// WaitForClose - Blocks until the StackBuffer output has closed down.
func (p *Pool) WaitForClose(timeout time.Duration) error {
select {
case <-p.closed:
case <-time.After(timeout):
return component.ErrTimeout
}
return nil
}<|fim▁end|> | case <-p.closeChan: |
<|file_name|>Holiday.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Source : Les recettes Python de Tyrtamos
http://python.jpvweb.com/mesrecettespython/doku.php?id=date_de_paques
"""
class jourferie:
def datepaques(self,an):
"""Calcule la date de Pâques d'une année donnée an (=nombre entier)"""
a=an//100
b=an%100
c=(3*(a+25))//4
d=(3*(a+25))%4
e=(8*(a+11))//25
f=(5*a+b)%19
g=(19*f+c-e)%30
h=(f+11*g)//319
j=(60*(5-d)+b)//4
k=(60*(5-d)+b)%4
m=(2*j-k-g+h)%7
n=(g-h+m+114)//31
p=(g-h+m+114)%31
jour=p+1
mois=n
return [jour, mois, an]
def dateliste(self,c, sep='/'):
"""Transforme une date chaîne 'j/m/a' en une date liste [j,m,a]"""
j, m, a = c.split(sep)
return [int(j), int(m), int(a)]
def datechaine(self,d, sep='/'):
"""Transforme une date liste=[j,m,a] en une date chaîne 'jj/mm/aaaa'"""
return ("%02d" + sep + "%02d" + sep + "%0004d") % (d[0], d[1], d[2])
def jourplus(self,d, n=1):
"""Donne la date du nième jour suivant d=[j, m, a] (n>=0)"""
j, m, a = d
fm = [0,31,28,31,30,31,30,31,31,30,31,30,31]
if (a%4==0 and a%100!=0) or a%400==0: # bissextile?
fm[2] = 29
for i in xrange(0,n):
j += 1
if j > fm[m]:
j = 1
m += 1
if m>12:
m = 1
a += 1
return [j,m,a]
def jourmoins(self,d, n=-1):
"""Donne la date du nième jour précédent d=[j, m, a] (n<=0)"""
j, m, a = d
fm = [0,31,28,31,30,31,30,31,31,30,31,30,31]
if (a%4==0 and a%100!=0) or a%400==0: # bissextile?
fm[2] = 29
for i in xrange(0,abs(n)):
j -= 1
if j < 1:
m -= 1
if m<1:
m = 12
a -= 1
j = fm[m]
return [j,m,a]
def numjoursem(self,d):
"""Donne le numéro du jour de la semaine d'une date d=[j,m,a]
lundi=1, mardi=2, ..., dimanche=7
Algorithme de Maurice Kraitchik (1882?1957)"""
j, m, a = d
if m<3:
m += 12
a -= 1
n = (j +2*m + (3*(m+1))//5 +a + a//4 - a//100 + a//400 +2) % 7
return [6, 7, 1, 2, 3, 4, 5][n]
def joursem(self,d):
"""Donne le jour de semaine en texte à partir de son numéro
lundi=1, mardi=2, ..., dimanche=7"""
return ["", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi",
"dimanche"][self.numjoursem(d)]
def joursferiesliste(self,an, sd=0):
"""Liste des jours fériés France en date-liste de l'année an (nb entier).
sd=0 (=defaut): tous les jours fériés.
sd=1: idem sans les sammedis-dimanches.
sd=2: tous + les 2 jours fériés supplémentaires d'Alsace-Moselle.
sd=3: idem sd=2 sans les samedis-dimanches"""
F = [] # =liste des dates des jours feries en date-liste d=[j,m,a]
L = [] # =liste des libelles du jour ferie
dp = self.datepaques(an)
# Jour de l'an
d = [1,1,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Jour de l'an")
# Vendredi saint (pour l'Alsace-Moselle)
d = self.jourmoins(dp, -2)
if (sd==0) or (sd==2):
#if sd>=2:
F.append(d)
L.append(u"Vendredi saint (Alsace-Moselle)")
# Dimanche de Paques
d = dp
if (sd==0) or (sd==2):
F.append(d)
L.append(u"Dimanche de Paques")
# Lundi de Paques
d = self.jourplus(dp, +1)
F.append(d)
L.append(u"Lundi de Paques")
# Fête du travail
d = [1,5,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Fete du travail")
# Victoire des allies 1945
d = [8,5,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Victoire des allies 1945")
# Jeudi de l'Ascension
d = self.jourplus(dp, +39)
F.append(d)
L.append(u"Jeudi de l'Ascension")
# Dimanche de Pentecote
d = self.jourplus(dp, +49)
if (sd==0) or (sd==2):
F.append(d)
L.append(u"Dimanche de Pentecote")
# Lundi de Pentecote
d = self.jourplus(d, +1)
F.append(d)
L.append(u"Lundi de Pentecote")
# Fete Nationale
d = [14,7,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Fete Nationale")
# Assomption
d = [15,8,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Assomption")
# Toussaint
d = [1,11,an]<|fim▁hole|> if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Toussaint")
# Armistice 1918
d = [11,11,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Armistice 1918")
# Jour de Noel
d = [25,12,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Jour de Noel")
# Saint Etienne Alsace
d = [26,12,an]
nj = self.numjoursem(d)
if (sd==0) or (sd==1 and nj<6) or (sd==2) or (sd==3 and nj<6):
F.append(d)
L.append(u"Saint-Etienne (Alsace)")
return F, L
def joursferies(self,an, sd=0, sep='/'):
"""Liste des jours fériés France en date-chaine de l'année an (nb entier).
sd=0 (=defaut): tous les jours fériés.
sd=1: idem sans les sammedis-dimanches.
sd=2: tous + les 2 jours fériés supplémentaires d'Alsace-Moselle.
sd=3: idem sd=2 sans les samedis-dimanches"""
C = []
J = []
F, L = self.joursferiesliste(an, sd)
for i in xrange(0,len(F)):
C.append(self.datechaine(F[i])) # conversion des dates-liste en dates-chaine
J.append(self.joursem(F[i])) # ajout du jour de semaine
return C, J, L
def estferie(self,d,sd=0):
"""estferie(d,sd=0): => dit si une date d=[j,m,a] donnée est fériée France
si la date est fériée, renvoie son libellé
sinon, renvoie une chaine vide"""
j,m,a = d
F, L = self.joursferiesliste(a, sd)
for i in xrange(0, len(F)):
if j==F[i][0] and m==F[i][1] and a==F[i][2]:
return L[i]
return "False"<|fim▁end|> | nj = self.numjoursem(d) |
<|file_name|>PhraseService.java<|end_file_name|><|fim▁begin|>/*
* PhraseService.java
*
* Copyright (C) 2018 [ A Legge Up ]
*
* This software may be modified and distributed under the terms
* of the MIT license. See the LICENSE file for details.
*/
package com.aleggeup.confagrid.content;
import java.util.List;
import java.util.UUID;
import com.aleggeup.confagrid.model.Phrase;
public interface PhraseService {
List<Phrase> findAll();
void save(Phrase phrase);
Phrase phraseFromText(String text);<|fim▁hole|>
long count();
Phrase findOne(UUID id);
}<|fim▁end|> | |
<|file_name|>pixel_collidable.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
class PixelCollidable( Collidable ) :
def __init__(self) :
self.spm = None
self.r = None<|fim▁end|> | from collidable import *
from math_3d import * |
<|file_name|>ArServerModeRatioDrive.cpp<|end_file_name|><|fim▁begin|>/*
MobileRobots Advanced Robotics Interface for Applications (ARIA)
Copyright (C) 2004, 2005 ActivMedia Robotics LLC
Copyright (C) 2006, 2007, 2008, 2009 MobileRobots Inc.
Copyright (C) 2010, 2011 Adept Technology, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
<|fim▁hole|>If you wish to redistribute ARIA under different terms, contact
Adept MobileRobots for information about a commercial version of ARIA at
[email protected] or
Adept MobileRobots, 10 Columbia Drive, Amherst, NH 03031; 800-639-9481
*/
#include "Aria.h"
#include "ArExport.h"
#include "ArServerModeRatioDrive.h"
#include "ArServerHandlerCommands.h"
AREXPORT ArServerModeRatioDrive::ArServerModeRatioDrive(
ArServerBase *server, ArRobot *robot, bool takeControlOnJoystick,
bool useComputerJoystick, bool useRobotJoystick,
bool useServerCommands, const char *name,
bool robotJoystickOverridesLocks) :
ArServerMode(robot, server, name),
myRatioDriveGroup(robot),
myJoyUserTaskCB(this, &ArServerModeRatioDrive::joyUserTask),
myServerSetSafeDriveCB(this,
&ArServerModeRatioDrive::serverSetSafeDrive),
myServerGetSafeDriveCB(this,
&ArServerModeRatioDrive::serverGetSafeDrive),
myServerRatioDriveCB(this, &ArServerModeRatioDrive::serverRatioDrive),
myRatioFireCB(this, &ArServerModeRatioDrive::ratioFireCallback),
myServerSafeDrivingEnableCB(this,
&ArServerModeRatioDrive::serverSafeDrivingEnable),
myServerSafeDrivingDisableCB(this,
&ArServerModeRatioDrive::serverSafeDrivingDisable)
{
myHandlerCommands = NULL;
myDriveSafely = true;
myTakeControlOnJoystick = takeControlOnJoystick;
myUseComputerJoystick = useComputerJoystick;
myUseRobotJoystick = useRobotJoystick;
myUseServerCommands = useServerCommands;
myUseLocationDependentDevices = true;
myRobotJoystickOverridesLock = robotJoystickOverridesLocks;
myTimeout = 2;
myGotServerCommand = true;
myLastTimedOut = false;
// SEEKUR
mySentRecenter = false;
// add the actions, put the ratio input on top, then have the
// limiters since the ratio doesn't touch decel except lightly
// whereas the limiter will touch it strongly
myRatioAction = new ArActionRatioInput;
myRatioDriveGroup.addAction(myRatioAction, 50);
myLimiterForward = new ArActionDeceleratingLimiter(
"DeceleratingLimiterForward", ArActionDeceleratingLimiter::FORWARDS);
myRatioDriveGroup.addAction(myLimiterForward, 40);
myLimiterBackward = new ArActionDeceleratingLimiter(
"DeceleratingLimiterBackward",
ArActionDeceleratingLimiter::BACKWARDS);
myRatioDriveGroup.addAction(myLimiterBackward, 39);
myLimiterLateralLeft = NULL;
myLimiterLateralRight = NULL;
if (myRobot->hasLatVel())
{
myLimiterLateralLeft = new ArActionDeceleratingLimiter(
"DeceleratingLimiterLateralLeft",
ArActionDeceleratingLimiter::LATERAL_LEFT);
myRatioDriveGroup.addAction(myLimiterLateralLeft, 38);
myLimiterLateralRight = new ArActionDeceleratingLimiter(
"DeceleratingLimiterLateralRight",
ArActionDeceleratingLimiter::LATERAL_RIGHT);
myRatioDriveGroup.addAction(myLimiterLateralRight, 37);
}
myMovementParameters = new ArActionMovementParameters("TeleopMovementParameters", false);
myRatioDriveGroup.addAction(myMovementParameters, 1);
myRatioFireCB.setName("ArServerModeRatioDrive");
myRatioAction->addFireCallback(30, &myRatioFireCB);
myLastRobotSafeDrive = true;
if (myServer != NULL && myUseServerCommands)
{
addModeData("ratioDrive", "drives the robot as with a joystick",
&myServerRatioDriveCB,
"double: transRatio; double: rotRatio; double: throttleRatio ",
"none", "Movement", "RETURN_NONE");
myServer->addData("setSafeDrive",
"sets whether we drive the robot safely or not",
&myServerSetSafeDriveCB,
"byte: 1 == drive safely, 0 == drive unsafely",
"none", "UnsafeMovement", "RETURN_NONE");
myServer->addData("getSafeDrive",
"gets whether we drive the robot safely or not",
&myServerGetSafeDriveCB,
"none",
"byte: 1 == driving safely, 0 == driving unsafely",
"Movement", "RETURN_SINGLE");
}
if (myUseComputerJoystick)
{
myJoydrive = new ArRatioInputJoydrive(robot, myRatioAction);
if ((myJoyHandler = Aria::getJoyHandler()) == NULL)
{
myJoyHandler = new ArJoyHandler;
myJoyHandler->init();
Aria::setJoyHandler(myJoyHandler);
}
}
if (myUseRobotJoystick)
{
myRobotJoydrive = new ArRatioInputRobotJoydrive(robot, myRatioAction);
if ((myRobotJoyHandler = Aria::getRobotJoyHandler()) == NULL)
{
myRobotJoyHandler = new ArRobotJoyHandler(robot);
Aria::setRobotJoyHandler(myRobotJoyHandler);
}
}
if (myUseRobotJoystick || myUseComputerJoystick)
{
std::string taskName = name;
taskName += "::joyUserTask";
myRobot->addUserTask(taskName.c_str(), 75, &myJoyUserTaskCB);
}
myPrinting = false;
}
AREXPORT ArServerModeRatioDrive::~ArServerModeRatioDrive()
{
}
AREXPORT void ArServerModeRatioDrive::addToConfig(ArConfig *config,
const char *section)
{
config->addParam(
ArConfigArg(
"Timeout", &myTimeout,
"If there are no commands for this period of time, then the robot will stop. 0 Disables. This is a double so you can do like .1 seconds if you want.", 0),
section, ArPriority::ADVANCED);
myRatioAction->addToConfig(config, section);
myLimiterForward->addToConfig(config, section, "Forward");
myLimiterBackward->addToConfig(config, section, "Backward");
if (myLimiterLateralLeft != NULL)
myLimiterLateralLeft->addToConfig(config, section, "Lateral");
if (myLimiterLateralRight != NULL)
myLimiterLateralRight->addToConfig(config, section, "Lateral");
myMovementParameters->addToConfig(config, section, "Teleop");
}
AREXPORT void ArServerModeRatioDrive::activate(void)
{
//if (!baseActivate()) {
// return;
//}
ratioDrive(0, 0, 100, true);
}
AREXPORT void ArServerModeRatioDrive::deactivate(void)
{
myRatioDriveGroup.deactivate();
baseDeactivate();
}
AREXPORT void ArServerModeRatioDrive::setSafeDriving(bool safe, bool internal)
{
if (!internal)
myRobot->lock();
// if this is a change then print it
if (safe != myDriveSafely)
{
if (safe)
{
ArLog::log(ArLog::Normal, "%s: Driving safely again", myName.c_str());
}
else
{
ArLog::log(ArLog::Normal, "%s: Driving UNSAFELY", myName.c_str());
}
myNewDriveSafely = true;
}
myDriveSafely = safe;
// ratioDrive is only called if this mode is already active
if (isActive())
ratioDrive(myRatioAction->getTransRatio(),
myRatioAction->getRotRatio(),
myRatioAction->getThrottleRatio());
if (!internal)
myRobot->unlock();
}
AREXPORT bool ArServerModeRatioDrive::getSafeDriving(void)
{
return myDriveSafely;
}
AREXPORT void ArServerModeRatioDrive::serverSafeDrivingEnable(void)
{
setSafeDriving(true);
}
AREXPORT void ArServerModeRatioDrive::serverSafeDrivingDisable(void)
{
setSafeDriving(false);
}
AREXPORT void ArServerModeRatioDrive::addControlCommands(ArServerHandlerCommands *handlerCommands)
{
if (!myUseServerCommands)
{
ArLog::log(ArLog::Normal,
"ArServerModeRatioDrive::addControlCommands: Tried to add control commands to a ratio drive not using the server");
return;
}
myHandlerCommands = handlerCommands;
myHandlerCommands->addCommand(
"safeRatioDrivingEnable",
"Enables safe driving with ratioDrive, which will attempt to prevent collisions (default)",
&myServerSafeDrivingEnableCB, "UnsafeMovement");
myHandlerCommands->addCommand(
"safeRatioDrivingDisable",
"Disables safe driving with ratioDrive, this is UNSAFE and will let you drive your robot into things or down stairs, use at your own risk",
&myServerSafeDrivingDisableCB, "UnsafeMovement");
}
/**
* @param isActivating a bool set to true only if this method is called from the activate()
* method, otherwise false
* @param transRatio Amount of forward velocity to request
* @param rotRatio Amount of rotational velocity to request
* @param throttleRatio Amount of speed to request
* @param latRatio amount of lateral velocity to request (if robot supports it)
**/
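// Illustrative call sketch (not from the original source): a caller that has
// already activated this mode and holds the robot lock might request a half
// forward ratio with no rotation like so, leaving isActivating and latRatio
// at their defaults as other calls in this file do:
//   mode->ratioDrive(50.0 /*transRatio*/, 0.0 /*rotRatio*/, 75.0 /*throttleRatio*/);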
AREXPORT void ArServerModeRatioDrive::ratioDrive(
double transRatio, double rotRatio, double throttleRatio,
bool isActivating, double latRatio)
{
bool wasActive;
wasActive = isActive();
myTransRatio = transRatio;
myRotRatio = rotRatio;
myThrottleRatio = throttleRatio;
myLatRatio = latRatio;
// KMC: Changed the following test to include isActivating.
// if (!wasActive && !baseActivate())
// return;
// The baseActivate() method should only be called in the context of the activate()
// method.
if (isActivating && !wasActive) {
if (!baseActivate()) {
return;
}
} // end if activating and wasn't previously active
// This is to handle the case where ratioDrive is called outside the
// activate() method, and the activation was not successful.
if (!isActive()) {
return;
}
if (!wasActive || myNewDriveSafely)
{
myRobot->clearDirectMotion();
if (myDriveSafely)
{
myRatioDriveGroup.activateExclusive();
myMode = "Drive";
ArLog::log(ArLog::Normal, "%s: Driving safely", myName.c_str());
}
else
{
myRobot->deactivateActions();
myRatioAction->activate();
myMode = "UNSAFE Drive";
ArLog::log(ArLog::Normal, "%s: Driving unsafely", myName.c_str());
}
if (myDriveSafely)
mySafeDrivingCallbacks.invoke();
else
myUnsafeDrivingCallbacks.invoke();
}
myNewDriveSafely = false;
// MPL why is this here twice?
myTransRatio = transRatio;
myRotRatio = rotRatio;
myThrottleRatio = throttleRatio;
myLatRatio = latRatio;
setActivityTimeToNow();
// SEEKUR
mySentRecenter = false;
if (myPrinting)
printf("cmd %.0f %.0f %.0f %.0f\n", transRatio, rotRatio, throttleRatio,
latRatio);
if (myTransRatio < -0.1)
myDrivingBackwardsCallbacks.invoke();
//myRatioAction.setRatios(transRatio, rotRatio, throttleRatio);
}
AREXPORT void ArServerModeRatioDrive::serverRatioDrive(ArServerClient *client,
ArNetPacket *packet)
{
double transRatio = packet->bufToDouble();
double rotRatio = packet->bufToDouble();
double throttleRatio = packet->bufToDouble();
double lateralRatio = packet->bufToDouble();
myGotServerCommand = true;
if (!myDriveSafely && !client->hasGroupAccess("UnsafeMovement"))
serverSafeDrivingEnable();
myRobot->lock();
// Activate if necessary. Note that this is done before the ratioDrive
// call because we want the new ratio values to be applied after the
// default ones.
if (!isActive()) {
activate();
}
/*
ArLog::log(ArLog::Normal,
"RatioDrive trans %.0f rot %.0f lat %.0f ratio %.0f",
transRatio, rotRatio, lateralRatio, throttleRatio);
*/
ratioDrive(transRatio, rotRatio, throttleRatio, false, lateralRatio);
myRobot->unlock();
}
AREXPORT void ArServerModeRatioDrive::serverSetSafeDrive(
ArServerClient *client, ArNetPacket *packet)
{
if (packet->bufToUByte() == 0)
setSafeDriving(false);
else
setSafeDriving(true);
}
AREXPORT void ArServerModeRatioDrive::serverGetSafeDrive(
ArServerClient *client, ArNetPacket *packet)
{
ArNetPacket sendPacket;
if (getSafeDriving())
sendPacket.uByteToBuf(1);
else
sendPacket.uByteToBuf(0);
client->sendPacketTcp(&sendPacket);
}
AREXPORT void ArServerModeRatioDrive::joyUserTask(void)
{
// if we're not active but we should be
if (myTakeControlOnJoystick && !isActive() &&
((myUseComputerJoystick && myJoyHandler->haveJoystick() &&
myJoyHandler->getButton(1)) ||
(myUseRobotJoystick && myRobotJoyHandler->gotData() &&
myRobotJoyHandler->getButton1())))
{
if (ArServerMode::getActiveMode() != NULL)
ArLog::log(ArLog::Normal,
"%s: Activating instead of %s because of local joystick",
ArServerMode::getActiveMode()->getName(),
myName.c_str());
else
ArLog::log(ArLog::Normal,
"%s: Activating because of local joystick",
myName.c_str());
// if we're locked and are overriding that lock for the robot
// joystick and it was the robot joystick that caused it to happen
if (myUseRobotJoystick && myRobotJoyHandler->gotData() &&
myRobotJoyHandler->getButton1() && myRobotJoystickOverridesLock &&
ArServerMode::ourActiveModeLocked)
{
ArLog::log(ArLog::Terse, "Robot joystick is overriding locked mode %s",
ourActiveMode->getName());
ourActiveMode->forceUnlock();
myRobot->enableMotors();
}
activate();
}
bool unsafeRobotDrive;
if (myUseRobotJoystick && myRobotJoyHandler->gotData() &&
((unsafeRobotDrive =
(bool)(myRobot->getFaultFlags() & ArUtil::BIT15)) !=
!myLastRobotSafeDrive))
{
myLastRobotSafeDrive = !unsafeRobotDrive;
setSafeDriving(myLastRobotSafeDrive, true);
}
}
AREXPORT void ArServerModeRatioDrive::userTask(void)
{
// Sets the robot so that we always think we're trying to move in
// this mode
myRobot->forceTryingToMove();
// if the joystick is pushed then set that we're active, server
// commands'll go into ratioDrive and set it there too
if ((myUseComputerJoystick && myJoyHandler->haveJoystick() &&
myJoyHandler->getButton(1)) ||
(myUseRobotJoystick && myRobotJoyHandler->gotData() &&
myRobotJoyHandler->getButton1()) ||
(myUseServerCommands && myGotServerCommand))
{
setActivityTimeToNow();
}
myGotServerCommand = false;
bool timedOut = false;
// if we're moving, and there is a timeout, and the activity time is
// greater than the timeout, then stop the robot
if ((fabs(myRobot->getVel()) > 0 ||
fabs(myRobot->getRotVel()) > 0 ||
fabs(myRobot->getLatVel()) > 0) &&
myTimeout > .0000001 &&
getActivityTime().mSecSince()/1000.0 >= myTimeout)
{
if (!myLastTimedOut)
{
ArLog::log(ArLog::Normal, "Stopping the robot since teleop timed out");
myRobot->stop();
myRobot->clearDirectMotion();
ratioDrive(0, 0, 0, 0);
}
timedOut = true;
}
myLastTimedOut = timedOut;
// SEEKUR (needed for prototype versions)
/*
if (myRobot->hasLatVel() && !mySentRecenter &&
getActivityTime().secSince() >= 10)
{
mySentRecenter = true;
myRobot->com(120);
}
*/
if (!myStatusSetThisCycle)
{
if (myRobot->isLeftMotorStalled() || myRobot->isRightMotorStalled())
myStatus = "Stalled";
    // this works because the motor-stall case above already caught that;
    // if the motors are not stalled but getStallValue() is still non-zero,
    // it means a bumper was triggered
else if (myRobot->getStallValue())
myStatus = "Bumped";
else if (ArMath::fabs(myRobot->getVel()) < 2 &&
ArMath::fabs(myRobot->getRotVel()) < 2 &&
(!myRobot->hasLatVel() || ArMath::fabs(myRobot->getLatVel()) < 2))
myStatus = "Stopped";
else
myStatus = "Driving";
}
myStatusSetThisCycle = false;
} // end method userTask
AREXPORT void ArServerModeRatioDrive::ratioFireCallback(void)
{
if (myPrinting)
ArLog::log(ArLog::Normal, "ArServerModeRatioDrive: TransRatio=%.0f RotRatio=%.0f ThrottleRatio=%.0f LatRatio=%.0f",
myTransRatio, myRotRatio, myThrottleRatio, myLatRatio);
myRatioAction->setRatios(myTransRatio, myRotRatio, myThrottleRatio,
myLatRatio);
}
AREXPORT void ArServerModeRatioDrive::setUseLocationDependentDevices(
bool useLocationDependentDevices, bool internal)
{
if (!internal)
myRobot->lock();
// if this is a change then print it
if (useLocationDependentDevices != myUseLocationDependentDevices)
{
if (useLocationDependentDevices)
{
ArLog::log(ArLog::Normal, "%s: Using location dependent range devices",
myName.c_str());
}
else
{
ArLog::log(ArLog::Normal,
"%s: Not using location dependent range devices",
myName.c_str());
}
myUseLocationDependentDevices = useLocationDependentDevices;
myLimiterForward->setUseLocationDependentDevices(
myUseLocationDependentDevices);
myLimiterBackward->setUseLocationDependentDevices(
myUseLocationDependentDevices);
if (myLimiterLateralLeft != NULL)
myLimiterLateralLeft->setUseLocationDependentDevices(
myUseLocationDependentDevices);
if (myLimiterLateralRight != NULL)
myLimiterLateralRight->setUseLocationDependentDevices(
myUseLocationDependentDevices);
}
if (!internal)
myRobot->unlock();
}
AREXPORT bool ArServerModeRatioDrive::getUseLocationDependentDevices(void)
{
return myUseLocationDependentDevices;
}<|fim▁end|> | |
<|file_name|>webserver.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from flask import Flask
from flask import Flask,jsonify, request, Response, session,g,redirect, url_for,abort, render_template, flash
from islem import *
from bot import *
import sys
import time
import datetime
reload(sys)
sys.setdefaultencoding("utf-8")
app = Flask(__name__)
toxbot = tox_factory(ProfileHelper.open_profile("tox_save.tox"))
sonek=str(toxbot.self_get_address())[0:2]
karsi_dosyalar="gelen_cevaplar"+sonek
komut_dosyasi="gelen_komutlar"+sonek
@app.route('/')
def indeks():
arkadaslar=""
for num in toxbot.self_get_friend_list():
arkadaslar+="<tr><td><a href=/toxsys?fno="+str(num)+">"+str(num)+"</td><td>"+toxbot.friend_get_name(num)+"</td><td>"+str(toxbot.friend_get_status_message(num))+"</td><td>"+str(toxbot.friend_get_public_key(num))+"</td></tr>"
return '''<html>
<h2>Tox Yönetim Sayfası</h2>
<table border=1>
<tr><td>no</td><td>isim</td><td>publickey</td></tr>
<tr><td>-1</td><td>'''+toxbot.self_get_name()+'''</td><td>'''+toxbot.self_get_status_message()+'''</td><td>'''+toxbot.self_get_address()+'''</td></tr>
'''+arkadaslar+'''
</tr></table>
<a href="/toxfs">toxfs</a>
</html>'''
@app.route('/toxfs', methods = ['GET','POST'])
def toxfs():
# localhost:2061
#if request.method == 'GET':
islem=Islem()
islem.fno = request.args.get('fno')
islem.tip = request.args.get('tip')
islem.mesaj = request.args.get('mesaj')
islem.komut="---"
print "islem icerik:"
islem.icerik()
islem.dosyala(komut_dosyasi)<|fim▁hole|> return "komut icra edildi."
#else:
#return '''<html>
#paremetreyle gönderin</html>'''
@app.route('/toxsys', methods = ['GET','POST'])
def toxsys():
dosyalar_html=""
# localhost:2061
#if request.method == 'GET':
islem=Islem()
if 'fno' in request.args and 'dosya' not in request.args:
islem.fno = request.args.get('fno')
islem.tip = "komut"
islem.mesaj = "x"
islem.komut = "@100@dlist"
print "islem icerik:"
islem.icerik()
islem.dosyala(komut_dosyasi)
cevap_geldi=False
dosya_bek_bas = datetime.datetime.now()
        # wait up to 6 seconds for a reply
t_end = time.time() + 6
while not cevap_geldi :
if os.path.exists(karsi_dosyalar):
time.sleep(1)
cevaplar=open(karsi_dosyalar,"r").read()
cevaplar=cevaplar.split("\n")
for dosya in cevaplar:
dosyalar_html+="<tr><td><a href=/toxsys?fno="+str(islem.fno)+"&dosya="+dosya+">"+dosya+"</td><td></tr>"
os.remove(karsi_dosyalar)
cevap_geldi=True
return '''<html>
<h3>dosyalar</h3>
<table border=1>
'''+dosyalar_html+'''
</tr>
<a href="./">anasayfa</a>
</html>'''
dosya_bek_son = datetime.datetime.now()
krono=dosya_bek_son-dosya_bek_bas
if krono.total_seconds() > 6 :
break
else:
print "dlist sonucu bekleniyor.",krono.total_seconds()
if 'fno' in request.args and 'dosya' in request.args:
islem.fno = request.args.get('fno')
dosya = request.args.get('dosya')
islem.tip = "komut"
islem.mesaj = "x"
islem.komut = "@102@"+dosya
islem.dosyala(komut_dosyasi)
cevap_geldi=False
while not cevap_geldi:
time.sleep(0.5)
            # md5sum check
if os.path.exists(karsi_dosyalar):
cevap=open(karsi_dosyalar,"r").read()
if cevap =="dosya_inme_tamam":
cevap_geldi=True
os.remove(karsi_dosyalar)
return "dosya geldi statikte"
else:
return redirect(url_for('indeks'))
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0', port=2061)<|fim▁end|> | |
<|file_name|>fib.rs<|end_file_name|><|fim▁begin|>extern crate llvm;
use llvm::*;
use llvm::Attribute::*;
fn main() {
let ctx = Context::new();
let module = Module::new("simple", &ctx);
let func = module.add_function("fib", Type::get::<fn(u64) -> u64>(&ctx));
func.add_attributes(&[NoUnwind, ReadNone]);
let value = &func[0];
let entry = func.append("entry");
let on_zero = func.append("on_zero");
let on_one = func.append("on_one");
let default = func.append("default");
let builder = Builder::new(&ctx);
let zero = 0u64.compile(&ctx);
let one = 1u64.compile(&ctx);
builder.position_at_end(entry);
builder.build_switch(value, default, &[
(zero, on_zero),
(one, on_one)
]);
builder.position_at_end(on_zero);<|fim▁hole|> builder.build_ret(zero);
builder.position_at_end(on_one);
builder.build_ret(one);
builder.position_at_end(default);
let two = 2u64.compile(&ctx);
let a = builder.build_sub(value, one);
let b = builder.build_sub(value, two);
let fa = builder.build_tail_call(func, &[a]);
let fb = builder.build_tail_call(func, &[b]);
builder.build_ret(builder.build_add(fa, fb));
module.verify().unwrap();
let ee = JitEngine::new(&module, JitOptions {opt_level: 0}).unwrap();
ee.with_function(func, |fib: extern fn(u64) -> u64| {
for i in 0..10 {
println!("fib {} = {}", i, fib(i))
}
});
}<|fim▁end|> | |
<|file_name|>user_resource.go<|end_file_name|><|fim▁begin|>package dao
import (
"context"
"database/sql"
"go-common/app/service/live/resource/model"
"go-common/library/ecode"
"go-common/library/log"
"time"
"github.com/jinzhu/gorm"
)
var (
addSql = "INSERT INTO `user_resource`(`res_type`,`custom_id`,`title`,`url`,`weight`,`status`,`creator`) values (?,?,?,?,?,?,?);"
rowFields = "`id`, `res_type`,`custom_id`,`title`,`url`,`weight`,`status`,`creator`,UNIX_TIMESTAMP(`ctime`), UNIX_TIMESTAMP(`mtime`)"
)
// AddUserResource adds a user resource to the DB
func (d *Dao) AddUserResource(c context.Context, res *model.UserResource) (newRes model.UserResource, err error) {
if res == nil {
return
}
var reply sql.Result
if _, err = d.db.Begin(c); err != nil {
log.Error("初始化DB错误(%v)", err)
return<|fim▁hole|>
reply, err = d.db.Exec(c, addSql, res.ResType, res.CustomID, res.Title, res.URL, res.Weight, res.Status, res.Creator)
if err != nil {
log.Error("执行SQL语句 err: %v", err)
return
}
lastID_, _ := reply.LastInsertId()
newRes, err = d.GetUserResourceInfoByID(c, int32(lastID_))
return
}
// EditUserResource edits an existing resource
func (d *Dao) EditUserResource(c context.Context, resType int32, customID int32, update map[string]interface{}) (effectRow int32, newRes model.UserResource, err error) {
if update == nil {
return
}
var tx = d.rsDB
tableInfo := &model.UserResource{}
var reply = tx.Model(tableInfo).
Where("`res_type` = ? AND `custom_id` = ?", resType, customID).
Update(update)
log.Info("effected rows: %d, res_type : %d custom_id : %d", reply.RowsAffected, resType, customID)
if reply.Error != nil {
log.Error("resource.editResource error: %v", err)
return
}
effectRow = int32(reply.RowsAffected)
newRes, err = d.GetUserResourceInfo(c, resType, customID)
return
}
// SetUserResourceStatus sets the status of a resource
func (d *Dao) SetUserResourceStatus(c context.Context, resType int32, customID int32, status int32) (effectRow int32, err error) {
update := make(map[string]interface{})
update["status"] = status
effectRow, _, err = d.EditUserResource(c, resType, customID, update)
if err != nil {
log.Error("修改资源状态: %v", err)
}
return
}
// GetMaxCustomID returns the current maximum custom ID for the given resource type
func (d *Dao) GetMaxCustomID(c context.Context, resType int32) (maxCustomID int32, err error) {
tableInfo := &model.UserResource{}
var ret sql.NullInt64
err = d.rsDB.Model(tableInfo).Debug().
Select("max(custom_id) as mcid").
Where("res_type=?", resType).
Row().Scan(&ret)
if err != nil {
log.Error("查找最大的资源ID res_type : %d : %v", resType, err)
return
}
maxCustomID = int32(ret.Int64)
log.Info("类型为 %d 最大的资源ID是 %d", resType, maxCustomID)
return
}
// getRowResult is a helper method that scans a single-row query result
func getRowResult(queryResult *gorm.DB) (res model.UserResource, err error) {
var count int32
err = queryResult.Count(&count).Error
if err != nil {
log.Error("user_resource.GetUserResourceInfoByID %v", err)
err = ecode.SeltResErr
return
}
if count == 0 {
log.Info("user_resource.getRowResult 查询结果为空")
err = ecode.SeltResErr
return
}
var retID, retResType, retCustomID, retWeight, retStatus, retCtime, retMtime sql.NullInt64
var retTitle, retURL, retCreator sql.NullString
err = queryResult.Row().Scan(&retID, &retResType, &retCustomID, &retTitle, &retURL, &retWeight, &retStatus, &retCreator, &retCtime, &retMtime)
if err != nil {
log.Error("resource.GetUserResourceInfoByID error: %v", err)
err = ecode.SeltResErr
return
}
res.ID = int32(retID.Int64)
res.ResType = int32(retResType.Int64)
res.CustomID = int32(retCustomID.Int64)
res.Title = retTitle.String
res.URL = retURL.String
res.Weight = int32(retWeight.Int64)
res.Status = int32(retStatus.Int64)
res.Creator = retCreator.String
res.Ctime = time.Unix(retCtime.Int64, 0)
	res.Mtime = time.Unix(retMtime.Int64, 0)
return
}
// GetUserResourceInfo fetches a single configuration entry
func (d *Dao) GetUserResourceInfo(c context.Context, resType int32, customID int32) (res model.UserResource, err error) {
tableInfo := &model.UserResource{}
queryResult := d.rsDBReader.Model(tableInfo).Select(rowFields).
Where("res_type=? AND custom_id=?", resType, customID)
res, err = getRowResult(queryResult)
return
}
// GetUserResourceInfoByID fetches a single configuration entry by ID
func (d *Dao) GetUserResourceInfoByID(c context.Context, id int32) (res model.UserResource, err error) {
tableInfo := &model.UserResource{}
queryResult := d.rsDBReader.Model(tableInfo).Select(rowFields).
Where("id=?", id)
res, err = getRowResult(queryResult)
return
}
// ListUserResourceInfo returns a paginated list of configuration entries
func (d *Dao) ListUserResourceInfo(c context.Context, resType int32, page int32, pageSize int32) (list []model.UserResource, err error) {
var tx = d.rsDBReader
tableInfo := &model.UserResource{}
err = tx.Model(tableInfo).
Select("`id`, `res_type`,`custom_id`,`title`,`url`,`weight`,`status`,`creator`,`ctime`, `mtime`").
Where("res_type=?", resType).
Order("id ASC").
Limit(pageSize).
Offset((page - 1) * pageSize).
Find(&list).Error
if err != nil {
log.Error("resource.editResource error: %v", err)
return
}
return
}<|fim▁end|> | } |
<|file_name|>import_supervised_user_tests.js<|end_file_name|><|fim▁begin|>// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be<|fim▁hole|>
cr.define('user_manager.import_supervised_user_tests', function() {
function registerTests() {
suite('ImportSupervisedUserTests', function() {
/** @type {?ImportSupervisedUserElement} */
var importElement = null;
/**
* @param {!HTMLElement} element
* @return {string}
*/
var getProfileName = function(element) {
return element.querySelector('.profile-name').textContent.trim();
}
setup(function() {
importElement = document.createElement('import-supervised-user');
document.body.appendChild(importElement);
// Make sure DOM is up to date.
Polymer.dom.flush();
});
teardown(function(done) {
importElement.remove();
// Allow asynchronous tasks to finish.
setTimeout(done);
});
test('Dialog does not show if no signed-in user is provided', function() {
// The dialog is initially not visible.
assertFalse(importElement.$.dialog.opened);
importElement.show(undefined, []);
Polymer.dom.flush();
// The dialog is still not visible.
assertFalse(importElement.$.dialog.opened);
});
test('Can import supervised user', function() {
return new Promise(function(resolve, reject) {
/** @type {!SignedInUser} */
var signedInUser = {username: 'username',
profilePath: 'path/to/profile'};
/** @type {!Array<!SupervisedUser>} */
var supervisedUsers = [{name: 'supervised user 1',
onCurrentDevice: true},
{name: 'supervised user 3',
onCurrentDevice: false},
{name: 'supervised user 2',
onCurrentDevice: false}];
// Expect an event to import the selected supervised user to be fired.
importElement.addEventListener('import', function(event) {
if (event.detail.signedInUser == signedInUser &&
event.detail.supervisedUser.name == 'supervised user 2') {
Polymer.dom.flush();
// The dialog is no longer visible.
assertFalse(importElement.$.dialog.opened);
resolve();
}
});
// The dialog is initially not visible.
assertFalse(importElement.$.dialog.opened);
importElement.show(signedInUser, supervisedUsers);
Polymer.dom.flush();
// The dialog becomes visible.
assertTrue(importElement.$.dialog.opened);
// The correct message is displayed.
assertEquals(loadTimeData.getString('supervisedUserImportText'),
importElement.$$('#message').textContent.trim());
var selectorElement = importElement.$$('paper-listbox');
assertTrue(!!selectorElement);
          // Supervised users are ordered correctly (ones that are not on the
          // current device appear first, then they are sorted alphabetically in
          // ascending order).
var items = selectorElement.querySelectorAll('paper-item');
assertEquals(3, items.length);
assertEquals('supervised user 2', getProfileName(items[0]));
assertEquals('supervised user 3', getProfileName(items[1]));
assertEquals('supervised user 1', getProfileName(items[2]));
// Supervised users that are on this device are disabled.
var selectableItems = selectorElement.querySelectorAll('[disabled]');
assertEquals(1, selectableItems.length);
assertEquals('supervised user 1', getProfileName(selectableItems[0]));
// No user is initially selected.
assertEquals(-1, selectorElement.selected);
// The import button is disabled if no supervised user is selected.
assertTrue(importElement.$$('#import').disabled);
// Simulate selecting the third user which is disabled.
MockInteractions.tap(items[2]);
// Confirm no user is selected.
assertEquals(-1, selectorElement.selected);
// Simulate selecting the first user.
MockInteractions.tap(items[0]);
// Confirm the user is selected.
assertEquals(0, selectorElement.selected);
// The import button becomes enabled once a user is selected.
assertFalse(importElement.$$('#import').disabled);
// Simulate clicking 'Import'.
MockInteractions.tap(importElement.$$('#import'));
});
});
});
}
return {
registerTests: registerTests,
};
});<|fim▁end|> | // found in the LICENSE file. |
<|file_name|>cleanup.rs<|end_file_name|><|fim▁begin|>// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! ## The Cleanup module
//!
//! The cleanup module tracks what values need to be cleaned up as scopes
//! are exited, either via panic or just normal control flow. The basic
//! idea is that the function context maintains a stack of cleanup scopes
//! that are pushed/popped as we traverse the AST tree. There is typically
//! at least one cleanup scope per AST node; some AST nodes may introduce
//! additional temporary scopes.
//!
//! Cleanup items can be scheduled into any of the scopes on the stack.
//! Typically, when a scope is popped, we will also generate the code for
//! each of its cleanups at that time. This corresponds to a normal exit
//! from a block (for example, an expression completing evaluation
//! successfully without panic). However, it is also possible to pop a
//! block *without* executing its cleanups; this is typically used to
//! guard intermediate values that must be cleaned up on panic, but not
//! if everything goes right. See the section on custom scopes below for
//! more details.
//!
//! Cleanup scopes come in three kinds:
//!
//! - **AST scopes:** each AST node in a function body has a corresponding
//! AST scope. We push the AST scope when we start generating code for an AST
//! node and pop it once the AST node has been fully generated.
//! - **Loop scopes:** loops have an additional cleanup scope. Cleanups are
//! never scheduled into loop scopes; instead, they are used to record the
//! basic blocks that we should branch to when a `continue` or `break` statement
//! is encountered.
//! - **Custom scopes:** custom scopes are typically used to ensure cleanup
//! of intermediate values.
//!
//! ### When to schedule cleanup
//!
//! Although the cleanup system is intended to *feel* fairly declarative,
//! it's still important to time calls to `schedule_clean()` correctly.
//! Basically, you should not schedule cleanup for memory until it has
//! been initialized, because if an unwind should occur before the memory
//! is fully initialized, then the cleanup will run and try to free or
//! drop uninitialized memory. If the initialization itself produces
//! byproducts that need to be freed, then you should use temporary custom
//! scopes to ensure that those byproducts will get freed on unwind. For
//! example, an expression like `box foo()` will first allocate a box in the
//! heap and then call `foo()` -- if `foo()` should panic, this box needs
//! to be *shallowly* freed.
//!
//! ### Long-distance jumps
//!
//! In addition to popping a scope, which corresponds to normal control
//! flow exiting the scope, we may also *jump out* of a scope into some
//! earlier scope on the stack. This can occur in response to a `return`,
//! `break`, or `continue` statement, but also in response to panic. In
//! any of these cases, we will generate a series of cleanup blocks for
//! each of the scopes that is exited. So, if the stack contains scopes A
//! ... Z, and we break out of a loop whose corresponding cleanup scope is
//! X, we would generate cleanup blocks for the cleanups in X, Y, and Z.
//! After cleanup is done we would branch to the exit point for scope X.
//! But if panic should occur, we would generate cleanups for all the
//! scopes from A to Z and then resume the unwind process afterwards.
//!
//! To avoid generating tons of code, we cache the cleanup blocks that we
//! create for breaks, returns, unwinds, and other jumps. Whenever a new
//! cleanup is scheduled, though, we must clear these cached blocks. A
//! possible improvement would be to keep the cached blocks but simply
//! generate a new block which performs the additional cleanup and then
//! branches to the existing cached blocks.
//!
//! ### AST and loop cleanup scopes
//!
//! AST cleanup scopes are pushed when we begin and end processing an AST
//! node. They are used to house cleanups related to rvalue temporary that
//! get referenced (e.g., due to an expression like `&Foo()`). Whenever an
//! AST scope is popped, we always trans all the cleanups, adding the cleanup
//! code after the postdominator of the AST node.
//!
//! AST nodes that represent breakable loops also push a loop scope; the
//! loop scope never has any actual cleanups, it's just used to point to
//! the basic blocks where control should flow after a "continue" or
//! "break" statement. Popping a loop scope never generates code.
//!
//! ### Custom cleanup scopes
//!
//! Custom cleanup scopes are used for a variety of purposes. The most
//! common though is to handle temporary byproducts, where cleanup only
//! needs to occur on panic. The general strategy is to push a custom
//! cleanup scope, schedule *shallow* cleanups into the custom scope, and
//! then pop the custom scope (without transing the cleanups) when
//! execution succeeds normally. This way the cleanups are only trans'd on
//! unwind, and only up until the point where execution succeeded, at
//! which time the complete value should be stored in an lvalue or some
//! other place where normal cleanup applies.
//!
//! To spell it out, here is an example. Imagine an expression `box expr`.
//! We would basically:
//!
//! 1. Push a custom cleanup scope C.
//! 2. Allocate the box.
//! 3. Schedule a shallow free in the scope C.
//! 4. Trans `expr` into the box.
//! 5. Pop the scope C.
//! 6. Return the box as an rvalue.
//!
//! This way, if a panic occurs while transing `expr`, the custom
//! cleanup scope C is pushed and hence the box will be freed. The trans
//! code for `expr` itself is responsible for freeing any other byproducts
//! that may be in play.
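//!
//! Illustrative sketch (not from the original source) of how those six steps
//! map onto the `CleanupMethods` trait defined below, assuming `fcx` is the
//! current `FunctionContext` and `ptr`/`content_ty` describe the box:
//!
//! ```ignore
//! let custom = fcx.push_custom_cleanup_scope();                   // step 1
//! // step 2: allocate the box, yielding `ptr`
//! fcx.schedule_free_value(CustomScope(custom), ptr,
//!                         HeapExchange, content_ty);              // step 3
//! // step 4: trans `expr` into the box
//! fcx.pop_custom_cleanup_scope(custom);                           // step 5
//! // step 6: hand the box back as an rvalue
//! ```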
pub use self::ScopeId::*;
pub use self::CleanupScopeKind::*;
pub use self::EarlyExitLabel::*;
pub use self::Heap::*;
use llvm::{BasicBlockRef, ValueRef};
use trans::base;
use trans::build;
use trans::callee;
use trans::common;
use trans::common::{Block, FunctionContext, ExprId, NodeIdAndSpan};
use trans::debuginfo::{DebugLoc, ToDebugLoc};
use trans::declare;
use trans::glue;
use middle::region;
use trans::type_::Type;
use middle::ty::{self, Ty};
use std::fmt;
use syntax::ast;
use util::ppaux::Repr;
pub struct CleanupScope<'blk, 'tcx: 'blk> {
// The id of this cleanup scope. If the id is None,
// this is a *temporary scope* that is pushed during trans to
// cleanup miscellaneous garbage that trans may generate whose
// lifetime is a subset of some expression. See module doc for
// more details.
kind: CleanupScopeKind<'blk, 'tcx>,
// Cleanups to run upon scope exit.
cleanups: Vec<CleanupObj<'tcx>>,
// The debug location any drop calls generated for this scope will be
// associated with.
debug_loc: DebugLoc,
cached_early_exits: Vec<CachedEarlyExit>,
cached_landing_pad: Option<BasicBlockRef>,
}
#[derive(Copy, Clone, Debug)]
pub struct CustomScopeIndex {
index: usize
}
pub const EXIT_BREAK: usize = 0;
pub const EXIT_LOOP: usize = 1;
pub const EXIT_MAX: usize = 2;
pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
CustomScopeKind,
AstScopeKind(ast::NodeId),
LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX])
}
impl<'blk, 'tcx: 'blk> fmt::Debug for CleanupScopeKind<'blk, 'tcx> {<|fim▁hole|> match *self {
CustomScopeKind => write!(f, "CustomScopeKind"),
AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
LoopScopeKind(nid, ref blks) => {
try!(write!(f, "LoopScopeKind({}, [", nid));
for blk in blks {
try!(write!(f, "{:p}, ", blk));
}
write!(f, "])")
}
}
}
}
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum EarlyExitLabel {
UnwindExit,
ReturnExit,
LoopExit(ast::NodeId, usize)
}
#[derive(Copy, Clone)]
pub struct CachedEarlyExit {
label: EarlyExitLabel,
cleanup_block: BasicBlockRef,
}
pub trait Cleanup<'tcx> {
fn must_unwind(&self) -> bool;
fn clean_on_unwind(&self) -> bool;
fn is_lifetime_end(&self) -> bool;
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx>;
}
pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;
#[derive(Copy, Clone, Debug)]
pub enum ScopeId {
AstScope(ast::NodeId),
CustomScope(CustomScopeIndex)
}
impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
/// Invoked when we start to trans the code contained within a new cleanup scope.
fn push_ast_cleanup_scope(&self, debug_loc: NodeIdAndSpan) {
debug!("push_ast_cleanup_scope({})",
self.ccx.tcx().map.node_to_string(debug_loc.id));
// FIXME(#2202) -- currently closure bodies have a parent
// region, which messes up the assertion below, since there
// are no cleanup scopes on the stack at the start of
// trans'ing a closure body. I think though that this should
// eventually be fixed by closure bodies not having a parent
// region, though that's a touch unclear, and it might also be
// better just to narrow this assertion more (i.e., by
// excluding id's that correspond to closure bodies only). For
// now we just say that if there is already an AST scope on the stack,
// this new AST scope had better be its immediate child.
let top_scope = self.top_ast_scope();
if top_scope.is_some() {
assert!((self.ccx
.tcx()
.region_maps
.opt_encl_scope(region::CodeExtent::from_node_id(debug_loc.id))
.map(|s|s.node_id()) == top_scope)
||
(self.ccx
.tcx()
.region_maps
.opt_encl_scope(region::CodeExtent::DestructionScope(debug_loc.id))
.map(|s|s.node_id()) == top_scope));
}
self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
debug_loc.debug_loc()));
}
fn push_loop_cleanup_scope(&self,
id: ast::NodeId,
exits: [Block<'blk, 'tcx>; EXIT_MAX]) {
debug!("push_loop_cleanup_scope({})",
self.ccx.tcx().map.node_to_string(id));
assert_eq!(Some(id), self.top_ast_scope());
// Just copy the debuginfo source location from the enclosing scope
let debug_loc = self.scopes
.borrow()
.last()
.unwrap()
.debug_loc;
self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
}
fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
let index = self.scopes_len();
debug!("push_custom_cleanup_scope(): {}", index);
// Just copy the debuginfo source location from the enclosing scope
let debug_loc = self.scopes
.borrow()
.last()
.map(|opt_scope| opt_scope.debug_loc)
.unwrap_or(DebugLoc::None);
self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
CustomScopeIndex { index: index }
}
fn push_custom_cleanup_scope_with_debug_loc(&self,
debug_loc: NodeIdAndSpan)
-> CustomScopeIndex {
let index = self.scopes_len();
debug!("push_custom_cleanup_scope(): {}", index);
self.push_scope(CleanupScope::new(CustomScopeKind,
debug_loc.debug_loc()));
CustomScopeIndex { index: index }
}
/// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup
/// stack, and generates the code to do its cleanups for normal exit.
fn pop_and_trans_ast_cleanup_scope(&self,
bcx: Block<'blk, 'tcx>,
cleanup_scope: ast::NodeId)
-> Block<'blk, 'tcx> {
debug!("pop_and_trans_ast_cleanup_scope({})",
self.ccx.tcx().map.node_to_string(cleanup_scope));
assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
let scope = self.pop_scope();
self.trans_scope_cleanups(bcx, &scope)
}
/// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
/// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
/// branching to a block generated by `normal_exit_block`.
fn pop_loop_cleanup_scope(&self,
cleanup_scope: ast::NodeId) {
debug!("pop_loop_cleanup_scope({})",
self.ccx.tcx().map.node_to_string(cleanup_scope));
assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
let _ = self.pop_scope();
}
/// Removes the top cleanup scope from the stack without executing its cleanups. The top
/// cleanup scope must be the temporary scope `custom_scope`.
fn pop_custom_cleanup_scope(&self,
custom_scope: CustomScopeIndex) {
debug!("pop_custom_cleanup_scope({})", custom_scope.index);
assert!(self.is_valid_to_pop_custom_scope(custom_scope));
let _ = self.pop_scope();
}
/// Removes the top cleanup scope from the stack, which must be a temporary scope, and
/// generates the code to do its cleanups for normal exit.
fn pop_and_trans_custom_cleanup_scope(&self,
bcx: Block<'blk, 'tcx>,
custom_scope: CustomScopeIndex)
-> Block<'blk, 'tcx> {
debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope);
assert!(self.is_valid_to_pop_custom_scope(custom_scope));
let scope = self.pop_scope();
self.trans_scope_cleanups(bcx, &scope)
}
/// Returns the id of the top-most loop scope
fn top_loop_scope(&self) -> ast::NodeId {
for scope in self.scopes.borrow().iter().rev() {
if let LoopScopeKind(id, _) = scope.kind {
return id;
}
}
self.ccx.sess().bug("no loop scope found");
}
/// Returns a block to branch to which will perform all pending cleanups and then
/// break/continue (depending on `exit`) out of the loop with id `cleanup_scope`
fn normal_exit_block(&'blk self,
cleanup_scope: ast::NodeId,
exit: usize) -> BasicBlockRef {
self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
}
/// Returns a block to branch to which will perform all pending cleanups and then return from
/// this function
fn return_exit_block(&'blk self) -> BasicBlockRef {
self.trans_cleanups_to_exit_scope(ReturnExit)
}
fn schedule_lifetime_end(&self,
cleanup_scope: ScopeId,
val: ValueRef) {
let drop = box LifetimeEnd {
ptr: val,
};
debug!("schedule_lifetime_end({:?}, val={})",
cleanup_scope,
self.ccx.tn().val_to_string(val));
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
/// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
fn schedule_drop_mem(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>) {
if !self.type_needs_drop(ty) { return; }
let drop = box DropValue {
is_immediate: false,
must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
val: val,
ty: ty,
fill_on_drop: false,
skip_dtor: false,
};
debug!("schedule_drop_mem({:?}, val={}, ty={}) fill_on_drop={} skip_dtor={}",
cleanup_scope,
self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()),
drop.fill_on_drop,
drop.skip_dtor);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
/// Schedules a (deep) drop and filling of `val`, which is a pointer to an instance of `ty`
fn schedule_drop_and_fill_mem(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>) {
if !self.type_needs_drop(ty) { return; }
let drop = box DropValue {
is_immediate: false,
must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
val: val,
ty: ty,
fill_on_drop: true,
skip_dtor: false,
};
debug!("schedule_drop_and_fill_mem({:?}, val={}, ty={}, fill_on_drop={}, skip_dtor={})",
cleanup_scope,
self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()),
drop.fill_on_drop,
drop.skip_dtor);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
/// Issue #23611: Schedules a (deep) drop of the contents of
/// `val`, which is a pointer to an instance of struct/enum type
/// `ty`. The scheduled code handles extracting the discriminant
/// and dropping the contents associated with that variant
/// *without* executing any associated drop implementation.
fn schedule_drop_adt_contents(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>) {
// `if` below could be "!contents_needs_drop"; skipping drop
// is just an optimization, so sound to be conservative.
if !self.type_needs_drop(ty) { return; }
let drop = box DropValue {
is_immediate: false,
must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
val: val,
ty: ty,
fill_on_drop: false,
skip_dtor: true,
};
debug!("schedule_drop_adt_contents({:?}, val={}, ty={}) fill_on_drop={} skip_dtor={}",
cleanup_scope,
self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()),
drop.fill_on_drop,
drop.skip_dtor);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
/// Schedules a (deep) drop of `val`, which is an instance of `ty`
fn schedule_drop_immediate(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>) {
if !self.type_needs_drop(ty) { return; }
let drop = box DropValue {
is_immediate: true,
must_unwind: common::type_needs_unwind_cleanup(self.ccx, ty),
val: val,
ty: ty,
fill_on_drop: false,
skip_dtor: false,
};
debug!("schedule_drop_immediate({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}",
cleanup_scope,
self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()),
drop.fill_on_drop,
drop.skip_dtor);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
/// Schedules a call to `free(val)`. Note that this is a shallow operation.
fn schedule_free_value(&self,
cleanup_scope: ScopeId,
val: ValueRef,
heap: Heap,
content_ty: Ty<'tcx>) {
let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
debug!("schedule_free_value({:?}, val={}, heap={:?})",
cleanup_scope,
self.ccx.tn().val_to_string(val),
heap);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
fn schedule_clean(&self,
cleanup_scope: ScopeId,
cleanup: CleanupObj<'tcx>) {
match cleanup_scope {
AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
}
}
/// Schedules a cleanup to occur upon exit from `cleanup_scope`. If `cleanup_scope` is not
/// provided, then the cleanup is scheduled in the topmost scope, which must be a temporary
/// scope.
fn schedule_clean_in_ast_scope(&self,
cleanup_scope: ast::NodeId,
cleanup: CleanupObj<'tcx>) {
debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
cleanup_scope);
for scope in self.scopes.borrow_mut().iter_mut().rev() {
if scope.kind.is_ast_with_id(cleanup_scope) {
scope.cleanups.push(cleanup);
scope.clear_cached_exits();
return;
} else {
// will be adding a cleanup to some enclosing scope
scope.clear_cached_exits();
}
}
self.ccx.sess().bug(
&format!("no cleanup scope {} found",
self.ccx.tcx().map.node_to_string(cleanup_scope)));
}
/// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope.
fn schedule_clean_in_custom_scope(&self,
custom_scope: CustomScopeIndex,
cleanup: CleanupObj<'tcx>) {
debug!("schedule_clean_in_custom_scope(custom_scope={})",
custom_scope.index);
assert!(self.is_valid_custom_scope(custom_scope));
let mut scopes = self.scopes.borrow_mut();
let scope = &mut (*scopes)[custom_scope.index];
scope.cleanups.push(cleanup);
scope.clear_cached_exits();
}
/// Returns true if there are pending cleanups that should execute on panic.
fn needs_invoke(&self) -> bool {
self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
}
/// Returns a basic block to branch to in the event of a panic. This block will run the panic
/// cleanups and eventually invoke the LLVM `Resume` instruction.
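    ///
    /// Illustrative sketch (not from the original source) of how a call site
    /// might use this, assuming `bcx` is the current block context:
    ///
    /// ```ignore
    /// let lpad = if bcx.fcx.needs_invoke() {
    ///     Some(bcx.fcx.get_landing_pad())
    /// } else {
    ///     None
    /// };
    /// // emit an Invoke whose unwind edge is `lpad`, or a plain Call if None
    /// ```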
fn get_landing_pad(&'blk self) -> BasicBlockRef {
let _icx = base::push_ctxt("get_landing_pad");
debug!("get_landing_pad");
let orig_scopes_len = self.scopes_len();
assert!(orig_scopes_len > 0);
// Remove any scopes that do not have cleanups on panic:
let mut popped_scopes = vec!();
while !self.top_scope(|s| s.needs_invoke()) {
debug!("top scope does not need invoke");
popped_scopes.push(self.pop_scope());
}
// Check for an existing landing pad in the new topmost scope:
let llbb = self.get_or_create_landing_pad();
// Push the scopes we removed back on:
loop {
match popped_scopes.pop() {
Some(scope) => self.push_scope(scope),
None => break
}
}
assert_eq!(self.scopes_len(), orig_scopes_len);
return llbb;
}
}
impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
/// Returns the id of the current top-most AST scope, if any.
fn top_ast_scope(&self) -> Option<ast::NodeId> {
for scope in self.scopes.borrow().iter().rev() {
match scope.kind {
CustomScopeKind | LoopScopeKind(..) => {}
AstScopeKind(i) => {
return Some(i);
}
}
}
None
}
fn top_nonempty_cleanup_scope(&self) -> Option<usize> {
self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
}
fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
self.is_valid_custom_scope(custom_scope) &&
custom_scope.index == self.scopes.borrow().len() - 1
}
fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
let scopes = self.scopes.borrow();
custom_scope.index < scopes.len() &&
(*scopes)[custom_scope.index].kind.is_temp()
}
/// Generates the cleanups for `scope` into `bcx`
fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
bcx: Block<'blk, 'tcx>,
scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {
let mut bcx = bcx;
if !bcx.unreachable.get() {
for cleanup in scope.cleanups.iter().rev() {
bcx = cleanup.trans(bcx, scope.debug_loc);
}
}
bcx
}
fn scopes_len(&self) -> usize {
self.scopes.borrow().len()
}
fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
self.scopes.borrow_mut().push(scope)
}
fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
debug!("popping cleanup scope {}, {} scopes remaining",
self.top_scope(|s| s.block_name("")),
self.scopes_len() - 1);
self.scopes.borrow_mut().pop().unwrap()
}
fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R {
f(self.scopes.borrow().last().unwrap())
}
/// Used when the caller wishes to jump to an early exit, such as a return, break, continue, or
/// unwind. This function will generate all cleanups between the top of the stack and the exit
/// `label` and return a basic block that the caller can branch to.
///
/// For example, if the current stack of cleanups were as follows:
///
/// AST 22
/// Custom 1
/// AST 23
/// Loop 23
/// Custom 2
/// AST 24
///
/// and the `label` specifies a break from `Loop 23`, then this function would generate a
/// series of basic blocks as follows:
///
/// Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk
///
/// where `break_blk` is the block specified in `Loop 23` as the target for breaks. The return
/// value would be the first basic block in that sequence (`Cleanup(AST 24)`). The caller could
/// then branch to `Cleanup(AST 24)` and it will perform all cleanups and finally branch to the
/// `break_blk`.
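    ///
    /// Illustrative sketch (not from the original source) of how a `break` out
    /// of a loop with id `loop_id` might reach this through the public wrapper:
    ///
    /// ```ignore
    /// let target = bcx.fcx.normal_exit_block(loop_id, EXIT_BREAK);
    /// build::Br(bcx, target, DebugLoc::None);
    /// ```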
fn trans_cleanups_to_exit_scope(&'blk self,
label: EarlyExitLabel)
-> BasicBlockRef {
debug!("trans_cleanups_to_exit_scope label={:?} scopes={}",
label, self.scopes_len());
let orig_scopes_len = self.scopes_len();
let mut prev_llbb;
let mut popped_scopes = vec!();
// First we pop off all the cleanup stacks that are
// traversed until the exit is reached, pushing them
// onto the side vector `popped_scopes`. No code is
// generated at this time.
//
// So, continuing the example from above, we would wind up
// with a `popped_scopes` vector of `[AST 24, Custom 2]`.
// (Presuming that there are no cached exits)
loop {
if self.scopes_len() == 0 {
match label {
UnwindExit => {
// Generate a block that will `Resume`.
let prev_bcx = self.new_block(true, "resume", None);
let personality = self.personality.get().expect(
"create_landing_pad() should have set this");
build::Resume(prev_bcx,
build::Load(prev_bcx, personality));
prev_llbb = prev_bcx.llbb;
break;
}
ReturnExit => {
prev_llbb = self.get_llreturn();
break;
}
LoopExit(id, _) => {
self.ccx.sess().bug(&format!(
"cannot exit from scope {}, \
not in scope", id));
}
}
}
// Check if we have already cached the unwinding of this
// scope for this label. If so, we can stop popping scopes
// and branch to the cached label, since it contains the
// cleanups for any subsequent scopes.
match self.top_scope(|s| s.cached_early_exit(label)) {
Some(cleanup_block) => {
prev_llbb = cleanup_block;
break;
}
None => { }
}
// Pop off the scope, since we will be generating
// unwinding code for it. If we are searching for a loop exit,
// and this scope is that loop, then stop popping and set
// `prev_llbb` to the appropriate exit block from the loop.
popped_scopes.push(self.pop_scope());
let scope = popped_scopes.last().unwrap();
match label {
UnwindExit | ReturnExit => { }
LoopExit(id, exit) => {
match scope.kind.early_exit_block(id, exit) {
Some(exitllbb) => {
prev_llbb = exitllbb;
break;
}
None => { }
}
}
}
}
debug!("trans_cleanups_to_exit_scope: popped {} scopes",
popped_scopes.len());
// Now push the popped scopes back on. As we go,
// we track in `prev_llbb` the exit to which this scope
// should branch when it's done.
//
// So, continuing with our example, we will start out with
// `prev_llbb` being set to `break_blk` (or possibly a cached
// early exit). We will then pop the scopes from `popped_scopes`
// and generate a basic block for each one, prepending it in the
// series and updating `prev_llbb`. So we begin by popping `Custom 2`
// and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)`
// branch to `prev_llbb == break_blk`, giving us a sequence like:
//
// Cleanup(Custom 2) -> prev_llbb
//
// We then pop `AST 24` and repeat the process, giving us the sequence:
//
// Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb
//
// At this point, `popped_scopes` is empty, and so the final block
// that we return to the user is `Cleanup(AST 24)`.
while !popped_scopes.is_empty() {
let mut scope = popped_scopes.pop().unwrap();
if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(&**c, label))
{
let name = scope.block_name("clean");
debug!("generating cleanups for {}", name);
let bcx_in = self.new_block(label.is_unwind(),
&name[..],
None);
let mut bcx_out = bcx_in;
for cleanup in scope.cleanups.iter().rev() {
if cleanup_is_suitable_for(&**cleanup, label) {
bcx_out = cleanup.trans(bcx_out,
scope.debug_loc);
}
}
build::Br(bcx_out, prev_llbb, DebugLoc::None);
prev_llbb = bcx_in.llbb;
} else {
debug!("no suitable cleanups in {}",
scope.block_name("clean"));
}
scope.add_cached_early_exit(label, prev_llbb);
self.push_scope(scope);
}
debug!("trans_cleanups_to_exit_scope: prev_llbb={:?}", prev_llbb);
assert_eq!(self.scopes_len(), orig_scopes_len);
prev_llbb
}
/// Creates a landing pad for the top scope, if one does not exist. The landing pad will
/// perform all cleanups necessary for an unwind and then `resume` to continue error
/// propagation:
///
/// landing_pad -> ... cleanups ... -> [resume]
///
/// (The cleanups and resume instruction are created by `trans_cleanups_to_exit_scope()`, not
/// in this function itself.)
fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
let pad_bcx;
debug!("get_or_create_landing_pad");
// Check if a landing pad block exists; if not, create one.
{
let mut scopes = self.scopes.borrow_mut();
let last_scope = scopes.last_mut().unwrap();
match last_scope.cached_landing_pad {
Some(llbb) => { return llbb; }
None => {
let name = last_scope.block_name("unwind");
pad_bcx = self.new_block(true, &name[..], None);
last_scope.cached_landing_pad = Some(pad_bcx.llbb);
}
}
}
// The landing pad return type (the type being propagated). Not sure what
// this represents but it's determined by the personality function and
// this is what the EH proposal example uses.
let llretty = Type::struct_(self.ccx,
&[Type::i8p(self.ccx), Type::i32(self.ccx)],
false);
// The exception handling personality function.
//
// If our compilation unit has the `eh_personality` lang item somewhere
// within it, then we just need to translate that. Otherwise, we're
// building an rlib which will depend on some upstream implementation of
// this function, so we just codegen a generic reference to it. We don't
// specify any of the types for the function, we just make it a symbol
// that LLVM can later use.
let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
Some(def_id) => {
callee::trans_fn_ref(pad_bcx.ccx(), def_id, ExprId(0),
pad_bcx.fcx.param_substs).val
}
None => {
let mut personality = self.ccx.eh_personality().borrow_mut();
match *personality {
Some(llpersonality) => llpersonality,
None => {
let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
let f = declare::declare_cfn(self.ccx, "rust_eh_personality", fty,
self.ccx.tcx().types.i32);
*personality = Some(f);
f
}
}
}
};
// The only landing pad clause will be 'cleanup'
let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1);
// The landing pad block is a cleanup
build::SetCleanup(pad_bcx, llretval);
// We store the retval in a function-central alloca, so that calls to
// Resume can find it.
match self.personality.get() {
Some(addr) => {
build::Store(pad_bcx, llretval, addr);
}
None => {
let addr = base::alloca(pad_bcx, common::val_ty(llretval), "");
self.personality.set(Some(addr));
build::Store(pad_bcx, llretval, addr);
}
}
// Generate the cleanup block and branch to it.
let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
build::Br(pad_bcx, cleanup_llbb, DebugLoc::None);
return pad_bcx.llbb;
}
}
impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
fn new(kind: CleanupScopeKind<'blk, 'tcx>,
debug_loc: DebugLoc)
-> CleanupScope<'blk, 'tcx> {
CleanupScope {
kind: kind,
debug_loc: debug_loc,
cleanups: vec!(),
cached_early_exits: vec!(),
cached_landing_pad: None,
}
}
fn clear_cached_exits(&mut self) {
self.cached_early_exits = vec!();
self.cached_landing_pad = None;
}
fn cached_early_exit(&self,
label: EarlyExitLabel)
-> Option<BasicBlockRef> {
self.cached_early_exits.iter().
find(|e| e.label == label).
map(|e| e.cleanup_block)
}
fn add_cached_early_exit(&mut self,
label: EarlyExitLabel,
blk: BasicBlockRef) {
self.cached_early_exits.push(
CachedEarlyExit { label: label,
cleanup_block: blk });
}
/// True if this scope has cleanups that need unwinding
fn needs_invoke(&self) -> bool {
self.cached_landing_pad.is_some() ||
self.cleanups.iter().any(|c| c.must_unwind())
}
/// Returns a suitable name to use for the basic block that handles this cleanup scope
fn block_name(&self, prefix: &str) -> String {
match self.kind {
CustomScopeKind => format!("{}_custom_", prefix),
AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
}
}
pub fn drop_non_lifetime_clean(&mut self) {
self.cleanups.retain(|c| c.is_lifetime_end());
}
}
impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
fn is_temp(&self) -> bool {
match *self {
CustomScopeKind => true,
LoopScopeKind(..) | AstScopeKind(..) => false,
}
}
fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
match *self {
CustomScopeKind | LoopScopeKind(..) => false,
AstScopeKind(i) => i == id
}
}
fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
match *self {
CustomScopeKind | AstScopeKind(..) => false,
LoopScopeKind(i, _) => i == id
}
}
/// If this is a loop scope with id `id`, return the early exit block `exit`, else `None`
fn early_exit_block(&self,
id: ast::NodeId,
exit: usize) -> Option<BasicBlockRef> {
match *self {
LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
_ => None,
}
}
}
impl EarlyExitLabel {
fn is_unwind(&self) -> bool {
match *self {
UnwindExit => true,
_ => false
}
}
}
///////////////////////////////////////////////////////////////////////////
// Cleanup types
#[derive(Copy, Clone)]
pub struct DropValue<'tcx> {
is_immediate: bool,
must_unwind: bool,
val: ValueRef,
ty: Ty<'tcx>,
fill_on_drop: bool,
skip_dtor: bool,
}
impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
fn must_unwind(&self) -> bool {
self.must_unwind
}
fn clean_on_unwind(&self) -> bool {
self.must_unwind
}
fn is_lifetime_end(&self) -> bool {
false
}
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
let skip_dtor = self.skip_dtor;
let _icx = if skip_dtor {
base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=true")
} else {
base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=false")
};
let bcx = if self.is_immediate {
glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
} else {
glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
};
if self.fill_on_drop {
base::drop_done_fill_mem(bcx, self.val, self.ty);
}
bcx
}
}
#[derive(Copy, Clone, Debug)]
pub enum Heap {
HeapExchange
}
#[derive(Copy, Clone)]
pub struct FreeValue<'tcx> {
ptr: ValueRef,
heap: Heap,
content_ty: Ty<'tcx>
}
impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
fn must_unwind(&self) -> bool {
true
}
fn clean_on_unwind(&self) -> bool {
true
}
fn is_lifetime_end(&self) -> bool {
false
}
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
match self.heap {
HeapExchange => {
glue::trans_exchange_free_ty(bcx,
self.ptr,
self.content_ty,
debug_loc)
}
}
}
}
#[derive(Copy, Clone)]
pub struct LifetimeEnd {
ptr: ValueRef,
}
impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
fn must_unwind(&self) -> bool {
false
}
fn clean_on_unwind(&self) -> bool {
true
}
fn is_lifetime_end(&self) -> bool {
true
}
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
debug_loc.apply(bcx.fcx);
base::call_lifetime_end(bcx, self.ptr);
bcx
}
}
pub fn temporary_scope(tcx: &ty::ctxt,
id: ast::NodeId)
-> ScopeId {
match tcx.region_maps.temporary_scope(id) {
Some(scope) => {
let r = AstScope(scope.node_id());
debug!("temporary_scope({}) = {:?}", id, r);
r
}
None => {
tcx.sess.bug(&format!("no temporary scope available for expr {}",
id))
}
}
}
pub fn var_scope(tcx: &ty::ctxt,
id: ast::NodeId)
-> ScopeId {
let r = AstScope(tcx.region_maps.var_scope(id).node_id());
debug!("var_scope({}) = {:?}", id, r);
r
}
fn cleanup_is_suitable_for(c: &Cleanup,
label: EarlyExitLabel) -> bool {
!label.is_unwind() || c.clean_on_unwind()
}
///////////////////////////////////////////////////////////////////////////
// These traits just exist to put the methods into this file.
pub trait CleanupMethods<'blk, 'tcx> {
fn push_ast_cleanup_scope(&self, id: NodeIdAndSpan);
fn push_loop_cleanup_scope(&self,
id: ast::NodeId,
exits: [Block<'blk, 'tcx>; EXIT_MAX]);
fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
fn push_custom_cleanup_scope_with_debug_loc(&self,
debug_loc: NodeIdAndSpan)
-> CustomScopeIndex;
fn pop_and_trans_ast_cleanup_scope(&self,
bcx: Block<'blk, 'tcx>,
cleanup_scope: ast::NodeId)
-> Block<'blk, 'tcx>;
fn pop_loop_cleanup_scope(&self,
cleanup_scope: ast::NodeId);
fn pop_custom_cleanup_scope(&self,
custom_scope: CustomScopeIndex);
fn pop_and_trans_custom_cleanup_scope(&self,
bcx: Block<'blk, 'tcx>,
custom_scope: CustomScopeIndex)
-> Block<'blk, 'tcx>;
fn top_loop_scope(&self) -> ast::NodeId;
fn normal_exit_block(&'blk self,
cleanup_scope: ast::NodeId,
exit: usize) -> BasicBlockRef;
fn return_exit_block(&'blk self) -> BasicBlockRef;
fn schedule_lifetime_end(&self,
cleanup_scope: ScopeId,
val: ValueRef);
fn schedule_drop_mem(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>);
fn schedule_drop_and_fill_mem(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>);
fn schedule_drop_adt_contents(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>);
fn schedule_drop_immediate(&self,
cleanup_scope: ScopeId,
val: ValueRef,
ty: Ty<'tcx>);
fn schedule_free_value(&self,
cleanup_scope: ScopeId,
val: ValueRef,
heap: Heap,
content_ty: Ty<'tcx>);
fn schedule_clean(&self,
cleanup_scope: ScopeId,
cleanup: CleanupObj<'tcx>);
fn schedule_clean_in_ast_scope(&self,
cleanup_scope: ast::NodeId,
cleanup: CleanupObj<'tcx>);
fn schedule_clean_in_custom_scope(&self,
custom_scope: CustomScopeIndex,
cleanup: CleanupObj<'tcx>);
fn needs_invoke(&self) -> bool;
fn get_landing_pad(&'blk self) -> BasicBlockRef;
}
trait CleanupHelperMethods<'blk, 'tcx> {
fn top_ast_scope(&self) -> Option<ast::NodeId>;
fn top_nonempty_cleanup_scope(&self) -> Option<usize>;
fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
fn trans_scope_cleanups(&self,
bcx: Block<'blk, 'tcx>,
scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
fn trans_cleanups_to_exit_scope(&'blk self,
label: EarlyExitLabel)
-> BasicBlockRef;
fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
fn scopes_len(&self) -> usize;
fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R;
}<|fim▁end|> | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
<|file_name|>sextutils.py<|end_file_name|><|fim▁begin|>"""
Utilities for parsing SExtractor files.
H. Ferguson - revised 4/23/03 to promote ints to floats if a value
with a decimal point appears somewhere in the column originally thought
to be integers
version::
v2.1 - fails gracefully when the catalog has no sources
v3.0 - added gettypes to return column types
- create new column names when they are not explicitly in the header
v4.0 - added gettypes to return column types
v4.1 - uses numarray by default
v4.2 - delete attribute 'l' (input lines from catalog) before returning
v4.3 - 1/11/06 Added less-offensive alias se_catalog() == sextractor()
v4.4hf-1/21/06 Fixed bug in creating extra column names when last is a vector
v4.4vl - V. Laidler added new methods:
__len__ returns number of objects in catalog
__iter__ returns the index of the next row in the catalog
line(self,i) returns a constructed string containing the ith line
buildheader returns a constructed header from the hdict
Added new attribute self.header: contains the header as read in
from the catalog.
Lines that start with '#' but are not followed by an integer are
now assumed to be comment lines, which are added to the
header but otherwise skipped.
v4.5 - V. Laidler removed Numeric dependence
v4.6 - V. Laidler converted to numpy
v5.0 - 7/5/07 Numpy conversion
v6.0 - V. Laidler: added rw_catalog class, reworked internals to avoid
column name clashes
v7.0 - S.-M. Niemi: some modifications
v7.1 - S.-M. Niemi: now supports string columns
"""
__version__ = '7.1'
__author__ = 'Henry C. Ferguson, STScI'
import string
import numpy as N
import os, sys
class se_catalog(object):
"""
Read a SExtractor-style catalog.
Usage: c=se_catalog(catalog,readfile=True,preserve_case=False)
Will read the catalog and return an object c, whose attributes are
arrays containing the data. For example, c.mag_auto contains the
mag_auto values.
Arguments:
catalog -- The input SExtractor catalog.
readfile -- True means read the data. False means return the
object without reading the data. The lines from the catalog
are returned as a list of ascii strings c.l. Useful if you want
to do some special parsing of some sort.
preserve_case -- default (False) converts column names to lower case
The input catalog MUST have a header with the SExtractor format:
# 1 ID comment
# 2 ALPHA_J200 another comment
That is, first column is the comment symbol #, second column is
the column number, third column is the column name, and the rest
of the line is a comment. SExtractor allows "vectors" to be identified
only by the first column...e.g.
# 12 FLUX_APER
# 20 FLUXERR_APER
the missing columns are all aperture fluxes through different
apertures. These will be read into attributes:
c.flux_aper # The first one
c.flux_aper_1 # the second one, and so on
The case of aperture radii is a bit nasty, since these only
appear in the SExtractor configuration file. Use parseconfig()
to read that file.
"""
def __init__(self, cfile, readfile=True, preserve_case=False):
(self._d, self._l, self._ncolumns, self._header) = initcat(cfile,
preserve_case=preserve_case)
self._fname = cfile
if readfile:
self._colentries = range(len(self._l))
for i in range(len(self._l)):
self._colentries[i] = self._l[i].split()
#SMN: added
if min(self._d.values()) == 0:
for key in self._d: self._d[key] += 1
self.gettypes()
for k in self._d.keys():
contents = getcolvalues(self._d[k],
self._type[k],
self._colentries)
colname = self._okname(k)
setattr(self, colname, contents)
delattr(self, '_l')
def __len__(self):
return len(self._colentries)
def __iter__(self):
return range(len(self._colentries)).__iter__()
def _okname(self, k):
try:
#Munge column name if it conflicts
test = self.__getattribute__(k)
newkey = 'c_' + k
print "--Column '%s' changed to '%s' to avoid conflicts" % (k, newkey)
self._d[newkey] = self._d[k]
del self._d[k]
return newkey
except AttributeError:
return k
def line(self, i):
"""
        Returns an assembled line of this catalog suitable for writing.
        Note that it reassembles the tokens as originally read from the file,
        so later changes to the column attributes are not reflected here
        (rw_catalog.line handles that case).
"""
ans = ' '.join(self._colentries[i]) + '\n'
return ans
def buildheader(self):
"""
Reconstruct the header from the header dictionary.
This might be useful if only a few columns were selected
from the file; otherwise just use the 'header' attribute.
"""
lines = {}
for k in self._d:
lines[self._d[k]] = '# %d %s' % (self._d[k], k.upper())
#sort the new keys
nkeys = lines.keys()
nkeys.sort()
#join them together with newlines
ans = ''
for k in nkeys:
ans = ans + "%s\n" % lines[k]
return ans
def getcol(self, col, offset=0):
column = self._d[col]
return getcol(column + offset, self._l)
def getcols(self, *args):
ret = []
for i in range(len(args)):
ret = ret + [getcol(self._d[args[i]], self._l)]
return ret
def gettypes(self):
self._type = {}
for k in self._d.keys():
            # sample a subset of rows when sniffing column types; the step
            # sizes below are heuristics and may need tuning
if len(self._l) > 1000000:
every = 500
elif len(self._l) > 10000:
every = 20
else:
every = 10
ret = getcol(self._d[k], self._l[::every])
t = type(ret)
if t == type(N.array([1])):
if ret.dtype.char == 'i' or ret.dtype.char == 'l':
t = type(1)
elif ret.dtype.char == 'd':
t = type(1.e99)
else:
t = type('string')
#print k, t
self._type[k] = t
class sextractor(se_catalog): # Just an alias for class se_catalog
""" Read SExtractor catalog...just an alias for se_catalog """
pass
class rw_catalog(se_catalog):
""" Extend the se_catalog class to support adding new columns,
and writing out the new version."""
def __init__(self, fname):
self._modflag = False #this flag will be set by add_column routines
self._fname = fname
self._colnames = []
se_catalog.__init__(self, fname,
readfile=True, preserve_case=False)
coldict = invert_dict(self._d)
for k in coldict:
self._colnames.append(coldict[k])
def addcolumn(self, input_colname, coldata):
""" coldata must be a 1d numarray of the correct length"""
if len(coldata) != len(self):
raise ValueError, "Column length must match catalog length"
colname = self._okname(input_colname)
#Most of the bookkeeping is the same as for an empty column
self.addemptycolumn(colname, coldata.dtype)
#and then we reset the column to contain the actual data
setattr(self, colname, coldata)
def addemptycolumn(self, input_colname, coltype):
""" Defines a new column & updates all the bookkeeping, but
does not actually fill in the data. """
colname = self._okname(input_colname)
<|fim▁hole|> self._modflag = True
self._type[colname] = coltype
#Looks strange here because we count columns from 1 but
#Python counts them from 0
self._ncolumns += 1
self._d[colname] = self._ncolumns
self._colnames.append(colname)
self._header += '# %d %s\n' % (self._ncolumns, colname)
def line(self, rownum):
""" Construct a new line as to be printed out """
if not self._modflag:
return se_catalog.line(self, rownum)
else:
linelist = []
for c in self._colnames:
col = getattr(self, c)
linelist.append(str(col[rownum]))
line = ' '.join(linelist) + '\n'
return line
def writeto(self, outname, clobber=False):
if not clobber:
if os.path.isfile(outname):
raise ValueError, """File already exists.
Use .writeto(fname, clobber=True) to overwrite. """
out = open(outname, 'w')
out.write(self._header)
for k in range(len(self)):
out.write(self.line(k))
out.close()
def printme(self):
""" Like writeto, but for sys.stdout """
sys.stdout.write(self._header)
for k in range(len(self)):
sys.stdout.write(self.line(k))
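# Sketch of the rw_catalog round trip described in the class docstring; the
# catalog names and the derived column are illustrative assumptions:
#
#   cat = rw_catalog('in.cat')
#   cat.addcolumn('snr', cat.flux_aper / cat.fluxerr_aper)
#   cat.addemptycolumn('flag', N.int32)
#   cat.writeto('out.cat', clobber=True)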
def invert_dict(d):
""" Generate a new dictionary with the key/value relationship inverted """
newd = {}
for k in d:
newd[d[k]] = k
return newd
def parseconfig_se(cfile):
""" parseconfig -- read a SExtractor .sex file and return a dictionary
of options & values. Comments are ignored.
"""
cdict = {}
f = open(cfile, 'r')
lines = f.readlines()
for l in lines:
a = string.split(l)
if len(a) > 0:
if a[0][0] != '#':
maxi = len(a)
for i in range(1, len(a)):
if a[i][0] == '#':
maxi = i
break
# Turn comma-separated lists into python lists
entry = []
for e in a[1:maxi]:
                    if string.find(e, ',') >= 0:
entry = entry + string.split(e, ',')
else:
entry = entry + [e]
cdict[a[0]] = entry
return cdict
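# Intended use (the .sex file name is a placeholder; the keyword is one of
# SExtractor's own configuration parameters):
#
#   cfg = parseconfig_se('default.sex')
#   apertures = cfg.get('PHOT_APERTURES', [])  # comma-separated values arrive as a list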
def initcat(catfile, preserve_case=False):
""" parseheader -- reads the header of a SExtractor catalog file and
returns a dictionary of parameter names and column numbers.
Also returns a list of lines containing the data.
"""
hdict = {}
header = []
f = open(catfile, 'r')
lines = f.readlines()
f.close()
first = 1
firstdata = 0
i = 0
previous_column = 0
previous_key = ""
for l in lines:
if l.startswith('#'): #this is a header line
header.append(l)
a = (l.replace('#', '# ')).split() #Guard against "#10 colname"
try:
col = int(a[1])
# If the column numbers skip, create new column names for
# columns not named explicitly in the header
if col != previous_column + 1:
for c in range(previous_column + 1, col):
column_name = previous_key + "_%d" % (c - previous_column)
hdict[column_name] = c
# Update this column in the dictionary
if (preserve_case):
column_name = a[2]
else:
column_name = a[2].lower()
hdict[column_name] = col
firstdata = i + 1
previous_column = col
previous_key = column_name
except (ValueError, IndexError):
#it's a comment line with no column number,
#or an entirely blank comment line: skip
pass
else: # This is where the data start
if previous_column == 0:
raise ValueError("No valid header found in %s" % catfile)
a = string.split(l)
if len(a) > 0:
if first:
firstdata = i
first = 0
# Check if there are extra columns
if len(a) > previous_column:
# If so, add keys for the last entry
for c in range(previous_column + 1, len(a)):
column_name = previous_key + "_%d" % (c - previous_column)
                        # previous_key already reflects the requested case, so
                        # just record the (1-based) index of the extra column
                        hdict[column_name] = c
ncolumns = len(a)
i = i + 1
return(hdict, lines[firstdata:], ncolumns, ''.join(header))
def getcol(col, lines):
""" Get a column from a SExtractor catalog. Determine the type
(integer, float, string) and return either an array of that
type (Int32, Float64) or a list of strings """
i = col - 1 # Columns start at 1, arrays start at 0
nlines = len(lines)
if len(lines) == 0:
values = N.array([])
return values
a = string.split(lines[0])
if string.find(a[i], '.') < 0:
try:
x = int(a[i])
except:
values = range(nlines)
getstrings(col, lines, values)
else:
values = N.zeros((nlines), N.int32)
if type(getints(col, lines, values)) == type(-1):
values = N.zeros((nlines), N.float64)
getfloats(col, lines, values)
else:
try:
x = float(a[i])
except:
values = range(nlines)
getstrings(col, lines, values)
else:
values = N.zeros((nlines), N.float64)
getfloats(col, lines, values)
return values
def getcolvalues(col, coltype, colentries, colzero=False):
""" Get a column from a SExtractor catalog. Determine the type
(integer, float, string) and return either an array of that
type (Int32, Float64) or a list of strings """
i = col - 1 # Columns start at 1, arrays start at 0
nlines = len(colentries)
if len(colentries) == 0:
values = N.array([])
return values
if coltype == type('string'):
values = range(nlines)
for j in range(nlines):
values[j] = colentries[j][i]
if coltype == type(1.0): # Convert floats
values = N.zeros((nlines), N.float64)
for j in range(nlines):
values[j] = float(colentries[j][i])
if coltype == type(1): # Convert Ints
values = N.zeros((nlines), N.int32)
for j in range(nlines):
values[j] = int(colentries[j][i])
return values
def getstrings(col, lines, values):
n = 0
for l in lines:
a = string.split(l)
values[n] = a[col - 1]
n = n + 1
def getints(col, lines, values):
n = 0
for l in lines:
a = string.split(l)
if string.find(a[col - 1], '.') > 0:
return -1
else:
values[n] = int(a[col - 1])
n = n + 1
return values
def getfloats(col, lines, values):
n = 0
for l in lines:
a = string.split(l)
values[n] = float(a[col - 1])
n = n + 1
def getcols(d, l, *args):
""" Get multiple columns from SExtractor list using getcol() """
ret = []
for i in range(len(args)):
ret = ret + [getcol(d[args[i]], l)]
return ret
def writeheader(fh, colnames):
""" Write an SExtractor-style header to an open file handle.
:param fh: file handle
:type fh: file
:param colnames: list of column names
:type colnames: list
:todo: add space checking to colnames
:todo: permit passing a filename?
:todo: handle comments
"""
for i in range(len(colnames)):
fh.write('# %d %s\n' % (i + 1, colnames[i]))<|fim▁end|> | setattr(self, colname, N.zeros((len(self),), coltype)) |
<|file_name|>topup.go<|end_file_name|><|fim▁begin|>package stripe
// TopupParams is the set of parameters that can be used when creating or updating a top-up.
// For more details see https://stripe.com/docs/api#create_topup and https://stripe.com/docs/api#update_topup.
type TopupParams struct {
Params `form:"*"`
Amount *int64 `form:"amount"`
Currency *string `form:"currency"`
Description *string `form:"description"`
Source *SourceParams `form:"*"` // SourceParams has custom encoding so brought to top level with "*"
StatementDescriptor *string `form:"statement_descriptor"`
TransferGroup *string `form:"transfer_group"`
}
// SetSource adds valid sources to a TopupParams object,
// returning an error for unsupported sources.
func (p *TopupParams) SetSource(sp interface{}) error {
source, err := SourceParamsFor(sp)
p.Source = source
return err
}
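// Illustrative use of SetSource (a sketch, not part of this file's tests).
// The token value is a placeholder, and this assumes SourceParamsFor accepts
// a raw token string:
//
//	params := &TopupParams{
//		Amount:   Int64(2000),
//		Currency: String(string(CurrencyUSD)),
//	}
//	if err := params.SetSource("tok_visa"); err != nil {
//		// the source type was not recognised
//	}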
// TopupListParams is the set of parameters that can be used when listing top-ups.
// For more details see https://stripe.com/docs/api#list_topups.
type TopupListParams struct {
ListParams `form:"*"`
Created *int64 `form:"created"`
CreatedRange *RangeQueryParams `form:"created"`
}
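// Listing sketch, assuming the usual stripe-go pattern of a resource client in
// the topup subpackage (names shown are assumptions, not defined in this file):
//
//	it := topup.List(&TopupListParams{})
//	for it.Next() {
//		t := it.Topup()
//		// inspect t.Status, t.Amount, ...
//	}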
// TopupList is a list of top-ups as retrieved from a list endpoint.
type TopupList struct {
ListMeta
Data []*Topup `json:"data"`
}
// Topup is the resource representing a Stripe top-up.
// For more details see https://stripe.com/docs/api#topups.
type Topup struct {
Amount int64 `json:"amount"`
ArrivalDate int64 `json:"arrival_date"`
BalanceTransaction *BalanceTransaction `json:"balance_transaction"`
Created int64 `json:"created"`
Currency Currency `json:"currency"`<|fim▁hole|> ExpectedAvailabilityDate int64 `json:"expected_availability_date"`
FailureCode string `json:"failure_code"`
FailureMessage string `json:"failure_message"`
ID string `json:"id"`
Livemode bool `json:"livemode"`
Source *PaymentSource `json:"source"`
StatementDescriptor string `json:"statement_descriptor"`
Status string `json:"status"`
TransferGroup string `json:"transfer_group"`
}<|fim▁end|> | Description string `json:"description"` |
<|file_name|>StringUtils.java<|end_file_name|><|fim▁begin|>package com.temenos.soa.plugin.uml2dsconverter.utils;
// General String utilities
public class StringUtils {
/**
     * Turns the first character of a string into an uppercase character
* @param source The source string
* @return String Resultant string
*/
public static String upperInitialCharacter(String source) {
final StringBuilder result = new StringBuilder(source.length());
result.append(Character.toUpperCase(source.charAt(0))).append(source.substring(1));
return result.toString();
}
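    // Expected behaviour, shown for illustration only (not a unit test in this project):
    //   upperInitialCharacter("orderId") -> "OrderId"
    //   lowerInitialCharacter("OrderId") -> "orderId"
    // Both methods assume a non-empty input; charAt(0) throws on an empty string.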
/**
     * Turns the first character of a string into a lowercase character
* @param source The source string
* @return String Resultant string
*/
public static String lowerInitialCharacter(String source) {
final StringBuilder result = new StringBuilder(source.length()); <|fim▁hole|> return result.toString();
}
}<|fim▁end|> | result.append(Character.toLowerCase(source.charAt(0))).append(source.substring(1)); |
<|file_name|>_express_route_circuit_connections_operations.py<|end_file_name|><|fim▁begin|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitConnectionsOperations(object):
"""ExpressRouteCircuitConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str<|fim▁hole|> cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified Express Route Circuit Connection from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuitConnection"
"""Gets the specified Express Route Circuit Connection from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.ExpressRouteCircuitConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
express_route_circuit_connection_parameters, # type: "_models.ExpressRouteCircuitConnection"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteCircuitConnection"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(express_route_circuit_connection_parameters, 'ExpressRouteCircuitConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
express_route_circuit_connection_parameters, # type: "_models.ExpressRouteCircuitConnection"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteCircuitConnection"]
"""Creates or updates a Express Route Circuit Connection in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit connection.
:type connection_name: str
:param express_route_circuit_connection_parameters: Parameters supplied to the create or update
express route circuit connection operation.
:type express_route_circuit_connection_parameters: ~azure.mgmt.network.v2020_11_01.models.ExpressRouteCircuitConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteCircuitConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_11_01.models.ExpressRouteCircuitConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
express_route_circuit_connection_parameters=express_route_circuit_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ExpressRouteCircuitConnectionListResult"]
"""Gets all global reach connections associated with a private peering in an express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitConnectionListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_11_01.models.ExpressRouteCircuitConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections'} # type: ignore<|fim▁end|> | **kwargs # type: Any
):
# type: (...) -> None |
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>from django.test import TestCase
from manager.models import Page
from datetime import datetime, timedelta
from django.utils import timezone
class PageTestCase(TestCase):
def setUp(self):
now = timezone.now()
Page.objects.create(url="testurl", description="test description")
def test_regular_page_active(self):
"""Page with no pause or time/date range is active."""
page = Page.objects.get(url="/testurl")
self.assertFalse(page.is_paused())
self.assertTrue(page.is_active())
def test_paused_page_not_active(self):
"""Page that has been paused is not active."""
page = Page.objects.get(url="/testurl")
page.pause_at = timezone.now().replace(hour=12)
current_time = timezone.now().replace(hour=13)
self.assertTrue(page.is_paused(current_time))
self.assertFalse(page.is_active(current_time))
def test_previously_paused_page_active(self):
"""Page that has is not paused but has been in the past is active."""
page = Page.objects.get(url="/testurl")
page.paused_at = timezone.now() - timedelta(hours=48)
self.assertFalse(page.is_paused())
self.assertTrue(page.is_active())
page.paused_at = timezone.now()
morning = timezone.now().replace(hour=6)
self.assertFalse(page.is_paused(morning))
self.assertTrue(page.is_active(morning))
def test_page_active_time_of_day(self):
"""Page has certain times of day it should be visible."""
page = Page.objects.get(url="/testurl")
now = timezone.now().replace(hour=12)
# Default page has no times -> active
self.assertTrue(page.is_active(now))
# Set start time in the future
page.active_time_start = now.replace(hour=13).time()
self.assertFalse(page.is_active(now))
# Set time to be past start time
now = now.replace(hour=14)<|fim▁hole|> # Set end time in the future, still active
page.active_time_end = now.replace(hour=15).time()
self.assertTrue(page.is_active(now))
# Set time to be past end-time -> inactive
now = now.replace(hour=16)
self.assertFalse(page.is_active(now))
# Set start time in the future but bigger than end-time
page.active_time_start = now.replace(hour=17).time()
self.assertFalse(page.is_active(now))
# Time bigger than start time in the evening
now = now.replace(hour=19)
self.assertTrue(page.is_active(now))
def test_page_date_range(self):
"""Page has certains dates it should be visible."""
page = Page.objects.get(url="/testurl")
now = timezone.now()
today = now.date()
page.active_date_start = today
self.assertTrue(page.is_active(now))
page.active_date_start = today + timedelta(days=1)
self.assertFalse(page.is_active(now))
page.active_date_start = today - timedelta(days=7)
page.active_date_end = today - timedelta(days=3)
self.assertFalse(page.is_active(now))
def test_page_weekdays(self):
"""Page is active on certain weekdays"""
page = Page.objects.get(url="/testurl")
now = datetime(2014, 4, 28, 16, 53) # Monday
page.active_date_start = now.date()
self.assertTrue(page.is_active(now))
page.monday = False
self.assertFalse(page.is_active(now))
now = now + timedelta(days=1)
self.assertTrue(page.is_active(now))<|fim▁end|> | self.assertTrue(page.is_active(now))
|
<|file_name|>auth.py<|end_file_name|><|fim▁begin|>from django.contrib.auth.backends import ModelBackend
from django.contrib.sites.models import Site
from socialregistration.contrib.twitter.models import TwitterProfile
class TwitterAuth(ModelBackend):
def authenticate(self, twitter_id=None):
try:
return TwitterProfile.objects.get(
twitter_id=twitter_id,
site=Site.objects.get_current()
).user<|fim▁hole|> return None<|fim▁end|> | except TwitterProfile.DoesNotExist: |
<|file_name|>certs_test.go<|end_file_name|><|fim▁begin|>/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package phases
import (
"fmt"
"os"
"strings"
"testing"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil"
"k8s.io/kubernetes/pkg/util/node"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
cmdtestutil "k8s.io/kubernetes/cmd/kubeadm/test/cmd"
)
// phaseTestK8sVersion is a fake kubernetes version to use when testing
const phaseTestK8sVersion = "v1.10.0"
func TestCertsSubCommandsHasFlags(t *testing.T) {
subCmds := getCertsSubCommands(phaseTestK8sVersion)
commonFlags := []string{
"cert-dir",
"config",
}
var tests = []struct {
command string
additionalFlags []string
}{
{
command: "all",
additionalFlags: []string{
"apiserver-advertise-address",
"apiserver-cert-extra-sans",
"service-cidr",
"service-dns-domain",
},
},
{
command: "ca",
},
{
command: "apiserver",
additionalFlags: []string{
"apiserver-advertise-address",
"apiserver-cert-extra-sans",
"service-cidr",
"service-dns-domain",
},
},
{
command: "apiserver-kubelet-client",
},
{
command: "etcd-ca",
},
{
command: "etcd-server",
},
{
command: "etcd-peer",
},
{
command: "etcd-healthcheck-client",
},
{
command: "apiserver-etcd-client",
},
{
command: "sa",
},
{
command: "front-proxy-ca",
},
{
command: "front-proxy-client",
},
}
for _, test := range tests {
expectedFlags := append(commonFlags, test.additionalFlags...)
cmdtestutil.AssertSubCommandHasFlags(t, subCmds, test.command, expectedFlags...)
}
}
func TestSubCmdCertsCreateFilesWithFlags(t *testing.T) {
subCmds := getCertsSubCommands(phaseTestK8sVersion)
var tests = []struct {
subCmds []string
expectedFiles []string
}{
{
subCmds: []string{"all"},
expectedFiles: []string{
kubeadmconstants.CACertName, kubeadmconstants.CAKeyName,
kubeadmconstants.APIServerCertName, kubeadmconstants.APIServerKeyName,
kubeadmconstants.APIServerKubeletClientCertName, kubeadmconstants.APIServerKubeletClientKeyName,
kubeadmconstants.ServiceAccountPrivateKeyName, kubeadmconstants.ServiceAccountPublicKeyName,
kubeadmconstants.FrontProxyCACertName, kubeadmconstants.FrontProxyCAKeyName,<|fim▁hole|> {
subCmds: []string{"ca", "apiserver", "apiserver-kubelet-client"},
expectedFiles: []string{kubeadmconstants.CACertName, kubeadmconstants.CAKeyName, kubeadmconstants.APIServerCertName, kubeadmconstants.APIServerKeyName, kubeadmconstants.APIServerKubeletClientCertName, kubeadmconstants.APIServerKubeletClientKeyName},
},
{
subCmds: []string{"etcd-ca", "etcd-server", "etcd-peer", "etcd-healthcheck-client", "apiserver-etcd-client"},
expectedFiles: []string{
kubeadmconstants.EtcdCACertName, kubeadmconstants.EtcdCAKeyName,
kubeadmconstants.EtcdServerCertName, kubeadmconstants.EtcdServerKeyName,
kubeadmconstants.EtcdPeerCertName, kubeadmconstants.EtcdPeerKeyName,
kubeadmconstants.EtcdHealthcheckClientCertName, kubeadmconstants.EtcdHealthcheckClientKeyName,
kubeadmconstants.APIServerEtcdClientCertName, kubeadmconstants.APIServerEtcdClientKeyName,
},
},
{
subCmds: []string{"sa"},
expectedFiles: []string{kubeadmconstants.ServiceAccountPrivateKeyName, kubeadmconstants.ServiceAccountPublicKeyName},
},
{
subCmds: []string{"front-proxy-ca", "front-proxy-client"},
expectedFiles: []string{kubeadmconstants.FrontProxyCACertName, kubeadmconstants.FrontProxyCAKeyName, kubeadmconstants.FrontProxyClientCertName, kubeadmconstants.FrontProxyClientKeyName},
},
}
for _, test := range tests {
t.Run(strings.Join(test.subCmds, ","), func(t *testing.T) {
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// executes given sub commands
for _, subCmdName := range test.subCmds {
fmt.Printf("running command %q\n", subCmdName)
certDirFlag := fmt.Sprintf("--cert-dir=%s", tmpdir)
cmdtestutil.RunSubCommand(t, subCmds, subCmdName, certDirFlag)
}
// verify expected files are there
testutil.AssertFileExists(t, tmpdir, test.expectedFiles...)
})
}
}
func TestSubCmdCertsApiServerForwardsFlags(t *testing.T) {
subCmds := getCertsSubCommands(phaseTestK8sVersion)
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
// creates ca cert
certDirFlag := fmt.Sprintf("--cert-dir=%s", tmpdir)
cmdtestutil.RunSubCommand(t, subCmds, "ca", certDirFlag)
// creates apiserver cert
apiserverFlags := []string{
fmt.Sprintf("--cert-dir=%s", tmpdir),
"--apiserver-cert-extra-sans=foo,boo",
"--service-cidr=10.0.0.0/24",
"--service-dns-domain=mycluster.local",
"--apiserver-advertise-address=1.2.3.4",
}
cmdtestutil.RunSubCommand(t, subCmds, "apiserver", apiserverFlags...)
// asserts created cert has values from CLI flags
APIserverCert, err := pkiutil.TryLoadCertFromDisk(tmpdir, kubeadmconstants.APIServerCertAndKeyBaseName)
if err != nil {
t.Fatalf("Error loading API server certificate: %v", err)
}
hostname, err := node.GetHostname("")
if err != nil {
t.Fatal(err)
}
for i, name := range []string{hostname, "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.mycluster.local"} {
if APIserverCert.DNSNames[i] != name {
t.Errorf("APIserverCert.DNSNames[%d] is %s instead of %s", i, APIserverCert.DNSNames[i], name)
}
}
for i, ip := range []string{"10.0.0.1", "1.2.3.4"} {
if APIserverCert.IPAddresses[i].String() != ip {
t.Errorf("APIserverCert.IPAddresses[%d] is %s instead of %s", i, APIserverCert.IPAddresses[i], ip)
}
}
}
func TestSubCmdCertsCreateFilesWithConfigFile(t *testing.T) {
subCmds := getCertsSubCommands(phaseTestK8sVersion)
var tests = []struct {
subCmds []string
expectedFiles []string
}{
{
subCmds: []string{"all"},
expectedFiles: []string{
kubeadmconstants.CACertName, kubeadmconstants.CAKeyName,
kubeadmconstants.APIServerCertName, kubeadmconstants.APIServerKeyName,
kubeadmconstants.APIServerKubeletClientCertName, kubeadmconstants.APIServerKubeletClientKeyName,
kubeadmconstants.ServiceAccountPrivateKeyName, kubeadmconstants.ServiceAccountPublicKeyName,
kubeadmconstants.FrontProxyCACertName, kubeadmconstants.FrontProxyCAKeyName,
kubeadmconstants.FrontProxyClientCertName, kubeadmconstants.FrontProxyClientKeyName,
},
},
{
subCmds: []string{"ca", "apiserver", "apiserver-kubelet-client"},
expectedFiles: []string{
kubeadmconstants.CACertName, kubeadmconstants.CAKeyName,
kubeadmconstants.APIServerCertName, kubeadmconstants.APIServerKeyName,
kubeadmconstants.APIServerKubeletClientCertName, kubeadmconstants.APIServerKubeletClientKeyName,
},
},
{
subCmds: []string{"etcd-ca", "etcd-server", "etcd-peer", "etcd-healthcheck-client", "apiserver-etcd-client"},
expectedFiles: []string{
kubeadmconstants.EtcdCACertName, kubeadmconstants.EtcdCAKeyName,
kubeadmconstants.EtcdServerCertName, kubeadmconstants.EtcdServerKeyName,
kubeadmconstants.EtcdPeerCertName, kubeadmconstants.EtcdPeerKeyName,
kubeadmconstants.EtcdHealthcheckClientCertName, kubeadmconstants.EtcdHealthcheckClientKeyName,
kubeadmconstants.APIServerEtcdClientCertName, kubeadmconstants.APIServerEtcdClientKeyName,
},
},
{
subCmds: []string{"front-proxy-ca", "front-proxy-client"},
expectedFiles: []string{kubeadmconstants.FrontProxyCACertName, kubeadmconstants.FrontProxyCAKeyName, kubeadmconstants.FrontProxyClientCertName, kubeadmconstants.FrontProxyClientKeyName},
},
{
subCmds: []string{"sa"},
expectedFiles: []string{kubeadmconstants.ServiceAccountPrivateKeyName, kubeadmconstants.ServiceAccountPublicKeyName},
},
}
for _, test := range tests {
t.Run(strings.Join(test.subCmds, ","), func(t *testing.T) {
// Create temp folder for the test case
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
cfg := &kubeadmapi.InitConfiguration{
ClusterConfiguration: kubeadmapi.ClusterConfiguration{
API: kubeadmapi.API{AdvertiseAddress: "1.2.3.4", BindPort: 1234},
CertificatesDir: tmpdir,
},
NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: "valid-node-name"},
}
configPath := testutil.SetupInitConfigurationFile(t, tmpdir, cfg)
// executes given sub commands
for _, subCmdName := range test.subCmds {
t.Logf("running subcommand %q", subCmdName)
configFlag := fmt.Sprintf("--config=%s", configPath)
cmdtestutil.RunSubCommand(t, subCmds, subCmdName, configFlag)
}
// verify expected files are there
testutil.AssertFileExists(t, tmpdir, test.expectedFiles...)
})
}
}<|fim▁end|> | kubeadmconstants.FrontProxyClientCertName, kubeadmconstants.FrontProxyClientKeyName,
},
}, |
<|file_name|>foldelf.cpp<|end_file_name|><|fim▁begin|>/* -*- Mode: C++; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* This program reads an ELF file and computes information about
* redundancies.
*/
#include <algorithm>
#include <fstream>
#include <string>
#include <vector>
#include <map>
#include <elf.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <getopt.h>
//----------------------------------------------------------------------
char* opt_type = "func";
char* opt_section = ".text";
//----------------------------------------------------------------------
static void
hexdump(ostream& out, const char* bytes, size_t count)
{
hex(out);
size_t off = 0;
while (off < count) {
out.form("%08lx: ", off);
const char* p = bytes + off;
int j = 0;
while (j < 16) {
out.form("%02x", p[j++] & 0xff);
if (j + off >= count)
break;
out.form("%02x ", p[j++] & 0xff);
if (j + off >= count)
break;
}
// Pad
for (; j < 16; ++j)
out << ((j%2) ? " " : " ");
for (j = 0; j < 16; ++j) {
if (j + off < count)
out.put(isprint(p[j]) ? p[j] : '.');
}
out << endl;
off += 16;
}
}
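// Illustrative sample of the layout produced above (hypothetical 20-byte
// input; exact padding depends on the pad loop):
//   00000000: 7f45 4c46 0101 0100 0000 0000 0000 0000  .ELF............
//   00000010: 0200 0300                                ....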
//----------------------------------------------------------------------
int
verify_elf_header(const Elf32_Ehdr* hdr)
{
if (hdr->e_ident[EI_MAG0] != ELFMAG0
|| hdr->e_ident[EI_MAG1] != ELFMAG1
|| hdr->e_ident[EI_MAG2] != ELFMAG2
|| hdr->e_ident[EI_MAG3] != ELFMAG3) {
cerr << "not an elf file" << endl;
return -1;
}
if (hdr->e_ident[EI_CLASS] != ELFCLASS32) {
cerr << "not a 32-bit elf file" << endl;
return -1;
}
if (hdr->e_ident[EI_DATA] != ELFDATA2LSB) {
cerr << "not a little endian elf file" << endl;
return -1;
}
if (hdr->e_ident[EI_VERSION] != EV_CURRENT) {
cerr << "incompatible version" << endl;
return -1;
}
return 0;
}
//----------------------------------------------------------------------
class elf_symbol : public Elf32_Sym
{
public:
elf_symbol(const Elf32_Sym& sym)
{ ::memcpy(static_cast<Elf32_Sym*>(this), &sym, sizeof(Elf32_Sym)); }
friend bool operator==(const elf_symbol& lhs, const elf_symbol& rhs) {
return 0 == ::memcmp(static_cast<const Elf32_Sym*>(&lhs),
static_cast<const Elf32_Sym*>(&rhs),
sizeof(Elf32_Sym)); }
};
//----------------------------------------------------------------------
static const char*
st_bind(unsigned char info)
{
switch (ELF32_ST_BIND(info)) {
case STB_LOCAL: return "local";
case STB_GLOBAL: return "global";
case STB_WEAK: return "weak";
default: return "unknown";
}
}
static const char*
st_type(unsigned char info)
{
switch (ELF32_ST_TYPE(info)) {
case STT_NOTYPE: return "none";
case STT_OBJECT: return "object";
case STT_FUNC: return "func";
case STT_SECTION: return "section";
case STT_FILE: return "file";
default: return "unknown";
}
}
static unsigned char
st_type(const char* type)
{
if (strcmp(type, "none") == 0) {
return STT_NOTYPE;
}
else if (strcmp(type, "object") == 0) {
return STT_OBJECT;
}
else if (strcmp(type, "func") == 0) {
return STT_FUNC;
}
else {
return 0;
}
}
//----------------------------------------------------------------------
typedef vector<elf_symbol> elf_symbol_table;
typedef map< basic_string<char>, elf_symbol_table > elf_text_map;
void
process_mapping(char* mapping, size_t size)
{
const Elf32_Ehdr* ehdr = reinterpret_cast<Elf32_Ehdr*>(mapping);
if (verify_elf_header(ehdr) < 0)
return;
// find the section headers
const Elf32_Shdr* shdrs = reinterpret_cast<Elf32_Shdr*>(mapping + ehdr->e_shoff);
// find the section header string table, .shstrtab
const Elf32_Shdr* shstrtabsh = shdrs + ehdr->e_shstrndx;
const char* shstrtab = mapping + shstrtabsh->sh_offset;
// find the sections we care about
const Elf32_Shdr *symtabsh = 0, *strtabsh = 0, *textsh = 0;
int textndx = -1;
for (int i = 0; i < ehdr->e_shnum; ++i) {
basic_string<char> name(shstrtab + shdrs[i].sh_name);
if (name == opt_section) {
textsh = shdrs + i;
textndx = i;
}
else if (name == ".symtab") {
symtabsh = shdrs + i;
}
else if (name == ".strtab") {
strtabsh = shdrs + i;
}
}
// bail out if any required section is missing
if (!textsh || !symtabsh || !strtabsh) {
    cerr << "required sections not found" << endl;
    return;
}
// find the .strtab
char* strtab = mapping + strtabsh->sh_offset;
// find the .text
char* text = mapping + textsh->sh_offset;
int textaddr = textsh->sh_addr;
// find the symbol table
int nentries = symtabsh->sh_size / sizeof(Elf32_Sym);
Elf32_Sym* symtab = reinterpret_cast<Elf32_Sym*>(mapping + symtabsh->sh_offset);
// look for symbols in the .text section
elf_text_map textmap;
for (int i = 0; i < nentries; ++i) {
const Elf32_Sym* sym = symtab + i;
if (sym->st_shndx == textndx &&
ELF32_ST_TYPE(sym->st_info) == st_type(opt_type) &&
sym->st_size) {
basic_string<char> functext(text + sym->st_value - textaddr, sym->st_size);
elf_symbol_table& syms = textmap[functext];
if (syms.end() == find(syms.begin(), syms.end(), elf_symbol(*sym)))
syms.insert(syms.end(), *sym);
}
}
int uniquebytes = 0, totalbytes = 0;<|fim▁hole|> entry != textmap.end();
++entry) {
const elf_symbol_table& syms = entry->second;
if (syms.size() <= 1)
continue;
int sz = syms.begin()->st_size;
uniquebytes += sz;
totalbytes += sz * syms.size();
uniquecount += 1;
totalcount += syms.size();
for (elf_symbol_table::const_iterator sym = syms.begin(); sym != syms.end(); ++sym)
cout << strtab + sym->st_name << endl;
dec(cout);
cout << syms.size() << " copies of " << sz << " bytes";
cout << " (" << ((syms.size() - 1) * sz) << " redundant bytes)" << endl;
hexdump(cout, entry->first.data(), entry->first.size());
cout << endl;
}
dec(cout);
cout << "bytes unique=" << uniquebytes << ", total=" << totalbytes << endl;
cout << "entries unique=" << uniquecount << ", total=" << totalcount << endl;
}
void
process_file(const char* name)
{
int fd = open(name, O_RDWR);
if (fd >= 0) {
struct stat statbuf;
if (fstat(fd, &statbuf) >= 0) {
size_t size = statbuf.st_size;
void* mapping = mmap(0, size, PROT_READ, MAP_SHARED, fd, 0);
if (mapping != MAP_FAILED) {
process_mapping(static_cast<char*>(mapping), size);
munmap(mapping, size);
}
}
close(fd);
}
}
static void
usage()
{
cerr << "foldelf [--section=<section>] [--type=<type>] [file ...]\n\
--section, -s the section of the ELF file to scan; defaults\n\
to ``.text''. Valid values include any section\n\
of the ELF file.\n\
--type, -t the type of object to examine in the section;\n\
defaults to ``func''. Valid values include\n\
``none'', ``func'', or ``object''.\n";
}
static struct option opts[] = {
{ "type", required_argument, 0, 't' },
{ "section", required_argument, 0, 's' },
{ "help", no_argument, 0, '?' },
{ 0, 0, 0, 0 }
};
int
main(int argc, char* argv[])
{
while (1) {
int option_index = 0;
int c = getopt_long(argc, argv, "t:s:", opts, &option_index);
if (c < 0) break;
switch (c) {
case 't':
opt_type = optarg;
break;
case 's':
opt_section = optarg;
break;
case '?':
usage();
break;
}
}
for (int i = optind; i < argc; ++i)
process_file(argv[i]);
return 0;
}<|fim▁end|> | int uniquecount = 0, totalcount = 0;
for (elf_text_map::const_iterator entry = textmap.begin(); |
<|file_name|>ZipAssetBundle.cpp<|end_file_name|><|fim▁begin|><|fim▁hole|>#include "StableHeaders.h"
#include "ZipAssetBundle.h"
#include "ZipHelpers.h"
#include "ZipWorker.h"
#include "CoreDefines.h"
#include "Framework.h"
#include "FrameAPI.h"
#include "AssetAPI.h"
#include "AssetCache.h"
#include "LoggingFunctions.h"
#include <Urho3D/IO/FileSystem.h>
#include <zzip/zzip.h>
namespace Tundra
{
ZipAssetBundle::ZipAssetBundle(AssetAPI *owner, const String &type, const String &name) :
IAssetBundle(owner, type, name),
worker_(0),
archive_(0),
fileCount_(-1),
done_(false),
success_(false)
{
}
ZipAssetBundle::~ZipAssetBundle()
{
Unload();
}
void ZipAssetBundle::DoUnload()
{
Close();
StopThread();
fileCount_ = -1;
}
void ZipAssetBundle::Close()
{
if (archive_)
{
zzip_dir_close(archive_);
archive_ = 0;
}
}
bool ZipAssetBundle::DeserializeFromDiskSource()
{
if (!assetAPI_->Cache())
{
LogError("ZipAssetBundle::DeserializeFromDiskSource: Cannot process archive, AssetAPI cache is null.");
return false;
}
else if (DiskSource().Empty())
{
LogError("ZipAssetBundle::DeserializeFromDiskSource: Cannot process archive, no disk source for " + Name());
return false;
}
/* We want to detect if the extracted files are already up to date to save time.
If the last modified date for the sub asset is the same as the parent zip file,
we don't extract it. If the zip is re-downloaded from source everything will get unpacked even
if only one file would have changed inside it. We could do uncompressed size comparisons
but that is not an absolute guarantee that the file has not changed. We'll be on the safe side
to unpack the whole zip file. Zip files are meant for deploying the scene and should be touched
rather rarely. Note that local:// refs are unpacked to cache but the zip's disk source is not in the
cache. Meaning that local:// zip files will always be extracted fully even if the disk source
was not changed, we don't have a mechanism to get the last modified date properly except from
the asset cache. For local scenes this should be fine as there is no real need to
zip the scene up as you already have the disk sources right there in the storage.
The last modified query will fail if the file is open with zziplib, do it first. */
uint zipLastModified = assetAPI_->Cache()->LastModified(Name());
const String diskSourceInternal = Urho3D::GetInternalPath(DiskSource());
zzip_error_t error = ZZIP_NO_ERROR;
archive_ = zzip_dir_open(diskSourceInternal.CString(), &error);
if (CheckAndLogZzipError(error) || CheckAndLogArchiveError(archive_) || !archive_)
{
archive_ = 0;
return false;
}
int uncompressing = 0;
ZZIP_DIRENT archiveEntry;
while(zzip_dir_read(archive_, &archiveEntry))
{
String relativePath = Urho3D::GetInternalPath(archiveEntry.d_name);
if (!relativePath.EndsWith("/"))
{
String subAssetRef = GetFullAssetReference(relativePath);
ZipArchiveFile file;
file.relativePath = relativePath;
file.cachePath = Urho3D::GetInternalPath(assetAPI_->Cache()->DiskSourceByRef(subAssetRef));
file.lastModified = assetAPI_->Cache()->LastModified(subAssetRef);
file.compressedSize = archiveEntry.d_csize;
file.uncompressedSize = archiveEntry.st_size;
/* Mark this file for extraction. If both cache files have valid dates
and they differ extract. If they have the same date stamp skip extraction.
Note that file.lastModified will be non-valid for non cached files so we
will cover also missing files. */
file.doExtract = (zipLastModified > 0 && file.lastModified > 0) ? (zipLastModified != file.lastModified) : true;
if (file.doExtract)
uncompressing++;
files_.Push(file);
fileCount_++;
}
}
// Close the zzip directory ptr
Close();
// If the zip file was empty we don't want IsLoaded to fail on the files_ check.
// The bundle loaded fine but there was no content, log a warning.
if (files_.Empty())
{
LogWarning("ZipAssetBundle: Bundle loaded but does not contain any files " + Name());
files_.Push(ZipArchiveFile());
Loaded.Emit(this);
return true;
}
// Don't spin the worker if all sub assets are up to date in cache.
if (uncompressing > 0)
{
// Now that the file info has been read, continue in a worker thread.
LogDebug("ZipAssetBundle: File information read for " + Name() + ". File count: " + String(files_.Size()) + ". Starting worker thread to uncompress " + String(uncompressing) + " files.");
// ZipWorker runs the extraction in a separate thread; completion is polled in
// CheckDone() on frame updates and the worker is released in StopThread().
worker_ = new ZipWorker(this, zipLastModified, diskSourceInternal, files_);
if (!worker_->Run())
{
LogError("ZipAssetBundle: Failed to start worker thread for " + Name());
files_.Clear();
return false;
}
assetAPI_->GetFramework()->Frame()->Updated.Connect(this, &ZipAssetBundle::CheckDone);
}
else
Loaded.Emit(this);
return true;
}
bool ZipAssetBundle::DeserializeFromData(const u8* /*data*/, uint /*numBytes*/)
{
/** @note At this point it seems zzip needs a disk source to do processing
so we require disk source for the archive. This might change in the future by changing the lib. */
return false;
}
Vector<u8> ZipAssetBundle::GetSubAssetData(const String &subAssetName)
{
/* Makes no sense to keep the whole zip file contents in memory as only
few files could be wanted from a 100mb bundle. Additionally all asset would take 2x the memory.
We could make this function also open the zip file and uncompress the data for every sub asset request.
But that would be rather pointless, not to mention slower, as we already have the unpacked individual
assets on disk. If the unpacking to disk changes we might need to rethink this. */
String filePath = GetSubAssetDiskSource(subAssetName);
if (filePath.Empty())
return Vector<u8>();
Vector<u8> data;
return LoadFileToVector(filePath, data) ? data : Vector<u8>();
}
String ZipAssetBundle::GetSubAssetDiskSource(const String &subAssetName)
{
return assetAPI_->Cache()->FindInCache(GetFullAssetReference(subAssetName));
}
String ZipAssetBundle::GetFullAssetReference(const String &subAssetName)
{
return Name() + "#" + subAssetName;
}
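/* Illustrative only (hypothetical refs): for a bundle named
   "http://assets.example.com/world.zip" and sub asset "textures/wall.png",
   GetFullAssetReference() returns
   "http://assets.example.com/world.zip#textures/wall.png", the key used for
   the asset cache lookups above. */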
bool ZipAssetBundle::IsLoaded() const
{
return (archive_ != 0 || !files_.Empty());
}
void ZipAssetBundle::CheckDone(float /*frametime*/)
{
// Invoked in main thread context
{
Urho3D::MutexLock m(mutexDone_);
if (!done_)
return;
if (success_)
Loaded.Emit(this);
else
Failed.Emit(this);
}
StopThread();
assetAPI_->GetFramework()->Frame()->Updated.Disconnect(this, &ZipAssetBundle::CheckDone);
}
void ZipAssetBundle::WorkerDone(bool successful)
{
// Invoked in worker thread context
Urho3D::MutexLock m(mutexDone_);
done_ = true;
success_ = successful;
}
void ZipAssetBundle::StopThread()
{
if (worker_)
worker_->Stop();
SAFE_DELETE(worker_);
}
Urho3D::Context *ZipAssetBundle::Context() const
{
return assetAPI_->GetContext();
}
Urho3D::FileSystem *ZipAssetBundle::FileSystem() const
{
return assetAPI_->GetSubsystem<Urho3D::FileSystem>();
}
}<|fim▁end|> | // For conditions of distribution and use, see copyright notice in LICENSE
|
<|file_name|>AnnouncementDeleteRequest.ts<|end_file_name|><|fim▁begin|>import {AsyncTask} from "@/objects/task/AsyncTask";
import {TaskCallback} from "@/objects/task/TaskCallback";
import {NetworkCallStatus} from "@/objects/network/NetworkCallStatus";
import {TaskType} from "@/enums/TaskType";
import jqXHR = JQuery.jqXHR;
export class AnnouncementDeleteRequest implements AsyncTask {
private readonly guildId: string;
private readonly anId: string;
readonly callback: TaskCallback;
apiKey: string = "";
apiUrl: string = "";
constructor(guildId: string, anId: string, callback: TaskCallback) {
this.guildId = guildId;
this.anId = anId;
this.callback = callback;
}
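// Illustrative usage sketch (ids and callback are hypothetical):
//   const req = new AnnouncementDeleteRequest("guild-id", "announcement-id", callback);
//   req.provideApiDetails(apiKey, apiUrl);
//   req.execute();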
provideApiDetails(apiKey: string, apiUrl: string): void {
this.apiKey = apiKey;
this.apiUrl = apiUrl;
}
execute(): void {
let bodyRaw: any = {
"guild_id": this.guildId,
"announcement_id": this.anId
};<|fim▁hole|>
$.ajax({
url: this.apiUrl + "/v2/announcement/delete",
headers: {
"Content-Type": "application/json",
"Authorization": this.apiKey
},
method: "POST",
dataType: "json",
data: JSON.stringify(bodyRaw),
success: function (json: any) {
let status = new NetworkCallStatus(true, TaskType.ANNOUNCEMENT_DELETE);
status.code = 200;
status.body = json;
status.message = json.message;
this.onComplete(status);
}.bind(this),
error: function (jqXHR: jqXHR) {
let status = new NetworkCallStatus(false, TaskType.ANNOUNCEMENT_DELETE);
status.code = jqXHR.status;
status.body = jqXHR.responseJSON;
status.message = jqXHR.responseJSON.message;
this.onComplete(status);
}.bind(this)
});
}
onComplete(status: NetworkCallStatus): void {
this.callback.onCallback(status);
}
}<|fim▁end|> | |
<|file_name|>server.js<|end_file_name|><|fim▁begin|>var http = require('http'),
fs = require('fs');
var people = {};
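// Shape of a "people" entry, for illustration only (id and name are made up):
//   people["<socket-id>"] = {
//     name: "alice",
//     html: '<span onclick="msgTo(\'<socket-id>\')" title="...">alice</span>'
//   };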
//var port = process.env.OPENSHIFT_NODEJS_PORT || "1337";
var port = "1337";
//var serverUrl = process.env.OPENSHIFT_NODEJS_IP || "127.0.0.1";
var serverUrl = "127.0.0.1";
var app = http.createServer(function (request, response)
{
//console.log("Server request: " + request.url)
fs.readFile("chat.html", 'utf-8', function (error, data) {
response.writeHead(200, {'Content-Type': 'text/html'});
response.write(data);
response.end();
});
}).listen(port, serverUrl);
console.log("Listening at " + serverUrl + ":" + port);
var io = require('socket.io').listen(app);
io.sockets.on('connection', function(client) {
client.emit('connected');
client.on("join", function(name){
people[client.id] = {name:name, html:'<span onclick="msgTo(\''+client.id+'\')" title="Type a message and click here to send in private">'+name+'</span>'}; //data["name"];<|fim▁hole|> io.sockets.emit("update-people", people);
console.log("New join: " + name);
});
client.on('sendTo', function(id, msg, name){
if (people[client.id] == undefined || people[client.id] == null)
{
people[client.id] = {name:name, html:'<span onclick="msgTo(\''+client.id+'\')" title="Type a message and click here to send in private">'+name+'</span>'}; //data["name"];
io.sockets.to(client.id).emit('messageMe', 'Server', 'You have connected.');
io.sockets.emit("update", name + " has joined the server.")
io.sockets.emit("update-people", people);
console.log("New join: " + name);
}
io.sockets.to(id).emit('messageMe', people[client.id]["name"] + '<span style="color:red"> in PVT</span>', msg);
io.sockets.to(client.id).emit('messageMe', people[client.id]["name"] + '<span style="color:red"> in PVT</span>', msg);
});
client.on("sendAll", function(msg, name){
if (people[client.id] == undefined || people[client.id] == null)
{
people[client.id] = {name:name, html:'<span onclick="msgTo(\''+client.id+'\')" title="Type a message and click here to send in private">'+name+'</span>'}; //data["name"];
io.sockets.to(client.id).emit('messageMe', 'Server', 'You have connected.');
io.sockets.emit("update", name + " has joined the server.")
io.sockets.emit("update-people", people);
console.log("New join: " + name);
}
//console.log("Send message by " + people[client.id] + ": " + msg);
io.sockets.emit("chat", people[client.id]["name"], msg);
});
client.on("disconnect", function(){
if (people[client.id] != undefined){
io.sockets.emit("update", people[client.id]["name"] + " has left the server.");
console.log(people[client.id]["name"] + " was disconnected")
delete people[client.id];
io.sockets.emit("update-people", people);
}
});
});<|fim▁end|> | io.sockets.to(client.id).emit('messageMe', 'Server', 'You have connected.');
io.sockets.emit("update", name + " has joined the server.") |
<|file_name|>header.hpp<|end_file_name|><|fim▁begin|>//
// header.hpp
// ~~~~~~~~~~
//
// Copyright (c) 2003-2018 Christopher M. Kohlhoff (chris at kohlhoff dot com)<|fim▁hole|>//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef HTTP_SERVER3_HEADER_HPP
#define HTTP_SERVER3_HEADER_HPP
#include <string>
namespace http {
namespace server3 {
struct header
{
std::string name;
std::string value;
};
} // namespace server3
} // namespace http
#endif // HTTP_SERVER3_HEADER_HPP<|fim▁end|> | |
<|file_name|>gettext.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
sphinx.builders.gettext
~~~~~~~~~~~~~~~~~~~~~~~
The MessageCatalogBuilder class.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
from os import path, walk
from codecs import open
from time import time
from datetime import datetime, tzinfo, timedelta
from collections import defaultdict
from uuid import uuid4
from six import iteritems
from sphinx.builders import Builder
from sphinx.util import split_index_msg
from sphinx.util.nodes import extract_messages, traverse_translatable_index<|fim▁hole|>from sphinx.util.console import darkgreen, purple, bold
from sphinx.locale import pairindextypes
POHEADER = r"""
# SOME DESCRIPTIVE TITLE.
# Copyright (C) %(copyright)s
# This file is distributed under the same license as the %(project)s package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: %(project)s %(version)s\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: %(ctime)s\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"""[1:]
class Catalog(object):
"""Catalog of translatable messages."""
def __init__(self):
self.messages = [] # retain insertion order, a la OrderedDict
self.metadata = {} # msgid -> file, line, uid
def add(self, msg, origin):
if not hasattr(origin, 'uid'):
# Nodes that are replicated like todo don't have a uid,
# however i18n is also unnecessary.
return
if msg not in self.metadata: # faster lookup in hash
self.messages.append(msg)
self.metadata[msg] = []
self.metadata[msg].append((origin.source, origin.line, origin.uid))
class MsgOrigin(object):
"""
Origin holder for Catalog message origin.
"""
def __init__(self, source, line):
self.source = source
self.line = line
self.uid = uuid4().hex
class I18nBuilder(Builder):
"""
General i18n builder.
"""
name = 'i18n'
versioning_method = 'text'
versioning_compare = None  # set from the `gettext_uuid` config value
def __init__(self, app):
self.versioning_compare = app.env.config.gettext_uuid
super(I18nBuilder, self).__init__(app)
def init(self):
Builder.init(self)
self.catalogs = defaultdict(Catalog)
def get_target_uri(self, docname, typ=None):
return ''
def get_outdated_docs(self):
return self.env.found_docs
def prepare_writing(self, docnames):
return
def compile_catalogs(self, catalogs, message):
return
def write_doc(self, docname, doctree):
catalog = self.catalogs[find_catalog(docname,
self.config.gettext_compact)]
for node, msg in extract_messages(doctree):
catalog.add(msg, node)
if 'index' in self.env.config.gettext_additional_targets:
# Extract translatable messages from index entries.
for node, entries in traverse_translatable_index(doctree):
for typ, msg, tid, main in entries:
for m in split_index_msg(typ, msg):
if typ == 'pair' and m in pairindextypes.values():
# avoid built-in translated message was incorporated
# in 'sphinx.util.nodes.process_index_entry'
continue
catalog.add(m, node)
# determine tzoffset once to remain unaffected by DST change during build
timestamp = time()
tzdelta = datetime.fromtimestamp(timestamp) - \
datetime.utcfromtimestamp(timestamp)
class LocalTimeZone(tzinfo):
def __init__(self, *args, **kw):
super(LocalTimeZone, self).__init__(*args, **kw)
self.tzdelta = tzdelta
def utcoffset(self, dt):
return self.tzdelta
def dst(self, dt):
return timedelta(0)
ltz = LocalTimeZone()
class MessageCatalogBuilder(I18nBuilder):
"""
Builds gettext-style message catalogs (.pot files).
"""
name = 'gettext'
def init(self):
I18nBuilder.init(self)
self.create_template_bridge()
self.templates.init(self)
def _collect_templates(self):
template_files = set()
for template_path in self.config.templates_path:
tmpl_abs_path = path.join(self.app.srcdir, template_path)
for dirpath, dirs, files in walk(tmpl_abs_path):
for fn in files:
if fn.endswith('.html'):
filename = path.join(dirpath, fn)
filename = filename.replace(path.sep, SEP)
template_files.add(filename)
return template_files
def _extract_from_template(self):
files = self._collect_templates()
self.info(bold('building [%s]: ' % self.name), nonl=1)
self.info('targets for %d template files' % len(files))
extract_translations = self.templates.environment.extract_translations
for template in self.app.status_iterator(
files, 'reading templates... ', purple, len(files)):
with open(template, 'r', encoding='utf-8') as f:
context = f.read()
for line, meth, msg in extract_translations(context):
origin = MsgOrigin(template, line)
self.catalogs['sphinx'].add(msg, origin)
def build(self, docnames, summary=None, method='update'):
self._extract_from_template()
I18nBuilder.build(self, docnames, summary, method)
def finish(self):
I18nBuilder.finish(self)
data = dict(
version = self.config.version,
copyright = self.config.copyright,
project = self.config.project,
ctime = datetime.fromtimestamp(
timestamp, ltz).strftime('%Y-%m-%d %H:%M%z'),
)
for textdomain, catalog in self.app.status_iterator(
iteritems(self.catalogs), "writing message catalogs... ",
darkgreen, len(self.catalogs),
lambda textdomain__: textdomain__[0]):
# noop if config.gettext_compact is set
ensuredir(path.join(self.outdir, path.dirname(textdomain)))
pofn = path.join(self.outdir, textdomain + '.pot')
pofile = open(pofn, 'w', encoding='utf-8')
try:
pofile.write(POHEADER % data)
for message in catalog.messages:
positions = catalog.metadata[message]
if self.config.gettext_location:
# generate "#: file1:line1\n#: file2:line2 ..."
pofile.write("#: %s\n" % "\n#: ".join(
"%s:%s" % (safe_relpath(source, self.outdir), line)
for source, line, _ in positions))
if self.config.gettext_uuid:
# generate "# uuid1\n# uuid2\n ..."
pofile.write("# %s\n" % "\n# ".join(
uid for _, _, uid in positions))
# message contains *one* line of text ready for translation
message = message.replace('\\', r'\\'). \
replace('"', r'\"'). \
replace('\n', '\\n"\n"')
pofile.write('msgid "%s"\nmsgstr ""\n\n' % message)
finally:
pofile.close()<|fim▁end|> | from sphinx.util.osutil import safe_relpath, ensuredir, SEP
from sphinx.util.i18n import find_catalog |
<|file_name|>main.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from flask import Flask, render_template, session, redirect, url_for, flash
from flask_wtf import FlaskForm
from flask_bootstrap import Bootstrap
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager, Shell
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['SECRET_KEY'] = 'hard to guess string'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
Bootstrap(app)
db = SQLAlchemy(app)
manager = Manager(app)
migrate = Migrate(app, db)
class NameForm(FlaskForm):
name = StringField('What is your name? ', validators=[DataRequired()])
submit = SubmitField('Submit :233')
# model definition
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
users = db.relationship('User', backref='role', lazy='dynamic')
def __repr__(self):
return '<Role %r>' % self.name
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
def __repr__(self):
return '<User %r>' % self.username
# View Functions
@app.route('/', methods=['GET', 'POST'])
def index():
form = NameForm()
if form.validate_on_submit():
old_name = session.get('name')
if old_name is not None and old_name != form.name.data:
flash('名字已经修改完成!')
user = User.query.filter_by(username=form.name.data).first()
if user is None:
user = User(username=form.name.data)
db.session.add(user)
session['known'] = False
else:
session['known'] = True
session['name'] = form.name.data
form.name.data = ''
return redirect(url_for('index'))
return render_template('index.html',
form=form,
name=session.get('name', None),
known=session.get('known', False))
<|fim▁hole|>
if __name__ == "__main__":
# app.run(debug=True)
# 自动在 shell 中导入 app db User Role
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
manager.run()<|fim▁end|> | def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role) |
<|file_name|>oom_intervention_impl.cc<|end_file_name|><|fim▁begin|>// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/controller/oom_intervention_impl.h"
#include <algorithm>
#include <memory>
#include <utility>
#include "base/bind.h"
#include "base/debug/crash_logging.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "third_party/blink/public/platform/platform.h"
#include "third_party/blink/renderer/bindings/core/v8/v8_gc_for_context_dispose.h"
#include "third_party/blink/renderer/controller/crash_memory_metrics_reporter_impl.h"
#include "third_party/blink/renderer/core/frame/local_dom_window.h"
#include "third_party/blink/renderer/core/frame/local_frame.h"
#include "third_party/blink/renderer/core/loader/frame_load_request.h"
#include "third_party/blink/renderer/core/page/page.h"
#include "third_party/blink/renderer/platform/bindings/v8_per_isolate_data.h"
namespace blink {
namespace {
enum class OomInterventionState {
// Initial value for a variable.
None,
// Before the intervention has been triggered.
Before,
// While the intervention is active.
During,
// After the intervention has triggered at least once.
After
};
void UpdateStateCrashKey(OomInterventionState next_state) {
static OomInterventionState current_state = OomInterventionState::None;
// Once an intervention is trigger, the state shall never go back to the
// Before state.
if (next_state == OomInterventionState::Before &&
current_state != OomInterventionState::None)
return;
if (current_state == next_state)
return;
current_state = next_state;
static auto* crash_key = base::debug::AllocateCrashKeyString(
"oom_intervention_state", base::debug::CrashKeySize::Size32);
switch (current_state) {
case OomInterventionState::None:
base::debug::SetCrashKeyString(crash_key, "none");
break;
case OomInterventionState::Before:
base::debug::SetCrashKeyString(crash_key, "before");
break;
case OomInterventionState::During:
base::debug::SetCrashKeyString(crash_key, "during");
break;
case OomInterventionState::After:
base::debug::SetCrashKeyString(crash_key, "after");
break;
}
}
void NavigateLocalAdsFrames(LocalFrame* frame) {
// This navigates all the frames detected as an advertisement to about:blank.
DCHECK(frame);
for (Frame* child = frame->Tree().FirstChild(); child;
child = child->Tree().TraverseNext(frame)) {
if (auto* child_local_frame = DynamicTo<LocalFrame>(child)) {
if (child_local_frame->IsAdSubframe()) {
FrameLoadRequest request(frame->DomWindow(),
ResourceRequest(BlankURL()));
child_local_frame->Navigate(request, WebFrameLoadType::kStandard);
}
}
// TODO(yuzus): Once AdsTracker for remote frames is implemented and OOPIF
// is enabled on low-end devices, navigate remote ads as well.
}
}
OomInterventionImpl& GetOomIntervention() {
DEFINE_STATIC_LOCAL(OomInterventionImpl, oom_intervention, ());
return oom_intervention;
}
} // namespace
// static
void OomInterventionImpl::BindReceiver(
mojo::PendingReceiver<mojom::blink::OomIntervention> receiver) {
GetOomIntervention().Bind(std::move(receiver));
}
OomInterventionImpl::OomInterventionImpl()
: delayed_report_timer_(Thread::MainThread()->GetTaskRunner(),
this,
&OomInterventionImpl::TimerFiredUMAReport) {
UpdateStateCrashKey(OomInterventionState::Before);
}
OomInterventionImpl::~OomInterventionImpl() {
UpdateStateCrashKey(OomInterventionState::After);
MemoryUsageMonitorInstance().RemoveObserver(this);
}
void OomInterventionImpl::Bind(
mojo::PendingReceiver<mojom::blink::OomIntervention> receiver) {
// This interface can be bound multiple time, however, there should never be
// multiple callers bound at a time.
Reset();
receiver_.Bind(std::move(receiver));
// Disconnection means the user closed the dialog without activating the OOM
// intervention.
receiver_.set_disconnect_handler(
base::BindOnce(&OomInterventionImpl::Reset, base::Unretained(this)));
}
void OomInterventionImpl::Reset() {
receiver_.reset();
host_.reset();
pauser_.reset();
MemoryUsageMonitorInstance().RemoveObserver(this);
}
void OomInterventionImpl::StartDetection(
mojo::PendingRemote<mojom::blink::OomInterventionHost> host,
mojom::blink::DetectionArgsPtr detection_args,
bool renderer_pause_enabled,
bool navigate_ads_enabled,
bool purge_v8_memory_enabled) {
host_.Bind(std::move(host));
detection_args_ = std::move(detection_args);
renderer_pause_enabled_ = renderer_pause_enabled;
navigate_ads_enabled_ = navigate_ads_enabled;
purge_v8_memory_enabled_ = purge_v8_memory_enabled;
MemoryUsageMonitorInstance().AddObserver(this);
}
MemoryUsageMonitor& OomInterventionImpl::MemoryUsageMonitorInstance() {
return MemoryUsageMonitor::Instance();
}
void OomInterventionImpl::OnMemoryPing(MemoryUsage usage) {
// Ignore pings without process memory usage information.
if (std::isnan(usage.private_footprint_bytes) ||
std::isnan(usage.swap_bytes) || std::isnan(usage.vm_size_bytes))
return;
Check(usage);
}
void OomInterventionImpl::Check(MemoryUsage usage) {
DCHECK(host_);
OomInterventionMetrics current_memory =
CrashMemoryMetricsReporterImpl::MemoryUsageToMetrics(usage);
bool oom_detected = false;
oom_detected |= detection_args_->blink_workload_threshold > 0 &&
current_memory.current_blink_usage_kb * 1024 >
detection_args_->blink_workload_threshold;
oom_detected |= detection_args_->private_footprint_threshold > 0 &&
current_memory.current_private_footprint_kb * 1024 >
detection_args_->private_footprint_threshold;
oom_detected |=
detection_args_->swap_threshold > 0 &&
current_memory.current_swap_kb * 1024 > detection_args_->swap_threshold;
oom_detected |= detection_args_->virtual_memory_thresold > 0 &&
current_memory.current_vm_size_kb * 1024 >
detection_args_->virtual_memory_thresold;
// Report memory stats every second to send UMA.
ReportMemoryStats(current_memory);
if (oom_detected) {
UpdateStateCrashKey(OomInterventionState::During);
UMA_HISTOGRAM_MEMORY_MB(
"Memory.Experimental.OomIntervention.V8UsageBefore",
base::saturated_cast<int>(usage.v8_bytes / 1024 / 1024));
if (navigate_ads_enabled_ || purge_v8_memory_enabled_) {
for (const auto& page : Page::OrdinaryPages()) {
for (Frame* frame = page->MainFrame(); frame;
frame = frame->Tree().TraverseNext()) {
auto* local_frame = DynamicTo<LocalFrame>(frame);
if (!local_frame)
continue;
if (navigate_ads_enabled_)
NavigateLocalAdsFrames(local_frame);
if (purge_v8_memory_enabled_)
local_frame->ForciblyPurgeV8Memory();
}
}
}
if (renderer_pause_enabled_) {
// The ScopedPagePauser is destroyed when the intervention is declined and
// mojo strong binding is disconnected.
pauser_ = std::make_unique<ScopedPagePauser>();
}
<|fim▁hole|> Thread::MainThread()->GetTaskRunner()->PostTask(FROM_HERE,
base::BindOnce(&TriggerGC));
// Notify V8GCForContextDispose that page navigation gc is needed when
// intervention runs, as it indicates that memory usage is high.
V8GCForContextDispose::Instance().SetForcePageNavigationGC();
// Report the memory impact of intervention after 10, 20, 30 seconds.
metrics_at_intervention_ = current_memory;
number_of_report_needed_ = 3;
delayed_report_timer_.StartRepeating(base::Seconds(10), FROM_HERE);
}
}
void OomInterventionImpl::ReportMemoryStats(
OomInterventionMetrics& current_memory) {
UMA_HISTOGRAM_MEMORY_MB(
"Memory.Experimental.OomIntervention.RendererBlinkUsage",
base::saturated_cast<base::Histogram::Sample>(
current_memory.current_blink_usage_kb / 1024));
UMA_HISTOGRAM_MEMORY_LARGE_MB(
"Memory.Experimental.OomIntervention."
"RendererPrivateMemoryFootprint",
base::saturated_cast<base::Histogram::Sample>(
current_memory.current_private_footprint_kb / 1024));
UMA_HISTOGRAM_MEMORY_MB(
"Memory.Experimental.OomIntervention.RendererSwapFootprint",
base::saturated_cast<base::Histogram::Sample>(
current_memory.current_swap_kb / 1024));
UMA_HISTOGRAM_MEMORY_LARGE_MB(
"Memory.Experimental.OomIntervention.RendererVmSize",
base::saturated_cast<base::Histogram::Sample>(
current_memory.current_vm_size_kb / 1024));
}
int ToMemoryUsageDeltaSample(uint64_t after_kb, uint64_t before_kb) {
int delta_mb = (base::saturated_cast<int>(before_kb) -
base::saturated_cast<int>(after_kb)) /
1024;
return std::min(std::max(delta_mb, -500), 500);
}
void OomInterventionImpl::TimerFiredUMAReport(TimerBase*) {
MemoryUsage usage = MemoryUsageMonitorInstance().GetCurrentMemoryUsage();
OomInterventionMetrics current_memory =
CrashMemoryMetricsReporterImpl::MemoryUsageToMetrics(usage);
int blink_usage_delta =
ToMemoryUsageDeltaSample(current_memory.current_blink_usage_kb,
metrics_at_intervention_.current_blink_usage_kb);
int private_footprint_delta = ToMemoryUsageDeltaSample(
current_memory.current_private_footprint_kb,
metrics_at_intervention_.current_private_footprint_kb);
int v8_usage_mb = base::saturated_cast<int>(usage.v8_bytes / 1024 / 1024);
switch (number_of_report_needed_--) {
case 3:
UMA_HISTOGRAM_MEMORY_MB(
"Memory.Experimental.OomIntervention.V8UsageAfter10secs",
v8_usage_mb);
base::UmaHistogramSparse(
"Memory.Experimental.OomIntervention.ReducedBlinkUsageAfter10secs2",
blink_usage_delta);
base::UmaHistogramSparse(
"Memory.Experimental.OomIntervention.ReducedRendererPMFAfter10secs2",
private_footprint_delta);
break;
case 2:
UMA_HISTOGRAM_MEMORY_MB(
"Memory.Experimental.OomIntervention.V8UsageAfter20secs",
v8_usage_mb);
base::UmaHistogramSparse(
"Memory.Experimental.OomIntervention.ReducedBlinkUsageAfter20secs2",
blink_usage_delta);
base::UmaHistogramSparse(
"Memory.Experimental.OomIntervention.ReducedRendererPMFAfter20secs2",
private_footprint_delta);
break;
case 1:
UMA_HISTOGRAM_MEMORY_MB(
"Memory.Experimental.OomIntervention.V8UsageAfter30secs",
v8_usage_mb);
base::UmaHistogramSparse(
"Memory.Experimental.OomIntervention.ReducedBlinkUsageAfter30secs2",
blink_usage_delta);
base::UmaHistogramSparse(
"Memory.Experimental.OomIntervention.ReducedRendererPMFAfter30secs2",
private_footprint_delta);
delayed_report_timer_.Stop();
break;
}
}
void OomInterventionImpl::TriggerGC() {
V8PerIsolateData::MainThreadIsolate()->MemoryPressureNotification(
v8::MemoryPressureLevel::kCritical);
}
} // namespace blink<|fim▁end|> | host_->OnHighMemoryUsage();
MemoryUsageMonitorInstance().RemoveObserver(this);
// Send memory pressure notification to trigger GC. |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright (c) <2015> <lummax>
// Licensed under MIT (http://opensource.org/licenses/MIT)
<|fim▁hole|><|fim▁end|> | pub mod wayland; |
<|file_name|>std-uncopyable-atomics.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your<|fim▁hole|>// Issue #8380
#[feature(globs)];
use std::unstable::atomics::*;
use std::ptr;
fn main() {
let x = INIT_ATOMIC_FLAG;
let x = *&x; //~ ERROR: cannot move out of dereference
let x = INIT_ATOMIC_BOOL;
let x = *&x; //~ ERROR: cannot move out of dereference
let x = INIT_ATOMIC_INT;
let x = *&x; //~ ERROR: cannot move out of dereference
let x = INIT_ATOMIC_UINT;
let x = *&x; //~ ERROR: cannot move out of dereference
let x: AtomicPtr<uint> = AtomicPtr::new(ptr::mut_null());
let x = *&x; //~ ERROR: cannot move out of dereference
let x: AtomicOption<uint> = AtomicOption::empty();
let x = *&x; //~ ERROR: cannot move out of dereference
}<|fim▁end|> | // option. This file may not be copied, modified, or distributed
// except according to those terms.
|
<|file_name|>get_public_ipaddr.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import re,urllib2
class Get_public_ip:
def getip(self):
try:
myip = self.visit("http://ip.chinaz.com/getip.aspx")
except:
try:
myip = self.visit("http://ipv4.icanhazip.com/")
except:
myip = "So sorry!!!"
return myip
def visit(self,url):
opener = urllib2.urlopen(url)
if url == opener.geturl():
str = opener.read()
return re.search('\d+\.\d+\.\d+\.\d+',str).group(0)
<|fim▁hole|><|fim▁end|> | if __name__ == "__main__":
getmyip = Get_public_ip()
print getmyip.getip() |
<|file_name|>application.py<|end_file_name|><|fim▁begin|>from selenium.webdriver.chrome.webdriver import WebDriver
from selenium import webdriver<|fim▁hole|>from fixture.mk import MkHelper
from fixture.cas import CasHelper
class Application:
def __init__(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
self.session = SessionHelper(self)
self.mk = MkHelper(self)
self.cas = CasHelper(self)
def open_home_page(self):
wd = self.wd
wd.get("https://new.kyivstar.ua/ecare/")
wd.maximize_window()
def destroy(self):
self.wd.quit()
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False<|fim▁end|> | from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from fixture.session import SessionHelper |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>#![deny(warnings)]
use cases::case::*;
/// Converts a `&str` to `Title Case` `String`
///
/// ```
/// use inflector::cases::titlecase::to_title_case;
/// let mock_string: &str = "Foo bar";
/// let expected_string: String = "Foo Bar".to_string();
/// let asserted_string: String = to_title_case(mock_string);
/// assert!(asserted_string == expected_string);
///
/// ```
/// ```
/// use inflector::cases::titlecase::to_title_case;
/// let mock_string: &str = "FooBar";
/// let expected_string: String = "Foo Bar".to_string();
/// let asserted_string: String = to_title_case(mock_string);
/// assert!(asserted_string == expected_string);
///
/// ```
/// ```
/// use inflector::cases::titlecase::to_title_case;
/// let mock_string: &str = "fooBar";
/// let expected_string: String = "Foo Bar".to_string();
/// let asserted_string: String = to_title_case(mock_string);
/// assert!(asserted_string == expected_string);
///
/// ```
/// ```
/// use inflector::cases::titlecase::to_title_case;
/// let mock_string: &str = "FOO_BAR";
/// let expected_string: String = "Foo Bar".to_string();
/// let asserted_string: String = to_title_case(mock_string);
/// assert!(asserted_string == expected_string);
///
/// ```
/// ```
/// use inflector::cases::titlecase::to_title_case;
/// let mock_string: &str = "foo_bar";
/// let expected_string: String = "Foo Bar".to_string();
/// let asserted_string: String = to_title_case(mock_string);
/// assert!(asserted_string == expected_string);
///
/// ```
/// ```
/// use inflector::cases::titlecase::to_title_case;
/// let mock_string: &str = "foo-bar";
/// let expected_string: String = "Foo Bar".to_string();
/// let asserted_string: String = to_title_case(mock_string);
/// assert!(asserted_string == expected_string);
///
/// ```
pub fn to_title_case(non_title_case_string: &str) -> String {
let options = CamelOptions {
new_word: true,
last_char: ' ',
first_word: true,
injectable_char: ' ',
has_seperator: true,
inverted: false,
};
to_case_camel_like(non_title_case_string, options)
}
/// Determines if a `&str` is `Title Case`
///
/// ```
/// use inflector::cases::titlecase::is_title_case;
/// let mock_string: &str = "foo-bar-string-that-is-really-really-long";
/// let asserted_bool: bool = is_title_case(mock_string);
/// assert!(asserted_bool == false);
///
/// ```
/// ```
/// use inflector::cases::titlecase::is_title_case;
/// let mock_string: &str = "FooBarIsAReallyReallyLongString";
/// let asserted_bool: bool = is_title_case(mock_string);
/// assert!(asserted_bool == false);
///
/// ```
/// ```
/// use inflector::cases::titlecase::is_title_case;
/// let mock_string: &str = "fooBarIsAReallyReallyLongString";
/// let asserted_bool: bool = is_title_case(mock_string);
/// assert!(asserted_bool == false);
///
/// ```
/// ```
/// use inflector::cases::titlecase::is_title_case;
/// let mock_string: &str = "FOO_BAR_STRING_THAT_IS_REALLY_REALLY_LONG";
/// let asserted_bool: bool = is_title_case(mock_string);
/// assert!(asserted_bool == false);
///
/// ```
/// ```
/// use inflector::cases::titlecase::is_title_case;
/// let mock_string: &str = "foo_bar_string_that_is_really_really_long";
/// let asserted_bool: bool = is_title_case(mock_string);
/// assert!(asserted_bool == false);
///
/// ```
/// ```
/// use inflector::cases::titlecase::is_title_case;
/// let mock_string: &str = "Foo bar string that is really really long";
/// let asserted_bool: bool = is_title_case(mock_string);
/// assert!(asserted_bool == false);
///
/// ```
/// ```
/// use inflector::cases::titlecase::is_title_case;
/// let mock_string: &str = "foo";
/// let asserted_bool: bool = is_title_case(mock_string);
/// assert!(asserted_bool == false);
///
/// ```
/// ```
/// use inflector::cases::titlecase::is_title_case;
/// let mock_string: &str = "Foo Bar String That Is Really Really Long";
/// let asserted_bool: bool = is_title_case(mock_string);
/// assert!(asserted_bool == true);
///
/// ```
pub fn is_title_case(test_string: &str) -> bool {
test_string == to_title_case(test_string.clone())
}
#[cfg(all(feature = "unstable", test))]
mod benchmarks {
extern crate test;
use self::test::Bencher;
#[bench]
fn bench_title(b: &mut Bencher) {
b.iter(|| super::to_title_case("Foo BAR"));
}
#[bench]
fn bench_is_title(b: &mut Bencher) {
b.iter(|| super::is_title_case("Foo bar"));
}
#[bench]
fn bench_title_from_snake(b: &mut Bencher) {
b.iter(|| super::to_title_case("foo_bar"));
}
}
<|fim▁hole|> use ::to_title_case;
use ::is_title_case;
#[test]
fn from_camel_case() {
let convertable_string: String = "fooBar".to_owned();
let expected: String = "Foo Bar".to_owned();
assert_eq!(to_title_case(&convertable_string), expected)
}
#[test]
fn from_pascal_case() {
let convertable_string: String = "FooBar".to_owned();
let expected: String = "Foo Bar".to_owned();
assert_eq!(to_title_case(&convertable_string), expected)
}
#[test]
fn from_kebab_case() {
let convertable_string: String = "foo-bar".to_owned();
let expected: String = "Foo Bar".to_owned();
assert_eq!(to_title_case(&convertable_string), expected)
}
#[test]
fn from_sentence_case() {
let convertable_string: String = "Foo bar".to_owned();
let expected: String = "Foo Bar".to_owned();
assert_eq!(to_title_case(&convertable_string), expected)
}
#[test]
fn from_title_case() {
let convertable_string: String = "Foo Bar".to_owned();
let expected: String = "Foo Bar".to_owned();
assert_eq!(to_title_case(&convertable_string), expected)
}
#[test]
fn from_train_case() {
let convertable_string: String = "Foo-Bar".to_owned();
let expected: String = "Foo Bar".to_owned();
assert_eq!(to_title_case(&convertable_string), expected)
}
#[test]
fn from_screaming_snake_case() {
let convertable_string: String = "FOO_BAR".to_owned();
let expected: String = "Foo Bar".to_owned();
assert_eq!(to_title_case(&convertable_string), expected)
}
#[test]
fn from_snake_case() {
let convertable_string: String = "foo_bar".to_owned();
let expected: String = "Foo Bar".to_owned();
assert_eq!(to_title_case(&convertable_string), expected)
}
#[test]
fn from_case_with_loads_of_space() {
let convertable_string: String = "foo bar".to_owned();
let expected: String = "Foo Bar".to_owned();
assert_eq!(to_title_case(&convertable_string), expected)
}
#[test]
fn a_name_with_a_dot() {
let convertable_string: String = "Robert C. Martin".to_owned();
let expected: String = "Robert C Martin".to_owned();
assert_eq!(to_title_case(&convertable_string), expected)
}
#[test]
fn random_text_with_bad_chars() {
let convertable_string: String = "Random text with *(bad) chars".to_owned();
let expected: String = "Random Text With Bad Chars".to_owned();
assert_eq!(to_title_case(&convertable_string), expected)
}
#[test]
fn trailing_bad_chars() {
let convertable_string: String = "trailing bad_chars*(()())".to_owned();
let expected: String = "Trailing Bad Chars".to_owned();
assert_eq!(to_title_case(&convertable_string), expected)
}
#[test]
fn leading_bad_chars() {
let convertable_string: String = "-!#$%leading bad chars".to_owned();
let expected: String = "Leading Bad Chars".to_owned();
assert_eq!(to_title_case(&convertable_string), expected)
}
#[test]
fn wrapped_in_bad_chars() {
let convertable_string: String = "-!#$%wrapped in bad chars&*^*&(&*^&(<><?>><?><>))".to_owned();
let expected: String = "Wrapped In Bad Chars".to_owned();
assert_eq!(to_title_case(&convertable_string), expected)
}
#[test]
fn has_a_sign() {
let convertable_string: String = "has a + sign".to_owned();
let expected: String = "Has A Sign".to_owned();
assert_eq!(to_title_case(&convertable_string), expected)
}
#[test]
fn is_correct_from_camel_case() {
let convertable_string: String = "fooBar".to_owned();
assert_eq!(is_title_case(&convertable_string), false)
}
#[test]
fn is_correct_from_pascal_case() {
let convertable_string: String = "FooBar".to_owned();
assert_eq!(is_title_case(&convertable_string), false)
}
#[test]
fn is_correct_from_kebab_case() {
let convertable_string: String = "foo-bar".to_owned();
assert_eq!(is_title_case(&convertable_string), false)
}
#[test]
fn is_correct_from_sentence_case() {
let convertable_string: String = "Foo bar".to_owned();
assert_eq!(is_title_case(&convertable_string), false)
}
#[test]
fn is_correct_from_title_case() {
let convertable_string: String = "Foo Bar".to_owned();
assert_eq!(is_title_case(&convertable_string), true)
}
#[test]
fn is_correct_from_train_case() {
let convertable_string: String = "Foo-Bar".to_owned();
assert_eq!(is_title_case(&convertable_string), false)
}
#[test]
fn is_correct_from_screaming_snake_case() {
let convertable_string: String = "FOO_BAR".to_owned();
assert_eq!(is_title_case(&convertable_string), false)
}
#[test]
fn is_correct_from_snake_case() {
let convertable_string: String = "foo_bar".to_owned();
assert_eq!(is_title_case(&convertable_string), false)
}
}<|fim▁end|> |
#[cfg(test)]
mod tests { |
<|file_name|>synchronous-script-tag.js<|end_file_name|><|fim▁begin|>var Tag = require('./tag');
var ScriptTag = require('./script-tag');
var Utils = require('./../utils');
/**
* Script tag class that is loaded in a synchronous way.
*
* This is the class that will generate script tags that will be appended to the
* page using the document.write method.
*
* @param string The tag data.
* @param TagLoader The loader instance that has instantiated the tag.
*
* @return void
*/
class SynchronousScriptTag extends ScriptTag {
constructor(data = {}, loader_instance) {
super(data, loader_instance);
this.data = Utils.mergeObject(this.data, {
attributes: {
async: false,
defer: false
}
}, false);
this.data = Utils.mergeObject(this.data, data);
if (this.data !== data) {
this.data = Utils.getSanitizedObject(this.data, Tag.properties);
}
}
/**
* Returns the script node that will be appended to the DOM.
*
* @return HTMLElement
*/
getDomNode() {
var s, data;
if (!this.data.src) {
return false;
}
if (Utils.isDomReady() === true) {
data = Utils.mergeObject({}, this.data, false);
data.type = 'script';
this.loader_instance.addToQueue([data]);
return false;<|fim▁hole|> }
s = this.getScriptNode(false);
s.text = this.getDomNodeSource(s);
return s;
}
/**
* Returns the JS code that will insert the script source using
* document.write.
*
* @return string
*/
getDomNodeSource(s) {
var text;
text = 'document.write(\'<script src="' + this.data.src + '"';
text += ' id="' + this.data.id + '"';
if (s.addEventListener) {
text += ' onload="' + this.getOnTagLoadPageCode() + '"';
} else {
text += ' onreadystatechange="' + this.getIeOnLoadFunction() + '"';
}
text += '></scr' + 'ipt>\');';
return text;
}
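// For illustration only (hypothetical src and id), the string built above
// expands to roughly:
//   document.write('<script src="https://example.com/tag.js" id="tag-1"
//     onload="..."></scr' + 'ipt>');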
/**
* Returns function that will be called only on older IE versions when the
* tag has been loaded by the browser.
*
* @return string
*/
getIeOnLoadFunction() {
var text = '';
text += 'if (this.addEventListener || ';
text += 'this.amc_load || ';
text += '(this.readyState && ';
text += 'this.readyState !== \\\'complete\\\')';
text += ') { return; } ';
text += 'this.amc_load = true; ';
text += this.getOnTagLoadPageCode();
return text;
}
}
module.exports = SynchronousScriptTag;<|fim▁end|> | |
<|file_name|>NodeQueryCacheConfigurator.java<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2008-2018, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.map.impl.querycache.subscriber;
import com.hazelcast.config.Config;
import com.hazelcast.config.MapConfig;
import com.hazelcast.config.QueryCacheConfig;
import com.hazelcast.internal.config.ConfigUtils;
import com.hazelcast.map.impl.querycache.QueryCacheConfigurator;
import com.hazelcast.map.impl.querycache.QueryCacheEventService;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
/**
* Node side implementation of {@link QueryCacheConfigurator}.
*
* @see QueryCacheConfigurator
*/
public class NodeQueryCacheConfigurator extends AbstractQueryCacheConfigurator {
private final Config config;
public NodeQueryCacheConfigurator(Config config, ClassLoader configClassLoader,
QueryCacheEventService eventService) {
super(configClassLoader, eventService);
this.config = config;
}
<|fim▁hole|> QueryCacheConfig queryCacheConfig = findQueryCacheConfigFromMapConfig(mapConfig, cacheName);
if (queryCacheConfig != null) {
setPredicateImpl(queryCacheConfig);
setEntryListener(mapName, cacheId, queryCacheConfig);
return queryCacheConfig;
}
QueryCacheConfig newConfig = new QueryCacheConfig(cacheName);
mapConfig.getQueryCacheConfigs().add(newConfig);
return newConfig;
}
@Override
public QueryCacheConfig getOrNull(String mapName, String cacheName, String cacheId) {
MapConfig mapConfig = config.getMapConfigOrNull(mapName);
if (mapConfig == null) {
return null;
}
QueryCacheConfig queryCacheConfig = findQueryCacheConfigFromMapConfig(mapConfig, cacheName);
if (queryCacheConfig != null) {
setPredicateImpl(queryCacheConfig);
setEntryListener(mapName, cacheId, queryCacheConfig);
return queryCacheConfig;
}
return queryCacheConfig;
}
private QueryCacheConfig findQueryCacheConfigFromMapConfig(MapConfig mapConfig, String cacheName) {
List<QueryCacheConfig> queryCacheConfigs = mapConfig.getQueryCacheConfigs();
Map<String, QueryCacheConfig> allQueryCacheConfigs = new HashMap<String, QueryCacheConfig>(queryCacheConfigs.size());
for (QueryCacheConfig queryCacheConfig : queryCacheConfigs) {
allQueryCacheConfigs.put(queryCacheConfig.getName(), queryCacheConfig);
}
return ConfigUtils.lookupByPattern(config.getConfigPatternMatcher(), allQueryCacheConfigs, cacheName);
}
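// Illustrative only: with a wildcard-style pattern matcher, a query cache
// configured as "orders*" could be returned for cacheName "orders-eu";
// the exact behaviour depends on the configured ConfigPatternMatcher.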
@Override
public void removeConfiguration(String mapName, String cacheName) {
MapConfig mapConfig = config.getMapConfig(mapName);
List<QueryCacheConfig> queryCacheConfigs = mapConfig.getQueryCacheConfigs();
if (queryCacheConfigs == null || queryCacheConfigs.isEmpty()) {
return;
}
Iterator<QueryCacheConfig> iterator = queryCacheConfigs.iterator();
while (iterator.hasNext()) {
QueryCacheConfig config = iterator.next();
if (config.getName().equals(cacheName)) {
iterator.remove();
}
}
}
}<|fim▁end|> | @Override
public QueryCacheConfig getOrCreateConfiguration(String mapName, String cacheName, String cacheId) {
MapConfig mapConfig = config.getMapConfig(mapName);
|
<|file_name|>timedrift_with_migration.py<|end_file_name|><|fim▁begin|>import logging
from autotest_lib.client.common_lib import error
import kvm_test_utils
def run_timedrift_with_migration(test, params, env):
"""
Time drift test with migration:
1) Log into a guest.
2) Take a time reading from the guest and host.
3) Migrate the guest.
4) Take a second time reading.
5) If the drift (in seconds) is higher than a user specified value, fail.
@param test: KVM test object.
@param params: Dictionary with test parameters.
@param env: Dictionary with the test environment.
"""
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
# Collect test parameters:
# Command to run to get the current time
time_command = params.get("time_command")
# Filter which should match a string to be passed to time.strptime()
time_filter_re = params.get("time_filter_re")
# Time format for time.strptime()
time_format = params.get("time_format")
drift_threshold = float(params.get("drift_threshold", "10"))
drift_threshold_single = float(params.get("drift_threshold_single", "3"))<|fim▁hole|> migration_iterations = int(params.get("migration_iterations", 1))
try:
# Get initial time
# (ht stands for host time, gt stands for guest time)
(ht0, gt0) = kvm_test_utils.get_time(session, time_command,
time_filter_re, time_format)
# Migrate
for i in range(migration_iterations):
# Get time before current iteration
(ht0_, gt0_) = kvm_test_utils.get_time(session, time_command,
time_filter_re, time_format)
session.close()
# Run current iteration
logging.info("Migrating: iteration %d of %d...",
(i + 1), migration_iterations)
vm.migrate()
# Log in
logging.info("Logging in after migration...")
session = vm.wait_for_login(timeout=30)
logging.info("Logged in after migration")
# Get time after current iteration
(ht1_, gt1_) = kvm_test_utils.get_time(session, time_command,
time_filter_re, time_format)
# Report iteration results
host_delta = ht1_ - ht0_
guest_delta = gt1_ - gt0_
drift = abs(host_delta - guest_delta)
logging.info("Host duration (iteration %d): %.2f",
(i + 1), host_delta)
logging.info("Guest duration (iteration %d): %.2f",
(i + 1), guest_delta)
logging.info("Drift at iteration %d: %.2f seconds",
(i + 1), drift)
# Fail if necessary
if drift > drift_threshold_single:
raise error.TestFail("Time drift too large at iteration %d: "
"%.2f seconds" % (i + 1, drift))
# Get final time
(ht1, gt1) = kvm_test_utils.get_time(session, time_command,
time_filter_re, time_format)
finally:
if session:
session.close()
# Report results
host_delta = ht1 - ht0
guest_delta = gt1 - gt0
drift = abs(host_delta - guest_delta)
logging.info("Host duration (%d migrations): %.2f",
migration_iterations, host_delta)
logging.info("Guest duration (%d migrations): %.2f",
migration_iterations, guest_delta)
logging.info("Drift after %d migrations: %.2f seconds",
migration_iterations, drift)
# Fail if necessary
if drift > drift_threshold:
raise error.TestFail("Time drift too large after %d migrations: "
"%.2f seconds" % (migration_iterations, drift))<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from . import families<|fim▁hole|><|fim▁end|> | from .glm import glm, linear_component, plot_posterior_predictive |
<|file_name|>measure_data_test.cc<|end_file_name|><|fim▁begin|>// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "opencensus/stats/internal/measure_data.h"
#include <algorithm>
#include <cmath>
#include <numeric>
#include <vector>
#include "absl/types/span.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "opencensus/stats/bucket_boundaries.h"
#include "opencensus/stats/distribution.h"
#include "opencensus/stats/testing/test_utils.h"
namespace opencensus {
namespace stats {
namespace {
TEST(MeasureDataTest, SmallSequence) {
MeasureData data({});
data.Add(-6);
data.Add(0);
data.Add(3);
EXPECT_EQ(data.count(), 3);
EXPECT_DOUBLE_EQ(data.sum(), -3);
}
TEST(MeasureDataTest, MultipleHistograms) {
std::vector<BucketBoundaries> buckets = {BucketBoundaries::Explicit({0, 10}),
BucketBoundaries::Explicit({}),
BucketBoundaries::Explicit({5})};
MeasureData data(buckets);
data.Add(-1);
data.Add(1);
data.Add(8);
Distribution distribution1 =
testing::TestUtils::MakeDistribution(&buckets[0]);
data.AddToDistribution(&distribution1);
EXPECT_THAT(distribution1.bucket_counts(), ::testing::ElementsAre(1, 2, 0));
Distribution distribution2 =
testing::TestUtils::MakeDistribution(&buckets[2]);
data.AddToDistribution(&distribution2);
EXPECT_THAT(distribution2.bucket_counts(), ::testing::ElementsAre(2, 1));
}
TEST(MeasureDataTest, DistributionStatistics) {
BucketBoundaries buckets = BucketBoundaries::Explicit({});
MeasureData data(absl::MakeSpan(&buckets, 1));
const std::vector<int> samples{91, 18, 63, 98, 87, 77, 14, 97, 10, 35,
12, 5, 75, 41, 49, 38, 40, 20, 55, 83};
const double expected_mean =
static_cast<double>(std::accumulate(samples.begin(), samples.end(), 0)) /
samples.size();
double expected_sum_of_squared_deviation = 0;
for (const auto sample : samples) {
data.Add(sample);
expected_sum_of_squared_deviation += pow(sample - expected_mean, 2);
}
Distribution distribution = testing::TestUtils::MakeDistribution(&buckets);
data.AddToDistribution(&distribution);
EXPECT_EQ(distribution.count(), samples.size());
EXPECT_DOUBLE_EQ(distribution.mean(), expected_mean);
EXPECT_DOUBLE_EQ(distribution.sum_of_squared_deviation(),
expected_sum_of_squared_deviation);
EXPECT_DOUBLE_EQ(distribution.min(),
*std::min_element(samples.begin(), samples.end()));
EXPECT_DOUBLE_EQ(distribution.max(),
*std::max_element(samples.begin(), samples.end()));
}
TEST(MeasureDataTest, BatchedAddToDistribution) {
// Tests that batching values in the MeasureData is equivalent to sequentially
// adding to the distribution.
BucketBoundaries buckets = BucketBoundaries::Exponential(7, 2, 2);
MeasureData data(absl::MakeSpan(&buckets, 1));
Distribution base_distribution =
testing::TestUtils::MakeDistribution(&buckets);
// Add some preexisting data to fully test the merge.
testing::TestUtils::AddToDistribution(&base_distribution, 20);
testing::TestUtils::AddToDistribution(&base_distribution, 10);
Distribution expected_distribution = base_distribution;
const double tolerance = 1.0 / 1000000000;
const int max = 100;
for (int i = 0; i <= max; ++i) {
data.Add(i);
testing::TestUtils::AddToDistribution(&expected_distribution, i);<|fim▁hole|>
EXPECT_EQ(expected_distribution.count(), actual_distribution.count());
EXPECT_DOUBLE_EQ(expected_distribution.mean(), actual_distribution.mean());
EXPECT_NEAR(expected_distribution.sum_of_squared_deviation(),
actual_distribution.sum_of_squared_deviation(), tolerance);
EXPECT_DOUBLE_EQ(expected_distribution.min(), actual_distribution.min());
EXPECT_DOUBLE_EQ(expected_distribution.max(), actual_distribution.max());
EXPECT_THAT(
actual_distribution.bucket_counts(),
::testing::ElementsAreArray(expected_distribution.bucket_counts()));
}
}
TEST(MeasureDataDeathTest, AddToDistributionWithUnknownBuckets) {
BucketBoundaries buckets = BucketBoundaries::Explicit({0, 10});
MeasureData data(absl::MakeSpan(&buckets, 1));
data.Add(1);
BucketBoundaries distribution_buckets = BucketBoundaries::Explicit({0});
Distribution distribution =
testing::TestUtils::MakeDistribution(&distribution_buckets);
EXPECT_DEBUG_DEATH(
{
data.AddToDistribution(&distribution);
EXPECT_THAT(distribution.bucket_counts(), ::testing::ElementsAre(1, 0));
},
"No matching BucketBoundaries in AddToDistribution");
}
} // namespace
} // namespace stats
} // namespace opencensus<|fim▁end|> |
Distribution actual_distribution = base_distribution;
data.AddToDistribution(&actual_distribution); |
<|file_name|>NetworkService.go<|end_file_name|><|fim▁begin|>//
// Copyright 2014, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package cloudstack
import (
"encoding/json"
"fmt"
"net/url"
"strconv"
"strings"
)
type DedicatePublicIpRangeParams struct {
p map[string]interface{}
}
func (p *DedicatePublicIpRangeParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["account"]; found {
u.Set("account", v.(string))
}
if v, found := p.p["domainid"]; found {
u.Set("domainid", v.(string))
}
if v, found := p.p["id"]; found {
u.Set("id", v.(string))
}
if v, found := p.p["projectid"]; found {
u.Set("projectid", v.(string))
}
return u
}
func (p *DedicatePublicIpRangeParams) SetAccount(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["account"] = v
return
}
func (p *DedicatePublicIpRangeParams) SetDomainid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["domainid"] = v
return
}
func (p *DedicatePublicIpRangeParams) SetId(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["id"] = v
return
}
func (p *DedicatePublicIpRangeParams) SetProjectid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["projectid"] = v
return
}
// You should always use this function to get a new DedicatePublicIpRangeParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewDedicatePublicIpRangeParams(domainid string, id string) *DedicatePublicIpRangeParams {
p := &DedicatePublicIpRangeParams{}
p.p = make(map[string]interface{})
p.p["domainid"] = domainid
p.p["id"] = id
return p
}
// Dedicates a Public IP range to an account
func (s *NetworkService) DedicatePublicIpRange(p *DedicatePublicIpRangeParams) (*DedicatePublicIpRangeResponse, error) {
resp, err := s.cs.newRequest("dedicatePublicIpRange", p.toURLValues())
if err != nil {
return nil, err
}
var r DedicatePublicIpRangeResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
return &r, nil
}
type DedicatePublicIpRangeResponse struct {
Account string `json:"account,omitempty"`
Description string `json:"description,omitempty"`
Domain string `json:"domain,omitempty"`
Domainid string `json:"domainid,omitempty"`
Endip string `json:"endip,omitempty"`
Endipv6 string `json:"endipv6,omitempty"`
Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"`
Gateway string `json:"gateway,omitempty"`
Id string `json:"id,omitempty"`
Ip6cidr string `json:"ip6cidr,omitempty"`
Ip6gateway string `json:"ip6gateway,omitempty"`
Netmask string `json:"netmask,omitempty"`
Networkid string `json:"networkid,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Podid string `json:"podid,omitempty"`
Podname string `json:"podname,omitempty"`
Project string `json:"project,omitempty"`
Projectid string `json:"projectid,omitempty"`
Startip string `json:"startip,omitempty"`
Startipv6 string `json:"startipv6,omitempty"`
Vlan string `json:"vlan,omitempty"`
Zoneid string `json:"zoneid,omitempty"`
}
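// A minimal usage sketch, not part of the generated API: dedicate a public IP
// range to an account within a domain. The IDs and account name are
// placeholders supplied by the caller, not real values.
func exampleDedicatePublicIpRange(s *NetworkService, domainid, rangeid, account string) error {
	p := s.NewDedicatePublicIpRangeParams(domainid, rangeid)
	p.SetAccount(account)
	_, err := s.DedicatePublicIpRange(p)
	return err
}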
type ReleasePublicIpRangeParams struct {
p map[string]interface{}
}
func (p *ReleasePublicIpRangeParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["id"]; found {
u.Set("id", v.(string))
}
return u
}
func (p *ReleasePublicIpRangeParams) SetId(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["id"] = v
return
}
// You should always use this function to get a new ReleasePublicIpRangeParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewReleasePublicIpRangeParams(id string) *ReleasePublicIpRangeParams {
p := &ReleasePublicIpRangeParams{}
p.p = make(map[string]interface{})
p.p["id"] = id
return p
}
// Releases a Public IP range back to the system pool
func (s *NetworkService) ReleasePublicIpRange(p *ReleasePublicIpRangeParams) (*ReleasePublicIpRangeResponse, error) {
resp, err := s.cs.newRequest("releasePublicIpRange", p.toURLValues())
if err != nil {
return nil, err
}
var r ReleasePublicIpRangeResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
return &r, nil
}
type ReleasePublicIpRangeResponse struct {
Displaytext string `json:"displaytext,omitempty"`
Success string `json:"success,omitempty"`
}
type CreateNetworkParams struct {
p map[string]interface{}
}
func (p *CreateNetworkParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["account"]; found {
u.Set("account", v.(string))
}
if v, found := p.p["aclid"]; found {
u.Set("aclid", v.(string))
}
if v, found := p.p["acltype"]; found {
u.Set("acltype", v.(string))
}
if v, found := p.p["displaynetwork"]; found {
vv := strconv.FormatBool(v.(bool))
u.Set("displaynetwork", vv)
}
if v, found := p.p["displaytext"]; found {
u.Set("displaytext", v.(string))
}
if v, found := p.p["domainid"]; found {
u.Set("domainid", v.(string))
}
if v, found := p.p["endip"]; found {
u.Set("endip", v.(string))
}
if v, found := p.p["endipv6"]; found {
u.Set("endipv6", v.(string))
}
if v, found := p.p["gateway"]; found {
u.Set("gateway", v.(string))
}
if v, found := p.p["ip6cidr"]; found {
u.Set("ip6cidr", v.(string))
}
if v, found := p.p["ip6gateway"]; found {
u.Set("ip6gateway", v.(string))
}
if v, found := p.p["isolatedpvlan"]; found {
u.Set("isolatedpvlan", v.(string))
}
if v, found := p.p["name"]; found {
u.Set("name", v.(string))
}
if v, found := p.p["netmask"]; found {
u.Set("netmask", v.(string))
}
if v, found := p.p["networkdomain"]; found {
u.Set("networkdomain", v.(string))
}
if v, found := p.p["networkofferingid"]; found {
u.Set("networkofferingid", v.(string))
}
if v, found := p.p["physicalnetworkid"]; found {
u.Set("physicalnetworkid", v.(string))
}
if v, found := p.p["projectid"]; found {
u.Set("projectid", v.(string))
}
if v, found := p.p["startip"]; found {
u.Set("startip", v.(string))
}
if v, found := p.p["startipv6"]; found {
u.Set("startipv6", v.(string))
}
if v, found := p.p["subdomainaccess"]; found {
vv := strconv.FormatBool(v.(bool))
u.Set("subdomainaccess", vv)
}
if v, found := p.p["vlan"]; found {
u.Set("vlan", v.(string))
}
if v, found := p.p["vpcid"]; found {
u.Set("vpcid", v.(string))
}
if v, found := p.p["zoneid"]; found {
u.Set("zoneid", v.(string))
}
return u
}
func (p *CreateNetworkParams) SetAccount(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["account"] = v
return
}
func (p *CreateNetworkParams) SetAclid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["aclid"] = v
return
}
func (p *CreateNetworkParams) SetAcltype(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["acltype"] = v
return
}
func (p *CreateNetworkParams) SetDisplaynetwork(v bool) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["displaynetwork"] = v
return
}
func (p *CreateNetworkParams) SetDisplaytext(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["displaytext"] = v
return
}
func (p *CreateNetworkParams) SetDomainid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["domainid"] = v
return
}
func (p *CreateNetworkParams) SetEndip(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["endip"] = v
return
}
func (p *CreateNetworkParams) SetEndipv6(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["endipv6"] = v
return
}
func (p *CreateNetworkParams) SetGateway(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["gateway"] = v
return
}
func (p *CreateNetworkParams) SetIp6cidr(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["ip6cidr"] = v
return
}
func (p *CreateNetworkParams) SetIp6gateway(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["ip6gateway"] = v
return
}
func (p *CreateNetworkParams) SetIsolatedpvlan(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["isolatedpvlan"] = v
return
}
func (p *CreateNetworkParams) SetName(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["name"] = v
return
}
func (p *CreateNetworkParams) SetNetmask(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["netmask"] = v
return
}
func (p *CreateNetworkParams) SetNetworkdomain(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["networkdomain"] = v
return
}
func (p *CreateNetworkParams) SetNetworkofferingid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["networkofferingid"] = v
return
}
func (p *CreateNetworkParams) SetPhysicalnetworkid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["physicalnetworkid"] = v
return
}
func (p *CreateNetworkParams) SetProjectid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["projectid"] = v
return
}
func (p *CreateNetworkParams) SetStartip(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["startip"] = v
return
}
func (p *CreateNetworkParams) SetStartipv6(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["startipv6"] = v
return
}
func (p *CreateNetworkParams) SetSubdomainaccess(v bool) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["subdomainaccess"] = v
return
}
func (p *CreateNetworkParams) SetVlan(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["vlan"] = v
return
}
func (p *CreateNetworkParams) SetVpcid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["vpcid"] = v
return
}
func (p *CreateNetworkParams) SetZoneid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["zoneid"] = v
return
}
// You should always use this function to get a new CreateNetworkParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewCreateNetworkParams(displaytext string, name string, networkofferingid string, zoneid string) *CreateNetworkParams {
p := &CreateNetworkParams{}
p.p = make(map[string]interface{})
p.p["displaytext"] = displaytext
p.p["name"] = name
p.p["networkofferingid"] = networkofferingid
p.p["zoneid"] = zoneid
return p
}
// Creates a network
func (s *NetworkService) CreateNetwork(p *CreateNetworkParams) (*CreateNetworkResponse, error) {
resp, err := s.cs.newRequest("createNetwork", p.toURLValues())
if err != nil {
return nil, err
}
if resp, err = getRawValue(resp); err != nil {
return nil, err
}
var r CreateNetworkResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
return &r, nil
}<|fim▁hole|>type CreateNetworkResponse struct {
Account string `json:"account,omitempty"`
Aclid string `json:"aclid,omitempty"`
Acltype string `json:"acltype,omitempty"`
Broadcastdomaintype string `json:"broadcastdomaintype,omitempty"`
Broadcasturi string `json:"broadcasturi,omitempty"`
Canusefordeploy bool `json:"canusefordeploy,omitempty"`
Cidr string `json:"cidr,omitempty"`
Displaynetwork bool `json:"displaynetwork,omitempty"`
Displaytext string `json:"displaytext,omitempty"`
Dns1 string `json:"dns1,omitempty"`
Dns2 string `json:"dns2,omitempty"`
Domain string `json:"domain,omitempty"`
Domainid string `json:"domainid,omitempty"`
Gateway string `json:"gateway,omitempty"`
Id string `json:"id,omitempty"`
Ip6cidr string `json:"ip6cidr,omitempty"`
Ip6gateway string `json:"ip6gateway,omitempty"`
Isdefault bool `json:"isdefault,omitempty"`
Ispersistent bool `json:"ispersistent,omitempty"`
Issystem bool `json:"issystem,omitempty"`
Name string `json:"name,omitempty"`
Netmask string `json:"netmask,omitempty"`
Networkcidr string `json:"networkcidr,omitempty"`
Networkdomain string `json:"networkdomain,omitempty"`
Networkofferingavailability string `json:"networkofferingavailability,omitempty"`
Networkofferingconservemode bool `json:"networkofferingconservemode,omitempty"`
Networkofferingdisplaytext string `json:"networkofferingdisplaytext,omitempty"`
Networkofferingid string `json:"networkofferingid,omitempty"`
Networkofferingname string `json:"networkofferingname,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Project string `json:"project,omitempty"`
Projectid string `json:"projectid,omitempty"`
Related string `json:"related,omitempty"`
Reservediprange string `json:"reservediprange,omitempty"`
Restartrequired bool `json:"restartrequired,omitempty"`
Service []struct {
Capability []struct {
Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"`
Name string `json:"name,omitempty"`
Value string `json:"value,omitempty"`
} `json:"capability,omitempty"`
Name string `json:"name,omitempty"`
Provider []struct {
Canenableindividualservice bool `json:"canenableindividualservice,omitempty"`
Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Servicelist []string `json:"servicelist,omitempty"`
State string `json:"state,omitempty"`
} `json:"provider,omitempty"`
} `json:"service,omitempty"`
Specifyipranges bool `json:"specifyipranges,omitempty"`
State string `json:"state,omitempty"`
Strechedl2subnet bool `json:"strechedl2subnet,omitempty"`
Subdomainaccess bool `json:"subdomainaccess,omitempty"`
Tags []struct {
Account string `json:"account,omitempty"`
Customer string `json:"customer,omitempty"`
Domain string `json:"domain,omitempty"`
Domainid string `json:"domainid,omitempty"`
Key string `json:"key,omitempty"`
Project string `json:"project,omitempty"`
Projectid string `json:"projectid,omitempty"`
Resourceid string `json:"resourceid,omitempty"`
Resourcetype string `json:"resourcetype,omitempty"`
Value string `json:"value,omitempty"`
} `json:"tags,omitempty"`
Traffictype string `json:"traffictype,omitempty"`
Type string `json:"type,omitempty"`
Vlan string `json:"vlan,omitempty"`
Vpcid string `json:"vpcid,omitempty"`
Zoneid string `json:"zoneid,omitempty"`
Zonename string `json:"zonename,omitempty"`
Zonesnetworkspans []string `json:"zonesnetworkspans,omitempty"`
}
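// A minimal usage sketch, not part of the generated API: create an isolated
// network. The offering and zone UUIDs are placeholders that would normally
// come from the corresponding list calls.
func exampleCreateNetwork(s *NetworkService, offeringid, zoneid string) (string, error) {
	p := s.NewCreateNetworkParams("example network", "example-net", offeringid, zoneid)
	p.SetNetworkdomain("example.local") // optional fields are set through the setters
	r, err := s.CreateNetwork(p)
	if err != nil {
		return "", err
	}
	return r.Id, nil
}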
type DeleteNetworkParams struct {
p map[string]interface{}
}
func (p *DeleteNetworkParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["forced"]; found {
vv := strconv.FormatBool(v.(bool))
u.Set("forced", vv)
}
if v, found := p.p["id"]; found {
u.Set("id", v.(string))
}
return u
}
func (p *DeleteNetworkParams) SetForced(v bool) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["forced"] = v
return
}
func (p *DeleteNetworkParams) SetId(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["id"] = v
return
}
// You should always use this function to get a new DeleteNetworkParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewDeleteNetworkParams(id string) *DeleteNetworkParams {
p := &DeleteNetworkParams{}
p.p = make(map[string]interface{})
p.p["id"] = id
return p
}
// Deletes a network
func (s *NetworkService) DeleteNetwork(p *DeleteNetworkParams) (*DeleteNetworkResponse, error) {
resp, err := s.cs.newRequest("deleteNetwork", p.toURLValues())
if err != nil {
return nil, err
}
var r DeleteNetworkResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
// If we have an async client, we need to wait for the async result
if s.cs.async {
b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
if err != nil {
if err == AsyncTimeoutErr {
return &r, err
}
return nil, err
}
if err := json.Unmarshal(b, &r); err != nil {
return nil, err
}
}
return &r, nil
}
type DeleteNetworkResponse struct {
JobID string `json:"jobid,omitempty"`
Displaytext string `json:"displaytext,omitempty"`
Success bool `json:"success,omitempty"`
}
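// A minimal usage sketch, not part of the generated API: force-delete a
// network by ID. With an async client, DeleteNetwork above already waits for
// the async job result before returning.
func exampleDeleteNetwork(s *NetworkService, id string) error {
	p := s.NewDeleteNetworkParams(id)
	p.SetForced(true)
	_, err := s.DeleteNetwork(p)
	return err
}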
type ListNetworksParams struct {
p map[string]interface{}
}
func (p *ListNetworksParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["account"]; found {
u.Set("account", v.(string))
}
if v, found := p.p["acltype"]; found {
u.Set("acltype", v.(string))
}
if v, found := p.p["canusefordeploy"]; found {
vv := strconv.FormatBool(v.(bool))
u.Set("canusefordeploy", vv)
}
if v, found := p.p["displaynetwork"]; found {
vv := strconv.FormatBool(v.(bool))
u.Set("displaynetwork", vv)
}
if v, found := p.p["domainid"]; found {
u.Set("domainid", v.(string))
}
if v, found := p.p["forvpc"]; found {
vv := strconv.FormatBool(v.(bool))
u.Set("forvpc", vv)
}
if v, found := p.p["id"]; found {
u.Set("id", v.(string))
}
if v, found := p.p["isrecursive"]; found {
vv := strconv.FormatBool(v.(bool))
u.Set("isrecursive", vv)
}
if v, found := p.p["issystem"]; found {
vv := strconv.FormatBool(v.(bool))
u.Set("issystem", vv)
}
if v, found := p.p["keyword"]; found {
u.Set("keyword", v.(string))
}
if v, found := p.p["listall"]; found {
vv := strconv.FormatBool(v.(bool))
u.Set("listall", vv)
}
if v, found := p.p["page"]; found {
vv := strconv.Itoa(v.(int))
u.Set("page", vv)
}
if v, found := p.p["pagesize"]; found {
vv := strconv.Itoa(v.(int))
u.Set("pagesize", vv)
}
if v, found := p.p["physicalnetworkid"]; found {
u.Set("physicalnetworkid", v.(string))
}
if v, found := p.p["projectid"]; found {
u.Set("projectid", v.(string))
}
if v, found := p.p["restartrequired"]; found {
vv := strconv.FormatBool(v.(bool))
u.Set("restartrequired", vv)
}
if v, found := p.p["specifyipranges"]; found {
vv := strconv.FormatBool(v.(bool))
u.Set("specifyipranges", vv)
}
if v, found := p.p["supportedservices"]; found {
vv := strings.Join(v.([]string), ",")
u.Set("supportedservices", vv)
}
if v, found := p.p["tags"]; found {
i := 0
for k, vv := range v.(map[string]string) {
u.Set(fmt.Sprintf("tags[%d].key", i), k)
u.Set(fmt.Sprintf("tags[%d].value", i), vv)
i++
}
}
if v, found := p.p["traffictype"]; found {
u.Set("traffictype", v.(string))
}
if v, found := p.p["type"]; found {
u.Set("type", v.(string))
}
if v, found := p.p["vpcid"]; found {
u.Set("vpcid", v.(string))
}
if v, found := p.p["zoneid"]; found {
u.Set("zoneid", v.(string))
}
return u
}
func (p *ListNetworksParams) SetAccount(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["account"] = v
return
}
func (p *ListNetworksParams) SetAcltype(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["acltype"] = v
return
}
func (p *ListNetworksParams) SetCanusefordeploy(v bool) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["canusefordeploy"] = v
return
}
func (p *ListNetworksParams) SetDisplaynetwork(v bool) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["displaynetwork"] = v
return
}
func (p *ListNetworksParams) SetDomainid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["domainid"] = v
return
}
func (p *ListNetworksParams) SetForvpc(v bool) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["forvpc"] = v
return
}
func (p *ListNetworksParams) SetId(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["id"] = v
return
}
func (p *ListNetworksParams) SetIsrecursive(v bool) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["isrecursive"] = v
return
}
func (p *ListNetworksParams) SetIssystem(v bool) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["issystem"] = v
return
}
func (p *ListNetworksParams) SetKeyword(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["keyword"] = v
return
}
func (p *ListNetworksParams) SetListall(v bool) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["listall"] = v
return
}
func (p *ListNetworksParams) SetPage(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["page"] = v
return
}
func (p *ListNetworksParams) SetPagesize(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["pagesize"] = v
return
}
func (p *ListNetworksParams) SetPhysicalnetworkid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["physicalnetworkid"] = v
return
}
func (p *ListNetworksParams) SetProjectid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["projectid"] = v
return
}
func (p *ListNetworksParams) SetRestartrequired(v bool) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["restartrequired"] = v
return
}
func (p *ListNetworksParams) SetSpecifyipranges(v bool) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["specifyipranges"] = v
return
}
func (p *ListNetworksParams) SetSupportedservices(v []string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["supportedservices"] = v
return
}
func (p *ListNetworksParams) SetTags(v map[string]string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["tags"] = v
return
}
func (p *ListNetworksParams) SetTraffictype(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["traffictype"] = v
return
}
func (p *ListNetworksParams) SetType(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["networkType"] = v
return
}
func (p *ListNetworksParams) SetVpcid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["vpcid"] = v
return
}
func (p *ListNetworksParams) SetZoneid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["zoneid"] = v
return
}
// You should always use this function to get a new ListNetworksParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewListNetworksParams() *ListNetworksParams {
p := &ListNetworksParams{}
p.p = make(map[string]interface{})
return p
}
// This is a courtesy helper function, which in some cases may not work as expected!
func (s *NetworkService) GetNetworkID(keyword string) (string, error) {
p := &ListNetworksParams{}
p.p = make(map[string]interface{})
p.p["keyword"] = keyword
l, err := s.ListNetworks(p)
if err != nil {
return "", err
}
if l.Count == 0 {
// If no matches, search all projects
p.p["projectid"] = "-1"
l, err = s.ListNetworks(p)
if err != nil {
return "", err
}
}
if l.Count == 0 {
return "", fmt.Errorf("No match found for %s: %+v", keyword, l)
}
if l.Count == 1 {
return l.Networks[0].Id, nil
}
if l.Count > 1 {
for _, v := range l.Networks {
if v.Name == keyword {
return v.Id, nil
}
}
}
return "", fmt.Errorf("Could not find an exact match for %s: %+v", keyword, l)
}
// This is a courtesy helper function, which in some cases may not work as expected!
func (s *NetworkService) GetNetworkByName(name string) (*Network, int, error) {
id, err := s.GetNetworkID(name)
if err != nil {
return nil, -1, err
}
r, count, err := s.GetNetworkByID(id)
if err != nil {
return nil, count, err
}
return r, count, nil
}
// This is a courtesy helper function, which in some cases may not work as expected!
func (s *NetworkService) GetNetworkByID(id string) (*Network, int, error) {
p := &ListNetworksParams{}
p.p = make(map[string]interface{})
p.p["id"] = id
l, err := s.ListNetworks(p)
if err != nil {
if strings.Contains(err.Error(), fmt.Sprintf(
"Invalid parameter id value=%s due to incorrect long value format, "+
"or entity does not exist", id)) {
return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
}
return nil, -1, err
}
if l.Count == 0 {
// If no matches, search all projects
p.p["projectid"] = "-1"
l, err = s.ListNetworks(p)
if err != nil {
if strings.Contains(err.Error(), fmt.Sprintf(
"Invalid parameter id value=%s due to incorrect long value format, "+
"or entity does not exist", id)) {
return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
}
return nil, -1, err
}
}
if l.Count == 0 {
return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
}
if l.Count == 1 {
return l.Networks[0], l.Count, nil
}
return nil, l.Count, fmt.Errorf("There is more then one result for Network UUID: %s!", id)
}
// Lists all available networks.
func (s *NetworkService) ListNetworks(p *ListNetworksParams) (*ListNetworksResponse, error) {
resp, err := s.cs.newRequest("listNetworks", p.toURLValues())
if err != nil {
return nil, err
}
var r ListNetworksResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
return &r, nil
}
type ListNetworksResponse struct {
Count int `json:"count"`
Networks []*Network `json:"network"`
}
type Network struct {
Account string `json:"account,omitempty"`
Aclid string `json:"aclid,omitempty"`
Acltype string `json:"acltype,omitempty"`
Broadcastdomaintype string `json:"broadcastdomaintype,omitempty"`
Broadcasturi string `json:"broadcasturi,omitempty"`
Canusefordeploy bool `json:"canusefordeploy,omitempty"`
Cidr string `json:"cidr,omitempty"`
Displaynetwork bool `json:"displaynetwork,omitempty"`
Displaytext string `json:"displaytext,omitempty"`
Dns1 string `json:"dns1,omitempty"`
Dns2 string `json:"dns2,omitempty"`
Domain string `json:"domain,omitempty"`
Domainid string `json:"domainid,omitempty"`
Gateway string `json:"gateway,omitempty"`
Id string `json:"id,omitempty"`
Ip6cidr string `json:"ip6cidr,omitempty"`
Ip6gateway string `json:"ip6gateway,omitempty"`
Isdefault bool `json:"isdefault,omitempty"`
Ispersistent bool `json:"ispersistent,omitempty"`
Issystem bool `json:"issystem,omitempty"`
Name string `json:"name,omitempty"`
Netmask string `json:"netmask,omitempty"`
Networkcidr string `json:"networkcidr,omitempty"`
Networkdomain string `json:"networkdomain,omitempty"`
Networkofferingavailability string `json:"networkofferingavailability,omitempty"`
Networkofferingconservemode bool `json:"networkofferingconservemode,omitempty"`
Networkofferingdisplaytext string `json:"networkofferingdisplaytext,omitempty"`
Networkofferingid string `json:"networkofferingid,omitempty"`
Networkofferingname string `json:"networkofferingname,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Project string `json:"project,omitempty"`
Projectid string `json:"projectid,omitempty"`
Related string `json:"related,omitempty"`
Reservediprange string `json:"reservediprange,omitempty"`
Restartrequired bool `json:"restartrequired,omitempty"`
Service []struct {
Capability []struct {
Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"`
Name string `json:"name,omitempty"`
Value string `json:"value,omitempty"`
} `json:"capability,omitempty"`
Name string `json:"name,omitempty"`
Provider []struct {
Canenableindividualservice bool `json:"canenableindividualservice,omitempty"`
Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Servicelist []string `json:"servicelist,omitempty"`
State string `json:"state,omitempty"`
} `json:"provider,omitempty"`
} `json:"service,omitempty"`
Specifyipranges bool `json:"specifyipranges,omitempty"`
State string `json:"state,omitempty"`
Strechedl2subnet bool `json:"strechedl2subnet,omitempty"`
Subdomainaccess bool `json:"subdomainaccess,omitempty"`
Tags []struct {
Account string `json:"account,omitempty"`
Customer string `json:"customer,omitempty"`
Domain string `json:"domain,omitempty"`
Domainid string `json:"domainid,omitempty"`
Key string `json:"key,omitempty"`
Project string `json:"project,omitempty"`
Projectid string `json:"projectid,omitempty"`
Resourceid string `json:"resourceid,omitempty"`
Resourcetype string `json:"resourcetype,omitempty"`
Value string `json:"value,omitempty"`
} `json:"tags,omitempty"`
Traffictype string `json:"traffictype,omitempty"`
Type string `json:"type,omitempty"`
Vlan string `json:"vlan,omitempty"`
Vpcid string `json:"vpcid,omitempty"`
Zoneid string `json:"zoneid,omitempty"`
Zonename string `json:"zonename,omitempty"`
Zonesnetworkspans []string `json:"zonesnetworkspans,omitempty"`
}
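// A minimal usage sketch, not part of the generated API: page through all
// networks in a zone with the list params defined above. The page size of 50
// is an arbitrary example value.
func exampleListNetworksByZone(s *NetworkService, zoneid string) ([]*Network, error) {
	var all []*Network
	p := s.NewListNetworksParams()
	p.SetZoneid(zoneid)
	p.SetPagesize(50)
	for page := 1; ; page++ {
		p.SetPage(page)
		r, err := s.ListNetworks(p)
		if err != nil {
			return nil, err
		}
		all = append(all, r.Networks...)
		if len(r.Networks) == 0 || len(all) >= r.Count {
			break
		}
	}
	return all, nil
}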
type RestartNetworkParams struct {
p map[string]interface{}
}
func (p *RestartNetworkParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["cleanup"]; found {
vv := strconv.FormatBool(v.(bool))
u.Set("cleanup", vv)
}
if v, found := p.p["id"]; found {
u.Set("id", v.(string))
}
return u
}
func (p *RestartNetworkParams) SetCleanup(v bool) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["cleanup"] = v
return
}
func (p *RestartNetworkParams) SetId(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["id"] = v
return
}
// You should always use this function to get a new RestartNetworkParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewRestartNetworkParams(id string) *RestartNetworkParams {
p := &RestartNetworkParams{}
p.p = make(map[string]interface{})
p.p["id"] = id
return p
}
// Restarts the network; includes 1) restarting network elements - virtual routers, DHCP servers 2) reapplying all public IPs 3) reapplying loadBalancing/portForwarding rules
func (s *NetworkService) RestartNetwork(p *RestartNetworkParams) (*RestartNetworkResponse, error) {
resp, err := s.cs.newRequest("restartNetwork", p.toURLValues())
if err != nil {
return nil, err
}
var r RestartNetworkResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
// If we have an async client, we need to wait for the async result
if s.cs.async {
b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
if err != nil {
if err == AsyncTimeoutErr {
return &r, err
}
return nil, err
}
b, err = getRawValue(b)
if err != nil {
return nil, err
}
if err := json.Unmarshal(b, &r); err != nil {
return nil, err
}
}
return &r, nil
}
type RestartNetworkResponse struct {
JobID string `json:"jobid,omitempty"`
Account string `json:"account,omitempty"`
Allocated string `json:"allocated,omitempty"`
Associatednetworkid string `json:"associatednetworkid,omitempty"`
Associatednetworkname string `json:"associatednetworkname,omitempty"`
Domain string `json:"domain,omitempty"`
Domainid string `json:"domainid,omitempty"`
Fordisplay bool `json:"fordisplay,omitempty"`
Forvirtualnetwork bool `json:"forvirtualnetwork,omitempty"`
Id string `json:"id,omitempty"`
Ipaddress string `json:"ipaddress,omitempty"`
Isportable bool `json:"isportable,omitempty"`
Issourcenat bool `json:"issourcenat,omitempty"`
Isstaticnat bool `json:"isstaticnat,omitempty"`
Issystem bool `json:"issystem,omitempty"`
Networkid string `json:"networkid,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Project string `json:"project,omitempty"`
Projectid string `json:"projectid,omitempty"`
Purpose string `json:"purpose,omitempty"`
State string `json:"state,omitempty"`
Tags []struct {
Account string `json:"account,omitempty"`
Customer string `json:"customer,omitempty"`
Domain string `json:"domain,omitempty"`
Domainid string `json:"domainid,omitempty"`
Key string `json:"key,omitempty"`
Project string `json:"project,omitempty"`
Projectid string `json:"projectid,omitempty"`
Resourceid string `json:"resourceid,omitempty"`
Resourcetype string `json:"resourcetype,omitempty"`
Value string `json:"value,omitempty"`
} `json:"tags,omitempty"`
Virtualmachinedisplayname string `json:"virtualmachinedisplayname,omitempty"`
Virtualmachineid string `json:"virtualmachineid,omitempty"`
Virtualmachinename string `json:"virtualmachinename,omitempty"`
Vlanid string `json:"vlanid,omitempty"`
Vlanname string `json:"vlanname,omitempty"`
Vmipaddress string `json:"vmipaddress,omitempty"`
Vpcid string `json:"vpcid,omitempty"`
Zoneid string `json:"zoneid,omitempty"`
Zonename string `json:"zonename,omitempty"`
}
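// A minimal usage sketch, not part of the generated API: restart a network
// with cleanup, which also recreates its network elements.
func exampleRestartNetwork(s *NetworkService, id string) error {
	p := s.NewRestartNetworkParams(id)
	p.SetCleanup(true)
	_, err := s.RestartNetwork(p)
	return err
}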
type UpdateNetworkParams struct {
p map[string]interface{}
}
func (p *UpdateNetworkParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["changecidr"]; found {
vv := strconv.FormatBool(v.(bool))
u.Set("changecidr", vv)
}
if v, found := p.p["customid"]; found {
u.Set("customid", v.(string))
}
if v, found := p.p["displaynetwork"]; found {
vv := strconv.FormatBool(v.(bool))
u.Set("displaynetwork", vv)
}
if v, found := p.p["displaytext"]; found {
u.Set("displaytext", v.(string))
}
if v, found := p.p["guestvmcidr"]; found {
u.Set("guestvmcidr", v.(string))
}
if v, found := p.p["id"]; found {
u.Set("id", v.(string))
}
if v, found := p.p["name"]; found {
u.Set("name", v.(string))
}
if v, found := p.p["networkdomain"]; found {
u.Set("networkdomain", v.(string))
}
if v, found := p.p["networkofferingid"]; found {
u.Set("networkofferingid", v.(string))
}
return u
}
func (p *UpdateNetworkParams) SetChangecidr(v bool) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["changecidr"] = v
return
}
func (p *UpdateNetworkParams) SetCustomid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["customid"] = v
return
}
func (p *UpdateNetworkParams) SetDisplaynetwork(v bool) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["displaynetwork"] = v
return
}
func (p *UpdateNetworkParams) SetDisplaytext(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["displaytext"] = v
return
}
func (p *UpdateNetworkParams) SetGuestvmcidr(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["guestvmcidr"] = v
return
}
func (p *UpdateNetworkParams) SetId(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["id"] = v
return
}
func (p *UpdateNetworkParams) SetName(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["name"] = v
return
}
func (p *UpdateNetworkParams) SetNetworkdomain(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["networkdomain"] = v
return
}
func (p *UpdateNetworkParams) SetNetworkofferingid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["networkofferingid"] = v
return
}
// You should always use this function to get a new UpdateNetworkParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewUpdateNetworkParams(id string) *UpdateNetworkParams {
p := &UpdateNetworkParams{}
p.p = make(map[string]interface{})
p.p["id"] = id
return p
}
// Updates a network
func (s *NetworkService) UpdateNetwork(p *UpdateNetworkParams) (*UpdateNetworkResponse, error) {
resp, err := s.cs.newRequest("updateNetwork", p.toURLValues())
if err != nil {
return nil, err
}
var r UpdateNetworkResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
// If we have an async client, we need to wait for the async result
if s.cs.async {
b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
if err != nil {
if err == AsyncTimeoutErr {
return &r, err
}
return nil, err
}
b, err = getRawValue(b)
if err != nil {
return nil, err
}
if err := json.Unmarshal(b, &r); err != nil {
return nil, err
}
}
return &r, nil
}
type UpdateNetworkResponse struct {
JobID string `json:"jobid,omitempty"`
Account string `json:"account,omitempty"`
Aclid string `json:"aclid,omitempty"`
Acltype string `json:"acltype,omitempty"`
Broadcastdomaintype string `json:"broadcastdomaintype,omitempty"`
Broadcasturi string `json:"broadcasturi,omitempty"`
Canusefordeploy bool `json:"canusefordeploy,omitempty"`
Cidr string `json:"cidr,omitempty"`
Displaynetwork bool `json:"displaynetwork,omitempty"`
Displaytext string `json:"displaytext,omitempty"`
Dns1 string `json:"dns1,omitempty"`
Dns2 string `json:"dns2,omitempty"`
Domain string `json:"domain,omitempty"`
Domainid string `json:"domainid,omitempty"`
Gateway string `json:"gateway,omitempty"`
Id string `json:"id,omitempty"`
Ip6cidr string `json:"ip6cidr,omitempty"`
Ip6gateway string `json:"ip6gateway,omitempty"`
Isdefault bool `json:"isdefault,omitempty"`
Ispersistent bool `json:"ispersistent,omitempty"`
Issystem bool `json:"issystem,omitempty"`
Name string `json:"name,omitempty"`
Netmask string `json:"netmask,omitempty"`
Networkcidr string `json:"networkcidr,omitempty"`
Networkdomain string `json:"networkdomain,omitempty"`
Networkofferingavailability string `json:"networkofferingavailability,omitempty"`
Networkofferingconservemode bool `json:"networkofferingconservemode,omitempty"`
Networkofferingdisplaytext string `json:"networkofferingdisplaytext,omitempty"`
Networkofferingid string `json:"networkofferingid,omitempty"`
Networkofferingname string `json:"networkofferingname,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Project string `json:"project,omitempty"`
Projectid string `json:"projectid,omitempty"`
Related string `json:"related,omitempty"`
Reservediprange string `json:"reservediprange,omitempty"`
Restartrequired bool `json:"restartrequired,omitempty"`
Service []struct {
Capability []struct {
Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"`
Name string `json:"name,omitempty"`
Value string `json:"value,omitempty"`
} `json:"capability,omitempty"`
Name string `json:"name,omitempty"`
Provider []struct {
Canenableindividualservice bool `json:"canenableindividualservice,omitempty"`
Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Servicelist []string `json:"servicelist,omitempty"`
State string `json:"state,omitempty"`
} `json:"provider,omitempty"`
} `json:"service,omitempty"`
Specifyipranges bool `json:"specifyipranges,omitempty"`
State string `json:"state,omitempty"`
Strechedl2subnet bool `json:"strechedl2subnet,omitempty"`
Subdomainaccess bool `json:"subdomainaccess,omitempty"`
Tags []struct {
Account string `json:"account,omitempty"`
Customer string `json:"customer,omitempty"`
Domain string `json:"domain,omitempty"`
Domainid string `json:"domainid,omitempty"`
Key string `json:"key,omitempty"`
Project string `json:"project,omitempty"`
Projectid string `json:"projectid,omitempty"`
Resourceid string `json:"resourceid,omitempty"`
Resourcetype string `json:"resourcetype,omitempty"`
Value string `json:"value,omitempty"`
} `json:"tags,omitempty"`
Traffictype string `json:"traffictype,omitempty"`
Type string `json:"type,omitempty"`
Vlan string `json:"vlan,omitempty"`
Vpcid string `json:"vpcid,omitempty"`
Zoneid string `json:"zoneid,omitempty"`
Zonename string `json:"zonename,omitempty"`
Zonesnetworkspans []string `json:"zonesnetworkspans,omitempty"`
}
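// A minimal usage sketch, not part of the generated API: rename a network and
// update its display text via the update call above.
func exampleRenameNetwork(s *NetworkService, id, name, displaytext string) error {
	p := s.NewUpdateNetworkParams(id)
	p.SetName(name)
	p.SetDisplaytext(displaytext)
	_, err := s.UpdateNetwork(p)
	return err
}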
type CreatePhysicalNetworkParams struct {
p map[string]interface{}
}
func (p *CreatePhysicalNetworkParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["broadcastdomainrange"]; found {
u.Set("broadcastdomainrange", v.(string))
}
if v, found := p.p["domainid"]; found {
u.Set("domainid", v.(string))
}
if v, found := p.p["isolationmethods"]; found {
vv := strings.Join(v.([]string), ",")
u.Set("isolationmethods", vv)
}
if v, found := p.p["name"]; found {
u.Set("name", v.(string))
}
if v, found := p.p["networkspeed"]; found {
u.Set("networkspeed", v.(string))
}
if v, found := p.p["tags"]; found {
vv := strings.Join(v.([]string), ",")
u.Set("tags", vv)
}
if v, found := p.p["vlan"]; found {
u.Set("vlan", v.(string))
}
if v, found := p.p["zoneid"]; found {
u.Set("zoneid", v.(string))
}
return u
}
func (p *CreatePhysicalNetworkParams) SetBroadcastdomainrange(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["broadcastdomainrange"] = v
return
}
func (p *CreatePhysicalNetworkParams) SetDomainid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["domainid"] = v
return
}
func (p *CreatePhysicalNetworkParams) SetIsolationmethods(v []string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["isolationmethods"] = v
return
}
func (p *CreatePhysicalNetworkParams) SetName(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["name"] = v
return
}
func (p *CreatePhysicalNetworkParams) SetNetworkspeed(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["networkspeed"] = v
return
}
func (p *CreatePhysicalNetworkParams) SetTags(v []string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["tags"] = v
return
}
func (p *CreatePhysicalNetworkParams) SetVlan(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["vlan"] = v
return
}
func (p *CreatePhysicalNetworkParams) SetZoneid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["zoneid"] = v
return
}
// You should always use this function to get a new CreatePhysicalNetworkParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewCreatePhysicalNetworkParams(name string, zoneid string) *CreatePhysicalNetworkParams {
p := &CreatePhysicalNetworkParams{}
p.p = make(map[string]interface{})
p.p["name"] = name
p.p["zoneid"] = zoneid
return p
}
// Creates a physical network
func (s *NetworkService) CreatePhysicalNetwork(p *CreatePhysicalNetworkParams) (*CreatePhysicalNetworkResponse, error) {
resp, err := s.cs.newRequest("createPhysicalNetwork", p.toURLValues())
if err != nil {
return nil, err
}
var r CreatePhysicalNetworkResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
// If we have an async client, we need to wait for the async result
if s.cs.async {
b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
if err != nil {
if err == AsyncTimeoutErr {
return &r, err
}
return nil, err
}
b, err = getRawValue(b)
if err != nil {
return nil, err
}
if err := json.Unmarshal(b, &r); err != nil {
return nil, err
}
}
return &r, nil
}
type CreatePhysicalNetworkResponse struct {
JobID string `json:"jobid,omitempty"`
Broadcastdomainrange string `json:"broadcastdomainrange,omitempty"`
Domainid string `json:"domainid,omitempty"`
Id string `json:"id,omitempty"`
Isolationmethods string `json:"isolationmethods,omitempty"`
Name string `json:"name,omitempty"`
Networkspeed string `json:"networkspeed,omitempty"`
State string `json:"state,omitempty"`
Tags string `json:"tags,omitempty"`
Vlan string `json:"vlan,omitempty"`
Zoneid string `json:"zoneid,omitempty"`
}
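// A minimal usage sketch, not part of the generated API: create a physical
// network that uses VLAN isolation; the zone ID and name are placeholders.
func exampleCreatePhysicalNetwork(s *NetworkService, zoneid string) (string, error) {
	p := s.NewCreatePhysicalNetworkParams("physnet-example", zoneid)
	p.SetIsolationmethods([]string{"VLAN"})
	r, err := s.CreatePhysicalNetwork(p)
	if err != nil {
		return "", err
	}
	return r.Id, nil
}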
type DeletePhysicalNetworkParams struct {
p map[string]interface{}
}
func (p *DeletePhysicalNetworkParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["id"]; found {
u.Set("id", v.(string))
}
return u
}
func (p *DeletePhysicalNetworkParams) SetId(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["id"] = v
return
}
// You should always use this function to get a new DeletePhysicalNetworkParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewDeletePhysicalNetworkParams(id string) *DeletePhysicalNetworkParams {
p := &DeletePhysicalNetworkParams{}
p.p = make(map[string]interface{})
p.p["id"] = id
return p
}
// Deletes a Physical Network.
func (s *NetworkService) DeletePhysicalNetwork(p *DeletePhysicalNetworkParams) (*DeletePhysicalNetworkResponse, error) {
resp, err := s.cs.newRequest("deletePhysicalNetwork", p.toURLValues())
if err != nil {
return nil, err
}
var r DeletePhysicalNetworkResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
// If we have an async client, we need to wait for the async result
if s.cs.async {
b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
if err != nil {
if err == AsyncTimeoutErr {
return &r, err
}
return nil, err
}
if err := json.Unmarshal(b, &r); err != nil {
return nil, err
}
}
return &r, nil
}
type DeletePhysicalNetworkResponse struct {
JobID string `json:"jobid,omitempty"`
Displaytext string `json:"displaytext,omitempty"`
Success bool `json:"success,omitempty"`
}
type ListPhysicalNetworksParams struct {
p map[string]interface{}
}
func (p *ListPhysicalNetworksParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["id"]; found {
u.Set("id", v.(string))
}
if v, found := p.p["keyword"]; found {
u.Set("keyword", v.(string))
}
if v, found := p.p["name"]; found {
u.Set("name", v.(string))
}
if v, found := p.p["page"]; found {
vv := strconv.Itoa(v.(int))
u.Set("page", vv)
}
if v, found := p.p["pagesize"]; found {
vv := strconv.Itoa(v.(int))
u.Set("pagesize", vv)
}
if v, found := p.p["zoneid"]; found {
u.Set("zoneid", v.(string))
}
return u
}
func (p *ListPhysicalNetworksParams) SetId(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["id"] = v
return
}
func (p *ListPhysicalNetworksParams) SetKeyword(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["keyword"] = v
return
}
func (p *ListPhysicalNetworksParams) SetName(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["name"] = v
return
}
func (p *ListPhysicalNetworksParams) SetPage(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["page"] = v
return
}
func (p *ListPhysicalNetworksParams) SetPagesize(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["pagesize"] = v
return
}
func (p *ListPhysicalNetworksParams) SetZoneid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["zoneid"] = v
return
}
// You should always use this function to get a new ListPhysicalNetworksParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewListPhysicalNetworksParams() *ListPhysicalNetworksParams {
p := &ListPhysicalNetworksParams{}
p.p = make(map[string]interface{})
return p
}
// This is a courtesy helper function, which in some cases may not work as expected!
func (s *NetworkService) GetPhysicalNetworkID(name string) (string, error) {
p := &ListPhysicalNetworksParams{}
p.p = make(map[string]interface{})
p.p["name"] = name
l, err := s.ListPhysicalNetworks(p)
if err != nil {
return "", err
}
if l.Count == 0 {
return "", fmt.Errorf("No match found for %s: %+v", name, l)
}
if l.Count == 1 {
return l.PhysicalNetworks[0].Id, nil
}
if l.Count > 1 {
for _, v := range l.PhysicalNetworks {
if v.Name == name {
return v.Id, nil
}
}
}
return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l)
}
// This is a courtesy helper function, which in some cases may not work as expected!
func (s *NetworkService) GetPhysicalNetworkByName(name string) (*PhysicalNetwork, int, error) {
id, err := s.GetPhysicalNetworkID(name)
if err != nil {
return nil, -1, err
}
r, count, err := s.GetPhysicalNetworkByID(id)
if err != nil {
return nil, count, err
}
return r, count, nil
}
// This is a courtesy helper function, which in some cases may not work as expected!
func (s *NetworkService) GetPhysicalNetworkByID(id string) (*PhysicalNetwork, int, error) {
p := &ListPhysicalNetworksParams{}
p.p = make(map[string]interface{})
p.p["id"] = id
l, err := s.ListPhysicalNetworks(p)
if err != nil {
if strings.Contains(err.Error(), fmt.Sprintf(
"Invalid parameter id value=%s due to incorrect long value format, "+
"or entity does not exist", id)) {
return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
}
return nil, -1, err
}
if l.Count == 0 {
return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
}
if l.Count == 1 {
return l.PhysicalNetworks[0], l.Count, nil
}
return nil, l.Count, fmt.Errorf("There is more then one result for PhysicalNetwork UUID: %s!", id)
}
// Lists physical networks
func (s *NetworkService) ListPhysicalNetworks(p *ListPhysicalNetworksParams) (*ListPhysicalNetworksResponse, error) {
resp, err := s.cs.newRequest("listPhysicalNetworks", p.toURLValues())
if err != nil {
return nil, err
}
var r ListPhysicalNetworksResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
return &r, nil
}
type ListPhysicalNetworksResponse struct {
Count int `json:"count"`
PhysicalNetworks []*PhysicalNetwork `json:"physicalnetwork"`
}
type PhysicalNetwork struct {
Broadcastdomainrange string `json:"broadcastdomainrange,omitempty"`
Domainid string `json:"domainid,omitempty"`
Id string `json:"id,omitempty"`
Isolationmethods string `json:"isolationmethods,omitempty"`
Name string `json:"name,omitempty"`
Networkspeed string `json:"networkspeed,omitempty"`
State string `json:"state,omitempty"`
Tags string `json:"tags,omitempty"`
Vlan string `json:"vlan,omitempty"`
Zoneid string `json:"zoneid,omitempty"`
}
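// A minimal usage sketch, not part of the generated API: resolve a physical
// network by name through the courtesy helper above and report its state;
// as the helper's comment warns, ambiguous names may not resolve.
func examplePhysicalNetworkState(s *NetworkService, name string) (string, error) {
	pn, _, err := s.GetPhysicalNetworkByName(name)
	if err != nil {
		return "", err
	}
	return pn.State, nil
}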
type UpdatePhysicalNetworkParams struct {
p map[string]interface{}
}
func (p *UpdatePhysicalNetworkParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["id"]; found {
u.Set("id", v.(string))
}
if v, found := p.p["networkspeed"]; found {
u.Set("networkspeed", v.(string))
}
if v, found := p.p["state"]; found {
u.Set("state", v.(string))
}
if v, found := p.p["tags"]; found {
vv := strings.Join(v.([]string), ",")
u.Set("tags", vv)
}
if v, found := p.p["vlan"]; found {
u.Set("vlan", v.(string))
}
return u
}
func (p *UpdatePhysicalNetworkParams) SetId(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["id"] = v
return
}
func (p *UpdatePhysicalNetworkParams) SetNetworkspeed(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["networkspeed"] = v
return
}
func (p *UpdatePhysicalNetworkParams) SetState(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["state"] = v
return
}
func (p *UpdatePhysicalNetworkParams) SetTags(v []string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["tags"] = v
return
}
func (p *UpdatePhysicalNetworkParams) SetVlan(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["vlan"] = v
return
}
// You should always use this function to get a new UpdatePhysicalNetworkParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewUpdatePhysicalNetworkParams(id string) *UpdatePhysicalNetworkParams {
p := &UpdatePhysicalNetworkParams{}
p.p = make(map[string]interface{})
p.p["id"] = id
return p
}
// Updates a physical network
func (s *NetworkService) UpdatePhysicalNetwork(p *UpdatePhysicalNetworkParams) (*UpdatePhysicalNetworkResponse, error) {
resp, err := s.cs.newRequest("updatePhysicalNetwork", p.toURLValues())
if err != nil {
return nil, err
}
var r UpdatePhysicalNetworkResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
// If we have an async client, we need to wait for the async result
if s.cs.async {
b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
if err != nil {
if err == AsyncTimeoutErr {
return &r, err
}
return nil, err
}
b, err = getRawValue(b)
if err != nil {
return nil, err
}
if err := json.Unmarshal(b, &r); err != nil {
return nil, err
}
}
return &r, nil
}
type UpdatePhysicalNetworkResponse struct {
JobID string `json:"jobid,omitempty"`
Broadcastdomainrange string `json:"broadcastdomainrange,omitempty"`
Domainid string `json:"domainid,omitempty"`
Id string `json:"id,omitempty"`
Isolationmethods string `json:"isolationmethods,omitempty"`
Name string `json:"name,omitempty"`
Networkspeed string `json:"networkspeed,omitempty"`
State string `json:"state,omitempty"`
Tags string `json:"tags,omitempty"`
Vlan string `json:"vlan,omitempty"`
Zoneid string `json:"zoneid,omitempty"`
}
type ListSupportedNetworkServicesParams struct {
p map[string]interface{}
}
func (p *ListSupportedNetworkServicesParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["keyword"]; found {
u.Set("keyword", v.(string))
}
if v, found := p.p["page"]; found {
vv := strconv.Itoa(v.(int))
u.Set("page", vv)
}
if v, found := p.p["pagesize"]; found {
vv := strconv.Itoa(v.(int))
u.Set("pagesize", vv)
}
if v, found := p.p["provider"]; found {
u.Set("provider", v.(string))
}
if v, found := p.p["service"]; found {
u.Set("service", v.(string))
}
return u
}
func (p *ListSupportedNetworkServicesParams) SetKeyword(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["keyword"] = v
return
}
func (p *ListSupportedNetworkServicesParams) SetPage(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["page"] = v
return
}
func (p *ListSupportedNetworkServicesParams) SetPagesize(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["pagesize"] = v
return
}
func (p *ListSupportedNetworkServicesParams) SetProvider(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["provider"] = v
return
}
func (p *ListSupportedNetworkServicesParams) SetService(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["service"] = v
return
}
// You should always use this function to get a new ListSupportedNetworkServicesParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewListSupportedNetworkServicesParams() *ListSupportedNetworkServicesParams {
p := &ListSupportedNetworkServicesParams{}
p.p = make(map[string]interface{})
return p
}
// Lists all network services provided by CloudStack or for the given Provider.
func (s *NetworkService) ListSupportedNetworkServices(p *ListSupportedNetworkServicesParams) (*ListSupportedNetworkServicesResponse, error) {
resp, err := s.cs.newRequest("listSupportedNetworkServices", p.toURLValues())
if err != nil {
return nil, err
}
var r ListSupportedNetworkServicesResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
return &r, nil
}
type ListSupportedNetworkServicesResponse struct {
Count int `json:"count"`
SupportedNetworkServices []*SupportedNetworkService `json:"supportednetworkservice"`
}
type SupportedNetworkService struct {
Capability []struct {
Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"`
Name string `json:"name,omitempty"`
Value string `json:"value,omitempty"`
} `json:"capability,omitempty"`
Name string `json:"name,omitempty"`
Provider []struct {
Canenableindividualservice bool `json:"canenableindividualservice,omitempty"`
Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Servicelist []string `json:"servicelist,omitempty"`
State string `json:"state,omitempty"`
} `json:"provider,omitempty"`
}
type AddNetworkServiceProviderParams struct {
p map[string]interface{}
}
func (p *AddNetworkServiceProviderParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["destinationphysicalnetworkid"]; found {
u.Set("destinationphysicalnetworkid", v.(string))
}
if v, found := p.p["name"]; found {
u.Set("name", v.(string))
}
if v, found := p.p["physicalnetworkid"]; found {
u.Set("physicalnetworkid", v.(string))
}
if v, found := p.p["servicelist"]; found {
vv := strings.Join(v.([]string), ",")
u.Set("servicelist", vv)
}
return u
}
func (p *AddNetworkServiceProviderParams) SetDestinationphysicalnetworkid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["destinationphysicalnetworkid"] = v
return
}
func (p *AddNetworkServiceProviderParams) SetName(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["name"] = v
return
}
func (p *AddNetworkServiceProviderParams) SetPhysicalnetworkid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["physicalnetworkid"] = v
return
}
func (p *AddNetworkServiceProviderParams) SetServicelist(v []string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["servicelist"] = v
return
}
// You should always use this function to get a new AddNetworkServiceProviderParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewAddNetworkServiceProviderParams(name string, physicalnetworkid string) *AddNetworkServiceProviderParams {
p := &AddNetworkServiceProviderParams{}
p.p = make(map[string]interface{})
p.p["name"] = name
p.p["physicalnetworkid"] = physicalnetworkid
return p
}
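// Illustrative usage sketch, not part of the generated API: the constructor takes the
// two required arguments (provider name and physical network UUID), optional fields
// such as the service list are set afterwards, and the async job is awaited inside
// AddNetworkServiceProvider when the client is asynchronous. All concrete values
// below are placeholders.
func exampleAddNetworkServiceProvider(cs *CloudStackClient) (string, error) {
	p := cs.Network.NewAddNetworkServiceProviderParams("VirtualRouter", "physical-network-uuid")
	p.SetServicelist([]string{"Dhcp", "Dns", "SourceNat"})
	r, err := cs.Network.AddNetworkServiceProvider(p)
	if err != nil {
		return "", err
	}
	return r.Id, nil
}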
// Adds a network service provider to a physical network
func (s *NetworkService) AddNetworkServiceProvider(p *AddNetworkServiceProviderParams) (*AddNetworkServiceProviderResponse, error) {
resp, err := s.cs.newRequest("addNetworkServiceProvider", p.toURLValues())
if err != nil {
return nil, err
}
var r AddNetworkServiceProviderResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
	// If we have an async client, we need to wait for the async result
if s.cs.async {
b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
if err != nil {
if err == AsyncTimeoutErr {
return &r, err
}
return nil, err
}
b, err = getRawValue(b)
if err != nil {
return nil, err
}
if err := json.Unmarshal(b, &r); err != nil {
return nil, err
}
}
return &r, nil
}
type AddNetworkServiceProviderResponse struct {
JobID string `json:"jobid,omitempty"`
Canenableindividualservice bool `json:"canenableindividualservice,omitempty"`
Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Servicelist []string `json:"servicelist,omitempty"`
State string `json:"state,omitempty"`
}
type DeleteNetworkServiceProviderParams struct {
p map[string]interface{}
}
func (p *DeleteNetworkServiceProviderParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["id"]; found {
u.Set("id", v.(string))
}
return u
}
func (p *DeleteNetworkServiceProviderParams) SetId(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["id"] = v
return
}
// You should always use this function to get a new DeleteNetworkServiceProviderParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewDeleteNetworkServiceProviderParams(id string) *DeleteNetworkServiceProviderParams {
p := &DeleteNetworkServiceProviderParams{}
p.p = make(map[string]interface{})
p.p["id"] = id
return p
}
// Deletes a Network Service Provider.
func (s *NetworkService) DeleteNetworkServiceProvider(p *DeleteNetworkServiceProviderParams) (*DeleteNetworkServiceProviderResponse, error) {
resp, err := s.cs.newRequest("deleteNetworkServiceProvider", p.toURLValues())
if err != nil {
return nil, err
}
var r DeleteNetworkServiceProviderResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
	// If we have an async client, we need to wait for the async result
if s.cs.async {
b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
if err != nil {
if err == AsyncTimeoutErr {
return &r, err
}
return nil, err
}
if err := json.Unmarshal(b, &r); err != nil {
return nil, err
}
}
return &r, nil
}
type DeleteNetworkServiceProviderResponse struct {
JobID string `json:"jobid,omitempty"`
Displaytext string `json:"displaytext,omitempty"`
Success bool `json:"success,omitempty"`
}
type ListNetworkServiceProvidersParams struct {
p map[string]interface{}
}
func (p *ListNetworkServiceProvidersParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["keyword"]; found {
u.Set("keyword", v.(string))
}
if v, found := p.p["name"]; found {
u.Set("name", v.(string))
}
if v, found := p.p["page"]; found {
vv := strconv.Itoa(v.(int))
u.Set("page", vv)
}
if v, found := p.p["pagesize"]; found {
vv := strconv.Itoa(v.(int))
u.Set("pagesize", vv)
}
if v, found := p.p["physicalnetworkid"]; found {
u.Set("physicalnetworkid", v.(string))
}
if v, found := p.p["state"]; found {
u.Set("state", v.(string))
}
return u
}
func (p *ListNetworkServiceProvidersParams) SetKeyword(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["keyword"] = v
return
}
func (p *ListNetworkServiceProvidersParams) SetName(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["name"] = v
return
}
func (p *ListNetworkServiceProvidersParams) SetPage(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["page"] = v
return
}
func (p *ListNetworkServiceProvidersParams) SetPagesize(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["pagesize"] = v
return
}
func (p *ListNetworkServiceProvidersParams) SetPhysicalnetworkid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["physicalnetworkid"] = v
return
}
func (p *ListNetworkServiceProvidersParams) SetState(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["state"] = v
return
}
// You should always use this function to get a new ListNetworkServiceProvidersParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewListNetworkServiceProvidersParams() *ListNetworkServiceProvidersParams {
p := &ListNetworkServiceProvidersParams{}
p.p = make(map[string]interface{})
return p
}
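// Illustrative usage sketch, not part of the generated API: list calls accept
// optional filters and paging via the Set* helpers; cs and the physical network UUID
// are assumed to be supplied by the caller.
func exampleListNetworkServiceProviders(cs *CloudStackClient) error {
	p := cs.Network.NewListNetworkServiceProvidersParams()
	p.SetPhysicalnetworkid("physical-network-uuid")
	p.SetPage(1)
	p.SetPagesize(50)
	l, err := cs.Network.ListNetworkServiceProviders(p)
	if err != nil {
		return err
	}
	for _, nsp := range l.NetworkServiceProviders {
		fmt.Printf("%s: %s\n", nsp.Name, nsp.State)
	}
	return nil
}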
// This is a courtesy helper function, which in some cases may not work as expected!
func (s *NetworkService) GetNetworkServiceProviderID(name string) (string, error) {
p := &ListNetworkServiceProvidersParams{}
p.p = make(map[string]interface{})
p.p["name"] = name
l, err := s.ListNetworkServiceProviders(p)
if err != nil {
return "", err
}
if l.Count == 0 {
return "", fmt.Errorf("No match found for %s: %+v", name, l)
}
if l.Count == 1 {
return l.NetworkServiceProviders[0].Id, nil
}
if l.Count > 1 {
for _, v := range l.NetworkServiceProviders {
if v.Name == name {
return v.Id, nil
}
}
}
return "", fmt.Errorf("Could not find an exact match for %s: %+v", name, l)
}
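// Illustrative usage sketch, not part of the generated API: the courtesy helper above
// resolves a provider name to the UUID that other calls expect, for example
// DeleteNetworkServiceProvider; cs and the provider name are assumed inputs.
func exampleDeleteNetworkServiceProviderByName(cs *CloudStackClient, name string) error {
	id, err := cs.Network.GetNetworkServiceProviderID(name)
	if err != nil {
		return err
	}
	_, err = cs.Network.DeleteNetworkServiceProvider(cs.Network.NewDeleteNetworkServiceProviderParams(id))
	return err
}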
// Lists network service providers for a given physical network.
func (s *NetworkService) ListNetworkServiceProviders(p *ListNetworkServiceProvidersParams) (*ListNetworkServiceProvidersResponse, error) {
resp, err := s.cs.newRequest("listNetworkServiceProviders", p.toURLValues())
if err != nil {
return nil, err
}
var r ListNetworkServiceProvidersResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
return &r, nil
}
type ListNetworkServiceProvidersResponse struct {
Count int `json:"count"`
NetworkServiceProviders []*NetworkServiceProvider `json:"networkserviceprovider"`
}
type NetworkServiceProvider struct {
Canenableindividualservice bool `json:"canenableindividualservice,omitempty"`
Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Servicelist []string `json:"servicelist,omitempty"`
State string `json:"state,omitempty"`
}
type UpdateNetworkServiceProviderParams struct {
p map[string]interface{}
}
func (p *UpdateNetworkServiceProviderParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["id"]; found {
u.Set("id", v.(string))
}
if v, found := p.p["servicelist"]; found {
vv := strings.Join(v.([]string), ",")
u.Set("servicelist", vv)
}
if v, found := p.p["state"]; found {
u.Set("state", v.(string))
}
return u
}
func (p *UpdateNetworkServiceProviderParams) SetId(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["id"] = v
return
}
func (p *UpdateNetworkServiceProviderParams) SetServicelist(v []string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["servicelist"] = v
return
}
func (p *UpdateNetworkServiceProviderParams) SetState(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["state"] = v
return
}
// You should always use this function to get a new UpdateNetworkServiceProviderParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewUpdateNetworkServiceProviderParams(id string) *UpdateNetworkServiceProviderParams {
p := &UpdateNetworkServiceProviderParams{}
p.p = make(map[string]interface{})
p.p["id"] = id
return p
}
// Updates a network service provider of a physical network
func (s *NetworkService) UpdateNetworkServiceProvider(p *UpdateNetworkServiceProviderParams) (*UpdateNetworkServiceProviderResponse, error) {
resp, err := s.cs.newRequest("updateNetworkServiceProvider", p.toURLValues())
if err != nil {
return nil, err
}
var r UpdateNetworkServiceProviderResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
	// If we have an async client, we need to wait for the async result
if s.cs.async {
b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
if err != nil {
if err == AsyncTimeoutErr {
return &r, err
}
return nil, err
}
b, err = getRawValue(b)
if err != nil {
return nil, err
}
if err := json.Unmarshal(b, &r); err != nil {
return nil, err
}
}
return &r, nil
}
type UpdateNetworkServiceProviderResponse struct {
JobID string `json:"jobid,omitempty"`
Canenableindividualservice bool `json:"canenableindividualservice,omitempty"`
Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Servicelist []string `json:"servicelist,omitempty"`
State string `json:"state,omitempty"`
}
type CreateStorageNetworkIpRangeParams struct {
p map[string]interface{}
}
func (p *CreateStorageNetworkIpRangeParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["endip"]; found {
u.Set("endip", v.(string))
}
if v, found := p.p["gateway"]; found {
u.Set("gateway", v.(string))
}
if v, found := p.p["netmask"]; found {
u.Set("netmask", v.(string))
}
if v, found := p.p["podid"]; found {
u.Set("podid", v.(string))
}
if v, found := p.p["startip"]; found {
u.Set("startip", v.(string))
}
if v, found := p.p["vlan"]; found {
vv := strconv.Itoa(v.(int))
u.Set("vlan", vv)
}
return u
}
func (p *CreateStorageNetworkIpRangeParams) SetEndip(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["endip"] = v
return
}
func (p *CreateStorageNetworkIpRangeParams) SetGateway(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["gateway"] = v
return
}
func (p *CreateStorageNetworkIpRangeParams) SetNetmask(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["netmask"] = v
return
}
func (p *CreateStorageNetworkIpRangeParams) SetPodid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["podid"] = v
return
}
func (p *CreateStorageNetworkIpRangeParams) SetStartip(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["startip"] = v
return
}
func (p *CreateStorageNetworkIpRangeParams) SetVlan(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["vlan"] = v
return
}
// You should always use this function to get a new CreateStorageNetworkIpRangeParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewCreateStorageNetworkIpRangeParams(gateway string, netmask string, podid string, startip string) *CreateStorageNetworkIpRangeParams {
p := &CreateStorageNetworkIpRangeParams{}
p.p = make(map[string]interface{})
p.p["gateway"] = gateway
p.p["netmask"] = netmask
p.p["podid"] = podid
p.p["startip"] = startip
return p
}
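// Illustrative usage sketch, not part of the generated API: gateway, netmask, pod
// UUID and start IP are required by the constructor above, while the end IP and VLAN
// are optional. All addresses and IDs below are placeholders.
func exampleCreateStorageNetworkIpRange(cs *CloudStackClient) (*CreateStorageNetworkIpRangeResponse, error) {
	p := cs.Network.NewCreateStorageNetworkIpRangeParams("10.1.1.1", "255.255.255.0", "pod-uuid", "10.1.1.100")
	p.SetEndip("10.1.1.200")
	p.SetVlan(100)
	return cs.Network.CreateStorageNetworkIpRange(p)
}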
// Creates a Storage network IP range.
func (s *NetworkService) CreateStorageNetworkIpRange(p *CreateStorageNetworkIpRangeParams) (*CreateStorageNetworkIpRangeResponse, error) {
resp, err := s.cs.newRequest("createStorageNetworkIpRange", p.toURLValues())
if err != nil {
return nil, err
}
var r CreateStorageNetworkIpRangeResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
	// If we have an async client, we need to wait for the async result
if s.cs.async {
b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
if err != nil {
if err == AsyncTimeoutErr {
return &r, err
}
return nil, err
}
b, err = getRawValue(b)
if err != nil {
return nil, err
}
if err := json.Unmarshal(b, &r); err != nil {
return nil, err
}
}
return &r, nil
}
type CreateStorageNetworkIpRangeResponse struct {
JobID string `json:"jobid,omitempty"`
Endip string `json:"endip,omitempty"`
Gateway string `json:"gateway,omitempty"`
Id string `json:"id,omitempty"`
Netmask string `json:"netmask,omitempty"`
Networkid string `json:"networkid,omitempty"`
Podid string `json:"podid,omitempty"`
Startip string `json:"startip,omitempty"`
Vlan int `json:"vlan,omitempty"`
Zoneid string `json:"zoneid,omitempty"`
}
type DeleteStorageNetworkIpRangeParams struct {
p map[string]interface{}
}
func (p *DeleteStorageNetworkIpRangeParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["id"]; found {
u.Set("id", v.(string))
}
return u
}
func (p *DeleteStorageNetworkIpRangeParams) SetId(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["id"] = v
return
}
// You should always use this function to get a new DeleteStorageNetworkIpRangeParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewDeleteStorageNetworkIpRangeParams(id string) *DeleteStorageNetworkIpRangeParams {
p := &DeleteStorageNetworkIpRangeParams{}
p.p = make(map[string]interface{})
p.p["id"] = id
return p
}
// Deletes a storage network IP Range.
func (s *NetworkService) DeleteStorageNetworkIpRange(p *DeleteStorageNetworkIpRangeParams) (*DeleteStorageNetworkIpRangeResponse, error) {
resp, err := s.cs.newRequest("deleteStorageNetworkIpRange", p.toURLValues())
if err != nil {
return nil, err
}
var r DeleteStorageNetworkIpRangeResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
	// If we have an async client, we need to wait for the async result
if s.cs.async {
b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
if err != nil {
if err == AsyncTimeoutErr {
return &r, err
}
return nil, err
}
if err := json.Unmarshal(b, &r); err != nil {
return nil, err
}
}
return &r, nil
}
type DeleteStorageNetworkIpRangeResponse struct {
JobID string `json:"jobid,omitempty"`
Displaytext string `json:"displaytext,omitempty"`
Success bool `json:"success,omitempty"`
}
type ListStorageNetworkIpRangeParams struct {
p map[string]interface{}
}
func (p *ListStorageNetworkIpRangeParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["id"]; found {
u.Set("id", v.(string))
}
if v, found := p.p["keyword"]; found {
u.Set("keyword", v.(string))
}
if v, found := p.p["page"]; found {
vv := strconv.Itoa(v.(int))
u.Set("page", vv)
}
if v, found := p.p["pagesize"]; found {
vv := strconv.Itoa(v.(int))
u.Set("pagesize", vv)
}
if v, found := p.p["podid"]; found {
u.Set("podid", v.(string))
}
if v, found := p.p["zoneid"]; found {
u.Set("zoneid", v.(string))
}
return u
}
func (p *ListStorageNetworkIpRangeParams) SetId(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["id"] = v
return
}
func (p *ListStorageNetworkIpRangeParams) SetKeyword(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["keyword"] = v
return
}
func (p *ListStorageNetworkIpRangeParams) SetPage(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["page"] = v
return
}
func (p *ListStorageNetworkIpRangeParams) SetPagesize(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["pagesize"] = v
return
}
func (p *ListStorageNetworkIpRangeParams) SetPodid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["podid"] = v
return
}
func (p *ListStorageNetworkIpRangeParams) SetZoneid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["zoneid"] = v
return
}
// You should always use this function to get a new ListStorageNetworkIpRangeParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewListStorageNetworkIpRangeParams() *ListStorageNetworkIpRangeParams {
p := &ListStorageNetworkIpRangeParams{}
p.p = make(map[string]interface{})
return p
}
// This is a courtesy helper function, which in some cases may not work as expected!
func (s *NetworkService) GetStorageNetworkIpRangeByID(id string) (*StorageNetworkIpRange, int, error) {
p := &ListStorageNetworkIpRangeParams{}
p.p = make(map[string]interface{})
p.p["id"] = id
l, err := s.ListStorageNetworkIpRange(p)
if err != nil {
if strings.Contains(err.Error(), fmt.Sprintf(
"Invalid parameter id value=%s due to incorrect long value format, "+
"or entity does not exist", id)) {
return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
}
return nil, -1, err
}
if l.Count == 0 {
return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
}
if l.Count == 1 {
return l.StorageNetworkIpRange[0], l.Count, nil
}
return nil, l.Count, fmt.Errorf("There is more then one result for StorageNetworkIpRange UUID: %s!", id)
}
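// Illustrative usage sketch, not part of the generated API: the ByID helper above
// returns the matching range plus the number of matches, so a caller can tell a
// missing range apart from other failures; cs and id are assumed inputs.
func examplePrintStorageNetworkIpRange(cs *CloudStackClient, id string) error {
	r, count, err := cs.Network.GetStorageNetworkIpRangeByID(id)
	if err != nil {
		return fmt.Errorf("lookup failed (matches: %d): %v", count, err)
	}
	fmt.Printf("range %s spans %s - %s\n", r.Id, r.Startip, r.Endip)
	return nil
}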
// List a storage network IP range.
func (s *NetworkService) ListStorageNetworkIpRange(p *ListStorageNetworkIpRangeParams) (*ListStorageNetworkIpRangeResponse, error) {
resp, err := s.cs.newRequest("listStorageNetworkIpRange", p.toURLValues())
if err != nil {
return nil, err
}
var r ListStorageNetworkIpRangeResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
return &r, nil
}
type ListStorageNetworkIpRangeResponse struct {
Count int `json:"count"`
StorageNetworkIpRange []*StorageNetworkIpRange `json:"storagenetworkiprange"`
}
type StorageNetworkIpRange struct {
Endip string `json:"endip,omitempty"`
Gateway string `json:"gateway,omitempty"`
Id string `json:"id,omitempty"`
Netmask string `json:"netmask,omitempty"`
Networkid string `json:"networkid,omitempty"`
Podid string `json:"podid,omitempty"`
Startip string `json:"startip,omitempty"`
Vlan int `json:"vlan,omitempty"`
Zoneid string `json:"zoneid,omitempty"`
}
type UpdateStorageNetworkIpRangeParams struct {
p map[string]interface{}
}
func (p *UpdateStorageNetworkIpRangeParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["endip"]; found {
u.Set("endip", v.(string))
}
if v, found := p.p["id"]; found {
u.Set("id", v.(string))
}
if v, found := p.p["netmask"]; found {
u.Set("netmask", v.(string))
}
if v, found := p.p["startip"]; found {
u.Set("startip", v.(string))
}
if v, found := p.p["vlan"]; found {
vv := strconv.Itoa(v.(int))
u.Set("vlan", vv)
}
return u
}
func (p *UpdateStorageNetworkIpRangeParams) SetEndip(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["endip"] = v
return
}
func (p *UpdateStorageNetworkIpRangeParams) SetId(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["id"] = v
return
}
func (p *UpdateStorageNetworkIpRangeParams) SetNetmask(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["netmask"] = v
return
}
func (p *UpdateStorageNetworkIpRangeParams) SetStartip(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["startip"] = v
return
}
func (p *UpdateStorageNetworkIpRangeParams) SetVlan(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["vlan"] = v
return
}
// You should always use this function to get a new UpdateStorageNetworkIpRangeParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewUpdateStorageNetworkIpRangeParams(id string) *UpdateStorageNetworkIpRangeParams {
p := &UpdateStorageNetworkIpRangeParams{}
p.p = make(map[string]interface{})
p.p["id"] = id
return p
}
// Update a Storage network IP range, only allowed when no IPs in this range have been allocated.
func (s *NetworkService) UpdateStorageNetworkIpRange(p *UpdateStorageNetworkIpRangeParams) (*UpdateStorageNetworkIpRangeResponse, error) {
resp, err := s.cs.newRequest("updateStorageNetworkIpRange", p.toURLValues())
if err != nil {
return nil, err
}
var r UpdateStorageNetworkIpRangeResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
	// If we have an async client, we need to wait for the async result
if s.cs.async {
b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
if err != nil {
if err == AsyncTimeoutErr {
return &r, err
}
return nil, err
}
b, err = getRawValue(b)
if err != nil {
return nil, err
}
if err := json.Unmarshal(b, &r); err != nil {
return nil, err
}
}
return &r, nil
}
type UpdateStorageNetworkIpRangeResponse struct {
JobID string `json:"jobid,omitempty"`
Endip string `json:"endip,omitempty"`
Gateway string `json:"gateway,omitempty"`
Id string `json:"id,omitempty"`
Netmask string `json:"netmask,omitempty"`
Networkid string `json:"networkid,omitempty"`
Podid string `json:"podid,omitempty"`
Startip string `json:"startip,omitempty"`
Vlan int `json:"vlan,omitempty"`
Zoneid string `json:"zoneid,omitempty"`
}
type ListPaloAltoFirewallNetworksParams struct {
p map[string]interface{}
}
func (p *ListPaloAltoFirewallNetworksParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["keyword"]; found {
u.Set("keyword", v.(string))
}
if v, found := p.p["lbdeviceid"]; found {
u.Set("lbdeviceid", v.(string))
}
if v, found := p.p["page"]; found {
vv := strconv.Itoa(v.(int))
u.Set("page", vv)
}
if v, found := p.p["pagesize"]; found {
vv := strconv.Itoa(v.(int))
u.Set("pagesize", vv)
}
return u
}
func (p *ListPaloAltoFirewallNetworksParams) SetKeyword(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["keyword"] = v
return
}
func (p *ListPaloAltoFirewallNetworksParams) SetLbdeviceid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["lbdeviceid"] = v
return
}
func (p *ListPaloAltoFirewallNetworksParams) SetPage(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["page"] = v
return
}
func (p *ListPaloAltoFirewallNetworksParams) SetPagesize(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["pagesize"] = v
return
}
// You should always use this function to get a new ListPaloAltoFirewallNetworksParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewListPaloAltoFirewallNetworksParams(lbdeviceid string) *ListPaloAltoFirewallNetworksParams {
p := &ListPaloAltoFirewallNetworksParams{}
p.p = make(map[string]interface{})
p.p["lbdeviceid"] = lbdeviceid
return p
}
// This is a courtesy helper function, which in some cases may not work as expected!
func (s *NetworkService) GetPaloAltoFirewallNetworkID(keyword string, lbdeviceid string) (string, error) {
p := &ListPaloAltoFirewallNetworksParams{}
p.p = make(map[string]interface{})
p.p["keyword"] = keyword
p.p["lbdeviceid"] = lbdeviceid
l, err := s.ListPaloAltoFirewallNetworks(p)
if err != nil {
return "", err
}
if l.Count == 0 {
return "", fmt.Errorf("No match found for %s: %+v", keyword, l)
}
if l.Count == 1 {
return l.PaloAltoFirewallNetworks[0].Id, nil
}
if l.Count > 1 {
for _, v := range l.PaloAltoFirewallNetworks {
if v.Name == keyword {
return v.Id, nil
}
}
}
return "", fmt.Errorf("Could not find an exact match for %s: %+v", keyword, l)
}
// Lists networks that are using a Palo Alto firewall device
func (s *NetworkService) ListPaloAltoFirewallNetworks(p *ListPaloAltoFirewallNetworksParams) (*ListPaloAltoFirewallNetworksResponse, error) {
resp, err := s.cs.newRequest("listPaloAltoFirewallNetworks", p.toURLValues())
if err != nil {
return nil, err
}
var r ListPaloAltoFirewallNetworksResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
return &r, nil
}
type ListPaloAltoFirewallNetworksResponse struct {
Count int `json:"count"`
PaloAltoFirewallNetworks []*PaloAltoFirewallNetwork `json:"paloaltofirewallnetwork"`
}
type PaloAltoFirewallNetwork struct {
Account string `json:"account,omitempty"`
Aclid string `json:"aclid,omitempty"`
Acltype string `json:"acltype,omitempty"`
Broadcastdomaintype string `json:"broadcastdomaintype,omitempty"`
Broadcasturi string `json:"broadcasturi,omitempty"`
Canusefordeploy bool `json:"canusefordeploy,omitempty"`
Cidr string `json:"cidr,omitempty"`
Displaynetwork bool `json:"displaynetwork,omitempty"`
Displaytext string `json:"displaytext,omitempty"`
Dns1 string `json:"dns1,omitempty"`
Dns2 string `json:"dns2,omitempty"`
Domain string `json:"domain,omitempty"`
Domainid string `json:"domainid,omitempty"`
Gateway string `json:"gateway,omitempty"`
Id string `json:"id,omitempty"`
Ip6cidr string `json:"ip6cidr,omitempty"`
Ip6gateway string `json:"ip6gateway,omitempty"`
Isdefault bool `json:"isdefault,omitempty"`
Ispersistent bool `json:"ispersistent,omitempty"`
Issystem bool `json:"issystem,omitempty"`
Name string `json:"name,omitempty"`
Netmask string `json:"netmask,omitempty"`
Networkcidr string `json:"networkcidr,omitempty"`
Networkdomain string `json:"networkdomain,omitempty"`
Networkofferingavailability string `json:"networkofferingavailability,omitempty"`
Networkofferingconservemode bool `json:"networkofferingconservemode,omitempty"`
Networkofferingdisplaytext string `json:"networkofferingdisplaytext,omitempty"`
Networkofferingid string `json:"networkofferingid,omitempty"`
Networkofferingname string `json:"networkofferingname,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Project string `json:"project,omitempty"`
Projectid string `json:"projectid,omitempty"`
Related string `json:"related,omitempty"`
Reservediprange string `json:"reservediprange,omitempty"`
Restartrequired bool `json:"restartrequired,omitempty"`
Service []struct {
Capability []struct {
Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"`
Name string `json:"name,omitempty"`
Value string `json:"value,omitempty"`
} `json:"capability,omitempty"`
Name string `json:"name,omitempty"`
Provider []struct {
Canenableindividualservice bool `json:"canenableindividualservice,omitempty"`
Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Servicelist []string `json:"servicelist,omitempty"`
State string `json:"state,omitempty"`
} `json:"provider,omitempty"`
} `json:"service,omitempty"`
Specifyipranges bool `json:"specifyipranges,omitempty"`
State string `json:"state,omitempty"`
Strechedl2subnet bool `json:"strechedl2subnet,omitempty"`
Subdomainaccess bool `json:"subdomainaccess,omitempty"`
Tags []struct {
Account string `json:"account,omitempty"`
Customer string `json:"customer,omitempty"`
Domain string `json:"domain,omitempty"`
Domainid string `json:"domainid,omitempty"`
Key string `json:"key,omitempty"`
Project string `json:"project,omitempty"`
Projectid string `json:"projectid,omitempty"`
Resourceid string `json:"resourceid,omitempty"`
Resourcetype string `json:"resourcetype,omitempty"`
Value string `json:"value,omitempty"`
} `json:"tags,omitempty"`
Traffictype string `json:"traffictype,omitempty"`
Type string `json:"type,omitempty"`
Vlan string `json:"vlan,omitempty"`
Vpcid string `json:"vpcid,omitempty"`
Zoneid string `json:"zoneid,omitempty"`
Zonename string `json:"zonename,omitempty"`
Zonesnetworkspans []string `json:"zonesnetworkspans,omitempty"`
}
type ListNetscalerLoadBalancerNetworksParams struct {
p map[string]interface{}
}
func (p *ListNetscalerLoadBalancerNetworksParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["keyword"]; found {
u.Set("keyword", v.(string))
}
if v, found := p.p["lbdeviceid"]; found {
u.Set("lbdeviceid", v.(string))
}
if v, found := p.p["page"]; found {
vv := strconv.Itoa(v.(int))
u.Set("page", vv)
}
if v, found := p.p["pagesize"]; found {
vv := strconv.Itoa(v.(int))
u.Set("pagesize", vv)
}
return u
}
func (p *ListNetscalerLoadBalancerNetworksParams) SetKeyword(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["keyword"] = v
return
}
func (p *ListNetscalerLoadBalancerNetworksParams) SetLbdeviceid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["lbdeviceid"] = v
return
}
func (p *ListNetscalerLoadBalancerNetworksParams) SetPage(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["page"] = v
return
}
func (p *ListNetscalerLoadBalancerNetworksParams) SetPagesize(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["pagesize"] = v
return
}
// You should always use this function to get a new ListNetscalerLoadBalancerNetworksParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewListNetscalerLoadBalancerNetworksParams(lbdeviceid string) *ListNetscalerLoadBalancerNetworksParams {
p := &ListNetscalerLoadBalancerNetworksParams{}
p.p = make(map[string]interface{})
p.p["lbdeviceid"] = lbdeviceid
return p
}
// This is a courtesy helper function, which in some cases may not work as expected!
func (s *NetworkService) GetNetscalerLoadBalancerNetworkID(keyword string, lbdeviceid string) (string, error) {
p := &ListNetscalerLoadBalancerNetworksParams{}
p.p = make(map[string]interface{})
p.p["keyword"] = keyword
p.p["lbdeviceid"] = lbdeviceid
l, err := s.ListNetscalerLoadBalancerNetworks(p)
if err != nil {
return "", err
}
if l.Count == 0 {
return "", fmt.Errorf("No match found for %s: %+v", keyword, l)
}
if l.Count == 1 {
return l.NetscalerLoadBalancerNetworks[0].Id, nil
}
if l.Count > 1 {
for _, v := range l.NetscalerLoadBalancerNetworks {
if v.Name == keyword {
return v.Id, nil
}
}
}
return "", fmt.Errorf("Could not find an exact match for %s: %+v", keyword, l)
}
// Lists networks that are using a Netscaler load balancer device
func (s *NetworkService) ListNetscalerLoadBalancerNetworks(p *ListNetscalerLoadBalancerNetworksParams) (*ListNetscalerLoadBalancerNetworksResponse, error) {
resp, err := s.cs.newRequest("listNetscalerLoadBalancerNetworks", p.toURLValues())
if err != nil {
return nil, err
}
var r ListNetscalerLoadBalancerNetworksResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
return &r, nil
}
type ListNetscalerLoadBalancerNetworksResponse struct {
Count int `json:"count"`
NetscalerLoadBalancerNetworks []*NetscalerLoadBalancerNetwork `json:"netscalerloadbalancernetwork"`
}
type NetscalerLoadBalancerNetwork struct {
Account string `json:"account,omitempty"`
Aclid string `json:"aclid,omitempty"`
Acltype string `json:"acltype,omitempty"`
Broadcastdomaintype string `json:"broadcastdomaintype,omitempty"`
Broadcasturi string `json:"broadcasturi,omitempty"`
Canusefordeploy bool `json:"canusefordeploy,omitempty"`
Cidr string `json:"cidr,omitempty"`
Displaynetwork bool `json:"displaynetwork,omitempty"`
Displaytext string `json:"displaytext,omitempty"`
Dns1 string `json:"dns1,omitempty"`
Dns2 string `json:"dns2,omitempty"`
Domain string `json:"domain,omitempty"`
Domainid string `json:"domainid,omitempty"`
Gateway string `json:"gateway,omitempty"`
Id string `json:"id,omitempty"`
Ip6cidr string `json:"ip6cidr,omitempty"`
Ip6gateway string `json:"ip6gateway,omitempty"`
Isdefault bool `json:"isdefault,omitempty"`
Ispersistent bool `json:"ispersistent,omitempty"`
Issystem bool `json:"issystem,omitempty"`
Name string `json:"name,omitempty"`
Netmask string `json:"netmask,omitempty"`
Networkcidr string `json:"networkcidr,omitempty"`
Networkdomain string `json:"networkdomain,omitempty"`
Networkofferingavailability string `json:"networkofferingavailability,omitempty"`
Networkofferingconservemode bool `json:"networkofferingconservemode,omitempty"`
Networkofferingdisplaytext string `json:"networkofferingdisplaytext,omitempty"`
Networkofferingid string `json:"networkofferingid,omitempty"`
Networkofferingname string `json:"networkofferingname,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Project string `json:"project,omitempty"`
Projectid string `json:"projectid,omitempty"`
Related string `json:"related,omitempty"`
Reservediprange string `json:"reservediprange,omitempty"`
Restartrequired bool `json:"restartrequired,omitempty"`
Service []struct {
Capability []struct {
Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"`
Name string `json:"name,omitempty"`
Value string `json:"value,omitempty"`
} `json:"capability,omitempty"`
Name string `json:"name,omitempty"`
Provider []struct {
Canenableindividualservice bool `json:"canenableindividualservice,omitempty"`
Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Servicelist []string `json:"servicelist,omitempty"`
State string `json:"state,omitempty"`
} `json:"provider,omitempty"`
} `json:"service,omitempty"`
Specifyipranges bool `json:"specifyipranges,omitempty"`
State string `json:"state,omitempty"`
Strechedl2subnet bool `json:"strechedl2subnet,omitempty"`
Subdomainaccess bool `json:"subdomainaccess,omitempty"`
Tags []struct {
Account string `json:"account,omitempty"`
Customer string `json:"customer,omitempty"`
Domain string `json:"domain,omitempty"`
Domainid string `json:"domainid,omitempty"`
Key string `json:"key,omitempty"`
Project string `json:"project,omitempty"`
Projectid string `json:"projectid,omitempty"`
Resourceid string `json:"resourceid,omitempty"`
Resourcetype string `json:"resourcetype,omitempty"`
Value string `json:"value,omitempty"`
} `json:"tags,omitempty"`
Traffictype string `json:"traffictype,omitempty"`
Type string `json:"type,omitempty"`
Vlan string `json:"vlan,omitempty"`
Vpcid string `json:"vpcid,omitempty"`
Zoneid string `json:"zoneid,omitempty"`
Zonename string `json:"zonename,omitempty"`
Zonesnetworkspans []string `json:"zonesnetworkspans,omitempty"`
}
type ListNiciraNvpDeviceNetworksParams struct {
p map[string]interface{}
}
func (p *ListNiciraNvpDeviceNetworksParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["keyword"]; found {
u.Set("keyword", v.(string))
}
if v, found := p.p["nvpdeviceid"]; found {
u.Set("nvpdeviceid", v.(string))
}
if v, found := p.p["page"]; found {
vv := strconv.Itoa(v.(int))
u.Set("page", vv)
}
if v, found := p.p["pagesize"]; found {
vv := strconv.Itoa(v.(int))
u.Set("pagesize", vv)
}
return u
}
func (p *ListNiciraNvpDeviceNetworksParams) SetKeyword(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["keyword"] = v
return
}
func (p *ListNiciraNvpDeviceNetworksParams) SetNvpdeviceid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["nvpdeviceid"] = v
return
}
func (p *ListNiciraNvpDeviceNetworksParams) SetPage(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["page"] = v
return
}
func (p *ListNiciraNvpDeviceNetworksParams) SetPagesize(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["pagesize"] = v
return
}
// You should always use this function to get a new ListNiciraNvpDeviceNetworksParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewListNiciraNvpDeviceNetworksParams(nvpdeviceid string) *ListNiciraNvpDeviceNetworksParams {
p := &ListNiciraNvpDeviceNetworksParams{}
p.p = make(map[string]interface{})
p.p["nvpdeviceid"] = nvpdeviceid
return p
}
// This is a courtesy helper function, which in some cases may not work as expected!
func (s *NetworkService) GetNiciraNvpDeviceNetworkID(keyword string, nvpdeviceid string) (string, error) {
p := &ListNiciraNvpDeviceNetworksParams{}
p.p = make(map[string]interface{})
p.p["keyword"] = keyword
p.p["nvpdeviceid"] = nvpdeviceid
l, err := s.ListNiciraNvpDeviceNetworks(p)
if err != nil {
return "", err
}
if l.Count == 0 {
return "", fmt.Errorf("No match found for %s: %+v", keyword, l)
}
if l.Count == 1 {
return l.NiciraNvpDeviceNetworks[0].Id, nil
}
if l.Count > 1 {
for _, v := range l.NiciraNvpDeviceNetworks {
if v.Name == keyword {
return v.Id, nil
}
}
}
return "", fmt.Errorf("Could not find an exact match for %s: %+v", keyword, l)
}
// Lists networks that are using a Nicira NVP device
func (s *NetworkService) ListNiciraNvpDeviceNetworks(p *ListNiciraNvpDeviceNetworksParams) (*ListNiciraNvpDeviceNetworksResponse, error) {
resp, err := s.cs.newRequest("listNiciraNvpDeviceNetworks", p.toURLValues())
if err != nil {
return nil, err
}
var r ListNiciraNvpDeviceNetworksResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
return &r, nil
}
type ListNiciraNvpDeviceNetworksResponse struct {
Count int `json:"count"`
NiciraNvpDeviceNetworks []*NiciraNvpDeviceNetwork `json:"niciranvpdevicenetwork"`
}
type NiciraNvpDeviceNetwork struct {
Account string `json:"account,omitempty"`
Aclid string `json:"aclid,omitempty"`
Acltype string `json:"acltype,omitempty"`
Broadcastdomaintype string `json:"broadcastdomaintype,omitempty"`
Broadcasturi string `json:"broadcasturi,omitempty"`
Canusefordeploy bool `json:"canusefordeploy,omitempty"`
Cidr string `json:"cidr,omitempty"`
Displaynetwork bool `json:"displaynetwork,omitempty"`
Displaytext string `json:"displaytext,omitempty"`
Dns1 string `json:"dns1,omitempty"`
Dns2 string `json:"dns2,omitempty"`
Domain string `json:"domain,omitempty"`
Domainid string `json:"domainid,omitempty"`
Gateway string `json:"gateway,omitempty"`
Id string `json:"id,omitempty"`
Ip6cidr string `json:"ip6cidr,omitempty"`
Ip6gateway string `json:"ip6gateway,omitempty"`
Isdefault bool `json:"isdefault,omitempty"`
Ispersistent bool `json:"ispersistent,omitempty"`
Issystem bool `json:"issystem,omitempty"`
Name string `json:"name,omitempty"`
Netmask string `json:"netmask,omitempty"`
Networkcidr string `json:"networkcidr,omitempty"`
Networkdomain string `json:"networkdomain,omitempty"`
Networkofferingavailability string `json:"networkofferingavailability,omitempty"`
Networkofferingconservemode bool `json:"networkofferingconservemode,omitempty"`
Networkofferingdisplaytext string `json:"networkofferingdisplaytext,omitempty"`
Networkofferingid string `json:"networkofferingid,omitempty"`
Networkofferingname string `json:"networkofferingname,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Project string `json:"project,omitempty"`
Projectid string `json:"projectid,omitempty"`
Related string `json:"related,omitempty"`
Reservediprange string `json:"reservediprange,omitempty"`
Restartrequired bool `json:"restartrequired,omitempty"`
Service []struct {
Capability []struct {
Canchooseservicecapability bool `json:"canchooseservicecapability,omitempty"`
Name string `json:"name,omitempty"`
Value string `json:"value,omitempty"`
} `json:"capability,omitempty"`
Name string `json:"name,omitempty"`
Provider []struct {
Canenableindividualservice bool `json:"canenableindividualservice,omitempty"`
Destinationphysicalnetworkid string `json:"destinationphysicalnetworkid,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Servicelist []string `json:"servicelist,omitempty"`
State string `json:"state,omitempty"`
} `json:"provider,omitempty"`
} `json:"service,omitempty"`
Specifyipranges bool `json:"specifyipranges,omitempty"`
State string `json:"state,omitempty"`
Strechedl2subnet bool `json:"strechedl2subnet,omitempty"`
Subdomainaccess bool `json:"subdomainaccess,omitempty"`
Tags []struct {
Account string `json:"account,omitempty"`
Customer string `json:"customer,omitempty"`
Domain string `json:"domain,omitempty"`
Domainid string `json:"domainid,omitempty"`
Key string `json:"key,omitempty"`
Project string `json:"project,omitempty"`
Projectid string `json:"projectid,omitempty"`
Resourceid string `json:"resourceid,omitempty"`
Resourcetype string `json:"resourcetype,omitempty"`
Value string `json:"value,omitempty"`
} `json:"tags,omitempty"`
Traffictype string `json:"traffictype,omitempty"`
Type string `json:"type,omitempty"`
Vlan string `json:"vlan,omitempty"`
Vpcid string `json:"vpcid,omitempty"`
Zoneid string `json:"zoneid,omitempty"`
Zonename string `json:"zonename,omitempty"`
Zonesnetworkspans []string `json:"zonesnetworkspans,omitempty"`
}
type ListNetworkIsolationMethodsParams struct {
p map[string]interface{}
}
func (p *ListNetworkIsolationMethodsParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["keyword"]; found {
u.Set("keyword", v.(string))
}
if v, found := p.p["page"]; found {
vv := strconv.Itoa(v.(int))
u.Set("page", vv)
}
if v, found := p.p["pagesize"]; found {
vv := strconv.Itoa(v.(int))
u.Set("pagesize", vv)
}
return u
}
func (p *ListNetworkIsolationMethodsParams) SetKeyword(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["keyword"] = v
return
}
func (p *ListNetworkIsolationMethodsParams) SetPage(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["page"] = v
return
}
func (p *ListNetworkIsolationMethodsParams) SetPagesize(v int) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["pagesize"] = v
return
}
// You should always use this function to get a new ListNetworkIsolationMethodsParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewListNetworkIsolationMethodsParams() *ListNetworkIsolationMethodsParams {
p := &ListNetworkIsolationMethodsParams{}
p.p = make(map[string]interface{})
return p
}
// Lists supported methods of network isolation
func (s *NetworkService) ListNetworkIsolationMethods(p *ListNetworkIsolationMethodsParams) (*ListNetworkIsolationMethodsResponse, error) {
resp, err := s.cs.newRequest("listNetworkIsolationMethods", p.toURLValues())
if err != nil {
return nil, err
}
var r ListNetworkIsolationMethodsResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
return &r, nil
}
type ListNetworkIsolationMethodsResponse struct {
Count int `json:"count"`
NetworkIsolationMethods []*NetworkIsolationMethod `json:"networkisolationmethod"`
}
type NetworkIsolationMethod struct {
Name string `json:"name,omitempty"`
}
type AddOpenDaylightControllerParams struct {
p map[string]interface{}
}
func (p *AddOpenDaylightControllerParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["password"]; found {
u.Set("password", v.(string))
}
if v, found := p.p["physicalnetworkid"]; found {
u.Set("physicalnetworkid", v.(string))
}
if v, found := p.p["url"]; found {
u.Set("url", v.(string))
}
if v, found := p.p["username"]; found {
u.Set("username", v.(string))
}
return u
}
func (p *AddOpenDaylightControllerParams) SetPassword(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["password"] = v
return
}
func (p *AddOpenDaylightControllerParams) SetPhysicalnetworkid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["physicalnetworkid"] = v
return
}
func (p *AddOpenDaylightControllerParams) SetUrl(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["url"] = v
return
}
func (p *AddOpenDaylightControllerParams) SetUsername(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["username"] = v
return
}
// You should always use this function to get a new AddOpenDaylightControllerParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewAddOpenDaylightControllerParams(password string, physicalnetworkid string, url string, username string) *AddOpenDaylightControllerParams {
p := &AddOpenDaylightControllerParams{}
p.p = make(map[string]interface{})
p.p["password"] = password
p.p["physicalnetworkid"] = physicalnetworkid
p.p["url"] = url
p.p["username"] = username
return p
}
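// Illustrative usage sketch, not part of the generated API: all four constructor
// arguments are required. The credentials, controller URL and physical network UUID
// below are placeholders supplied by the caller.
func exampleAddOpenDaylightController(cs *CloudStackClient) (*AddOpenDaylightControllerResponse, error) {
	p := cs.Network.NewAddOpenDaylightControllerParams(
		"controller-password",
		"physical-network-uuid",
		"https://odl.example.com:8443/controller",
		"admin",
	)
	return cs.Network.AddOpenDaylightController(p)
}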
// Adds an OpenDaylight controller
func (s *NetworkService) AddOpenDaylightController(p *AddOpenDaylightControllerParams) (*AddOpenDaylightControllerResponse, error) {
resp, err := s.cs.newRequest("addOpenDaylightController", p.toURLValues())
if err != nil {
return nil, err
}
var r AddOpenDaylightControllerResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
	// If we have an async client, we need to wait for the async result
if s.cs.async {
b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
if err != nil {
if err == AsyncTimeoutErr {
return &r, err
}
return nil, err
}
b, err = getRawValue(b)
if err != nil {
return nil, err
}
if err := json.Unmarshal(b, &r); err != nil {
return nil, err
}
}
return &r, nil
}
type AddOpenDaylightControllerResponse struct {
JobID string `json:"jobid,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Url string `json:"url,omitempty"`
Username string `json:"username,omitempty"`
}
type DeleteOpenDaylightControllerParams struct {
p map[string]interface{}
}
func (p *DeleteOpenDaylightControllerParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["id"]; found {
u.Set("id", v.(string))
}
return u
}
func (p *DeleteOpenDaylightControllerParams) SetId(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["id"] = v
return
}
// You should always use this function to get a new DeleteOpenDaylightControllerParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewDeleteOpenDaylightControllerParams(id string) *DeleteOpenDaylightControllerParams {
p := &DeleteOpenDaylightControllerParams{}
p.p = make(map[string]interface{})
p.p["id"] = id
return p
}
// Removes an OpenDaylight controller
func (s *NetworkService) DeleteOpenDaylightController(p *DeleteOpenDaylightControllerParams) (*DeleteOpenDaylightControllerResponse, error) {
resp, err := s.cs.newRequest("deleteOpenDaylightController", p.toURLValues())
if err != nil {
return nil, err
}
var r DeleteOpenDaylightControllerResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
	// If we have an async client, we need to wait for the async result
if s.cs.async {
b, err := s.cs.GetAsyncJobResult(r.JobID, s.cs.timeout)
if err != nil {
if err == AsyncTimeoutErr {
return &r, err
}
return nil, err
}
b, err = getRawValue(b)
if err != nil {
return nil, err
}
if err := json.Unmarshal(b, &r); err != nil {
return nil, err
}
}
return &r, nil
}
type DeleteOpenDaylightControllerResponse struct {
JobID string `json:"jobid,omitempty"`
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Url string `json:"url,omitempty"`
Username string `json:"username,omitempty"`
}
type ListOpenDaylightControllersParams struct {
p map[string]interface{}
}
func (p *ListOpenDaylightControllersParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["id"]; found {
u.Set("id", v.(string))
}
if v, found := p.p["physicalnetworkid"]; found {
u.Set("physicalnetworkid", v.(string))
}
return u
}
func (p *ListOpenDaylightControllersParams) SetId(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["id"] = v
return
}
func (p *ListOpenDaylightControllersParams) SetPhysicalnetworkid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["physicalnetworkid"] = v
return
}
// You should always use this function to get a new ListOpenDaylightControllersParams instance,
// as then you are sure you have configured all required params
func (s *NetworkService) NewListOpenDaylightControllersParams() *ListOpenDaylightControllersParams {
p := &ListOpenDaylightControllersParams{}
p.p = make(map[string]interface{})
return p
}
// This is a courtesy helper function, which in some cases may not work as expected!
func (s *NetworkService) GetOpenDaylightControllerByID(id string) (*OpenDaylightController, int, error) {
p := &ListOpenDaylightControllersParams{}
p.p = make(map[string]interface{})
p.p["id"] = id
l, err := s.ListOpenDaylightControllers(p)
if err != nil {
if strings.Contains(err.Error(), fmt.Sprintf(
"Invalid parameter id value=%s due to incorrect long value format, "+
"or entity does not exist", id)) {
return nil, 0, fmt.Errorf("No match found for %s: %+v", id, l)
}
return nil, -1, err
}
if l.Count == 0 {
return nil, l.Count, fmt.Errorf("No match found for %s: %+v", id, l)
}
if l.Count == 1 {
return l.OpenDaylightControllers[0], l.Count, nil
}
return nil, l.Count, fmt.Errorf("There is more then one result for OpenDaylightController UUID: %s!", id)
}
// Lists OpenDaylight controllers
func (s *NetworkService) ListOpenDaylightControllers(p *ListOpenDaylightControllersParams) (*ListOpenDaylightControllersResponse, error) {
resp, err := s.cs.newRequest("listOpenDaylightControllers", p.toURLValues())
if err != nil {
return nil, err
}
var r ListOpenDaylightControllersResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
return &r, nil
}
type ListOpenDaylightControllersResponse struct {
Count int `json:"count"`
OpenDaylightControllers []*OpenDaylightController `json:"opendaylightcontroller"`
}
type OpenDaylightController struct {
Id string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Physicalnetworkid string `json:"physicalnetworkid,omitempty"`
Url string `json:"url,omitempty"`
Username string `json:"username,omitempty"`
}<|fim▁end|> | |
<|file_name|>Avatar.js<|end_file_name|><|fim▁begin|>import Canvas from '../tool/Canvas.js';
import Animation from './Animation/Animation.js';
import Frame from './Animation/Frame.js';
import Player from '../engine/Player.js';
class Avatar {
static radius = 360;
static shakeTime = 300;
constructor(player, direction) {
this.player = player;
this.idle = Avatar.createLozange('#FFFD1B', '#BCBB14', 1, direction, 0.5, 0.75, 0.25);
this.idleShadow = Avatar.createLozange('#000000', '#000000', 0.1, direction, 0.5, 0.75, 0.25);
this.thrust = Avatar.createLozange('#F5DF0E', '#AB9B0A', 1, direction, 0.25, 1, 0.25);
this.thrustShadow = Avatar.createLozange('#000000', '#000000', 0.1, direction, 0.25, 1, 0.25);
this.shake = 0;
this.shakeTimout = null;
this.startShake = this.startShake.bind(this);
this.endShake = this.endShake.bind(this);
this.player.setWallEventListener(this.startShake);
}
    static createFrames(color, colorDark, direction) {
        const size = Avatar.radius * 2;
        const canvas = new Canvas(size, size);
        const context = canvas.context;
        // Frame-based animation is not implemented yet: nothing is drawn on the
        // canvas and an empty frame list is returned.
        let frames = [
        ];
        return frames;
    }
static createLozange(color, colorDark, alpha, direction, height, body, head) {
const canvasWidth = 2;
const canvasHeight = 2;
const size = Avatar.radius * 2;
const canvas = new Canvas(size * canvasWidth, size * canvasHeight);
const context = canvas.context;
const center = { x: canvasWidth / 2, y: canvasHeight / 2 };
const top = { x: center.x, y: center.y - (height / 2) };
        const right = { x: center.x + head, y: center.y };
const bottom = { x: center.x, y: top.y + height };
const left = { x: center.x - body, y: center.y };
if (direction) {
canvas.reverse();
}
context.scale(size, size);
canvas.setAlpha(alpha);
canvas.setFill(color);
context.beginPath();
context.moveTo(left.x, left.y);
context.lineTo(top.x, top.y);
context.lineTo(right.x, right.y);
context.fill();
canvas.setFill(colorDark);
context.beginPath();
context.moveTo(left.x, left.y);
context.lineTo(bottom.x, bottom.y);
context.lineTo(right.x, right.y);
context.fill();
if (direction) {
canvas.reverse();
}
return canvas;
}
startShake() {
this.shake = Date.now();
this.shakeTimout = setTimeout(this.endShake, Avatar.shakeTime);
}
endShake() {
this.shake = false;
clearTimeout(this.shakeTimout);
}
getShake() {
if (!this.shake) {
return 0;
}
        // Two full cosine cycles over the shake duration, scaled to 1/25th of the
        // avatar radius, so the sprite wobbles briefly after hitting a wall.
        const time = (Date.now() - this.shake) / Avatar.shakeTime * 4 * Math.PI;
        return Math.cos(time) * Avatar.radius / 25;
}
draw() {
return this.player.thrusting ? this.thrust.element : this.idle.element;
}
drawShadow() {
return this.player.thrusting ? this.thrustShadow.element : this.idleShadow.element;
}
<|fim▁hole|> const ratio = 1 + (this.player.getSpeedRatio() - 1) * 0.5;
return Avatar.radius / devicePixelRatio * ratio;
}
getDropShadow() {
return Avatar.radius * 0.1;
}
}
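// Illustrative usage sketch (assumed, not part of the original file): a renderer
// could combine the helpers above roughly like this each frame, where `context` is a
// 2D canvas context and `x`/`y` are coordinates chosen by the caller:
//
//     const avatar = new Avatar(player, true);
//     const offset = avatar.getShake();
//     context.drawImage(avatar.drawShadow(), x + offset, y + avatar.getDropShadow());
//     context.drawImage(avatar.draw(), x + offset, y);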
export default Avatar;<|fim▁end|> | getSize() { |
<|file_name|>domain_tests.js<|end_file_name|><|fim▁begin|>odoo.define('web.domain_tests', function (require) {
"use strict";
var Domain = require('web.Domain');
QUnit.module('core', {}, function () {
QUnit.module('domain');
QUnit.test("basic", function (assert) {
assert.expect(3);
var fields = {<|fim▁hole|> rrule_type: 'monthly',
};
assert.ok(new Domain([['a', '=', 3]]).compute(fields));
assert.ok(new Domain([['group_method','!=','count']]).compute(fields));
assert.ok(new Domain([['select1','=','day'], ['rrule_type','=','monthly']]).compute(fields));
});
QUnit.test("or", function (assert) {
assert.expect(3);
var web = {
section_id: null,
user_id: null,
member_ids: null,
};
var currentDomain = [
'|',
['section_id', '=', 42],
'|',
['user_id', '=', 3],
['member_ids', 'in', [3]]
];
assert.ok(new Domain(currentDomain).compute(_.extend({}, web, {section_id: 42})));
assert.ok(new Domain(currentDomain).compute(_.extend({}, web, {user_id: 3})));
assert.ok(new Domain(currentDomain).compute(_.extend({}, web, {member_ids: 3})));
});
QUnit.test("not", function (assert) {
assert.expect(2);
var fields = {
a: 5,
group_method: 'line',
};
assert.ok(new Domain(['!', ['a', '=', 3]]).compute(fields));
assert.ok(new Domain(['!', ['group_method','=','count']]).compute(fields));
});
QUnit.test("domains initialized with a number", function (assert) {
assert.expect(2);
assert.ok(new Domain(1).compute({}));
assert.notOk(new Domain(0).compute({}));
});
});
});<|fim▁end|> | a: 3,
group_method: 'line',
select1: 'day', |
<|file_name|>webpack.dev.config.ts<|end_file_name|><|fim▁begin|>import * as ExtractTextPlugin from 'extract-text-webpack-plugin';
import * as path from 'path';
import * as webpack from 'webpack';
export const config: webpack.Configuration = {
module: {
rules: [{
test: /\.css$/,
use: ExtractTextPlugin.extract({
fallback: 'style-loader',
use: [{
loader: 'css-loader',
options: {
minimize: false,
sourceMap: true
}
}]<|fim▁hole|> test: /\.less$/,
use: ExtractTextPlugin.extract({
fallback: 'style-loader',
use: [{
loader: 'css-loader',
options: {
minimize: false,
sourceMap: true
}
}, {
loader: 'less-loader',
options: {
minimize: false,
sourceMap: true
}
}]
})
}]
}
};
export default config;<|fim▁end|> | })
}, { |
<|file_name|>vidspot.py<|end_file_name|><|fim▁begin|>'''
Allmyvideos urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import urllib
import urlparse
from lib import helpers
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class VidSpotResolver(UrlResolver):
name = "vidspot"
domains = ["vidspot.net"]
pattern = '(?://|\.)(vidspot\.net)/(?:embed-)?([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
url = self.get_url(host, media_id)
html = self.net.http_GET(url).content
data = helpers.get_hidden(html)
html = self.net.http_POST(url, data).content
r = re.search('"sources"\s*:\s*\[(.*?)\]', html, re.DOTALL)
if r:
fragment = r.group(1)
stream_url = None
for match in re.finditer('"file"\s*:\s*"([^"]+)', fragment):
stream_url = match.group(1)
if stream_url:
stream_url = '%s?%s&direct=false' % (stream_url.split('?')[0], urlparse.urlparse(stream_url).query)
return stream_url + helpers.append_headers({'User-Agent': common.IE_USER_AGENT})
else:<|fim▁hole|> else:
raise ResolverError('could not find sources')
def get_url(self, host, media_id):
return 'http://vidspot.net/embed-%s.html' % (media_id)<|fim▁end|> | raise ResolverError('could not find file') |
<|file_name|>template_components.cpp<|end_file_name|><|fim▁begin|>#include "template_components.h"
#include <QtGui/QSpacerItem>
TemplateComponents::TemplateComponents(const QSharedPointer<const Template>& templ,
QWidget *parent)
: QWidget(parent), templ(templ)
{
ui.setupUi(this);
QList<FoodComponent> components = templ->getComponents();
for (QList<FoodComponent>::iterator i = components.begin(); i != components.end(); ++i)
{
componentWidgetGroups.append(ComponentWidgetGroup(i->getFoodAmount(), this));
}
ui.componentLayout->addItem(new QSpacerItem(20, 40, QSizePolicy::Minimum, QSizePolicy::Expanding),
ui.componentLayout->rowCount(), 0);
}
TemplateComponents::~TemplateComponents()
{
}
QSharedPointer<FoodCollection> TemplateComponents::getCollection() const
{
QSharedPointer<FoodCollection> collection = FoodCollection::createFoodCollection(templ->getDisplayName());
for (QList<ComponentWidgetGroup>::const_iterator i = componentWidgetGroups.begin();
i != componentWidgetGroups.end(); ++i)
{
collection->addComponent(i->getFoodAmount());
}
return collection;
}
TemplateComponents::ComponentWidgetGroup::ComponentWidgetGroup
(FoodAmount foodAmount, TemplateComponents* parent)
: food(foodAmount.getFood()), lblFoodName(new QLabel(parent)),
txtAmount(new QLineEdit(parent)), cbUnit(new QComboBox(parent)),
chkIncludeRefuse(new QCheckBox(parent))
{
int row = parent->ui.componentLayout->rowCount();
parent->ui.componentLayout->addWidget(lblFoodName, row, 0);
parent->ui.componentLayout->addWidget(txtAmount, row, 1);
parent->ui.componentLayout->addWidget(cbUnit, row, 2);
parent->ui.componentLayout->addWidget(chkIncludeRefuse, row, 3);
lblFoodName->setText(food->getDisplayName());
lblFoodName->setWordWrap(true);
txtAmount->setText(QString::number(foodAmount.getAmount()));
txtAmount->setMinimumWidth(50);
txtAmount->setMaximumWidth(80);
txtAmount->setAlignment(Qt::AlignRight);
QMap<QString, QSharedPointer<const Unit> > unitsToShow;
QList<Unit::Dimensions::Dimension> validDimensions = food->getValidDimensions();
for (QList<Unit::Dimensions::Dimension>::const_iterator i = validDimensions.begin();<|fim▁hole|> QVector<QSharedPointer<const Unit> > units = Unit::getAllUnits(*i);
for (QVector<QSharedPointer<const Unit> >::const_iterator i = units.begin();
i != units.end(); ++i)
{
unitsToShow.insert((*i)->getName(), *i);
}
}
for (QMap<QString, QSharedPointer<const Unit> >::iterator i = unitsToShow.begin();
i != unitsToShow.end(); ++i)
{
cbUnit->addItem(i.value()->getNameAndAbbreviation(),
i.value()->getAbbreviation());
}
cbUnit->setCurrentIndex(cbUnit->findData(foodAmount.getUnit()->getAbbreviation()));
chkIncludeRefuse->setText("Including inedible parts");
chkIncludeRefuse->setChecked
(foodAmount.includesRefuse() && foodAmount.getFood()->getPercentRefuse() > 0);
chkIncludeRefuse->setEnabled(foodAmount.getFood()->getPercentRefuse() > 0);
}
FoodAmount TemplateComponents::ComponentWidgetGroup::getFoodAmount() const
{
return FoodAmount(food, txtAmount->text().toDouble(),
Unit::getUnit(cbUnit->itemData(cbUnit->currentIndex()).toString()),
!chkIncludeRefuse->isEnabled() || chkIncludeRefuse->isChecked());
}<|fim▁end|> | i != validDimensions.end(); ++i)
{ |
<|file_name|>f5ssh.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class ModuleDocFragment(object):
# Standard F5 documentation fragment
DOCUMENTATION = r'''
options:
provider:
description:
- A dict object containing connection details.
type: dict
version_added: "1.0.0"
suboptions:
password:
description:
- The password for the user account used to connect to the BIG-IP.
- You may omit this option by setting the environment variable C(F5_PASSWORD).
type: str
required: true
aliases: [ pass, pwd ]<|fim▁hole|> - The BIG-IP host.
- You may omit this option by setting the environment variable C(F5_SERVER).
type: str
required: true
server_port:
description:
- The BIG-IP server port.
- You may omit this option by setting the environment variable C(F5_SERVER_PORT).
type: int
default: 22
user:
description:
- The username to connect to the BIG-IP with. This user must have
administrative privileges on the device.
- You may omit this option by setting the environment variable C(F5_USER).
type: str
required: true
validate_certs:
description:
- If C(no), SSL certificates are not validated. Use this only
on personally controlled sites using self-signed certificates.
- You may omit this option by setting the environment variable C(F5_VALIDATE_CERTS).
type: bool
default: yes
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
type: int
ssh_keyfile:
description:
- Specifies the SSH keyfile to use to authenticate the connection to
the remote device. This argument is only used for I(cli) transports.
- You may omit this option by setting the environment variable C(ANSIBLE_NET_SSH_KEYFILE).
type: path
transport:
description:
- Configures the transport connection to use when connecting to the
remote device.
type: str
choices: ['cli']
default: cli
no_f5_teem:
description:
- If C(yes), TEEM telemetry data is not sent to F5.
- You may omit this option by setting the environment variable C(F5_TELEMETRY_OFF).
- Previously used variable C(F5_TEEM) is deprecated as its name was confusing.
default: no
type: bool
auth_provider:
description:
          - Configures the auth provider used to obtain authentication tokens from the remote device.
- This option is really used when working with BIG-IQ devices.
type: str
notes:
- For more information on using Ansible to manage F5 Networks devices see U(https://www.ansible.com/integrations/networks/f5).
- Requires BIG-IP software version >= 12.
- The F5 modules only manipulate the running configuration of the F5 product. To ensure that BIG-IP
specific configuration persists to disk, be sure to include at least one task that uses the
M(f5networks.f5_modules.bigip_config) module to save the running configuration. Refer to the module's documentation for
the correct usage of the module to save your running configuration.
'''<|fim▁end|> | server:
description: |
<|file_name|>notificator.rs<|end_file_name|><|fim▁begin|>//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use error::Result;
/// A Notificator provides a function that can be called to notify about a certain object.
///
/// # TODO
///
/// The user of the library does _not_ get access to the notification handle.
/// This is not optimal, but enough for today.
///
pub trait Notificator<T> {<|fim▁hole|>}
pub mod default {
use std::fmt::Debug;
use std::fmt::Display;
use error::Result;
use notify_rust::Notification as RustNotification;
use notify_rust::NotificationUrgency;
use super::Notificator;
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub enum Urgency {
Low,
Normal,
High
}
impl Default for Urgency {
fn default() -> Urgency {
Urgency::Normal
}
}
impl Into<NotificationUrgency> for Urgency {
fn into(self) -> NotificationUrgency {
match self {
Urgency::Low => NotificationUrgency::Low,
Urgency::Normal => NotificationUrgency::Normal,
Urgency::High => NotificationUrgency::Critical,
}
}
}
#[derive(Debug, Default, Clone)]
pub struct Notification {
pub timeout: i32,
pub message: String,
pub summary: String,
pub urgency: Urgency,
}
impl<T: Display> Notificator<T> for Notification {
/// A default implementation for all Types that implement Display
fn notify(&self, item: &T) -> Result<()> {
let mut n = RustNotification::new();
n.appname("imag");
n.summary(&self.summary);
n.urgency(self.urgency.clone().into());
n.body(&format!("{}: {}", &self.message, item));
let _ = n.finalize().show(); // Ignoring error here
Ok(())
}
}
#[derive(Debug, Default, Clone)]
pub struct DebugNotification(Notification);
impl From<Notification> for DebugNotification {
fn from(n: Notification) -> DebugNotification {
DebugNotification(n)
}
}
impl<T: Debug> Notificator<T> for DebugNotification {
/// A default implementation for all Types that implement Display
fn notify(&self, item: &T) -> Result<()> {
let mut n = RustNotification::new();
n.appname("imag");
n.summary(&self.0.summary);
n.urgency(self.0.urgency.clone().into());
n.body(&format!("{}: {:?}", &self.0.message, item));
let _ = n.finalize().show(); // Ignoring error here
Ok(())
}
}
}<|fim▁end|> | fn notify(&self, item: &T) -> Result<()>; |
<|file_name|>idt.rs<|end_file_name|><|fim▁begin|>use core::ptr;
use x86;
use x86::shared::dtables::*;
use x86::current::irq::IdtEntry;
use x86::shared::PrivilegeLevel;
use x86::shared::paging::VAddr;
extern "C" {
static interrupt_handlers: [*const u8; 256];
}
pub struct Idt {
table: [IdtEntry; 256],
}
impl Idt {
pub const fn new() -> Idt {
Idt {
table: [x86::current::irq::IdtEntry::MISSING; 256],
}
}
pub fn init(&mut self) {
self.setup_gates();
unsafe {
x86::shared::dtables::lidt(
&DescriptorTablePointer::new_idtp(&self.table)
);
}
}
fn setup_gates(&mut self) {
unsafe {
for (index, &handler) in interrupt_handlers.iter().enumerate() {
if handler != ptr::null() {
self.set_gate(index, handler);<|fim▁hole|> }
fn set_gate(&mut self, num: usize, handler: *const u8) {
use x86::shared::segmentation::cs;
if num != 80 {
self.table[num] =
IdtEntry::new(
VAddr::from_usize(handler as usize),
cs().bits(),
PrivilegeLevel::Ring0,
false
);
} else {
self.table[num] =
IdtEntry::new(
VAddr::from_usize(handler as usize),
cs().bits(),
PrivilegeLevel::Ring3,
false
);
}
}
}
pub unsafe fn test() {
int!(81);
}
pub unsafe fn enable() {
x86::shared::irq::enable();
}
pub unsafe fn disable() {
x86::shared::irq::disable();
}<|fim▁end|> | }
}
} |
<|file_name|>piechart.cpp<|end_file_name|><|fim▁begin|>/****************************************************************************
**
** Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
** All rights reserved.
** Contact: Nokia Corporation ([email protected])
**
** This file is part of the documentation of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:BSD$
** You may use this file under the terms of the BSD license as follows:
**
** "Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions are
** met:
** * Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** * Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in
** the documentation and/or other materials provided with the
** distribution.
** * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
** the names of its contributors may be used to endorse or promote
** products derived from this software without specific prior written
** permission.
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
** OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
** LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
** OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
** $QT_END_LICENSE$
**
****************************************************************************/
#include "piechart.h"
#include <QPainter>
#include <QDebug>
PieChart::PieChart(QDeclarativeItem *parent)
: QDeclarativeItem(parent)<|fim▁hole|> setFlag(QGraphicsItem::ItemHasNoContents, false);
}
QString PieChart::name() const
{
return m_name;
}
void PieChart::setName(const QString &name)
{
m_name = name;
}
QColor PieChart::color() const
{
return m_color;
}
void PieChart::setColor(const QColor &color)
{
m_color = color;
}
void PieChart::paint(QPainter *painter, const QStyleOptionGraphicsItem *, QWidget *)
{
QPen pen(m_color, 2);
painter->setPen(pen);
painter->setRenderHints(QPainter::Antialiasing, true);
painter->drawPie(boundingRect(), 90 * 16, 290 * 16);
}
//![0]
void PieChart::clearChart()
{
setColor(QColor(Qt::transparent));
update();
emit chartCleared();
}
//![0]<|fim▁end|> | {
// need to disable this flag to draw inside a QDeclarativeItem |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains shared types and messages for use by devtools/script.
//! The traits are here instead of in script so that the devtools crate can be
//! modified independently of the rest of Servo.
#![crate_name = "devtools_traits"]
#![crate_type = "rlib"]
#![allow(non_snake_case)]
#![deny(unsafe_code)]
#[macro_use]
extern crate bitflags;
extern crate hyper;
extern crate ipc_channel;
extern crate malloc_size_of;
#[macro_use] extern crate malloc_size_of_derive;
extern crate msg;
#[macro_use] extern crate serde;
extern crate servo_url;
extern crate time;
use hyper::header::Headers;
use hyper::method::Method;
use ipc_channel::ipc::IpcSender;
use msg::constellation_msg::PipelineId;
use servo_url::ServoUrl;
use std::net::TcpStream;
use time::Duration;
use time::Tm;
// Information would be attached to NewGlobal to be received and shown in devtools.
// Extend these fields if we need more information.
#[derive(Debug, Deserialize, Serialize)]
pub struct DevtoolsPageInfo {
pub title: String,
pub url: ServoUrl,
}
#[derive(Clone, Debug, Deserialize, MallocSizeOf, Serialize)]
pub struct CSSError {
pub filename: String,
pub line: u32,
pub column: u32,
pub msg: String
}
/// Messages to instruct the devtools server to update its known actors/state
/// according to changes in the browser.
#[derive(Debug)]
pub enum DevtoolsControlMsg {
/// Messages from threads in the chrome process (resource/constellation/devtools)
FromChrome(ChromeToDevtoolsControlMsg),
/// Messages from script threads
FromScript(ScriptToDevtoolsControlMsg),
}
/// Events that the devtools server must act upon.
#[derive(Debug)]
pub enum ChromeToDevtoolsControlMsg {
/// A new client has connected to the server.
AddClient(TcpStream),
/// The browser is shutting down.
ServerExitMsg,
/// A network event occurred (request, reply, etc.). The actor with the
/// provided name should be notified.
NetworkEvent(String, NetworkEvent),
}
#[derive(Debug, Deserialize, Serialize)]
/// Events that the devtools server must act upon.
pub enum ScriptToDevtoolsControlMsg {
/// A new global object was created, associated with a particular pipeline.
/// The means of communicating directly with it are provided.
NewGlobal((PipelineId, Option<WorkerId>),
IpcSender<DevtoolScriptControlMsg>,
DevtoolsPageInfo),
/// A particular page has invoked the console API.
ConsoleAPI(PipelineId, ConsoleMessage, Option<WorkerId>),
/// An animation frame with the given timestamp was processed in a script thread.
/// The actor with the provided name should be notified.
FramerateTick(String, f64),
/// Report a CSS parse error for the given pipeline
ReportCSSError(PipelineId, CSSError),
}
/// Serialized JS return values
/// TODO: generalize this beyond the EvaluateJS message?
#[derive(Debug, Deserialize, Serialize)]
pub enum EvaluateJSReply {
VoidValue,
NullValue,
BooleanValue(bool),
NumberValue(f64),
StringValue(String),
ActorValue { class: String, uuid: String },
}
#[derive(Debug, Deserialize, Serialize)]
pub struct AttrInfo {
pub namespace: String,
pub name: String,
pub value: String,
}
#[derive(Debug, Deserialize, Serialize)]
pub struct NodeInfo {
pub uniqueId: String,
pub baseURI: String,
pub parent: String,
pub nodeType: u16,
pub namespaceURI: String,
pub nodeName: String,
pub numChildren: usize,
pub name: String,
pub publicId: String,
pub systemId: String,
pub attrs: Vec<AttrInfo>,
pub isDocumentElement: bool,
pub shortValue: String,
pub incompleteValue: bool,
}
pub struct StartedTimelineMarker {
name: String,
start_time: PreciseTime,
start_stack: Option<Vec<()>>,
}
#[derive(Debug, Deserialize, Serialize)]
pub struct TimelineMarker {
pub name: String,
pub start_time: PreciseTime,
pub start_stack: Option<Vec<()>>,
pub end_time: PreciseTime,
pub end_stack: Option<Vec<()>>,
}
#[derive(Clone, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize)]
pub enum TimelineMarkerType {
Reflow,
DOMEvent,
}
/// The properties of a DOM node as computed by layout.
#[derive(Debug, Deserialize, Serialize)]
pub struct ComputedNodeLayout {
pub display: String,
pub position: String,
pub zIndex: String,
pub boxSizing: String,
pub autoMargins: AutoMargins,
pub marginTop: String,
pub marginRight: String,
pub marginBottom: String,
pub marginLeft: String,
pub borderTopWidth: String,
pub borderRightWidth: String,
pub borderBottomWidth: String,
pub borderLeftWidth: String,
pub paddingTop: String,
pub paddingRight: String,
pub paddingBottom: String,
pub paddingLeft: String,
pub width: f32,
pub height: f32,
}
#[derive(Debug, Deserialize, Serialize)]
pub struct AutoMargins {
pub top: bool,
pub right: bool,
pub bottom: bool,
pub left: bool,
}
/// Messages to process in a particular script thread, as instructed by a devtools client.
/// TODO: better error handling, e.g. if pipeline id lookup fails?
#[derive(Debug, Deserialize, Serialize)]
pub enum DevtoolScriptControlMsg {
/// Evaluate a JS snippet in the context of the global for the given pipeline.
EvaluateJS(PipelineId, String, IpcSender<EvaluateJSReply>),
/// Retrieve the details of the root node (ie. the document) for the given pipeline.
GetRootNode(PipelineId, IpcSender<Option<NodeInfo>>),
/// Retrieve the details of the document element for the given pipeline.
GetDocumentElement(PipelineId, IpcSender<Option<NodeInfo>>),
/// Retrieve the details of the child nodes of the given node in the given pipeline.
GetChildren(PipelineId, String, IpcSender<Option<Vec<NodeInfo>>>),
/// Retrieve the computed layout properties of the given node in the given pipeline.
GetLayout(PipelineId, String, IpcSender<Option<ComputedNodeLayout>>),
/// Retrieve all stored console messages for the given pipeline.
GetCachedMessages(PipelineId, CachedConsoleMessageTypes, IpcSender<Vec<CachedConsoleMessage>>),
/// Update a given node's attributes with a list of modifications.
ModifyAttribute(PipelineId, String, Vec<Modification>),
/// Request live console messages for a given pipeline (true if desired, false otherwise).
WantsLiveNotifications(PipelineId, bool),
/// Request live notifications for a given set of timeline events for a given pipeline.<|fim▁hole|> /// executed in the given pipeline.
RequestAnimationFrame(PipelineId, String),
/// Direct the given pipeline to reload the current page.
Reload(PipelineId),
}
#[derive(Debug, Deserialize, Serialize)]
pub struct Modification {
pub attributeName: String,
pub newValue: Option<String>,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum LogLevel {
Log,
Debug,
Info,
Warn,
Error,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct ConsoleMessage {
pub message: String,
pub logLevel: LogLevel,
pub filename: String,
pub lineNumber: usize,
pub columnNumber: usize,
}
bitflags! {
#[derive(Deserialize, Serialize)]
pub flags CachedConsoleMessageTypes: u8 {
const PAGE_ERROR = 1 << 0,
const CONSOLE_API = 1 << 1,
}
}
#[derive(Debug, Deserialize, Serialize)]
pub struct PageError {
#[serde(rename = "_type")]
pub type_: String,
pub errorMessage: String,
pub sourceName: String,
pub lineText: String,
pub lineNumber: u32,
pub columnNumber: u32,
pub category: String,
pub timeStamp: u64,
pub error: bool,
pub warning: bool,
pub exception: bool,
pub strict: bool,
pub private: bool,
}
#[derive(Debug, Deserialize, Serialize)]
pub struct ConsoleAPI {
#[serde(rename = "_type")]
pub type_: String,
pub level: String,
pub filename: String,
pub lineNumber: u32,
pub functionName: String,
pub timeStamp: u64,
pub private: bool,
pub arguments: Vec<String>,
}
#[derive(Debug, Deserialize, Serialize)]
pub enum CachedConsoleMessage {
PageError(PageError),
ConsoleAPI(ConsoleAPI),
}
#[derive(Debug, PartialEq)]
pub struct HttpRequest {
pub url: ServoUrl,
pub method: Method,
pub headers: Headers,
pub body: Option<Vec<u8>>,
pub pipeline_id: PipelineId,
pub startedDateTime: Tm,
pub timeStamp: i64,
pub connect_time: u64,
pub send_time: u64,
pub is_xhr: bool,
}
#[derive(Debug, PartialEq)]
pub struct HttpResponse {
pub headers: Option<Headers>,
pub status: Option<(u16, Vec<u8>)>,
pub body: Option<Vec<u8>>,
pub pipeline_id: PipelineId,
}
#[derive(Debug)]
pub enum NetworkEvent {
HttpRequest(HttpRequest),
HttpResponse(HttpResponse),
}
impl TimelineMarker {
pub fn start(name: String) -> StartedTimelineMarker {
StartedTimelineMarker {
name: name,
start_time: PreciseTime::now(),
start_stack: None,
}
}
}
impl StartedTimelineMarker {
pub fn end(self) -> TimelineMarker {
TimelineMarker {
name: self.name,
start_time: self.start_time,
start_stack: self.start_stack,
end_time: PreciseTime::now(),
end_stack: None,
}
}
}
/// A replacement for `time::PreciseTime` that isn't opaque, so we can serialize it.
///
/// The reason why this doesn't go upstream is that `time` is slated to be part of Rust's standard
/// library, which definitely can't have any dependencies on `serde`. But `serde` can't implement
/// `Deserialize` and `Serialize` itself, because `time::PreciseTime` is opaque! A Catch-22. So I'm
/// duplicating the definition here.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub struct PreciseTime(u64);
impl PreciseTime {
pub fn now() -> PreciseTime {
PreciseTime(time::precise_time_ns())
}
pub fn to(&self, later: PreciseTime) -> Duration {
Duration::nanoseconds((later.0 - self.0) as i64)
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize)]
pub struct WorkerId(pub u32);<|fim▁end|> | SetTimelineMarkers(PipelineId, Vec<TimelineMarkerType>, IpcSender<Option<TimelineMarker>>),
/// Withdraw request for live timeline notifications for a given pipeline.
DropTimelineMarkers(PipelineId, Vec<TimelineMarkerType>),
/// Request a callback directed at the given actor name from the next animation frame |
<|file_name|>graph_node.py<|end_file_name|><|fim▁begin|>from .graph_exception import GraphException
from .quality import Quality
from .node_factory import NodeFactory
# noinspection PyProtectedMember
class GraphNode(object):
"""
Base class for nodes in the calculation graph.
    See: http://richard-shepherd.github.io/calculation_graph/GraphNode.html
"""
# 'enum' for node GC collectability...
class GCType(object):
COLLECTABLE = 1 # Node can be GC'd if not referred to by other nodes.
NON_COLLECTABLE = 2 # Node will not be GC'd, even if not referred to by other nodes.
    # 'enum' for whether child nodes should be calculated after the current node has finished calculating...
class CalculateChildrenType(object):
CALCULATE_CHILDREN = 1
DO_NOT_CALCULATE_CHILDREN = 2
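    # A minimal sketch of a concrete node (hypothetical, not part of this module):
    # a subclass declares its parents in set_dependencies() and produces its value
    # in calculate(). ValueNode and the .value attribute below are assumed names,
    # used purely for illustration:
    #
    #   class SumNode(GraphNode):
    #       def set_dependencies(self):
    #           self.lhs = self.add_parent_node(ValueNode, "lhs")
    #           self.rhs = self.add_parent_node(ValueNode, "rhs")
    #       def calculate(self):
    #           self.value = self.lhs.value + self.rhs.value
    #           return GraphNode.CalculateChildrenType.CALCULATE_CHILDREN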
def __init__(self, node_id, graph_manager, environment, *args, **kwargs):
"""
The constructor.
"""
# The node's unique ID in the graph...
self.node_id = node_id
# The graph manager...
self.graph_manager = graph_manager
# The environment. This can be any object useful for the particular application
# which this graph and its nodes are used for...
self.environment = environment
# The quality of the data managed by this node...
self.quality = Quality()
# The set of parent nodes...
self._parent_nodes = set()
# The set of child nodes...
self._child_nodes = set()
# The set of child nodes to be calculated during one calculation cycle.
# When we calculate, we first take a copy of the _child_nodes (above), as
# the set may change during calculation...
self._child_nodes_for_this_calculation_cycle = set()
# The number of parent nodes which have caused this node to calculate during
# one calculation cycle...
self._invalid_count = 0
# True if this node is marked for calculation in the next cycle...
self._needs_calculation = True
# The set of parent nodes that caused this node to calculate in
# the current calculation cycle...
self._updated_parent_nodes = set()
# Garbage collection...
self._gc_type = GraphNode.GCType.COLLECTABLE
self._gc_ref_count = 0
# Indicates whether the node has calculated in the most recent
# calculation cycle.
# Note: This flag is only valid if the graph-manager's use_has_calculated_flags
# property is True.
self.has_calculated = False
# We automatically reset dependencies if any of these
# nodes has updated in the current calculation cycle...
self._auto_rebuild_nodes = set()
@staticmethod
def make_node_id(*args):
"""
Override this if you need to make the ID of the node yourself.
This method automatically creates the ID by stringifying the parameters
which identify the node. If this cannot be done, you should override the
method and create the ID yourself.
This static method is called by the add_parent_node "factory" function
to help find nodes based on their parameters.
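        For example, make_node_id("EUR", "USD", 5) returns "EUR_USD_5" (the argument
        values here are purely illustrative; any stringifiable parameters work).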
"""
if len(args) == 0:
node_id = "ID"
else:
node_id = "_".join((str(x) for x in args))
return node_id
@staticmethod
def get_type():
"""
Returns the type (class name) of this node.
In some fairly rare cases you may want to override this in derived classes.
You might do this, for example, if you want to 'mock' a node.
"""
return ""
def cleanup(self):
"""
Cleans up the node and calls dispose() on derived classes.
"""
self.remove_parents()
self.remove_children()
self.dispose()
def dispose(self):
"""
Should be implemented by derived classes, if there are any non-node
resources to be cleaned up.
"""
pass
def set_dependencies(self):
"""
Should be implemented by derived classes, if they depend on any parent nodes.
"""
pass
def pre_calculate(self):
"""
Called just before calculation. You may want to implement this if you
need to do any custom resetting of dependencies.
"""
# If any of the updated parent nodes is in the auto-rebuild collection,
# we reset dependencies...
if len(set.intersection(self._updated_parent_nodes, self._auto_rebuild_nodes)) > 0:
self.reset_dependencies()
def calculate_quality(self):
"""
Called after pre_calculate() and before calculate().
Merges data quality from parent nodes. You should override this if you
need to calculate quality in a custom way.
"""
self.quality.clear_to_good()
for parent_node in self._parent_nodes:
self.quality.merge(parent_node.quality)
def calculate(self):
"""
Should be implemented by derived classes if they perform any calculations.
"""
return GraphNode.CalculateChildrenType.CALCULATE_CHILDREN
def get_info_message(self):
"""
Should be implemented by derived classes, if you want to provide graph-dump
information about your node.
"""
return ""
def add_parent(self, node):
"""
Adds a parent node for this node and updates the child node collection
of the parent
"""
if node not in self._parent_nodes:
self._parent_nodes.add(node)
node._child_nodes.add(self)
def remove_parent(self, node):
"""
        Removes a parent node from this node and updates the child node collection
of the parent.
"""<|fim▁hole|> self._parent_nodes.remove(node)
node._child_nodes.remove(self)
# We mark the graph as needing garbage collection, as removing
# the parent link may leave unreferenced nodes...
self.graph_manager.link_removed()
def remove_parents(self):
"""
Removes all parent nodes for this node, also updates the child collections
of the parents.
"""
while len(self._parent_nodes) > 0:
node = self._parent_nodes.pop()
node._child_nodes.remove(self)
# We mark the graph as needing garbage collection, as removing
# the parents may leave unreferenced nodes...
self.graph_manager.link_removed()
def remove_children(self):
"""
Removes all child nodes for this node, also updates the parent collections
of the children.
"""
while len(self._child_nodes) > 0:
node = self._child_nodes.pop()
node._parent_nodes.remove(self)
def has_children(self):
"""
True if this node has any child nodes.
"""
return len(self._child_nodes) > 0
def invalidate(self, parent):
"""
        Marks this node as invalid and, if this is the first invalidation, marks all
direct child nodes as invalid.
The parent node that is marking this node as needing calculation is passed
in, so that nodes can see who triggered them to calculate.
"""
# We add the parent to the collection of nodes that caused us to
# recalculate. (If this is one of the 'root' changed nodes for this
# calculation, the parent will be NULL, so we don't include it.)
if parent is not None:
self.add_updated_parent(parent)
self._invalid_count += 1
if self._invalid_count == 1:
# We have just gone invalid.
# Capture child set, as this may change as a result of calculation, and
# make recursive call for each node in captured child set
self._child_nodes_for_this_calculation_cycle = self._child_nodes.copy()
for node in self._child_nodes_for_this_calculation_cycle:
node.invalidate(self)
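    # A small worked example of the invalidate/validate counting (illustrative only):
    # if a node has three recalculating parents, invalidate() is called on it three
    # times, so _invalid_count reaches 3; each parent then calls validate() as it
    # finishes, and only the third call brings the count back to zero and triggers
    # this node's own calculation.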
def validate(self):
"""
Called when one of the parent nodes has been calculated. We decrease the
invalidation count and if it has gone to zero, then all parents have been
calculated and we can calculate this node.
We then notify child nodes that they need to be calculated (by calling
validate on them).
"""
if self._invalid_count <= 0:
# Something has gone badly wrong in invalidate/validate...
raise GraphException(self.node_id + ": Invalidation count is unexpectedly non-positive")
self._invalid_count -= 1
if self._invalid_count == 0:
# All our parents are now valid, so we calculate our
# output value if necessary...
calculate_children = GraphNode.CalculateChildrenType.DO_NOT_CALCULATE_CHILDREN
if self._needs_calculation is True:
# We call pre-calculate. (This allows the node to do custom
# resetting of dependencies.)
self.pre_calculate()
# We merge data-quality...
self.calculate_quality()
# We do the calculation itself...
calculate_children = self.calculate()
self._needs_calculation = False
self.has_calculated = True
# We tell the graph-manager that the node has been calculated...
self.graph_manager.node_calculated(self)
# We calculate our child nodes...
for child_node in self._child_nodes_for_this_calculation_cycle:
# If this node's value has changed, force the _needs_calculation
# flag in the child node...
if calculate_children == GraphNode.CalculateChildrenType.CALCULATE_CHILDREN:
child_node._needs_calculation = True
# We tell the child node that this parent has calculated...
child_node.validate()
def reset_dependencies(self):
"""
Asks node to recreate its dependencies on other nodes and data objects.
"""
# We clear the collection of nodes that cause an auto-reset.
# (It will be repopulated when the new dependencies are set up.)
self._auto_rebuild_nodes.clear()
# We need to know if any new parents have been added to this node
# by this reset-dependencies operation. So we note the collection
# before and after setting them up...
parents_before_reset = self._parent_nodes.copy()
# We remove any existing parents, and add the new ones...
self.remove_parents()
self.set_dependencies()
# We find the collection of nodes that are now parents, but which
# weren't before, and we tell the graph-manager about them. (This
# is used to ensure that nodes are correctly calculated if the graph
# changes shape during the calculation-cycle.)
new_parents = self._parent_nodes.difference(parents_before_reset)
self.graph_manager.parents_updated(self, new_parents)
def parent_updated(self, parent):
"""
Returns true if the node passed in caused the calculation of the calling
node in this calculation cycle.
"""
return parent in self._updated_parent_nodes
def set_gc_type(self, gc_type):
"""
Sets whether or not this node can be garbage-collected.
"""
self._gc_type = gc_type
self.graph_manager.update_gc_info_for_node(self)
def needs_calculation(self):
"""
Marks the node as needing calculation in the next calculation cycle.
"""
self.graph_manager.needs_calculation(self)
def add_updated_parent(self, node):
"""
Adds a node to the collection of parent nodes that have updated for the next
calculation.
(See the wiki section about "Handling graph shape-changes during calculation"
for more details.)
"""
self._updated_parent_nodes.add(node)
self.graph_manager.node_has_updated_parents(self)
def clear_updated_parents(self):
"""
Clears out the collection of updated parents.
"""
self._updated_parent_nodes.clear()
def add_gc_ref_count(self):
"""
Increases the GC ref-count.
"""
self._gc_ref_count += 1
def release_gc_ref_count(self):
"""
Decreases the GC ref count.
"""
self._gc_ref_count -= 1
def get_gc_ref_count(self):
"""
Return the GC ref count.
"""
return self._gc_ref_count
def add_parent_node(self, node_type, *args, **kwargs):
"""
Adds a parent node of the type passed in for the identity parameters
supplied.
kwargs can include:
auto_rebuild = True / False (defaults to False if not supplied)
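        For example (hypothetical node type and identity parameters):
            price_node = self.add_parent_node(PriceNode, "EUR", "USD", auto_rebuild=True)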
"""
# We find the node...
node = NodeFactory.get_node(
self.graph_manager,
GraphNode.GCType.COLLECTABLE,
node_type,
*args,
**kwargs)
self.add_parent(node)
# If the optional auto_rebuild flag is set, we will automatically reset
# dependencies if this node has updated in a calculation cycle...
auto_rebuild = kwargs["auto_rebuild"] if "auto_rebuild" in kwargs else False
if auto_rebuild is True:
self._auto_rebuild_nodes.add(node)
return node<|fim▁end|> | if node not in self._parent_nodes:
return # The node passed in is not one of our parent nodes.
# We remove the parent, and remove us as a child from the parent... |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|># Copyright 2015, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0<|fim▁hole|># License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from a10_horizon.dashboard.a10networks.a10appliances import views
urlpatterns = patterns(
'a10_horizon.dashboard.a10networks.a10appliances.views',
url(r'^$', views.IndexView.as_view(), name='index')
# url(r'^deleteappliance$', views.DeleteApplianceView.as_view(), name='deleteappliance')
# url(r'^addimage$', views.AddImageView.as_view(), name="addimage")
)<|fim▁end|> | #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
<|file_name|>densenet_test.py<|end_file_name|><|fim▁begin|># Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and Benchmarks for Densenet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import time

from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.densenet import densenet
from tensorflow.python.client import device_lib
class DensenetTest(tf.test.TestCase):
def test_bottleneck_true(self):
depth = 7
growth_rate = 2
num_blocks = 3
output_classes = 10
num_layers_in_each_block = -1
batch_size = 1
data_format = ('channels_first') if tf.test.is_gpu_available() else (
'channels_last')
model = densenet.DenseNet(depth, growth_rate, num_blocks,
output_classes, num_layers_in_each_block,
data_format, bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=False, include_top=True)
if data_format == 'channels_last':
rand_input = tf.random_uniform((batch_size, 32, 32, 3))
else:
rand_input = tf.random_uniform((batch_size, 3, 32, 32))
output_shape = model(rand_input).shape
self.assertEqual(output_shape, (batch_size, output_classes))
def test_bottleneck_false(self):
depth = 7
growth_rate = 2
num_blocks = 3
output_classes = 10
num_layers_in_each_block = -1
batch_size = 1
data_format = ('channels_first') if tf.test.is_gpu_available() else (
'channels_last')
model = densenet.DenseNet(depth, growth_rate, num_blocks,
output_classes, num_layers_in_each_block,
data_format, bottleneck=False, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=False, include_top=True)
if data_format == 'channels_last':
rand_input = tf.random_uniform((batch_size, 32, 32, 3))
else:
rand_input = tf.random_uniform((batch_size, 3, 32, 32))
output_shape = model(rand_input).shape
self.assertEqual(output_shape, (batch_size, output_classes))
def test_pool_initial_true(self):
depth = 7
growth_rate = 2
num_blocks = 4
output_classes = 10
num_layers_in_each_block = [1, 2, 2, 1]
batch_size = 1
data_format = ('channels_first') if tf.test.is_gpu_available() else (
'channels_last')
model = densenet.DenseNet(depth, growth_rate, num_blocks,
output_classes, num_layers_in_each_block,
data_format, bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=True, include_top=True)
if data_format == 'channels_last':
rand_input = tf.random_uniform((batch_size, 32, 32, 3))
else:
rand_input = tf.random_uniform((batch_size, 3, 32, 32))
output_shape = model(rand_input).shape
self.assertEqual(output_shape, (batch_size, output_classes))
def test_regularization(self):
if tf.test.is_gpu_available():
rand_input = tf.random_uniform((10, 3, 32, 32))
data_format = 'channels_first'
else:
rand_input = tf.random_uniform((10, 32, 32, 3))
data_format = 'channels_last'
weight_decay = 1e-4
conv = tf.keras.layers.Conv2D(
3, (3, 3),
padding='same',
use_bias=False,
data_format=data_format,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay))
optimizer = tf.train.GradientDescentOptimizer(0.1)
conv(rand_input) # Initialize the variables in the layer
def compute_true_l2(vs, wd):
return tf.reduce_sum(tf.square(vs)) * wd
true_l2 = compute_true_l2(conv.variables, weight_decay)
keras_l2 = tf.add_n(conv.losses)
self.assertAllClose(true_l2, keras_l2)
with tf.GradientTape() as tape_true, tf.GradientTape() as tape_keras:
loss = tf.reduce_sum(conv(rand_input))
loss_with_true_l2 = loss + compute_true_l2(conv.variables, weight_decay)
loss_with_keras_l2 = loss + tf.add_n(conv.losses)
true_grads = tape_true.gradient(loss_with_true_l2, conv.variables)
keras_grads = tape_keras.gradient(loss_with_keras_l2, conv.variables)
self.assertAllClose(true_grads, keras_grads)
optimizer.apply_gradients(zip(keras_grads, conv.variables))
keras_l2_after_update = tf.add_n(conv.losses)
self.assertNotAllClose(keras_l2, keras_l2_after_update)
def compute_gradients(model, images, labels):
with tf.GradientTape() as tape:
logits = model(images, training=True)
cross_ent = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels)
regularization = tf.add_n(model.losses)
loss = cross_ent + regularization
tf.contrib.summary.scalar(name='loss', tensor=loss)
return tape.gradient(loss, model.variables)
<|fim▁hole|>
def device_and_data_format():
return ('/gpu:0',
'channels_first') if tf.test.is_gpu_available() else ('/cpu:0',
'channels_last')
def random_batch(batch_size, data_format):
shape = (3, 224, 224) if data_format == 'channels_first' else (224, 224, 3)
shape = (batch_size,) + shape
num_classes = 1000
images = tf.random_uniform(shape)
labels = tf.random_uniform(
[batch_size], minval=0, maxval=num_classes, dtype=tf.int32)
one_hot = tf.one_hot(labels, num_classes)
return images, one_hot
class MockIterator(object):
def __init__(self, tensors):
self._tensors = [tf.identity(x) for x in tensors]
def next(self):
return self._tensors
class DensenetBenchmark(tf.test.Benchmark):
def __init__(self):
self.depth = 121
self.growth_rate = 32
self.num_blocks = 4
self.output_classes = 1000
self.num_layers_in_each_block = [6, 12, 24, 16]
def _train_batch_sizes(self):
"""Choose batch sizes based on GPU capability."""
for device in device_lib.list_local_devices():
if tf.DeviceSpec.from_string(device.name).device_type == 'GPU':
if 'K20' in device.physical_device_desc:
return (16,)
if 'P100' in device.physical_device_desc:
return (16, 32, 64)
if tf.DeviceSpec.from_string(device.name).device_type == 'TPU':
return (32,)
return (16, 32)
def _report(self, label, start, num_iters, device, batch_size, data_format):
avg_time = (time.time() - start) / num_iters
dev = tf.DeviceSpec.from_string(device).device_type.lower()
name = '%s_%s_batch_%d_%s' % (label, dev, batch_size, data_format)
extras = {'examples_per_sec': batch_size / avg_time}
self.report_benchmark(
iters=num_iters, wall_time=avg_time, name=name, extras=extras)
def _force_device_sync(self):
# If this function is called in the context of a non-CPU device
# (e.g., inside a 'with tf.device("/gpu:0")' block)
# then this will force a copy from CPU->NON_CPU_DEVICE->CPU,
# which forces a sync. This is a roundabout way, yes.
tf.constant(1.).cpu()
def _benchmark_eager_apply(self, label, device_and_format, defun=False,
execution_mode=None):
with tfe.execution_mode(execution_mode):
device, data_format = device_and_format
model = densenet.DenseNet(self.depth, self.growth_rate, self.num_blocks,
self.output_classes,
self.num_layers_in_each_block, data_format,
bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=True, include_top=True)
if defun:
# TODO(apassos) enable tfe.function here
model.call = tfe.defun(model.call)
batch_size = 64
num_burn = 5
num_iters = 30
with tf.device(device):
images, _ = random_batch(batch_size, data_format)
for _ in xrange(num_burn):
model(images, training=False).cpu()
if execution_mode:
tfe.async_wait()
gc.collect()
start = time.time()
for _ in xrange(num_iters):
model(images, training=False).cpu()
if execution_mode:
tfe.async_wait()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_apply_sync(self):
self._benchmark_eager_apply('eager_apply', device_and_data_format(),
defun=False)
def benchmark_eager_apply_async(self):
self._benchmark_eager_apply(
'eager_apply_async', device_and_data_format(), defun=False,
execution_mode=tfe.ASYNC)
def benchmark_eager_apply_with_defun(self):
self._benchmark_eager_apply('eager_apply_with_defun',
device_and_data_format(), defun=True)
def _benchmark_eager_train(self,
label,
make_iterator,
device_and_format,
defun=False,
execution_mode=None):
with tfe.execution_mode(execution_mode):
device, data_format = device_and_format
for batch_size in self._train_batch_sizes():
(images, labels) = random_batch(batch_size, data_format)
model = densenet.DenseNet(self.depth, self.growth_rate, self.num_blocks,
self.output_classes,
self.num_layers_in_each_block, data_format,
bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=True, include_top=True)
optimizer = tf.train.GradientDescentOptimizer(0.1)
apply_grads = apply_gradients
if defun:
model.call = tfe.defun(model.call)
apply_grads = tfe.defun(apply_gradients)
num_burn = 3
num_iters = 10
with tf.device(device):
iterator = make_iterator((images, labels))
for _ in xrange(num_burn):
(images, labels) = iterator.next()
apply_grads(model, optimizer,
compute_gradients(model, images, labels))
if execution_mode:
tfe.async_wait()
self._force_device_sync()
gc.collect()
start = time.time()
for _ in xrange(num_iters):
(images, labels) = iterator.next()
apply_grads(model, optimizer,
compute_gradients(model, images, labels))
if execution_mode:
tfe.async_wait()
self._force_device_sync()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_train_sync(self):
self._benchmark_eager_train('eager_train', MockIterator,
device_and_data_format(), defun=False)
def benchmark_eager_train_async(self):
self._benchmark_eager_train(
'eager_train_async',
MockIterator,
device_and_data_format(),
defun=False,
execution_mode=tfe.ASYNC)
def benchmark_eager_train_with_defun(self):
self._benchmark_eager_train(
'eager_train_with_defun', MockIterator,
device_and_data_format(), defun=True)
def benchmark_eager_train_datasets(self):
def make_iterator(tensors):
with tf.device('/device:CPU:0'):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return tfe.Iterator(ds)
self._benchmark_eager_train(
'eager_train_dataset', make_iterator,
device_and_data_format(), defun=False)
def benchmark_eager_train_datasets_with_defun(self):
def make_iterator(tensors):
with tf.device('/device:CPU:0'):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return tfe.Iterator(ds)
self._benchmark_eager_train(
'eager_train_dataset_with_defun', make_iterator,
device_and_data_format(), defun=True)
if __name__ == '__main__':
tf.enable_eager_execution()
tf.test.main()<|fim▁end|> |
def apply_gradients(model, optimizer, gradients):
optimizer.apply_gradients(zip(gradients, model.variables))
|
<|file_name|>frameNavigator-i18n.js<|end_file_name|><|fim▁begin|>'use strict';
if (mejs.i18n.ca !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.cs !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.de !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.es !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.fr !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.hr !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.hu !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}<|fim▁hole|>if (mejs.i18n.it !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.ja !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.ko !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.nl !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.pl !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.pt !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n['pt-BR'] !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.ro !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.ru !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.sk !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.sv !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.uk !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n.zh !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}
if (mejs.i18n['zh-CN'] !== undefined) {
//mejs.i18n.en["mejs.frame-rate"] = "Media frame rate (select according to your video source)";
//mejs.i18n.en["mejs.step-fwd"] = "Step forward";
//mejs.i18n.en["mejs.step-back"] = "Step back";
}<|fim▁end|> | |
<|file_name|>event-list.component.spec.ts<|end_file_name|><|fim▁begin|>import { ComponentFixture, TestBed } from "@angular/core/testing";
import { EventListComponent } from "./event-list.component";
describe("EventListComponent", () => {
let component: EventListComponent;
let fixture: ComponentFixture<EventListComponent>;
beforeEach(async () => {
await TestBed.configureTestingModule({
declarations: [EventListComponent],
}).compileComponents();
});
beforeEach(() => {
fixture = TestBed.createComponent(EventListComponent);
component = fixture.componentInstance;
fixture.detectChanges();
});
it("should create", () => {<|fim▁hole|><|fim▁end|> | expect(component).toBeTruthy();
});
}); |
<|file_name|>decorators.py<|end_file_name|><|fim▁begin|>"""
Decorators
"""
from __future__ import unicode_literals
from functools import wraps
from django.http import HttpResponseBadRequest
from django.utils.decorators import available_attrs
from django_ajax.shortcuts import render_to_json
def ajax(function=None, mandatory=True, **ajax_kwargs):
"""
Decorator who guesses the user response type and translates to a serialized<|fim▁hole|> def my_view(request):
do_something()
# will send {'status': 200, 'statusText': 'OK', 'content': null}
@ajax
def my_view(request):
return {'key': 'value'}
# will send {'status': 200, 'statusText': 'OK',
'content': {'key': 'value'}}
@ajax
def my_view(request):
return HttpResponse('<h1>Hi!</h1>')
# will send {'status': 200, 'statusText': 'OK',
'content': '<h1>Hi!</h1>'}
@ajax
def my_view(request):
return redirect('home')
# will send {'status': 302, 'statusText': 'FOUND', 'content': '/'}
# combination with others decorators:
@ajax
@login_required
@require_POST
def my_view(request):
pass
# if request user is not authenticated then the @login_required
# decorator redirect to login page.
# will send {'status': 302, 'statusText': 'FOUND',
'content': '/login'}
# if request method is 'GET' then the @require_POST decorator return
# a HttpResponseNotAllowed response.
# will send {'status': 405, 'statusText': 'METHOD NOT ALLOWED',
'content': null}
"""
def decorator(func):
@wraps(func, assigned=available_attrs(func))
def inner(request, *args, **kwargs):
if mandatory and not request.is_ajax():
return HttpResponseBadRequest()
if request.is_ajax():
# return json response
try:
return render_to_json(func(request, *args, **kwargs), **ajax_kwargs)
except Exception as exception:
return render_to_json(exception)
else:
# return standard response
return func(request, *args, **kwargs)
return inner
if function:
return decorator(function)
return decorator<|fim▁end|> | JSON response. Usage::
@ajax |
<|file_name|>set_zhihu_point_table.py<|end_file_name|><|fim▁begin|>import cPickle
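# Lookup table keyed by (x, y) pairs with x and y in [-4, 4]; pickled below for reuse.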
point_table = {}
point_table[( 4, 4)] = 400.
point_table[( 3, 4)] = 270.
point_table[( 2, 4)] = 170.
point_table[( 1, 4)] = 100.
point_table[( 0, 4)] = 0.
point_table[(-1, 4)] = 0.
point_table[(-2, 4)] = 0.
point_table[(-3, 4)] = 0.
point_table[(-4, 4)] = 0.
point_table[( 4, 3)] = 240.
point_table[( 3, 3)] = 300.
point_table[( 2, 3)] = 200.
point_table[( 1, 3)] = 120.
point_table[( 0, 3)] = 0.
point_table[(-1, 3)] = 0.
point_table[(-2, 3)] = 0.<|fim▁hole|>
point_table[( 4, 2)] = 140.
point_table[( 3, 2)] = 180.
point_table[( 2, 2)] = 240.
point_table[( 1, 2)] = 160.
point_table[( 0, 2)] = 10.
point_table[(-1, 2)] = 0.
point_table[(-2, 2)] = 0.
point_table[(-3, 2)] = 0.
point_table[(-4, 2)] = 0.
point_table[( 4, 1)] = 100.
point_table[( 3, 1)] = 110.
point_table[( 2, 1)] = 150.
point_table[( 1, 1)] = 200.
point_table[( 0, 1)] = 40.
point_table[(-1, 1)] = 0.
point_table[(-2, 1)] = 0.
point_table[(-3, 1)] = 0.
point_table[(-4, 1)] = 0.
point_table[( 4, 0)] = 0.
point_table[( 3, 0)] = 0.
point_table[( 2, 0)] = 10.
point_table[( 1, 0)] = 20.
point_table[( 0, 0)] = 160.
point_table[(-1, 0)] = 20.
point_table[(-2, 0)] = 10.
point_table[(-3, 0)] = 0.
point_table[(-4, 0)] = 0.
point_table[( 4,-1)] = 0.
point_table[( 3,-1)] = 0.
point_table[( 2,-1)] = 0.
point_table[( 1,-1)] = 0.
point_table[( 0,-1)] = 40.
point_table[(-1,-1)] = 200.
point_table[(-2,-1)] = 150.
point_table[(-3,-1)] = 110.
point_table[(-4,-1)] = 100.
point_table[( 4,-2)] = 0.
point_table[( 3,-2)] = 0.
point_table[( 2,-2)] = 0.
point_table[( 1,-2)] = 0.
point_table[( 0,-2)] = 10.
point_table[(-1,-2)] = 160.
point_table[(-2,-2)] = 240.
point_table[(-3,-2)] = 180.
point_table[(-4,-2)] = 140.
point_table[( 4,-3)] = 0.
point_table[( 3,-3)] = 0.
point_table[( 2,-3)] = 0.
point_table[( 1,-3)] = 0.
point_table[( 0,-3)] = 0.
point_table[(-1,-3)] = 120.
point_table[(-2,-3)] = 200.
point_table[(-3,-3)] = 300.
point_table[(-4,-3)] = 240.
point_table[( 4,-4)] = 0.
point_table[( 3,-4)] = 0.
point_table[( 2,-4)] = 0.
point_table[( 1,-4)] = 0.
point_table[( 0,-4)] = 0.
point_table[(-1,-4)] = 100.
point_table[(-2,-4)] = 170.
point_table[(-3,-4)] = 270.
point_table[(-4,-4)] = 400.
cPickle.dump(point_table, open('../data/point_table.cpickle', 'wb'))<|fim▁end|> | point_table[(-3, 3)] = 0.
point_table[(-4, 3)] = 0. |
<|file_name|>test_indextable.py<|end_file_name|><|fim▁begin|>"""
A test script for the `indextable` module
"""
from random import randrange
import pytest
from HamiltonianPy.indextable import IndexTable
class TestIndexTable:
def test_init(self):
match0 = r"unhashable type"
match1 = r"The .* has different type from the previous ones"
match2 = r"The .* object already exists"
with pytest.raises(TypeError, match=match0):
IndexTable([[0, 1], [2, 3]])
with pytest.raises(TypeError, match=match1):
IndexTable([(0, 1), "ab"])
with pytest.raises(ValueError, match=match2):
IndexTable([(0, 1), (2, 3), (0, 1)])
def test_object_type(self):
table = IndexTable((x, y) for x in range(4) for y in range(4))
assert table.object_type is tuple
def test_str_and_iteration(self):
separator = "*" * 80
table = IndexTable((x, y) for x in range(2) for y in range(2))
print(table)
print(separator)
for index in table.indices():
print(index)
print(separator)
for item in table.objects():
print(item)
print(separator)
for index, item in table:
print(index, item)
print(separator)
def test_length(self):
num0 = 4
num1 = 7
table = IndexTable((x, y) for x in range(num0) for y in range(num1))
assert len(table) == num0 * num1
def test_query_index(self):
num0 = 7
num1 = 3
table = IndexTable((x, y) for x in range(num0) for y in range(num1))
for i in range(5):
key = (randrange(num0), randrange(num1))
assert table(key) == key[0] * num1 + key[1]
def test_query_object(self):<|fim▁hole|> num1 = 3
table = IndexTable((x, y) for x in range(num0) for y in range(num1))
for i in range(5):
index = randrange(num0 * num1)
assert table.query_object(index) == divmod(index, num1)<|fim▁end|> | num0 = 7 |
<|file_name|>longestValidParentheses.go<|end_file_name|><|fim▁begin|>package longestvalidparentheses
func longestValidParentheses(s string) int {
l := 0
if len(s) >= 1 {
i, stack := 0, make([]int, len(s)+1)
stack[0] = -1 // first index
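		// stack holds the index just before the current valid run, followed by indices of unmatched '('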
for si := 0; si < len(s); si++ {
if s[si] == '(' { // if '('
i++ // push
stack[i] = si
} else { // if ')'
i-- // pop
if i < 0 { // keep first index
i = 0 // push
stack[i] = si
} else if tmp := si - stack[i]; tmp > l {
l = tmp // max length
}<|fim▁hole|> }
return l
}<|fim▁end|> | }
} |
<|file_name|>sharing.py<|end_file_name|><|fim▁begin|>import data
from utils import assert_403, assert_404, assert_200, parse_xml, xpath
PRD = 'prd'
def test_sharing(IndivoClient):
DS = 'ds'
def get_datastore(obj):
if hasattr(obj, DS):
return getattr(obj, DS).values()
return False
def set_datastore(obj, **kwargs):
if hasattr(obj, DS):
ds = getattr(obj, DS)
for kwarg, value in kwargs.items():
if hasattr(ds, kwarg):
setattr(ds, kwarg, value)
return obj
raise ValueError
def alice_setup(record_id, bob_account_id):
allergy_type = {'type' : 'http://indivo.org/vocab/xml/documents#Allergy'}
alice_chrome_client = IndivoClient('chrome', 'chrome')
alice_chrome_client.create_session(data.account)
alice_chrome_client.read_record(record_id=record_id)
alice_chrome_client.get_account_permissions(account_id=data.account['account_id'])
alice_chrome_client.get_account_records(account_id = data.account['account_id'])
# Alice posts a document
# (We save the first doc instead of zero
# due to the contact doc already in alice's account)
alice_chrome_client.post_document(data=data.doc01)
document_id = alice_chrome_client.read_documents().response[PRD]['Document'][1]
# Save the document_id in the client's datastore
alice_chrome_client.ds.document_id = document_id
# Save the first carenet_id in the client's datastore
carenet_id = alice_chrome_client.get_record_carenets().response[PRD]['Carenet'][0]
# post four documents to Alice's record, 2 allergies and 2 immunizations
document_1_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.allergy00)), "/Document/@id")[0]
document_2_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.allergy01)), "/Document/@id")[0]
document_3_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.immunization)), "/Document/@id")[0]
document_4_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.immunization2)), "/Document/@id")[0]
# and one more to test nevershare
document_5_id = xpath(parse_xml(alice_chrome_client.post_document(data=data.allergy02)), "/Document/@id")[0]
# auto-share allergies
alice_chrome_client.post_autoshare(data=allergy_type, carenet_id=carenet_id)
assert_200(alice_chrome_client.get_autoshare_bytype_all(record_id=record_id))
# unshare that one allergy, which should negate the autoshare
alice_chrome_client.delete_carenet_document(record_id = record_id, document_id = document_2_id, carenet_id=carenet_id)
# nevershare the third allergy
alice_chrome_client.document_nevershare_set(record_id = record_id, document_id = document_5_id)
# immunizations are individually shared (well only one of them)
alice_chrome_client.post_carenet_document(document_id = document_3_id, carenet_id=carenet_id)
# Alice shares her contact document(s) with the carenet
contact_doc = parse_xml(alice_chrome_client.read_documents(record_id = record_id, parameters={'type':'Contact'}))
for doc_id in xpath(contact_doc, '/Documents/Document/@id'):
alice_chrome_client.post_carenet_document(record_id = record_id, document_id = doc_id, carenet_id = carenet_id)
# Alice adds bob_account_id to carenet[0]
alice_chrome_client.post_carenet_account(carenet_id = carenet_id, data='account_id=' + bob_account_id + '&write=false')
# Review all accounts within carenet[0]
assert xpath(parse_xml(alice_chrome_client.get_carenet_accounts(carenet_id = carenet_id)), '/CarenetAccounts')
alice_chrome_client.get_carenet_apps(carenet_id = carenet_id)
alice_chrome_client.read_allergies(record_id = record_id)
# Finally, return the carenet_id, document_id
# in order to check Bob's access
# and a second document that is disallowed
return carenet_id, [document_1_id, document_3_id], [document_2_id, document_4_id, document_5_id]
def bob_setup(bob_account_id, record_id, carenet_id, allowed_docs, disallowed_docs):
bob_chrome_client = IndivoClient('chrome', 'chrome')
bob_chrome_client.create_session(data.account02)
# SZ: Bob should NOT be able to read the docs directly in the record
for doc_id in allowed_docs+disallowed_docs:
assert_403(bob_chrome_client.read_document(record_id=record_id, document_id=doc_id))
assert_403(bob_chrome_client.get_record_carenets(record_id=record_id))
# Bob should be able to read the allowed docs
for doc_id in allowed_docs:
assert_200(bob_chrome_client.get_carenet_document(carenet_id = carenet_id, document_id = doc_id))
# Bob should not be able to read the disallowed docs
for doc_id in disallowed_docs:
assert_404(bob_chrome_client.get_carenet_document(carenet_id = carenet_id, document_id = doc_id))
# Bob should be able to list docs in the carenet
carenet_documents_list = bob_chrome_client.get_carenet_documents(carenet_id = carenet_id).response[PRD]['Document']
# with a parameter
carenet_documents_list = bob_chrome_client.get_carenet_documents(carenet_id = carenet_id, parameters={'type': 'http://indivo.org/vocab/xml/documents#Allergy'}).response[PRD]['Document']
# Read carenet allergies
assert_200(bob_chrome_client.read_carenet_allergies(carenet_id = carenet_id))
assert_200(bob_chrome_client.read_carenet_problems(carenet_id = carenet_id))
# Read the contact document, this should work
contact_doc = parse_xml(bob_chrome_client.read_carenet_special_document(carenet_id = carenet_id, special_document='contact'))
contact_name = xpath(contact_doc, '/ns:Contact/ns:name/ns:fullName/text()', namespaces={'ns':'http://indivo.org/vocab/xml/documents#'})
assert(contact_name)
bob_chrome_client.get_account_permissions(account_id=bob_account_id)<|fim▁hole|> # Not yet implemented
#bob_chrome_client.get_carenet_app_permissions(account_id=bob_account_id)
return True
def admin_setup(bob_account_id):
admin_client = IndivoClient(data.machine_app_email, data.machine_app_secret)
admin_client.set_app_id(data.app_email)
# Create a record for Alice and set her at the owner
record_id = admin_client.create_record(data=data.contact).response[PRD]['Record'][0]
admin_client.set_record_owner(data=data.account['account_id'])
# Create a basic set of carenets
carenet_names = ['Family2', 'Friends2', 'School/Office']
for cname in carenet_names:
admin_client.create_carenet(data='name=' + cname)
# Check to make sure the admin can list the carenets and the accounts within each one
carenets = xpath(parse_xml(admin_client.get_record_carenets(record_id = record_id)),'/Carenets/Carenet/@id')
for carenet_id in carenets:
assert len(xpath(parse_xml(admin_client.get_carenet_accounts(carenet_id = carenet_id)), '/CarenetAccounts')) > 0
return record_id
bob_account_id = '[email protected]'
# Admin spawning carenets under Alice's newly created record
record_id = admin_setup(bob_account_id)
# Given Bob's account id and a record that has been set up for her
# Alice gives Bob the document_id that she'd like to share with him
# Even though Alice gives Bob a document_id, Bob has the ability
# to read all documents within the carenet that Alice added him to
# 2010-09-13 now Alice also shares her contact URL and we check
# that Bob can read it at the special URL
carenet_id, allowed_documents, disallowed_documents = alice_setup(record_id, bob_account_id)
return bob_setup(bob_account_id, record_id, carenet_id, allowed_documents, disallowed_documents)<|fim▁end|> | bob_chrome_client.get_carenet_account_permissions(carenet_id= carenet_id,
record_id=record_id,
account_id=bob_account_id)
|
<|file_name|>bloom.py<|end_file_name|><|fim▁begin|>#
# bloom.py
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
"""Bloom filter support"""
from __future__ import absolute_import, division, print_function, unicode_literals
import struct
import sys
import math
import bitcoin.core
import bitcoin.core.serialize
def ROTL32(x, r):
assert x <= 0xFFFFFFFF
return ((x << r) & 0xFFFFFFFF) | (x >> (32 - r))
def MurmurHash3(nHashSeed, vDataToHash):
"""MurmurHash3 (x86_32)
Used for bloom filters. See http://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp
"""
assert nHashSeed <= 0xFFFFFFFF
h1 = nHashSeed
c1 = 0xcc9e2d51
c2 = 0x1b873593
# body
i = 0
while i < len(vDataToHash) - len(vDataToHash) % 4 \
and len(vDataToHash) - i >= 4:
k1 = struct.unpack(b"<L", vDataToHash[i:i+4])[0]
k1 = (k1 * c1) & 0xFFFFFFFF
k1 = ROTL32(k1, 15)
k1 = (k1 * c2) & 0xFFFFFFFF
h1 ^= k1
h1 = ROTL32(h1, 13)
h1 = (((h1*5) & 0xFFFFFFFF) + 0xe6546b64) & 0xFFFFFFFF
<|fim▁hole|> j = (len(vDataToHash) // 4) * 4
import sys
bord = ord
if sys.version > '3':
# In Py3 indexing bytes returns numbers, not characters
bord = lambda x: x
if len(vDataToHash) & 3 >= 3:
k1 ^= bord(vDataToHash[j+2]) << 16
if len(vDataToHash) & 3 >= 2:
k1 ^= bord(vDataToHash[j+1]) << 8
if len(vDataToHash) & 3 >= 1:
k1 ^= bord(vDataToHash[j])
k1 &= 0xFFFFFFFF
k1 = (k1 * c1) & 0xFFFFFFFF
k1 = ROTL32(k1, 15)
k1 = (k1 * c2) & 0xFFFFFFFF
h1 ^= k1
# finalization
h1 ^= len(vDataToHash) & 0xFFFFFFFF
h1 ^= (h1 & 0xFFFFFFFF) >> 16
h1 *= 0x85ebca6b
h1 ^= (h1 & 0xFFFFFFFF) >> 13
h1 *= 0xc2b2ae35
h1 ^= (h1 & 0xFFFFFFFF) >> 16
return h1 & 0xFFFFFFFF
class CBloomFilter(bitcoin.core.serialize.Serializable):
# 20,000 items with fp rate < 0.1% or 10,000 items and <0.0001%
MAX_BLOOM_FILTER_SIZE = 36000
MAX_HASH_FUNCS = 50
UPDATE_NONE = 0
UPDATE_ALL = 1
UPDATE_P2PUBKEY_ONLY = 2
UPDATE_MASK = 3
def __init__(self, nElements, nFPRate, nTweak, nFlags):
"""Create a new bloom filter
The filter will have a given false-positive rate when filled with the
given number of elements.
Note that if the given parameters will result in a filter outside the
bounds of the protocol limits, the filter created will be as close to
the given parameters as possible within the protocol limits. This will
apply if nFPRate is very low or nElements is unreasonably high.
nTweak is a constant which is added to the seed value passed to the
hash function It should generally always be a random value (and is
largely only exposed for unit testing)
nFlags should be one of the UPDATE_* enums (but not _MASK)
"""
LN2SQUARED = 0.4804530139182014246671025263266649717305529515945455
LN2 = 0.6931471805599453094172321214581765680755001343602552
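        # optimal Bloom filter parameters: m = -n*ln(p)/(ln 2)^2 bits and k = (m/n)*ln(2) hash functions, clamped to protocol limits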
self.vData = bytearray(int(min(-1 / LN2SQUARED * nElements * math.log(nFPRate), self.MAX_BLOOM_FILTER_SIZE * 8) / 8))
self.nHashFuncs = int(min(len(self.vData) * 8 / nElements * LN2, self.MAX_HASH_FUNCS))
self.nTweak = nTweak
self.nFlags = nFlags
def bloom_hash(self, nHashNum, vDataToHash):
return MurmurHash3(((nHashNum * 0xFBA4C795) + self.nTweak) & 0xFFFFFFFF, vDataToHash) % (len(self.vData) * 8)
__bit_mask = bytearray([0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80])
def insert(self, elem):
"""Insert an element in the filter.
elem may be a COutPoint or bytes
"""
if isinstance(elem, bitcoin.core.COutPoint):
elem = elem.serialize()
if len(self.vData) == 1 and self.vData[0] == 0xff:
return
for i in range(0, self.nHashFuncs):
nIndex = self.bloom_hash(i, elem)
# Sets bit nIndex of vData
self.vData[nIndex >> 3] |= self.__bit_mask[7 & nIndex]
def contains(self, elem):
"""Test if the filter contains an element
elem may be a COutPoint or bytes
"""
if isinstance(elem, bitcoin.core.COutPoint):
elem = elem.serialize()
if len(self.vData) == 1 and self.vData[0] == 0xff:
return True
for i in range(0, self.nHashFuncs):
nIndex = self.bloom_hash(i, elem)
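            # a single clear bit proves the element was never inserted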
if not (self.vData[nIndex >> 3] & self.__bit_mask[7 & nIndex]):
return False
return True
def IsWithinSizeConstraints(self):
return len(self.vData) <= self.MAX_BLOOM_FILTER_SIZE and self.nHashFuncs <= self.MAX_HASH_FUNCS
    def IsRelevantAndUpdate(self, tx, tx_hash):
# Not useful for a client, so not implemented yet.
raise NotImplementedError
__struct = struct.Struct(b'<IIB')
@classmethod
def stream_deserialize(cls, f):
vData = bitcoin.core.serialize.BytesSerializer.stream_deserialize(f)
        (nHashFuncs,
         nTweak,
         nFlags) = cls.__struct.unpack(f.read(cls.__struct.size))
        self = cls.__new__(cls)
self.vData = vData
self.nHashFuncs = nHashFuncs
self.nTweak = nTweak
self.nFlags = nFlags
return self
def stream_serialize(self, f):
if sys.version > '3':
bitcoin.core.serialize.BytesSerializer.stream_serialize(self.vData, f)
else:
# 2.7 has problems with f.write(bytearray())
bitcoin.core.serialize.BytesSerializer.stream_serialize(bytes(self.vData), f)
f.write(self.__struct.pack(self.nHashFuncs, self.nTweak, self.nFlags))<|fim▁end|> | i += 4
# tail
k1 = 0 |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>var engine = require('../');
var express = require('express');
var path = require('path');
var app = express();
app.engine('dot', engine.__express);
app.set('views', path.join(__dirname, './views'));
app.set('view engine', 'dot');
app.get('/', function(req, res) {<|fim▁hole|>
app.get('/layout', function(req, res) {
res.render('layout/index');
});
app.get('/cascade', function(req, res) {
res.render('cascade/me');
});
app.get('/partial', function(req, res) {
res.render('partial/index');
});
app.get('/helper', function(req, res) {
// helper as a property
engine.helper.myHelperProperty = 'Hello from server property helper';
// helper as a method
engine.helper.myHelperMethod = function(param) {
return 'Hello from server method helper (parameter: ' + param + ', server model: ' + this.model.fromServer + ')';
}
res.render('helper/index', { fromServer: 'Hello from server', });
});
var server = app.listen(2015, function() {
console.log('Run the example at http://locahost:%d', server.address().port);
});<|fim▁end|> | res.render('index', { fromServer: 'Hello from server', });
}); |
<|file_name|>handler_test.go<|end_file_name|><|fim▁begin|>package http
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net/http/httptest"
"runtime"
"testing"
cmds "gx/ipfs/QmSXUokcP4TJpFfqozT69AVAYRtzXVMUjzQVkYX41R9Svs/go-ipfs-cmds"
cmdkit "gx/ipfs/Qmde5VP1qUkyQXKCfmEUA7bP64V2HAptbJ7phuPp7jXWwg/go-ipfs-cmdkit"
)
type VersionOutput struct {
Version string
Commit string
Repo string
System string
Golang string
}
type testEnv struct {
version, commit, repoVersion string
rootCtx context.Context
t *testing.T
wait chan struct{}
}
func (env testEnv) Context() context.Context {
return env.rootCtx
}
func getCommit(env cmds.Environment) (string, bool) {
tEnv, ok := env.(testEnv)
return tEnv.commit, ok
}
func getVersion(env cmds.Environment) (string, bool) {
tEnv, ok := env.(testEnv)
return tEnv.version, ok
}
func getRepoVersion(env cmds.Environment) (string, bool) {
tEnv, ok := env.(testEnv)
return tEnv.repoVersion, ok
}
func getTestingT(env cmds.Environment) (*testing.T, bool) {
tEnv, ok := env.(testEnv)
return tEnv.t, ok
}
func getWaitChan(env cmds.Environment) (chan struct{}, bool) {
tEnv, ok := env.(testEnv)
return tEnv.wait, ok
}
var (
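	// cmdRoot is the command tree exercised by the handler tests below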
cmdRoot = &cmds.Command{
Options: []cmdkit.Option{
// global options, added to every command
cmds.OptionEncodingType,
cmds.OptionStreamChannels,
cmds.OptionTimeout,
},
Subcommands: map[string]*cmds.Command{
"error": &cmds.Command{
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
return errors.New("an error occurred")
},
},
"lateerror": &cmds.Command{
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
re.Emit("some value")
return errors.New("an error occurred")
},
Type: "",
},
"encode": &cmds.Command{
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
return errors.New("an error occurred")
},
Type: "",
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, v string) error {
fmt.Fprintln(w, v)
return nil
}),
},
},
"lateencode": &cmds.Command{
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
re.Emit("hello")
return errors.New("an error occurred")
},
Type: "",
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, v string) error {
fmt.Fprintln(w, v)
if v != "hello" {
return fmt.Errorf("expected hello, got %s", v)
}
return nil
}),
},
},
"protoencode": &cmds.Command{
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
return errors.New("an error occurred")
},
Type: "",
Encoders: cmds.EncoderMap{
cmds.Protobuf: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, v string) error {
fmt.Fprintln(w, v)
return nil
}),
},
},
"protolateencode": &cmds.Command{
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
re.Emit("hello")
return errors.New("an error occurred")
},
Type: "",
Encoders: cmds.EncoderMap{
cmds.Protobuf: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, v string) error {
fmt.Fprintln(w, v)
return nil
}),
},
},
"doubleclose": &cmds.Command{
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
t, ok := getTestingT(env)
if !ok {
return errors.New("error getting *testing.T")
}
re.Emit("some value")
err := re.Close()
if err != nil {
t.Error("unexpected error closing:", err)
}
err = re.Close()
if err != cmds.ErrClosingClosedEmitter {
t.Error("expected double close error, got:", err)
}
return nil
},
Type: "",
},
"single": &cmds.Command{
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
t, ok := getTestingT(env)
if !ok {
return errors.New("error getting *testing.T")
}
wait, ok := getWaitChan(env)
if !ok {
return errors.New("error getting wait chan")
}<|fim▁hole|> t.Error("unexpected emit error:", err)
}
err = re.Emit("this should not be emitted")
if err != cmds.ErrClosedEmitter {
t.Errorf("expected emit error %q, got: %v", cmds.ErrClosedEmitter, err)
}
err = re.Close()
if err != cmds.ErrClosingClosedEmitter {
t.Error("expected double close error, got:", err)
}
close(wait)
return nil
},
Type: "",
},
"reader": &cmds.Command{
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
buf := bytes.NewBufferString("the reader call returns a reader.")
return re.Emit(buf)
},
},
"version": &cmds.Command{
Helptext: cmdkit.HelpText{
Tagline: "Show ipfs version information.",
ShortDescription: "Returns the current version of ipfs and exits.",
},
Type: VersionOutput{},
Options: []cmdkit.Option{
cmdkit.BoolOption("number", "n", "Only show the version number."),
cmdkit.BoolOption("commit", "Show the commit hash."),
cmdkit.BoolOption("repo", "Show repo version."),
cmdkit.BoolOption("all", "Show all version information"),
},
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
version, ok := getVersion(env)
if !ok {
return cmdkit.Errorf(cmdkit.ErrNormal, "couldn't get version")
}
repoVersion, ok := getRepoVersion(env)
if !ok {
return cmdkit.Errorf(cmdkit.ErrNormal, "couldn't get repo version")
}
commit, ok := getCommit(env)
if !ok {
return cmdkit.Errorf(cmdkit.ErrNormal, "couldn't get commit info")
}
re.Emit(&VersionOutput{
Version: version,
Commit: commit,
Repo: repoVersion,
System: runtime.GOARCH + "/" + runtime.GOOS, //TODO: Precise version here
Golang: runtime.Version(),
})
return nil
},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, v *VersionOutput) error {
if repo, ok := req.Options["repo"].(bool); ok && repo {
_, err := fmt.Fprintf(w, "%v\n", v.Repo)
return err
}
var commitTxt string
if commit, ok := req.Options["commit"].(bool); ok && commit {
commitTxt = "-" + v.Commit
}
if number, ok := req.Options["number"].(bool); ok && number {
_, err := fmt.Fprintf(w, "%v%v\n", v.Version, commitTxt)
return err
}
if all, ok := req.Options["all"].(bool); ok && all {
_, err := fmt.Fprintf(w, "go-ipfs version: %s-%s\n"+
"Repo version: %s\nSystem version: %s\nGolang version: %s\n",
v.Version, v.Commit, v.Repo, v.System, v.Golang)
return err
}
_, err := fmt.Fprintf(w, "ipfs version %s%s\n", v.Version, commitTxt)
return err
}),
},
},
},
}
)
func getTestServer(t *testing.T, origins []string) (cmds.Environment, *httptest.Server) {
if len(origins) == 0 {
origins = defaultOrigins
}
env := testEnv{
version: "0.1.2",
commit: "c0mm17", // yes, I know there's no 'm' in hex.
repoVersion: "4",
rootCtx: context.Background(),
t: t,
wait: make(chan struct{}),
}
return env, httptest.NewServer(NewHandler(env, cmdRoot, originCfg(origins)))
}
func errEq(err1, err2 error) bool {
if err1 == nil && err2 == nil {
return true
}
if err1 == nil || err2 == nil {
return false
}
return err1.Error() == err2.Error()
}<|fim▁end|> |
err := cmds.EmitOnce(re, "some value")
if err != nil { |
<|file_name|>mlab.py<|end_file_name|><|fim▁begin|># This module is for compatibility only. All functions are defined elsewhere.
__all__ = ['rand', 'tril', 'trapz', 'hanning', 'rot90', 'triu', 'diff', 'angle',
'roots', 'ptp', 'kaiser', 'randn', 'cumprod', 'diag', 'msort',
'LinearAlgebra', 'RandomArray', 'prod', 'std', 'hamming', 'flipud',
'max', 'blackman', 'corrcoef', 'bartlett', 'eye', 'squeeze', 'sinc',
'tri', 'cov', 'svd', 'min', 'median', 'fliplr', 'eig', 'mean']
import numpy.oldnumeric.linear_algebra as LinearAlgebra
import numpy.oldnumeric.random_array as RandomArray
from numpy import tril, trapz as _Ntrapz, hanning, rot90, triu, diff, \
angle, roots, ptp as _Nptp, kaiser, cumprod as _Ncumprod, \
diag, msort, prod as _Nprod, std as _Nstd, hamming, flipud, \
amax as _Nmax, amin as _Nmin, blackman, bartlett, \
squeeze, sinc, median, fliplr, mean as _Nmean, transpose
from numpy.linalg import eig, svd
from numpy.random import rand, randn
import numpy as np
<|fim▁hole|>
from typeconv import convtypecode
def eye(N, M=None, k=0, typecode=None, dtype=None):
""" eye returns a N-by-M 2-d array where the k-th diagonal is all ones,
and everything else is zeros.
"""
dtype = convtypecode(typecode, dtype)
if M is None: M = N
m = np.equal(np.subtract.outer(np.arange(N), np.arange(M)),-k)
    if m.dtype != dtype:
        return m.astype(dtype)
    return m
def tri(N, M=None, k=0, typecode=None, dtype=None):
""" returns a N-by-M array where all the diagonals starting from
lower left corner up to the k-th are all ones.
"""
dtype = convtypecode(typecode, dtype)
if M is None: M = N
m = np.greater_equal(np.subtract.outer(np.arange(N), np.arange(M)),-k)
    if m.dtype != dtype:
        return m.astype(dtype)
    return m
def trapz(y, x=None, axis=-1):
return _Ntrapz(y, x, axis=axis)
def ptp(x, axis=0):
return _Nptp(x, axis)
def cumprod(x, axis=0):
return _Ncumprod(x, axis)
def max(x, axis=0):
return _Nmax(x, axis)
def min(x, axis=0):
return _Nmin(x, axis)
def prod(x, axis=0):
return _Nprod(x, axis)
def std(x, axis=0):
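    # rescale numpy's population std (divisor N) to the sample estimate (divisor N-1)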
N = asarray(x).shape[axis]
return _Nstd(x, axis)*sqrt(N/(N-1.))
def mean(x, axis=0):
return _Nmean(x, axis)
# This is exactly the same cov function as in MLab
def cov(m, y=None, rowvar=0, bias=0):
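    # covariance of the columns of m by default (rows when rowvar is set); divisor is N-1 unless bias is set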
if y is None:
y = m
else:
y = y
if rowvar:
m = transpose(m)
y = transpose(y)
if (m.shape[0] == 1):
m = transpose(m)
if (y.shape[0] == 1):
y = transpose(y)
N = m.shape[0]
if (y.shape[0] != N):
raise ValueError("x and y must have the same number of observations")
m = m - _Nmean(m,axis=0)
y = y - _Nmean(y,axis=0)
if bias:
fact = N*1.0
else:
fact = N-1.0
return squeeze(dot(transpose(m), conjugate(y)) / fact)
from numpy import sqrt, multiply
def corrcoef(x, y=None):
c = cov(x, y)
d = diag(c)
return c/sqrt(multiply.outer(d,d))
from compat import *
from functions import *
from precision import *
from ufuncs import *
from misc import *
import compat
import precision
import functions
import misc
import ufuncs
import numpy
__version__ = numpy.__version__
del numpy
__all__ += ['__version__']
__all__ += compat.__all__
__all__ += precision.__all__
__all__ += functions.__all__
__all__ += ufuncs.__all__
__all__ += misc.__all__
del compat
del functions
del precision
del ufuncs
del misc<|fim▁end|> | |
<|file_name|>compat.py<|end_file_name|><|fim▁begin|>import sys
<|fim▁hole|>if IS_PY3:
from http.client import NO_CONTENT
from email import encoders as Encoders
from urllib.parse import quote, urlencode
unicode = str
bytes = bytes
else:
from email import Encoders
from httplib import NO_CONTENT
from urllib import quote, urlencode
unicode = unicode
_orig_bytes = bytes
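    # Python 2 bytes() (an alias of str) takes no encoding argument, so drop extra args from Py3-style calls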
bytes = lambda s, *a: _orig_bytes(s)<|fim▁end|> | IS_PY3 = sys.version_info[0] == 3
|
<|file_name|>inline_fragments.rs<|end_file_name|><|fim▁begin|>/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::no_inline::NO_INLINE_DIRECTIVE_NAME;
use crate::node_identifier::{LocationAgnosticHash, LocationAgnosticPartialEq};
use crate::relay_client_component::RELAY_CLIENT_COMPONENT_SERVER_DIRECTIVE_NAME;
use fnv::FnvHashMap;
use graphql_ir::{
FragmentDefinition, FragmentSpread, InlineFragment, Program, ScalarField, Selection,
Transformed, Transformer,
};
use std::{hash::Hash, sync::Arc};
pub fn inline_fragments(program: &Program) -> Program {
let mut transform = InlineFragmentsTransform::new(program);
transform
.transform_program(program)
.replace_or_else(|| program.clone())
}
#[derive(Eq, Clone, Debug)]
struct FragmentSpreadKey(Arc<FragmentSpread>);
type Seen = FnvHashMap<FragmentSpreadKey, Arc<InlineFragment>>;
impl PartialEq for FragmentSpreadKey {
fn eq(&self, other: &Self) -> bool {
self.0.fragment.item == other.0.fragment.item
&& self.0.directives.location_agnostic_eq(&other.0.directives)
}
}
impl Hash for FragmentSpreadKey {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.0.fragment.item.hash(state);
self.0.directives.location_agnostic_hash(state);
}
}
struct InlineFragmentsTransform<'s> {
program: &'s Program,
seen: Seen,
}
impl<'s> InlineFragmentsTransform<'s> {
fn new(program: &'s Program) -> Self {
Self {
program,
seen: Default::default(),
}
}
fn transform_fragment_spread(&mut self, spread: &Arc<FragmentSpread>) -> Arc<InlineFragment> {
let key = FragmentSpreadKey(Arc::clone(spread));
// If we've already created an InlineFragment for this fragment name before,
// share it
if let Some(prev) = self.seen.get(&key) {
return Arc::clone(prev);
};
// Otherwise create the InlineFragment equivalent of the fragment (recursively
// inlining its contents). To guard against cycles, store a dummy value
// that we overwrite once we finish.
self.seen.insert(
key.clone(),
Arc::new(InlineFragment {
type_condition: None,
directives: Default::default(),
selections: Default::default(),
}),
);
let fragment = self
.program
.fragment(spread.fragment.item)
.unwrap_or_else(|| {
panic!(
"Fragment spread unable to resolve fragment `{}`.",
spread.fragment.item
)
});
let selections = self.transform_selections(&fragment.selections);
let result = Arc::new(InlineFragment {
type_condition: Some(fragment.type_condition),
directives: spread.directives.clone(),
selections: selections.replace_or_else(|| fragment.selections.clone()),
});
self.seen.insert(key, Arc::clone(&result));
result
}
}
impl<'s> Transformer for InlineFragmentsTransform<'s> {
const NAME: &'static str = "InlineFragmentsTransform";
const VISIT_ARGUMENTS: bool = false;
const VISIT_DIRECTIVES: bool = false;
fn transform_fragment(
&mut self,
_fragment: &FragmentDefinition,
) -> Transformed<FragmentDefinition> {
Transformed::Delete
}
fn transform_selection(&mut self, selection: &Selection) -> Transformed<Selection> {
match selection {
Selection::FragmentSpread(selection) => {
let should_skip_inline = selection.directives.iter().any(|directive| {<|fim▁hole|> || directive.name.item == *RELAY_CLIENT_COMPONENT_SERVER_DIRECTIVE_NAME
});
if should_skip_inline {
Transformed::Keep
} else {
Transformed::Replace(Selection::InlineFragment(
self.transform_fragment_spread(selection),
))
}
}
_ => self.default_transform_selection(selection),
}
}
fn transform_scalar_field(&mut self, _field: &ScalarField) -> Transformed<Selection> {
Transformed::Keep
}
}<|fim▁end|> | directive.name.item == *NO_INLINE_DIRECTIVE_NAME |
<|file_name|>bsp_renderer.rs<|end_file_name|><|fim▁begin|>/*
Copyright 2013 Jesse 'Jeaye' Wilkerson
See licensing in LICENSE file, or at:
http://www.opensource.org/licenses/BSD-3-Clause
File: client/state/game/bsp_renderer.rs
Author: Jesse 'Jeaye' Wilkerson
Description:
A client-only state that depends on
the shared game state. This state is
used only in development to testing
the loading and rendering of Quake's
BSP maps.
*/
use super::{ State, Game_Renderer };
use std::mem;
use gl2 = opengles::gl2;
use gfx;
use glfw;
use ui;
use math;
use obj::bsp;
use log::Log;
#[macro_escape]
#[path = "../../../shared/log/macros.rs"]
mod macros;
#[macro_escape]
#[path = "../../gfx/check.rs"]
mod check;
pub struct BSP_Renderer
{
game_renderer: @mut Game_Renderer,
vao: gl2::GLuint,
vbo: gl2::GLuint,
shader: @mut gfx::Shader,
proj_loc: gl2::GLint,
world_loc: gl2::GLint,
}
impl BSP_Renderer
{
pub fn new(game_renderer: @mut Game_Renderer) -> @mut BSP_Renderer
{
let gr = @mut BSP_Renderer
{
game_renderer: game_renderer,
vao: 0,
vbo: 0,
shader: gfx::Shader_Builder::new_with_files("data/shaders/color.vert", "data/shaders/color.frag"),
proj_loc: 0,
world_loc: 0,
};
gr.upload();
gr
}
fn upload(&mut self)
{
let name = check!(gl2::gen_vertex_arrays(1));
log_assert!(name.len() == 1);
self.vao = name[0];
let name = check!(gl2::gen_buffers(1));
log_assert!(name.len() == 1);
self.vbo = name[0];
check!(gl2::bind_vertex_array(self.vao));
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.vbo));
check!(gl2::buffer_data(gl2::ARRAY_BUFFER, self.game_renderer.game.bsp_map.verts, gl2::STATIC_DRAW));
/* Setup vertex attribs. */
check!(gl2::bind_buffer(gl2::ARRAY_BUFFER, self.vbo));
check!(gl2::enable_vertex_attrib_array(0));
check!(gl2::enable_vertex_attrib_array(1));
check!(gl2::vertex_attrib_pointer_f32(0, 3, false,
mem::size_of::<bsp::lump::Vertex>() as i32,
0));
check!(gl2::vertex_attrib_pointer_u8(1, 4, true,
mem::size_of::<bsp::lump::Vertex>() as i32,
mem::size_of::<bsp::lump::Vertex>() as u32 -
mem::size_of::<math::Vec4u8>() as u32));
}
fn render_mesh(&self)
{
check!(gl2::bind_vertex_array(self.vao));
check!(gl2::draw_arrays(gl2::TRIANGLES, 0, self.game_renderer.game.bsp_map.verts.len() as i32));
check!(gl2::bind_vertex_array(0));
}
}
impl State for BSP_Renderer
{
fn load(&mut self)
{
log_debug!("Loading bsp renderer state.");
self.game_renderer.camera.show_fps = true;
self.shader.bind();
self.proj_loc = self.shader.get_uniform_location("proj");
self.world_loc = self.shader.get_uniform_location("world");
}
fn unload(&mut self)
{
log_debug!("Unloading bsp renderer state.");
/* Cleanup GL. */
check!(gl2::delete_vertex_arrays(&[self.vao]));
check!(gl2::delete_buffers(&[self.vbo]));
}
fn get_key(&self) -> &str
{ &"bsp_renderer" }
fn update(&mut self, delta: f32) -> bool /* dt is in terms of seconds. */<|fim▁hole|> }
fn render(&mut self) -> bool
{
self.shader.bind();
self.shader.update_uniform_mat(self.proj_loc, &self.game_renderer.camera.projection);
self.shader.update_uniform_mat(self.world_loc, &self.game_renderer.camera.view);
self.render_mesh();
let fps = self.game_renderer.camera.frame_rate;
let ui_renderer = ui::Renderer::get();
ui_renderer.begin();
{
if self.game_renderer.camera.show_fps
{
ui_renderer.render_font(
format!("{}", fps),
math::Vec2f::new(self.game_renderer.camera.window_size.x as f32 - 40.0, 0.0),
&self.game_renderer.fps_font);
}
} ui_renderer.end();
false
}
fn key_action(&mut self, key: glfw::Key, action: glfw::Action, _mods: glfw::Modifiers) -> bool
{ (self.game_renderer.camera as @mut State).key_action(key, action, _mods) }
fn mouse_moved(&mut self, x: f32, y: f32) -> bool
{ (self.game_renderer.camera as @mut State).mouse_moved(x, y) }
}<|fim▁end|> | {
self.game_renderer.camera.update(delta);
false |
<|file_name|>PlaiCDN.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
#script is a replacement for https://github.com/Relys/3DS_Multi_Decryptor/blob/master/to3DS/CDNto3DS/CDNto3DS.py
#requires PyCrypto to be installed ("python3 -m ensurepip" then "pip3 install PyCrypto")
#requires makerom (https://github.com/profi200/Project_CTR/releases)
#this is a Python 3 script
from subprocess import DEVNULL, STDOUT, call, check_call
from struct import pack, unpack
from binascii import hexlify, unhexlify
from Crypto.Cipher import AES
from hashlib import sha256
from imp import reload
import json
import platform
import os
import struct
import errno
import shlex
import ssl
import sys
import urllib.request, urllib.error, urllib.parse
# from http://stackoverflow.com/questions/600268/
def pmkdir(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
# from http://stackoverflow.com/questions/377017/377028#377028
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
# based on https://stackoverflow.com/questions/5783517/
def report_chunk(bytes_so_far, chunk_size, total_size):
percent = float(bytes_so_far) / total_size
percent = round(percent*100, 2)
sys.stdout.write('\rDownloaded and decrypted %d of %d bytes (%0.2f%%)' % (bytes_so_far, total_size, percent))
sys.stdout.flush()
if bytes_so_far >= total_size:
print('')
# download in 0x200000 byte chunks, decrypt the chunk with IVs described below, then write the decrypted chunk to disk (half the file size of decrypting separately!)
def read_chunk(response, f_out, intitle_key, first_iv, chunk_size=0x200000, report_hook=None):
file_handler = open(f_out,'wb')
total_size = int(response.getheader('Content-Length'))
total_size = int(total_size)
bytes_so_far = 0
data = []
first_read_chunk = 0
while 1:
if report_hook:
report_hook(bytes_so_far, chunk_size, total_size)
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
break
# IV of first chunk should be the Content ID + 28 0s like with the entire file, but each subsequent chunk should be the last 16 bytes of the previous still ciphered chunk
if first_read_chunk == 0:
decryptor = AES.new(intitle_key, AES.MODE_CBC, unhexlify(first_iv))
first_read_chunk = 1
else:
decryptor = AES.new(intitle_key, AES.MODE_CBC, prev_chunk[(0x200000 - 16):0x200000])
dec_chunk = decryptor.decrypt(chunk)
prev_chunk = chunk
file_handler.write(dec_chunk)
file_handler.close()
def system_usage():
print('Usage: PlaiCDN <TitleID TitleKey> <Options> for content options')
print('-redown : redownload content')
print('-no3ds : don\'t build 3DS file')
print('-nocia : don\'t build CIA file')
print('-nobuild : don\'t build 3DS or CIA')
print('-nohash : ignore hash checks')
print('-check : checks if title id matches key')
print('-fast : skips name retrieval when using -check')
print('')
print('Usage: PlaiCDN <TitleID> for general options')
print('-info : to display detailed metadata')
print('-seed : generates game-specific seeddb file when using -info')
print('')
print('Usage: PlaiCDN <Options> for decTitleKeys.bin options')
print('-deckey : print keys from decTitleKeys.bin')
print('-checkbin : checks titlekeys from decTitleKeys.bin')
print('-checkall : check all titlekeys when using -checkbin')
print('-fast : skips name retrieval when using -checkbin, cannot be used with seed/seeddb')
print('-seeddb : generates a single seeddb.bin')
raise SystemExit(0)
def getTitleInfo(title_id):
tid_high = ((hexlify(title_id)).decode()).upper()[:8]
tid_index = ['00040010', '0004001B', '000400DB', '0004009B',
'00040030', '00040130', '00040138', '00040001',
'00048005', '0004800F', '00040002', '0004008C']
res_index = ['-System Application-', '-System Data Archive-', '-System Data Archive-', '-System Data Archive-',
'-System Applet-', '-System Module-', '-System Firmware-', '-Download Play Title-',
'-TWL System Application-', '-TWL System Data Archive-', '-Game Demo-', '-Addon DLC-']
if fast == 1 and gen_seed != 1:
tid_index.extend(['00040000', '0004000E'])
res_index.extend(['-eShop Content-', '-eShop Content Update-'])
if tid_high in tid_index:
return(res_index[tid_index.index(tid_high)], '---', '-------', '------', '', '---', '---')
# create new SSL context to load decrypted CLCert-A off directory, key and cert are in PEM format
# see https://github.com/SciresM/ccrypt
try:
ctr_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctr_context.load_cert_chain('ctr-common-1.crt', keyfile='ctr-common-1.key')
except FileNotFoundError:
if '-checkbin' not in sys.argv:
print('\nCould not find certificate files, all secure connections will fail!\n')
nocert = 1
return('-eShop Content-', '---', '-------', '------', None, '---', '---')
# ninja handles handles actions that require authentication, in addition to converting title ID to internal the CDN content ID
ninja_url = 'https://ninja.ctr.shop.nintendo.net/ninja/ws/'
# use GET request with parameter "title_id[]=mytitle_id" with SSL context
# use header "Accept: application/json" to retrieve JSON instead of XML
try:
shop_request = urllib.request.Request(ninja_url + 'titles/id_pair' + '?title_id[]=' + (hexlify(title_id)).decode())
shop_request.get_method = lambda: 'GET'
shop_request.headers['Accept'] = 'application/json'
response = urllib.request.urlopen(shop_request, context=ctr_context)
json_response = json.loads((response.read()).decode('UTF-8', 'replace'))
except urllib.error.URLError as e:
raise
# set ns_uid (the internal content ID) to field from JSON
ns_uid = json_response['title_id_pairs']['title_id_pair'][0]['ns_uid']
# samurai handles metadata actions, including getting a title's info
# URL regions are by country instead of geographical regions... for some reason
samurai_url = 'https://samurai.ctr.shop.nintendo.net/samurai/ws/'
region_dict = {'JP': 'JPN', 'HK': 'HKG', 'TW': 'TWN', 'KR': 'KOR', 'DE': 'EUR', 'FR': 'EUR', 'ES': 'EUR', 'NL': 'EUR', 'IT': 'EUR', 'GB': 'EUR', 'US': 'USA'}
region_dict_passed = {}
# try loop to figure out which region the title is from; there is no easy way to do this other than try them all
for country_code, region in region_dict.items():
try:
title_request = urllib.request.Request(samurai_url + country_code + '/title/' + str(ns_uid))
title_request.headers['Accept'] = 'application/json'
response = urllib.request.urlopen(title_request, context=ctr_context)
title_response = json.loads((response.read()).decode('UTF-8', 'replace'))
except urllib.error.URLError as e:
pass
else:
region_dict_passed.update({country_code: region})
if len(region_dict_passed) == 0:
raise
elif len(region_dict_passed) > 1:
region = 'ALL'
else:
region = list(region_dict_passed.values())[0]
ec_request = urllib.request.Request(ninja_url + list(region_dict_passed.keys())[0] + '/title/' + str(ns_uid) + '/ec_info')
ec_request.headers['Accept'] = 'application/json'
response = urllib.request.urlopen(ec_request, context=ctr_context)
ec_response = json.loads((response.read()).decode('UTF-8', 'replace'))
# get info from the returned JSON from the URL
title_name = (title_response['title'].get('formal_name', '-eShop Content-')).replace('\n', ' ')
publisher = title_response['title']['publisher'].get('name', '------')
product_code = title_response['title'].get('product_code', '------')
curr_version = ec_response['title_ec_info'].get('title_version', '---')
title_size = '{:.5}'.format(int(ec_response['title_ec_info'].get('content_size', '---')) / 1000000)
try:
crypto_seed = ec_response['title_ec_info']['content_lock'].get('external_seed', None)
except KeyError:
crypto_seed = None
pass
# some windows unicode character bullshit
if 'Windows' in platform.system():
title_name_stripped = ''.join([i if ord(i) < 128 else ' ' for i in title_name])
publisher = ''.join([i if ord(i) < 128 else ' ' for i in publisher])
else:
title_name_stripped = title_name
return(title_name_stripped, region, product_code, publisher, crypto_seed, curr_version, title_size)
def printTitleInfo(title_name_stripped, region, product_code, publisher, crypto_seed, curr_version, title_size):
print('\n~\n')
print('Title Name: ' + title_name_stripped)
print('Region: ' + region)
print('Product Code: ' + product_code)
print('Publisher: ' + publisher)
print('Current Version: ' + str(curr_version))
if title_size == '---':
print('Title Size: ' + title_size)
else:
print('Title Size: ' + title_size + 'mb')
if crypto_seed != None:
print('9.6 Crypto Seed: ' + crypto_seed)
print('')
#=========================================================================================================
# Seeddb implementation
class crypto_handler:
def __init__(self):
self.crypto_db = {}
def add_seed(self, title_id, title_key):
self.crypto_db.update({title_id: title_key})
def gen_seeddb(self):
if self.crypto_db:
if '-seeddb' in sys.argv:
self.write_seed()
else:
for title_id in self.crypto_db:
self.write_seed(title_id)
def write_seed(self, title_id=None):
# Providing title_id makes a title specific seeddb
if title_id:
pmkdir(title_id)
s_out = title_id+'/seeddb.bin'
seed_db = {title_id: self.crypto_db[title_id]}
else:
s_out = 'seeddb.bin'
seed_db = self.crypto_db
with open(s_out, 'wb') as seeddb_handler:
seed_count = '{:032X}'.format(len(seed_db))
seeddb_handler.write(unhexlify(seed_count)[::-1])
for title_id in seed_db:
# Title_id is reversed in seeddb.bin
seed = unhexlify(title_id)[::-1] + unhexlify(seed_db[title_id]) + b'\x00'*8
seeddb_handler.write(seed)
seeddb_handler.close()
gen_seed = 0
fast = 0
for i in range(len(sys.argv)):
if sys.argv[i] in ['-seed', '-seeddb']: gen_seed = 1
elif sys.argv[i] == '-fast': fast = 1
crypto_db = crypto_handler()
#=========================================================================================================
#from https://github.com/Relys/3DS_Multi_Decryptor/blob/master/ticket-title_key_stuff/printKeys.py
for i in range(len(sys.argv)):
if sys.argv[i] == '-deckey':
with open('decTitleKeys.bin', 'rb') as file_handler:
n_entries = os.fstat(file_handler.fileno()).st_size / 32
file_handler.seek(16, os.SEEK_SET)
for i in range(int(n_entries)):
file_handler.seek(8, os.SEEK_CUR)
title_id = file_handler.read(8)
decrypted_title_key = file_handler.read(16)
print('%s: %s' % ((hexlify(title_id)).decode(), (hexlify(decrypted_title_key)).decode()))
raise SystemExit(0)
for i in range(len(sys.argv)):
if sys.argv[i] == '-info':
title_id = sys.argv[1]
if len(title_id) != 16:
print('Invalid arguments')
raise SystemExit(0)
base_url = 'http://ccs.cdn.c.shop.nintendowifi.net/ccs/download/' + title_id
# download tmd_var and set to object
try:
tmd_var = urllib.request.urlopen(base_url + '/tmd')
except urllib.error.URLError as e:
print('Could not retrieve tmd; received error: ' + str(e))
continue
tmd_var = tmd_var.read()
content_count = unpack('>H', tmd_var[0x206:0x208])[0]
for i in range(content_count):
c_offs = 0xB04+(0x30*i)
c_id = format(unpack('>I', tmd_var[c_offs:c_offs+4])[0], '08x')
c_idx = format(unpack('>H', tmd_var[c_offs+4:c_offs+6])[0], '04x')
c_size = format(unpack('>Q', tmd_var[c_offs+8:c_offs+16])[0], 'd')
c_hash = tmd_var[c_offs+16:c_offs+48]
# If content count above 8 (not a normal application), don't make 3ds
if unpack('>H', tmd_var[c_offs+4:c_offs+6])[0] >= 8:
make_3ds = 0
print('')
print('Content ID: ' + c_id)
print('Content Index: ' + c_idx)
print('Content Size: ' + c_size)
print('Content Hash: ' + (hexlify(c_hash)).decode())
title_name_stripped, region, product_code, publisher, crypto_seed, curr_version, title_size = getTitleInfo((unhexlify(title_id)))
printTitleInfo(title_name_stripped, region, product_code, publisher, crypto_seed, curr_version, title_size)
if crypto_seed != None:
# Add crypto seed to crypto database
crypto_db.add_seed(title_id, crypto_seed)
# Generate seeddb.bin from crypto seed database
if gen_seed == 1:
crypto_db.gen_seeddb()
raise SystemExit(0)
for i in range(len(sys.argv)):
if sys.argv[i] == '-checkbin':
if (not os.path.isfile('ctr-common-1.crt')) or (not os.path.isfile('ctr-common-1.key')):
print('\nCould not find certificate files, all secure connections will fail!')
nocert = 1
check_all = 0
for i in range(len(sys.argv)):
if sys.argv[i] == '-checkall': check_all = 1
with open('decTitleKeys.bin', 'rb') as file_handler:
n_entries = os.fstat(file_handler.fileno()).st_size / 32
file_handler.seek(16, os.SEEK_SET)
final_output = []
print('')
# format: Title Name (left aligned) gets 40 characters, Title ID (Right aligned) gets 16, Titlekey (Right aligned) gets 32, and Region (Right aligned) gets 3
# anything longer is truncated, anything shorter is padded
print("{0:<40} {1:>16} {2:>32} {3:>3}".format('Name', 'Title ID', 'Titlekey', 'Region'))
print("-"*100)
for i in range(int(n_entries)):
file_handler.seek(8, os.SEEK_CUR)
title_id = file_handler.read(8)
decrypted_title_key = file_handler.read(16)
# regular CDN URL for downloads off the CDN
base_url = 'http://ccs.cdn.c.shop.nintendowifi.net/ccs/download/' + (hexlify(title_id)).decode()
tid_high = ((hexlify(title_id)).decode()).upper()[:8]
if check_all == 0 and (tid_high not in ['00040000', '0004000E', '0004008C']):
continue
# download tmd_var and set to object
try:
tmd_var = urllib.request.urlopen(base_url + '/tmd')
except urllib.error.URLError as e:
continue
tmd_var = tmd_var.read()
# try to get info from the CDN
try:
title_name_stripped, region, product_code, publisher, crypto_seed, curr_version, title_size = getTitleInfo(title_id)
except:
raise
content_count = unpack('>H', tmd_var[0x206:0x208])[0]
for i in range(content_count):
c_offs = 0xB04+(0x30*i)
c_idx = format(unpack('>H', tmd_var[c_offs+4:c_offs+6])[0], '04x')
c_id = format(unpack('>I', tmd_var[c_offs:c_offs+4])[0], '08x')
# use range requests to download bytes 0 through 271, needed 272 instead of 260 because AES-128-CBC encrypts in chunks of 128 bits
try:
check_req = urllib.request.Request('%s/%s'%(base_url, c_id))
check_req.headers['Range'] = 'bytes=%s-%s' % (0, 271)
check_temp = urllib.request.urlopen(check_req)
except urllib.error.URLError as e:
continue
# set IV to offset 0xf0 length 0x10 of ciphertext; thanks to yellows8 for the offset
check_temp_perm = check_temp.read()
check_iv = check_temp_perm[0xf0:0x100]
decryptor = AES.new(decrypted_title_key, AES.MODE_CBC, check_iv)
# check for magic ('NCCH') at offset 0x100 length 0x104 of the decrypted content
check_temp_out = decryptor.decrypt(check_temp_perm)[0x100:0x104]
if 'NCCH' not in check_temp_out.decode('UTF-8', 'ignore'):
decryptor = AES.new(decrypted_title_key, AES.MODE_CBC, unhexlify(c_idx + '0000000000000000000000000000'))
dsi_check_temp_out = decryptor.decrypt(check_temp_perm)[0x60:0x64]
if 'NCCH' in check_temp_out.decode('UTF-8', 'ignore') or 'WfA' in dsi_check_temp_out.decode('UTF-8', 'ignore'):
# format: Title Name (left aligned) gets 40 characters, Title ID (Right aligned) gets 16, Titlekey (Right aligned) gets 32, and Region (Right aligned) gets 3
# anything longer is truncated, anything shorter is padded
print("{0:<40.40} {1:>16} {2:>32} {3:>3}".format(title_name_stripped, (hexlify(title_id).decode()).strip(), ((hexlify(decrypted_title_key)).decode()).strip(), region))
# Add crypto seed to crypto database
if crypto_seed != '':
crypto_db.add_seed((hexlify(title_id).decode()).strip(), crypto_seed)
# Generate seeddb.bin from crypto seed database
if gen_seed == 1:
crypto_db.gen_seeddb()
raise SystemExit(0)
#if args for deckeys or checkbin weren't used above, remaining functions require 3 args minimum
if len(sys.argv) < 3:
system_usage()
# default values
title_id = sys.argv[1]
title_key = sys.argv[2]
force_download = 0
make_3ds = 1
make_cia = 1
check_key = 0
no_hash = 0
check_temp_out = None
nocert = 0
first_pass = 1
# check args
for i in range(len(sys.argv)):
if sys.argv[i] == '-redown': force_download = 1
elif sys.argv[i] == '-no3ds': make_3ds = 0
elif sys.argv[i] == '-nocia': make_cia = 0
elif sys.argv[i] == '-check': check_key = 1
elif sys.argv[i] == '-nohash': no_hash = 1
elif sys.argv[i] == '-nobuild':
make_cia = 0
make_3ds = 0
if (len(title_key) != 32 and not os.path.isfile('decTitleKeys.bin')) or len(title_id) != 16:
print('Invalid arguments')
raise SystemExit(0)
# pull title key from decTitleKeys.bin if available
if len(title_key) != 32 and os.path.isfile('decTitleKeys.bin'):
decrypted_keys = {}
with open('decTitleKeys.bin', 'rb') as file_handler:
n_entries = os.fstat(file_handler.fileno()).st_size / 32
file_handler.seek(16, os.SEEK_SET)
for i in range(int(n_entries)):
file_handler.seek(8, os.SEEK_CUR)
tmp_title_id = file_handler.read(8)
decrypted_title_key = file_handler.read(16)
decrypted_keys.update({(hexlify(tmp_title_id)).decode() : (hexlify(decrypted_title_key)).decode()})
try:
title_key = decrypted_keys[title_id]
except KeyError:
print('Title key was not provided and is not available in decTitleKeys.bin')
raise SystemExit(0)
# set CDN default URL
base_url = 'http://ccs.cdn.c.shop.nintendowifi.net/ccs/download/' + title_id
# download tmd and set to 'tmd_var' object
try:
tmd_var = urllib.request.urlopen(base_url + '/tmd')
except urllib.error.URLError as e:
print('ERROR: Bad title ID?')
raise SystemExit(0)
tmd_var = tmd_var.read()
#create folder
if check_key == 0:
pmkdir(title_id)
# https://www.3dbrew.org/wiki/Title_metadata#Signature_Data
if bytes('\x00\x01\x00\x04', 'UTF-8') not in tmd_var[:4]:
print('Unexpected signature type.')
raise SystemExit(0)
# If not normal application, don't make 3ds
if title_id[:8] != '00040000':
make_3ds = 0
# Check OS, path, and current dir to set makerom location
if 'Windows' in platform.system():
if os.path.isfile('makerom.exe'):
makerom_command = 'makerom.exe'
else:
makerom_command = which('makerom.exe')
else:
if os.path.isfile('makerom'):
makerom_command = './makerom'
else:
makerom_command = which('makerom')
if makerom_command == None:
print('Could not find makerom!')
raise SystemExit(0)
# Set proper common key ID
if unpack('>H', tmd_var[0x18e:0x190])[0] & 0x10 == 0x10:
ckeyid = 1
else:
ckeyid = 0
# Set Proper Version
title_version = unpack('>H', tmd_var[0x1dc:0x1de])[0]
# Set Save Size
save_size = (unpack('<I', tmd_var[0x19a:0x19e])[0])/1024
# If DLC Set DLC flag
dlcflag = ''
if '0004008c' in title_id:
dlcflag = '-dlc'
content_count = unpack('>H', tmd_var[0x206:0x208])[0]
# If content count above 8 (not a normal application), don't make 3ds
if content_count > 8:
make_3ds = 0
command_c_id = []
# Download Contents
fSize = 16384
for i in range(content_count):
c_offs = 0xB04+(0x30*i)
c_id = format(unpack('>I', tmd_var[c_offs:c_offs+4])[0], '08x')
c_idx = format(unpack('>H', tmd_var[c_offs+4:c_offs+6])[0], '04x')
c_size = format(unpack('>Q', tmd_var[c_offs+8:c_offs+16])[0], 'd')
c_hash = tmd_var[c_offs+16:c_offs+48]
# If content count above 8 (not a normal application), don't make 3ds
if unpack('>H', tmd_var[c_offs+4:c_offs+6])[0] >= 8:
make_3ds = 0
# set output location to a folder named for title id and contentid.dec as the file
f_out = title_id + '/' + c_id + '.dec'
if first_pass == 1:
print('\nDownloading and decrypting the first 272 bytes of ' + c_id + ' for key check...\n')
# use range requests to download bytes 0 through 271, needed 272 instead of 260 because AES-128-CBC encrypts in chunks of 128 bits
try:
check_req = urllib.request.Request('%s/%s'%(base_url, c_id))
check_req.headers['Range'] = 'bytes=%s-%s' % (0, 271)
check_temp = urllib.request.urlopen(check_req)
except urllib.error.URLError as e:
print('ERROR: Possibly wrong container?\n')
raise SystemExit(0)
print('Fetching title metadata for ' + title_id + '\n')
title_name_stripped, region, product_code, publisher, crypto_seed, curr_version, title_size = getTitleInfo((unhexlify(title_id)))
# set IV to offset 0xf0 length 0x10 of ciphertext; thanks to yellows8 for the offset
check_temp_perm = check_temp.read()
decryptor = AES.new(unhexlify(title_key), AES.MODE_CBC, check_temp_perm[0xf0:0x100])
# check for magic ('NCCH') at offset 0x100 length 0x104 of the decrypted content
check_temp_out = decryptor.decrypt(check_temp_perm)[0x100:0x104]
printTitleInfo(title_name_stripped, region, product_code, publisher, crypto_seed, curr_version, title_size)
if gen_seed == 1:
print('')
if crypto_seed != '':
# Add crypto seed to crypto database
crypto_db.add_seed(title_id, crypto_seed)
crypto_db.gen_seeddb()
raise SystemExit(0)
if crypto_seed == '':
print('Title ' + title_id + ' does not have a 9.6 crypto seed')
raise SystemExit(0)
print('')
if 'NCCH' not in check_temp_out.decode('UTF-8', 'ignore'):
decryptor = AES.new(unhexlify(title_key), AES.MODE_CBC, unhexlify(c_idx + '0000000000000000000000000000'))
dsi_check_temp_out = decryptor.decrypt(check_temp_perm)[0x60:0x64]
if 'NCCH' not in check_temp_out.decode('UTF-8', 'ignore') and 'WfA' not in dsi_check_temp_out.decode('UTF-8', 'ignore'):
print('\nERROR: Decryption failed; invalid titlekey?')
raise SystemExit(0)
print('\nTitlekey successfully verified to match title ID ' + title_id + '...\n')
if check_key == 1:
raise SystemExit(0)
print('Content ID: ' + c_id)
print('Content Index: ' + c_idx)
print('Content Size: ' + c_size)
print('Content Hash: ' + (hexlify(c_hash)).decode())
<|fim▁hole|> if os.path.exists(f_out) == 0 or force_download == 1 or os.path.getsize(f_out) != unpack('>Q', tmd_var[c_offs+8:c_offs+16])[0]:
response = urllib.request.urlopen(base_url + '/' + c_id)
read_chunk(response, f_out, unhexlify(title_key), c_idx + '0000000000000000000000000000', report_hook=report_chunk)
# check hash and NCCH of downloaded content
with open(f_out,'rb') as file_handler:
file_handler.seek(0, os.SEEK_END)
file_handlerSize = file_handler.tell()
if file_handler.tell() != unpack('>Q', tmd_var[c_offs+8:c_offs+16])[0]:
print('Title size mismatch. Download likely incomplete')
print('Downloaded: ' + format(file_handler.tell(), 'd'))
raise SystemExit(0)
if no_hash == 0:
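# stream the file through SHA-256 in 16 MiB chunks and compare against the hash recorded in the TMD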
file_handler.seek(0)
hash = sha256()
while file_handler.tell() != file_handlerSize:
hash.update(file_handler.read(0x1000000))
print('Checking Hash: ' + format(float(file_handler.tell()*100)/file_handlerSize,'.1f') + '% done\r', end=' ')
sha256file = hash.hexdigest()
if sha256file != (hexlify(c_hash)).decode():
print('Hash mismatch; decryption likely failed. Wrong key or modified file?')
print('got hash: ' + sha256file)
raise SystemExit(0)
print('Hash verified successfully.')
file_handler.seek(0x100)
if (file_handler.read(4)).decode('UTF-8', 'ignore') != 'NCCH':
make_cia = 0
make_3ds = 0
file_handler.seek(0x60)
if 'WfA' not in file_handler.read(4).decode():
print('Not an NCCH or DSiWare container; file is likely corrupted')
raise SystemExit(0)
else:
print('Not an NCCH container, likely DSiWare')
file_handler.seek(0, os.SEEK_END)
fSize += file_handler.tell()
print('')
command_c_id = command_c_id + ['-i', f_out + ':0x' + c_idx + ':0x' + c_id]
first_pass = 0
if crypto_seed == '' and nocert == 1:
print('')
print('Could not check for 9.6 crypto seed automatically due to secure connection failure!')
print('')
print('If this is a 9.6+ game, then it will fail to load once installed unless the system')
print('connects to the eShop at least once after install to update seeddb, or you place')
print('the cert files in the current directory and rerun this script for manual decryption.')
print('')
if crypto_seed != '':
print('')
print('This is a 9.6+ eShop game which uses seed encryption.')
print('')
print('The NCCH on 9.6+ eShop games is seed encrypted and cannot be used')
print('without seed decryption on a 3DS unless the system connects to the eShop')
print('at least once after install to update seeddb.')
print('')
print('To fix this you should copy')
print('the generated seeddb.bin and .cia file in the Title ID folder')
print('to \'/D9Game/\' on your SD card, then use the following option in Decrypt9:')
print('')
print('\'Content Decryptor Options\' > \'CIA Decryptor (deep)\'')
print('')
print('Once you have decrypted the file, the resulting .cia can successfully be installed')
print('')
print('NOTE: The generated .3ds files will not work with Gateway')
print('')
# create the RSF File
rom_rsf = 'Option:\n MediaFootPadding: true\n EnableCrypt: false\nSystemControlInfo:\n SaveDataSize: $(SaveSize)K'
with open('rom.rsf', 'wb') as file_handler:
file_handler.write(rom_rsf.encode())
# build the makerom command for subprocess, removing '' if dlcflag isn't set (otherwise makerom breaks)
dotcia_command_array = ([makerom_command, '-f', 'cia', '-rsf', 'rom.rsf', '-o', title_id + '.cia', '-ckeyid', str(ckeyid), '-major', str((title_version & 0xfc00) >> 10), '-minor', str((title_version & 0x3f0) >> 4), '-micro', str(title_version & 0xF), '-DSaveSize=' + str(save_size), str(dlcflag)] + command_c_id)
dot3ds_command_array = ([makerom_command, '-f', 'cci', '-rsf', 'rom.rsf', '-nomodtid', '-o', title_id + '.3ds', '-ckeyid', str(ckeyid), '-major', str((title_version & 0xfc00) >> 10), '-minor', str((title_version & 0x3f0) >> 4), '-micro', str(title_version & 0xF), '-DSaveSize=' + str(save_size), str(dlcflag)] + command_c_id)
if '' in dotcia_command_array:
dotcia_command_array.remove('')
if '' in dot3ds_command_array:
dot3ds_command_array.remove('')
if make_cia == 1:
print('\nBuilding ' + title_id + '.cia...')
call(dotcia_command_array, stdout=DEVNULL, stderr=STDOUT)
if make_3ds == 1:
print('\nBuilding ' + title_id + '.3ds...')
call(dot3ds_command_array, stdout=DEVNULL, stderr=STDOUT)
if os.path.isfile('rom.rsf'):
os.remove('rom.rsf')
if make_cia == 1 and not os.path.isfile(title_id + '.cia'):
print('Something went wrong.')
raise SystemExit(0)
if make_3ds == 1 and not os.path.isfile(title_id + '.3ds'):
print('Something went wrong.')
raise SystemExit(0)
print('Done!')<|fim▁end|> | # if the content location does not exist, redown is set, or the size is incorrect redownload |
<|file_name|>xdatcar2xyz.1.04.py<|end_file_name|><|fim▁begin|># The MIT License (MIT)
#
# Copyright (c) 2014 Muratahan Aykol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
import numpy as np
xdatcar = open('XDATCAR', 'r')
xyz = open('XDATCAR.xyz', 'w')
xyz_fract = open('XDATCAR_fract.xyz', 'w')
system = xdatcar.readline()
scale = float(xdatcar.readline().rstrip('\n'))
print scale
#get lattice vectors
a1 = np.array([ float(s)*scale for s in xdatcar.readline().rstrip('\n').split() ])
a2 = np.array([ float(s)*scale for s in xdatcar.readline().rstrip('\n').split() ])
a3 = np.array([ float(s)*scale for s in xdatcar.readline().rstrip('\n').split() ])
print a1
print a2
print a3
#Save scaled lattice vectors
lat_rec = open('lattice.vectors', 'w')
lat_rec.write(str(a1[0])+' '+str(a1[1])+' '+str(a1[2])+'\n')
lat_rec.write(str(a2[0])+' '+str(a2[1])+' '+str(a2[2])+'\n')
lat_rec.write(str(a3[0])+' '+str(a3[1])+' '+str(a3[2]))
lat_rec.close()
#Read xdatcar
element_names = xdatcar.readline().rstrip('\n').split()
element_dict = {}
element_numbers = xdatcar.readline().rstrip('\n').split()
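# build {element symbol: atom count} from the two header lines and accumulate the total number of atoms N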
i = 0
N = 0
for t in range(len(element_names)):
element_dict[element_names[t]] = int(element_numbers[i])
N += int(element_numbers[i])
i += 1
print element_dict
<|fim▁hole|>while True:
line = xdatcar.readline()
if len(line) == 0:
break
xyz.write(str(N) + "\ncomment\n")
xyz_fract.write(str(N)+"\ncomment\n")
for el in element_names:
for i in range(element_dict[el]):
p = xdatcar.readline().rstrip('\n').split()
coords = np.array([ float(s) for s in p ])
# print coords
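            # fractional (direct) coordinates -> Cartesian: r = x*a1 + y*a2 + z*a3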
cartesian_coords = coords[0]*a1+coords[1]*a2+coords[2]*a3
xyz.write(el+ " " + str(cartesian_coords[0])+ " " + str(cartesian_coords[1]) + " " + str(cartesian_coords[2]) +"\n")
xyz_fract.write(el+ " " + str(coords[0])+ " " + str(coords[1]) + " " + str(coords[2]) +"\n")
xdatcar.close()
xyz.close()
xyz_fract.close()<|fim▁end|> | |
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate cc;
extern crate gl_generator;
use gl_generator::{Api, Fallbacks, Profile, Registry};
use std::env;
use std::fs::File;
use std::path::Path;
fn main() {
let target = env::var("TARGET").unwrap();
if target.contains("android") {
android_main()
}
generate_gl_bindings(&target);
}
fn android_main() {
// Get the NDK path from the ANDROID_NDK environment variable.
let ndk_path =
env::var_os("ANDROID_NDK").expect("Please set the ANDROID_NDK environment variable");
let ndk_path = Path::new(&ndk_path);
// compiling android_native_app_glue.c
let c_file = ndk_path
.join("sources")
.join("android")
.join("native_app_glue")
.join("android_native_app_glue.c");
cc::Build::new()
.file(c_file)
.warnings(false)
.compile("android_native_app_glue");
// Get the output directory.
let out_dir =
env::var("OUT_DIR").expect("Cargo should have set the OUT_DIR environment variable");
println!("cargo:rustc-link-lib=static=android_native_app_glue");
println!("cargo:rustc-link-search=native={}", out_dir);
println!("cargo:rustc-link-lib=log");
println!("cargo:rustc-link-lib=android");
}
fn generate_gl_bindings(target: &str) {
// For now, we only support EGL, and only on Windows and Android.
if target.contains("android") || target.contains("windows") {
let dest = env::var("OUT_DIR").unwrap();
let mut file = File::create(&Path::new(&dest).join("egl_bindings.rs")).unwrap();
if target.contains("android") {
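        // static bindings for Android (EGL is linked directly); Windows below uses the loadable StructGenerator instead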
Registry::new(Api::Egl, (1, 5), Profile::Core, Fallbacks::All, [])
.write_bindings(gl_generator::StaticStructGenerator, &mut file)
.unwrap();
}
if target.contains("windows") {
Registry::new(Api::Egl, (1, 5), Profile::Core, Fallbacks::All, [])
.write_bindings(gl_generator::StructGenerator, &mut file)
.unwrap();
};<|fim▁hole|><|fim▁end|> | }
} |
<|file_name|>pricing-admin.js<|end_file_name|><|fim▁begin|>jQuery(document).ready(function($){
// Pricing Tables Deleting
$('.uds-pricing-admin-table .pricing-delete').click(function(){
if(!confirm("Really delete table?")) {
return false;
}
});
// products
$('#uds-pricing-products form').submit(function(){
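        // re-index checkbox names and radio values so products are saved in their current (drag-sorted) order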
$('#uds-pricing-products .product').each(function(i, el){
$("input[type=checkbox]", this).each(function() {
$(this).attr('name', $(this).attr('name').replace('[]', '[' + i + ']'));
});
$("input[type=radio]", this).each(function() {
$(this).val(i);
});
});
});
// products moving
$('#uds-pricing-products').sortable({
containment: '#uds-pricing-products',
cursor: 'crosshair',
forcePlaceholderSize: true,
forceHelperSize: true,
handle: '.move',
items: '.product',
placeholder: 'placeholder',
opacity: 0.6,
tolerance: 'pointer',
axis: 'y'
});
// products deleting
$('#uds-pricing-products .delete').click(function(){
if(confirm("Really delete product?")) {
$(this).parents('.product').slideUp(300, function(){
$(this).remove();
});
}
});
// products collapsing
$('#uds-pricing-products h3.collapsible').click(function(){
$('.options', $(this).parent()).slideToggle(300);
$(this).add($(this).parent()).toggleClass('collapsed');
}).trigger('click');
var collapsed = true;
$('.collapse-all').click(function(){
if(collapsed) {
$('.options').slideDown(300);
$('.product').add('h3.collapsible').removeClass('collapsed');
collapsed = false;
$(this).html('Collapse all');
} else {
$('.options').slideUp(300);
$('.product').add('h3.collapsible').addClass('collapsed');
collapsed = true;
$(this).html('Open all');
}
return false;
});
// table changer
$('.uds-change-table').click(function(){
window.location = window.location + "&uds_pricing_edit=" + $('.uds-load-pricing-table').val();
});
//structure
$('#uds-pricing-properties table').sortable({
containment: '#uds-pricing-properties',
cursor: 'crosshair',
forcePlaceholderSize: true,
handle: '.move',
items: 'tr',
axis: 'y'<|fim▁hole|> // properties deleting
$('#uds-pricing-properties .delete').live('click', function(){
if(confirm("Really delete?")) {
$(this).parents("tr").remove();
$('#uds-pricing-properties table').sortable('refresh');
}
});
// properties adding
var empty = $('#uds-pricing-properties tr:last').clone();
$('#uds-pricing-properties .add').live('click', function(){
$('#uds-pricing-properties table').append($(empty).clone());
$('#uds-pricing-properties table').sortable('refresh');
});
// Tooltips
$('.tooltip').hover(function(){
$tt = $(this).parent().find('.tooltip-content');
$tt.stop().css({
display: 'block',
top: $(this).position().top,
left: $(this).position().left + 40 + 'px',
opacity: 0
}).animate({
opacity: 1
}, 300);
}, function(){
$tt = $(this).parent().find('.tooltip-content');
$tt.stop().css({
opacity: 1
}).animate({
opacity: 0
}, {
duration: 300,
complete: function(){
$(this).css('display', 'none');
}
});
});
});<|fim▁end|> | });
|