prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k) |
---|---|
<|file_name|>shrunk_covariance_.py<|end_file_name|><|fim▁begin|>"""<|fim▁hole|>
"""
# Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD Style.
# avoid division truncation
from __future__ import division
import warnings
import numpy as np
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance
from ..utils import array2d
###############################################################################
# ShrunkCovariance estimator
def shrunk_covariance(emp_cov, shrinkage=0.1):
"""Calculates a covariance matrix shrunk on the diagonal
Parameters
----------
emp_cov: array-like, shape (n_features, n_features)
Covariance matrix to be shrunk
shrinkage: float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Returns
-------
shrunk_cov: array-like
shrunk covariance
Notes
-----
The regularized (shrunk) covariance is given by
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
"""
emp_cov = array2d(emp_cov)
n_features = emp_cov.shape[0]
mu = np.trace(emp_cov) / n_features
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov
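# Worked example (illustrative only, not part of the original module; the input
# matrix is made up): with shrinkage=0.2 and a 2x2 empirical covariance,
# mu = trace(cov) / n_features = (2.0 + 1.0) / 2 = 1.5, so
# >>> emp_cov = np.array([[2.0, 0.5], [0.5, 1.0]])
# >>> shrunk_covariance(emp_cov, shrinkage=0.2)
# array([[ 1.9,  0.4],
#        [ 0.4,  1.1]])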
class ShrunkCovariance(EmpiricalCovariance):
"""Covariance estimator with shrinkage
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored
shrinkage: float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Attributes
----------
`covariance_` : array-like, shape (n_features, n_features)
Estimated covariance matrix
`precision_` : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
`shrinkage`: float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized covariance is given by
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
"""
def __init__(self, store_precision=True, assume_centered=False,
shrinkage=0.1):
EmpiricalCovariance.__init__(self, store_precision=store_precision,
assume_centered=assume_centered)
self.shrinkage = shrinkage
def fit(self, X, y=None):
""" Fits the shrunk covariance model
according to the given training data and parameters.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
        y: not used, present for API consistency purposes.
assume_centered: Boolean
If True, data are not centered before computation.
          Useful when working with data whose mean is almost, but not exactly zero.
If False, data are centered before computation.
Returns
-------
self: object
Returns self.
"""
# Not calling the parent object to fit, to avoid a potential
# matrix inversion when setting the precision
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(X,
assume_centered=self.assume_centered)
covariance = shrunk_covariance(covariance, self.shrinkage)
self._set_covariance(covariance)
return self
###############################################################################
# Ledoit-Wolf estimator
def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000):
"""Estimates the shrunk Ledoit-Wolf covariance matrix.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage
assume_centered: Boolean
If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly zero.
If False, data are centered before computation.
block_size: int,
Size of the blocks into which the covariance matrix will be split.
Returns
-------
shrinkage: float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
return 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. " \
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
# optionaly center data
if not assume_centered:
X = X - X.mean(0)
# number of blocks to split the covariance matrix into
n_splits = int(n_features / block_size)
X2 = X ** 2
emp_cov_trace = np.sum(X2, axis=0) / n_samples
mu = np.sum(emp_cov_trace) / n_features
beta_ = 0. # sum of the coefficients of <X2.T, X2>
delta_ = 0. # sum of the *squared* coefficients of <X.T, X>
# starting block computation
for i in xrange(n_splits):
for j in xrange(n_splits):
rows = slice(block_size * i, block_size * (i + 1))
cols = slice(block_size * j, block_size * (j + 1))
beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols]))
delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2)
rows = slice(block_size * i, block_size * (i + 1))
beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits:]))
delta_ += np.sum(
np.dot(X.T[rows], X[:, block_size * n_splits:]) ** 2)
for j in xrange(n_splits):
cols = slice(block_size * j, block_size * (j + 1))
beta_ += np.sum(np.dot(X2.T[block_size * n_splits:], X2[:, cols]))
delta_ += np.sum(
np.dot(X.T[block_size * n_splits:], X[:, cols]) ** 2)
delta_ += np.sum(np.dot(X.T[block_size * n_splits:],
X[:, block_size * n_splits:]) ** 2)
delta_ /= n_samples ** 2
beta_ += np.sum(np.dot(
X2.T[block_size * n_splits:], X2[:, block_size * n_splits:]))
# use delta_ to compute beta
beta = 1. / (n_features * n_samples) * (beta_ / n_samples - delta_)
# delta is the sum of the squared coefficients of (<X.T,X> - mu*Id) / p
delta = delta_ - 2. * mu * emp_cov_trace.sum() + n_features * mu ** 2
delta /= n_features
# get final beta as the min between beta and delta
beta = min(beta, delta)
# finally get shrinkage
shrinkage = beta / delta
return shrinkage
def ledoit_wolf(X, assume_centered=False, block_size=1000):
"""Estimates the shrunk Ledoit-Wolf covariance matrix.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered: Boolean
If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly zero.
If False, data are centered before computation.
block_size: int,
Size of the blocks into which the covariance matrix will be split.
If n_features > `block_size`, an error will be raised since the
shrunk covariance matrix will be considered as too large regarding
the available memory.
Returns
-------
shrunk_cov: array-like, shape (n_features, n_features)
Shrunk covariance.
shrinkage: float
Coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularized (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
if not assume_centered:
X = X - X.mean()
return np.atleast_2d((X ** 2).mean()), 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. " \
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
if n_features > block_size:
raise MemoryError("LW: n_features is too large, " +
"try increasing block_size")
# get Ledoit-Wolf shrinkage
shrinkage = ledoit_wolf_shrinkage(
X, assume_centered=assume_centered, block_size=block_size)
emp_cov = empirical_covariance(X, assume_centered=assume_centered)
mu = np.sum(np.trace(emp_cov)) / n_features
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov, shrinkage
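# Usage sketch (illustrative, not part of the original module; the random input
# below is made up):
# >>> rng = np.random.RandomState(0)
# >>> X = rng.randn(40, 10)
# >>> cov, s = ledoit_wolf(X)   # cov has shape (10, 10), s is the Ledoit-Wolf shrinkage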
class LedoitWolf(EmpiricalCovariance):
"""LedoitWolf Estimator
Ledoit-Wolf is a particular form of shrinkage, where the shrinkage
coefficient is computed using O. Ledoit and M. Wolf's formula as
described in "A Well-Conditioned Estimator for Large-Dimensional
Covariance Matrices", Ledoit and Wolf, Journal of Multivariate
Analysis, Volume 88, Issue 2, February 2004, pages 365-411.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored
assume_centered: bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
block_size: int,
Size of the blocks into which the covariance matrix will be split
during its Ledoit-Wolf estimation.
If n_features > `block_size`, an error will be raised since the
shrunk covariance matrix will be considered as too large regarding
the available memory.
Attributes
----------
`covariance_` : array-like, shape (n_features, n_features)
Estimated covariance matrix
`precision_` : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
`shrinkage_`: float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised covariance is::
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
    and shrinkage is given by the Ledoit and Wolf formula (see References)
References
----------
"A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices",
Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2,
February 2004, pages 365-411.
"""
def __init__(self, store_precision=True, assume_centered=False,
block_size=1000):
EmpiricalCovariance.__init__(self, store_precision=store_precision,
assume_centered=assume_centered)
self.block_size = block_size
def fit(self, X, y=None):
""" Fits the Ledoit-Wolf shrunk covariance model
according to the given training data and parameters.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
        y: not used, present for API consistency purposes.
Returns
-------
self: object
Returns self.
"""
# Not calling the parent object to fit, to avoid computing the
# covariance matrix (and potentially the precision)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance, shrinkage = ledoit_wolf(X - self.location_,
assume_centered=True, block_size=self.block_size)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
###############################################################################
# OAS estimator
def oas(X, assume_centered=False):
"""Estimate covariance with the Oracle Approximating Shrinkage algorithm.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered: boolean
If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly zero.
If False, data are centered before computation.
Returns
-------
shrunk_cov: array-like, shape (n_features, n_features)
Shrunk covariance
shrinkage: float
coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised (shrunk) covariance is:
(1 - shrinkage)*cov
+ shrinkage * mu * np.identity(n_features)
where mu = trace(cov) / n_features
The formula we used to implement the OAS
does not correspond to the one given in the article. It has been taken
from the MATLAB program available from the author's webpage
(https://tbayes.eecs.umich.edu/yilun/covestimation).
"""
X = np.asarray(X)
# for only one feature, the result is the same whatever the shrinkage
if len(X.shape) == 2 and X.shape[1] == 1:
if not assume_centered:
X = X - X.mean()
return np.atleast_2d((X ** 2).mean()), 0.
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. " \
"You may want to reshape your data array")
n_samples = 1
n_features = X.size
else:
n_samples, n_features = X.shape
emp_cov = empirical_covariance(X, assume_centered=assume_centered)
mu = np.trace(emp_cov) / n_features
# formula from Chen et al.'s **implementation**
alpha = np.mean(emp_cov ** 2)
num = alpha + mu ** 2
den = (n_samples + 1.) * (alpha - (mu ** 2) / n_features)
shrinkage = min(num / den, 1.)
shrunk_cov = (1. - shrinkage) * emp_cov
shrunk_cov.flat[::n_features + 1] += shrinkage * mu
return shrunk_cov, shrinkage
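# Usage sketch (illustrative, not part of the original module): oas() follows the
# same calling convention as ledoit_wolf() above, only the shrinkage formula differs.
# >>> cov, s = oas(X)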
class OAS(EmpiricalCovariance):
"""
Oracle Approximating Shrinkage Estimator
OAS is a particular form of shrinkage described in
"Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
The formula used here does not correspond to the one given in the
article. It has been taken from the Matlab program available from the
authors' webpage (https://tbayes.eecs.umich.edu/yilun/covestimation).
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered: bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
`covariance_` : array-like, shape (n_features, n_features)
Estimated covariance matrix
`precision_` : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
`shrinkage_`: float, 0 <= shrinkage <= 1
coefficient in the convex combination used for the computation
of the shrunk estimate.
Notes
-----
The regularised covariance is::
(1 - shrinkage)*cov
+ shrinkage*mu*np.identity(n_features)
where mu = trace(cov) / n_features
    and shrinkage is given by the OAS formula (see References)
References
----------
"Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
def fit(self, X, y=None):
""" Fits the Oracle Approximating Shrinkage covariance model
according to the given training data and parameters.
Parameters
----------
X: array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
        y: not used, present for API consistency purposes.
Returns
-------
self: object
Returns self.
"""
# Not calling the parent object to fit, to avoid computing the
# covariance matrix (and potentially the precision)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance, shrinkage = oas(X - self.location_, assume_centered=True)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self<|fim▁end|> | Covariance estimators using shrinkage.
Shrinkage corresponds to regularising `cov` using a convex combination:
shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate. |
<|file_name|>DevProjects.js<|end_file_name|><|fim▁begin|>import React, {Component} from "react";
import { addAnimationId } from './modules/animationFunctions.js'
import '../scss/dev-projects.css';
import close from "../assets/close.png";
export default class DevProjects extends Component {
componentDidMount() { addAnimationId('dev-projects', 'dev-section-fadein') }
render() {
return (
<div id="dev-projects">
<i onClick={this.props.closeButtonClick} className="fal fa-times"></i>
<div className="project-container">
<div>
<p>PERSONAL SITE PROJECT</p>
<p>BUILT A NEW PERSONAL SITE USING REACT.JS WHILE UTILIZING VARIOUS ANIMATIONS AND TRANSITIONS</p>
</div>
<div>
<a href="https://github.com/Ayaz2589/personal_site" target="_blank">
VIEW PERSONAL SITE CODEBASE
</a>
</div>
</div>
</div>
);<|fim▁hole|><|fim▁end|> | }
}; |
<|file_name|>TestComponent.spec.js<|end_file_name|><|fim▁begin|>'use strict';
describe('TestComponent', function () {
var componentController;
beforeEach(module('APPLICATION'));
beforeEach(inject(function ($componentController) {
componentController = $componentController('testComponent', null, {
test: {
data: 'data'
}
});
}));
it('is defined', function () {
expect(componentController).toBeDefined();
});
<|fim▁hole|> it('modifyTestData modifies test data', function () {
componentController.data = 'data';
componentController.modifyTestData();
expect(componentController.data).toBe(' edited in the component controller');
});
});
});<|fim▁end|> | describe('when called', function () { |
<|file_name|>translation-map.js<|end_file_name|><|fim▁begin|>import Ember from 'ember';
<|fim▁hole|><|fim▁end|> | let __TRANSLATION_MAP__ = {};
export default Ember.Service.extend({ map: __TRANSLATION_MAP__ }); |
<|file_name|>pair_lj_smooth_omp.cpp<|end_file_name|><|fim▁begin|>/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, [email protected]
This software is distributed under the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Axel Kohlmeyer (Temple U)
------------------------------------------------------------------------- */
#include "math.h"
#include "pair_lj_smooth_omp.h"
#include "atom.h"
#include "comm.h"
#include "force.h"
#include "neighbor.h"
#include "neigh_list.h"
#include "suffix.h"
using namespace LAMMPS_NS;
/* ---------------------------------------------------------------------- */
PairLJSmoothOMP::PairLJSmoothOMP(LAMMPS *lmp) :
PairLJSmooth(lmp), ThrOMP(lmp, THR_PAIR)
{
suffix_flag |= Suffix::OMP;
respa_enable = 0;
}
/* ---------------------------------------------------------------------- */
void PairLJSmoothOMP::compute(int eflag, int vflag)
{
if (eflag || vflag) {
ev_setup(eflag,vflag);
} else evflag = vflag_fdotr = 0;
const int nall = atom->nlocal + atom->nghost;
const int nthreads = comm->nthreads;
const int inum = list->inum;
#if defined(_OPENMP)
#pragma omp parallel default(none) shared(eflag,vflag)
#endif
{
int ifrom, ito, tid;
loop_setup_thr(ifrom, ito, tid, inum, nthreads);
ThrData *thr = fix->get_thr(tid);
ev_setup_thr(eflag, vflag, nall, eatom, vatom, thr);
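    // Note: the branches below select a compile-time specialization of
    // eval<EVFLAG,EFLAG,NEWTON_PAIR>, so the flag checks are resolved by the
    // compiler instead of being re-evaluated inside the inner pair loop.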
if (evflag) {
if (eflag) {
if (force->newton_pair) eval<1,1,1>(ifrom, ito, thr);
else eval<1,1,0>(ifrom, ito, thr);
} else {
if (force->newton_pair) eval<1,0,1>(ifrom, ito, thr);<|fim▁hole|> }
} else {
if (force->newton_pair) eval<0,0,1>(ifrom, ito, thr);
else eval<0,0,0>(ifrom, ito, thr);
}
reduce_thr(this, eflag, vflag, thr);
} // end of omp parallel region
}
template <int EVFLAG, int EFLAG, int NEWTON_PAIR>
void PairLJSmoothOMP::eval(int iifrom, int iito, ThrData * const thr)
{
int i,j,ii,jj,jnum,itype,jtype;
double xtmp,ytmp,ztmp,delx,dely,delz,evdwl,fpair;
double rsq,r2inv,r6inv,forcelj,factor_lj;
double r,t,tsq,fskin;
int *ilist,*jlist,*numneigh,**firstneigh;
evdwl = 0.0;
const double * const * const x = atom->x;
double * const * const f = thr->get_f();
const int * const type = atom->type;
const int nlocal = atom->nlocal;
const double * const special_lj = force->special_lj;
double fxtmp,fytmp,fztmp;
ilist = list->ilist;
numneigh = list->numneigh;
firstneigh = list->firstneigh;
// loop over neighbors of my atoms
for (ii = iifrom; ii < iito; ++ii) {
i = ilist[ii];
xtmp = x[i][0];
ytmp = x[i][1];
ztmp = x[i][2];
itype = type[i];
jlist = firstneigh[i];
jnum = numneigh[i];
fxtmp=fytmp=fztmp=0.0;
for (jj = 0; jj < jnum; jj++) {
j = jlist[jj];
factor_lj = special_lj[sbmask(j)];
j &= NEIGHMASK;
delx = xtmp - x[j][0];
dely = ytmp - x[j][1];
delz = ztmp - x[j][2];
rsq = delx*delx + dely*dely + delz*delz;
jtype = type[j];
if (rsq < cutsq[itype][jtype]) {
r2inv = 1.0/rsq;
if (rsq < cut_inner_sq[itype][jtype]) {
r6inv = r2inv*r2inv*r2inv;
forcelj = r6inv * (lj1[itype][jtype]*r6inv-lj2[itype][jtype]);
} else {
r = sqrt(rsq);
t = r - cut_inner[itype][jtype];
tsq = t*t;
fskin = ljsw1[itype][jtype] + ljsw2[itype][jtype]*t +
ljsw3[itype][jtype]*tsq + ljsw4[itype][jtype]*tsq*t;
forcelj = fskin*r;
}
fpair = factor_lj*forcelj*r2inv;
fxtmp += delx*fpair;
fytmp += dely*fpair;
fztmp += delz*fpair;
if (NEWTON_PAIR || j < nlocal) {
f[j][0] -= delx*fpair;
f[j][1] -= dely*fpair;
f[j][2] -= delz*fpair;
}
if (EFLAG) {
if (rsq < cut_inner_sq[itype][jtype])
evdwl = r6inv * (lj3[itype][jtype]*r6inv -
lj4[itype][jtype]) - offset[itype][jtype];
else
evdwl = ljsw0[itype][jtype] - ljsw1[itype][jtype]*t -
ljsw2[itype][jtype]*tsq/2.0 - ljsw3[itype][jtype]*tsq*t/3.0 -
ljsw4[itype][jtype]*tsq*tsq/4.0 - offset[itype][jtype];
evdwl *= factor_lj;
}
if (EVFLAG) ev_tally_thr(this,i,j,nlocal,NEWTON_PAIR,
evdwl,0.0,fpair,delx,dely,delz,thr);
}
}
f[i][0] += fxtmp;
f[i][1] += fytmp;
f[i][2] += fztmp;
}
}
/* ---------------------------------------------------------------------- */
double PairLJSmoothOMP::memory_usage()
{
double bytes = memory_usage_thr();
bytes += PairLJSmooth::memory_usage();
return bytes;
}<|fim▁end|> | else eval<1,0,0>(ifrom, ito, thr); |
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>/* Aurélien DESBRIÈRES
aurelien(at)hackers(dot)camp
License GNU GPL latest */
// Rust experimentations<|fim▁hole|> // A public struct with a public field of generic type `T`
pub struct WhiteBox<T> {
pub contents: T,
}
// A public struct with a private field of generic type `T`
#[allow(dead_code)]
pub struct BlackBox<T> {
contents: T,
}
impl<T> BlackBox<T> {
// A public constructor method
pub fn new(contents: T) -> BlackBox<T> {
BlackBox {
contents: contents,
}
}
}
}
fn main() {
// Public structs with public fields can be constructed as usual
let white_box = my::WhiteBox { contents: "public information" };
// and their fields can be normally accessed.
println!("The white box contains: {}", white_box.contents);
// Public structs with private fields cannot be constructed using field names.
// Error! `BlackBox` has private fields
//let black_box = my::BlackBox { contents: "classified information" };
// TODO ^ Try uncommenting this line
// However, structs with private fields can be created using
// public constructors
let _black_box = my::BlackBox::new("classified information");
// and the private fields of a public struct cannot be accessed.
// Error! The `contents` field is private
//println!("The black box contains: {}", _black_box.contents);
// TODO ^ Try uncommenting this line
}<|fim▁end|> | // Modules Struct Visibility in Rust
mod my { |
<|file_name|>TabularDataCollection.java<|end_file_name|><|fim▁begin|>/*******************************************************************************
* Caleydo - Visualization for Molecular Biology - http://caleydo.org
* Copyright (c) The Caleydo Team. All rights reserved.
* Licensed under the new BSD license, available at http://caleydo.org/license
*******************************************************************************/
package org.caleydo.view.relationshipexplorer.ui.collection;
import java.util.HashSet;
import java.util.Set;
import org.caleydo.core.data.datadomain.ATableBasedDataDomain;
import org.caleydo.core.data.perspective.table.TablePerspective;
import org.caleydo.core.data.perspective.variable.Perspective;
import org.caleydo.core.data.virtualarray.VirtualArray;
import org.caleydo.core.id.IDCategory;
import org.caleydo.core.id.IDType;
import org.caleydo.view.relationshipexplorer.ui.ConTourElement;
import org.caleydo.view.relationshipexplorer.ui.collection.idprovider.IElementIDProvider;
import org.caleydo.view.relationshipexplorer.ui.column.factory.ColumnFactories;
import org.caleydo.view.relationshipexplorer.ui.column.factory.IColumnFactory;
import org.caleydo.view.relationshipexplorer.ui.detail.parcoords.ParallelCoordinatesDetailViewFactory;
import com.google.common.collect.Sets;
/**
* @author Christian
*
*/
public class TabularDataCollection extends AEntityCollection {
protected final ATableBasedDataDomain dataDomain;
protected final IDCategory itemIDCategory;
protected final TablePerspective tablePerspective;
protected final IDType itemIDType;
protected final VirtualArray va;
protected final Perspective dimensionPerspective;
protected final IDType mappingIDType;
public TabularDataCollection(TablePerspective tablePerspective, IDCategory itemIDCategory,
IElementIDProvider elementIDProvider, ConTourElement relationshipExplorer) {
super(relationshipExplorer);
dataDomain = tablePerspective.getDataDomain();
this.itemIDCategory = itemIDCategory;
this.tablePerspective = tablePerspective;
this.mappingIDType = dataDomain.getDatasetDescriptionIDType(itemIDCategory);
if (dataDomain.getDimensionIDCategory() == itemIDCategory) {
va = tablePerspective.getDimensionPerspective().getVirtualArray();
itemIDType = tablePerspective.getDimensionPerspective().getIdType();
dimensionPerspective = tablePerspective.getRecordPerspective();
} else {
va = tablePerspective.getRecordPerspective().getVirtualArray();
itemIDType = tablePerspective.getRecordPerspective().getIdType();
dimensionPerspective = tablePerspective.getDimensionPerspective();
}
if (elementIDProvider == null)
elementIDProvider = getDefaultElementIDProvider(va);
allElementIDs.addAll(elementIDProvider.getElementIDs());
filteredElementIDs.addAll(allElementIDs);
setLabel(dataDomain.getLabel());
detailViewFactory = new ParallelCoordinatesDetailViewFactory();
}
@Override
public IDType getBroadcastingIDType() {
return itemIDType;
}
@Override
protected Set<Object> getBroadcastIDsFromElementID(Object elementID) {
return Sets.newHashSet(elementID);
}
@Override
protected Set<Object> getElementIDsFromBroadcastID(Object broadcastingID) {
return Sets.newHashSet(broadcastingID);
}
@Override
protected IColumnFactory getDefaultColumnFactory() {
return ColumnFactories.createDefaultTabularDataColumnFactory();
}
/**
* @return the dataDomain, see {@link #dataDomain}
*/
public ATableBasedDataDomain getDataDomain() {
return dataDomain;
}
/**
* @return the perspective, see {@link #dimensionPerspective}
*/
public Perspective getDimensionPerspective() {
return dimensionPerspective;
}
/**
* @return the itemIDCategory, see {@link #itemIDCategory}
*/
public IDCategory getItemIDCategory() {
return itemIDCategory;
}
/**
* @return the itemIDType, see {@link #itemIDType}
*/
public IDType getItemIDType() {
return itemIDType;
}
@Override
public IDType getMappingIDType() {
return mappingIDType;
}
/**
* @return the tablePerspective, see {@link #tablePerspective}
*/
public TablePerspective getTablePerspective() {
return tablePerspective;
}
<|fim▁hole|> /**
* @return the va, see {@link #va}
*/
public VirtualArray getVa() {
return va;
}
public static IElementIDProvider getDefaultElementIDProvider(final VirtualArray va) {
return new IElementIDProvider() {
@Override
public Set<Object> getElementIDs() {
return new HashSet<Object>(va.getIDs());
}
};
}
@Override
public String getText(Object elementID) {
return elementID.toString();
}
}<|fim▁end|> | |
<|file_name|>htmlframesetelement.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLFrameSetElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLFrameSetElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::document::Document;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{Node, NodeTypeId};
use servo_util::str::DOMString;
#[dom_struct]
pub struct HTMLFrameSetElement {
htmlelement: HTMLElement
}
impl HTMLFrameSetElementDerived for EventTarget {
fn is_htmlframesetelement(&self) -> bool {
*self.type_id() == EventTargetTypeId::Node(NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLFrameSetElement)))
}
}
impl HTMLFrameSetElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLFrameSetElement {
HTMLFrameSetElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLFrameSetElement, localName, prefix, document)
}
}
<|fim▁hole|> pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLFrameSetElement> {
let element = HTMLFrameSetElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLFrameSetElementBinding::Wrap)
}
}<|fim▁end|> | #[allow(unrooted_must_root)] |
<|file_name|>vj_serial.py<|end_file_name|><|fim▁begin|>import logging
import struct
import threading
import serial
import serial.tools.list_ports
COMMAND_TO_CHANNEL = {
'F': 0x00,
'W': 0x01,
'H': 0x02,
'C': 0x03
}
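# Frame layout used by SerialPort._send_serial_command below (restating the code
# with an illustrative value): send_serial_command('F', 128) writes the five bytes
# 0xF6 0x6F 0x04 <channel> <value>, i.e. 0xF6 0x6F 0x04 0x00 0x80 -- two fixed
# header bytes, the constant 0x04, the channel from COMMAND_TO_CHANNEL and the value.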
class SerialPort(object):
def __init__(self, port_name):
self.port_name = port_name
self.serial_port = None
self.serial_lock = None
self.log_thread = None
self.serial_lock = threading.Lock()
self.initSerialPort()
def initSerialPort(self):
port_device = self.get_serial_port_device()
logging.info("Initializing port %s", port_device)
try:
# Init Serial port
self.serial_port = serial.Serial(port_device, timeout=1, baudrate=115200)
self.serial_port.flushInput()
self.serial_port.flushOutput()
except (OSError, serial.serialutil.SerialException) as error:
logging.error("Cannot initialize. Reason: %s", error)<|fim▁hole|> logging.debug("Serial: %s", self.serial_port)
def _send_serial_command(self, command, value):
if command not in COMMAND_TO_CHANNEL:
logging.error("Unknown command: %s", command)
return
message = self.int2bin(0xF6) + self.int2bin(0x6F) + self.int2bin(0x04) + self.int2bin(COMMAND_TO_CHANNEL[command]) + self.int2bin(value)
if self.serial_port:
try:
self.serial_lock.acquire(True)
ret = self.serial_port.write(message)
logging.debug("Sent %s Bytes, being", ret)
for x in message:
logging.debug("%s", self.bin2int(x))
finally:
self.serial_lock.release()
else:
logging.error("Not sending %s, %s - no serial port?", command, value)
def send_serial_command(self, command, value):
if not self.serial_port:
self.initSerialPort()
if self.serial_port:
try:
self._send_serial_command(command, value)
except IOError:
self.initSerialPort()
self._send_serial_command(command, value)
def get_serial_port_device(self):
ports = serial.tools.list_ports.grep(self.port_name)
try:
return next(ports).device
except StopIteration:
return None
@staticmethod
def int2bin(value):
return struct.pack('!B', value)
@staticmethod
def bin2int(value):
if isinstance(value, int):
return value
return struct.unpack('!B', value)[0]
def close(self):
# Close serial port
logging.info("Close serial port")
if self.serial_port is not None and self.serial_port.isOpen():
self.serial_port.close()
self.serial_port = None<|fim▁end|> | from vjdummyserial import VjDummySerial
self.serial_port = VjDummySerial(port_device)
logging.error("Running on dummy serial")
|
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>extern crate symbiosis;
use std::path::Path;
use std::fs::{File, create_dir_all};
use std::io::Read;
use std::default::Default;
use symbiosis::TemplateGroup;
use symbiosis::rust::{self, Rust};
use symbiosis::javascript::{self, JavaScript};
fn main() {
let out_dir = std::env::var("OUT_DIR").unwrap();
let rust_dest = Path::new(&out_dir).join("symbiosis/");
if let Err(e) = create_dir_all(&rust_dest) {
panic!("failed to create Symbiosis output directory: {}", e);
}
let js_dest = Path::new("res");
let mut templates = TemplateGroup::new();
if let Err(e) = templates.parse_directory("templates/shared") {
panic!("failed to precompile templates/shared: {}", e);
}
let js = JavaScript {
namespace: Some("templates"),
..Default::default()
};
let rust = Rust { ..Default::default() };
if let Err(e) = File::create(js_dest.join("templates.js")).map_err(|e| javascript::Error::Io(e)).and_then(|mut file| templates.emit_code(&mut file, &js)) {
panic!("failed to create res/templates.js: {}", e);
}
let mut source = String::new();
if let Err(e) = File::open("templates/Document.html").and_then(|mut f| f.read_to_string(&mut source)) {<|fim▁hole|>
if let Err(e) = templates.parse_string("Document".into(), source) {
panic!("failed to parse templates/Document.html: {}", e);
}
if let Err(e) = File::create(rust_dest.join("templates.rs")).map_err(|e| rust::Error::Io(e)).and_then(|mut file| templates.emit_code(&mut file, &rust)) {
panic!("failed to create symbiosis/templates.rs: {}", e);
}
}<|fim▁end|> | panic!("failed to read templates/Document.html: {}", e);
} |
<|file_name|>fedjax_test.py<|end_file_name|><|fim▁begin|># Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fedjax."""
import unittest
import fedjax
<|fim▁hole|>
class FedjaxTest(unittest.TestCase):
"""Test fedjax can be imported correctly."""
def test_import(self):
self.assertTrue(hasattr(fedjax, 'FederatedAlgorithm'))
self.assertTrue(hasattr(fedjax.aggregators, 'Aggregator'))
self.assertTrue(hasattr(fedjax.algorithms, 'fed_avg'))
self.assertTrue(hasattr(fedjax.datasets, 'emnist'))
self.assertTrue(hasattr(fedjax.models, 'emnist'))
self.assertTrue(hasattr(fedjax.training, 'save_checkpoint'))
def test_no_core(self):
self.assertFalse(hasattr(fedjax, 'core'))
if __name__ == '__main__':
unittest.main()<|fim▁end|> | |
<|file_name|>test_002_board_class.py<|end_file_name|><|fim▁begin|>import code
import unittest
import os
import pcbnew
import pdb
import tempfile
from pcbnew import *
class TestBoardClass(unittest.TestCase):
def setUp(self):
self.pcb = LoadBoard("data/complex_hierarchy.kicad_pcb")
self.TITLE="Test Board"
self.COMMENT1="For load/save test"
self.FILENAME=tempfile.mktemp()+".kicad_pcb"
def test_pcb_find_module(self):
module = self.pcb.FindModule('P1')
self.assertEqual(module.GetReference(),'P1')
def test_pcb_get_track_count(self):
pcb = BOARD()
self.assertEqual(pcb.GetNumSegmTrack(),0)
track0 = TRACK(pcb)
pcb.Add(track0)
self.assertEqual(pcb.GetNumSegmTrack(),1)
track1 = TRACK(pcb)
pcb.Add(track1)
self.assertEqual(pcb.GetNumSegmTrack(),2)
def test_pcb_bounding_box(self):
pcb = BOARD()
track = TRACK(pcb)
pcb.Add(track)
#track.SetStartEnd(wxPointMM(10.0, 10.0),
# wxPointMM(20.0, 30.0))
track.SetStart(wxPointMM(10.0, 10.0))
track.SetEnd(wxPointMM(20.0, 30.0))
track.SetWidth(FromMM(0.5))
#!!! THIS FAILS? == 0.0 x 0.0 ??
#height, width = ToMM(pcb.ComputeBoundingBox().GetSize())
bounding_box = pcb.ComputeBoundingBox()
height, width = ToMM(bounding_box.GetSize())
self.assertAlmostEqual(width, (30-10) + 0.5, 2)
self.assertAlmostEqual(height, (20-10) + 0.5, 2)
def test_pcb_get_pad(self):
pcb = BOARD()
module = MODULE(pcb)
pcb.Add(module)
pad = D_PAD(module)
module.Add(pad)
pad.SetShape(PAD_OVAL)
pad.SetSize(wxSizeMM(2.0, 3.0))<|fim▁hole|> # easy case
p1 = pcb.GetPad(wxPointMM(0,0))
# top side
p2 = pcb.GetPad(wxPointMM(0.9,0.0))
# bottom side
p3 = pcb.GetPad(wxPointMM(0,1.4))
# TODO: get pad == p1 evaluated as true instead
# of relying in the internal C++ object pointer
self.assertEqual(pad.this, p1.this)
self.assertEqual(pad.this, p2.this)
self.assertEqual(pad.this, p3.this)
def test_pcb_save_and_load(self):
pcb = BOARD()
pcb.GetTitleBlock().SetTitle(self.TITLE)
pcb.GetTitleBlock().SetComment1(self.COMMENT1)
result = SaveBoard(self.FILENAME,pcb)
self.assertTrue(result)
pcb2 = LoadBoard(self.FILENAME)
self.assertNotEqual(pcb2,None)
tb = pcb2.GetTitleBlock()
self.assertEqual(tb.GetTitle(),self.TITLE)
self.assertEqual(tb.GetComment1(),self.COMMENT1)
os.remove(self.FILENAME)
#def test_interactive(self):
# code.interact(local=locals())
if __name__ == '__main__':
unittest.main()<|fim▁end|> | pad.SetPosition(wxPointMM(0,0))
|
<|file_name|>sourceMapValidationVariables.js<|end_file_name|><|fim▁begin|>var a = 10;
var b;
var c = 10, d, e;
var c2, d2 = 10;
<|fim▁hole|><|fim▁end|> | //# sourceMappingURL=sourceMapValidationVariables.js.map |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from . import test_product_margin_classification<|fim▁end|> | # -*- coding: utf-8 -*-
|
<|file_name|>arrayConfigurationTools.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
"""
S. Leon @ ALMA
Classes, functions to be used for the array configuration evaluation with CASA
HISTORY:
2011.11.06:
- class to create the Casas pads file from a configuration file
2011.11.09:
- Class to compute statistics on the baselines
2012.03.07L:
- Class to manipulate the visibilities
2012.05.22:
- Change name of ArrayStatistics
- add plotAntPos
2012.11.30:
- Modification of createCasaConfig from a list of Pads (not a file)
2013.03.22:
- Modification of createCasaConfig to read as well from a file
2013.04.19:
- Update Arrayinfo.stats to get the information in the instance.
2013.04.20:
- Put the padPositionfile as a parameter
2013.09.13:
- fix a CASA problem
<|fim▁hole|>RUN:
## Create a CASA file
a=ArrayConfigurationCasaFile()
a.createCasaConfig("/home/stephane/alma/ArrayConfig/Cycle1/configurations/cycle1_config.txt")
CASA> :
sys.path.insert(0,'/home/stephane/git/ALMA/ALMA/ArrayConfiguration/')
"""
__version__="[email protected]"
__author__ ="ALMA: SL"
import numpy as np
import os
import pickle
from math import sqrt
import pylab as pl
home = os.environ['WTO']
class ArrayConfigurationCasaFile:
"""
Class to create the CASA configuration file matching the Pads and the positions
"""
def __init__(self, padPositionFile = home + "conf/Pads.cfg"):
self.padPositionFile = padPositionFile
self.pad={}
self.__readPadPosition__()
def __readPadPosition__(self):
"Read the position of all the Pads and put them in a Dictionary"
padFile=open(self.padPositionFile,"r")
dump=padFile.readline()
while(dump != ""):
if dump[0] !="#":
padsplt=dump.split()
self.pad[padsplt[4]]=[padsplt[0],padsplt[1],padsplt[2],padsplt[3]]
dump=padFile.readline()
padFile.close()
def createCasaConfig(self,configurationFile,listPads = []):
"""
If listPads is not empty, it will use configurationFile to create the CASA file.
"""
# Creating the array config files
headerCasa="# observatory=ALMA\n"
headerCasa+="# coordsys=LOC (local tangent plane)\n"
headerCasa+="# x y z diam pad#\n"
## Read the Pads in configurationFile if listPads is empty
if len(listPads) == 0:
listPads = []
fin = open(configurationFile)
for pad in fin:
dat = pad.split()
listPads.append(dat[0])
fin.close
configurationFile +=".cfg"
f = open(configurationFile,"w")
f.write(headerCasa)
for pads in listPads:
line=""
for s in self.pad[pads]:
line += s+" "
line+=pads
line+="\n"
f.write(line)
print "### %s created."%(configurationFile)
f.close()
class ArrayInfo:
"""
Compute the Statistics from a CASA array file.
max baseline, min baseline, rms, etc...
"""
def __init__(self,filename):
self.filename=filename
self.xPos = []
self.yPos = []
self.antName = []
self.__readFileArray__()
def __readFileArray__(self):
"Read the CASA file array and create the self.baseline array"
f=open(self.filename)
dump=f.readline()
while dump[0] == "#":
dump=f.readline()
ant=[]
xMean = 0.
yMean = 0.
while dump != "":
dataAnt=dump.split()
if dataAnt[0][0] != '#':
ant.append([float(dataAnt[0]),float(dataAnt[1])])
self.xPos.append(float(dataAnt[0]))
self.yPos.append(float(dataAnt[1]))
self.antName.append(dataAnt[4])
xMean += float(dataAnt[0])
yMean += float(dataAnt[1])
dump=f.readline()
nAnt=len(ant)
xMean = xMean / nAnt
yMean = yMean / nAnt
self.xMean = xMean
self.yMean = yMean
for i in range(nAnt):
self.xPos[i] -= xMean
self.yPos[i] -= yMean
nBl=(nAnt*(nAnt-1))/2
self.baseline=np.zeros(nBl,np.float32)
indexBl=0
for i in range(0,nAnt):
for j in range(i+1,nAnt):
blij2=(ant[i][0]-ant[j][0])*(ant[i][0]-ant[j][0])+(ant[i][1]-ant[j][1])*(ant[i][1]-ant[j][1])
self.baseline[indexBl]=sqrt(blij2)
indexBl+=1
print "Number of baselines: %d"%(nBl)
def stats(self):
"compute the statistics on self.baseline"
self.minBl=np.amin(self.baseline)
self.maxBl=np.amax(self.baseline)
bl2=self.baseline*self.baseline
self.rms=sqrt(np.average(bl2))
print "Array: %s"%(self.filename)
print "x Pos. Mean:%f"%(self.xMean)
print "y Pos. Mean:%f"%(self.yMean)
print "Min. baseline:%f"%(self.minBl)
print "Max. baseline:%f"%(self.maxBl)
print "RMS of the baselines:%f"%(self.rms)
print "\n"
def plotAntPos(self,xmin=-100,xmax=100,ymin=-100.,ymax=100,title='ALMA',xtitle=75.,ytitle=75.,figure=None):
"plot the positions of the antennas"
fig = pl.figure()
ax = fig.add_subplot('111')
ax.plot(self.xPos,self.yPos,'ro',markersize = 10.)
index = 0
for name in self.antName:
xx = self.xPos[index]
yy = self.yPos[index]
ax.text(xx,yy,name)
index += 1
ax.set_xlabel('X (meter)')
ax.set_ylabel('Y (meter)')
ax.set_xlim((xmin,xmax))
ax.set_ylim((ymin,ymax))
ax.text(xtitle,ytitle,title)
# pl.show()
if figure != None:
pl.savefig(figure)
class visibility:
def __init__(self,visname):
self.visname = visname
########################Main program####################################
if __name__=="__main__":
" main program"
## a=ArrayConfigurationCasaFile()
## a.createCasaConfig("/home/stephane/alma/ArrayConfig/Cycle1/configurations/cycle1_config.txt")<|fim▁end|> | |
<|file_name|>CreateJobPlaylistJsonUnmarshaller.java<|end_file_name|><|fim▁begin|>/*<|fim▁hole|> * Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.elastictranscoder.model.transform;
import java.util.Map;
import java.util.Map.Entry;
import com.amazonaws.services.elastictranscoder.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
* CreateJobPlaylist JSON Unmarshaller
*/
public class CreateJobPlaylistJsonUnmarshaller implements
Unmarshaller<CreateJobPlaylist, JsonUnmarshallerContext> {
public CreateJobPlaylist unmarshall(JsonUnmarshallerContext context)
throws Exception {
CreateJobPlaylist createJobPlaylist = new CreateJobPlaylist();
int originalDepth = context.getCurrentDepth();
String currentParentElement = context.getCurrentParentElement();
int targetDepth = originalDepth + 1;
JsonToken token = context.getCurrentToken();
if (token == null)
token = context.nextToken();
if (token == VALUE_NULL)
return null;
while (true) {
if (token == null)
break;
if (token == FIELD_NAME || token == START_OBJECT) {
if (context.testExpression("Name", targetDepth)) {
context.nextToken();
createJobPlaylist.setName(StringJsonUnmarshaller
.getInstance().unmarshall(context));
}
if (context.testExpression("Format", targetDepth)) {
context.nextToken();
createJobPlaylist.setFormat(StringJsonUnmarshaller
.getInstance().unmarshall(context));
}
if (context.testExpression("OutputKeys", targetDepth)) {
context.nextToken();
createJobPlaylist
.setOutputKeys(new ListUnmarshaller<String>(
StringJsonUnmarshaller.getInstance())
.unmarshall(context));
}
if (context.testExpression("HlsContentProtection", targetDepth)) {
context.nextToken();
createJobPlaylist
.setHlsContentProtection(HlsContentProtectionJsonUnmarshaller
.getInstance().unmarshall(context));
}
if (context.testExpression("PlayReadyDrm", targetDepth)) {
context.nextToken();
createJobPlaylist
.setPlayReadyDrm(PlayReadyDrmJsonUnmarshaller
.getInstance().unmarshall(context));
}
} else if (token == END_ARRAY || token == END_OBJECT) {
if (context.getLastParsedParentElement() == null
|| context.getLastParsedParentElement().equals(
currentParentElement)) {
if (context.getCurrentDepth() <= originalDepth)
break;
}
}
token = context.nextToken();
}
return createJobPlaylist;
}
private static CreateJobPlaylistJsonUnmarshaller instance;
public static CreateJobPlaylistJsonUnmarshaller getInstance() {
if (instance == null)
instance = new CreateJobPlaylistJsonUnmarshaller();
return instance;
}
}<|fim▁end|> | * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights
* Reserved.
* |
<|file_name|>common.js<|end_file_name|><|fim▁begin|>/*
This file is a part of libertysoil.org website
Copyright (C) 2016 Loki Education (Social Enterprise)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import i from 'immutable';
import {
getTypeError,
createRequirableTypeChecker,
createSimplifiedRequirableTypeChecker,
checkValues,
checkKeys
} from './utils';
export const mapOfKeys = (keyCheckType) => (
createSimplifiedRequirableTypeChecker(
(propValue, propFullName, componentName, location, ...rest) => {
const expectedType = 'object';
if (typeof propValue !== expectedType) {
return getTypeError(propValue, expectedType, propFullName, componentName, location);
}
return checkKeys(
keyCheckType,
propValue,
propFullName,
componentName,
location,
...rest
);
}
)
);
export const mapOfValues = (valueCheckType) => (
createSimplifiedRequirableTypeChecker(
(propValue, propFullName, componentName, location, ...rest) => {
const expectedType = 'object';
if (typeof propValue !== expectedType) {
return getTypeError(propValue, expectedType, propFullName, componentName, location);
}
return checkValues(
valueCheckType,
propValue,
propFullName,
componentName,
location,
...rest
);
}
)
);
export const mapOf = (keyCheckType, valueCheckType) => (
createSimplifiedRequirableTypeChecker(
(propValue, propFullName, componentName, location, ...rest) => {
const expectedType = 'object';
if (typeof propValue !== expectedType) {
return getTypeError(propValue, expectedType, propFullName, componentName, location);
}
const error = checkKeys(
keyCheckType,
propValue,
propFullName,
componentName,
location,
...rest
);
if (error instanceof Error) {
return error;
}
return checkValues(
valueCheckType,
propValue,
propFullName,
componentName,
location,
...rest
);
}
)
);
export const Immutable = (checkType) => (
createRequirableTypeChecker(
(props, propName, componentName, location, propFullName, ...rest) => {
const propValue = props[propName];
// all Immutable date types are subclasses of Immutable.Iterable
if (i.Iterable.isIterable(propValue)) {
const preparedPropValue = propValue.toJS();
const preraredProps = { ...props, [propName]: preparedPropValue };
// vanilla instance of PropTypes' checkType()
// or result of createRequirableTypeChecker()
if (!checkType.isSimplified) {
return checkType(
preraredProps,
propName,
componentName,
location,
propFullName,
...rest
);
}
// result of createSimplifiedRequirableTypeChecker()
let fullName = propName;
if (propFullName) {
fullName = propFullName;
}
return checkType(
preparedPropValue,
fullName,
componentName,
location,
...rest
);
}
return new Error(
`Invalid prop \`${propFullName}\` of type \`${typeof propValue}\` ` +
`supplied to \`${componentName}\` isn't an instance of any Immutable data type.`
);
}
)
);
export const uuid4 = createSimplifiedRequirableTypeChecker(
(propValue, propFullName, componentName, location) => {
const expectedType = 'string';
if (typeof propValue !== expectedType) {
return getTypeError(propValue, expectedType, propFullName, componentName, location);
}
const test = RegExp(/^[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}$/i);
if (!propValue.match(test)) {
return new Error(
`Invalid prop \`${propFullName}\` of type \`${expectedType}\` ` +
`supplied to \`${componentName}\` doesn't match the UUID pattern.`
);
}
return null;
}
);
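// Usage sketch (illustrative, not part of the original file; `PropTypes` and the
// `UserList` component are assumed to be defined elsewhere):
//
//   UserList.propTypes = {
//     usersById: mapOf(uuid4, PropTypes.object),
//   };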
export const date = createSimplifiedRequirableTypeChecker(
(propValue, propFullName, componentName, location) => {
if (propValue instanceof Date) {
return null;<|fim▁hole|> const expectedType = 'string';
if (typeof propValue !== expectedType) {
return getTypeError(propValue, expectedType, propFullName, componentName, location);
}
const date = new Date(propValue);
const dateString = date.toString();
if (dateString === 'Invalid date') {
return new Error(
`Invalid prop \`${propFullName}\` of type \`${expectedType}\` ` +
`supplied to \`${componentName}\` is invalid date string representation.`
);
}
return null;
}
);
export const url = createSimplifiedRequirableTypeChecker(
(propValue, propFullName, componentName, location) => {
const expectedType = 'string';
if (typeof propValue !== expectedType) {
return getTypeError(propValue, expectedType, propFullName, componentName, location);
}
const test = RegExp(/^[a-z0-9_.'-]+$/i);
if (!propValue.match(test)) {
return new Error(
`Invalid prop \`${propFullName}\` of type \`${expectedType}\` ` +
`supplied to \`${componentName}\` is invalid URL representation.`
);
}
return null;
}
);<|fim▁end|> | }
|
<|file_name|>AndroidRenderer.cpp<|end_file_name|><|fim▁begin|>//
// Created by chan on 2017/9/19.
//
#include "AndroidRenderer.h"
#include "../misc/RawString2JavaStringHolder.h"
void AndroidRenderer::begin() {
mJNIEnv->CallVoidMethod(mJavaRenderer, mBeginId);
}
void AndroidRenderer::end() {
mJNIEnv->CallVoidMethod(mJavaRenderer, mEndId);
}
void AndroidRenderer::renderTitle(RENDERER_UNIT unit, const Text &content) {
jint titleSize = mTitleSize5;
switch (unit) {
case RENDERER_UNIT::TITLE_1:
titleSize = mTitleSize1;
break;
case RENDERER_UNIT::TITLE_2:
titleSize = mTitleSize2;
break;
case RENDERER_UNIT::TITLE_3:
titleSize = mTitleSize3;
break;
case RENDERER_UNIT::TITLE_4:
titleSize = mTitleSize4;
break;
default:
break;
}
RawString2JavaStringHolder holder(mJNIEnv, content);
jstring jContent = holder.toJstring();
mJNIEnv->CallVoidMethod(mJavaRenderer, mRenderTitleId, titleSize, jContent);
}
<|fim▁hole|> RawString2JavaStringHolder holder(mJNIEnv, content);
mJNIEnv->CallVoidMethod(mJavaRenderer, mRenderTextureId, holder.toJstring());
}
void AndroidRenderer::renderTypeface(RENDERER_UNIT unit, const Text &content) {
jint typeface = mTypefaceItalic;
if (unit == RENDERER_UNIT::BOLD) {
typeface = mTypefaceBold;
}
RawString2JavaStringHolder holder(mJNIEnv, content);
mJNIEnv->CallVoidMethod(mJavaRenderer, mRenderTypefaceId, typeface, holder.toJstring());
}
void AndroidRenderer::renderOrderedList(const Text &num, const Text &content) {
RawString2JavaStringHolder numHolder(mJNIEnv, num);
RawString2JavaStringHolder contentHolder(mJNIEnv, content);
mJNIEnv->CallVoidMethod(mJavaRenderer, mRenderOrderedListId, numHolder.toJstring(),
contentHolder.toJstring());
}
void AndroidRenderer::renderUnorderedList(const Text &content) {
RawString2JavaStringHolder holder(mJNIEnv, content);
mJNIEnv->CallVoidMethod(mJavaRenderer, mRenderUnorderedListId, holder.toJstring());
}
void AndroidRenderer::renderNewLine() {
mJNIEnv->CallVoidMethod(mJavaRenderer, mRenderNewLineId);
}
void AndroidRenderer::renderImage(const Text &label, const Text &url) {
RawString2JavaStringHolder labelHolder(mJNIEnv, label);
RawString2JavaStringHolder urlHolder(mJNIEnv, url);
mJNIEnv->CallVoidMethod(mJavaRenderer, mRenderImageId, labelHolder.toJstring(),
urlHolder.toJstring());
}
void AndroidRenderer::renderLink(const Text &label, const Text &url) {
RawString2JavaStringHolder labelHolder(mJNIEnv, label);
RawString2JavaStringHolder urlHolder(mJNIEnv, url);
mJNIEnv->CallVoidMethod(mJavaRenderer, mRenderLinkId, labelHolder.toJstring(),
urlHolder.toJstring());
}
void AndroidRenderer::renderReference(const Text &content, bool append) {
RawString2JavaStringHolder holder(mJNIEnv, content);
mJNIEnv->CallVoidMethod(mJavaRenderer, mRenderReferenceId, holder.toJstring(), append);
}
AndroidRenderer::AndroidRenderer(JNIEnv *jNIEnv, jobject &javaRenderer) : mJNIEnv(jNIEnv),
mJavaRenderer(
javaRenderer) {
mJavaClass = mJNIEnv->FindClass("com/chan/mulan/renderer/MarkdownRenderer");
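    // The JNI signature strings below use the standard encoding "(args)return":
    // I = jint, Z = jboolean, V = void, and Ljava/lang/String; = a String argument.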
mBeginId = mJNIEnv->GetMethodID(mJavaClass, "begin", "()V");
mEndId = mJNIEnv->GetMethodID(mJavaClass, "end", "()V");
mRenderTitleId = mJNIEnv->GetMethodID(mJavaClass, "renderTitle", "(ILjava/lang/String;)V");
mRenderTextureId = mJNIEnv->GetMethodID(mJavaClass, "renderTexture", "(Ljava/lang/String;)V");
mRenderTypefaceId = mJNIEnv->GetMethodID(mJavaClass, "renderTypeface", "(ILjava/lang/String;)V");
mRenderOrderedListId = mJNIEnv->GetMethodID(mJavaClass, "renderOrderedList", "(Ljava/lang/String;Ljava/lang/String;)V");
mRenderUnorderedListId = mJNIEnv->GetMethodID(mJavaClass, "renderUnorderedList", "(Ljava/lang/String;)V");
mRenderNewLineId = mJNIEnv->GetMethodID(mJavaClass, "renderNewLine", "()V");
mRenderImageId = mJNIEnv->GetMethodID(mJavaClass, "renderImage", "(Ljava/lang/String;Ljava/lang/String;)V");
mRenderLinkId = mJNIEnv->GetMethodID(mJavaClass, "renderLink", "(Ljava/lang/String;Ljava/lang/String;)V");
mRenderReferenceId = mJNIEnv->GetMethodID(mJavaClass, "renderReference", "(Ljava/lang/String;Z)V");
jfieldID field = mJNIEnv->GetStaticFieldID(mJavaClass, "TITLE_SIZE_1", "I");
mTitleSize1 = mJNIEnv->GetStaticIntField(mJavaClass, field);
field = mJNIEnv->GetStaticFieldID(mJavaClass, "TITLE_SIZE_2", "I");
mTitleSize2 = mJNIEnv->GetStaticIntField(mJavaClass, field);
field = mJNIEnv->GetStaticFieldID(mJavaClass, "TITLE_SIZE_3", "I");
mTitleSize3 = mJNIEnv->GetStaticIntField(mJavaClass, field);
field = mJNIEnv->GetStaticFieldID(mJavaClass, "TITLE_SIZE_4", "I");
mTitleSize4 = mJNIEnv->GetStaticIntField(mJavaClass, field);
field = mJNIEnv->GetStaticFieldID(mJavaClass, "TITLE_SIZE_5", "I");
mTitleSize5 = mJNIEnv->GetStaticIntField(mJavaClass, field);
field = mJNIEnv->GetStaticFieldID(mJavaClass, "TYPEFACE_BOLD", "I");
mTypefaceBold = mJNIEnv->GetStaticIntField(mJavaClass, field);
field = mJNIEnv->GetStaticFieldID(mJavaClass, "TYPEFACE_ITALIC", "I");
mTypefaceItalic = mJNIEnv->GetStaticIntField(mJavaClass, field);
}<|fim▁end|> | void AndroidRenderer::renderTexture(const Text &content) { |
<|file_name|>__main__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import cgitb
import fnmatch
import io
import logging
import click
import pyjsdoc
import pyjsparser
import sys
from .parser.parser import ModuleMatcher
from .parser.visitor import Visitor, SKIP
from . import jsdoc
class Printer(Visitor):
def __init__(self, level=0):
super(Printer, self).__init__()
self.level = level
def _print(self, text):
print ' ' * self.level, text
def enter_generic(self, node):
self._print(node['type'])
self.level += 1
def exit_generic(self, node):
self.level -= 1
def enter_Identifier(self, node):
self._print(node['name'])
return SKIP
def enter_Literal(self, node):
self._print(node['value'])
return SKIP
def enter_BinaryExpression(self, node):
self._print(node['operator'])
self.level += 1
def visit_files(files, visitor, ctx):
for name in files:
with io.open(name) as f:
ctx.logger.info("%s", name)
try:
yield visitor().visit(pyjsparser.parse(f.read()))
except Exception as e:
if ctx.logger.isEnabledFor(logging.DEBUG):
ctx.logger.exception("while visiting %s", name)
else:
ctx.logger.error("%s while visiting %s", e, name)
# bunch of modules various bits depend on which are not statically defined
# (or are outside the scope of the system)
ABSTRACT_MODULES = [
jsdoc.ModuleDoc({
'module': 'web.web_client',
'dependency': {'web.AbstractWebClient'},
'exports': jsdoc.NSDoc({
'name': 'web_client',
'doc': 'instance of AbstractWebClient',
}),
}),
jsdoc.ModuleDoc({
'module': 'web.Tour',
'dependency': {'web_tour.TourManager'},
'exports': jsdoc.NSDoc({
'name': 'Tour',
'doc': 'maybe tourmanager instance?',
}),
}),
# OH FOR FUCK'S SAKE
jsdoc.ModuleDoc({
'module': 'summernote/summernote',
'exports': jsdoc.NSDoc({'doc': "totally real summernote"}),
})
]
@click.group(context_settings={'help_option_names': ['-h', '--help']})
@click.option('-v', '--verbose', count=True)
@click.option('-q', '--quiet', count=True)
@click.pass_context
def autojsdoc(ctx, verbose, quiet):
logging.basicConfig(
level=logging.INFO + (quiet - verbose) * 10,
format="[%(levelname)s %(created)f] %(message)s",
)
ctx.logger = logging.getLogger('autojsdoc')
ctx.visitor = None
ctx.files = []
ctx.kw = {}
@autojsdoc.command()
@click.argument('files', type=click.Path(exists=True), nargs=-1)
@click.pass_context
def ast(ctx, files):
""" Prints a structure tree of the provided files
"""
if not files:
print(ctx.get_help())
visit_files(files, lambda: Printer(level=1), ctx.parent)
@autojsdoc.command()
@click.option('-m', '--module', multiple=True, help="Only shows dependencies matching any of the patterns")
@click.argument('files', type=click.Path(exists=True), nargs=-1)
@click.pass_context
def dependencies(ctx, module, files):
""" Prints a dot file of all modules to stdout
"""
if not files:<|fim▁hole|> byname = {
mod.name: mod.dependencies
for mod in ABSTRACT_MODULES
}
for modules in visit_files(files, ModuleMatcher, ctx.parent):
for mod in modules:
byname[mod.name] = mod.dependencies
print('digraph dependencies {')
todo = set()
# if module filters, roots are only matching modules
if module:
for f in module:
todo.update(fnmatch.filter(byname.keys(), f))
for m in todo:
# set a different box for selected roots
print(' "%s" [color=orangered]' % m)
else:
# otherwise check all modules
todo.update(byname)
done = set()
while todo:
node = todo.pop()
if node in done:
continue
done.add(node)
deps = byname[node]
todo.update(deps - done)
for dep in deps:
print(' "%s" -> "%s";' % (node, dep))
print('}')
try:
autojsdoc.main(prog_name='autojsdoc')
except Exception:
print(cgitb.text(sys.exc_info()))<|fim▁end|> | print(ctx.get_help()) |
<|file_name|>resultset.py<|end_file_name|><|fim▁begin|># Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.s3.user import User
class ResultSet(list):
"""
The ResultSet is used to pass results back from the Amazon services
    to the client. It is a light wrapper around Python's :py:class:`list` class,
with some additional methods for parsing XML results from AWS.
Because I don't really want any dependencies on external libraries,
I'm using the standard SAX parser that comes with Python. The good news is
that it's quite fast and efficient but it makes some things rather
difficult.
You can pass in, as the marker_elem parameter, a list of tuples.
Each tuple contains a string as the first element which represents
the XML element that the resultset needs to be on the lookout for
and a Python class as the second element of the tuple. Each time the
specified element is found in the XML, a new instance of the class
will be created and popped onto the stack.
:ivar str next_token: A hash used to assist in paging through very long
result sets. In most cases, passing this value to certain methods
will give you another 'page' of results.
"""
def __init__(self, marker_elem=None):
list.__init__(self)
if isinstance(marker_elem, list):
self.markers = marker_elem
else:
self.markers = []
self.marker = None
self.key_marker = None
self.next_marker = None # avail when delimiter used
self.next_key_marker = None
self.next_upload_id_marker = None
self.next_version_id_marker = None
        self.next_generation_marker = None
self.version_id_marker = None
self.is_truncated = False
self.next_token = None
self.status = True
def startElement(self, name, attrs, connection):
for t in self.markers:
if name == t[0]:
obj = t[1](connection)
self.append(obj)
return obj
if name == 'Owner':
# Makes owner available for get_service and
# perhaps other lists where not handled by
# another element.
self.owner = User()
return self.owner
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'IsTruncated':
self.is_truncated = self.to_boolean(value)
elif name == 'Marker':
self.marker = value<|fim▁hole|> elif name == 'NextKeyMarker':
self.next_key_marker = value
elif name == 'VersionIdMarker':
self.version_id_marker = value
elif name == 'NextVersionIdMarker':
self.next_version_id_marker = value
elif name == 'NextGenerationMarker':
self.next_generation_marker = value
elif name == 'UploadIdMarker':
self.upload_id_marker = value
elif name == 'NextUploadIdMarker':
self.next_upload_id_marker = value
elif name == 'Bucket':
self.bucket = value
elif name == 'MaxUploads':
self.max_uploads = int(value)
elif name == 'MaxItems':
self.max_items = int(value)
elif name == 'Prefix':
self.prefix = value
elif name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'ItemName':
self.append(value)
elif name == 'NextToken':
self.next_token = value
elif name == 'nextToken':
self.next_token = value
# Code exists which expects nextToken to be available, so we
# set it here to remain backwards-compatibile.
self.nextToken = value
elif name == 'BoxUsage':
try:
connection.box_usage += float(value)
except:
pass
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
else:
setattr(self, name, value)
class BooleanResult(object):
def __init__(self, marker_elem=None):
self.status = True
self.request_id = None
self.box_usage = None
def __repr__(self):
if self.status:
return 'True'
else:
return 'False'
def __nonzero__(self):
return self.status
def startElement(self, name, attrs, connection):
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
elif name == 'RequestId':
self.request_id = value
elif name == 'requestId':
self.request_id = value
elif name == 'BoxUsage':
self.request_id = value
else:
setattr(self, name, value)<|fim▁end|> | elif name == 'KeyMarker':
self.key_marker = value
elif name == 'NextMarker':
self.next_marker = value |
<|file_name|>borg_config.py<|end_file_name|><|fim▁begin|>import os
from django.conf import settings
class BorgConfiguration():<|fim▁hole|> def initialize():
setattr(BorgConfiguration,"DEBUG",getattr(settings,"DEBUG",False))
config = getattr(settings,"HARVEST_CONFIG")
if not config:
config = {}
for name, value in config.iteritems():
setattr(BorgConfiguration, name, value)
setattr(BorgConfiguration,"TEST_INPUT_SCHEMA",BorgConfiguration.test_schema(BorgConfiguration.INPUT_SCHEMA))
setattr(BorgConfiguration,"TEST_NORMAL_SCHEMA",BorgConfiguration.test_schema(BorgConfiguration.NORMAL_SCHEMA))
setattr(BorgConfiguration,"TEST_TRANSFORM_SCHEMA",BorgConfiguration.test_schema(BorgConfiguration.TRANSFORM_SCHEMA))
@staticmethod
def test_schema(schema):
return "test_" + schema
BorgConfiguration.initialize()
#import ipdb;ipdb.set_trace()<|fim▁end|> | @staticmethod |
<|file_name|>number.js<|end_file_name|><|fim▁begin|><|fim▁hole|> 'group':".",
'list':";",
'percentSign':"%",
'nativeZeroDigit':"0",
'patternDigit':"#",
'plusSign':"+",
'minusSign':"-",
'exponential':"E",
'perMille':"‰",
'infinity':"∞",
'nan':"NaN",
'decimalFormat':"#,##0.###",
'scientificFormat':"#E0",
'percentFormat':"% #,##0",
'currencyFormat':"#,##0.00 ¤"
})<|fim▁end|> | // generated from ldml/main/*.xml, xpath: ldml/numbers
({
'decimal':",", |
<|file_name|>PrepareForApis.py<|end_file_name|><|fim▁begin|>def main():
info('Evacuate Microbone')
close(description='Jan Inlet')
open(description='Jan Ion Pump')
#close(description='Minibone to Bone')
open(description='Minibone to Bone')
#close(description='Microbone to Minibone')
open(description='Microbone to Minibone')
open('C')
close('P')
close(description='Microbone to CO2 Laser')
open(description='Microbone to Turbo')
open(description='Microbone to Inlet Pipette')
open(description='Microbone to Getter NP-10C')
#evacuate apis section<|fim▁hole|> #sleep(15)
#close(description='Microbone to Getter NP-10H')<|fim▁end|> | #info('evacuate apis')
open(description='Microbone to Getter NP-10H') |
<|file_name|>i18n_plural_pipe.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright Google Inc. All Rights Reserved.<|fim▁hole|> *
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {Pipe, PipeTransform} from '@angular/core';
import {isBlank} from '../facade/lang';
import {NgLocalization, getPluralCategory} from '../localization';
import {InvalidPipeArgumentError} from './invalid_pipe_argument_error';
const _INTERPOLATION_REGEXP: RegExp = /#/g;
/**
* @ngModule CommonModule
* @whatItDoes Maps a value to a string that pluralizes the value according to locale rules.
* @howToUse `expression | i18nPlural:mapping`
* @description
*
* Where:
* - `expression` is a number.
* - `mapping` is an object that mimics the ICU format, see
* http://userguide.icu-project.org/formatparse/messages
*
* ## Example
*
* {@example common/pipes/ts/i18n_pipe.ts region='I18nPluralPipeComponent'}
*
* @experimental
*/
@Pipe({name: 'i18nPlural', pure: true})
export class I18nPluralPipe implements PipeTransform {
constructor(private _localization: NgLocalization) {}
transform(value: number, pluralMap: {[count: string]: string}): string {
if (isBlank(value)) return '';
if (typeof pluralMap !== 'object' || pluralMap === null) {
throw new InvalidPipeArgumentError(I18nPluralPipe, pluralMap);
}
const key = getPluralCategory(value, Object.keys(pluralMap), this._localization);
return pluralMap[key].replace(_INTERPOLATION_REGEXP, value.toString());
}
}<|fim▁end|> | |
<|file_name|>pytest_check_test.py<|end_file_name|><|fim▁begin|>import allure
from allure_commons_test.report import has_test_case
from allure_commons_test.result import with_status, with_message_contains, has_status_details
from hamcrest import assert_that
@allure.issue("376")
@allure.feature("Integration")
def test_pytest_check(allured_testdir):
"""
>>> import pytest_check as check
>>> def test_pytest_check_example():
... check.equal(1, 2, msg="First failure")
... check.equal(1, 2, msg="Second failure")
"""
allured_testdir.parse_docstring_source()
allured_testdir.run_with_allure()
<|fim▁hole|> with_status("failed"),
has_status_details(with_message_contains("First failure"),
with_message_contains("Second failure"))
),
)<|fim▁end|> | assert_that(allured_testdir.allure_report,
has_test_case("test_pytest_check_example", |
<|file_name|>error.rs<|end_file_name|><|fim▁begin|>// Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use hcore;
#[derive(Debug, Fail)]
pub enum Error {<|fim▁hole|> #[fail(display = "Invalid bind specification '{}'", _0)]
InvalidBindSpec(String),
#[fail(display = "Invalid topology '{}'. Possible values: standalone, leader", _0)]
InvalidTopology(String),
#[fail(display = "Invalid binding \"{}\", must be of the form <NAME>:<SERVICE_GROUP> where \
<NAME> is a service name and <SERVICE_GROUP> is a valid service group",
_0)]
InvalidBinding(String),
#[fail(display = "{}", _0)]
HabitatCore(hcore::Error),
}
impl From<hcore::Error> for Error {
fn from(err: hcore::Error) -> Error {
Error::HabitatCore(err)
}
}<|fim▁end|> | |
<|file_name|>on-exit.ts<|end_file_name|><|fim▁begin|>'use strict';
//dts<|fim▁hole|>
//polyfills
const process = require('suman-browser-polyfills/modules/process');
const global = require('suman-browser-polyfills/modules/global');
//core
import fs = require('fs');
import path = require('path');
import EE = require('events');
//npm
import chalk from 'chalk';
const {events} = require('suman-events');
import su = require('suman-utils');
//project
const _suman: IGlobalSumanObj = global.__suman = global.__suman || {};
const resultBroadcaster = _suman.resultBroadcaster = _suman.resultBroadcaster || new EE();
////////////////////////////////////////////////////////////////////////////////////////
export const onExit = function (code: number) {
if (code > 0) {
//make a beep noise if a failing run
resultBroadcaster.emit(String(events.RUNNER_EXIT_CODE_GREATER_THAN_ZERO), code);
}
else {
resultBroadcaster.emit(String(events.RUNNER_EXIT_CODE_IS_ZERO));
}
if (code > 0) {
const logsDir = _suman.sumanConfig.logsDir || _suman.sumanHelperDirRoot + '/logs';
const sumanCPLogs = path.resolve(logsDir + '/runs/');
const logsPath = path.resolve(sumanCPLogs + '/' + _suman.timestamp + '-' + _suman.runId);
console.log('\n', ' => At least one test experienced an error => View the test logs => ',
'\n', chalk.yellow.bold(logsPath), '\n');
}
resultBroadcaster.emit(String(events.RUNNER_EXIT_CODE), code);
//write synchronously to ensure it gets written
fs.appendFileSync(_suman.sumanRunnerStderrStreamPath, '\n\n### Suman runner end ###\n\n');
};<|fim▁end|> | import {IGlobalSumanObj} from "../../../suman-types/dts/global"; |
<|file_name|>error.rs<|end_file_name|><|fim▁begin|>use std::error::Error as StdError;
use std::fmt;
use std::io::Error as IoError;
use std::num::ParseIntError;
use std::result::Result as StdResult;
use std::str::Utf8Error;
/// A specialized Result type for metadata operations.
pub type Result<T> = StdResult<T, Error>;
/// Describes all errors that may occur.
pub enum Error {
    /// An IO error occurred. Contains `std::io::Error`.
Io(IoError),
/// An error when attempting to interpret a sequence of u8 as a string.
FromUtf8(Utf8Error),
/// An error when parsing an integer. Contains `std::num::ParseIntError`.
ParseInt(ParseIntError),
/// Unexpected item kind given while parsing a tag.
BadItemKind,
/// APE header contains invalid tag size.
BadTagSize,
/// Unable to write a tag without items.
EmptyTag,
/// Invalid APE version. It works with APEv2 tags only.
InvalidApeVersion,
/// Item keys can have a length of 2 (including) up to 255 (including) characters.
InvalidItemKeyLen,
/// Item key contains non-ascii characters.
InvalidItemKeyValue,
/// Not allowed are the following keys: ID3, TAG, OggS and MP+.
ItemKeyDenied,
/// There is no APE tag in a file.
TagNotFound,
}
impl StdError for Error {<|fim▁hole|> Error::Io(ref err) => err.description(),
Error::ParseInt(ref err) => err.description(),
Error::FromUtf8(ref err) => err.description(),
Error::BadItemKind => "Unexpected item kind",
Error::BadTagSize => "APE header contains invalid tag size",
Error::EmptyTag => "Unable to perform operations on empty tag",
Error::InvalidApeVersion => "Invalid APE version",
Error::InvalidItemKeyLen => "Item keys can have a length of 2 up to 255 characters",
Error::InvalidItemKeyValue => "Item key contains non-ascii characters",
Error::ItemKeyDenied => "Not allowed are the following keys: ID3, TAG, OggS and MP+",
Error::TagNotFound => "APE tag does not exists",
}
}
fn cause(&self) -> Option<&StdError> {
match *self {
Error::Io(ref err) => Some(err),
Error::ParseInt(ref err) => Some(err),
_ => None,
}
}
}
impl fmt::Debug for Error {
fn fmt(&self, out: &mut fmt::Formatter) -> fmt::Result {
write!(out, "{}", self.description())
}
}
impl fmt::Display for Error {
fn fmt(&self, out: &mut fmt::Formatter) -> fmt::Result {
write!(out, "{}", self.description())
}
}
impl From<IoError> for Error {
fn from(error: IoError) -> Error {
Error::Io(error)
}
}
impl From<ParseIntError> for Error {
fn from(error: ParseIntError) -> Error {
Error::ParseInt(error)
}
}
impl From<Utf8Error> for Error {
fn from(error: Utf8Error) -> Error {
Error::FromUtf8(error)
}
}<|fim▁end|> | fn description(&self) -> &str {
match *self { |
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>extern crate pkg_config;
use pkg_config::find_library;
<|fim▁hole|>fn main() {
if find_library("liblzma").is_ok() {
return
} else {
panic!("Could not find liblzma using pkg-config")
}
}<|fim▁end|> | |
<|file_name|>3_nested_for_loop_with_connector_in_pattern.rs<|end_file_name|><|fim▁begin|><|fim▁hole|> for variable_in_y /* ... */ in 0..1 {}
}
}<|fim▁end|> | fn main() {
for variable_in_x /* ... */ in 0..1 { |
<|file_name|>footerController.js<|end_file_name|><|fim▁begin|>'use strict';
<|fim▁hole|> $('#footer').hide();
$(function () {
$(window).scroll(function () {
// set distance user needs to scroll before we start fadeIn
if ($(this).scrollTop() > 500) {
$('.navbar').fadeIn();
} else {
$('.navbar').fadeOut();
}
});
});
});<|fim▁end|> | angular.module('sportzCast')
.controller('FooterCtrl', function ($scope) { |
<|file_name|>sort.py<|end_file_name|><|fim▁begin|>""" Sorted input and output.
"""
from collections import deque
from operator import itemgetter
from .buffer import _ReaderBuffer
from .buffer import _WriterBuffer
__all__ = "SortReader", "SortWriter"
class _Sorter(object):
""" Abstract base class for SortReader and SortWriter.
"""
def __init__(self, key, group=None):
""" Initialize this object.
The key argument determines sort order and is either a single field
name, a sequence of names, or a key function that returns a key value.
The optional group argument is like the key argument but is used to
group records that are already partially sorted. Records will be sorted
within each group rather than as a single sequence. If the groups are
small relative to the total sequence length this can significantly
improve performance and memory usage.
"""
def keyfunc(key):
""" Create a key function. """
if not key or callable(key):
return key
if isinstance(key, str):
key = (key,)
return itemgetter(*key)
self._get_key = keyfunc(key)
self._get_group = keyfunc(group)
self._group = None
self._buffer = []
self._output = None # initialized by derived classes
return
def _queue(self, record):
""" Process each incoming record.
"""
if self._get_group:
group = self._get_group(record)
if group != self._group:
# This is a new group; process the previous group.
self._flush()
self._group = group
self._buffer.append(record)<|fim▁hole|>
"""
if not self._buffer:
return
self._buffer.sort(key=self._get_key)
self._output = deque(self._buffer)
self._buffer = []
return
class SortReader(_Sorter, _ReaderBuffer):
""" Sort input from another reader.
"""
def __init__(self, reader, key, group=None):
""" Initialize this object.
"""
_Sorter.__init__(self, key, group)
_ReaderBuffer.__init__(self, reader)
return
def _uflow(self):
""" Handle an underflow condition.
This is called when the input reader is exhausted and there are no
records in the output queue.
"""
if not self._buffer:
# All data has been output.
raise StopIteration
self._flush()
return
class SortWriter(_Sorter, _WriterBuffer):
""" Sort output for another writer.
"""
def __init__(self, writer, key, group=None):
""" Initialize this object.
"""
_Sorter.__init__(self, key, group)
_WriterBuffer.__init__(self, writer)
return<|fim▁end|> | return
def _flush(self):
""" Send sorted records to the output queue. |
<|file_name|>WorkerCallable.java<|end_file_name|><|fim▁begin|>/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package sw13.executorNegative;
import java.util.concurrent.Callable;
/**
*
* @author Steve Ineichen
*/<|fim▁hole|>public class WorkerCallable implements Callable {
final private Worker worker;
public WorkerCallable(Worker worker) {
this.worker = worker;
}
@Override
public Integer call() throws InterruptedException {
worker.doWork();
return worker.getResult();
}
}<|fim▁end|> | |
<|file_name|>stdafx.cpp<|end_file_name|><|fim▁begin|>// stdafx.cpp : Ö»°üÀ¨±ê×¼°üº¬ÎļþµÄÔ´Îļþ
// testsocket.pch will be the pre-compiled header
// stdafx.obj ½«°üº¬Ô¤±àÒëÀàÐÍÐÅÏ¢<|fim▁hole|><|fim▁end|> |
#include "stdafx.h" |
<|file_name|>user_update_eligibility.py<|end_file_name|><|fim▁begin|>from datetime import timedelta
import logging
from django.utils.timezone import now
from django.core.management.base import BaseCommand
from TWLight.users.models import Editor
from TWLight.users.helpers.editor_data import (
editor_global_userinfo,
editor_valid,
editor_enough_edits,
editor_not_blocked,
editor_bundle_eligible,
editor_account_old_enough,
)
<|fim▁hole|> help = "Updates editor info and Bundle eligibility for currently-eligible Editors."
def add_arguments(self, parser):
"""
Adds command arguments.
"""
parser.add_argument(
"--datetime",
action="store",
help="ISO datetime used for calculating eligibility. Defaults to now. Currently only used for backdating command runs in tests.",
)
parser.add_argument(
"--global_userinfo",
action="store",
help="Specify Wikipedia global_userinfo data. Defaults to fetching live data. Currently only used for faking command runs in tests.",
)
parser.add_argument(
"--timedelta_days",
action="store",
help="Number of days used to define 'recent' edits. Defaults to 30. Currently only used for faking command runs in tests.",
)
parser.add_argument(
"--wp_username",
action="store",
help="Specify a single editor to update. Other arguments and filters still apply.",
)
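    # Hypothetical invocation, assuming this file lives in a Django app's
    # management/commands directory:
    #   python manage.py user_update_eligibility --timedelta_days 30 --wp_username ExampleUser
    # which restricts the run to that editor, and only if their data has not been
    # refreshed in the last 30 days.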
def handle(self, *args, **options):
"""
Updates editor info and Bundle eligibility for currently-eligible Editors.
Parameters
----------
args
options
Returns
-------
None
"""
# Default behavior is to use current datetime for timestamps to check all editors.
now_or_datetime = now()
datetime_override = None
timedelta_days = 0
wp_username = None
editors = Editor.objects.all()
# This may be overridden so that values may be treated as if they were valid for an arbitrary datetime.
# This is also passed to the model method.
if options["datetime"]:
datetime_override = now_or_datetime.fromisoformat(options["datetime"])
now_or_datetime = datetime_override
# These are used to limit the set of editors updated by the command.
# Nothing is passed to the model method.
if options["timedelta_days"]:
timedelta_days = int(options["timedelta_days"])
# Get editors that haven't been updated in the specified time range, with an option to limit on wp_username.
if timedelta_days:
editors = editors.exclude(
editorlogs__timestamp__gt=now_or_datetime
- timedelta(days=timedelta_days),
)
# Optional wp_username filter.
if options["wp_username"]:
editors = editors.filter(wp_username=str(options["wp_username"]))
# Iterator reduces memory footprint for large querysets
for editor in editors.iterator():
# T296853: avoid stale editor data while looping through big sets.
editor.refresh_from_db()
# `global_userinfo` data may be overridden.
if options["global_userinfo"]:
global_userinfo = options["global_userinfo"]
editor.check_sub(global_userinfo["id"])
# Default behavior is to fetch live `global_userinfo`
else:
global_userinfo = editor_global_userinfo(editor.wp_sub)
if global_userinfo:
editor.update_editcount(global_userinfo["editcount"], datetime_override)
# Determine editor validity.
editor.wp_enough_edits = editor_enough_edits(editor.wp_editcount)
editor.wp_not_blocked = editor_not_blocked(global_userinfo["merged"])
# We will only check if the account is old enough if the value is False
# Accounts that are already old enough will never cease to be old enough
if not editor.wp_account_old_enough:
editor.wp_account_old_enough = editor_account_old_enough(
editor.wp_registered
)
editor.wp_valid = editor_valid(
editor.wp_enough_edits,
editor.wp_account_old_enough,
# editor.wp_not_blocked can only be rechecked on login, so we're going with the existing value.
editor.wp_not_blocked,
editor.ignore_wp_blocks,
)
# Determine Bundle eligibility.
editor.wp_bundle_eligible = editor_bundle_eligible(editor)
# Save editor.
editor.save()
# Prune EditorLogs, with daily_prune_range set to only check the previous day to improve performance.
editor.prune_editcount(
current_datetime=datetime_override, daily_prune_range=2
)
# Update bundle authorizations.
editor.update_bundle_authorization()<|fim▁end|> | logger = logging.getLogger(__name__)
class Command(BaseCommand): |
<|file_name|>color-field-adv-init.es5.js<|end_file_name|><|fim▁begin|>/**
* @copyright (C) 2016 Open Source Matters, Inc. <https://www.joomla.org>
* @license GNU General Public License version 2 or later; see LICENSE.txt
*/
!(function(document, $) {
"use strict";
function initMinicolorsField (event) {
$(event.target).find('.minicolors').each(function() {
$(this).minicolors({
control: $(this).attr('data-control') || 'hue',<|fim▁hole|> ? 'rgb'
: $(this).attr('data-format'))
|| 'hex',
keywords: $(this).attr('data-keywords') || '',
opacity: $(this).attr('data-format') === 'rgba',
position: $(this).attr('data-position') || 'default',
swatches: $(this).attr('data-colors') ? $(this).attr('data-colors').split(",") : [],
theme: 'bootstrap'
});
});
}
/**
* Initialize at an initial page load
*/
document.addEventListener("DOMContentLoaded", initMinicolorsField);
/**
* Initialize when a part of the page was updated
*/
document.addEventListener("joomla:updated", initMinicolorsField);
})(document, jQuery);<|fim▁end|> | format: $(this).attr('data-validate') === 'color'
? 'hex'
: ($(this).attr('data-format') === 'rgba' |
<|file_name|>preprocess_bert.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
"""Creates training data for the BERT network training
(noisified + masked gold predictions) using the input corpus.
<|fim▁hole|>We only leave `coverage` percent of symbols for classification. These
symbols are left unchanged on input with a probability of `1 - mask_prob`.
If they are being changed, they are replaced by the `mask_token` with a
probability of `1 - replace_prob` and by a random vocabulary token otherwise.
"""
import argparse
import os
import numpy as np
from neuralmonkey.logging import log as _log
from neuralmonkey.vocabulary import (
Vocabulary, PAD_TOKEN, UNK_TOKEN, from_wordlist)
def log(message: str, color: str = "blue") -> None:
_log(message, color)
def main() -> None:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--input_file", type=str, default="/dev/stdin")
parser.add_argument("--vocabulary", type=str, required=True)
parser.add_argument("--output_prefix", type=str, default=None)
parser.add_argument("--mask_token", type=str, default=UNK_TOKEN,
help="token used to mask the tokens")
parser.add_argument("--coverage", type=float, default=0.15,
help=("percentage of tokens that should be left "
"for classification during training"))
parser.add_argument("--mask_prob", type=float, default=0.8,
help=("probability of the classified token being "
"replaced by a different token on input"))
parser.add_argument("--replace_prob", type=float, default=0.1,
help=("probability of the classified token being "
"replaced by a random token instead of "
"mask_token"))
parser.add_argument("--vocab_contains_header", type=bool, default=True)
parser.add_argument("--vocab_contains_frequencies",
type=bool, default=True)
args = parser.parse_args()
assert (args.coverage <= 1 and args.coverage >= 0)
assert (args.mask_prob <= 1 and args.mask_prob >= 0)
assert (args.replace_prob <= 1 and args.replace_prob >= 0)
log("Loading vocabulary.")
vocabulary = from_wordlist(
args.vocabulary,
contains_header=args.vocab_contains_header,
contains_frequencies=args.vocab_contains_frequencies)
mask_prob = args.mask_prob
replace_prob = args.replace_prob
keep_prob = 1 - mask_prob - replace_prob
sample_probs = (keep_prob, mask_prob, replace_prob)
output_prefix = args.output_prefix
if output_prefix is None:
output_prefix = args.input_file
out_f_noise = "{}.noisy".format(output_prefix)
out_f_mask = "{}.mask".format(output_prefix)
out_noise_h = open(out_f_noise, "w", encoding="utf-8")
out_mask_h = open(out_f_mask, "w", encoding="utf-8")
log("Processing data.")
with open(args.input_file, "r", encoding="utf-8") as input_h:
# TODO: performance optimizations
for line in input_h:
line = line.strip().split(" ")
num_samples = int(args.coverage * len(line))
sampled_indices = np.random.choice(len(line), num_samples, False)
output_noisy = list(line)
output_masked = [PAD_TOKEN] * len(line)
for i in sampled_indices:
random_token = np.random.choice(vocabulary.index_to_word[4:])
new_token = np.random.choice(
[line[i], args.mask_token, random_token], p=sample_probs)
output_noisy[i] = new_token
output_masked[i] = line[i]
out_noise_h.write(str(" ".join(output_noisy)) + "\n")
out_mask_h.write(str(" ".join(output_masked)) + "\n")
if __name__ == "__main__":
main()<|fim▁end|> | The masked Gold predictions use Neural Monkey's PAD_TOKEN to indicate
tokens that should not be classified during training.
|
<|file_name|>example_5.py<|end_file_name|><|fim▁begin|>"Run the keyword-names example pipeline, which has keyword-style inputs."
import kiveapi
import example_tools
# Use HTTPS on a real server, so your password is encrypted.
# Don't put your real password in source code, store it in a text file
# that is only readable by your user account or some more secure storage.
session = kiveapi.KiveAPI("http://localhost:8000")
session.login('kive', 'kive')
# Get datasets to collate
names_dataset = session.find_datasets(name="example_names.csv")[0]
salutations_dataset = session.find_datasets(name="salutations.csv")[0]
# Get the collation app from the samplecode container
kwsalutationsapp = session.endpoints.containerapps.filter("name", "kw_salutations")[0]
appargs = session.get(kwsalutationsapp["argument_list"]).json()
# Start a run of the app providing the datasets as arguments
inputargs = {a["name"]: a["url"] for a in appargs if a["type"] == "I"}
runspec = {
"name": "API Example 5",
"app": kwsalutationsapp["url"],
"datasets": [
{
"argument": inputargs["names"],
"dataset": names_dataset.raw["url"],<|fim▁hole|> "dataset": salutations_dataset.raw["url"],
},
]
}
print("Starting example run...")
containerrun = session.endpoints.containerruns.post(json=runspec)
# Monitor the run for completion
containerrun = example_tools.await_containerrun(session, containerrun)
# Retrieve the output and save it to a file
run_datasets = session.get(containerrun["dataset_list"]).json()
for run_dataset in run_datasets:
if run_dataset.get("argument_type") == "O":
dataset = session.get(run_dataset["dataset"]).json()
filename = dataset["name"]
print(f" downloading {filename}")
with open(filename, "wb") as outf:
session.download_file(outf, dataset["download_url"])
print("Example run finished")<|fim▁end|> | },
{
"argument": inputargs["salutations"], |
<|file_name|>ceres.py<|end_file_name|><|fim▁begin|># Copyright 2011 Chris Davis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Ceres requires Python 2.7 or newer
import itertools
import os
import struct
import json
import errno
from math import isnan
from os.path import isdir, exists, join, dirname, abspath, getsize, getmtime
from glob import glob
from bisect import bisect_left
izip = getattr(itertools, 'izip', zip)
try:
import fcntl
CAN_LOCK = True
except ImportError:
CAN_LOCK = False
LOCK_WRITES = False
TIMESTAMP_FORMAT = "!L"
TIMESTAMP_SIZE = struct.calcsize(TIMESTAMP_FORMAT)
DATAPOINT_FORMAT = "!d"
DATAPOINT_SIZE = struct.calcsize(DATAPOINT_FORMAT)
NAN = float('nan')
PACKED_NAN = struct.pack(DATAPOINT_FORMAT, NAN)
MAX_SLICE_GAP = 80
DEFAULT_TIMESTEP = 60
DEFAULT_NODE_CACHING_BEHAVIOR = 'all'
DEFAULT_SLICE_CACHING_BEHAVIOR = 'none'
SLICE_AGGREGATION_METHODS = ['average', 'sum', 'last', 'max', 'min']
SLICE_PERMS = 0o644
DIR_PERMS = 0o755
class CeresTree(object):
"""Represents a tree of Ceres metrics contained within a single path on disk
This is the primary Ceres API.
:param root: The directory root of the Ceres tree
.. note:: Use :func:`createTree` to initialize and instantiate a new CeresTree
.. seealso:: :func:`setDefaultNodeCachingBehavior` to adjust caching behavior
"""
def __init__(self, root):
if isdir(root):
self.root = abspath(root)
else:
raise ValueError("Invalid root directory '%s'" % root)
self.nodeCache = {}
self.nodeCachingBehavior = DEFAULT_NODE_CACHING_BEHAVIOR
def __repr__(self):
return "<CeresTree[0x%x]: %s>" % (id(self), self.root)
__str__ = __repr__
@classmethod
def createTree(cls, root, **props):
"""Create and returns a new Ceres tree with the given properties
:param root: The root directory of the new Ceres tree
:param \*\*props: Arbitrary key-value properties to store as tree metadata
:returns: :class:`CeresTree`
"""
ceresDir = join(root, '.ceres-tree')
if not isdir(ceresDir):
os.makedirs(ceresDir, DIR_PERMS)
for prop, value in props.items():
propFile = join(ceresDir, prop)
with open(propFile, 'w') as fh:
fh.write(str(value))
return cls(root)
def walk(self, **kwargs):
"""Iterate through the nodes contained in this :class:`CeresTree`
:param \*\*kwargs: Options to pass to :func:`os.walk`
:returns: An iterator yielding :class:`CeresNode` objects
"""
for (fsPath, subdirs, filenames) in os.walk(self.root, **kwargs):
if CeresNode.isNodeDir(fsPath):
nodePath = self.getNodePath(fsPath)
yield CeresNode(self, nodePath, fsPath)
def getFilesystemPath(self, nodePath):
"""Get the on-disk path of a Ceres node given a metric name
:param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``
:returns: The Ceres node path on disk"""
return join(self.root, nodePath.replace('.', os.sep))
def getNodePath(self, fsPath):
"""Get the metric name of a Ceres node given the on-disk path
:param fsPath: The filesystem path of a Ceres node
:returns: A metric name
:raises ValueError: When `fsPath` is not a path within the :class:`CeresTree`
"""
fsPath = abspath(fsPath)
if not fsPath.startswith(self.root):
raise ValueError("path '%s' not beneath tree root '%s'" % (fsPath, self.root))
nodePath = fsPath[len(self.root):].strip(os.sep).replace(os.sep, '.')
return nodePath
def hasNode(self, nodePath):
"""Returns whether the Ceres tree contains the given metric
:param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``
:returns: `True` or `False`"""
return isdir(self.getFilesystemPath(nodePath))
def setNodeCachingBehavior(self, behavior):
"""Set node caching behavior.
:param behavior: See :func:`getNode` for valid behavior values
"""
behavior = behavior.lower()
if behavior not in ('none', 'all'):
raise ValueError("invalid caching behavior '%s'" % behavior)
self.nodeCachingBehavior = behavior
self.nodeCache = {}
def getNode(self, nodePath):
"""Returns a Ceres node given a metric name. Because nodes are looked up in
every read and write, a caching mechanism is provided. Cache behavior is set
using :func:`setNodeCachingBehavior` and defaults to the value set in
``DEFAULT_NODE_CACHING_BEHAVIOR``
The following behaviors are available:
* `none` - Node is read from the filesystem at every access.
* `all` (default) - All nodes are cached.
:param nodePath: A metric name
:returns: :class:`CeresNode` or `None`
"""
if self.nodeCachingBehavior == 'all':
if nodePath not in self.nodeCache:
fsPath = self.getFilesystemPath(nodePath)
if CeresNode.isNodeDir(fsPath):
self.nodeCache[nodePath] = CeresNode(self, nodePath, fsPath)
else:
return None
return self.nodeCache[nodePath]
elif self.nodeCachingBehavior == 'none':
fsPath = self.getFilesystemPath(nodePath)
if CeresNode.isNodeDir(fsPath):
return CeresNode(self, nodePath, fsPath)
else:
return None
else:
raise ValueError("invalid caching behavior configured '%s'" % self.nodeCachingBehavior)
def find(self, nodePattern, fromTime=None, untilTime=None):
"""Find nodes which match a wildcard pattern, optionally filtering on
a time range
:param nodePattern: A glob-style metric wildcard
:param fromTime: Optional interval start time in unix-epoch.
:param untilTime: Optional interval end time in unix-epoch.
:returns: An iterator yielding :class:`CeresNode` objects
"""
for fsPath in glob(self.getFilesystemPath(nodePattern)):
if CeresNode.isNodeDir(fsPath):
nodePath = self.getNodePath(fsPath)
node = self.getNode(nodePath)
if fromTime is None and untilTime is None:
yield node
elif node.hasDataForInterval(fromTime, untilTime):
yield node
def createNode(self, nodePath, **properties):
"""Creates a new metric given a new metric name and optional per-node metadata
:param nodePath: The new metric name.
:param \*\*properties: Arbitrary key-value properties to store as metric metadata.
:returns: :class:`CeresNode`
"""
return CeresNode.create(self, nodePath, **properties)
def store(self, nodePath, datapoints):
"""Store a list of datapoints associated with a metric
:param nodePath: The metric name to write to e.g. ``carbon.agents.graphite-a.cpuUsage``
:param datapoints: A list of datapoint tuples: ``[(timestamp, value), ...]``
"""
node = self.getNode(nodePath)
if node is None:
raise NodeNotFound("The node '%s' does not exist in this tree" % nodePath)
node.write(datapoints)
def fetch(self, nodePath, fromTime, untilTime):
"""Fetch data within a given interval from the given metric
:param nodePath: The metric name to fetch from
:param fromTime: Requested interval start time in unix-epoch.
:param untilTime: Requested interval end time in unix-epoch.
:returns: :class:`TimeSeriesData`
:raises: :class:`NodeNotFound`, :class:`InvalidRequest`
"""
node = self.getNode(nodePath)
if not node:
raise NodeNotFound("the node '%s' does not exist in this tree" % nodePath)
return node.read(fromTime, untilTime)
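  # A minimal end-to-end sketch (path, metric name and values are illustrative only):
  #   tree = CeresTree.createTree('/tmp/ceres-example')
  #   tree.createNode('example.metric', timeStep=60)
  #   tree.store('example.metric', [(1200000000, 1.0), (1200000060, 2.0)])
  #   series = tree.fetch('example.metric', 1200000000, 1200000120)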
class CeresNode(object):
"""A :class:`CeresNode` represents a single time-series metric of a given `timeStep`
(its seconds-per-point resolution) and containing arbitrary key-value metadata.
A :class:`CeresNode` is associated with its most precise `timeStep`. This `timeStep` is the finest
resolution that can be used for writing, though a :class:`CeresNode` can contain and read data with
other, less-precise `timeStep` values in its underlying :class:`CeresSlice` data.
:param tree: The :class:`CeresTree` this node is associated with
:param nodePath: The name of the metric this node represents
:param fsPath: The filesystem path of this metric
.. note:: This class generally should be instantiated through use of :class:`CeresTree`. See
:func:`CeresTree.createNode` and :func:`CeresTree.getNode`
.. seealso:: :func:`setDefaultSliceCachingBehavior` to adjust caching behavior
"""
__slots__ = ('tree', 'nodePath', 'fsPath',
'metadataFile', 'timeStep', 'aggregationMethod',
'sliceCache', 'sliceCachingBehavior')
def __init__(self, tree, nodePath, fsPath):
self.tree = tree
self.nodePath = nodePath
self.fsPath = fsPath
self.metadataFile = join(fsPath, '.ceres-node')
self.timeStep = None
self.aggregationMethod = 'average'
self.sliceCache = None
self.sliceCachingBehavior = DEFAULT_SLICE_CACHING_BEHAVIOR
def __repr__(self):
return "<CeresNode[0x%x]: %s>" % (id(self), self.nodePath)
__str__ = __repr__
@classmethod
def create(cls, tree, nodePath, **properties):
"""Create a new :class:`CeresNode` on disk with the specified properties.
:param tree: The :class:`CeresTree` this node is associated with
:param nodePath: The name of the metric this node represents
:param \*\*properties: A set of key-value properties to be associated with this node
A :class:`CeresNode` always has the `timeStep` property which is an integer value representing
the precision of the node in seconds-per-datapoint. E.g. a value of ``60`` represents one datapoint
per minute. If no `timeStep` is specified at creation, the value of ``ceres.DEFAULT_TIMESTEP`` is
used
:returns: :class:`CeresNode`
"""
# Create the node directory
fsPath = tree.getFilesystemPath(nodePath)
os.makedirs(fsPath, DIR_PERMS)
properties['timeStep'] = properties.get('timeStep', DEFAULT_TIMESTEP)
# Create the initial metadata
node = cls(tree, nodePath, fsPath)
node.writeMetadata(properties)
# Create the initial data file
# timeStep = properties['timeStep']
# now = int( time.time() )
# baseTime = now - (now % timeStep)
# slice = CeresSlice.create(node, baseTime, timeStep)
return node
@staticmethod
def isNodeDir(path):
"""Tests whether the given path is a :class:`CeresNode`
:param path: Path to test
:returns `True` or `False`
"""
return isdir(path) and exists(join(path, '.ceres-node'))
@classmethod
def fromFilesystemPath(cls, fsPath):
"""Instantiate a :class:`CeresNode` from the on-disk path of an existing node
:params fsPath: The filesystem path of an existing node
:returns: :class:`CeresNode`
"""
dirPath = dirname(fsPath)
while True:
ceresDir = join(dirPath, '.ceres-tree')
if isdir(ceresDir):
tree = CeresTree(dirPath)
nodePath = tree.getNodePath(fsPath)
return cls(tree, nodePath, fsPath)
dirPath = dirname(dirPath)
if dirPath == '/':
raise ValueError("the path '%s' is not in a ceres tree" % fsPath)
@property
def slice_info(self):
"""A property providing a list of current information about each slice
:returns: ``[(startTime, endTime, timeStep), ...]``
"""
return [(slice.startTime, slice.endTime, slice.timeStep) for slice in self.slices]
def readMetadata(self):
"""Update node metadata from disk
:raises: :class:`CorruptNode`
"""
with open(self.metadataFile, 'r') as fh:
try:
metadata = json.load(fh)
self.timeStep = int(metadata['timeStep'])
if metadata.get('aggregationMethod'):
self.aggregationMethod = metadata['aggregationMethod']
return metadata
except (KeyError, IOError, ValueError) as e:
raise CorruptNode(self, "Unable to parse node metadata: %s" % e.args)
def writeMetadata(self, metadata):
"""Writes new metadata to disk
:param metadata: a JSON-serializable dict of node metadata
"""
self.timeStep = int(metadata['timeStep'])
with open(self.metadataFile, 'w') as fh:
json.dump(metadata, fh)
@property
def slices(self):
"""A property providing access to information about this node's underlying slices. Because this
information is accessed in every read and write, a caching mechanism is provided. Cache behavior is
set using :func:`setSliceCachingBehavior` and defaults to the value set in
``DEFAULT_SLICE_CACHING_BEHAVIOR``
The following behaviors are available:
* `none` (default) - Slice information is read from the filesystem at every access
* `latest` - The latest slice is served from cache, all others from disk. Reads and writes of recent
data are most likely to be in the latest slice
* `all` - All slices are cached. The cache is only refreshed on new slice creation or deletion
:returns: ``[(startTime, timeStep), ...]``
"""
if self.sliceCache:
if self.sliceCachingBehavior == 'all':
for slice in self.sliceCache:
yield slice
elif self.sliceCachingBehavior == 'latest':
yield self.sliceCache
infos = self.readSlices()
for info in infos[1:]:
yield CeresSlice(self, *info)
else:
if self.sliceCachingBehavior == 'all':
self.sliceCache = [CeresSlice(self, *info) for info in self.readSlices()]
for slice in self.sliceCache:
yield slice
elif self.sliceCachingBehavior == 'latest':
infos = self.readSlices()
if infos:
self.sliceCache = CeresSlice(self, *infos[0])
yield self.sliceCache
for info in infos[1:]:
yield CeresSlice(self, *info)
elif self.sliceCachingBehavior == 'none':
for info in self.readSlices():
yield CeresSlice(self, *info)
else:
raise ValueError("invalid caching behavior configured '%s'" % self.sliceCachingBehavior)
def readSlices(self):
"""Read slice information from disk
:returns: ``[(startTime, timeStep), ...]``
"""
if not exists(self.fsPath):
raise NodeDeleted()
slice_info = []
for filename in os.listdir(self.fsPath):
if filename.endswith('.slice'):
startTime, timeStep = filename[:-6].split('@')
slice_info.append((int(startTime), int(timeStep)))
slice_info.sort(reverse=True)
return slice_info
def setSliceCachingBehavior(self, behavior):
"""Set slice caching behavior.
:param behavior: See :func:`slices` for valid behavior values
"""
behavior = behavior.lower()
if behavior not in ('none', 'all', 'latest'):
raise ValueError("invalid caching behavior '%s'" % behavior)
self.sliceCachingBehavior = behavior
self.sliceCache = None
def clearSliceCache(self):
"""Clear slice cache, forcing a refresh from disk at the next access"""
self.sliceCache = None
def hasDataForInterval(self, fromTime, untilTime):
"""Test whether this node has any data in the given time interval. All slices are inspected
which will trigger a read of slice information from disk if slice cache behavior is set to `latest`
or `none` (See :func:`slices`)
:param fromTime: Beginning of interval in unix epoch seconds
:param untilTime: End of interval in unix epoch seconds
:returns `True` or `False`
"""
slices = list(self.slices)
if not slices:
return False
earliestData = slices[-1].startTime
latestData = slices[0].endTime
return ((fromTime is None) or (fromTime < latestData)) and \
((untilTime is None) or (untilTime > earliestData))
def read(self, fromTime, untilTime):
"""Read data from underlying slices and return as a single time-series
:param fromTime: Beginning of interval in unix epoch seconds
:param untilTime: End of interval in unix epoch seconds
:returns: :class:`TimeSeriesData`
"""
if self.timeStep is None:
self.readMetadata()
# Normalize the timestamps to fit proper intervals
fromTime = int(fromTime - (fromTime % self.timeStep))
untilTime = int(untilTime - (untilTime % self.timeStep))
sliceBoundary = None # to know when to split up queries across slices
resultValues = []
earliestData = None
timeStep = self.timeStep
method = self.aggregationMethod
for slice in self.slices:
      # If there was a prior slice covering the requested interval, don't ask for that data again
if (sliceBoundary is not None) and untilTime > sliceBoundary:
requestUntilTime = sliceBoundary
else:
requestUntilTime = untilTime
# if the requested interval starts after the start of this slice
if fromTime >= slice.startTime:
try:
series = slice.read(fromTime, requestUntilTime)
except NoData:
break
if series.timeStep != timeStep:
if len(resultValues) == 0:
# First slice holding series data, this becomes the default timeStep.
timeStep = series.timeStep
elif series.timeStep < timeStep:
# Series is at a different precision, aggregate to fit our current set.
series.values = aggregateSeries(method, series.timeStep, timeStep, series.values)
else:
# Normalize current set to fit new series data.
resultValues = aggregateSeries(method, timeStep, series.timeStep, resultValues)
timeStep = series.timeStep
earliestData = series.startTime
rightMissing = (requestUntilTime - series.endTime) // timeStep
rightNulls = [None for i in range(rightMissing)]
resultValues = series.values + rightNulls + resultValues
break
# or if slice contains data for part of the requested interval
elif untilTime >= slice.startTime:
try:
series = slice.read(slice.startTime, requestUntilTime)
except NoData:
continue
if series.timeStep != timeStep:
if len(resultValues) == 0:
# First slice holding series data, this becomes the default timeStep.
timeStep = series.timeStep
elif series.timeStep < timeStep:
# Series is at a different precision, aggregate to fit our current set.
series.values = aggregateSeries(method, series.timeStep, timeStep, series.values)
else:
# Normalize current set to fit new series data.
resultValues = aggregateSeries(method, timeStep, series.timeStep, resultValues)
timeStep = series.timeStep
earliestData = series.startTime
rightMissing = (requestUntilTime - series.endTime) // timeStep
rightNulls = [None for i in range(rightMissing)]
resultValues = series.values + rightNulls + resultValues
# this is the right-side boundary on the next iteration
sliceBoundary = slice.startTime
# The end of the requested interval predates all slices
if earliestData is None:
missing = int(untilTime - fromTime) // timeStep
resultValues = [None for i in range(missing)]
# Left pad nulls if the start of the requested interval predates all slices
else:
leftMissing = (earliestData - fromTime) // timeStep
leftNulls = [None for i in range(leftMissing)]
resultValues = leftNulls + resultValues
return TimeSeriesData(fromTime, untilTime, timeStep, resultValues)
def write(self, datapoints):
"""Writes datapoints to underlying slices. Datapoints that round to the same timestamp for the
node's `timeStep` will be treated as duplicates and dropped.
:param datapoints: List of datapoint tuples ``[(timestamp, value), ...]``
"""
if self.timeStep is None:
self.readMetadata()
if not datapoints:
return
sequences = self.compact(datapoints)
needsEarlierSlice = [] # keep track of sequences that precede all existing slices
while sequences:
sequence = sequences.pop()
timestamps = [t for t, v in sequence]
beginningTime = timestamps[0]
endingTime = timestamps[-1]
sliceBoundary = None # used to prevent writing sequences across slice boundaries
slicesExist = False
for slice in self.slices:
if slice.timeStep != self.timeStep:
continue
slicesExist = True
# truncate sequence so it doesn't cross the slice boundaries
if beginningTime >= slice.startTime:
if sliceBoundary is None:
sequenceWithinSlice = sequence
else:
# index of highest timestamp that doesn't exceed sliceBoundary
boundaryIndex = bisect_left(timestamps, sliceBoundary)
sequenceWithinSlice = sequence[:boundaryIndex]
try:
slice.write(sequenceWithinSlice)
except SliceGapTooLarge:
newSlice = CeresSlice.create(self, beginningTime, slice.timeStep)
newSlice.write(sequenceWithinSlice)
self.sliceCache = None
except SliceDeleted:
self.sliceCache = None
self.write(datapoints) # recurse to retry
return
sequence = []
break
# sequence straddles the current slice, write the right side
# left side will be taken up in the next slice down
elif endingTime >= slice.startTime:
# index of lowest timestamp that doesn't precede slice.startTime
boundaryIndex = bisect_left(timestamps, slice.startTime)
sequenceWithinSlice = sequence[boundaryIndex:]
# write the leftovers on the next earlier slice
sequence = sequence[:boundaryIndex]
slice.write(sequenceWithinSlice)
if not sequence:
break
sliceBoundary = slice.startTime
else: # slice list exhausted with stuff still to write
needsEarlierSlice.append(sequence)
if not slicesExist:
sequences.append(sequence)
needsEarlierSlice = sequences
break
for sequence in needsEarlierSlice:
slice = CeresSlice.create(self, int(sequence[0][0]), self.timeStep)
slice.write(sequence)
self.clearSliceCache()
def compact(self, datapoints):
"""Compacts datapoints into a list of contiguous, sorted lists of points with duplicate
timestamps and null values removed
:param datapoints: List of datapoint tuples ``[(timestamp, value), ...]``
:returns: A list of lists of contiguous sorted datapoint tuples
``[[(timestamp, value), ...], ...]``
"""
datapoints = sorted(((int(timestamp), float(value))
for timestamp, value in datapoints if value is not None),
key=lambda datapoint: datapoint[0])
sequences = []
sequence = []
minimumTimestamp = 0 # used to avoid duplicate intervals
for timestamp, value in datapoints:
timestamp -= timestamp % self.timeStep # round it down to a proper interval
if not sequence:
sequence.append((timestamp, value))
else:
if timestamp == minimumTimestamp: # overwrite duplicate intervals with latest value
sequence[-1] = (timestamp, value)
continue
if timestamp == sequence[-1][0] + self.timeStep: # append contiguous datapoints
sequence.append((timestamp, value))
else: # start a new sequence if not contiguous
sequences.append(sequence)
sequence = [(timestamp, value)]
minimumTimestamp = timestamp
if sequence:
sequences.append(sequence)
return sequences
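  # Illustrative call (values invented): with timeStep=60,
  #   compact([(60, 1.0), (120, 2.0), (300, 3.0)])
  # returns [[(60, 1.0), (120, 2.0)], [(300, 3.0)]] - two contiguous, sorted runs.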
class CeresSlice(object):
__slots__ = ('node', 'startTime', 'timeStep', 'fsPath')
def __init__(self, node, startTime, timeStep):
self.node = node
self.startTime = startTime
self.timeStep = timeStep
self.fsPath = join(node.fsPath, '%d@%d.slice' % (startTime, timeStep))
def __repr__(self):
return "<CeresSlice[0x%x]: %s>" % (id(self), self.fsPath)
__str__ = __repr__
@property
def isEmpty(self):
return getsize(self.fsPath) == 0
@property
def endTime(self):
return self.startTime + ((getsize(self.fsPath) // DATAPOINT_SIZE) * self.timeStep)
@property
def mtime(self):
return getmtime(self.fsPath)
@classmethod
def create(cls, node, startTime, timeStep):
slice = cls(node, startTime, timeStep)
fileHandle = open(slice.fsPath, 'wb')
fileHandle.close()
os.chmod(slice.fsPath, SLICE_PERMS)
return slice
def read(self, fromTime, untilTime):
timeOffset = int(fromTime) - self.startTime
if timeOffset < 0:
raise InvalidRequest("requested time range (%d, %d) precedes this slice: %d" % (
fromTime, untilTime, self.startTime))
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
if byteOffset >= getsize(self.fsPath):
raise NoData()
with open(self.fsPath, 'rb') as fileHandle:
fileHandle.seek(byteOffset)
timeRange = int(untilTime - fromTime)
pointRange = timeRange // self.timeStep
byteRange = pointRange * DATAPOINT_SIZE
packedValues = fileHandle.read(byteRange)
pointsReturned = len(packedValues) // DATAPOINT_SIZE
format = '!' + ('d' * pointsReturned)
values = struct.unpack(format, packedValues)
values = [v if not isnan(v) else None for v in values]
endTime = fromTime + (len(values) * self.timeStep)
# print '[DEBUG slice.read] startTime=%s fromTime=%s untilTime=%s' % (
# self.startTime, fromTime, untilTime)
# print '[DEBUG slice.read] timeInfo = (%s, %s, %s)' % (fromTime, endTime, self.timeStep)
# print '[DEBUG slice.read] values = %s' % str(values)
return TimeSeriesData(fromTime, endTime, self.timeStep, values)
def write(self, sequence):
beginningTime = sequence[0][0]
timeOffset = beginningTime - self.startTime
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
values = [v for t, v in sequence]
format = '!' + ('d' * len(values))
packedValues = struct.pack(format, *values)
try:
filesize = getsize(self.fsPath)
except OSError as e:
if e.errno == errno.ENOENT:
raise SliceDeleted()
else:
raise
byteGap = byteOffset - filesize
if byteGap > 0: # pad the allowable gap with nan's
pointGap = byteGap // DATAPOINT_SIZE
if pointGap > MAX_SLICE_GAP:
raise SliceGapTooLarge()
else:
packedGap = PACKED_NAN * pointGap
packedValues = packedGap + packedValues
byteOffset -= byteGap
with open(self.fsPath, 'r+b') as fileHandle:
if LOCK_WRITES:
fcntl.flock(fileHandle.fileno(), fcntl.LOCK_EX)
try:
fileHandle.seek(byteOffset)
except IOError:
# print " IOError: fsPath=%s byteOffset=%d size=%d sequence=%s" % (
# self.fsPath, byteOffset, filesize, sequence)
raise
fileHandle.write(packedValues)
def deleteBefore(self, t):
if not exists(self.fsPath):
raise SliceDeleted()
if t % self.timeStep != 0:
t = t - (t % self.timeStep) + self.timeStep
timeOffset = t - self.startTime
if timeOffset < 0:
return
pointOffset = timeOffset // self.timeStep
byteOffset = pointOffset * DATAPOINT_SIZE
if not byteOffset:
return<|fim▁hole|> if LOCK_WRITES:
fcntl.flock(fileHandle.fileno(), fcntl.LOCK_EX)
fileHandle.seek(byteOffset)
fileData = fileHandle.read()
if fileData:
fileHandle.seek(0)
fileHandle.write(fileData)
fileHandle.truncate()
fileHandle.close()
newFsPath = join(dirname(self.fsPath), "%d@%d.slice" % (t, self.timeStep))
os.rename(self.fsPath, newFsPath)
else:
os.unlink(self.fsPath)
raise SliceDeleted()
def __lt__(self, other):
return self.startTime < other.startTime
class TimeSeriesData(object):
__slots__ = ('startTime', 'endTime', 'timeStep', 'values')
def __init__(self, startTime, endTime, timeStep, values):
self.startTime = startTime
self.endTime = endTime
self.timeStep = timeStep
self.values = values
@property
def timestamps(self):
return range(self.startTime, self.endTime, self.timeStep)
def __iter__(self):
return izip(self.timestamps, self.values)
def __len__(self):
return len(self.values)
def merge(self, other):
for timestamp, value in other:
if value is None:
continue
timestamp -= timestamp % self.timeStep
if timestamp < self.startTime:
continue
index = int((timestamp - self.startTime) // self.timeStep)
try:
if self.values[index] is None:
self.values[index] = value
except IndexError:
continue
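# Illustrative example (not part of the original module): merge() back-fills gaps
# in this series with datapoints from another series covering the same window,
# without overwriting values that are already present. For instance:
#
#   a = TimeSeriesData(0, 120, 60, [None, 3.0])
#   b = TimeSeriesData(0, 120, 60, [1.5, 9.9])
#   a.merge(b)   # a.values becomes [1.5, 3.0]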
class CorruptNode(Exception):
def __init__(self, node, problem):
Exception.__init__(self, problem)
self.node = node
self.problem = problem
class NoData(Exception):
pass
class NodeNotFound(Exception):
pass
class NodeDeleted(Exception):
pass
class InvalidRequest(Exception):
pass
class InvalidAggregationMethod(Exception):
pass
class SliceGapTooLarge(Exception):
"For internal use only"
class SliceDeleted(Exception):
pass
def aggregate(aggregationMethod, values):
# Filter out None values
knownValues = list(filter(lambda x: x is not None, values))
  if len(knownValues) == 0:
return None
# Aggregate based on method
if aggregationMethod == 'average':
return float(sum(knownValues)) / float(len(knownValues))
elif aggregationMethod == 'sum':
return float(sum(knownValues))
elif aggregationMethod == 'last':
return knownValues[-1]
elif aggregationMethod == 'max':
return max(knownValues)
elif aggregationMethod == 'min':
return min(knownValues)
else:
raise InvalidAggregationMethod("Unrecognized aggregation method %s" %
aggregationMethod)
def aggregateSeries(method, oldTimeStep, newTimeStep, values):
# Aggregate current values to fit newTimeStep.
# Makes the assumption that the caller has already guaranteed
# that newTimeStep is bigger than oldTimeStep.
factor = int(newTimeStep // oldTimeStep)
newValues = []
subArr = []
for val in values:
subArr.append(val)
if len(subArr) == factor:
newValues.append(aggregate(method, subArr))
subArr = []
if len(subArr):
newValues.append(aggregate(method, subArr))
return newValues
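# Illustrative example (not part of the original module), assuming 60-second
# datapoints rolled up into 300-second buckets with the 'average' method:
#
#   aggregateSeries('average', 60, 300, [1, 2, 3, 4, 5, 6, 7])
#   # -> [3.0, 6.5]  (mean of the first five points, then of the trailing two)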
def getTree(path):
while path not in (os.sep, ''):
if isdir(join(path, '.ceres-tree')):
return CeresTree(path)
path = dirname(path)
def setDefaultNodeCachingBehavior(behavior):
global DEFAULT_NODE_CACHING_BEHAVIOR
behavior = behavior.lower()
if behavior not in ('none', 'all'):
raise ValueError("invalid caching behavior '%s'" % behavior)
DEFAULT_NODE_CACHING_BEHAVIOR = behavior
def setDefaultSliceCachingBehavior(behavior):
global DEFAULT_SLICE_CACHING_BEHAVIOR
behavior = behavior.lower()
if behavior not in ('none', 'all', 'latest'):
raise ValueError("invalid caching behavior '%s'" % behavior)
DEFAULT_SLICE_CACHING_BEHAVIOR = behavior<|fim▁end|> |
self.node.clearSliceCache()
with open(self.fsPath, 'r+b') as fileHandle: |
<|file_name|>sync.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# sync.py
# Copyright (C) 2017 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Synchronization between blobs client/server
"""
from collections import defaultdict
from twisted.internet import defer
from twisted.internet import reactor
from twisted.logger import Logger
from twisted.internet import error
from .sql import SyncStatus
from .errors import RetriableTransferError
logger = Logger()
def sleep(seconds):
d = defer.Deferred()
reactor.callLater(seconds, d.callback, None)
return d
MAX_WAIT = 60 # In seconds. Max time between retries
@defer.inlineCallbacks
def with_retry(func, *args, **kwargs):
"""
Run func repeatedly until success, as long as the exception raised is
a "retriable error". If an exception of another kind is raised by func,
the retrying stops and that exception is propagated up the stack.
"""
retry_wait = 1
retriable_errors = (error.ConnectError, error.ConnectionClosed,
RetriableTransferError,)
while True:
try:
yield func(*args, **kwargs)
break
except retriable_errors:
yield sleep(retry_wait)
retry_wait = min(retry_wait + 10, MAX_WAIT)
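# Illustrative usage (sketch, not part of the original module): retry a flaky
# transfer until it either succeeds or raises a non-retriable error. Here
# `upload_blob` is a hypothetical callable returning a Deferred.
#
#   @defer.inlineCallbacks
#   def upload_with_retry(blob_id):
#       yield with_retry(upload_blob, blob_id, namespace='docs')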
class BlobsSynchronizer(object):
def __init__(self):
self.locks = defaultdict(defer.DeferredLock)
@defer.inlineCallbacks
def refresh_sync_status_from_server(self, namespace=''):
d1 = self.remote_list(namespace=namespace)
d2 = self.local_list(namespace=namespace)
remote_list, local_list = yield defer.gatherResults([d1, d2])
pending_download_ids = tuple(set(remote_list) - set(local_list))
pending_upload_ids = tuple(set(local_list) - set(remote_list))
yield self.local.update_batch_sync_status(
pending_download_ids,
SyncStatus.PENDING_DOWNLOAD,
namespace=namespace)
yield self.local.update_batch_sync_status(
pending_upload_ids,<|fim▁hole|> @defer.inlineCallbacks
def _apply_deletions_from_server(self, namespace=''):
remote_deletions = self.remote_list(namespace=namespace, deleted=True)
remote_deletions = yield remote_deletions
yield self.local.batch_delete(remote_deletions)
yield self.local.update_batch_sync_status(
remote_deletions,
SyncStatus.SYNCED,
namespace=namespace)
def send_missing(self, namespace=''):
"""
Compare local and remote blobs and send what's missing in server.
:param namespace:
Optional parameter to restrict operation to a given namespace.
:type namespace: str
:return: A deferred that fires when all local blobs were sent to
server.
:rtype: twisted.internet.defer.Deferred
"""
lock = self.locks['send_missing']
d = lock.run(self._send_missing, namespace)
return d
@defer.inlineCallbacks
def _send_missing(self, namespace):
        # The list of pending uploads must be refreshed every time a new blob is
        # transferred. To do that, we use a semaphore and fetch a new ordered
        # list only when there are free slots for new transfers.
max_transfers = self.concurrent_transfers_limit
semaphore = defer.DeferredSemaphore(max_transfers)
scheduled = set()
while True:
d = semaphore.run(self._send_next, namespace, scheduled)
success = yield d
if not success:
break
@defer.inlineCallbacks
def _send_next(self, namespace, scheduled):
status = SyncStatus.PENDING_UPLOAD
pending = yield self.local_list_status(status, namespace)
pending = [x for x in pending if x not in scheduled]
logger.info("There are %d pending blob uploads." % len(pending))
if not pending:
# we are finished, indicate that to our caller
defer.returnValue(False)
blob_id = pending[0]
logger.info("Sending blob: %s" % (blob_id,))
yield with_retry(self._send, blob_id, namespace)
defer.returnValue(True)
def fetch_missing(self, namespace=''):
"""
Compare local and remote blobs and fetch what's missing in local
storage.
:param namespace:
Optional parameter to restrict operation to a given namespace.
:type namespace: str
:return: A deferred that fires when all remote blobs were received from
server.
:rtype: twisted.internet.defer.Deferred
"""
lock = self.locks['fetch_missing']
d = lock.run(self._fetch_missing, namespace)
return d
@defer.inlineCallbacks
def _fetch_missing(self, namespace=''):
        # The list of pending downloads must be refreshed every time a new blob is
        # transferred. To do that, we use a semaphore and fetch a new ordered
        # list only when there are free slots for new transfers.
max_transfers = self.concurrent_transfers_limit
semaphore = defer.DeferredSemaphore(max_transfers)
scheduled = set()
while True:
d = semaphore.run(self._fetch_next, namespace, scheduled)
success = yield d
if not success:
break
@defer.inlineCallbacks
def _fetch_next(self, namespace, scheduled):
status = SyncStatus.PENDING_DOWNLOAD
pending = yield self.local_list_status(status, namespace)
pending = [x for x in pending if x not in scheduled]
logger.info("There are %d pending blob downloads." % len(pending))
if not pending:
# we are finished, indicate that to our caller
defer.returnValue(False)
blob_id = pending[0]
logger.info("Fetching blob: %s" % (blob_id,))
yield with_retry(self._fetch, blob_id, namespace)
defer.returnValue(True)
@defer.inlineCallbacks
def sync(self, namespace=''):
try:
yield self._apply_deletions_from_server(namespace)
yield self.refresh_sync_status_from_server(namespace)
yield self.fetch_missing(namespace)
yield self.send_missing(namespace)
except defer.FirstError as e:
e.subFailure.raiseException()
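    # Illustrative usage (sketch, not part of the original class): a client
    # object mixing in BlobsSynchronizer, with `local` and `remote` backends
    # already configured, would typically drive a full cycle like this:
    #
    #   d = blob_manager.sync(namespace='documents')
    #   d.addCallback(lambda _: blob_manager.sync_progress)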
@property
def sync_progress(self):
return self.local.get_sync_progress()<|fim▁end|> | SyncStatus.PENDING_UPLOAD,
namespace=namespace)
|
<|file_name|>widgets2.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from calibre.gui2.complete2 import LineEdit
from calibre.gui2.widgets import history
class HistoryLineEdit2(LineEdit):
max_history_items = None
def __init__(self, parent=None, completer_widget=None, sort_func=lambda x:None):
LineEdit.__init__(self, parent=parent, completer_widget=completer_widget, sort_func=sort_func)
@property
def store_name(self):
return 'lineedit_history_'+self._name
def initialize(self, name):
self._name = name
self.history = history.get(self.store_name, [])
self.set_separator(None)
self.update_items_cache(self.history)
self.setText('')
self.editingFinished.connect(self.save_history)
<|fim▁hole|> try:
self.history.remove(ct)
except ValueError:
pass
self.history.insert(0, ct)
if self.max_history_items is not None:
del self.history[self.max_history_items:]
history.set(self.store_name, self.history)
self.update_items_cache(self.history)
def clear_history(self):
self.history = []
history.set(self.store_name, self.history)
self.update_items_cache(self.history)<|fim▁end|> | def save_history(self):
ct = unicode(self.text())
if len(ct) > 2: |
<|file_name|>UserRecyclerViewAdapter.java<|end_file_name|><|fim▁begin|>package me.vadik.instaclimb.view.adapter;
import android.content.Context;
import android.databinding.ViewDataBinding;
import android.view.LayoutInflater;
import android.view.ViewGroup;
import me.vadik.instaclimb.databinding.RowLayoutRouteBinding;
import me.vadik.instaclimb.databinding.UserCardBinding;
import me.vadik.instaclimb.model.Route;
import me.vadik.instaclimb.model.User;
import me.vadik.instaclimb.view.adapter.abstr.AbstractRecyclerViewWithHeaderAdapter;
import me.vadik.instaclimb.viewmodel.RouteItemViewModel;
import me.vadik.instaclimb.viewmodel.UserViewModel;
/**
* User: vadik
* Date: 4/13/16
*/
public class UserRecyclerViewAdapter extends AbstractRecyclerViewWithHeaderAdapter<User, Route> {
public UserRecyclerViewAdapter(Context context, User user) {
super(context, user);
}
@Override
protected ViewDataBinding onCreateHeader(LayoutInflater inflater, ViewGroup parent) {
return UserCardBinding.inflate(inflater, parent, false);
}
@Override
protected ViewDataBinding onCreateItem(LayoutInflater inflater, ViewGroup parent) {
return RowLayoutRouteBinding.inflate(inflater, parent, false);
}
@Override
protected void onBindHeader(ViewDataBinding binding, User user) {
((UserCardBinding) binding).setUser(new UserViewModel(mContext, user));
}
@Override<|fim▁hole|> ((RowLayoutRouteBinding) binding).setRoute(new RouteItemViewModel(mContext, route));
}
}<|fim▁end|> | protected void onBindItem(ViewDataBinding binding, Route route) { |
<|file_name|>error-pages.js<|end_file_name|><|fim▁begin|>module.exports = handler
var debug = require('../debug').server
var fs = require('fs')
function handler (err, req, res, next) {
debug('Error page because of ' + err.message)
var ldp = req.app.locals.ldp
// If the user specifies this function
// then, they can customize the error programmatically
if (ldp.errorHandler) {
return ldp.errorHandler(err, req, res, next)
}
// If noErrorPages is set,
// then use built-in express default error handler
if (ldp.noErrorPages) {
return res
.status(err.status)
.send(err.message + '\n' || '')
}
// Check if error page exists
var errorPage = ldp.errorPages + err.status.toString() + '.html'
fs.readFile(errorPage, 'utf8', function (readErr, text) {
if (readErr) {
return res<|fim▁hole|> }
res.status(err.status)
res.header('Content-Type', 'text/html')
res.send(text)
})
}<|fim▁end|> | .status(err.status)
.send(err.message || '') |
<|file_name|>Widget.cpp<|end_file_name|><|fim▁begin|>#include "sp/sp.h"
#include "Widget.h"
namespace sp { namespace graphics { namespace ui {
Widget::Widget(const maths::Rectangle& bounds)
: m_Bounds(bounds), m_Active(true), m_Focused(false)
{
}
<|fim▁hole|> {
return false;
}
bool Widget::OnMouseReleased(events::MouseReleasedEvent& e)
{
return false;
}
bool Widget::OnMouseMoved(events::MouseMovedEvent& e)
{
return false;
}
void Widget::OnUpdate()
{
}
void Widget::OnRender(Renderer2D& renderer)
{
}
} } }<|fim▁end|> | bool Widget::OnMousePressed(events::MousePressedEvent& e) |
<|file_name|>consts.rs<|end_file_name|><|fim▁begin|>use crate::mir::interpret::ConstValue;
use crate::mir::interpret::{LitToConstInput, Scalar};
use crate::ty::{
self, InlineConstSubsts, InlineConstSubstsParts, InternalSubsts, ParamEnv, ParamEnvAnd, Ty,
TyCtxt, TypeFoldable,
};
use rustc_errors::ErrorReported;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_macros::HashStable;
mod int;
mod kind;
mod valtree;
pub use int::*;
pub use kind::*;
pub use valtree::*;
/// Typed constant value.
#[derive(Copy, Clone, Debug, Hash, TyEncodable, TyDecodable, Eq, PartialEq, Ord, PartialOrd)]
#[derive(HashStable)]
pub struct Const<'tcx> {
pub ty: Ty<'tcx>,
pub val: ConstKind<'tcx>,
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(Const<'_>, 48);
impl<'tcx> Const<'tcx> {
/// Literals and const generic parameters are eagerly converted to a constant, everything else
/// becomes `Unevaluated`.
pub fn from_anon_const(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &'tcx Self {
Self::from_opt_const_arg_anon_const(tcx, ty::WithOptConstParam::unknown(def_id))
}
pub fn from_opt_const_arg_anon_const(
tcx: TyCtxt<'tcx>,
def: ty::WithOptConstParam<LocalDefId>,
) -> &'tcx Self {
debug!("Const::from_anon_const(def={:?})", def);
let hir_id = tcx.hir().local_def_id_to_hir_id(def.did);
let body_id = match tcx.hir().get(hir_id) {
hir::Node::AnonConst(ac) => ac.body,
_ => span_bug!(
tcx.def_span(def.did.to_def_id()),
"from_anon_const can only process anonymous constants"
),
};
let expr = &tcx.hir().body(body_id).value;
let ty = tcx.type_of(def.def_id_for_type_of());
match Self::try_eval_lit_or_param(tcx, ty, expr) {
Some(v) => v,
None => tcx.mk_const(ty::Const {
val: ty::ConstKind::Unevaluated(ty::Unevaluated {
def: def.to_global(),
substs_: None,
promoted: None,
}),
ty,
}),
}
}
fn try_eval_lit_or_param(
tcx: TyCtxt<'tcx>,
ty: Ty<'tcx>,
expr: &'tcx hir::Expr<'tcx>,
) -> Option<&'tcx Self> {
let lit_input = match expr.kind {
hir::ExprKind::Lit(ref lit) => Some(LitToConstInput { lit: &lit.node, ty, neg: false }),
hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => match expr.kind {
hir::ExprKind::Lit(ref lit) => {
Some(LitToConstInput { lit: &lit.node, ty, neg: true })
}
_ => None,
},
_ => None,
};
if let Some(lit_input) = lit_input {
// If an error occurred, ignore that it's a literal and leave reporting the error up to
// mir.
if let Ok(c) = tcx.at(expr.span).lit_to_const(lit_input) {
return Some(c);
} else {
tcx.sess.delay_span_bug(expr.span, "Const::from_anon_const: couldn't lit_to_const");
}
}
// Unwrap a block, so that e.g. `{ P }` is recognised as a parameter. Const arguments
// currently have to be wrapped in curly brackets, so it's necessary to special-case.
let expr = match &expr.kind {
hir::ExprKind::Block(block, _) if block.stmts.is_empty() && block.expr.is_some() => {
block.expr.as_ref().unwrap()
}
_ => expr,
};
use hir::{def::DefKind::ConstParam, def::Res, ExprKind, Path, QPath};
match expr.kind {
ExprKind::Path(QPath::Resolved(_, &Path { res: Res::Def(ConstParam, def_id), .. })) => {
// Find the name and index of the const parameter by indexing the generics of
// the parent item and construct a `ParamConst`.
let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
let item_id = tcx.hir().get_parent_node(hir_id);
let item_def_id = tcx.hir().local_def_id(item_id);
let generics = tcx.generics_of(item_def_id.to_def_id());
let index = generics.param_def_id_to_index[&def_id];
let name = tcx.hir().name(hir_id);
Some(tcx.mk_const(ty::Const {
val: ty::ConstKind::Param(ty::ParamConst::new(index, name)),
ty,
}))
}
_ => None,
}
}
pub fn from_inline_const(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &'tcx Self {
debug!("Const::from_inline_const(def_id={:?})", def_id);
let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
let body_id = match tcx.hir().get(hir_id) {
hir::Node::AnonConst(ac) => ac.body,
_ => span_bug!(
tcx.def_span(def_id.to_def_id()),
"from_inline_const can only process anonymous constants"
),
};
let expr = &tcx.hir().body(body_id).value;
let ty = tcx.typeck(def_id).node_type(hir_id);
let ret = match Self::try_eval_lit_or_param(tcx, ty, expr) {
Some(v) => v,
None => {
let typeck_root_def_id = tcx.typeck_root_def_id(def_id.to_def_id());
let parent_substs =
tcx.erase_regions(InternalSubsts::identity_for_item(tcx, typeck_root_def_id));
let substs =
InlineConstSubsts::new(tcx, InlineConstSubstsParts { parent_substs, ty })
.substs;
tcx.mk_const(ty::Const {
val: ty::ConstKind::Unevaluated(ty::Unevaluated {
def: ty::WithOptConstParam::unknown(def_id).to_global(),
substs_: Some(substs),
promoted: None,
}),
ty,
})
}
};
debug_assert!(!ret.has_free_regions(tcx));
ret
}
/// Interns the given value as a constant.
#[inline]
pub fn from_value(tcx: TyCtxt<'tcx>, val: ConstValue<'tcx>, ty: Ty<'tcx>) -> &'tcx Self {
tcx.mk_const(Self { val: ConstKind::Value(val), ty })
}
#[inline]
/// Interns the given scalar as a constant.
pub fn from_scalar(tcx: TyCtxt<'tcx>, val: Scalar, ty: Ty<'tcx>) -> &'tcx Self {
Self::from_value(tcx, ConstValue::Scalar(val), ty)
}
#[inline]
/// Creates a constant with the given integer value and interns it.
pub fn from_bits(tcx: TyCtxt<'tcx>, bits: u128, ty: ParamEnvAnd<'tcx, Ty<'tcx>>) -> &'tcx Self {
let size = tcx
.layout_of(ty)
.unwrap_or_else(|e| panic!("could not compute layout for {:?}: {:?}", ty, e))
.size;
Self::from_scalar(tcx, Scalar::from_uint(bits, size), ty.value)
}
#[inline]
/// Creates an interned zst constant.
pub fn zero_sized(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> &'tcx Self {
Self::from_scalar(tcx, Scalar::ZST, ty)
}
#[inline]
/// Creates an interned bool constant.
pub fn from_bool(tcx: TyCtxt<'tcx>, v: bool) -> &'tcx Self {
Self::from_bits(tcx, v as u128, ParamEnv::empty().and(tcx.types.bool))
}
#[inline]
/// Creates an interned usize constant.
pub fn from_usize(tcx: TyCtxt<'tcx>, n: u64) -> &'tcx Self {
Self::from_bits(tcx, n as u128, ParamEnv::empty().and(tcx.types.usize))
}
#[inline]
/// Attempts to evaluate the given constant to bits. Can fail to evaluate in the presence of
/// generics (or erroneous code) or if the value can't be represented as bits (e.g. because it
/// contains const generic parameters or pointers).
pub fn try_eval_bits(
&self,
tcx: TyCtxt<'tcx>,
param_env: ParamEnv<'tcx>,
ty: Ty<'tcx>,
) -> Option<u128> {
assert_eq!(self.ty, ty);
let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
// if `ty` does not depend on generic parameters, use an empty param_env
self.val.eval(tcx, param_env).try_to_bits(size)
}
#[inline]
pub fn try_eval_bool(&self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Option<bool> {
self.val.eval(tcx, param_env).try_to_bool()
}
#[inline]
pub fn try_eval_usize(&self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Option<u64> {
self.val.eval(tcx, param_env).try_to_machine_usize(tcx)
}
#[inline]
/// Tries to evaluate the constant if it is `Unevaluated`. If that doesn't succeed, return the
/// unevaluated constant.
pub fn eval(&self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> &Const<'tcx> {
if let Some(val) = self.val.try_eval(tcx, param_env) {
match val {<|fim▁hole|> self
}
}
#[inline]
/// Panics if the value cannot be evaluated or doesn't contain a valid integer of the given type.
pub fn eval_bits(&self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, ty: Ty<'tcx>) -> u128 {
self.try_eval_bits(tcx, param_env, ty)
.unwrap_or_else(|| bug!("expected bits of {:#?}, got {:#?}", ty, self))
}
#[inline]
/// Panics if the value cannot be evaluated or doesn't contain a valid `usize`.
pub fn eval_usize(&self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> u64 {
self.try_eval_usize(tcx, param_env)
.unwrap_or_else(|| bug!("expected usize, got {:#?}", self))
}
}
pub fn const_param_default<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> &'tcx Const<'tcx> {
let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
let default_def_id = match tcx.hir().get(hir_id) {
hir::Node::GenericParam(hir::GenericParam {
kind: hir::GenericParamKind::Const { ty: _, default: Some(ac) },
..
}) => tcx.hir().local_def_id(ac.hir_id),
_ => span_bug!(
tcx.def_span(def_id),
"`const_param_default` expected a generic parameter with a constant"
),
};
Const::from_anon_const(tcx, default_def_id)
}<|fim▁end|> | Ok(val) => Const::from_value(tcx, val, self.ty),
Err(ErrorReported) => tcx.const_error(self.ty),
}
} else { |
<|file_name|>Roche.py<|end_file_name|><|fim▁begin|># We calculate the flatness with the Roche model
# calculate omk knowing omc and vice-versa
from numpy import *<|fim▁hole|># we have to solve a cubic equation a-J2*a**3=1+J2+0.5*omk**2
def eps(omk):
return omk**2/(2+omk**2)
def om_k(omc):
khi=arcsin(omc)
return sqrt(6*sin(khi/3)/omc-2)
omc=0.88
print 'omc=',omc,' omk=',om_k(omc)<|fim▁end|> | from scipy.optimize import root
|
<|file_name|>build.js<|end_file_name|><|fim▁begin|>function load(/*String*/fileName){
//summary: opens the file at fileName and evals the contents as JavaScript.
//Read the file
var fileContents = readFile(fileName);
//Eval the contents.
var Context = Packages.org.mozilla.javascript.Context;
var context = Context.enter();
try{
return context.evaluateString(this, fileContents, fileName, 1, null);
}finally{
Context.exit();
}
}
function readFile(/*String*/path, /*String?*/encoding){
//summary: reads a file and returns a string
encoding = encoding || "utf-8";
var file = new java.io.File(path);
var lineSeparator = "\n";
var input = new java.io.BufferedReader(new java.io.InputStreamReader(new java.io.FileInputStream(file), encoding));
try {
var stringBuffer = new java.lang.StringBuffer();
var line = "";
while((line = input.readLine()) !== null){
stringBuffer.append(line);
stringBuffer.append(lineSeparator);
}
//Make sure we return a JavaScript string and not a Java string.
return new String(stringBuffer.toString()); //String
} finally {
input.close();
}
}
//TODO: inlining this function since the new shrinksafe.jar is used, and older
//versions of Dojo's buildscripts are not compatible.
function optimizeJs(/*String fileName*/fileName, /*String*/fileContents, /*String*/copyright, /*String*/optimizeType, /*String*/stripConsole){
//summary: either strips comments from string or compresses it.
copyright = copyright || "";
//Use rhino to help do minifying/compressing.
var context = Packages.org.mozilla.javascript.Context.enter();
try{
// Use the interpreter for interactive input (copied this from Main rhino class).
context.setOptimizationLevel(-1);
// the "packer" type is now just a synonym for shrinksafe
if(optimizeType.indexOf("shrinksafe") == 0 || optimizeType == "packer"){
//Apply compression using custom compression call in Dojo-modified rhino.
fileContents = new String(Packages.org.dojotoolkit.shrinksafe.Compressor.compressScript(fileContents, 0, 1, stripConsole));
if(optimizeType.indexOf(".keepLines") == -1){
fileContents = fileContents.replace(/[\r\n]/g, "");
}
}else if(optimizeType == "comments"){
//Strip comments
var script = context.compileString(fileContents, fileName, 1, null);
fileContents = new String(context.decompileScript(script, 0));
//Replace the spaces with tabs.
//Ideally do this in the pretty printer rhino code.
fileContents = fileContents.replace(/ /g, "\t");
//If this is an nls bundle, make sure it does not end in a ;
//Otherwise, bad things happen.
if(fileName.match(/\/nls\//)){
fileContents = fileContents.replace(/;\s*$/, "");
}
}
}finally{
Packages.org.mozilla.javascript.Context.exit();
}
return copyright + fileContents;
}
build = {
make: function(
//The path to this file. Assumes dojo builds under it.
/*String*/builderPath,
//"1.1.1" or "1.3.2": used to choose directory of dojo to use.
/*String*/version,
//"google" or "aol"
/*String*/cdnType,
//comma-separated list of resource names. No double-quotes or quotes around values.
/*String*/dependencies,
//comments, shrinksafe, none
/*String*/optimizeType){
//Validate.
if(version != "1.3.2"){
return "invalid version";
}
if(cdnType != "google" && cdnType != "aol"){
        return "invalid CDN type";
}
if(optimizeType != "comments" && optimizeType != "shrinksafe"
&& optimizeType != "none" && optimizeType != "shrinksafe.keepLines"){
return "invalid optimize type";
}
if(!dependencies.match(/^[\w\-\,\s\.]+$/)){
return "invalid dependency list";
}
//Set up full CDN path.<|fim▁hole|> xdDojoPath += version;
//Directory that holds dojo source distro. Direct child under the helma dir
var dojoDir = builderPath + version + "/";
//Normalize the dependencies so that have double-quotes
//around each dependency.
var normalizedDependencies = dependencies || "";
if(normalizedDependencies){
normalizedDependencies = '"' + normalizedDependencies.split(",").join('","') + '"';
}
var buildscriptDir = dojoDir + "util/buildscripts/";
//Load the libraries to help in the build.
load(dojoDir + "util/buildscripts/jslib/logger.js");
load(dojoDir + "util/buildscripts/jslib/fileUtil.js");
load(dojoDir + "util/buildscripts/jslib/buildUtil.js");
load(dojoDir + "util/buildscripts/jslib/buildUtilXd.js");
load(dojoDir + "util/buildscripts/jslib/i18nUtil.js");
//Set up the build args.
var kwArgs = buildUtil.makeBuildOptions([
"loader=xdomain",
"version=" + version,
"xdDojoPath=" + xdDojoPath,
"layerOptimize=" + optimizeType
]);
//Specify the basic profile for build.
var profileText = 'dependencies = {'
+ 'layers: ['
+ ' {'
+ ' name: "dojo.js",'
+ ' dependencies: ['
+ normalizedDependencies
+ ' ]'
+ ' }'
+ '],'
+ 'prefixes: ['
+ ' [ "dojo", "' + dojoDir + 'dojo" ],'
+ ' [ "dijit", "' + dojoDir + 'dijit" ],'
+ ' [ "dojox", "' + dojoDir + 'dojox" ]'
+ ']'
+ '}';
//Bring the profile into existence
var profileProperties = buildUtil.evalProfile(profileText, true);
kwArgs.profileProperties = profileProperties;
//Set up some helper variables.
dependencies = kwArgs.profileProperties.dependencies;
var prefixes = dependencies.prefixes;
var lineSeparator = fileUtil.getLineSeparator();
var layerLegalText = fileUtil.readFile(buildscriptDir + "copyright.txt")
+ lineSeparator
+ fileUtil.readFile(buildscriptDir + "build_notice.txt");
//Manually set the loader on the dependencies object. Ideally the buildUtil.loadDependencyList() function
//and subfunctions would take kwArgs directly.
dependencies.loader = kwArgs.loader;
//Build the layer contents.
var depResult = buildUtil.makeDojoJs(buildUtil.loadDependencyList(kwArgs.profileProperties, null, buildscriptDir), kwArgs.version, kwArgs);
//Grab the content from the "dojo.xd.js" layer.
var layerName = depResult[1].layerName;
var layerContents = depResult[1].contents;
//Burn in xd path for dojo if requested, and only do this in dojo.xd.js.
if(layerName.match(/dojo\.xd\.js/) && kwArgs.xdDojoPath){
layerContents = buildUtilXd.setXdDojoConfig(layerContents, kwArgs.xdDojoPath);
}
//Intern strings
if(kwArgs.internStrings){
prefixes = dependencies["prefixes"] || [];
var skiplist = dependencies["internSkipList"] || [];
layerContents = buildUtil.interningRegexpMagic(layerName, layerContents, dojoDir, prefixes, skiplist);
}
//Minify the contents
return optimizeJs(layerName, layerContents, layerLegalText, kwArgs.layerOptimize, "");
}
};<|fim▁end|> | var xdDojoPath = "http://ajax.googleapis.com/ajax/libs/dojo/";
if(cdnType == "aol"){
xdDojoPath = "http://o.aolcdn.com/dojo/";
} |
<|file_name|>pyplot03.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import re
rootPath = "/Users/jeff/work/debug/20181216_hard_fe2k_15fps/"
finalLogFile = "rosout.log.2"
def appendTimestamps(arr, start, stop, flag):
#flag = True
d = stop - start
if flag or (d > -10 and d < 2000):
arr.append(d)
return True
return False
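# Illustrative example (not part of the original script): deltas outside the
# plausible (-10, 2000) ms window are dropped unless flag forces the append.
#
#   deltas = []
#   appendTimestamps(deltas, 100, 180, False)    # True, appends 80
#   appendTimestamps(deltas, 100, 5000, False)   # False, 4900 is out of range
#   appendTimestamps(deltas, 100, 5000, True)    # True, forced append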
## camera -> OA(ObjectAnalytics) -> Fusion -> Flink -> V2X
stamps = [[],[],[],[],[]]
log = open(rootPath + finalLogFile)
lines = log.readlines()
log.close()
for i in range(0, len(lines)):
line = lines[i].rstrip('\n').strip()<|fim▁hole|> #print("line", line)
print("ret:", ret)
        stamps[0].append(int(ret[0][0])) # camera
        stamps[2].append(int(ret[0][2])) # fusion
        stamps[3].append(int(ret[0][1])) # flink
        stamps[4].append(int(ret[0][4])) # v2x
# oa
oastamps = ret[0][3].split(',')
        t1 = int(oastamps[0])
        t2 = int(oastamps[1])
        t3 = int(oastamps[2])
mi = t1
ma = t1
if mi > t2:
mi = t2
if mi > t3:
mi = t3
if ma < t2:
ma = t2
if ma < t3:
ma = t3
#stamps[1].append((t1 + t2 + t3) / 3)
#stamps[1].append(mi)
stamps[1].append(ma)
        stamps[1].append(int(oastamps[0]))
        stamps[1].append(int(oastamps[1]))
        stamps[1].append(int(oastamps[2]))
## [ 0 1 2 3 4 ]
## [ Camera OA(3) Fusion Flink V2X ]
## [ 0 1 2 3 4 5 ]
## [ Total(V2X - Camera), OA(OA-Camera), Fusion(Fusion-OA), Flink(Flink - Fusion), V2X(V2X - Flink) Fusion-CAM ]
delays = [[], [], [], [], [], [], [], []]
for i in range(len(stamps[0])):
if appendTimestamps(delays[0], stamps[0][i], stamps[4][i], False): # total
appendTimestamps(delays[1], stamps[0][i], stamps[1][i * 4], True) # OA
appendTimestamps(delays[2], stamps[1][i * 4], stamps[2][i], True) # Fusion
appendTimestamps(delays[3], stamps[2][i], stamps[3][i], True) # Flink
appendTimestamps(delays[4], stamps[3][i], stamps[4][i], True) # V2x
appendTimestamps(delays[5], stamps[0][i], stamps[2][i], True) # Fusion - Cam
print("===length: ", len(delays[0]),len(delays[1]),len(delays[2]),len(delays[3]),len(delays[4]))
delayavg = [0,0,0,0,0,0]
if len(delays[0]) == 0:
print("empty delay array")
quit()
for i in range(len(delays[0])):
delayavg[0] = delayavg[0] + delays[0][i]
delayavg[1] = delayavg[1] + delays[1][i]
delayavg[2] = delayavg[2] + delays[2][i]
delayavg[3] = delayavg[3] + delays[3][i]
delayavg[4] = delayavg[4] + delays[4][i]
delayavg[5] = delayavg[5] + delays[5][i]
for i in range(6):
delayavg[i] = delayavg[i] / len(delays[0])
print("===AVG(Total, OA, Fusion, Flink, V2X): ", delayavg)
frameIntervals = []
for i in range(len(stamps[0]) - 1):
tmp = stamps[0][i + 1] - stamps[0][i]
if tmp < 1000:
frameIntervals.append(stamps[0][i + 1] - stamps[0][i])
## plot
plt.figure()
#plt.plot(delays[0])
#plt.plot(delays[1])
#plt.plot(delays[2])
#plt.plot(delays[3])
plt.plot(delays[4])
#plt.plot(delays[5])
plt.legend(["Total", "OA", "Fusion", "Flink", "V2X", "OA+Fusion"])
plt.show()
'''
## interval
plt.plot(frameIntervals)
plt.show()
'''
print("done!")<|fim▁end|> | ret = re.findall(r'\"camera_output_ts\":(\d+),.*\"flink_output_ts\":(\d+),.*\"fusion_output_ts\":(\d+),.*\"oa_output_ts\":\[([\d,]+)\],.*\"v2xnode_input_ts\":(\d+)', line)
if len(ret) > 0 and len(ret[0]) == 5:
if i < 2: |
<|file_name|>base.py<|end_file_name|><|fim▁begin|>from __future__ import print_function
import string
import sys
from collections import deque
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import check_array, check_random_state
from sklearn.utils.validation import check_is_fitted
from . import _hmmc
from .utils import normalize, logsumexp, iter_from_X_lengths
DECODER_ALGORITHMS = frozenset(("viterbi", "map"))
class ConvergenceMonitor(object):
"""Monitors and reports convergence to :data:`sys.stderr`.
Parameters
----------
tol : double
Convergence threshold. EM has converged either if the maximum
number of iterations is reached or the log probability
improvement between the two consecutive iterations is less
than threshold.
n_iter : int
Maximum number of iterations to perform.
verbose : bool
If ``True`` then per-iteration convergence reports are printed,
otherwise the monitor is mute.
Attributes
----------
history : deque
The log probability of the data for the last two training
iterations. If the values are not strictly increasing, the
model did not converge.
iter : int
Number of iterations performed while training the model.
"""
fmt = "{iter:>10d} {logprob:>16.4f} {delta:>+16.4f}"
def __init__(self, tol, n_iter, verbose):
self.tol = tol
self.n_iter = n_iter
self.verbose = verbose
self.history = deque(maxlen=2)
self.iter = 1
def report(self, logprob):
if self.history and self.verbose:
delta = logprob - self.history[-1]
message = self.fmt.format(
iter=self.iter, logprob=logprob, delta=delta)
print(message, file=sys.stderr)
self.history.append(logprob)
self.iter += 1
@property
def converged(self):
return (self.iter == self.n_iter or
(len(self.history) == 2 and
self.history[1] - self.history[0] < self.tol))
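# Illustrative usage (sketch, not part of the original module): the monitor is
# normally driven from inside an EM loop, e.g.
#
#   monitor = ConvergenceMonitor(tol=1e-2, n_iter=10, verbose=False)
#   for logprob in [-120.0, -110.0, -109.995]:
#       monitor.report(logprob)
#       if monitor.converged:
#           break
#
# The third report improves the log likelihood by less than ``tol``, so
# ``monitor.converged`` becomes True and the loop stops.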
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
Parameters
----------
n_components : int
Number of states in the model.
startprob_prior : array, shape (n_components, )
Initial state occupation prior distribution.
transmat_prior : array, shape (n_components, n_components)
Matrix of prior transition probabilities between states.
    algorithm : string, one of the ``DECODER_ALGORITHMS``
Decoder algorithm.
random_state: RandomState or an int seed (0 by default)
A random number generator instance.
n_iter : int, optional
Maximum number of iterations to perform.
tol : float, optional
Convergence threshold. EM will stop if the gain in log-likelihood
is below this value.
verbose : bool, optional
When ``True`` per-iteration convergence reports are printed
to :data:`sys.stderr`. You can diagnose convergence via the
:attr:`monitor_` attribute.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and other characters for subclass-specific
emission parameters. Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and other characters for
subclass-specific emission parameters. Defaults to all
parameters.
Attributes
----------
monitor_ : ConvergenceMonitor
Monitor object used to check the convergence of EM.
startprob_ : array, shape (n_components, )
Initial state occupation distribution.
transmat_ : array, shape (n_components, n_components)
Matrix of transition probabilities between states.
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
# the emission distribution parameters to expose them publicly.
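    # Illustrative sketch (not part of the original module): a concrete subclass
    # mainly supplies the emission model. Assuming an ``emissionprob_`` array of
    # shape (n_components, n_symbols) and integer observation symbols in X[:, 0]:
    #
    #   class DiscreteHMM(_BaseHMM):
    #       def _compute_log_likelihood(self, X):
    #           # shape (n_samples, n_components): log P(x_t | state)
    #           return np.log(self.emissionprob_)[:, X[:, 0].astype(int)].T
    #
    # plus _generate_sample_from_state() and, for training, the
    # _initialize_sufficient_statistics / _accumulate_sufficient_statistics /
    # _do_mstep hooks.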
def __init__(self, n_components=1,
startprob_prior=1.0, transmat_prior=1.0,
algorithm="viterbi", random_state=None,
n_iter=10, tol=1e-2, verbose=False,
params=string.ascii_letters,
init_params=string.ascii_letters):
self.n_components = n_components
self.params = params
self.init_params = init_params
self.startprob_prior = startprob_prior
self.transmat_prior = transmat_prior
self.algorithm = algorithm
self.random_state = random_state
self.n_iter = n_iter
self.tol = tol
self.verbose = verbose
def score_samples(self, X, lengths=None):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
logprob : float
Log likelihood of ``X``.
posteriors : array, shape (n_samples, n_components)
State-membership probabilities for each sample in ``X``.
See Also
--------
score : Compute the log probability under the model.
decode : Find most likely state sequence corresponding to ``X``.
"""
check_is_fitted(self, "startprob_")
self._check()
X = check_array(X)
n_samples = X.shape[0]
logprob = 0
posteriors = np.zeros((n_samples, self.n_components))
for i, j in iter_from_X_lengths(X, lengths):
framelogprob = self._compute_log_likelihood(X[i:j])
logprobij, fwdlattice = self._do_forward_pass(framelogprob)
logprob += logprobij
bwdlattice = self._do_backward_pass(framelogprob)
posteriors[i:j] = self._compute_posteriors(fwdlattice, bwdlattice)
return logprob, posteriors
def score(self, X, lengths=None):
"""Compute the log probability under the model.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
logprob : float
Log likelihood of ``X``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
decode : Find most likely state sequence corresponding to ``X``.
"""
check_is_fitted(self, "startprob_")
self._check()
# XXX we can unroll forward pass for speed and memory efficiency.
logprob = 0
for i, j in iter_from_X_lengths(X, lengths):
framelogprob = self._compute_log_likelihood(X[i:j])
logprobij, _fwdlattice = self._do_forward_pass(framelogprob)
logprob += logprobij
return logprob
def _decode_viterbi(self, X):
framelogprob = self._compute_log_likelihood(X)
return self._do_viterbi_pass(framelogprob)
def _decode_map(self, X):
_, posteriors = self.score_samples(X)
logprob = np.max(posteriors, axis=1).sum()
state_sequence = np.argmax(posteriors, axis=1)
return logprob, state_sequence
def decode(self, X, lengths=None, algorithm=None):
"""Find most likely state sequence corresponding to ``X``.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
algorithm : string, one of the ``DECODER_ALGORITHMS``
decoder algorithm to be used
Returns
-------
logprob : float
Log probability of the produced state sequence.
state_sequence : array, shape (n_samples, )
Labels for each sample from ``X`` obtained via a given
decoder ``algorithm``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
check_is_fitted(self, "startprob_")
self._check()
algorithm = algorithm or self.algorithm
if algorithm not in DECODER_ALGORITHMS:
raise ValueError("Unknown decoder {0!r}".format(algorithm))
decoder = {<|fim▁hole|>
X = check_array(X)
n_samples = X.shape[0]
logprob = 0
state_sequence = np.empty(n_samples, dtype=int)
for i, j in iter_from_X_lengths(X, lengths):
# XXX decoder works on a single sample at a time!
logprobij, state_sequenceij = decoder(X[i:j])
logprob += logprobij
state_sequence[i:j] = state_sequenceij
return logprob, state_sequence
def predict(self, X, lengths=None):
"""Find most likely state sequence corresponding to ``X``.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
state_sequence : array, shape (n_samples, )
Labels for each sample from ``X``.
"""
_, state_sequence = self.decode(X, lengths)
return state_sequence
def predict_proba(self, X, lengths=None):
"""Compute the posterior probability for each state in the model.
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
posteriors : array, shape (n_samples, n_components)
State-membership probabilities for each sample from ``X``.
"""
_, posteriors = self.score_samples(X, lengths)
return posteriors
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int
Number of samples to generate.
random_state: RandomState or an int seed (0 by default)
A random number generator instance. If ``None``, the object's
random_state is used.
Returns
-------
X : array, shape (n_samples, n_features)
Feature matrix.
state_sequence : array, shape (n_samples, )
State sequence produced by the model.
"""
check_is_fitted(self, "startprob_")
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_cdf = np.cumsum(self.startprob_)
transmat_cdf = np.cumsum(self.transmat_, axis=1)
currstate = (startprob_cdf > random_state.rand()).argmax()
state_sequence = [currstate]
X = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for t in range(n_samples - 1):
currstate = (transmat_cdf[currstate] > random_state.rand()) \
.argmax()
state_sequence.append(currstate)
X.append(self._generate_sample_from_state(
currstate, random_state=random_state))
return np.atleast_2d(X), np.array(state_sequence, dtype=int)
def fit(self, X, lengths=None):
"""Estimate model parameters.
An initialization step is performed before entering the
EM-algorithm. If you want to avoid this step for a subset of
the parameters, pass proper ``init_params`` keyword argument
to estimator's constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, )
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
self._init(X, lengths=lengths, params=self.init_params)
self._check()
self.monitor_ = ConvergenceMonitor(self.tol, self.n_iter, self.verbose)
for iter in range(self.n_iter):
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for i, j in iter_from_X_lengths(X, lengths):
framelogprob = self._compute_log_likelihood(X[i:j])
logprob, fwdlattice = self._do_forward_pass(framelogprob)
curr_logprob += logprob
bwdlattice = self._do_backward_pass(framelogprob)
posteriors = self._compute_posteriors(fwdlattice, bwdlattice)
self._accumulate_sufficient_statistics(
stats, X[i:j], framelogprob, posteriors, fwdlattice,
bwdlattice, self.params)
self.monitor_.report(curr_logprob)
if self.monitor_.converged:
break
self._do_mstep(stats, self.params)
return self
def _do_viterbi_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_components, np.log(self.startprob_),
np.log(self.transmat_), framelogprob)
return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_components))
_hmmc._forward(n_observations, n_components, np.log(self.startprob_),
np.log(self.transmat_), framelogprob, fwdlattice)
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_components))
_hmmc._backward(n_observations, n_components, np.log(self.startprob_),
np.log(self.transmat_), framelogprob, bwdlattice)
return bwdlattice
def _compute_posteriors(self, fwdlattice, bwdlattice):
log_gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
log_gamma += np.finfo(float).eps
log_gamma -= logsumexp(log_gamma, axis=1)[:, np.newaxis]
out = np.exp(log_gamma)
normalize(out, axis=1)
return out
def _compute_log_likelihood(self, X):
pass
def _generate_sample_from_state(self, state, random_state=None):
pass
def _init(self, X, lengths, params):
init = 1. / self.n_components
if 's' in params or not hasattr(self, "startprob_"):
self.startprob_ = np.full(self.n_components, init)
if 't' in params or not hasattr(self, "transmat_"):
self.transmat_ = np.full((self.n_components, self.n_components),
init)
def _check(self):
self.startprob_ = np.asarray(self.startprob_)
if len(self.startprob_) != self.n_components:
raise ValueError("startprob_ must have length n_components")
if not np.allclose(self.startprob_.sum(), 1.0):
raise ValueError("startprob_ must sum to 1.0 (got {0:.4f})"
.format(self.startprob_.sum()))
self.transmat_ = np.asarray(self.transmat_)
if self.transmat_.shape != (self.n_components, self.n_components):
raise ValueError(
"transmat_ must have shape (n_components, n_components)")
if not np.allclose(self.transmat_.sum(axis=1), 1.0):
raise ValueError("rows of transmat_ must sum to 1.0 (got {0})"
.format(self.transmat_.sum(axis=1)))
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
stats['nobs'] += 1
if 's' in params:
stats['start'] += posteriors[0]
if 't' in params:
n_observations, n_components = framelogprob.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_observations <= 1:
return
lneta = np.zeros((n_observations - 1, n_components, n_components))
_hmmc._compute_lneta(n_observations, n_components, fwdlattice,
np.log(self.transmat_),
bwdlattice, framelogprob, lneta)
stats['trans'] += np.exp(logsumexp(lneta, axis=0))
def _do_mstep(self, stats, params):
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
if 's' in params:
self.startprob_ = self.startprob_prior - 1.0 + stats['start']
normalize(self.startprob_)
if 't' in params:
self.transmat_ = self.transmat_prior - 1.0 + stats['trans']
normalize(self.transmat_, axis=1)<|fim▁end|> | "viterbi": self._decode_viterbi,
"map": self._decode_map
}[algorithm] |
<|file_name|>common.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys, os, logging, functools
import multiprocessing as mp
import mxnet as mx
import numpy as np
import random
import shutil
from mxnet.base import MXNetError
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append(os.path.join(curr_path, '../common/'))
sys.path.insert(0, os.path.join(curr_path, '../../../python'))
import models
from contextlib import contextmanager
import pytest
from tempfile import TemporaryDirectory
import locale
xfail_when_nonstandard_decimal_separator = pytest.mark.xfail(
locale.localeconv()["decimal_point"] != ".",
reason="Some operators break when the decimal separator is set to anything other than \".\". "
"These operators should be rewritten to utilize the new FFI. Please see #18097 for more "
"information."
)
def assertRaises(expected_exception, func, *args, **kwargs):
try:
func(*args, **kwargs)
except expected_exception as e:
pass
else:
# Did not raise exception
assert False, "%s did not raise %s" % (func.__name__, expected_exception.__name__)
def default_logger():
"""A logger used to output seed information to logs."""
logger = logging.getLogger(__name__)
# getLogger() lookups will return the same logger, but only add the handler once.
if not len(logger.handlers):
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
logger.addHandler(handler)
if (logger.getEffectiveLevel() == logging.NOTSET):
logger.setLevel(logging.INFO)
return logger
@contextmanager
def random_seed(seed=None):
"""
Runs a code block with a new seed for np, mx and python's random.
Parameters
----------
seed : the seed to pass to np.random, mx.random and python's random.
To impose rng determinism, invoke e.g. as in:
with random_seed(1234):
...
To impose rng non-determinism, invoke as in:
with random_seed():
...
Upon conclusion of the block, the rng's are returned to
a state that is a function of their pre-block state, so
any prior non-determinism is preserved.
"""
try:
next_seed = np.random.randint(0, np.iinfo(np.int32).max)
if seed is None:
np.random.seed()
seed = np.random.randint(0, np.iinfo(np.int32).max)
logger = default_logger()
logger.debug('Setting np, mx and python random seeds = %s', seed)
np.random.seed(seed)
mx.random.seed(seed)
random.seed(seed)
yield
finally:
# Reinstate prior state of np.random and other generators
np.random.seed(next_seed)
mx.random.seed(next_seed)
random.seed(next_seed)
def _assert_raise_cuxx_version_not_satisfied(min_version, cfg):
def less_than(version_left, version_right):<|fim▁hole|> """Compares two version strings in the format num(.[num])*"""
if not version_left or not version_right:
return False
left = version_left.split(".")
right = version_right.split(".")
# 0 pad shortest version - e.g.
# less_than("9.1", "9.1.9") == less_than("9.1.0", "9.1.9")
longest = max(len(left), len(right))
left.extend([0] * (longest - len(left)))
right.extend([0] * (longest - len(right)))
# compare each of the version components
for l, r in zip(left, right):
if l == r:
continue
return int(l) < int(r)
return False
def test_helper(orig_test):
@functools.wraps(orig_test)
def test_new(*args, **kwargs):
cuxx_off = os.getenv(cfg['TEST_OFF_ENV_VAR']) == 'true'
cuxx_env_version = os.getenv(cfg['VERSION_ENV_VAR'], None if cuxx_off else cfg['DEFAULT_VERSION'])
cuxx_test_disabled = cuxx_off or less_than(cuxx_env_version, min_version)
if not cuxx_test_disabled or mx.context.current_context().device_type == 'cpu':
orig_test(*args, **kwargs)
else:
pytest.raises((MXNetError, RuntimeError), orig_test, *args, **kwargs)
return test_new
return test_helper
def assert_raises_cudnn_not_satisfied(min_version):
return _assert_raise_cuxx_version_not_satisfied(min_version, {
'TEST_OFF_ENV_VAR': 'CUDNN_OFF_TEST_ONLY',
'VERSION_ENV_VAR': 'CUDNN_VERSION',
'DEFAULT_VERSION': '7.3.1'
})
def assert_raises_cuda_not_satisfied(min_version):
return _assert_raise_cuxx_version_not_satisfied(min_version, {
'TEST_OFF_ENV_VAR': 'CUDA_OFF_TEST_ONLY',
'VERSION_ENV_VAR': 'CUDA_VERSION',
'DEFAULT_VERSION': '10.1'
})
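# Illustrative usage (hypothetical test): when the installed toolkit is older
# than the stated minimum, the decorator expects the wrapped test to raise
# (MXNetError or RuntimeError) and turns that into a pass.
#
#   @assert_raises_cudnn_not_satisfied(min_version='7.6.0')
#   def test_op_requiring_recent_cudnn():
#       ...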
def with_seed(seed=None):
"""
A decorator for test functions that manages rng seeds.
Parameters
----------
seed : the seed to pass to np.random and mx.random
    This test decorator sets the np, mx and python random seeds identically
prior to each test, then outputs those seeds if the test fails or
if the test requires a fixed seed (as a reminder to make the test
more robust against random data).
@with_seed()
def test_ok_with_random_data():
...
@with_seed(1234)
def test_not_ok_with_random_data():
...
Use of the @with_seed() decorator for all tests creates
    test isolation and reproducibility of failures. When a
test fails, the decorator outputs the seed used. The user
can then set the environment variable MXNET_TEST_SEED to
the value reported, then rerun the test with:
pytest --verbose --capture=no <test_module_name.py>::<failing_test>
To run a test repeatedly, set MXNET_TEST_COUNT=<NNN> in the environment.
To see the seeds of even the passing tests, add '--log-level=DEBUG' to pytest.
"""
def test_helper(orig_test):
@functools.wraps(orig_test)
def test_new(*args, **kwargs):
test_count = int(os.getenv('MXNET_TEST_COUNT', '1'))
env_seed_str = os.getenv('MXNET_TEST_SEED')
for i in range(test_count):
if seed is not None:
this_test_seed = seed
log_level = logging.INFO
elif env_seed_str is not None:
this_test_seed = int(env_seed_str)
log_level = logging.INFO
else:
this_test_seed = np.random.randint(0, np.iinfo(np.int32).max)
log_level = logging.DEBUG
post_test_state = np.random.get_state()
np.random.seed(this_test_seed)
mx.random.seed(this_test_seed)
random.seed(this_test_seed)
logger = default_logger()
# 'pytest --logging-level=DEBUG' shows this msg even with an ensuing core dump.
test_count_msg = '{} of {}: '.format(i+1,test_count) if test_count > 1 else ''
test_msg = ('{}Setting test np/mx/python random seeds, use MXNET_TEST_SEED={}'
' to reproduce.').format(test_count_msg, this_test_seed)
logger.log(log_level, test_msg)
try:
orig_test(*args, **kwargs)
except:
# With exceptions, repeat test_msg at INFO level to be sure it's seen.
if log_level < logging.INFO:
logger.info(test_msg)
raise
finally:
np.random.set_state(post_test_state)
return test_new
return test_helper
def setup_module():
"""
A function with a 'magic name' executed automatically before each pytest module
(file of tests) that helps reproduce a test segfault by setting and outputting the rng seeds.
The segfault-debug procedure on a module called test_module.py is:
1. run "pytest --verbose test_module.py". A seg-faulting output might be:
[INFO] np, mx and python random seeds = 4018804151
test_module.test1 ... ok
test_module.test2 ... Illegal instruction (core dumped)
2. Copy the module-starting seed into the next command, then run:
MXNET_MODULE_SEED=4018804151 pytest --logging-level=DEBUG --verbose test_module.py
Output might be:
[WARNING] **** module-level seed is set: all tests running deterministically ****
[INFO] np, mx and python random seeds = 4018804151
test_module.test1 ... [DEBUG] np and mx random seeds = 3935862516
ok
test_module.test2 ... [DEBUG] np and mx random seeds = 1435005594
Illegal instruction (core dumped)
3. Copy the segfaulting-test seed into the command:
MXNET_TEST_SEED=1435005594 pytest --logging-level=DEBUG --verbose test_module.py:test2
Output might be:
[INFO] np, mx and python random seeds = 2481884723
test_module.test2 ... [DEBUG] np and mx random seeds = 1435005594
Illegal instruction (core dumped)
3. Finally reproduce the segfault directly under gdb (might need additional os packages)
by editing the bottom of test_module.py to be
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
test2()
MXNET_TEST_SEED=1435005594 gdb -ex r --args python test_module.py
4. When finished debugging the segfault, remember to unset any exported MXNET_ seed
variables in the environment to return to non-deterministic testing (a good thing).
"""
module_seed_str = os.getenv('MXNET_MODULE_SEED')
logger = default_logger()
if module_seed_str is None:
seed = np.random.randint(0, np.iinfo(np.int32).max)
else:
seed = int(module_seed_str)
logger.warn('*** module-level seed is set: all tests running deterministically ***')
logger.info('Setting module np/mx/python random seeds, use MXNET_MODULE_SEED=%s to reproduce.', seed)
np.random.seed(seed)
mx.random.seed(seed)
random.seed(seed)
# The MXNET_TEST_SEED environment variable will override MXNET_MODULE_SEED for tests with
# the 'with_seed()' decoration. Inform the user of this once here at the module level.
if os.getenv('MXNET_TEST_SEED') is not None:
logger.warn('*** test-level seed set: all "@with_seed()" tests run deterministically ***')
def teardown_module():
"""
A function with a 'magic name' executed automatically after each pytest test module.
It waits for all operations in one file to finish before carrying on the next.
"""
mx.nd.waitall()
def run_in_spawned_process(func, env, *args):
"""
Helper function to run a test in its own process.
Avoids issues with Singleton- or otherwise-cached environment variable lookups in the backend.
Adds a seed as first arg to propagate determinism.
Parameters
----------
func : function to run in a spawned process.
env : dict of additional environment values to set temporarily in the environment before exec.
args : args to pass to the function.
Returns
-------
Whether the python version supports running the function as a spawned process.
This routine calculates a random seed and passes it into the test as a first argument. If the
test uses random values, it should include an outer 'with random_seed(seed):'. If the
test needs to return values to the caller, consider use of shared variable arguments.
"""
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
return False
else:
seed = np.random.randint(0,1024*1024*1024)
orig_environ = os.environ.copy()
try:
for (key, value) in env.items():
os.environ[key] = str(value)
# Prepend seed as first arg
p = mpctx.Process(target=func, args=(seed,)+args)
p.start()
p.join()
assert p.exitcode == 0, "Non-zero exit code %d from %s()." % (p.exitcode, func.__name__)
finally:
os.environ.clear()
os.environ.update(orig_environ)
return True
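# A minimal usage sketch for run_in_spawned_process(). The target function, the env value and
# the shape below are illustrative assumptions, not part of this module. The helper prepends
# the chosen seed as the first positional argument, and the target must be a module-level
# (picklable) function because the child process is started with the 'spawn' method.
#
#     def _check_uniform_shape(seed, shape):
#         with random_seed(seed):
#             assert mx.nd.random.uniform(shape=shape).shape == shape
#
#     def test_uniform_in_subprocess():
#         run_in_spawned_process(_check_uniform_shape,
#                                {'MXNET_ENGINE_TYPE': 'NaiveEngine'},
#                                (2, 2))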
def retry(n):
"""Retry n times before failing for stochastic test cases."""
# TODO(szha): replace with flaky
# https://github.com/apache/incubator-mxnet/issues/17803
assert n > 0
def test_helper(orig_test):
@functools.wraps(orig_test)
def test_new(*args, **kwargs):
"""Wrapper for tests function."""
for _ in range(n):
try:
orig_test(*args, **kwargs)
except AssertionError as e:
err = e
continue
return
raise err
return test_new
return test_helper<|fim▁end|> | |
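# A minimal usage sketch for retry(): stack it with with_seed() so that every attempt of a
# flaky, randomized test runs under a logged, reproducible seed. The test body here is an
# illustrative assumption, not an existing test.
#
#     @retry(3)
#     @with_seed()
#     def test_flaky_random_op():
#         x = mx.nd.random.uniform(shape=(1000,))
#         assert abs(x.asnumpy().mean() - 0.5) < 0.1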
#!/usr/bin/python3
# File: etl_voter_north_carolina.py
#
# Copyright (C) 2019 by Compassion International. All rights reserved.
# License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.
# This is free software: you are free to change and redistribute it.
# There is NO WARRANTY, to the extent permitted by law.
"""
ETL North Carolina voter registration files
1. Read state-wide tab-delimited file
2. Narrow down the columns
3. Output all counties to a single CSV file
Source data available here:
https://www.ncsbe.gov/data-stats/other-election-related-data
http://dl.ncsbe.gov/data/ncvoter_Statewide.zip
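Example preparation and invocation (illustrative; the unzipped file name is an assumption
and may differ between data releases):
  wget http://dl.ncsbe.gov/data/ncvoter_Statewide.zip
  unzip ncvoter_Statewide.zip   # expected to yield ncvoter_Statewide.txt
  python3 etl_voter_north_carolina.py ncvoter_Statewide.txt nc.csv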
"""
import glob
import sys
import pandas as pd
def groupby(df, col):
gb = df.groupby(col)[[col]].count()
print(gb)
def go():
"""The main loop"""
    if len(sys.argv) != 3:
print('As arguments pass (1) the state-wide NC voter registration file and (2) the output .csv filename.')
print('Example: python3 %s ncvoter_Statewide.txt nc.csv' % sys.argv[0])
sys.exit(1)
in_fn = sys.argv[1]
out_fn = sys.argv[2]
usecols = [3, 9, 10, 11, 12, 25, 26, 28]
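    # The selected positions are expected to include the columns summarized below (status_cd,
    # race_code, ethnic_code, gender_code) plus a few identifying fields; the exact indices
    # follow the published ncvoter file layout and may shift between data releases.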
print('Reading tab-delimited file: %s' % in_fn)
    df = pd.read_csv(in_fn, sep='\t', usecols=usecols)
    print('Row count: {:,}'.format(df.shape[0]))
groupby(df, 'status_cd')
groupby(df, 'race_code')
groupby(df, 'ethnic_code')
groupby(df, 'gender_code')
print('Writing to CSV file: %s' % out_fn)
df.to_csv(out_fn, index=False)
go()
// File: KafkaIOTest.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.sdk.io.kafka;
import static org.apache.beam.sdk.metrics.MetricResultsMatchers.attemptedMetricsResult;
import static org.apache.beam.sdk.transforms.display.DisplayDataMatchers.hasDisplayItem;
import static org.hamcrest.Matchers.hasItem;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import javax.annotation.Nullable;
import org.apache.beam.sdk.Pipeline.PipelineExecutionException;
import org.apache.beam.sdk.PipelineResult;
import org.apache.beam.sdk.coders.BigEndianIntegerCoder;
import org.apache.beam.sdk.coders.BigEndianLongCoder;
import org.apache.beam.sdk.coders.CoderRegistry;
import org.apache.beam.sdk.coders.InstantCoder;
import org.apache.beam.sdk.coders.StringUtf8Coder;
import org.apache.beam.sdk.coders.VarLongCoder;
import org.apache.beam.sdk.io.Read;
import org.apache.beam.sdk.io.UnboundedSource;
import org.apache.beam.sdk.io.UnboundedSource.UnboundedReader;
import org.apache.beam.sdk.io.kafka.serialization.InstantDeserializer;
import org.apache.beam.sdk.metrics.GaugeResult;
import org.apache.beam.sdk.metrics.MetricName;
import org.apache.beam.sdk.metrics.MetricNameFilter;
import org.apache.beam.sdk.metrics.MetricQueryResults;
import org.apache.beam.sdk.metrics.MetricResult;
import org.apache.beam.sdk.metrics.MetricsFilter;
import org.apache.beam.sdk.metrics.SinkMetrics;
import org.apache.beam.sdk.metrics.SourceMetrics;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.testing.PAssert;
import org.apache.beam.sdk.testing.TestPipeline;
import org.apache.beam.sdk.transforms.Count;
import org.apache.beam.sdk.transforms.Distinct;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.Flatten;
import org.apache.beam.sdk.transforms.Max;
import org.apache.beam.sdk.transforms.Min;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.transforms.SerializableFunction;
import org.apache.beam.sdk.transforms.Values;
import org.apache.beam.sdk.transforms.display.DisplayData;
import org.apache.beam.sdk.util.CoderUtils;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.PCollectionList;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.utils.Utils;
import org.hamcrest.collection.IsIterableContainingInAnyOrder;
import org.hamcrest.collection.IsIterableWithSize;
import org.joda.time.Instant;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Tests of {@link KafkaIO}.
* Run with 'mvn test -Dkafka.clients.version=0.10.1.1',
* or 'mvn test -Dkafka.clients.version=0.9.0.1' for either Kafka client version.
*/
@RunWith(JUnit4.class)
public class KafkaIOTest {
private static final Logger LOG = LoggerFactory.getLogger(KafkaIOTest.class);
/*
* The tests below borrow code and structure from CountingSourceTest. In addition verifies
* the reader interleaves the records from multiple partitions.
*
* Other tests to consider :
* - test KafkaRecordCoder
*/
@Rule
public final transient TestPipeline p = TestPipeline.create();
@Rule
public ExpectedException thrown = ExpectedException.none();
  // Update mock consumer with records distributed among the given topics, each with the given number
// of partitions. Records are assigned in round-robin order among the partitions.
private static MockConsumer<byte[], byte[]> mkMockConsumer(
List<String> topics, int partitionsPerTopic, int numElements,
OffsetResetStrategy offsetResetStrategy) {
final List<TopicPartition> partitions = new ArrayList<>();
final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> records = new HashMap<>();
Map<String, List<PartitionInfo>> partitionMap = new HashMap<>();
for (String topic : topics) {
List<PartitionInfo> partIds = new ArrayList<>(partitionsPerTopic);
for (int i = 0; i < partitionsPerTopic; i++) {
TopicPartition tp = new TopicPartition(topic, i);
partitions.add(tp);
partIds.add(new PartitionInfo(topic, i, null, null, null));
records.put(tp, new ArrayList<ConsumerRecord<byte[], byte[]>>());
}
partitionMap.put(topic, partIds);
}
int numPartitions = partitions.size();
final long[] offsets = new long[numPartitions];
for (int i = 0; i < numElements; i++) {
int pIdx = i % numPartitions;
TopicPartition tp = partitions.get(pIdx);
records.get(tp).add(
new ConsumerRecord<>(
tp.topic(),
tp.partition(),
offsets[pIdx]++,
ByteBuffer.wrap(new byte[4]).putInt(i).array(), // key is 4 byte record id
ByteBuffer.wrap(new byte[8]).putLong(i).array())); // value is 8 byte record id
}
// This is updated when reader assigns partitions.
final AtomicReference<List<TopicPartition>> assignedPartitions =
new AtomicReference<>(Collections.<TopicPartition>emptyList());
final MockConsumer<byte[], byte[]> consumer =
new MockConsumer<byte[], byte[]>(offsetResetStrategy) {
// override assign() in order to set offset limits & to save assigned partitions.
          // The '@Override' keyword is intentionally omitted so this compiles against both
          // Kafka client 0.9 and 0.10:
          // 1. the SpEL-based reflection helper can find this method whether the parameter is a
          //    List or a Collection;
          // 2. List extends Collection, so super.assign() could resolve to either assign(List)
          //    or assign(Collection).
public void assign(final List<TopicPartition> assigned) {
super.assign(assigned);
assignedPartitions.set(ImmutableList.copyOf(assigned));
for (TopicPartition tp : assigned) {
updateBeginningOffsets(ImmutableMap.of(tp, 0L));
updateEndOffsets(ImmutableMap.of(tp, (long) records.get(tp).size()));
}
}
          // Overrides offsetsForTimes() in order to look up offsets by timestamp.
          // The '@Override' keyword is omitted because Kafka client versions before 0.10.1.0
          // do not have this method.
          // It should return Map<TopicPartition, OffsetAndTimestamp>, but versions before
          // 0.10.1.0 do not have the OffsetAndTimestamp class either, so return a raw type
          // and use reflection here.
@SuppressWarnings("unchecked")
public Map offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) {
HashMap<TopicPartition, Object> result = new HashMap<>();
try {
Class<?> cls = Class.forName("org.apache.kafka.clients.consumer.OffsetAndTimestamp");
// OffsetAndTimestamp(long offset, long timestamp)
Constructor constructor = cls.getDeclaredConstructor(long.class, long.class);
// In test scope, timestamp == offset.
for (Map.Entry<TopicPartition, Long> entry : timestampsToSearch.entrySet()) {
long maxOffset = offsets[partitions.indexOf(entry.getKey())];
Long offset = entry.getValue();
if (offset >= maxOffset) {
offset = null;
}
result.put(
entry.getKey(), constructor.newInstance(entry.getValue(), offset));
}
return result;
} catch (ClassNotFoundException | IllegalAccessException
| InstantiationException | NoSuchMethodException | InvocationTargetException e) {
throw new RuntimeException(e);
}
}
};
for (String topic : topics) {
consumer.updatePartitions(topic, partitionMap.get(topic));
}
// MockConsumer does not maintain any relationship between partition seek position and the
  // records added. e.g. if we add 10 records to a partition and then seek to the end of the
  // partition, MockConsumer is still going to return the 10 records in the next poll. It is
// our responsibility to make sure currently enqueued records sync with partition offsets.
// The following task will be called inside each invocation to MockConsumer.poll().
// We enqueue only the records with the offset >= partition's current position.
Runnable recordEnqueueTask = new Runnable() {
@Override
public void run() {
// add all the records with offset >= current partition position.
for (TopicPartition tp : assignedPartitions.get()) {
long curPos = consumer.position(tp);
for (ConsumerRecord<byte[], byte[]> r : records.get(tp)) {
if (r.offset() >= curPos) {
consumer.addRecord(r);
}
}
}
consumer.schedulePollTask(this);
}
};
consumer.schedulePollTask(recordEnqueueTask);
return consumer;
}
private static class ConsumerFactoryFn
implements SerializableFunction<Map<String, Object>, Consumer<byte[], byte[]>> {
private final List<String> topics;
private final int partitionsPerTopic;
private final int numElements;
private final OffsetResetStrategy offsetResetStrategy;
public ConsumerFactoryFn(List<String> topics,
int partitionsPerTopic,
int numElements,
OffsetResetStrategy offsetResetStrategy) {
this.topics = topics;
this.partitionsPerTopic = partitionsPerTopic;
this.numElements = numElements;
this.offsetResetStrategy = offsetResetStrategy;
}
@Override
public Consumer<byte[], byte[]> apply(Map<String, Object> config) {
return mkMockConsumer(topics, partitionsPerTopic, numElements, offsetResetStrategy);
}
}
private static KafkaIO.Read<Integer, Long> mkKafkaReadTransform(
int numElements,
@Nullable SerializableFunction<KV<Integer, Long>, Instant> timestampFn) {
return mkKafkaReadTransform(numElements, numElements, timestampFn);
}
/**
* Creates a consumer with two topics, with 10 partitions each.
   * numElements records are assigned round-robin across all 20 partitions.
*/
private static KafkaIO.Read<Integer, Long> mkKafkaReadTransform(
int numElements,
int maxNumRecords,
@Nullable SerializableFunction<KV<Integer, Long>, Instant> timestampFn) {
List<String> topics = ImmutableList.of("topic_a", "topic_b");
KafkaIO.Read<Integer, Long> reader = KafkaIO.<Integer, Long>read()
.withBootstrapServers("myServer1:9092,myServer2:9092")
.withTopics(topics)
.withConsumerFactoryFn(new ConsumerFactoryFn(
topics, 10, numElements, OffsetResetStrategy.EARLIEST)) // 20 partitions
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.withMaxNumRecords(maxNumRecords);
if (timestampFn != null) {
return reader.withTimestampFn(timestampFn);
} else {
return reader;
}
}
private static class AssertMultipleOf implements SerializableFunction<Iterable<Long>, Void> {
private final int num;
public AssertMultipleOf(int num) {
this.num = num;
}
@Override
public Void apply(Iterable<Long> values) {
for (Long v : values) {
assertEquals(0, v % num);
}
return null;
}
}
public static void addCountingAsserts(PCollection<Long> input, long numElements) {
// Count == numElements
// Unique count == numElements
// Min == 0
// Max == numElements-1
addCountingAsserts(input, numElements, numElements, 0L, numElements - 1);
}
public static void addCountingAsserts(
PCollection<Long> input, long count, long uniqueCount, long min, long max) {
PAssert
.thatSingleton(input.apply("Count", Count.<Long>globally()))
.isEqualTo(count);
PAssert
.thatSingleton(input.apply(Distinct.<Long>create())
.apply("UniqueCount", Count.<Long>globally()))
.isEqualTo(uniqueCount);
PAssert
.thatSingleton(input.apply("Min", Min.<Long>globally()))
.isEqualTo(min);
PAssert
.thatSingleton(input.apply("Max", Max.<Long>globally()))
.isEqualTo(max);
}
@Test
public void testUnboundedSource() {
int numElements = 1000;
PCollection<Long> input = p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, numElements);
p.run();
}
@Test
public void testUnreachableKafkaBrokers() {
// Expect an exception when the Kafka brokers are not reachable on the workers.
// We specify partitions explicitly so that splitting does not involve server interaction.
// Set request timeout to 10ms so that test does not take long.
thrown.expect(Exception.class);
thrown.expectMessage("Reader-0: Timeout while initializing partition 'test-0'");
int numElements = 1000;
PCollection<Long> input = p
.apply(KafkaIO.<Integer, Long>read()
.withBootstrapServers("8.8.8.8:9092") // Google public DNS ip.
.withTopicPartitions(ImmutableList.of(new TopicPartition("test", 0)))
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.updateConsumerProperties(ImmutableMap.<String, Object>of(
ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 10,
ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 5,
ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 8,
ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 8))
.withMaxNumRecords(10)
.withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, numElements);
p.run();
}
@Test
public void testUnboundedSourceWithSingleTopic() {
    // same as testUnboundedSource, but with a single topic
int numElements = 1000;
String topic = "my_topic";
KafkaIO.Read<Integer, Long> reader = KafkaIO.<Integer, Long>read()
.withBootstrapServers("none")
.withTopic("my_topic")
.withConsumerFactoryFn(new ConsumerFactoryFn(
ImmutableList.of(topic), 10, numElements, OffsetResetStrategy.EARLIEST))
.withMaxNumRecords(numElements)
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class);
PCollection<Long> input = p
.apply(reader.withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, numElements);
p.run();
}
@Test
public void testUnboundedSourceWithExplicitPartitions() {
int numElements = 1000;
List<String> topics = ImmutableList.of("test");
KafkaIO.Read<byte[], Long> reader = KafkaIO.<byte[], Long>read()
.withBootstrapServers("none")
.withTopicPartitions(ImmutableList.of(new TopicPartition("test", 5)))
.withConsumerFactoryFn(new ConsumerFactoryFn(
topics, 10, numElements, OffsetResetStrategy.EARLIEST)) // 10 partitions
.withKeyDeserializer(ByteArrayDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.withMaxNumRecords(numElements / 10);
PCollection<Long> input = p
.apply(reader.withoutMetadata())
.apply(Values.<Long>create());
// assert that every element is a multiple of 5.
PAssert
.that(input)
.satisfies(new AssertMultipleOf(5));
PAssert
.thatSingleton(input.apply(Count.<Long>globally()))
.isEqualTo(numElements / 10L);
p.run();
}
private static class ElementValueDiff extends DoFn<Long, Long> {
@ProcessElement
public void processElement(ProcessContext c) throws Exception {
c.output(c.element() - c.timestamp().getMillis());
}
}
@Test
public void testUnboundedSourceTimestamps() {
int numElements = 1000;
PCollection<Long> input = p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, numElements);
PCollection<Long> diffs = input
.apply("TimestampDiff", ParDo.of(new ElementValueDiff()))
.apply("DistinctTimestamps", Distinct.<Long>create());
// This assert also confirms that diffs only has one unique value.
PAssert.thatSingleton(diffs).isEqualTo(0L);
p.run();
}
private static class RemoveKafkaMetadata<K, V> extends DoFn<KafkaRecord<K, V>, KV<K, V>> {
@ProcessElement
public void processElement(ProcessContext ctx) throws Exception {
ctx.output(ctx.element().getKV());
}
}
@Test
public void testUnboundedSourceSplits() throws Exception {
int numElements = 1000;
int numSplits = 10;
// Coders must be specified explicitly here due to the way the transform
// is used in the test.
UnboundedSource<KafkaRecord<Integer, Long>, ?> initial =
mkKafkaReadTransform(numElements, null)
.withKeyDeserializerAndCoder(IntegerDeserializer.class, BigEndianIntegerCoder.of())
.withValueDeserializerAndCoder(LongDeserializer.class, BigEndianLongCoder.of())
.makeSource();
List<? extends UnboundedSource<KafkaRecord<Integer, Long>, ?>> splits =
initial.split(numSplits, p.getOptions());
assertEquals("Expected exact splitting", numSplits, splits.size());
long elementsPerSplit = numElements / numSplits;
assertEquals("Expected even splits", numElements, elementsPerSplit * numSplits);
PCollectionList<Long> pcollections = PCollectionList.empty(p);
for (int i = 0; i < splits.size(); ++i) {
pcollections = pcollections.and(
p.apply("split" + i, Read.from(splits.get(i)).withMaxNumRecords(elementsPerSplit))
.apply("Remove Metadata " + i, ParDo.of(new RemoveKafkaMetadata<Integer, Long>()))
.apply("collection " + i, Values.<Long>create()));
}
PCollection<Long> input = pcollections.apply(Flatten.<Long>pCollections());
addCountingAsserts(input, numElements);
p.run();
}
/**
* A timestamp function that uses the given value as the timestamp.
*/
private static class ValueAsTimestampFn
implements SerializableFunction<KV<Integer, Long>, Instant> {
@Override
public Instant apply(KV<Integer, Long> input) {
return new Instant(input.getValue());
}
}
// Kafka records are read in a separate thread inside the reader. As a result advance() might not
// read any records even from the mock consumer, especially for the first record.
// This is a helper method to loop until we read a record.
private static void advanceOnce(UnboundedReader<?> reader, boolean isStarted) throws IOException {
if (!isStarted && reader.start()) {
return;
}
while (!reader.advance()) {
      // very rarely will there be more than one attempt.
// In case of a bug we might end up looping forever, and test will fail with a timeout.
// Avoid hard cpu spinning in case of a test failure.
try {
Thread.sleep(1);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
}
@Test
public void testUnboundedSourceCheckpointMark() throws Exception {
    int numElements = 85; // 85 to make sure some partitions have more records than others.
// create a single split:
UnboundedSource<KafkaRecord<Integer, Long>, KafkaCheckpointMark> source =
mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.makeSource()
.split(1, PipelineOptionsFactory.create())
.get(0);
UnboundedReader<KafkaRecord<Integer, Long>> reader = source.createReader(null, null);
final int numToSkip = 20; // one from each partition.
// advance numToSkip elements
for (int i = 0; i < numToSkip; ++i) {
advanceOnce(reader, i > 0);
}
// Confirm that we get the expected element in sequence before checkpointing.
assertEquals(numToSkip - 1, (long) reader.getCurrent().getKV().getValue());
assertEquals(numToSkip - 1, reader.getCurrentTimestamp().getMillis());
// Checkpoint and restart, and confirm that the source continues correctly.
KafkaCheckpointMark mark = CoderUtils.clone(
source.getCheckpointMarkCoder(), (KafkaCheckpointMark) reader.getCheckpointMark());
reader = source.createReader(null, mark);
// Confirm that we get the next elements in sequence.
    // This also confirms that the reader interleaves records from the different partitions.
for (int i = numToSkip; i < numElements; i++) {
advanceOnce(reader, i > numToSkip);
assertEquals(i, (long) reader.getCurrent().getKV().getValue());
assertEquals(i, reader.getCurrentTimestamp().getMillis());
}
}
@Test
public void testUnboundedSourceCheckpointMarkWithEmptyPartitions() throws Exception {
// Similar to testUnboundedSourceCheckpointMark(), but verifies that source resumes
// properly from empty partitions, without missing messages added since checkpoint.
// Initialize consumer with fewer elements than number of partitions so that some are empty.
int initialNumElements = 5;
UnboundedSource<KafkaRecord<Integer, Long>, KafkaCheckpointMark> source =
mkKafkaReadTransform(initialNumElements, new ValueAsTimestampFn())
.makeSource()
.split(1, PipelineOptionsFactory.create())
.get(0);
UnboundedReader<KafkaRecord<Integer, Long>> reader = source.createReader(null, null);
for (int l = 0; l < initialNumElements; ++l) {
advanceOnce(reader, l > 0);
}
// Checkpoint and restart, and confirm that the source continues correctly.
KafkaCheckpointMark mark = CoderUtils.clone(
source.getCheckpointMarkCoder(), (KafkaCheckpointMark) reader.getCheckpointMark());
    // Create another source with a MockConsumer using OffsetResetStrategy.LATEST. This ensures
    // that the reader needs to explicitly seek to the first offset for partitions that were empty.
int numElements = 100; // all the 20 partitions will have elements
List<String> topics = ImmutableList.of("topic_a", "topic_b");
source = KafkaIO.<Integer, Long>read()
.withBootstrapServers("none")
.withTopics(topics)
.withConsumerFactoryFn(new ConsumerFactoryFn(
topics, 10, numElements, OffsetResetStrategy.LATEST))
.withKeyDeserializer(IntegerDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.withMaxNumRecords(numElements)
.withTimestampFn(new ValueAsTimestampFn())
.makeSource()
.split(1, PipelineOptionsFactory.create())
.get(0);
reader = source.createReader(null, mark);
// Verify in any order. As the partitions are unevenly read, the returned records are not in a
    // simple order. Note that testUnboundedSourceCheckpointMark() verifies round-robin order.
List<Long> expected = new ArrayList<>();
List<Long> actual = new ArrayList<>();
for (long i = initialNumElements; i < numElements; i++) {
advanceOnce(reader, i > initialNumElements);
expected.add(i);
actual.add(reader.getCurrent().getKV().getValue());
}
assertThat(actual, IsIterableContainingInAnyOrder.containsInAnyOrder(expected.toArray()));
}
@Test
public void testUnboundedSourceMetrics() {
int numElements = 1000;
String readStep = "readFromKafka";
p.apply(readStep,
mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata());
PipelineResult result = p.run();
String splitId = "0";
MetricName elementsRead = SourceMetrics.elementsRead().getName();
MetricName elementsReadBySplit = SourceMetrics.elementsReadBySplit(splitId).getName();
MetricName bytesRead = SourceMetrics.bytesRead().getName();
MetricName bytesReadBySplit = SourceMetrics.bytesReadBySplit(splitId).getName();
MetricName backlogElementsOfSplit = SourceMetrics.backlogElementsOfSplit(splitId).getName();
MetricName backlogBytesOfSplit = SourceMetrics.backlogBytesOfSplit(splitId).getName();
MetricQueryResults metrics = result.metrics().queryMetrics(
MetricsFilter.builder().build());
Iterable<MetricResult<Long>> counters = metrics.counters();
assertThat(counters, hasItem(attemptedMetricsResult(
elementsRead.namespace(),
elementsRead.name(),
readStep,
1000L)));
assertThat(counters, hasItem(attemptedMetricsResult(
elementsReadBySplit.namespace(),
elementsReadBySplit.name(),
readStep,
1000L)));
assertThat(counters, hasItem(attemptedMetricsResult(
bytesRead.namespace(),
bytesRead.name(),
readStep,
12000L)));
assertThat(counters, hasItem(attemptedMetricsResult(
bytesReadBySplit.namespace(),
bytesReadBySplit.name(),
readStep,
12000L)));
MetricQueryResults backlogElementsMetrics =
result.metrics().queryMetrics(
MetricsFilter.builder()
.addNameFilter(
MetricNameFilter.named(
backlogElementsOfSplit.namespace(),
backlogElementsOfSplit.name()))
.build());
// since gauge values may be inconsistent in some environments assert only on their existence.
assertThat(backlogElementsMetrics.gauges(),
IsIterableWithSize.<MetricResult<GaugeResult>>iterableWithSize(1));
MetricQueryResults backlogBytesMetrics =
result.metrics().queryMetrics(
MetricsFilter.builder()
.addNameFilter(
MetricNameFilter.named(
backlogBytesOfSplit.namespace(),
backlogBytesOfSplit.name()))
.build());
// since gauge values may be inconsistent in some environments assert only on their existence.
assertThat(backlogBytesMetrics.gauges(),
IsIterableWithSize.<MetricResult<GaugeResult>>iterableWithSize(1));
}
@Test
public void testSink() throws Exception {
// Simply read from kafka source and write to kafka sink. Then verify the records
// are correctly published to mock kafka producer.
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThread =
new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withKeySerializer(IntegerSerializer.class)
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
p.run();
completionThread.shutdown();
verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, false);
}
}
@Test
public void testValuesSink() throws Exception {
    // similar to testSink(), but uses the values() interface.
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThread =
new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(Values.<Long>create()) // there are no keys
.apply(KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey))
.values());
p.run();
completionThread.shutdown();
verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, true);
}
}
@Test
public void testEOSink() {
// testSink() with EOS enabled.
// This does not actually inject retries in a stage to test exactly-once-semantics.
// It mainly exercises the code in normal flow without retries.
    // Ideally we should test the EOS Sink by triggering replays of messages between stages.
    // It is not feasible to test such retries with the direct runner. When DoFnTester supports
// state, we can test KafkaEOWriter DoFn directly to ensure it handles retries correctly.
if (!ProducerSpEL.supportsTransactions()) {
LOG.warn("testEOSink() is disabled as Kafka client version does not support transactions.");
return;
}
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThread =
new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withKeySerializer(IntegerSerializer.class)
.withValueSerializer(LongSerializer.class)
.withEOS(1, "test")
.withConsumerFactoryFn(new ConsumerFactoryFn(
Lists.newArrayList(topic), 10, 10, OffsetResetStrategy.EARLIEST))
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
p.run();
completionThread.shutdown();
verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, false);
}
}
@Test
public void testSinkWithSendErrors() throws Throwable {
// similar to testSink(), except that up to 10 of the send calls to producer will fail
// asynchronously.
// TODO: Ideally we want the pipeline to run to completion by retrying bundles that fail.
// We limit the number of errors injected to 10 below. This would reflect a real streaming
    // pipeline. But I am not sure how to achieve that. For now expect an exception:
thrown.expect(InjectedErrorException.class);
thrown.expectMessage("Injected Error #1");
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThreadWithErrors =
new ProducerSendCompletionThread(producerWrapper.mockProducer, 10, 100).start();
String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply(KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withKeySerializer(IntegerSerializer.class)
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
try {
p.run();
} catch (PipelineExecutionException e) {
// throwing inner exception helps assert that first exception is thrown from the Sink
throw e.getCause().getCause();
} finally {
completionThreadWithErrors.shutdown();
}
}
}
@Test
public void testUnboundedSourceStartReadTime() {
assumeTrue(new ConsumerSpEL().hasOffsetsForTimes());
int numElements = 1000;
    // In this MockConsumer, each element's timestamp equals its offset, and there are 20
    // partitions. So this startTime lets the test read half of the elements.
int startTime = numElements / 20 / 2;
int maxNumRecords = numElements / 2;
PCollection<Long> input = p
.apply(mkKafkaReadTransform(numElements, maxNumRecords, new ValueAsTimestampFn())
.withStartReadTime(new Instant(startTime))
.withoutMetadata())
.apply(Values.<Long>create());
addCountingAsserts(input, maxNumRecords, maxNumRecords, maxNumRecords, numElements - 1);
p.run();
}
@Rule public ExpectedException noMessagesException = ExpectedException.none();
@Test
public void testUnboundedSourceStartReadTimeException() {
assumeTrue(new ConsumerSpEL().hasOffsetsForTimes());
noMessagesException.expect(RuntimeException.class);
int numElements = 1000;
    // In this MockConsumer, each element's timestamp equals its offset, and there are 20
    // partitions. So this startTime cannot read any element.
int startTime = numElements / 20;
p.apply(mkKafkaReadTransform(numElements, numElements, new ValueAsTimestampFn())
.withStartReadTime(new Instant(startTime))
.withoutMetadata())
.apply(Values.<Long>create());
p.run();
}
@Test
public void testSourceDisplayData() {
KafkaIO.Read<Integer, Long> read = mkKafkaReadTransform(10, null);
DisplayData displayData = DisplayData.from(read);
assertThat(displayData, hasDisplayItem("topics", "topic_a,topic_b"));
assertThat(displayData, hasDisplayItem("enable.auto.commit", false));
assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServer1:9092,myServer2:9092"));
assertThat(displayData, hasDisplayItem("auto.offset.reset", "latest"));
assertThat(displayData, hasDisplayItem("receive.buffer.bytes", 524288));
}
@Test
public void testSourceWithExplicitPartitionsDisplayData() {
KafkaIO.Read<byte[], Long> read = KafkaIO.<byte[], Long>read()
.withBootstrapServers("myServer1:9092,myServer2:9092")
.withTopicPartitions(ImmutableList.of(new TopicPartition("test", 5),
new TopicPartition("test", 6)))
.withConsumerFactoryFn(new ConsumerFactoryFn(
Lists.newArrayList("test"), 10, 10, OffsetResetStrategy.EARLIEST)) // 10 partitions
.withKeyDeserializer(ByteArrayDeserializer.class)
.withValueDeserializer(LongDeserializer.class);
DisplayData displayData = DisplayData.from(read);
assertThat(displayData, hasDisplayItem("topicPartitions", "test-5,test-6"));
assertThat(displayData, hasDisplayItem("enable.auto.commit", false));
assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServer1:9092,myServer2:9092"));
assertThat(displayData, hasDisplayItem("auto.offset.reset", "latest"));
assertThat(displayData, hasDisplayItem("receive.buffer.bytes", 524288));
}
@Test
public void testSinkDisplayData() {
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
KafkaIO.Write<Integer, Long> write = KafkaIO.<Integer, Long>write()
.withBootstrapServers("myServerA:9092,myServerB:9092")
.withTopic("myTopic")
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey));
DisplayData displayData = DisplayData.from(write);
assertThat(displayData, hasDisplayItem("topic", "myTopic"));
assertThat(displayData, hasDisplayItem("bootstrap.servers", "myServerA:9092,myServerB:9092"));
assertThat(displayData, hasDisplayItem("retries", 3));
}
}
// interface for testing coder inference
private interface DummyInterface<T> {
}
// interface for testing coder inference
private interface DummyNonparametricInterface {
}
// class for testing coder inference
private static class DeserializerWithInterfaces
implements DummyInterface<String>, DummyNonparametricInterface,
Deserializer<Long> {
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
}
@Override
public Long deserialize(String topic, byte[] bytes) {
return 0L;
}
@Override
public void close() {
}
}
  // class for which a coder cannot be inferred
private static class NonInferableObject {
}
// class for testing coder inference
private static class NonInferableObjectDeserializer
implements Deserializer<NonInferableObject> {
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
}
@Override
public NonInferableObject deserialize(String topic, byte[] bytes) {
return new NonInferableObject();
}
@Override
public void close() {
}
}
@Test
public void testInferKeyCoder() {
CoderRegistry registry = CoderRegistry.createDefault();
assertTrue(KafkaIO.inferCoder(registry, LongDeserializer.class).getValueCoder()
instanceof VarLongCoder);
assertTrue(KafkaIO.inferCoder(registry, StringDeserializer.class).getValueCoder()
instanceof StringUtf8Coder);
assertTrue(KafkaIO.inferCoder(registry, InstantDeserializer.class).getValueCoder()
instanceof InstantCoder);
assertTrue(KafkaIO.inferCoder(registry, DeserializerWithInterfaces.class).getValueCoder()
instanceof VarLongCoder);
}
@Rule public ExpectedException cannotInferException = ExpectedException.none();
@Test
public void testInferKeyCoderFailure() throws Exception {
cannotInferException.expect(RuntimeException.class);
CoderRegistry registry = CoderRegistry.createDefault();
KafkaIO.inferCoder(registry, NonInferableObjectDeserializer.class);
}
@Test
public void testSinkMetrics() throws Exception {
// Simply read from kafka source and write to kafka sink. Then verify the metrics are reported.
int numElements = 1000;
try (MockProducerWrapper producerWrapper = new MockProducerWrapper()) {
ProducerSendCompletionThread completionThread =
new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
      String topic = "test";
p
.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn())
.withoutMetadata())
.apply("writeToKafka", KafkaIO.<Integer, Long>write()
.withBootstrapServers("none")
.withTopic(topic)
.withKeySerializer(IntegerSerializer.class)
.withValueSerializer(LongSerializer.class)
.withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
PipelineResult result = p.run();
MetricName elementsWritten = SinkMetrics.elementsWritten().getName();
MetricQueryResults metrics = result.metrics().queryMetrics(
MetricsFilter.builder()
.addNameFilter(MetricNameFilter.inNamespace(elementsWritten.namespace()))
.build());
assertThat(metrics.counters(), hasItem(
attemptedMetricsResult(
elementsWritten.namespace(),
elementsWritten.name(),
"writeToKafka",
1000L)));
completionThread.shutdown();
}
}
private static void verifyProducerRecords(MockProducer<Integer, Long> mockProducer,
String topic, int numElements, boolean keyIsAbsent) {
// verify that appropriate messages are written to kafka
List<ProducerRecord<Integer, Long>> sent = mockProducer.history();
// sort by values
Collections.sort(sent, new Comparator<ProducerRecord<Integer, Long>>() {
@Override
public int compare(ProducerRecord<Integer, Long> o1, ProducerRecord<Integer, Long> o2) {
return Long.compare(o1.value(), o2.value());
}
});
for (int i = 0; i < numElements; i++) {
ProducerRecord<Integer, Long> record = sent.get(i);
assertEquals(topic, record.topic());
if (keyIsAbsent) {
assertNull(record.key());
} else {
assertEquals(i, record.key().intValue());
}
assertEquals(i, record.value().longValue());
}
}
/**
   * A wrapper over MockProducer. It also places the mock producer in the global
   * MOCK_PRODUCER_MAP. The map is needed so that the producer returned by ProducerFactoryFn
   * during the pipeline can be used for verification after the test. We also override the
   * {@code flush()} method in MockProducer so that the test can control the behavior of the
   * {@code send()} method (e.g. to inject errors).
*/
private static class MockProducerWrapper implements AutoCloseable {
final String producerKey;
final MockProducer<Integer, Long> mockProducer;
    // MockProducer has a "closed" method starting with version 0.11.
private static Method closedMethod;
static {
try {
closedMethod = MockProducer.class.getMethod("closed");
} catch (NoSuchMethodException e) {
closedMethod = null;
}
}
MockProducerWrapper() {
producerKey = String.valueOf(ThreadLocalRandom.current().nextLong());
mockProducer = new MockProducer<Integer, Long>(
false, // disable synchronous completion of send. see ProducerSendCompletionThread below.
new IntegerSerializer(),
new LongSerializer()) {
        // override flush() so that it does not complete all the waiting sends, giving
        // ProducerSendCompletionThread a chance to inject errors.
@Override
public void flush() {
while (completeNext()) {
// there are some uncompleted records. let the completion thread handle them.
try {
Thread.sleep(10);
} catch (InterruptedException e) {
// ok to retry.
}
}
}
};
// Add the producer to the global map so that producer factory function can access it.
assertNull(MOCK_PRODUCER_MAP.putIfAbsent(producerKey, mockProducer));
}
public void close() {
MOCK_PRODUCER_MAP.remove(producerKey);
try {
if (closedMethod == null || !((Boolean) closedMethod.invoke(mockProducer))) {
mockProducer.close();
}
} catch (Exception e) { // Not expected.
throw new RuntimeException(e);
}
}
}
private static final ConcurrentMap<String, MockProducer<Integer, Long>> MOCK_PRODUCER_MAP =
new ConcurrentHashMap<>();
private static class ProducerFactoryFn
implements SerializableFunction<Map<String, Object>, Producer<Integer, Long>> {
final String producerKey;
ProducerFactoryFn(String producerKey) {
this.producerKey = producerKey;
}
@SuppressWarnings("unchecked")
@Override
public Producer<Integer, Long> apply(Map<String, Object> config) {
// Make sure the config is correctly set up for serializers.
// There may not be a key serializer if we're interested only in values.
if (config.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG) != null) {
Utils.newInstance(
((Class<?>) config.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG))
.asSubclass(Serializer.class)
).configure(config, true);
}
Utils.newInstance(
((Class<?>) config.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG))
.asSubclass(Serializer.class)
).configure(config, false);
      // Returning the same producer to each instance in a pipeline seems to work fine currently.
// If DirectRunner creates multiple DoFn instances for sinks, we might need to handle
// it appropriately. I.e. allow multiple producers for each producerKey and concatenate
// all the messages written to each producer for verification after the pipeline finishes.
return MOCK_PRODUCER_MAP.get(producerKey);
}
}
private static class InjectedErrorException extends RuntimeException {
InjectedErrorException(String message) {
super(message);
}
}
/**
* We start MockProducer with auto-completion disabled. That implies a record is not marked sent
* until #completeNext() is called on it. This class starts a thread to asynchronously 'complete'
   * the sends. During completion, we can also make those requests fail. This error injection
* is used in one of the tests.
*/
private static class ProducerSendCompletionThread {
private final MockProducer<Integer, Long> mockProducer;
private final int maxErrors;
private final int errorFrequency;
private final AtomicBoolean done = new AtomicBoolean(false);
private final ExecutorService injectorThread;
private int numCompletions = 0;
ProducerSendCompletionThread(MockProducer<Integer, Long> mockProducer) {
// complete everything successfully
this(mockProducer, 0, 0);
}
ProducerSendCompletionThread(MockProducer<Integer, Long> mockProducer,
int maxErrors,
int errorFrequency) {
this.mockProducer = mockProducer;
this.maxErrors = maxErrors;
this.errorFrequency = errorFrequency;
injectorThread = Executors.newSingleThreadExecutor();
}
ProducerSendCompletionThread start() {
injectorThread.submit(new Runnable() {
@Override
public void run() {
int errorsInjected = 0;
while (!done.get()) {
boolean successful;
if (errorsInjected < maxErrors && ((numCompletions + 1) % errorFrequency) == 0) {
successful = mockProducer.errorNext(
new InjectedErrorException("Injected Error #" + (errorsInjected + 1)));
if (successful) {
errorsInjected++;
}
} else {
successful = mockProducer.completeNext();
}
if (successful) {
numCompletions++;
} else {
// wait a bit since there are no unsent records
try {
Thread.sleep(1);
} catch (InterruptedException e) {
// ok to retry.
}
}
}
}
});
return this;
}
void shutdown() {
done.set(true);
injectorThread.shutdown();
try {
assertTrue(injectorThread.awaitTermination(10, TimeUnit.SECONDS));
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}
}
# File: test_physical_host_plugin.py
# Copyright (c) 2013 Bull.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from unittest import mock
import ddt
from novaclient import client as nova_client
from novaclient import exceptions as nova_exceptions
from oslo_config import cfg
from oslo_config import fixture as conf_fixture
import testtools
from blazar import context
from blazar.db import api as db_api
from blazar.db import exceptions as db_exceptions
from blazar.db import utils as db_utils
from blazar.manager import exceptions as manager_exceptions
from blazar.manager import service
from blazar.plugins import oshosts as plugin
from blazar.plugins.oshosts import host_plugin
from blazar import tests
from blazar.utils.openstack import base
from blazar.utils.openstack import nova
from blazar.utils.openstack import placement
from blazar.utils import trusts
CONF = cfg.CONF
class AggregateFake(object):
def __init__(self, i, name, hosts):
self.id = i
self.name = name
self.hosts = hosts
class PhysicalHostPluginSetupOnlyTestCase(tests.TestCase):
def setUp(self):
super(PhysicalHostPluginSetupOnlyTestCase, self).setUp()
self.cfg = self.useFixture(conf_fixture.Config(CONF))
self.cfg.config(os_admin_username='fake-user')
self.cfg.config(os_admin_password='fake-passwd')
self.cfg.config(os_admin_user_domain_name='fake-user-domain')
self.cfg.config(os_admin_project_name='fake-pj-name')
self.cfg.config(os_admin_project_domain_name='fake-pj-domain')
self.context = context
self.patch(self.context, 'BlazarContext')
self.patch(base, 'url_for').return_value = 'http://foo.bar'
self.host_plugin = host_plugin
self.fake_phys_plugin = self.host_plugin.PhysicalHostPlugin()
self.nova = nova
self.rp_create = self.patch(self.nova.ReservationPool, 'create')
self.db_api = db_api
self.db_host_extra_capability_get_all_per_host = (
self.patch(self.db_api, 'host_extra_capability_get_all_per_host'))
def test_configuration(self):
self.assertEqual("fake-user", self.fake_phys_plugin.username)
self.assertEqual("fake-passwd", self.fake_phys_plugin.password)
self.assertEqual("fake-user-domain",
self.fake_phys_plugin.user_domain_name)
self.assertEqual("fake-pj-name", self.fake_phys_plugin.project_name)
self.assertEqual("fake-pj-domain",
self.fake_phys_plugin.project_domain_name)
def test__get_extra_capabilities_with_values(self):
self.db_host_extra_capability_get_all_per_host.return_value = [
{'id': 1,
'capability_name': 'foo',
'capability_value': 'bar',
'other': 'value',
'computehost_id': 1
},
{'id': 2,
'capability_name': 'buzz',
'capability_value': 'word',
'computehost_id': 1
}]
res = self.fake_phys_plugin._get_extra_capabilities(1)
self.assertEqual({'foo': 'bar', 'buzz': 'word'}, res)
def test__get_extra_capabilities_with_no_capabilities(self):
self.db_host_extra_capability_get_all_per_host.return_value = []
res = self.fake_phys_plugin._get_extra_capabilities(1)
self.assertEqual({}, res)
@ddt.ddt
class PhysicalHostPluginTestCase(tests.TestCase):
def setUp(self):
super(PhysicalHostPluginTestCase, self).setUp()
self.cfg = cfg
self.context = context
self.patch(self.context, 'BlazarContext')
self.nova_client = nova_client
self.nova_client = self.patch(self.nova_client, 'Client').return_value
self.service = service
self.manager = self.service.ManagerService()
self.fake_host_id = '1'
self.fake_host = {
'id': self.fake_host_id,
'hypervisor_hostname': 'hypvsr1',
'service_name': 'compute1',
'vcpus': 4,
'cpu_info': 'foo',
'hypervisor_type': 'xen',
'hypervisor_version': 1,
'memory_mb': 8192,
'local_gb': 10,
'trust_id': 'exxee111qwwwwe',
}
self.patch(base, 'url_for').return_value = 'http://foo.bar'
self.host_plugin = host_plugin
self.fake_phys_plugin = self.host_plugin.PhysicalHostPlugin()
self.db_api = db_api
self.db_utils = db_utils
self.db_host_get = self.patch(self.db_api, 'host_get')
self.db_host_get.return_value = self.fake_host
self.db_host_list = self.patch(self.db_api, 'host_list')
self.db_host_create = self.patch(self.db_api, 'host_create')
self.db_host_update = self.patch(self.db_api, 'host_update')
self.db_host_destroy = self.patch(self.db_api, 'host_destroy')
self.db_host_extra_capability_get_all_per_host = self.patch(
self.db_api, 'host_extra_capability_get_all_per_host')
self.db_host_extra_capability_get_all_per_name = self.patch(
self.db_api, 'host_extra_capability_get_all_per_name')
self.db_host_extra_capability_create = self.patch(
self.db_api, 'host_extra_capability_create')
self.db_host_extra_capability_update = self.patch(
self.db_api, 'host_extra_capability_update')
self.nova = nova
self.rp_create = self.patch(self.nova.ReservationPool, 'create')
self.patch(self.nova.ReservationPool, 'get_aggregate_from_name_or_id')
self.add_compute_host = self.patch(self.nova.ReservationPool,
'add_computehost')
self.remove_compute_host = self.patch(self.nova.ReservationPool,
'remove_computehost')
self.get_host_details = self.patch(self.nova.NovaInventory,
'get_host_details')
self.get_host_details.return_value = self.fake_host
self.get_servers_per_host = self.patch(
self.nova.NovaInventory, 'get_servers_per_host')
self.get_servers_per_host.return_value = None
self.get_extra_capabilities = self.patch(
self.fake_phys_plugin, '_get_extra_capabilities')
self.get_extra_capabilities.return_value = {
'foo': 'bar',
'buzz': 'word',
}
self.placement = placement
self.prov_create = self.patch(self.placement.BlazarPlacementClient,
'create_reservation_provider')
self.prov_create.return_value = {
"generation": 0,
"name": "blazar_foo",
"uuid": "7d2590ae-fb85-4080-9306-058b4c915e3f",
"parent_provider_uuid": "542df8ed-9be2-49b9-b4db-6d3183ff8ec8",
"root_provider_uuid": "542df8ed-9be2-49b9-b4db-6d3183ff8ec8"
}
self.prov_delete = self.patch(self.placement.BlazarPlacementClient,
'delete_reservation_provider')
self.fake_phys_plugin.setup(None)
self.trusts = trusts
self.trust_ctx = self.patch(self.trusts, 'create_ctx_from_trust')
self.trust_create = self.patch(self.trusts, 'create_trust')
self.ServerManager = nova.ServerManager
def test_get_host(self):
host = self.fake_phys_plugin.get_computehost(self.fake_host_id)
self.db_host_get.assert_called_once_with('1')
expected = self.fake_host.copy()
expected.update({'foo': 'bar', 'buzz': 'word'})
self.assertEqual(expected, host)
def test_get_host_without_extracapabilities(self):
self.get_extra_capabilities.return_value = {}
host = self.fake_phys_plugin.get_computehost(self.fake_host_id)
self.db_host_get.assert_called_once_with('1')
self.assertEqual(self.fake_host, host)
@testtools.skip('incorrect decorator')
def test_list_hosts(self):
self.fake_phys_plugin.list_computehosts({})
self.db_host_list.assert_called_once_with()
del self.service_utils
def test_create_host_without_extra_capabilities(self):
self.get_extra_capabilities.return_value = {}
host = self.fake_phys_plugin.create_computehost(self.fake_host)
self.db_host_create.assert_called_once_with(self.fake_host)
self.prov_create.assert_called_once_with('hypvsr1')
self.assertEqual(self.fake_host, host)
def test_create_host_with_extra_capabilities(self):
fake_host = self.fake_host.copy()
fake_host.update({'foo': 'bar'})
# NOTE(sbauza): 'id' will be pop'd, we need to keep track of it
fake_request = fake_host.copy()
fake_capa = {'computehost_id': '1',
'capability_name': 'foo',
'capability_value': 'bar',
}
self.get_extra_capabilities.return_value = {'foo': 'bar'}
self.db_host_create.return_value = self.fake_host
host = self.fake_phys_plugin.create_computehost(fake_request)
self.db_host_create.assert_called_once_with(self.fake_host)
self.prov_create.assert_called_once_with('hypvsr1')
self.db_host_extra_capability_create.assert_called_once_with(fake_capa)
self.assertEqual(fake_host, host)
def test_create_host_with_capabilities_too_long(self):
fake_host = self.fake_host.copy()
fake_host.update({'foo': 'bar'})
# NOTE(sbauza): 'id' will be pop'd, we need to keep track of it
fake_request = fake_host.copy()
long_key = ""
for i in range(65):
long_key += "0"
fake_request[long_key] = "foo"
self.assertRaises(manager_exceptions.ExtraCapabilityTooLong,
self.fake_phys_plugin.create_computehost,
fake_request)
def test_create_host_without_trust_id(self):
self.assertRaises(manager_exceptions.MissingTrustId,
self.fake_phys_plugin.create_computehost, {})
def test_create_host_without_host_id(self):
self.assertRaises(manager_exceptions.InvalidHost,
self.fake_phys_plugin.create_computehost,
{'trust_id': 'exxee111qwwwwe'})
def test_create_host_with_existing_vms(self):
self.get_servers_per_host.return_value = ['server1', 'server2']
self.assertRaises(manager_exceptions.HostHavingServers,
self.fake_phys_plugin.create_computehost,
self.fake_host)
def test_create_host_issuing_rollback(self):
def fake_db_host_create(*args, **kwargs):
raise db_exceptions.BlazarDBException
self.db_host_create.side_effect = fake_db_host_create
self.assertRaises(db_exceptions.BlazarDBException,
self.fake_phys_plugin.create_computehost,
self.fake_host)
self.prov_create.assert_called_once_with('hypvsr1')
self.prov_delete.assert_called_once_with('hypvsr1')
def test_create_host_having_issue_when_storing_extra_capability(self):
def fake_db_host_extra_capability_create(*args, **kwargs):
raise db_exceptions.BlazarDBException
fake_host = self.fake_host.copy()
fake_host.update({'foo': 'bar'})
fake_request = fake_host.copy()
self.get_extra_capabilities.return_value = {'foo': 'bar'}
self.db_host_create.return_value = self.fake_host
fake = self.db_host_extra_capability_create
fake.side_effect = fake_db_host_extra_capability_create
self.assertRaises(manager_exceptions.CantAddExtraCapability,
self.fake_phys_plugin.create_computehost,
fake_request)
def test_update_host(self):
host_values = {'foo': 'baz'}
self.db_host_extra_capability_get_all_per_name.return_value = [
{'id': 'extra_id1',
'computehost_id': self.fake_host_id,
'capability_name': 'foo',
'capability_value': 'bar'
},
]
self.get_reservations_by_host = self.patch(
self.db_utils, 'get_reservations_by_host_id')
self.get_reservations_by_host.return_value = []
self.fake_phys_plugin.update_computehost(self.fake_host_id,
host_values)
self.db_host_extra_capability_update.assert_called_once_with(
'extra_id1', {'capability_name': 'foo', 'capability_value': 'baz'})
def test_update_host_having_issue_when_storing_extra_capability(self):
def fake_db_host_extra_capability_update(*args, **kwargs):
raise RuntimeError
host_values = {'foo': 'baz'}
self.get_reservations_by_host = self.patch(
self.db_utils, 'get_reservations_by_host_id')
self.get_reservations_by_host.return_value = []
self.db_host_extra_capability_get_all_per_name.return_value = [
{'id': 'extra_id1',
'computehost_id': self.fake_host_id,
'capability_name': 'foo',
'capability_value': 'bar'
},
]
fake = self.db_host_extra_capability_update
fake.side_effect = fake_db_host_extra_capability_update
self.assertRaises(manager_exceptions.CantAddExtraCapability,
self.fake_phys_plugin.update_computehost,
self.fake_host_id, host_values)
def test_update_host_with_new_extra_capability(self):
host_values = {'qux': 'word'}
self.db_host_extra_capability_get_all_per_host.return_value = []
self.fake_phys_plugin.update_computehost(self.fake_host_id,
host_values)
self.db_host_extra_capability_create.assert_called_once_with({
'computehost_id': '1',
'capability_name': 'qux',
'capability_value': 'word'
})
def test_update_host_with_used_capability(self):
host_values = {'foo': 'buzz'}
self.db_host_extra_capability_get_all_per_name.return_value = [
{'id': 'extra_id1',
'computehost_id': self.fake_host_id,
'capability_name': 'foo',
'capability_value': 'bar'
},
]
fake_phys_reservation = {
'resource_type': plugin.RESOURCE_TYPE,
'resource_id': 'resource-1',
}
fake_get_reservations = self.patch(self.db_utils,
'get_reservations_by_host_id')
fake_get_reservations.return_value = [fake_phys_reservation]
fake_get_plugin_reservation = self.patch(self.db_utils,
'get_plugin_reservation')
fake_get_plugin_reservation.return_value = {
'resource_properties': '["==", "$foo", "bar"]'
}
self.assertRaises(manager_exceptions.CantAddExtraCapability,
self.fake_phys_plugin.update_computehost,
self.fake_host_id, host_values)
fake_get_plugin_reservation.assert_called_once_with(
plugin.RESOURCE_TYPE, 'resource-1')
def test_delete_host(self):
host_allocation_get_all = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = []
self.fake_phys_plugin.delete_computehost(self.fake_host_id)
self.db_host_destroy.assert_called_once_with(self.fake_host_id)
self.prov_delete.assert_called_once_with('hypvsr1')
self.get_servers_per_host.assert_called_once_with(
self.fake_host["hypervisor_hostname"])
def test_delete_host_reserved(self):
host_allocation_get_all = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = [
{
'id': u'dd305477-4df8-4547-87f6-69069ee546a6',
'compute_host_id': self.fake_host_id
}
]
self.assertRaises(manager_exceptions.CantDeleteHost,
self.fake_phys_plugin.delete_computehost,
self.fake_host_id)
def test_delete_host_having_vms(self):
host_allocation_get_all = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = []
self.get_servers_per_host.return_value = ['server1', 'server2']
self.assertRaises(manager_exceptions.HostHavingServers,
self.fake_phys_plugin.delete_computehost,
self.fake_host_id)
self.get_servers_per_host.assert_called_once_with(
self.fake_host["hypervisor_hostname"])
def test_delete_host_not_existing_in_db(self):
self.db_host_get.return_value = None
self.assertRaises(manager_exceptions.HostNotFound,
self.fake_phys_plugin.delete_computehost,
self.fake_host_id)
def test_delete_host_issuing_rollback(self):
def fake_db_host_destroy(*args, **kwargs):
raise db_exceptions.BlazarDBException
host_allocation_get_all = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = []
self.db_host_destroy.side_effect = fake_db_host_destroy
self.assertRaises(manager_exceptions.CantDeleteHost,
self.fake_phys_plugin.delete_computehost,
self.fake_host_id)
def test_list_allocations(self):
self.db_get_reserv_allocs = self.patch(
self.db_utils, 'get_reservation_allocations_by_host_ids')
# Expecting a list of (reservation_id, lease_id, host_id) tuples
self.db_get_reserv_allocs.return_value = [
('reservation-1', 'lease-1', 'host-1'),
('reservation-1', 'lease-1', 'host-2'),
('reservation-2', 'lease-1', 'host-2'),
('reservation-2', 'lease-1', 'host-3'),
('reservation-3', 'lease-2', 'host-1'),
]
expected = [
{
'resource_id': 'host-1',
'reservations': [
{'id': 'reservation-1', 'lease_id': 'lease-1'},
{'id': 'reservation-3', 'lease_id': 'lease-2'},
]
},
{
'resource_id': 'host-2',
'reservations': [
{'id': 'reservation-1', 'lease_id': 'lease-1'},
{'id': 'reservation-2', 'lease_id': 'lease-1'},
]
},
{
'resource_id': 'host-3',
'reservations': [
{'id': 'reservation-2', 'lease_id': 'lease-1'},
]
}
]
ret = self.fake_phys_plugin.list_allocations({})
# Sort returned value to use assertListEqual
for r in ret:
r['reservations'].sort(key=lambda x: x['id'])
ret.sort(key=lambda x: x['resource_id'])
self.assertListEqual(expected, ret)
def test_list_allocations_with_lease_id(self):
self.db_get_reserv_allocs = self.patch(
self.db_utils, 'get_reservation_allocations_by_host_ids')
# Expecting a list of (reservation_id, lease_id, host_id) tuples
self.db_get_reserv_allocs.return_value = [
('reservation-1', 'lease-1', 'host-1'),
('reservation-1', 'lease-1', 'host-2'),
('reservation-2', 'lease-1', 'host-2'),
('reservation-2', 'lease-1', 'host-3'),
]
expected = [
{
'resource_id': 'host-1',
'reservations': [
{'id': 'reservation-1', 'lease_id': 'lease-1'},
]
},
{
'resource_id': 'host-2',
'reservations': [
{'id': 'reservation-1', 'lease_id': 'lease-1'},
{'id': 'reservation-2', 'lease_id': 'lease-1'},
]
},
{
'resource_id': 'host-3',
'reservations': [
{'id': 'reservation-2', 'lease_id': 'lease-1'},
]
}
]
ret = self.fake_phys_plugin.list_allocations({'lease_id': 'lease-1'})
# Sort returned value to use assertListEqual
for r in ret:
r['reservations'].sort(key=lambda x: x['id'])
ret.sort(key=lambda x: x['resource_id'])
self.assertListEqual(expected, ret)
def test_list_allocations_with_reservation_id(self):
self.db_get_reserv_allocs = self.patch(
self.db_utils, 'get_reservation_allocations_by_host_ids')
# Expecting a list of (reservation_id, lease_id, host_id) tuples
self.db_get_reserv_allocs.return_value = [
('reservation-1', 'lease-1', 'host-1'),
('reservation-1', 'lease-1', 'host-2'),
]
expected = [
{
'resource_id': 'host-1',
'reservations': [
{'id': 'reservation-1', 'lease_id': 'lease-1'},
]
},
{
'resource_id': 'host-2',
'reservations': [
{'id': 'reservation-1', 'lease_id': 'lease-1'},
]
},
]
ret = self.fake_phys_plugin.list_allocations(
{'reservation_id': 'reservation-1'})
# Sort returned value to use assertListEqual
for r in ret:
r['reservations'].sort(key=lambda x: x['id'])
ret.sort(key=lambda x: x['resource_id'])
self.assertListEqual(expected, ret)
def test_get_allocations(self):
self.db_get_reserv_allocs = self.patch(
self.db_utils, 'get_reservation_allocations_by_host_ids')
# Expecting a list of (reservation_id, lease_id, host_id) tuples
self.db_get_reserv_allocs.return_value = [
('reservation-1', 'lease-1', 'host-1'),
('reservation-1', 'lease-1', 'host-2'),
('reservation-2', 'lease-1', 'host-2'),
('reservation-2', 'lease-1', 'host-3'),
('reservation-3', 'lease-2', 'host-1'),
]
expected = {
'resource_id': 'host-1',
'reservations': [
{'id': 'reservation-1', 'lease_id': 'lease-1'},
{'id': 'reservation-3', 'lease_id': 'lease-2'},
]
}
ret = self.fake_phys_plugin.get_allocations('host-1', {})
# Sort the reservations list so the dict comparison is deterministic
ret['reservations'].sort(key=lambda x: x['id'])
self.assertDictEqual(expected, ret)
def test_get_allocations_with_lease_id(self):
self.db_get_reserv_allocs = self.patch(
self.db_utils, 'get_reservation_allocations_by_host_ids')
# Expecting a list of (reservation_id, lease_id, host_id) tuples
self.db_get_reserv_allocs.return_value = [
('reservation-1', 'lease-1', 'host-1'),
]
expected = {
'resource_id': 'host-1',
'reservations': [
{'id': 'reservation-1', 'lease_id': 'lease-1'},
]
}
ret = self.fake_phys_plugin.get_allocations('host-1',
{'lease_id': 'lease-1'})
# Sort the reservations list so the dict comparison is deterministic
ret['reservations'].sort(key=lambda x: x['id'])
self.assertDictEqual(expected, ret)
def test_get_allocations_with_reservation_id(self):
self.db_get_reserv_allocs = self.patch(
self.db_utils, 'get_reservation_allocations_by_host_ids')
# Expecting a list of (reservation_id, lease_id, host_id) tuples
self.db_get_reserv_allocs.return_value = [
('reservation-1', 'lease-1', 'host-1'),
]
expected = {
'resource_id': 'host-1',
'reservations': [
{'id': 'reservation-1', 'lease_id': 'lease-1'},
]
}
ret = self.fake_phys_plugin.get_allocations(
'host-1', {'reservation_id': 'reservation-1'})
# Sort the reservations list so the dict comparison is deterministic
ret['reservations'].sort(key=lambda x: x['id'])
self.assertDictEqual(expected, ret)
def test_get_allocations_with_invalid_host(self):
self.db_get_reserv_allocs = self.patch(
self.db_utils, 'get_reservation_allocations_by_host_ids')
# Expecting a list of (reservation_id, lease_id, host_id) tuples
self.db_get_reserv_allocs.return_value = [
('reservation-1', 'lease-1', 'host-1'),
('reservation-1', 'lease-1', 'host-2'),
('reservation-2', 'lease-1', 'host-2'),
('reservation-2', 'lease-1', 'host-3'),
('reservation-3', 'lease-2', 'host-1'),
]
expected = {'resource_id': 'no-reserved-host', 'reservations': []}
ret = self.fake_phys_plugin.get_allocations('no-reserved-host', {})
self.assertDictEqual(expected, ret)
def test_create_reservation_no_hosts_available(self):
now = datetime.datetime.utcnow()
values = {
'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'min': 1,
'max': 1,
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'resource_properties': '',
'start_date': now,
'end_date': now + datetime.timedelta(hours=1),
'resource_type': plugin.RESOURCE_TYPE,
}
host_reservation_create = self.patch(self.db_api,
'host_reservation_create')
matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts')
matching_hosts.return_value = []
self.assertRaises(manager_exceptions.NotEnoughHostsAvailable,
self.fake_phys_plugin.reserve_resource,
u'f9894fcf-e2ed-41e9-8a4c-92fac332608e',
values)
self.rp_create.assert_not_called()
host_reservation_create.assert_not_called()
def test_create_reservation_hosts_available(self):
values = {
'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'min': 1,
'max': 1,
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'resource_properties': '',
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 00),
'resource_type': plugin.RESOURCE_TYPE,
}
self.rp_create.return_value = mock.MagicMock(id=1)
host_reservation_create = self.patch(self.db_api,
'host_reservation_create')
matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts')
matching_hosts.return_value = ['host1', 'host2']
host_allocation_create = self.patch(
self.db_api,
'host_allocation_create')
self.fake_phys_plugin.reserve_resource(
u'441c1476-9f8f-4700-9f30-cd9b6fef3509',
values)
host_values = {
'reservation_id': u'441c1476-9f8f-4700-9f30-cd9b6fef3509',
'aggregate_id': 1,
'resource_properties': '',
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'count_range': '1-1',
'status': 'pending',
'before_end': 'default'
}
host_reservation_create.assert_called_once_with(host_values)
calls = [
mock.call(
{'compute_host_id': 'host1',
'reservation_id': u'441c1476-9f8f-4700-9f30-cd9b6fef3509',
}),
mock.call(
{'compute_host_id': 'host2',
'reservation_id': u'441c1476-9f8f-4700-9f30-cd9b6fef3509',
}),
]
host_allocation_create.assert_has_calls(calls)
@ddt.data("min", "max", "hypervisor_properties", "resource_properties")
def test_create_reservation_with_missing_param(self, missing_param):
values = {
'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'min': 1,
'max': 2,
'before_end': 'default',
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'resource_properties': '',
'start_date': datetime.datetime(2017, 3, 1, 20, 00),
'end_date': datetime.datetime(2017, 3, 2, 20, 00),
'resource_type': plugin.RESOURCE_TYPE}
del values[missing_param]
self.assertRaises(
manager_exceptions.MissingParameter,
self.fake_phys_plugin.reserve_resource,
u'441c1476-9f8f-4700-9f30-cd9b6fef3509',
values)
@ddt.data({"params": {'max': 0}},
{"params": {'max': -1}},
{"params": {'max': 'one'}},
{"params": {'min': 0}},
{"params": {'min': -1}},
{"params": {'min': 'one'}},
{"params": {'before_end': 'invalid'}})
@ddt.unpack
def test_create_reservation_with_invalid_param(self, params):
values = {
'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'min': 1,
'max': 2,
'before_end': 'default',
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'resource_properties': '',
'start_date': datetime.datetime(2017, 3, 1, 20, 00),
'end_date': datetime.datetime(2017, 3, 2, 20, 00),
'resource_type': plugin.RESOURCE_TYPE}
for key, value in params.items():
values[key] = value
self.assertRaises(
manager_exceptions.MalformedParameter,
self.fake_phys_plugin.reserve_resource,
u'441c1476-9f8f-4700-9f30-cd9b6fef3509',
values)
@ddt.data({"params": {'max': 0}},
{"params": {'max': -1}},
{"params": {'max': 'one'}},
{"params": {'min': 0}},
{"params": {'min': -1}},
{"params": {'min': 'one'}})
@ddt.unpack
def test_update_reservation_with_invalid_param(self, params):
values = {
'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'min': 1,
'max': 2,
'before_end': 'default',
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'resource_properties': '',
'start_date': datetime.datetime(2017, 3, 1, 20, 00),
'end_date': datetime.datetime(2017, 3, 2, 20, 00),
'resource_type': plugin.RESOURCE_TYPE}
self.patch(self.db_api, 'reservation_get')
self.patch(self.db_api, 'lease_get')
host_reservation_get = self.patch(self.db_api,
'host_reservation_get')
host_reservation_get.return_value = {
'count_range': '1-1',
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'resource_properties': ''
}
for key, value in params.items():
values[key] = value
self.assertRaises(
manager_exceptions.MalformedParameter,
self.fake_phys_plugin.update_reservation,
u'441c1476-9f8f-4700-9f30-cd9b6fef3509',
values)
def test_create_update_reservation_with_invalid_range(self):
values = {
'lease_id': u'018c1b43-e69e-4aef-a543-09681539cf4c',
'min': 2,
'max': 1,
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'resource_properties': '',
'start_date': datetime.datetime(2017, 3, 1, 20, 00),
'end_date': datetime.datetime(2017, 3, 2, 20, 00),
'resource_type': plugin.RESOURCE_TYPE,
}
self.patch(self.db_api, 'reservation_get')
self.patch(self.db_api, 'lease_get')
host_reservation_get = self.patch(self.db_api,
'host_reservation_get')
host_reservation_get.return_value = {
'count_range': '1-1',
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'resource_properties': ''
}
self.assertRaises(
manager_exceptions.InvalidRange,
self.fake_phys_plugin.reserve_resource,
u'441c1476-9f8f-4700-9f30-cd9b6fef3509',
values)
self.assertRaises(
manager_exceptions.InvalidRange,
self.fake_phys_plugin.update_reservation,
u'441c1476-9f8f-4700-9f30-cd9b6fef3509',
values)
def test_update_reservation_shorten(self):
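# Shrinking the reservation within the current lease window needs no host
# changes, so the host reservation is not even looked up.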
values = {
'start_date': datetime.datetime(2013, 12, 19, 20, 30),
'end_date': datetime.datetime(2013, 12, 19, 21, 00)
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': u'10870923-6d56-45c9-b592-f788053f5baa',
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 00)
}
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
self.fake_phys_plugin.update_reservation(
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
host_reservation_get.assert_not_called()
def test_update_reservation_extend(self):
values = {
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 30)
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': u'10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': u'91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'pending'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 00)
}
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = {
'count_range': '1-1',
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'resource_properties': ''
}
host_allocation_get_all = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = [
{
'id': u'dd305477-4df8-4547-87f6-69069ee546a6',
'compute_host_id': 'host1'
}
]
host_get_all_by_queries = self.patch(self.db_api,
'host_get_all_by_queries')
host_get_all_by_queries.return_value = [{'id': 'host1'}]
get_reserved_periods = self.patch(self.db_utils,
'get_reserved_periods')
get_reserved_periods.return_value = [
(datetime.datetime(2013, 12, 19, 20, 00),
datetime.datetime(2013, 12, 19, 21, 00))
]
host_allocation_create = self.patch(
self.db_api,
'host_allocation_create')
host_allocation_destroy = self.patch(
self.db_api,
'host_allocation_destroy')
self.fake_phys_plugin.update_reservation(
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
host_allocation_create.assert_not_called()
host_allocation_destroy.assert_not_called()
def test_update_reservation_move_failure(self):
values = {
'start_date': datetime.datetime(2013, 12, 20, 20, 00),
'end_date': datetime.datetime(2013, 12, 20, 21, 30)
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': u'10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': u'91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'active'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 00)
}
host_reservation_get = self.patch(
self.db_api,
'host_reservation_get')
host_reservation_get.return_value = {
'count_range': '1-1',
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'resource_properties': ''
}
host_allocation_get_all = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = [
{
'id': u'dd305477-4df8-4547-87f6-69069ee546a6',
'compute_host_id': 'host1'
}
]
host_get_all_by_queries = self.patch(self.db_api,
'host_get_all_by_queries')
host_get_all_by_queries.return_value = [{'id': 'host1'}]
get_reserved_periods = self.patch(self.db_utils,
'get_reserved_periods')
get_reserved_periods.return_value = [
(datetime.datetime(2013, 12, 20, 20, 30),
datetime.datetime(2013, 12, 20, 21, 00))
]
get_computehosts = self.patch(self.nova.ReservationPool,
'get_computehosts')
get_computehosts.return_value = ['host1']
matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts')
matching_hosts.return_value = []
self.assertRaises(
manager_exceptions.NotEnoughHostsAvailable,
self.fake_phys_plugin.update_reservation,
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
reservation_get.assert_called()
def test_update_reservation_move_overlap(self):
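# The new period overlaps the current one and the allocated host is still
# available, so the existing allocation is kept untouched.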
values = {
'start_date': datetime.datetime(2013, 12, 19, 20, 30),
'end_date': datetime.datetime(2013, 12, 19, 21, 30)
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': u'10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': u'91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'pending'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 00)
}
host_reservation_get = self.patch(
self.db_api,
'host_reservation_get')
host_reservation_get.return_value = {
'count_range': '1-1',
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'resource_properties': ''
}
host_allocation_get_all = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = [
{
'id': u'dd305477-4df8-4547-87f6-69069ee546a6',
'compute_host_id': 'host1'
}
]
host_get_all_by_queries = self.patch(self.db_api,
'host_get_all_by_queries')
host_get_all_by_queries.return_value = [{'id': 'host1'}]
get_reserved_periods = self.patch(self.db_utils,
'get_reserved_periods')
get_reserved_periods.return_value = [
(datetime.datetime(2013, 12, 19, 20, 30),
datetime.datetime(2013, 12, 19, 21, 00))
]
host_allocation_create = self.patch(
self.db_api,
'host_allocation_create')
host_allocation_destroy = self.patch(
self.db_api,
'host_allocation_destroy')
self.fake_phys_plugin.update_reservation(
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
host_allocation_create.assert_not_called()
host_allocation_destroy.assert_not_called()
def test_update_reservation_move_realloc(self):
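# Moving to a non-overlapping period where host1 is busy forces a
# reallocation to host2.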
values = {
'start_date': datetime.datetime(2013, 12, 20, 20, 00),
'end_date': datetime.datetime(2013, 12, 20, 21, 30)
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': u'10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': u'91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'pending'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 00)
}
host_reservation_get = self.patch(
self.db_api,
'host_reservation_get')
host_reservation_get.return_value = {
'aggregate_id': 1,
'count_range': '1-1',
'hypervisor_properties': '["=", "$memory_mb", "256"]',
'resource_properties': ''
}
host_allocation_get_all = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = [
{
'id': u'dd305477-4df8-4547-87f6-69069ee546a6',
'compute_host_id': 'host1'
}
]
host_get_all_by_queries = self.patch(self.db_api,
'host_get_all_by_queries')
host_get_all_by_queries.return_value = [{'id': 'host1'},
{'id': 'host2'}]
host_allocation_create = self.patch(
self.db_api,
'host_allocation_create')
host_allocation_destroy = self.patch(
self.db_api,
'host_allocation_destroy')
get_reserved_periods = self.patch(self.db_utils,
'get_reserved_periods')
get_reserved_periods.return_value = [
(datetime.datetime(2013, 12, 20, 20, 30),
datetime.datetime(2013, 12, 20, 21, 00))
]
matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts')
matching_hosts.return_value = ['host2']
self.fake_phys_plugin.update_reservation(
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
host_reservation_get.assert_called_with(
u'91253650-cc34-4c4f-bbe8-c943aa7d0c9b')
host_allocation_destroy.assert_called_with(
'dd305477-4df8-4547-87f6-69069ee546a6')
host_allocation_create.assert_called_with(
{
'compute_host_id': 'host2',
'reservation_id': '706eb3bc-07ed-4383-be93-b32845ece672'
}
)
def test_update_reservation_min_increase_success(self):
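# Raising min from 2 to 3 requires one extra host, hence the '1-1' count
# range passed to _matching_hosts and the '3-3' range stored afterwards.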
values = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00),
'min': 3
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': '10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'pending'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00)
}
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = {
'id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'count_range': '2-3',
'hypervisor_properties': '["=", "$memory_mb", "16384"]',
'resource_properties': ''
}
host_allocation_get_all = self.patch(
self.db_api, 'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = [
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a6',
'compute_host_id': 'host1'
},
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a7',
'compute_host_id': 'host2'
}
]
host_get_all_by_queries = self.patch(self.db_api,
'host_get_all_by_queries')
host_get_all_by_queries.return_value = [
{'id': 'host1'},
{'id': 'host2'},
{'id': 'host3'}
]
host_allocation_destroy = self.patch(self.db_api,
'host_allocation_destroy')
host_allocation_create = self.patch(self.db_api,
'host_allocation_create')
matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts')
matching_hosts.return_value = ['host3']
host_reservation_update = self.patch(self.db_api,
'host_reservation_update')
self.fake_phys_plugin.update_reservation(
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
host_reservation_get.assert_called_with(
'91253650-cc34-4c4f-bbe8-c943aa7d0c9b')
matching_hosts.assert_called_with(
'["=", "$memory_mb", "16384"]',
'',
'1-1',
datetime.datetime(2017, 7, 12, 20, 00),
datetime.datetime(2017, 7, 12, 21, 00)
)
host_allocation_destroy.assert_not_called()
host_allocation_create.assert_called_with(
{
'compute_host_id': 'host3',
'reservation_id': '706eb3bc-07ed-4383-be93-b32845ece672'
}
)
host_reservation_update.assert_called_with(
'91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
{'count_range': '3-3'}
)
def test_update_reservation_min_increase_fail(self):
values = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00),
'min': 3
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': '10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'pending'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00)
}
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = {
'id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'count_range': '2-3',
'hypervisor_properties': '["=", "$memory_mb", "16384"]',
'resource_properties': ''
}
host_allocation_get_all = self.patch(
self.db_api, 'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = [
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a6',
'compute_host_id': 'host1'
},
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a7',
'compute_host_id': 'host2'
}
]
host_get_all_by_queries = self.patch(self.db_api,
'host_get_all_by_queries')
host_get_all_by_queries.return_value = [
{'id': 'host1'},
{'id': 'host2'}
]
matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts')
matching_hosts.return_value = []
self.assertRaises(
manager_exceptions.NotEnoughHostsAvailable,
self.fake_phys_plugin.update_reservation,
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
matching_hosts.assert_called_with(
'["=", "$memory_mb", "16384"]',
'',
'1-1',
datetime.datetime(2017, 7, 12, 20, 00),
datetime.datetime(2017, 7, 12, 21, 00)
)
def test_update_reservation_min_decrease(self):
values = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00),
'min': 1
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': '10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'pending'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00)
}
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = {
'id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'count_range': '2-2',
'hypervisor_properties': '["=", "$memory_mb", "16384"]',
'resource_properties': ''
}
host_allocation_get_all = self.patch(
self.db_api, 'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = [
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a6',
'compute_host_id': 'host1'
},
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a7',
'compute_host_id': 'host2'
}
]
host_get_all_by_queries = self.patch(self.db_api,
'host_get_all_by_queries')
host_get_all_by_queries.return_value = [
{'id': 'host1'},
{'id': 'host2'}
]
matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts')
host_allocation_destroy = self.patch(self.db_api,
'host_allocation_destroy')
host_allocation_create = self.patch(self.db_api,
'host_allocation_create')
host_reservation_update = self.patch(self.db_api,
'host_reservation_update')
self.fake_phys_plugin.update_reservation(
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
matching_hosts.assert_not_called()
host_allocation_destroy.assert_not_called()
host_allocation_create.assert_not_called()
host_reservation_update.assert_called_with(
'91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
{'count_range': '1-2'}
)
def test_update_reservation_max_increase_alloc(self):
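# Raising max from 2 to 3 allows one optional extra host, hence the '0-1'
# count range passed to _matching_hosts.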
values = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00),
'max': 3
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': '10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'pending'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00)
}
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = {
'id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'count_range': '1-2',
'hypervisor_properties': '["=", "$memory_mb", "16384"]',
'resource_properties': ''
}
host_allocation_get_all = self.patch(
self.db_api, 'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = [
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a6',
'compute_host_id': 'host1'
},
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a7',
'compute_host_id': 'host2'
}
]
host_get_all_by_queries = self.patch(self.db_api,
'host_get_all_by_queries')
host_get_all_by_queries.return_value = [
{'id': 'host1'},
{'id': 'host2'},
{'id': 'host3'}
]
host_allocation_destroy = self.patch(self.db_api,
'host_allocation_destroy')
host_allocation_create = self.patch(self.db_api,
'host_allocation_create')
matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts')
matching_hosts.return_value = ['host3']
host_reservation_update = self.patch(self.db_api,
'host_reservation_update')
self.fake_phys_plugin.update_reservation(
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
host_reservation_get.assert_called_with(
'91253650-cc34-4c4f-bbe8-c943aa7d0c9b')
matching_hosts.assert_called_with(
'["=", "$memory_mb", "16384"]',
'',
'0-1',
datetime.datetime(2017, 7, 12, 20, 00),
datetime.datetime(2017, 7, 12, 21, 00)
)
host_allocation_destroy.assert_not_called()
host_allocation_create.assert_called_with(
{
'compute_host_id': 'host3',
'reservation_id': '706eb3bc-07ed-4383-be93-b32845ece672'
}
)
host_reservation_update.assert_called_with(
'91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
{'count_range': '1-3'}
)
def test_update_active_reservation_max_increase_alloc(self):
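# Like the pending case above, but the reservation is active, so the extra
# host must also be added to the aggregate immediately.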
values = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00),
'max': 3
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': '10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'active'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00)
}
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = {
'id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'count_range': '1-2',
'hypervisor_properties': '["=", "$memory_mb", "16384"]',
'resource_properties': '',
'reservation_id': u'706eb3bc-07ed-4383-be93-b32845ece672',
'aggregate_id': 1,
}
host_allocation_get_all = self.patch(
self.db_api, 'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = [
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a6',
'compute_host_id': 'host1'
},
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a7',
'compute_host_id': 'host2'
}
]
host_get_all_by_queries = self.patch(self.db_api,
'host_get_all_by_queries')
host_get_all_by_queries.return_value = [
{'id': 'host1'},
{'id': 'host2'},
{'id': 'host3'}
]
host_allocation_destroy = self.patch(self.db_api,
'host_allocation_destroy')
host_allocation_create = self.patch(self.db_api,
'host_allocation_create')
matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts')
matching_hosts.return_value = ['host3']
host_get = self.patch(self.db_api, 'host_get')
host_get.return_value = {'service_name': 'host3_hostname'}
add_computehost = self.patch(
self.nova.ReservationPool, 'add_computehost')
host_reservation_update = self.patch(self.db_api,
'host_reservation_update')
self.fake_phys_plugin.update_reservation(
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
host_reservation_get.assert_called_with(
'91253650-cc34-4c4f-bbe8-c943aa7d0c9b')
matching_hosts.assert_called_with(
'["=", "$memory_mb", "16384"]',
'',
'0-1',
datetime.datetime(2017, 7, 12, 20, 00),
datetime.datetime(2017, 7, 12, 21, 00)
)
host_allocation_destroy.assert_not_called()
host_allocation_create.assert_called_with(
{
'compute_host_id': 'host3',
'reservation_id': '706eb3bc-07ed-4383-be93-b32845ece672'
}
)
add_computehost.assert_called_with(1, ['host3_hostname'])
host_reservation_update.assert_called_with(
'91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
{'count_range': '1-3'}
)
def test_update_reservation_max_increase_noalloc(self):
values = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00),
'max': 3
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': '10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'pending'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00)
}
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = {
'id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'count_range': '1-2',
'hypervisor_properties': '["=", "$memory_mb", "16384"]',
'resource_properties': ''
}
host_allocation_get_all = self.patch(
self.db_api, 'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = [
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a6',
'compute_host_id': 'host1'
},
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a7',
'compute_host_id': 'host2'
}
]
host_get_all_by_queries = self.patch(self.db_api,
'host_get_all_by_queries')
host_get_all_by_queries.return_value = [
{'id': 'host1'},
{'id': 'host2'}
]
matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts')
matching_hosts.return_value = []
host_reservation_update = self.patch(self.db_api,
'host_reservation_update')
self.fake_phys_plugin.update_reservation(
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
host_reservation_get.assert_called_with(
'91253650-cc34-4c4f-bbe8-c943aa7d0c9b')
matching_hosts.assert_called_with(
'["=", "$memory_mb", "16384"]',
'',
'0-1',
datetime.datetime(2017, 7, 12, 20, 00),
datetime.datetime(2017, 7, 12, 21, 00)
)
host_reservation_update.assert_called_with(
'91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
{'count_range': '1-3'}
)
def test_update_reservation_max_decrease(self):
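# Lowering max below the number of current allocations frees one of the
# allocated hosts.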
values = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00),
'max': 1
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': '10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'pending'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00)
}
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = {
'id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'count_range': '1-2',
'hypervisor_properties': '["=", "$memory_mb", "16384"]',
'resource_properties': ''
}
host_allocation_get_all = self.patch(
self.db_api, 'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = [
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a6',
'compute_host_id': 'host1'
},
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a7',
'compute_host_id': 'host2'
}
]
host_get_all_by_queries = self.patch(self.db_api,
'host_get_all_by_queries')
host_get_all_by_queries.return_value = [
{'id': 'host1'},
{'id': 'host2'}
]
host_allocation_destroy = self.patch(self.db_api,
'host_allocation_destroy')
host_reservation_update = self.patch(self.db_api,
'host_reservation_update')
self.fake_phys_plugin.update_reservation(
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
host_reservation_get.assert_called_with(
'91253650-cc34-4c4f-bbe8-c943aa7d0c9b')
host_allocation_destroy.assert_called_with(
'dd305477-4df8-4547-87f6-69069ee546a6')
host_reservation_update.assert_called_with(
'91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
{'count_range': '1-1'}
)
def test_update_reservation_realloc_with_properties_change(self):
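# Changing hypervisor_properties invalidates the current host, so the
# allocation is moved to a host matching the new properties.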
values = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00),
'hypervisor_properties': '["=", "$memory_mb", "32768"]',
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': '10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'pending'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00)
}
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = {
'id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'count_range': '1-1',
'hypervisor_properties': '["=", "$memory_mb", "16384"]',
'resource_properties': ''
}
host_allocation_get_all = self.patch(
self.db_api, 'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = [
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a6',
'compute_host_id': 'host1'
}
]
host_get_all_by_queries = self.patch(self.db_api,
'host_get_all_by_queries')
host_get_all_by_queries.return_value = [{'id': 'host2'}]
matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts')
matching_hosts.return_value = ['host2']
host_allocation_create = self.patch(self.db_api,
'host_allocation_create')
host_allocation_destroy = self.patch(self.db_api,
'host_allocation_destroy')
host_reservation_update = self.patch(self.db_api,
'host_reservation_update')
self.fake_phys_plugin.update_reservation(
'706eb3bc-07ed-4383-be93-b32845ece672',
values)
host_reservation_get.assert_called_with(
'91253650-cc34-4c4f-bbe8-c943aa7d0c9b')
matching_hosts.assert_called_with(
'["=", "$memory_mb", "32768"]',
'',
'1-1',
datetime.datetime(2017, 7, 12, 20, 00),
datetime.datetime(2017, 7, 12, 21, 00)
)
host_allocation_create.assert_called_with(
{
'compute_host_id': 'host2',
'reservation_id': '706eb3bc-07ed-4383-be93-b32845ece672'
}
)
host_allocation_destroy.assert_called_with(
'dd305477-4df8-4547-87f6-69069ee546a6'
)
host_reservation_update.assert_called_with(
'91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
{'hypervisor_properties': '["=", "$memory_mb", "32768"]'}
)
def test_update_reservation_no_requested_hosts_available(self):
values = {
'start_date': datetime.datetime(2017, 7, 12, 20, 00),
'end_date': datetime.datetime(2017, 7, 12, 21, 00),
'resource_properties': '[">=", "$vcpus", "32768"]'
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = {
'lease_id': '10870923-6d56-45c9-b592-f788053f5baa',
'resource_id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'status': 'pending'
}
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = {
'start_date': datetime.datetime(2013, 12, 19, 20, 00),
'end_date': datetime.datetime(2013, 12, 19, 21, 00)
}
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = {
'id': '91253650-cc34-4c4f-bbe8-c943aa7d0c9b',
'count_range': '1-1',
'hypervisor_properties': '["=", "$memory_mb", "16384"]',
'resource_properties': ''
}
host_allocation_get_all = self.patch(
self.db_api, 'host_allocation_get_all_by_values')
host_allocation_get_all.return_value = [
{
'id': 'dd305477-4df8-4547-87f6-69069ee546a6',
'compute_host_id': 'host1'
}
]
host_get_all_by_queries = self.patch(self.db_api,
'host_get_all_by_queries')
host_get_all_by_queries.return_value = []
matching_hosts = self.patch(self.fake_phys_plugin, '_matching_hosts')
matching_hosts.return_value = []
self.assertRaises(
manager_exceptions.NotEnoughHostsAvailable,
self.fake_phys_plugin.update_reservation,
'441c1476-9f8f-4700-9f30-cd9b6fef3509',
values)
def test_on_start(self):
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = {
'reservation_id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
'aggregate_id': 1,
}
host_allocation_get_all_by_values = self.patch(
self.db_api, 'host_allocation_get_all_by_values')
host_allocation_get_all_by_values.return_value = [
{'compute_host_id': 'host1'},
]
host_get = self.patch(self.db_api, 'host_get')
host_get.return_value = {'service_name': 'host1_hostname'}
add_computehost = self.patch(
self.nova.ReservationPool, 'add_computehost')
self.fake_phys_plugin.on_start(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
add_computehost.assert_called_with(1, ['host1_hostname'])
def test_before_end_with_no_action(self):
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = {'before_end': ''}
reservationpool = self.patch(self.nova, 'ReservationPool')
self.fake_phys_plugin.before_end(
u'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
reservationpool.assert_not_called()
def test_before_end_with_snapshot(self):
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = {
'aggregate_id': 1,
'before_end': 'snapshot'
}
get_computehosts = self.patch(self.nova.ReservationPool,
'get_computehosts')
get_computehosts.return_value = ['host']
list_servers = self.patch(self.ServerManager, 'list')
list_servers.return_value = ['server1', 'server2']
create_image = self.patch(self.ServerManager, 'create_image')
self.fake_phys_plugin.before_end(
u'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
create_image.assert_any_call(server='server1')
create_image.assert_any_call(server='server2')
def test_on_end_with_instances(self):
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = {
'id': u'04de74e8-193a-49d2-9ab8-cba7b49e45e8',
'reservation_id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
'aggregate_id': 1
}
host_reservation_update = self.patch(
self.db_api,
'host_reservation_update')
host_allocation_get_all_by_values = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_allocation_get_all_by_values.return_value = [
{'id': u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f',
'compute_host_id': u'cdae2a65-236f-475a-977d-f6ad82f828b7',
},
]
host_allocation_destroy = self.patch(
self.db_api,
'host_allocation_destroy')
get_computehosts = self.patch(self.nova.ReservationPool,
'get_computehosts')
get_computehosts.return_value = ['host']
list_servers = self.patch(self.ServerManager, 'list')
list_servers.return_value = ['server1', 'server2']
delete_server = self.patch(self.ServerManager, 'delete')
# Mock delete_server so the first call fails to find the instance.
# This can happen when the user is deleting instances concurrently.
delete_server.side_effect = mock.Mock(
side_effect=[nova_exceptions.NotFound(
404, 'Instance server1 could not be found.'), None])
delete_pool = self.patch(self.nova.ReservationPool, 'delete')
self.fake_phys_plugin.on_end(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
host_reservation_update.assert_called_with(
u'04de74e8-193a-49d2-9ab8-cba7b49e45e8', {'status': 'completed'})
host_allocation_destroy.assert_called_with(
u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f')
list_servers.assert_called_with(search_opts={'host': 'host',
'all_tenants': 1})
delete_server.assert_any_call(server='server1')
delete_server.assert_any_call(server='server2')
delete_pool.assert_called_with(1)
def test_on_end_without_instances(self):
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = {
'id': u'04de74e8-193a-49d2-9ab8-cba7b49e45e8',
'reservation_id': u'593e7028-c0d1-4d76-8642-2ffd890b324c',
'aggregate_id': 1
}
host_reservation_update = self.patch(
self.db_api,
'host_reservation_update')
host_allocation_get_all_by_values = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_allocation_get_all_by_values.return_value = [
{'id': u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f',
'compute_host_id': u'cdae2a65-236f-475a-977d-f6ad82f828b7',
},
]
host_allocation_destroy = self.patch(
self.db_api,
'host_allocation_destroy')
get_computehosts = self.patch(self.nova.ReservationPool,
'get_computehosts')
get_computehosts.return_value = ['host']
list_servers = self.patch(self.ServerManager, 'list')
list_servers.return_value = []
delete_server = self.patch(self.ServerManager, 'delete')
delete_pool = self.patch(self.nova.ReservationPool, 'delete')
self.fake_phys_plugin.on_end(u'04de74e8-193a-49d2-9ab8-cba7b49e45e8')
host_reservation_update.assert_called_with(
u'04de74e8-193a-49d2-9ab8-cba7b49e45e8', {'status': 'completed'})
host_allocation_destroy.assert_called_with(
u'bfa9aa0b-8042-43eb-a4e6-4555838bf64f')
delete_server.assert_not_called()
delete_pool.assert_called_with(1)
def test_heal_reservations_before_start_and_resources_changed(self):
failed_host = {'id': '1'}
dummy_reservation = {
'id': 'rsrv-1',
'resource_type': plugin.RESOURCE_TYPE,
'lease_id': 'lease-1',
'status': 'pending',
'hypervisor_properties': [],
'resource_properties': [],
'resource_id': 'resource-1',
'computehost_allocations': [{
'id': 'alloc-1', 'compute_host_id': failed_host['id'],
'reservation_id': 'rsrv-1'
}]
}
get_reservations = self.patch(self.db_utils,
'get_reservations_by_host_ids')
get_reservations.return_value = [dummy_reservation]
reallocate = self.patch(self.fake_phys_plugin, '_reallocate')
reallocate.return_value = True
result = self.fake_phys_plugin.heal_reservations(
[failed_host],
datetime.datetime(2020, 1, 1, 12, 00),
datetime.datetime(2020, 1, 1, 13, 00))
reallocate.assert_called_once_with(
dummy_reservation['computehost_allocations'][0])
self.assertEqual({}, result)
def test_heal_reservations_before_start_and_missing_resources(self):
failed_host = {'id': '1'}
dummy_reservation = {
'id': 'rsrv-1',
'resource_type': plugin.RESOURCE_TYPE,
'lease_id': 'lease-1',
'status': 'pending',
'hypervisor_properties': [],
'resource_properties': [],
'resource_id': 'resource-1',
'computehost_allocations': [{
'id': 'alloc-1', 'compute_host_id': failed_host['id'],
'reservation_id': 'rsrv-1'
}]
}
get_reservations = self.patch(self.db_utils,
'get_reservations_by_host_ids')
get_reservations.return_value = [dummy_reservation]
reallocate = self.patch(self.fake_phys_plugin, '_reallocate')
reallocate.return_value = False
result = self.fake_phys_plugin.heal_reservations(
[failed_host],
datetime.datetime(2020, 1, 1, 12, 00),
datetime.datetime(2020, 1, 1, 13, 00))
reallocate.assert_called_once_with(
dummy_reservation['computehost_allocations'][0])
self.assertEqual(
{dummy_reservation['id']: {'missing_resources': True}},
result)
def test_heal_active_reservations_and_resources_changed(self):
failed_host = {'id': '1'}
dummy_reservation = {
'id': 'rsrv-1',
'resource_type': plugin.RESOURCE_TYPE,
'lease_id': 'lease-1',
'status': 'active',
'hypervisor_properties': [],
'resource_properties': [],
'resource_id': 'resource-1',
'computehost_allocations': [{
'id': 'alloc-1', 'compute_host_id': failed_host['id'],
'reservation_id': 'rsrv-1'
}]
}
get_reservations = self.patch(self.db_utils,
'get_reservations_by_host_ids')
get_reservations.return_value = [dummy_reservation]
reallocate = self.patch(self.fake_phys_plugin, '_reallocate')
reallocate.return_value = True
result = self.fake_phys_plugin.heal_reservations(
[failed_host],
datetime.datetime(2020, 1, 1, 12, 00),
datetime.datetime(2020, 1, 1, 13, 00))
reallocate.assert_called_once_with(
dummy_reservation['computehost_allocations'][0])
self.assertEqual(
{dummy_reservation['id']: {'resources_changed': True}},
result)
def test_heal_active_reservations_and_missing_resources(self):
failed_host = {'id': '1'}
dummy_reservation = {
'id': 'rsrv-1',
'resource_type': plugin.RESOURCE_TYPE,
'lease_id': 'lease-1',
'status': 'active',
'hypervisor_properties': [],
'resource_properties': [],
'resource_id': 'resource-1',
'computehost_allocations': [{
'id': 'alloc-1', 'compute_host_id': failed_host['id'],
'reservation_id': 'rsrv-1'
}]
}
get_reservations = self.patch(self.db_utils,
'get_reservations_by_host_ids')
get_reservations.return_value = [dummy_reservation]
reallocate = self.patch(self.fake_phys_plugin, '_reallocate')
reallocate.return_value = False
result = self.fake_phys_plugin.heal_reservations(
[failed_host],
datetime.datetime(2020, 1, 1, 12, 00),
datetime.datetime(2020, 1, 1, 13, 00))
reallocate.assert_called_once_with(
dummy_reservation['computehost_allocations'][0])
self.assertEqual(
{dummy_reservation['id']: {'missing_resources': True}},
result)
def test_reallocate_before_start(self):
failed_host = {'id': '1'}
new_host = {'id': '2'}
dummy_allocation = {
'id': 'alloc-1',
'compute_host_id': failed_host['id'],
'reservation_id': 'rsrv-1'
}
dummy_reservation = {
'id': 'rsrv-1',
'resource_type': plugin.RESOURCE_TYPE,
'lease_id': 'lease-1',
'status': 'pending',
'hypervisor_properties': [],
'resource_properties': [],
'resource_id': 'resource-1'
}
dummy_host_reservation = {
'aggregate_id': 1
}
dummy_lease = {
'name': 'lease-name',
'start_date': datetime.datetime(2020, 1, 1, 12, 00),
'end_date': datetime.datetime(2020, 1, 2, 12, 00),
'trust_id': 'trust-1'
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = dummy_reservation
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = dummy_host_reservation
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = dummy_lease
matching_hosts = self.patch(host_plugin.PhysicalHostPlugin,
'_matching_hosts')
matching_hosts.return_value = [new_host['id']]
alloc_update = self.patch(self.db_api, 'host_allocation_update')
with mock.patch.object(datetime, 'datetime',
mock.Mock(wraps=datetime.datetime)) as patched:
patched.utcnow.return_value = datetime.datetime(
2020, 1, 1, 11, 00)
result = self.fake_phys_plugin._reallocate(dummy_allocation)
matching_hosts.assert_called_once_with(
dummy_reservation['hypervisor_properties'],
dummy_reservation['resource_properties'],
'1-1', dummy_lease['start_date'], dummy_lease['end_date'])
alloc_update.assert_called_once_with(
dummy_allocation['id'],
{'compute_host_id': new_host['id']})
self.assertEqual(True, result)
def test_reallocate_active(self):
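# For an active reservation, the failed host is removed from the aggregate,
# a replacement is searched for starting from the current time, and the new
# host is added to the aggregate.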
failed_host = {'id': '1',
'service_name': 'compute-1'}
new_host = {'id': '2',
'service_name': 'compute-2'}
dummy_allocation = {
'id': 'alloc-1',
'compute_host_id': failed_host['id'],
'reservation_id': 'rsrv-1'
}
dummy_reservation = {
'id': 'rsrv-1',
'resource_type': plugin.RESOURCE_TYPE,
'lease_id': 'lease-1',
'status': 'active',
'hypervisor_properties': [],
'resource_properties': [],
'resource_id': 'resource-1'
}
dummy_host_reservation = {
'aggregate_id': 1
}
dummy_lease = {
'name': 'lease-name',
'start_date': datetime.datetime(2020, 1, 1, 12, 00),
'end_date': datetime.datetime(2020, 1, 2, 12, 00),
'trust_id': 'trust-1'
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = dummy_reservation
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = dummy_lease
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = dummy_host_reservation
host_get = self.patch(self.db_api, 'host_get')
host_get.side_effect = [failed_host, new_host]
matching_hosts = self.patch(host_plugin.PhysicalHostPlugin,
'_matching_hosts')
matching_hosts.return_value = [new_host['id']]
alloc_update = self.patch(self.db_api, 'host_allocation_update')
with mock.patch.object(datetime, 'datetime',
mock.Mock(wraps=datetime.datetime)) as patched:
patched.utcnow.return_value = datetime.datetime(
2020, 1, 1, 13, 00)
result = self.fake_phys_plugin._reallocate(dummy_allocation)
self.remove_compute_host.assert_called_once_with(
dummy_host_reservation['aggregate_id'],
failed_host['service_name'])
matching_hosts.assert_called_once_with(
dummy_reservation['hypervisor_properties'],
dummy_reservation['resource_properties'],
'1-1', datetime.datetime(2020, 1, 1, 13, 00),
dummy_lease['end_date'])
alloc_update.assert_called_once_with(
dummy_allocation['id'],
{'compute_host_id': new_host['id']})
self.add_compute_host.assert_called_once_with(
dummy_host_reservation['aggregate_id'],
new_host['service_name'])
self.assertEqual(True, result)
def test_reallocate_missing_resources(self):
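# With no matching host available, the allocation is destroyed and
# _reallocate reports failure.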
failed_host = {'id': '1'}
dummy_allocation = {
'id': 'alloc-1',
'compute_host_id': failed_host['id'],
'reservation_id': 'rsrv-1'
}
dummy_reservation = {
'id': 'rsrv-1',
'resource_type': plugin.RESOURCE_TYPE,
'lease_id': 'lease-1',
'status': 'pending',
'hypervisor_properties': [],
'resource_properties': [],
'resource_id': 'resource-1'
}
dummy_host_reservation = {
'aggregate_id': 1
}
dummy_lease = {
'name': 'lease-name',
'start_date': datetime.datetime(2020, 1, 1, 12, 00),
'end_date': datetime.datetime(2020, 1, 2, 12, 00),
'trust_id': 'trust-1'
}
reservation_get = self.patch(self.db_api, 'reservation_get')
reservation_get.return_value = dummy_reservation
host_reservation_get = self.patch(self.db_api, 'host_reservation_get')
host_reservation_get.return_value = dummy_host_reservation
lease_get = self.patch(self.db_api, 'lease_get')
lease_get.return_value = dummy_lease
matching_hosts = self.patch(host_plugin.PhysicalHostPlugin,
'_matching_hosts')
matching_hosts.return_value = []
alloc_destroy = self.patch(self.db_api, 'host_allocation_destroy')
with mock.patch.object(datetime, 'datetime',
mock.Mock(wraps=datetime.datetime)) as patched:
patched.utcnow.return_value = datetime.datetime(
2020, 1, 1, 11, 00)
result = self.fake_phys_plugin._reallocate(dummy_allocation)
matching_hosts.assert_called_once_with(
dummy_reservation['hypervisor_properties'],
dummy_reservation['resource_properties'],
'1-1', dummy_lease['start_date'], dummy_lease['end_date'])
alloc_destroy.assert_called_once_with(dummy_allocation['id'])
self.assertEqual(False, result)
def test_matching_hosts_not_allocated_hosts(self):
def host_allocation_get_all_by_values(**kwargs):
if kwargs['compute_host_id'] == 'host1':
return True
host_get = self.patch(
self.db_api,
'reservable_host_get_all_by_queries')
host_get.return_value = [
{'id': 'host1'},
{'id': 'host2'},
{'id': 'host3'},
]
host_get = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_get.side_effect = host_allocation_get_all_by_values
host_get = self.patch(
self.db_utils,
'get_free_periods')
host_get.return_value = [
(datetime.datetime(2013, 12, 19, 20, 00),
datetime.datetime(2013, 12, 19, 21, 00)),
]
result = self.fake_phys_plugin._matching_hosts(
'[]', '[]', '1-3',
datetime.datetime(2013, 12, 19, 20, 00),
datetime.datetime(2013, 12, 19, 21, 00))
self.assertEqual(['host2', 'host3'], result)
def test_matching_hosts_allocated_hosts(self):
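# With a strict 3-3 range, already allocated hosts that are free for the
# whole requested period are counted as well.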
def host_allocation_get_all_by_values(**kwargs):
if kwargs['compute_host_id'] == 'host1':
return True
host_get = self.patch(
self.db_api,
'reservable_host_get_all_by_queries')
host_get.return_value = [
{'id': 'host1'},
{'id': 'host2'},
{'id': 'host3'},
]
host_get = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_get.side_effect = host_allocation_get_all_by_values
host_get = self.patch(
self.db_utils,
'get_free_periods')
host_get.return_value = [
(datetime.datetime(2013, 12, 19, 20, 00),
datetime.datetime(2013, 12, 19, 21, 00)),
]
result = self.fake_phys_plugin._matching_hosts(
'[]', '[]', '3-3',
datetime.datetime(2013, 12, 19, 20, 00),
datetime.datetime(2013, 12, 19, 21, 00))
self.assertEqual(['host1', 'host2', 'host3'], result)
def test_matching_hosts_allocated_hosts_with_cleaning_time(self):
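# With cleaning_time set to 5 minutes, allocated hosts only match if they
# are free for the requested period padded by 5 minutes on each side.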
def host_allocation_get_all_by_values(**kwargs):
if kwargs['compute_host_id'] == 'host1':
return True
self.cfg.CONF.set_override('cleaning_time', '5')
host_get = self.patch(
self.db_api,
'reservable_host_get_all_by_queries')
host_get.return_value = [
{'id': 'host1'},
{'id': 'host2'},
{'id': 'host3'},
]
host_get = self.patch(
self.db_api,
'host_allocation_get_all_by_values')
host_get.side_effect = host_allocation_get_all_by_values
host_get = self.patch(
self.db_utils,
'get_free_periods')
host_get.return_value = [
(datetime.datetime(2013, 12, 19, 20, 00)
- datetime.timedelta(minutes=5),
datetime.datetime(2013, 12, 19, 21, 00)
+ datetime.timedelta(minutes=5))
]
result = self.fake_phys_plugin._matching_hosts(
'[]', '[]', '3-3',
datetime.datetime(2013, 12, 19, 20, 00),
datetime.datetime(2013, 12, 19, 21, 00))
self.assertEqual(['host1', 'host2', 'host3'], result)
def test_matching_hosts_not_matching(self):
host_get = self.patch(
self.db_api,
'reservable_host_get_all_by_queries')
host_get.return_value = []
result = self.fake_phys_plugin._matching_hosts(
'["=", "$memory_mb", "2048"]', '[]', '1-1',
datetime.datetime(2013, 12, 19, 20, 00),
datetime.datetime(2013, 12, 19, 21, 00))
self.assertEqual([], result)
def test_check_params_with_valid_before_end(self):
values = {
'min': 1,
'max': 2,
'resource_properties': '',
'hypervisor_properties': '',
'before_end': 'snapshot'
}
self.fake_phys_plugin._check_params(values)
self.assertEqual(values['before_end'], 'snapshot')
def test_check_params_with_invalid_before_end(self):
values = {
'min': 1,
'max': 2,
'resource_properties': '',
'hypervisor_properties': '',
'before_end': 'invalid'
}
self.assertRaises(manager_exceptions.MalformedParameter,
self.fake_phys_plugin._check_params,
values)
def test_check_params_without_before_end(self):
self.cfg.CONF.set_override('before_end', '',
group='physical:host')
values = {
'min': 1,
'max': 2,
'resource_properties': '',
'hypervisor_properties': ''
}
self.fake_phys_plugin._check_params(values)
self.assertEqual(values['before_end'], 'default')
class PhysicalHostMonitorPluginTestCase(tests.TestCase):
def setUp(self):
super(PhysicalHostMonitorPluginTestCase, self).setUp()
self.patch(nova_client, 'Client')
self.host_monitor_plugin = host_plugin.PhysicalHostMonitorPlugin()
def test_configuration(self):
# reset the singleton at first
host_plugin.PhysicalHostMonitorPlugin._instance = None
self.cfg = self.useFixture(conf_fixture.Config(CONF))
self.cfg.config(os_admin_username='fake-user')
self.cfg.config(os_admin_password='fake-passwd')
self.cfg.config(os_admin_user_domain_name='fake-user-domain')
self.cfg.config(os_admin_project_name='fake-pj-name')
self.cfg.config(os_admin_project_domain_name='fake-pj-domain')
self.host_monitor_plugin = host_plugin.PhysicalHostMonitorPlugin()
self.assertEqual('fake-user', self.host_monitor_plugin.username)
self.assertEqual("fake-passwd", self.host_monitor_plugin.password)
self.assertEqual("fake-user-domain",
self.host_monitor_plugin.user_domain_name)
self.assertEqual("fake-pj-name", self.host_monitor_plugin.project_name)
self.assertEqual("fake-pj-domain",
self.host_monitor_plugin.project_domain_name)
def test_notification_callback_disabled_true(self):
failed_host = {'hypervisor_hostname': 'hypvsr1'}
event_type = 'service.update'
payload = {
'nova_object.namespace': 'nova',
'nova_object.name': 'ServiceStatusPayload',
'nova_object.version': '1.1',
'nova_object.data': {
'host': failed_host['hypervisor_hostname'],
'disabled': True,
'last_seen_up': '2012-10-29T13:42:05Z',
'binary': 'nova-compute',
'topic': 'compute',
'disabled_reason': None,
'report_count': 1,
'forced_down': False,
'version': 22,
'availability_zone': None,
'uuid': 'fa69c544-906b-4a6a-a9c6-c1f7a8078c73'
}
}
host_get_all = self.patch(db_api,
'reservable_host_get_all_by_queries')
host_get_all.return_value = [failed_host]
handle_failures = self.patch(self.host_monitor_plugin,
'_handle_failures')
handle_failures.return_value = {'rsrv-1': {'missing_resources': True}}
result = self.host_monitor_plugin.notification_callback(event_type,
payload)
host_get_all.assert_called_once_with(
['hypervisor_hostname == ' + payload['nova_object.data']['host']])
self.assertEqual({'rsrv-1': {'missing_resources': True}}, result)
def test_notification_callback_no_failure(self):
event_type = 'service.update'
payload = {
'nova_object.namespace': 'nova',
'nova_object.name': 'ServiceStatusPayload',
'nova_object.version': '1.1',
'nova_object.data': {
'host': 'compute-1',
'disabled': False,
'last_seen_up': '2012-10-29T13:42:05Z',
'binary': 'nova-compute',
'topic': 'compute',
'disabled_reason': None,
'report_count': 1,
'forced_down': False,
'version': 22,
'availability_zone': None,
'uuid': 'fa69c544-906b-4a6a-a9c6-c1f7a8078c73'
}
}
host_get_all = self.patch(db_api, 'host_get_all_by_queries')
host_get_all.return_value = []
handle_failures = self.patch(self.host_monitor_plugin,
'_handle_failures')
result = self.host_monitor_plugin.notification_callback(event_type,
payload)
host_get_all.assert_called_once_with(
['reservable == 0',
'hypervisor_hostname == ' + payload['nova_object.data']['host']])
handle_failures.assert_not_called()
self.assertEqual({}, result)
def test_notification_callback_recover(self):
recovered_host = {'hypervisor_hostname': 'hypvsr1', 'id': 1}
event_type = 'service.update'
payload = {
'nova_object.namespace': 'nova',
'nova_object.name': 'ServiceStatusPayload',
'nova_object.version': '1.1',
'nova_object.data': {
'host': 'compute-1',
'disabled': False,
'last_seen_up': '2012-10-29T13:42:05Z',
'binary': 'nova-compute',
'topic': 'compute',
'disabled_reason': None,
'report_count': 1,
'forced_down': False,
'version': 22,
'availability_zone': None,
'uuid': 'fa69c544-906b-4a6a-a9c6-c1f7a8078c73'
}
}
host_get_all = self.patch(db_api, 'host_get_all_by_queries')
host_get_all.return_value = [recovered_host]
handle_failures = self.patch(self.host_monitor_plugin,
'_handle_failures')
host_update = self.patch(db_api, 'host_update')
result = self.host_monitor_plugin.notification_callback(event_type,
payload)
host_get_all.assert_called_once_with(
['reservable == 0',
'hypervisor_hostname == ' + payload['nova_object.data']['host']])
host_update.assert_called_once_with(recovered_host['id'],
{'reservable': True})
handle_failures.assert_not_called()
self.assertEqual({}, result)
def test_poll_resource_failures_state_down(self):
hosts = [
{'id': '1',
'hypervisor_hostname': 'hypvsr1',
'reservable': True},
{'id': '2',
'hypervisor_hostname': 'hypvsr2',
'reservable': True},
]
host_get_all = self.patch(db_api,
'host_get_all_by_filters')
host_get_all.return_value = hosts
hypervisors_list = self.patch(
self.host_monitor_plugin.nova.hypervisors, 'list')
hypervisors_list.return_value = [
mock.MagicMock(id=1, state='down', status='enabled'),
mock.MagicMock(id=2, state='down', status='enabled')]
result = self.host_monitor_plugin._poll_resource_failures()
self.assertEqual((hosts, []), result)
def test_poll_resource_failures_status_disabled(self):
hosts = [
{'id': '1',
'hypervisor_hostname': 'hypvsr1',
'reservable': True},
{'id': '2',
'hypervisor_hostname': 'hypvsr2',
'reservable': True},
]
host_get_all = self.patch(db_api,
'host_get_all_by_filters')
host_get_all.return_value = hosts
hypervisors_list = self.patch(
self.host_monitor_plugin.nova.hypervisors, 'list')
hypervisors_list.return_value = [
mock.MagicMock(id=1, state='up', status='disabled'),
mock.MagicMock(id=2, state='up', status='disabled')]
result = self.host_monitor_plugin._poll_resource_failures()
self.assertEqual((hosts, []), result)
def test_poll_resource_failures_nothing(self):
hosts = [
{'id': '1',
'hypervisor_hostname': 'hypvsr1',
'reservable': True},
{'id': '2',
'hypervisor_hostname': 'hypvsr2',
'reservable': True},
]
host_get_all = self.patch(db_api,
'host_get_all_by_filters')
host_get_all.return_value = hosts
hypervisors_list = self.patch(
self.host_monitor_plugin.nova.hypervisors, 'list')
hypervisors_list.return_value = [
mock.MagicMock(id=1, state='up', status='enabled'),
mock.MagicMock(id=2, state='up', status='enabled')]
result = self.host_monitor_plugin._poll_resource_failures()
self.assertEqual(([], []), result)
def test_poll_resource_failures_recover(self):
hosts = [
{'id': '1',
'hypervisor_hostname': 'hypvsr1',
'reservable': False},
{'id': '2',
'hypervisor_hostname': 'hypvsr2',
'reservable': False},
]
host_get_all = self.patch(db_api,
'host_get_all_by_filters')
host_get_all.return_value = hosts
hypervisors_list = self.patch(
self.host_monitor_plugin.nova.hypervisors, 'list')
hypervisors_list.return_value = [
mock.MagicMock(id=1, state='up', status='enabled'),
mock.MagicMock(id=2, state='up', status='enabled')]
result = self.host_monitor_plugin._poll_resource_failures()
self.assertEqual(([], hosts), result)
def test_handle_failures(self):
failed_hosts = [
{'id': '1',
'hypervisor_hostname': 'hypvsr1'}
]
host_update = self.patch(db_api, 'host_update')
heal = self.patch(self.host_monitor_plugin, 'heal')
self.host_monitor_plugin._handle_failures(failed_hosts)
host_update.assert_called_once_with(failed_hosts[0]['id'],
{'reservable': False})
heal.assert_called_once()
def test_heal(self):
failed_hosts = [
{'id': '1',
'hypervisor_hostname': 'hypvsr1'}
]
reservation_flags = {
'rsrv-1': {'missing_resources': True}
}
hosts_get = self.patch(db_api, 'unreservable_host_get_all_by_queries')
hosts_get.return_value = failed_hosts
get_healing_interval = self.patch(self.host_monitor_plugin,
'get_healing_interval')
get_healing_interval.return_value = 60
healing_handler = mock.Mock()
healing_handler.return_value = reservation_flags
self.host_monitor_plugin.healing_handlers = [healing_handler]
start_date = datetime.datetime(2020, 1, 1, 12, 00)
with mock.patch.object(datetime, 'datetime',
mock.Mock(wraps=datetime.datetime)) as patched:
patched.utcnow.return_value = start_date
result = self.host_monitor_plugin.heal()
healing_handler.assert_called_once_with(
failed_hosts, start_date,
start_date + datetime.timedelta(minutes=60)
)
self.assertEqual(reservation_flags, result)<|fim▁end|> | |
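# --- Editor's note: illustrative sketch only, not part of the original test module. ---
# The tests above drive PhysicalHostPlugin._matching_hosts() with a "min-max"
# count string such as '1-3' plus JSON filter queries, and they stub
# db_utils.get_free_periods() so that an already-allocated host only qualifies
# when the whole requested window (optionally padded by cleaning_time) is free.
# The helper below is a hypothetical, simplified rendering of that selection
# logic, written only to make the assertions easier to follow; the real
# implementation in host_plugin.py differs in detail.
import datetime


def pick_hosts(free_hosts, allocated_hosts, count_range, start_date, end_date,
               window_is_free, cleaning_time=datetime.timedelta(0)):
    """Return host ids satisfying a 'min-max' request, preferring free hosts."""
    min_hosts, max_hosts = (int(n) for n in count_range.split('-'))
    selected = [host['id'] for host in free_hosts]
    padded_start = start_date - cleaning_time
    padded_end = end_date + cleaning_time
    for host in allocated_hosts:
        if len(selected) >= max_hosts:
            break
        # window_is_free() stands in for the get_free_periods() lookup stubbed above.
        if window_is_free(host, padded_start, padded_end):
            selected.append(host['id'])
    return selected[:max_hosts] if len(selected) >= min_hosts else []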
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>#### PATTERN #######################################################################################
# Authors: Tom De Smedt <[email protected]>, Walter Daelemans <[email protected]>
# License: BSD License, see LICENSE.txt
# Copyright (c) 2010 University of Antwerp, Belgium
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:<|fim▁hole|># notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Pattern nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# CLiPS Computational Linguistics Group, University of Antwerp, Belgium
# http://www.clips.ua.ac.be/pages/pattern
### CREDITS ########################################################################################
__author__ = "Tom De Smedt"
__credits__ = "Tom De Smedt, Walter Daelemans"
__version__ = "2.6"
__copyright__ = "Copyright (c) 2010 University of Antwerp (BE)"
__license__ = "BSD"
####################################################################################################
import os
# Shortcuts to pattern.en, pattern.es, ...
# (instead of pattern.text.en, pattern.text.es, ...)
try: __path__.append(os.path.join(__path__[0], "text"))
except:
pass<|fim▁end|> | #
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright |
<|file_name|>test_utf8.py<|end_file_name|><|fim▁begin|>import unittest
from pystan import stanc, StanModel
from pystan._compat import PY2
class TestUTF8(unittest.TestCase):
desired = sorted({"status", "model_cppname", "cppcode", "model_name", "model_code", "include_paths"})
def test_utf8(self):
model_code = 'parameters {real y;} model {y ~ normal(0,1);}'
result = stanc(model_code=model_code)
self.assertEqual(sorted(result.keys()), self.desired)
self.assertTrue(result['cppcode'].startswith("// Code generated by Stan "))
self.assertEqual(result['status'], 0)
def test_utf8_linecomment(self):
model_code = u'parameters {real y;\n //äöéü\n} model {y ~ normal(0,1);}'<|fim▁hole|> self.assertEqual(sorted(result.keys()), self.desired)
self.assertTrue(result['cppcode'].startswith("// Code generated by Stan "))
self.assertEqual(result['status'], 0)
def test_utf8_multilinecomment(self):
model_code = u'parameters {real y;\n /*äöéü\näöéü*/\n} model {y ~ normal(0,1);}'
result = stanc(model_code=model_code)
self.assertEqual(sorted(result.keys()), self.desired)
self.assertTrue(result['cppcode'].startswith("// Code generated by Stan "))
self.assertEqual(result['status'], 0)
def test_utf8_inprogramcode(self):
model_code = u'parameters {real ö;\n} model {ö ~ normal(0,1);}'
assertRaisesRegex = self.assertRaisesRegexp if PY2 else self.assertRaisesRegex
with assertRaisesRegex(ValueError, 'Failed to parse Stan model .*'):
stanc(model_code=model_code)<|fim▁end|> | result = stanc(model_code=model_code) |
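# --- Editor's note: illustrative sketch only, not part of the original test module. ---
# The tests above only exercise stanc(), the parse/translate step, and inspect the
# dictionary it returns. For context, the same model would normally be compiled and
# sampled through StanModel; the iteration and chain counts below are arbitrary
# illustrative values (PyStan 2.x style API).
from pystan import StanModel

example_code = 'parameters {real y;} model {y ~ normal(0,1);}'
example_model = StanModel(model_code=example_code)  # compiles the generated C++ code
example_fit = example_model.sampling(iter=1000, chains=4)  # no data block, so no data dict
print(example_fit)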
<|file_name|>runescapecompare.py<|end_file_name|><|fim▁begin|>import discord
import asyncio
import datetime
import time
import aiohttp
import threading
import glob
import re
import json
import os
import urllib.request
from discord.ext import commands
from random import randint
from random import choice as randchoice
from random import choice as rndchoice
from random import shuffle
from .utils.dataIO import fileIO
from .utils import checks
from bs4 import BeautifulSoup
class Runescapecompare:
"""Runescape-relate commands"""
def __init__(self, bot):
self.bot = bot
"""
imLink = http://services.runescape.com/m=hiscore_ironman/index_lite.ws?player=
nmLink = http://services.runescape.com/m=hiscore/index_lite.ws?player=
"""
@commands.group(name="compare", pass_context=True)
async def _compare(self, ctx):
if ctx.invoked_subcommand is None:
await self.bot.say("Please, choose a skill to compare!")
#####Overall#####
@_compare.command(name="overall", pass_context=True)
async def compare_overall(self, ctx, name1 : str, name2 : str):
address1 = "http://services.runescape.com/m=hiscore_ironman/index_lite.ws?player=" + name1
address2 = "http://services.runescape.com/m=hiscore_ironman/index_lite.ws?player=" + name2
try:
website1 = urllib.request.urlopen(address1)
website2 = urllib.request.urlopen(address2)
website_html1 = website1.read().decode(website1.headers.get_content_charset())
website_html2 = website2.read().decode(website2.headers.get_content_charset())
stats1 = website_html1.split("\n")
stats2 = website_html2.split("\n")
stat1 = stats1[0].split(",")<|fim▁hole|> if stat1[2] > stat2[2]:
comparerank = int(stat2[0]) - int(stat1[0])
comparelvl = int(stat1[1]) - int(stat2[1])
comparexp = int(stat1[2]) - int(stat2[2])
await self.bot.say("```" + name1 + "'s ranking is " + str(comparerank) + " ranks higher than " + name2 + "'s rank.\n" + name1 + "'s level is " + str(comparelvl) + " levels higher than " + name2 + "'s.\n" + name1 + "'s total experience is " + str(comparexp) + " higher than " + name2 + "'s.```")
if stat2[2] > stat1[2]:
                comparerank = int(stat1[0]) - int(stat2[0])
                comparelvl = int(stat2[1]) - int(stat1[1])
                comparexp = int(stat2[2]) - int(stat1[2])
await self.bot.say("```" + name2 + "'s ranking is " + str(comparerank) + " ranks higher than " + name1 + "'s rank.\n" + name2 + "'s level is " + str(comparelvl) + " levels higher than " + name1 + "'s.\n" + name2 + "'s total experience is " + str(comparexp) + " higher than " + name1 + "'s.```")
except:
await self.bot.say("Sorry... Something went wrong there. Did you type the name correctly?")
def setup(bot):
n = Runescapecompare(bot)
bot.add_cog(n)<|fim▁end|> | stat2= stats2[0].split(",") |
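# --- Editor's note: illustrative sketch only, not part of the original cog. ---
# Each line returned by the index_lite.ws hiscore endpoint used above is a
# "rank,level,xp" CSV triple, so stat[0]/stat[1]/stat[2] in compare_overall are
# rank, level and experience. A small hypothetical helper like this makes the
# integer conversion explicit instead of comparing the raw strings.
def parse_overall(hiscore_text):
    """Return (rank, level, xp) of the overall row as ints."""
    rank, level, xp = hiscore_text.split("\n")[0].split(",")[:3]
    return int(rank), int(level), int(xp)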
<|file_name|>FlowableFlatMapCompletable.java<|end_file_name|><|fim▁begin|>/**
* Copyright (c) 2016-present, RxJava Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
* the License for the specific language governing permissions and limitations under the License.
*/
package io.reactivex.internal.operators.flowable;
import java.util.concurrent.atomic.AtomicReference;
import org.reactivestreams.*;
import io.reactivex.*;
import io.reactivex.annotations.Nullable;
import io.reactivex.disposables.*;
import io.reactivex.exceptions.Exceptions;
import io.reactivex.functions.Function;
import io.reactivex.internal.disposables.DisposableHelper;
import io.reactivex.internal.functions.ObjectHelper;
import io.reactivex.internal.subscriptions.*;
import io.reactivex.internal.util.AtomicThrowable;
import io.reactivex.plugins.RxJavaPlugins;
/**
* Maps a sequence of values into CompletableSources and awaits their termination.
* @param <T> the value type
*/
public final class FlowableFlatMapCompletable<T> extends AbstractFlowableWithUpstream<T, T> {
final Function<? super T, ? extends CompletableSource> mapper;
final int maxConcurrency;
final boolean delayErrors;
public FlowableFlatMapCompletable(Flowable<T> source,
Function<? super T, ? extends CompletableSource> mapper, boolean delayErrors,
int maxConcurrency) {
super(source);
this.mapper = mapper;
this.delayErrors = delayErrors;
this.maxConcurrency = maxConcurrency;
}
@Override
protected void subscribeActual(Subscriber<? super T> subscriber) {
source.subscribe(new FlatMapCompletableMainSubscriber<T>(subscriber, mapper, delayErrors, maxConcurrency));
}
static final class FlatMapCompletableMainSubscriber<T> extends BasicIntQueueSubscription<T><|fim▁hole|> private static final long serialVersionUID = 8443155186132538303L;
final Subscriber<? super T> downstream;
final AtomicThrowable errors;
final Function<? super T, ? extends CompletableSource> mapper;
final boolean delayErrors;
final CompositeDisposable set;
final int maxConcurrency;
Subscription upstream;
volatile boolean cancelled;
FlatMapCompletableMainSubscriber(Subscriber<? super T> subscriber,
Function<? super T, ? extends CompletableSource> mapper, boolean delayErrors,
int maxConcurrency) {
this.downstream = subscriber;
this.mapper = mapper;
this.delayErrors = delayErrors;
this.errors = new AtomicThrowable();
this.set = new CompositeDisposable();
this.maxConcurrency = maxConcurrency;
this.lazySet(1);
}
@Override
public void onSubscribe(Subscription s) {
if (SubscriptionHelper.validate(this.upstream, s)) {
this.upstream = s;
downstream.onSubscribe(this);
int m = maxConcurrency;
if (m == Integer.MAX_VALUE) {
s.request(Long.MAX_VALUE);
} else {
s.request(m);
}
}
}
@Override
public void onNext(T value) {
CompletableSource cs;
try {
cs = ObjectHelper.requireNonNull(mapper.apply(value), "The mapper returned a null CompletableSource");
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
upstream.cancel();
onError(ex);
return;
}
getAndIncrement();
InnerConsumer inner = new InnerConsumer();
if (!cancelled && set.add(inner)) {
cs.subscribe(inner);
}
}
@Override
public void onError(Throwable e) {
if (errors.addThrowable(e)) {
if (delayErrors) {
if (decrementAndGet() == 0) {
Throwable ex = errors.terminate();
downstream.onError(ex);
} else {
if (maxConcurrency != Integer.MAX_VALUE) {
upstream.request(1);
}
}
} else {
cancel();
if (getAndSet(0) > 0) {
Throwable ex = errors.terminate();
downstream.onError(ex);
}
}
} else {
RxJavaPlugins.onError(e);
}
}
@Override
public void onComplete() {
if (decrementAndGet() == 0) {
Throwable ex = errors.terminate();
if (ex != null) {
downstream.onError(ex);
} else {
downstream.onComplete();
}
} else {
if (maxConcurrency != Integer.MAX_VALUE) {
upstream.request(1);
}
}
}
@Override
public void cancel() {
cancelled = true;
upstream.cancel();
set.dispose();
}
@Override
public void request(long n) {
// ignored, no values emitted
}
@Nullable
@Override
public T poll() throws Exception {
return null; // always empty
}
@Override
public boolean isEmpty() {
return true; // always empty
}
@Override
public void clear() {
// nothing to clear
}
@Override
public int requestFusion(int mode) {
return mode & ASYNC;
}
void innerComplete(InnerConsumer inner) {
set.delete(inner);
onComplete();
}
void innerError(InnerConsumer inner, Throwable e) {
set.delete(inner);
onError(e);
}
final class InnerConsumer extends AtomicReference<Disposable> implements CompletableObserver, Disposable {
private static final long serialVersionUID = 8606673141535671828L;
@Override
public void onSubscribe(Disposable d) {
DisposableHelper.setOnce(this, d);
}
@Override
public void onComplete() {
innerComplete(this);
}
@Override
public void onError(Throwable e) {
innerError(this, e);
}
@Override
public void dispose() {
DisposableHelper.dispose(this);
}
@Override
public boolean isDisposed() {
return DisposableHelper.isDisposed(get());
}
}
}
}<|fim▁end|> | implements FlowableSubscriber<T> { |
<|file_name|>GtkFixed.java<|end_file_name|><|fim▁begin|>/*******************************************************************************
* Copyright (c) 2000, 2008 IBM Corporation and others. All rights reserved.
* The contents of this file are made available under the terms
* of the GNU Lesser General Public License (LGPL) Version 2.1 that
* accompanies this distribution (lgpl-v21.txt). The LGPL is also
* available at http://www.gnu.org/licenses/lgpl.html. If the version
* of the LGPL at http://www.gnu.org is different to the version of
* the LGPL accompanying this distribution and there is any conflict
* between the two license versions, the terms of the LGPL accompanying<|fim▁hole|> * IBM Corporation - initial API and implementation
*******************************************************************************/
package org.eclipse.swt.internal.gtk;
public class GtkFixed {
/** @field cast=(GList *) */
public long /*int*/ children;
}<|fim▁end|> | * this distribution shall govern.
*
* Contributors: |
<|file_name|>MultipartPostHandler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
####
# 06/2010 Nic Wolfe <[email protected]>
# 02/2006 Will Holcomb <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
import urllib
<|fim▁hole|># Controls how sequences are uncoded. If true, elements may be given multiple values by
# assigning a sequence.
doseq = 1
class MultipartPostHandler(urllib2.BaseHandler):
handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first
def http_request(self, request):
data = request.get_data()
if data is not None and type(data) != str:
v_files = []
v_vars = []
try:
for(key, value) in data.items():
if type(value) in (file, list, tuple):
v_files.append((key, value))
else:
v_vars.append((key, value))
except TypeError:
systype, value, traceback = sys.exc_info()
raise TypeError, "not a valid non-string sequence or mapping object", traceback
if len(v_files) == 0:
data = urllib.urlencode(v_vars, doseq)
else:
boundary, data = MultipartPostHandler.multipart_encode(v_vars, v_files)
contenttype = 'multipart/form-data; boundary=%s' % boundary
if(request.has_header('Content-Type')
and request.get_header('Content-Type').find('multipart/form-data') != 0):
print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
request.add_unredirected_header('Content-Type', contenttype)
request.add_data(data)
return request
@staticmethod
def multipart_encode(vars, files, boundary = None, buffer = None):
if boundary is None:
boundary = mimetools.choose_boundary()
if buffer is None:
buffer = ''
for(key, value) in vars:
buffer += '--%s\r\n' % boundary
buffer += 'Content-Disposition: form-data; name="%s"' % key
buffer += '\r\n\r\n' + value + '\r\n'
for(key, fd) in files:
# allow them to pass in a file or a tuple with name & data
if type(fd) == file:
name_in = fd.name
fd.seek(0)
data_in = fd.read()
elif type(fd) in (tuple, list):
name_in, data_in = fd
filename = os.path.basename(name_in)
contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
buffer += '--%s\r\n' % boundary
buffer += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename)
buffer += 'Content-Type: %s\r\n' % contenttype
# buffer += 'Content-Length: %s\r\n' % file_size
buffer += '\r\n' + data_in + '\r\n'
buffer += '--%s--\r\n\r\n' % boundary
return boundary, buffer
https_request = http_request<|fim▁end|> | import urllib2
import mimetools, mimetypes
import os, sys
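# --- Editor's note: illustrative usage sketch only, not part of the original module. ---
# MultipartPostHandler is meant to be chained into a urllib2 opener; any file object
# found in the data mapping is then sent as a multipart/form-data part. The URL and
# form fields below are placeholders, and the snippet is Python 2 like the module itself.
import urllib2

opener = urllib2.build_opener(MultipartPostHandler)
params = {"username": "bob", "attachment": open("report.txt", "rb")}
response = opener.open("http://example.com/upload/", params)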
|
<|file_name|>controller.rs<|end_file_name|><|fim▁begin|>use std::collections::HashMap;
use std::rc::Rc;
use rustc_serialize::{Decodable, Decoder};
use animation::AnimationClip;
use transform::{Transform, FromTransform};
use blend_tree::{BlendTreeNode, BlendTreeNodeDef, ClipId};
use skeleton::Skeleton;
const MAX_JOINTS: usize = 64;
/// A state that an AnimationController can be in, consisting
/// of a blend tree and a collection of transitions to other states
pub struct AnimationState<T: Transform> {
/// The blend tree used to determine the final blended pose
/// for this state
pub blend_tree: BlendTreeNode<T>,
/// Transitions from this state to other AnimationStates
pub transitions: Vec<AnimationTransition>,
}
/// Representation of a state transition to a target state, with a condition and a duration
#[derive(Debug, Clone, RustcDecodable)]
pub struct AnimationTransition {
/// The name of the target state to transition to
pub target_state: String,
/// The condition that will be checked in order to determine
/// if the controller should transition to the target state
pub condition: TransitionCondition,
/// The duration of the transition, during which a linear blend
/// transition between the current and target states should occur
pub duration: f32,
}
/// Representation of a condition to check for an AnimationTransition
#[derive(Debug, Clone, RustcDecodable)]
pub struct TransitionCondition {
/// The name of the controller parameter to compare with
pub parameter: String,
    /// The comparison operator to use
pub operator: Operator,
/// The constant value to compare with the controller parameter value
pub value: f32,
}
impl TransitionCondition {
/// Returns true if the condition is satisfied
pub fn is_true(&self, parameters: &HashMap<String, f32>) -> bool {
match self.operator {
Operator::LessThan => parameters[&self.parameter[..]] < self.value,
Operator::GreaterThan => parameters[&self.parameter[..]] > self.value,
Operator::LessThanEqual => parameters[&self.parameter[..]] <= self.value,
Operator::GreaterThanEqual => parameters[&self.parameter[..]] >= self.value,
Operator::Equal => parameters[&self.parameter[..]] == self.value,
Operator::NotEqual => parameters[&self.parameter[..]] != self.value,
}
}
}
#[derive(Debug, Clone)]
pub enum Operator {
LessThan,
LessThanEqual,
GreaterThan,
GreaterThanEqual,
Equal,
NotEqual,
}
impl Decodable for Operator {
fn decode<D: Decoder>(decoder: &mut D) -> Result<Operator, D::Error> {
match &try!(decoder.read_str())[..] {
"<" => Ok(Operator::LessThan),
">" => Ok(Operator::GreaterThan),
"<=" => Ok(Operator::LessThanEqual),
">=" => Ok(Operator::GreaterThanEqual),
"=" => Ok(Operator::Equal),
"!=" => Ok(Operator::NotEqual),
_ => Ok(Operator::Equal), // FIXME -- figure out how to throw a D::Error...
}
}
}
/// Definition struct for an AnimationController, which can be deserialized from JSON
/// and converted to an AnimationController instance at runtime
#[derive(Clone, Debug, RustcDecodable)]
pub struct AnimationControllerDef {
/// Identifying name for the controller definition
pub name: String,
/// Declaration list of all parameters that are used by the AnimationController,
/// including state transition conditions and blend tree parameters
pub parameters: Vec<String>,
/// List of animation state definitions
pub states: Vec<AnimationStateDef>,
/// The name of the state that the AnimationController should start in
pub initial_state: String,
}
/// Definition struct for an AnimationState, which can be deserialized from JSON
/// and converted to an AnimationState instance at runtime
#[derive(Clone, Debug)]
pub struct AnimationStateDef {
/// The identifying name for the state
pub name: String,
/// The blend tree definition for this state
pub blend_tree: BlendTreeNodeDef,
/// The transitions to other states that can occur from this state
pub transitions: Vec<AnimationTransition>,
}
impl Decodable for AnimationStateDef {
fn decode<D: Decoder>(decoder: &mut D) -> Result<AnimationStateDef, D::Error> {
decoder.read_struct("root", 0, |decoder| {
let name = try!(decoder.read_struct_field("name", 0, |decoder| {
Ok(try!(decoder.read_str()))
}));
let blend_tree = try!(decoder.read_struct_field("blend_tree", 0, Decodable::decode));
let transitions = try!(decoder.read_struct_field("transitions", 0, |decoder| {
decoder.read_seq(|decoder, len| {
let mut transitions = Vec::new();
for i in (0 .. len) {
transitions.push(try!(decoder.read_seq_elt(i, Decodable::decode)));
}
Ok(transitions)
})
}));
Ok(AnimationStateDef {
name: name,
blend_tree: blend_tree,
transitions: transitions,
})
})
}
}
/// A runtime representation of an Animation State Machine, consisting of one or more
/// AnimationStates connected by AnimationTransitions, where the output animation
/// pose depends on the current state or any active transitions between states.
pub struct AnimationController<T: Transform> {
/// Parameters that will be referenced by blend tree nodes and animation states
parameters: HashMap<String, f32>,
/// Shared reference to the skeleton this controller is using
skeleton: Rc<Skeleton>,
/// Tracks seconds since controller started running
local_clock: f64,
/// Playback speed multiplier.
playback_speed: f64,
/// Mapping of all animation state names to their instances
states: HashMap<String, AnimationState<T>>,
/// The name of the current active AnimationState
current_state: String,
/// The current active AnimationTransition and its start time, if any
transition: Option<(f64, AnimationTransition)>,
}
impl<T: Transform> AnimationController<T> {
/// Create an AnimationController instance from its definition, the desired skeleton, and a
/// collection of currently loaded animation clips.
pub fn new(controller_def: AnimationControllerDef, skeleton: Rc<Skeleton>, animations: &HashMap<ClipId, Rc<AnimationClip<T>>>) -> AnimationController<T> {
let mut parameters = HashMap::new();
for parameter in controller_def.parameters.iter() {
parameters.insert(parameter.clone(), 0.0);
};
let mut states = HashMap::new();
for state_def in controller_def.states.iter() {
let mut blend_tree = BlendTreeNode::from_def(state_def.blend_tree.clone(), animations);
blend_tree.synchronize_subtree(0.0, ¶meters);
states.insert(state_def.name.clone(), AnimationState {
blend_tree: blend_tree,
transitions: state_def.transitions.clone()
});
}
AnimationController {
parameters: parameters,
skeleton: skeleton.clone(),
local_clock: 0.0,
playback_speed: 1.0,
states: states,
current_state: controller_def.initial_state,
transition: None,
}
}
/// Update the controller's local clock with the given time delta
pub fn update(&mut self, delta_time: f64) {
self.local_clock += delta_time * self.playback_speed;
}
/// Checks if controller should transition to a different state, or if currently
/// in a transition, checks if the transition is complete
fn update_state(&mut self, ext_dt: f64) {
match self.transition.clone() {
Some((ref start_time, ref transition)) => {
// If transition is finished, switch state to new transition
if self.local_clock + ext_dt >= start_time + transition.duration as f64{
self.current_state = transition.target_state.clone();
self.transition = None;
}
},
None => {
// Check for any transitions with passing conditions
let current_state = &self.states[&self.current_state[..]];
for transition in current_state.transitions.iter() {
if transition.condition.is_true(&self.parameters) {
self.transition = Some((self.local_clock + ext_dt, transition.clone()));
break;
}
}
}
}
}<|fim▁hole|> /// Set the playback speed for the controller
pub fn set_playback_speed(&mut self, speed: f64) {
self.playback_speed = speed;
}
/// Set the value for the given controller parameter
pub fn set_param_value(&mut self, name: &str, value: f32) {
self.parameters.insert(name.to_string(), value); // :(
}
/// Return the value for the given controller parameter
pub fn get_param_value(&self, name: &str) -> f32 {
self.parameters[name]
}
/// Return a read-only reference to the controller parameter map
pub fn get_parameters(&self) -> &HashMap<String, f32> {
&self.parameters
}
/// Calculate global skeletal joint poses for the given time since last update
pub fn get_output_pose<TOutput: Transform + FromTransform<T>>(&mut self, ext_dt: f64, output_poses: &mut [TOutput]) {
self.update_state(ext_dt);
let elapsed_time = self.local_clock + ext_dt * self.playback_speed;
let mut local_poses = [ T::identity(); MAX_JOINTS ];
{
let current_state = self.states.get_mut(&self.current_state[..]).unwrap();
current_state.blend_tree.get_output_pose(elapsed_time as f32, &self.parameters, &mut local_poses[..]);
}
// TODO - would be kinda cool if you could just use a lerp node that pointed to the two
// blend trees, but then we'd need RC pointers?
if let Some((transition_start_time, ref transition)) = self.transition {
// Blend with the target state ...
let mut target_poses = [ T::identity(); MAX_JOINTS ];
let target_state = self.states.get_mut(&transition.target_state[..]).unwrap();
target_state.blend_tree.get_output_pose(elapsed_time as f32, &self.parameters, &mut target_poses[..]);
let blend_parameter = ((self.local_clock + ext_dt - transition_start_time) / transition.duration as f64) as f32;
for i in (0 .. output_poses.len()) {
let pose_1 = &mut local_poses[i];
let pose_2 = target_poses[i];
*pose_1 = pose_1.lerp(pose_2, blend_parameter);
}
}
self.calculate_global_poses(&local_poses[..], output_poses);
}
/// Calculate global poses from the controller's skeleton and the given local poses
fn calculate_global_poses<TOutput: Transform + FromTransform<T>>(
&self,
local_poses: &[T],
global_poses: &mut [TOutput],
) {
for (joint_index, joint) in self.skeleton.joints.iter().enumerate() {
let parent_pose = if !joint.is_root() {
global_poses[joint.parent_index as usize]
} else {
TOutput::identity()
};
let local_pose = local_poses[joint_index];
global_poses[joint_index] = parent_pose.concat(TOutput::from_transform(local_pose));
}
}
}<|fim▁end|> | |
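# --- Editor's note: illustrative sketch only, not part of the original Rust module. ---
# AnimationControllerDef and the nested defs above implement RustcDecodable, so a
# controller is normally described in JSON. The literal below (written in Python/JSON
# style) mirrors the fields those decoders read; every name and value is hypothetical,
# and the exact blend_tree payload depends on BlendTreeNodeDef, which lives in another
# module and is not shown here.
example_controller_def = {
    "name": "player_locomotion",
    "parameters": ["speed"],
    "initial_state": "idle",
    "states": [
        {
            "name": "idle",
            "blend_tree": {"type": "ClipNode", "clip_source": "idle-loop"},
            "transitions": [
                {
                    "target_state": "walk",
                    "duration": 0.25,
                    "condition": {"parameter": "speed", "operator": ">", "value": 0.1},
                },
            ],
        },
    ],
}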
<|file_name|>ResponderEventPlugin.js<|end_file_name|><|fim▁begin|>/**
* Copyright 2013-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*
* @providesModule ResponderEventPlugin
*/
'use strict';
var EventConstants = require('EventConstants');
var EventPluginUtils = require('EventPluginUtils');
var EventPropagators = require('EventPropagators');
var ResponderSyntheticEvent = require('ResponderSyntheticEvent');
var ResponderTouchHistoryStore = require('ResponderTouchHistoryStore');
var accumulate = require('accumulate');
var invariant = require('invariant');
var keyOf = require('keyOf');
var isStartish = EventPluginUtils.isStartish;
var isMoveish = EventPluginUtils.isMoveish;
var isEndish = EventPluginUtils.isEndish;
var executeDirectDispatch = EventPluginUtils.executeDirectDispatch;
var hasDispatches = EventPluginUtils.hasDispatches;
var executeDispatchesInOrderStopAtTrue =
EventPluginUtils.executeDispatchesInOrderStopAtTrue;
/**
* Instance of element that should respond to touch/move types of interactions,
* as indicated explicitly by relevant callbacks.
*/
var responderInst = null;
/**
* Count of current touches. A textInput should become responder iff the
* selection changes while there is a touch on the screen.
*/
var trackedTouchCount = 0;
/**
* Last reported number of active touches.
*/
var previousActiveTouches = 0;
var changeResponder = function(nextResponderInst, blockHostResponder) {
var oldResponderInst = responderInst;
responderInst = nextResponderInst;
if (ResponderEventPlugin.GlobalResponderHandler !== null) {
ResponderEventPlugin.GlobalResponderHandler.onChange(
oldResponderInst,
nextResponderInst,
blockHostResponder
);
}
};
var eventTypes = {
/**
* On a `touchStart`/`mouseDown`, is it desired that this element become the
* responder?
*/
startShouldSetResponder: {
phasedRegistrationNames: {
bubbled: keyOf({onStartShouldSetResponder: null}),
captured: keyOf({onStartShouldSetResponderCapture: null}),
},
},
/**
* On a `scroll`, is it desired that this element become the responder? This
* is usually not needed, but should be used to retroactively infer that a
* `touchStart` had occurred during momentum scroll. During a momentum scroll,
* a touch start will be immediately followed by a scroll event if the view is
* currently scrolling.
*
* TODO: This shouldn't bubble.
*/
scrollShouldSetResponder: {
phasedRegistrationNames: {
bubbled: keyOf({onScrollShouldSetResponder: null}),
captured: keyOf({onScrollShouldSetResponderCapture: null}),
},
},
/**
* On text selection change, should this element become the responder? This
* is needed for text inputs or other views with native selection, so the
* JS view can claim the responder.
*
* TODO: This shouldn't bubble.
*/
selectionChangeShouldSetResponder: {
phasedRegistrationNames: {
bubbled: keyOf({onSelectionChangeShouldSetResponder: null}),
captured: keyOf({onSelectionChangeShouldSetResponderCapture: null}),
},
},
/**
* On a `touchMove`/`mouseMove`, is it desired that this element become the
* responder?
*/
moveShouldSetResponder: {
phasedRegistrationNames: {
bubbled: keyOf({onMoveShouldSetResponder: null}),
captured: keyOf({onMoveShouldSetResponderCapture: null}),
},
},
/**
* Direct responder events dispatched directly to responder. Do not bubble.
*/
responderStart: {registrationName: keyOf({onResponderStart: null})},
responderMove: {registrationName: keyOf({onResponderMove: null})},
responderEnd: {registrationName: keyOf({onResponderEnd: null})},
responderRelease: {registrationName: keyOf({onResponderRelease: null})},
responderTerminationRequest: {
registrationName: keyOf({onResponderTerminationRequest: null}),
},
responderGrant: {registrationName: keyOf({onResponderGrant: null})},
responderReject: {registrationName: keyOf({onResponderReject: null})},
responderTerminate: {registrationName: keyOf({onResponderTerminate: null})},
};
/**
*
* Responder System:
* ----------------
*
* - A global, solitary "interaction lock" on a view.
* - If a node becomes the responder, it should convey visual feedback
* immediately to indicate so, either by highlighting or moving accordingly.
* - To be the responder means, that touches are exclusively important to that
* responder view, and no other view.
* - While touches are still occurring, the responder lock can be transferred to
* a new view, but only to increasingly "higher" views (meaning ancestors of
* the current responder).
*
* Responder being granted:
* ------------------------
*
* - Touch starts, moves, and scrolls can cause an ID to become the responder.
* - We capture/bubble `startShouldSetResponder`/`moveShouldSetResponder` to
* the "appropriate place".
* - If nothing is currently the responder, the "appropriate place" is the
* initiating event's `targetID`.
* - If something *is* already the responder, the "appropriate place" is the
* first common ancestor of the event target and the current `responderInst`.
* - Some negotiation happens: See the timing diagram below.
* - Scrolled views automatically become responder. The reasoning is that a
* platform scroll view that isn't built on top of the responder system has
* began scrolling, and the active responder must now be notified that the
* interaction is no longer locked to it - the system has taken over.
*
* - Responder being released:
* As soon as no more touches that *started* inside of descendants of the
* *current* responderInst, an `onResponderRelease` event is dispatched to the
* current responder, and the responder lock is released.
*
* TODO:
* - on "end", a callback hook for `onResponderEndShouldRemainResponder` that
* determines if the responder lock should remain.
* - If a view shouldn't "remain" the responder, any active touches should by
* default be considered "dead" and do not influence future negotiations or
* bubble paths. It should be as if those touches do not exist.
* -- For multitouch: Usually a translate-z will choose to "remain" responder
* after one out of many touches ended. For translate-y, usually the view
* doesn't wish to "remain" responder after one of many touches end.
* - Consider building this on top of a `stopPropagation` model similar to
* `W3C` events.
* - Ensure that `onResponderTerminate` is called on touch cancels, whether or
* not `onResponderTerminationRequest` returns `true` or `false`.
*
*/
/* Negotiation Performed
+-----------------------+
/ \
Process low level events to + Current Responder + wantsResponderID
determine who to perform negot-| (if any exists at all) |
iation/transition | Otherwise just pass through|
-------------------------------+----------------------------+------------------+
Bubble to find first ID | |
to return true:wantsResponderID| |
| |
+-------------+ | |
| onTouchStart| | |
+------+------+ none | |
| return| |
+-----------v-------------+true| +------------------------+ |
|onStartShouldSetResponder|----->|onResponderStart (cur) |<-----------+
+-----------+-------------+ | +------------------------+ | |
| | | +--------+-------+
| returned true for| false:REJECT +-------->|onResponderReject
| wantsResponderID | | | +----------------+
| (now attempt | +------------------+-----+ |
| handoff) | | onResponder | |
+------------------->| TerminationRequest| |
| +------------------+-----+ |
| | | +----------------+
| true:GRANT +-------->|onResponderGrant|
| | +--------+-------+
| +------------------------+ | |
| | onResponderTerminate |<-----------+
| +------------------+-----+ |
| | | +----------------+
| +-------->|onResponderStart|
| | +----------------+
Bubble to find first ID | |
to return true:wantsResponderID| |
| |
+-------------+ | |
| onTouchMove | | |
+------+------+ none | |
| return| |
+-----------v-------------+true| +------------------------+ |
|onMoveShouldSetResponder |----->|onResponderMove (cur) |<-----------+
+-----------+-------------+ | +------------------------+ | |
| | | +--------+-------+
| returned true for| false:REJECT +-------->|onResponderRejec|
| wantsResponderID | | | +----------------+
| (now attempt | +------------------+-----+ |
| handoff) | | onResponder | |
+------------------->| TerminationRequest| |
| +------------------+-----+ |
| | | +----------------+
| true:GRANT +-------->|onResponderGrant|
| | +--------+-------+
| +------------------------+ | |
| | onResponderTerminate |<-----------+
| +------------------+-----+ |
| | | +----------------+
| +-------->|onResponderMove |
| | +----------------+
| |
| |
Some active touch started| |
inside current responder | +------------------------+ |
+------------------------->| onResponderEnd | |
| | +------------------------+ |
+---+---------+ | |
| onTouchEnd | | |
+---+---------+ | |
| | +------------------------+ |
+------------------------->| onResponderEnd | |
No active touches started| +-----------+------------+ |
inside current responder | | |
| v |
| +------------------------+ |
| | onResponderRelease | |
| +------------------------+ |
| |
+ + */
/**
* A note about event ordering in the `EventPluginHub`.
*
* Suppose plugins are injected in the following order:
*
* `[R, S, C]`
*
* To help illustrate the example, assume `S` is `SimpleEventPlugin` (for
* `onClick` etc) and `R` is `ResponderEventPlugin`.
*
* "Deferred-Dispatched Events":
*
* - The current event plugin system will traverse the list of injected plugins,
* in order, and extract events by collecting the plugin's return value of
* `extractEvents()`.
* - These events that are returned from `extractEvents` are "deferred
* dispatched events".
* - When returned from `extractEvents`, deferred-dispatched events contain an<|fim▁hole|> * - These deferred dispatches are accumulated/collected before they are
* returned, but processed at a later time by the `EventPluginHub` (hence the
* name deferred).
*
* In the process of returning their deferred-dispatched events, event plugins
* themselves can dispatch events on-demand without returning them from
* `extractEvents`. Plugins might want to do this, so that they can use event
* dispatching as a tool that helps them decide which events should be extracted
* in the first place.
*
* "On-Demand-Dispatched Events":
*
* - On-demand-dispatched events are not returned from `extractEvents`.
* - On-demand-dispatched events are dispatched during the process of returning
* the deferred-dispatched events.
* - They should not have side effects.
* - They should be avoided, and/or eventually be replaced with another
* abstraction that allows event plugins to perform multiple "rounds" of event
* extraction.
*
* Therefore, the sequence of event dispatches becomes:
*
* - `R`s on-demand events (if any) (dispatched by `R` on-demand)
* - `S`s on-demand events (if any) (dispatched by `S` on-demand)
* - `C`s on-demand events (if any) (dispatched by `C` on-demand)
* - `R`s extracted events (if any) (dispatched by `EventPluginHub`)
* - `S`s extracted events (if any) (dispatched by `EventPluginHub`)
* - `C`s extracted events (if any) (dispatched by `EventPluginHub`)
*
* In the case of `ResponderEventPlugin`: If the `startShouldSetResponder`
* on-demand dispatch returns `true` (and some other details are satisfied) the
* `onResponderGrant` deferred dispatched event is returned from
* `extractEvents`. The sequence of dispatch executions in this case
* will appear as follows:
*
* - `startShouldSetResponder` (`ResponderEventPlugin` dispatches on-demand)
* - `touchStartCapture` (`EventPluginHub` dispatches as usual)
* - `touchStart` (`EventPluginHub` dispatches as usual)
* - `responderGrant/Reject` (`EventPluginHub` dispatches as usual)
*/
function setResponderAndExtractTransfer(
topLevelType,
targetInst,
nativeEvent,
nativeEventTarget
) {
var shouldSetEventType =
isStartish(topLevelType) ? eventTypes.startShouldSetResponder :
isMoveish(topLevelType) ? eventTypes.moveShouldSetResponder :
topLevelType === EventConstants.topLevelTypes.topSelectionChange ?
eventTypes.selectionChangeShouldSetResponder :
eventTypes.scrollShouldSetResponder;
// TODO: stop one short of the current responder.
var bubbleShouldSetFrom = !responderInst ?
targetInst :
EventPluginUtils.getLowestCommonAncestor(responderInst, targetInst);
// When capturing/bubbling the "shouldSet" event, we want to skip the target
// (deepest ID) if it happens to be the current responder. The reasoning:
// It's strange to get an `onMoveShouldSetResponder` when you're *already*
// the responder.
var skipOverBubbleShouldSetFrom = bubbleShouldSetFrom === responderInst;
var shouldSetEvent = ResponderSyntheticEvent.getPooled(
shouldSetEventType,
bubbleShouldSetFrom,
nativeEvent,
nativeEventTarget
);
shouldSetEvent.touchHistory = ResponderTouchHistoryStore.touchHistory;
if (skipOverBubbleShouldSetFrom) {
EventPropagators.accumulateTwoPhaseDispatchesSkipTarget(shouldSetEvent);
} else {
EventPropagators.accumulateTwoPhaseDispatches(shouldSetEvent);
}
var wantsResponderInst = executeDispatchesInOrderStopAtTrue(shouldSetEvent);
if (!shouldSetEvent.isPersistent()) {
shouldSetEvent.constructor.release(shouldSetEvent);
}
if (!wantsResponderInst || wantsResponderInst === responderInst) {
return null;
}
var extracted;
var grantEvent = ResponderSyntheticEvent.getPooled(
eventTypes.responderGrant,
wantsResponderInst,
nativeEvent,
nativeEventTarget
);
grantEvent.touchHistory = ResponderTouchHistoryStore.touchHistory;
EventPropagators.accumulateDirectDispatches(grantEvent);
var blockHostResponder = executeDirectDispatch(grantEvent) === true;
if (responderInst) {
var terminationRequestEvent = ResponderSyntheticEvent.getPooled(
eventTypes.responderTerminationRequest,
responderInst,
nativeEvent,
nativeEventTarget
);
terminationRequestEvent.touchHistory = ResponderTouchHistoryStore.touchHistory;
EventPropagators.accumulateDirectDispatches(terminationRequestEvent);
var shouldSwitch = !hasDispatches(terminationRequestEvent) ||
executeDirectDispatch(terminationRequestEvent);
if (!terminationRequestEvent.isPersistent()) {
terminationRequestEvent.constructor.release(terminationRequestEvent);
}
if (shouldSwitch) {
var terminateEvent = ResponderSyntheticEvent.getPooled(
eventTypes.responderTerminate,
responderInst,
nativeEvent,
nativeEventTarget
);
terminateEvent.touchHistory = ResponderTouchHistoryStore.touchHistory;
EventPropagators.accumulateDirectDispatches(terminateEvent);
extracted = accumulate(extracted, [grantEvent, terminateEvent]);
changeResponder(wantsResponderInst, blockHostResponder);
} else {
var rejectEvent = ResponderSyntheticEvent.getPooled(
eventTypes.responderReject,
wantsResponderInst,
nativeEvent,
nativeEventTarget
);
rejectEvent.touchHistory = ResponderTouchHistoryStore.touchHistory;
EventPropagators.accumulateDirectDispatches(rejectEvent);
extracted = accumulate(extracted, rejectEvent);
}
} else {
extracted = accumulate(extracted, grantEvent);
changeResponder(wantsResponderInst, blockHostResponder);
}
return extracted;
}
/**
* A transfer is a negotiation between a currently set responder and the next
* element to claim responder status. Any start event could trigger a transfer
* of responderInst. Any move event could trigger a transfer.
*
* @param {string} topLevelType Record from `EventConstants`.
* @return {boolean} True if a transfer of responder could possibly occur.
*/
function canTriggerTransfer(topLevelType, topLevelInst, nativeEvent) {
return topLevelInst && (
// responderIgnoreScroll: We are trying to migrate away from specifically
// tracking native scroll events here and responderIgnoreScroll indicates we
// will send topTouchCancel to handle canceling touch events instead
(topLevelType === EventConstants.topLevelTypes.topScroll &&
!nativeEvent.responderIgnoreScroll) ||
(trackedTouchCount > 0 &&
topLevelType === EventConstants.topLevelTypes.topSelectionChange) ||
isStartish(topLevelType) ||
isMoveish(topLevelType)
);
}
/**
* Returns whether or not this touch end event makes it such that there are no
* longer any touches that started inside of the current `responderInst`.
*
* @param {NativeEvent} nativeEvent Native touch end event.
* @return {boolean} Whether or not this touch end event ends the responder.
*/
function noResponderTouches(nativeEvent) {
var touches = nativeEvent.touches;
if (!touches || touches.length === 0) {
return true;
}
for (var i = 0; i < touches.length; i++) {
var activeTouch = touches[i];
var target = activeTouch.target;
if (target !== null && target !== undefined && target !== 0) {
// Is the original touch location inside of the current responder?
var targetInst = EventPluginUtils.getInstanceFromNode(target);
if (EventPluginUtils.isAncestor(responderInst, targetInst)) {
return false;
}
}
}
return true;
}
var ResponderEventPlugin = {
/* For unit testing only */
_getResponderID: function() {
return responderInst ? responderInst._rootNodeID : null;
},
eventTypes: eventTypes,
/**
* We must be resilient to `targetInst` being `null` on `touchMove` or
* `touchEnd`. On certain platforms, this means that a native scroll has
* assumed control and the original touch targets are destroyed.
*/
extractEvents: function(
topLevelType,
targetInst,
nativeEvent,
nativeEventTarget
) {
if (isStartish(topLevelType)) {
trackedTouchCount += 1;
} else if (isEndish(topLevelType)) {
trackedTouchCount -= 1;
invariant(
trackedTouchCount >= 0,
'Ended a touch event which was not counted in trackedTouchCount.'
);
}
ResponderTouchHistoryStore.recordTouchTrack(topLevelType, nativeEvent, nativeEventTarget);
var extracted = canTriggerTransfer(topLevelType, targetInst, nativeEvent) ?
setResponderAndExtractTransfer(
topLevelType,
targetInst,
nativeEvent,
nativeEventTarget) :
null;
// Responder may or may not have transferred on a new touch start/move.
// Regardless, whoever is the responder after any potential transfer, we
// direct all touch start/move/ends to them in the form of
// `onResponderMove/Start/End`. These will be called for *every* additional
// finger that move/start/end, dispatched directly to whoever is the
// current responder at that moment, until the responder is "released".
//
    // These multiple individual change touch events are always bookended
// by `onResponderGrant`, and one of
// (`onResponderRelease/onResponderTerminate`).
var isResponderTouchStart = responderInst && isStartish(topLevelType);
var isResponderTouchMove = responderInst && isMoveish(topLevelType);
var isResponderTouchEnd = responderInst && isEndish(topLevelType);
var incrementalTouch =
isResponderTouchStart ? eventTypes.responderStart :
isResponderTouchMove ? eventTypes.responderMove :
isResponderTouchEnd ? eventTypes.responderEnd :
null;
if (incrementalTouch) {
var gesture =
ResponderSyntheticEvent.getPooled(
incrementalTouch,
responderInst,
nativeEvent,
nativeEventTarget
);
gesture.touchHistory = ResponderTouchHistoryStore.touchHistory;
EventPropagators.accumulateDirectDispatches(gesture);
extracted = accumulate(extracted, gesture);
}
var isResponderTerminate =
responderInst &&
topLevelType === EventConstants.topLevelTypes.topTouchCancel;
var isResponderRelease =
responderInst &&
!isResponderTerminate &&
isEndish(topLevelType) &&
noResponderTouches(nativeEvent);
var finalTouch =
isResponderTerminate ? eventTypes.responderTerminate :
isResponderRelease ? eventTypes.responderRelease :
null;
if (finalTouch) {
var finalEvent = ResponderSyntheticEvent.getPooled(
finalTouch, responderInst, nativeEvent, nativeEventTarget
);
finalEvent.touchHistory = ResponderTouchHistoryStore.touchHistory;
EventPropagators.accumulateDirectDispatches(finalEvent);
extracted = accumulate(extracted, finalEvent);
changeResponder(null);
}
var numberActiveTouches =
ResponderTouchHistoryStore.touchHistory.numberActiveTouches;
if (ResponderEventPlugin.GlobalInteractionHandler &&
numberActiveTouches !== previousActiveTouches) {
ResponderEventPlugin.GlobalInteractionHandler.onChange(
numberActiveTouches
);
}
previousActiveTouches = numberActiveTouches;
return extracted;
},
GlobalResponderHandler: null,
GlobalInteractionHandler: null,
injection: {
/**
* @param {{onChange: (ReactID, ReactID) => void} GlobalResponderHandler
* Object that handles any change in responder. Use this to inject
* integration with an existing touch handling system etc.
*/
injectGlobalResponderHandler: function(GlobalResponderHandler) {
ResponderEventPlugin.GlobalResponderHandler = GlobalResponderHandler;
},
/**
* @param {{onChange: (numberActiveTouches) => void} GlobalInteractionHandler
* Object that handles any change in the number of active touches.
*/
injectGlobalInteractionHandler: function(GlobalInteractionHandler) {
ResponderEventPlugin.GlobalInteractionHandler = GlobalInteractionHandler;
},
},
};
module.exports = ResponderEventPlugin;<|fim▁end|> | * "accumulation" of deferred dispatches. |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># =============================================================================
# 2013+ Copyright (c) Alexey Ivanov <[email protected]>
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.<|fim▁hole|><|fim▁end|> | # =============================================================================
from __future__ import absolute_import |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""urls.py: messages extends"""
from django.conf.urls import url
from messages_extends.views import message_mark_all_read, message_mark_read
urlpatterns = [
url(r'^mark_read/(?P<message_id>\d+)/$', message_mark_read, name='message_mark_read'),
url(r'^mark_read/all/$', message_mark_all_read, name='message_mark_all_read'),<|fim▁hole|><|fim▁end|> | ] |
<|file_name|>tags.js<|end_file_name|><|fim▁begin|>import { CONSTANT_TAG, DirtyableTag } from 'glimmer-reference';
import { meta as metaFor } from './meta';
import require from 'require';
import { isProxy } from './is_proxy';
<|fim▁hole|>let hasViews = () => false;
export function setHasViews(fn) {
hasViews = fn;
}
function makeTag() {
return new DirtyableTag();
}
export function tagForProperty(object, propertyKey, _meta) {
if (isProxy(object)) {
return tagFor(object, _meta);
}
if (typeof object === 'object' && object) {
let meta = _meta || metaFor(object);
let tags = meta.writableTags();
let tag = tags[propertyKey];
if (tag) { return tag; }
return tags[propertyKey] = makeTag();
} else {
return CONSTANT_TAG;
}
}
export function tagFor(object, _meta) {
if (typeof object === 'object' && object) {
let meta = _meta || metaFor(object);
return meta.writableTag(makeTag);
} else {
return CONSTANT_TAG;
}
}
export function markObjectAsDirty(meta, propertyKey) {
let objectTag = meta && meta.readableTag();
if (objectTag) {
objectTag.dirty();
}
let tags = meta && meta.readableTags();
let propertyTag = tags && tags[propertyKey];
if (propertyTag) {
propertyTag.dirty();
}
if (objectTag || propertyTag) {
ensureRunloop();
}
}
let run;
function K() {}
function ensureRunloop() {
if (!run) {
run = require('ember-metal/run_loop').default;
}
if (hasViews() && !run.backburner.currentInstance) {
run.schedule('actions', K);
}
}<|fim▁end|> | |
<|file_name|>test_soft_conf.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
class TestSoftwareConfig(base.BaseOrchestrationTest):
def setUp(self):
super(TestSoftwareConfig, self).setUp()
self.configs = []
# Add 2 sets of software configuration
self.configs.append(self._config_create('a'))
self.configs.append(self._config_create('b'))
# Create a deployment using config a's id
self._deployment_create(self.configs[0]['id'])
def _config_create(self, suffix):
configuration = {'group': 'script',
'inputs': [],
'outputs': [],
'options': {}}
configuration['name'] = 'heat_soft_config_%s' % suffix
configuration['config'] = '#!/bin/bash echo init-%s' % suffix
api_config = self.client.create_software_config(**configuration)
configuration['id'] = api_config['software_config']['id']
self.addCleanup(self._config_delete, configuration['id'])
self._validate_config(configuration, api_config)
return configuration
def _validate_config(self, configuration, api_config):
# Assert all expected keys are present with matching data
for k in configuration:
self.assertEqual(configuration[k],
api_config['software_config'][k])
def _deployment_create(self, config_id):
self.server_id = data_utils.rand_name('dummy-server')
self.action = 'ACTION_0'
self.status = 'STATUS_0'
self.input_values = {}
self.output_values = []
self.status_reason = 'REASON_0'
self.signal_transport = 'NO_SIGNAL'
self.deployment = self.client.create_software_deploy(
self.server_id, config_id, self.action, self.status,
self.input_values, self.output_values, self.status_reason,
self.signal_transport)
self.deployment_id = self.deployment['software_deployment']['id']
self.addCleanup(self._deployment_delete, self.deployment_id)
<|fim▁hole|> self.assertRaises(
lib_exc.NotFound, self.client.show_software_deployment,
self.deployment_id)
def _config_delete(self, config_id):
self.client.delete_software_config(config_id)
# Testing that it is really gone
self.assertRaises(
lib_exc.NotFound, self.client.show_software_config, config_id)
@test.attr(type='smoke')
@test.idempotent_id('136162ed-9445-4b9c-b7fc-306af8b5da99')
def test_get_software_config(self):
"""Testing software config get."""
for conf in self.configs:
api_config = self.client.show_software_config(conf['id'])
self._validate_config(conf, api_config)
@test.attr(type='smoke')
@test.idempotent_id('1275c835-c967-4a2c-8d5d-ad533447ed91')
def test_get_deployment_list(self):
"""Getting a list of all deployments"""
deploy_list = self.client.list_software_deployments()
deploy_ids = [deploy['id'] for deploy in
deploy_list['software_deployments']]
self.assertIn(self.deployment_id, deploy_ids)
@test.attr(type='smoke')
@test.idempotent_id('fe7cd9f9-54b1-429c-a3b7-7df8451db913')
def test_get_deployment_metadata(self):
"""Testing deployment metadata get"""
metadata = self.client.show_software_deployment_metadata(
self.server_id)
conf_ids = [conf['id'] for conf in metadata['metadata']]
self.assertIn(self.configs[0]['id'], conf_ids)
def _validate_deployment(self, action, status, reason, config_id):
deployment = self.client.show_software_deployment(self.deployment_id)
self.assertEqual(action, deployment['software_deployment']['action'])
self.assertEqual(status, deployment['software_deployment']['status'])
self.assertEqual(reason,
deployment['software_deployment']['status_reason'])
self.assertEqual(config_id,
deployment['software_deployment']['config_id'])
@test.attr(type='smoke')
@test.idempotent_id('f29d21f3-ed75-47cf-8cdc-ef1bdeb4c674')
def test_software_deployment_create_validate(self):
"""Testing software deployment was created as expected."""
# Asserting that all fields were created
self.assert_fields_in_dict(
self.deployment['software_deployment'], 'action', 'config_id',
'id', 'input_values', 'output_values', 'server_id', 'status',
'status_reason')
# Testing get for this deployment and verifying parameters
self._validate_deployment(self.action, self.status,
self.status_reason, self.configs[0]['id'])
@test.attr(type='smoke')
@test.idempotent_id('2ac43ab3-34f2-415d-be2e-eabb4d14ee32')
def test_software_deployment_update_no_metadata_change(self):
"""Testing software deployment update without metadata change."""
metadata = self.client.show_software_deployment_metadata(
self.server_id)
# Updating values without changing the configuration ID
new_action = 'ACTION_1'
new_status = 'STATUS_1'
new_reason = 'REASON_1'
self.client.update_software_deploy(
self.deployment_id, self.server_id, self.configs[0]['id'],
new_action, new_status, self.input_values, self.output_values,
new_reason, self.signal_transport)
# Verifying get and that the deployment was updated as expected
self._validate_deployment(new_action, new_status,
new_reason, self.configs[0]['id'])
# Metadata should not be changed at this point
test_metadata = self.client.show_software_deployment_metadata(
self.server_id)
for key in metadata['metadata'][0]:
self.assertEqual(
metadata['metadata'][0][key],
test_metadata['metadata'][0][key])
@test.attr(type='smoke')
@test.idempotent_id('92c48944-d79d-4595-a840-8e1a581c1a72')
def test_software_deployment_update_with_metadata_change(self):
"""Testing software deployment update with metadata change."""
metadata = self.client.show_software_deployment_metadata(
self.server_id)
self.client.update_software_deploy(
self.deployment_id, self.server_id, self.configs[1]['id'],
self.action, self.status, self.input_values,
self.output_values, self.status_reason, self.signal_transport)
self._validate_deployment(self.action, self.status,
self.status_reason, self.configs[1]['id'])
# Metadata should now be changed
new_metadata = self.client.show_software_deployment_metadata(
self.server_id)
# It's enough to test the ID in this case
meta_id = metadata['metadata'][0]['id']
test_id = new_metadata['metadata'][0]['id']
self.assertNotEqual(meta_id, test_id)<|fim▁end|> | def _deployment_delete(self, deploy_id):
self.client.delete_software_deploy(deploy_id)
# Testing that it is really gone |
<|file_name|>eagle.py<|end_file_name|><|fim▁begin|>#===========================================================================
#
# Port to use for the web server. Configure the Eagle to use this
# port as its 'cloud provider' using http://host:PORT
#
#===========================================================================
httpPort = 22042
#===========================================================================
#
# MQTT topic names
#
#===========================================================================<|fim▁hole|>mqttEnergy = 'power/elec/Home/energy'
# Instantaneous power usage topic (reports power usage in W)
mqttPower = 'power/elec/Home/power'
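# Illustrative only (not part of this config): a client could watch these topics
# with paho-mqtt; the broker host and port below are assumptions.
#
#   import paho.mqtt.client as mqtt
#   client = mqtt.Client()
#   client.connect('localhost', 1883)
#   client.subscribe(mqttEnergy)   # cumulative meter reading (kWh)
#   client.subscribe(mqttPower)    # instantaneous power (W)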
#===========================================================================
#
# Logging configuration. Env variables are allowed in the file name.
#
#===========================================================================
logFile = '/var/log/tHome/eagle.log'
logLevel = 40<|fim▁end|> | # Meter reading topic (reports current meter reading in kWh) |
<|file_name|>stacktrace.cc<|end_file_name|><|fim▁begin|>// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat
//
// Produce stack trace.
//
// There are three different ways we can try to get the stack trace:
//
// 1) Our hand-coded stack-unwinder. This depends on a certain stack
// layout, which is used by gcc (and those systems using a
// gcc-compatible ABI) on x86 systems, at least since gcc 2.95.
// It uses the frame pointer to do its work.
//
// 2) The libunwind library. This is still in development, and as a
// separate library adds a new dependency, but doesn't need a frame
// pointer. It also doesn't call malloc.
//
// 3) The gdb unwinder -- also the one used by the c++ exception code.
// It's obviously well-tested, but has a fatal flaw: it can call
// malloc() from the unwinder. This is a problem because we're
// trying to use the unwinder to instrument malloc().
//
// Note: if you add a new implementation here, make sure it works
// correctly when GetStackTrace() is called with max_depth == 0.
// Some code may do that.
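// Illustrative note (not part of the original comment): the implementation that
// is compiled in as the default can also be overridden at run time via the
// environment, which init_default_stack_impl() below consults, e.g.:
//
//   TCMALLOC_STACKTRACE_METHOD=libunwind TCMALLOC_STACKTRACE_METHOD_VERBOSE=1 ./my_program
//
// ("my_program" is a placeholder; the method name must match one of the
// compiled-in entries collected in all_impls[].)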
#include <config.h>
#include <stdlib.h> // for getenv
#include <string.h> // for strcmp
#include <stdio.h> // for fprintf
#include "gperftools/stacktrace.h"
#include "base/commandlineflags.h"
#include "base/googleinit.h"
// we're using plain struct and not class to avoid any possible issues
// during initialization. Struct of pointers is easy to init at
// link-time.
struct GetStackImplementation {
int (*GetStackFramesPtr)(void** result, int* sizes, int max_depth,
int skip_count);
int (*GetStackFramesWithContextPtr)(void** result, int* sizes, int max_depth,
int skip_count, const void *uc);
int (*GetStackTracePtr)(void** result, int max_depth,
int skip_count);
int (*GetStackTraceWithContextPtr)(void** result, int max_depth,
int skip_count, const void *uc);
const char *name;
};
#if HAVE_DECL_BACKTRACE
#define STACKTRACE_INL_HEADER "stacktrace_generic-inl.h"
#define GST_SUFFIX generic
#include "stacktrace_impl_setup-inl.h"
#undef GST_SUFFIX
#undef STACKTRACE_INL_HEADER
#define HAVE_GST_generic
#endif
// libunwind uses __thread so we check for both libunwind.h and
// __thread support
#if defined(HAVE_LIBUNWIND_H) && defined(HAVE_TLS)
#define STACKTRACE_INL_HEADER "stacktrace_libunwind-inl.h"
#define GST_SUFFIX libunwind
#include "stacktrace_impl_setup-inl.h"
#undef GST_SUFFIX
#undef STACKTRACE_INL_HEADER
#define HAVE_GST_libunwind
#endif // HAVE_LIBUNWIND_H
#if defined(__i386__) || defined(__x86_64__)
#define STACKTRACE_INL_HEADER "stacktrace_x86-inl.h"
#define GST_SUFFIX x86
#include "stacktrace_impl_setup-inl.h"
#undef GST_SUFFIX
#undef STACKTRACE_INL_HEADER
#define HAVE_GST_x86
#endif // i386 || x86_64
<|fim▁hole|>#define STACKTRACE_INL_HEADER "stacktrace_powerpc-darwin-inl.h"
#endif
#define GST_SUFFIX ppc
#include "stacktrace_impl_setup-inl.h"
#undef GST_SUFFIX
#undef STACKTRACE_INL_HEADER
#define HAVE_GST_ppc
#endif
#if defined(__arm__)
#define STACKTRACE_INL_HEADER "stacktrace_arm-inl.h"
#define GST_SUFFIX arm
#include "stacktrace_impl_setup-inl.h"
#undef GST_SUFFIX
#undef STACKTRACE_INL_HEADER
#define HAVE_GST_arm
#endif
#ifdef TCMALLOC_ENABLE_INSTRUMENT_STACKTRACE
#define STACKTRACE_INL_HEADER "stacktrace_instrument-inl.h"
#define GST_SUFFIX instrument
#include "stacktrace_impl_setup-inl.h"
#undef GST_SUFFIX
#undef STACKTRACE_INL_HEADER
#define HAVE_GST_instrument
#endif
// The Windows case -- probably cygwin and mingw will use one of the
// x86-includes above, but if not, we can fall back to windows intrinsics.
#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__) || defined(__MINGW32__)
#define STACKTRACE_INL_HEADER "stacktrace_win32-inl.h"
#define GST_SUFFIX win32
#include "stacktrace_impl_setup-inl.h"
#undef GST_SUFFIX
#undef STACKTRACE_INL_HEADER
#define HAVE_GST_win32
#endif
static GetStackImplementation *all_impls[] = {
#ifdef HAVE_GST_generic
&impl__generic,
#endif
#ifdef HAVE_GST_libunwind
&impl__libunwind,
#endif
#ifdef HAVE_GST_x86
&impl__x86,
#endif
#ifdef HAVE_GST_arm
&impl__arm,
#endif
#ifdef HAVE_GST_ppc
&impl__ppc,
#endif
#ifdef HAVE_GST_instrument
&impl__instrument,
#endif
#ifdef HAVE_GST_win32
&impl__win32,
#endif
NULL
};
// ppc and i386 implementations prefer arch-specific asm implementations.
// arm's asm implementation is broken
#if defined(__i386__) || defined(__x86_64__) || defined(__ppc__) || defined(__PPC__)
#if !defined(NO_FRAME_POINTER)
#define TCMALLOC_DONT_PREFER_LIBUNWIND
#endif
#endif
#if defined(HAVE_GST_instrument)
static GetStackImplementation *get_stack_impl = &impl__instrument;
#elif defined(HAVE_GST_win32)
static GetStackImplementation *get_stack_impl = &impl__win32;
#elif defined(HAVE_GST_x86) && defined(TCMALLOC_DONT_PREFER_LIBUNWIND)
static GetStackImplementation *get_stack_impl = &impl__x86;
#elif defined(HAVE_GST_ppc) && defined(TCMALLOC_DONT_PREFER_LIBUNWIND)
static GetStackImplementation *get_stack_impl = &impl__ppc;
#elif defined(HAVE_GST_libunwind)
static GetStackImplementation *get_stack_impl = &impl__libunwind;
#elif defined(HAVE_GST_arm)
static GetStackImplementation *get_stack_impl = &impl__arm;
#elif defined(HAVE_GST_generic)
static GetStackImplementation *get_stack_impl = &impl__generic;
#elif 0
// This is for the benefit of code analysis tools that may have
// trouble with the computed #include above.
# include "stacktrace_x86-inl.h"
# include "stacktrace_libunwind-inl.h"
# include "stacktrace_generic-inl.h"
# include "stacktrace_powerpc-inl.h"
# include "stacktrace_win32-inl.h"
# include "stacktrace_arm-inl.h"
# include "stacktrace_instrument-inl.h"
#else
#error Cannot calculate stack trace: will need to write for your environment
#endif
static int ATTRIBUTE_NOINLINE frame_forcer(int rv) {
return rv;
}
PERFTOOLS_DLL_DECL int GetStackFrames(void** result, int* sizes, int max_depth,
int skip_count) {
return frame_forcer(get_stack_impl->GetStackFramesPtr(result, sizes, max_depth, skip_count));
}
PERFTOOLS_DLL_DECL int GetStackFramesWithContext(void** result, int* sizes, int max_depth,
int skip_count, const void *uc) {
return frame_forcer(get_stack_impl->GetStackFramesWithContextPtr(
result, sizes, max_depth,
skip_count, uc));
}
PERFTOOLS_DLL_DECL int GetStackTrace(void** result, int max_depth,
int skip_count) {
return frame_forcer(get_stack_impl->GetStackTracePtr(result, max_depth, skip_count));
}
PERFTOOLS_DLL_DECL int GetStackTraceWithContext(void** result, int max_depth,
int skip_count, const void *uc) {
return frame_forcer(get_stack_impl->GetStackTraceWithContextPtr(
result, max_depth, skip_count, uc));
}
static void init_default_stack_impl_inner(void) {
char *val = getenv("TCMALLOC_STACKTRACE_METHOD");
if (!val || !*val) {
return;
}
for (GetStackImplementation **p = all_impls; *p; p++) {
GetStackImplementation *c = *p;
if (strcmp(c->name, val) == 0) {
get_stack_impl = c;
return;
}
}
fprintf(stderr, "Unknown or unsupported stacktrace method requested: %s. Ignoring it\n", val);
}
static void init_default_stack_impl(void) {
init_default_stack_impl_inner();
if (EnvToBool("TCMALLOC_STACKTRACE_METHOD_VERBOSE", false)) {
fprintf(stderr, "Chosen stacktrace method is %s\nSupported methods:\n", get_stack_impl->name);
for (GetStackImplementation **p = all_impls; *p; p++) {
GetStackImplementation *c = *p;
fprintf(stderr, "* %s\n", c->name);
}
fputs("\n", stderr);
}
}
REGISTER_MODULE_INITIALIZER(stacktrace_init_default_stack_impl, init_default_stack_impl());<|fim▁end|> | #if defined(__ppc__) || defined(__PPC__)
#if defined(__linux__)
#define STACKTRACE_INL_HEADER "stacktrace_powerpc-linux-inl.h"
#else |
<|file_name|>session.rs<|end_file_name|><|fim▁begin|>/* Copyright (C) 2018 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
use crate::log::*;
use crate::kerberos::*;
use crate::smb::smb::*;
use crate::smb::smb1_session::*;
use crate::smb::auth::*;
#[derive(Debug)]
pub struct SMBTransactionSessionSetup {
pub request_host: Option<SessionSetupRequest>,
pub response_host: Option<SessionSetupResponse>,
pub ntlmssp: Option<NtlmsspData>,
pub krb_ticket: Option<Kerberos5Ticket>,
}
impl SMBTransactionSessionSetup {
pub fn new() -> SMBTransactionSessionSetup {
return SMBTransactionSessionSetup {
request_host: None,
response_host: None,
ntlmssp: None,
krb_ticket: None,
}
}<|fim▁hole|> -> &mut SMBTransaction
{
let mut tx = self.new_tx();
tx.hdr = hdr;
tx.type_data = Some(SMBTransactionTypeData::SESSIONSETUP(
SMBTransactionSessionSetup::new()));
tx.request_done = true;
tx.response_done = self.tc_trunc; // no response expected if tc is truncated
SCLogDebug!("SMB: TX SESSIONSETUP created: ID {}", tx.id);
self.transactions.push(tx);
let tx_ref = self.transactions.last_mut();
return tx_ref.unwrap();
}
pub fn get_sessionsetup_tx(&mut self, hdr: SMBCommonHdr)
-> Option<&mut SMBTransaction>
{
for tx in &mut self.transactions {
let hit = tx.hdr.compare(&hdr) && match tx.type_data {
Some(SMBTransactionTypeData::SESSIONSETUP(_)) => { true },
_ => { false },
};
if hit {
return Some(tx);
}
}
return None;
}
}<|fim▁end|> | }
impl SMBState {
pub fn new_sessionsetup_tx(&mut self, hdr: SMBCommonHdr) |
<|file_name|>takeSnapshot.js<|end_file_name|><|fim▁begin|><|fim▁hole|> *
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*
* @providesModule takeSnapshot
* @flow
*/
'use strict';
const UIManager = require('UIManager');
const findNumericNodeHandle = require('findNumericNodeHandle');
/**
* Capture an image of the screen, window or an individual view. The image
* will be stored in a temporary file that will only exist for as long as the
* app is running.
*
* The `view` argument can be the literal string `window` if you want to
* capture the entire window, or it can be a reference to a specific
* React Native component.
*
* The `options` argument may include:
* - width/height (number) - the width and height of the image to capture.
* - format (string) - either 'png' or 'jpeg'. Defaults to 'png'.
* - quality (number) - the quality when using jpeg. 0.0 - 1.0 (default).
*
* Returns a Promise.
* @platform ios
*/
function takeSnapshot(
view?: 'window' | React$Element<any> | number,
options?: {
width?: number,
height?: number,
format?: 'png' | 'jpeg',
quality?: number,
},
): Promise<any> {
if (typeof view !== 'number' && view !== 'window') {
view = findNumericNodeHandle(view) || 'window';
}
// Call the hidden '__takeSnapshot' method; the main one throws an error to
// prevent accidental backwards-incompatible usage.
return UIManager.__takeSnapshot(view, options);
}
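// Usage sketch (illustrative only, not part of the original module):
//
//   takeSnapshot('window', {format: 'jpeg', quality: 0.8})
//     .then((uri) => console.log('Snapshot stored at', uri))
//     .catch((error) => console.error(error));
//
// Treating the resolved value as a temporary-file URI is an assumption based on
// the comment above; the actual value is whatever UIManager.__takeSnapshot returns.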
module.exports = takeSnapshot;<|fim▁end|> | /**
* Copyright (c) 2015-present, Facebook, Inc. |
<|file_name|>ApplicationServiceController.java<|end_file_name|><|fim▁begin|>/*
* matrix-appservice-email - Matrix Bridge to E-mail
* Copyright (C) 2017 Kamax Sarl
*
* https://www.kamax.io/
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package io.kamax.matrix.bridge.email.controller;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import io.kamax.matrix.MatrixErrorInfo;
import io.kamax.matrix.bridge.email.exception.*;
import io.kamax.matrix.bridge.email.model.matrix.MatrixTransactionPush;
import io.kamax.matrix.bridge.email.model.matrix.RoomQuery;
import io.kamax.matrix.bridge.email.model.matrix.UserQuery;
import io.kamax.matrix.bridge.email.model.matrix._MatrixApplicationService;
import io.kamax.matrix.event._MatrixEvent;
import io.kamax.matrix.json.MatrixJsonEventFactory;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.*;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static org.springframework.web.bind.annotation.RequestMethod.GET;
import static org.springframework.web.bind.annotation.RequestMethod.PUT;
@RestController
public class ApplicationServiceController {
private Logger log = LoggerFactory.getLogger(ApplicationServiceController.class);
@Autowired
private _MatrixApplicationService as;
private JsonParser jsonParser = new JsonParser();
@ResponseStatus(value = HttpStatus.BAD_REQUEST)
@ExceptionHandler({InvalidMatrixIdException.class, InvalidBodyContentException.class})
@ResponseBody
MatrixErrorInfo handleBadRequest(HttpServletRequest request, MatrixException e) {
log.error("Error when processing {} {}", request.getMethod(), request.getServletPath(), e);
return new MatrixErrorInfo(e.getErrorCode());
}
@ResponseStatus(value = HttpStatus.UNAUTHORIZED)
@ExceptionHandler(NoHomeserverTokenException.class)
@ResponseBody
MatrixErrorInfo handleUnauthorized(MatrixException e) {
return new MatrixErrorInfo(e.getErrorCode());
}
@ResponseStatus(value = HttpStatus.FORBIDDEN)
@ExceptionHandler(InvalidHomeserverTokenException.class)
@ResponseBody
MatrixErrorInfo handleForbidden(MatrixException e) {
return new MatrixErrorInfo(e.getErrorCode());
}
@ResponseStatus(value = HttpStatus.NOT_FOUND)
@ExceptionHandler({RoomNotFoundException.class, UserNotFoundException.class})
@ResponseBody
MatrixErrorInfo handleNotFound(MatrixException e) {
return new MatrixErrorInfo(e.getErrorCode());
}
@ResponseStatus(value = HttpStatus.INTERNAL_SERVER_ERROR)
@ExceptionHandler(Throwable.class)
@ResponseBody
MatrixErrorInfo handleGeneric(HttpServletRequest request, Throwable t) {
log.error("Error when processing {} {}", request.getMethod(), request.getServletPath(), t);
return new MatrixErrorInfo(t);
}
@RequestMapping(value = "/rooms/{roomAlias:.+}", method = GET)
public Object getRoom(
@RequestParam(name = "access_token", required = false) String accessToken,
@PathVariable String roomAlias) {
log.info("Room {} was requested by HS", roomAlias);
as.queryRoom(new RoomQuery(roomAlias, accessToken));
return EmptyJsonResponse.get();
}
@RequestMapping(value = "/users/{mxId:.+}", method = GET)
public Object getUser(
@RequestParam(name = "access_token", required = false) String accessToken,
@PathVariable String mxId) {
log.info("User {} was requested by HS", mxId);
as.queryUser(new UserQuery(as.getId(mxId), accessToken));
return EmptyJsonResponse.get();
}
@RequestMapping(value = "/transactions/{txnId:.+}", method = PUT)
public Object getTransaction(
HttpServletRequest request,<|fim▁hole|>
String json = IOUtils.toString(request.getInputStream(), request.getCharacterEncoding());
try {
JsonObject rootObj = jsonParser.parse(json).getAsJsonObject();
JsonArray eventsJson = rootObj.get("events").getAsJsonArray();
List<_MatrixEvent> events = new ArrayList<>();
for (JsonElement event : eventsJson) {
events.add(MatrixJsonEventFactory.get(event.getAsJsonObject()));
}
MatrixTransactionPush transaction = new MatrixTransactionPush();
transaction.setCredentials(accessToken);
transaction.setId(txnId);
transaction.setEvents(events);
as.push(transaction);
return EmptyJsonResponse.get();
} catch (IllegalStateException e) {
throw new InvalidBodyContentException(e);
}
}
}<|fim▁end|> | @RequestParam(name = "access_token", required = false) String accessToken,
@PathVariable String txnId) throws IOException {
log.info("Processing {}", request.getServletPath()); |
<|file_name|>olt_los_alarm.py<|end_file_name|><|fim▁begin|># Copyright 2017-present Adtran, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from voltha.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
from voltha.extensions.alarms.adapter_alarms import AlarmBase
class OltLosAlarm(AlarmBase):
def __init__(self, alarm_mgr, intf_id, port_type_name):
super(OltLosAlarm, self).__init__(alarm_mgr, object_type='olt LOS',
alarm='OLT_LOS',
alarm_category=AlarmEventCategory.OLT,
alarm_type=AlarmEventType.COMMUNICATION,
alarm_severity=AlarmEventSeverity.MAJOR)<|fim▁hole|> def get_context_data(self):
return {'olt-intf-id:': self._intf_id,
'olt-port-type-name': self._port_type_name}<|fim▁end|> | # Added port type to indicate if alarm was on NNI or PON
self._intf_id = intf_id
self._port_type_name = port_type_name
|
<|file_name|>ControllableSlimeHandle.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) EntityAPI Team
*
* This file is part of EntityAPI.
*
* EntityAPI is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* EntityAPI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of<|fim▁hole|> * along with EntityAPI. If not, see <http://www.gnu.org/licenses/>.
*/
package org.entityapi.api.entity.type.nms;
import org.bukkit.entity.Slime;
import org.entityapi.api.entity.ControllableEntityHandle;
public interface ControllableSlimeHandle extends ControllableEntityHandle<Slime> {
}<|fim▁end|> | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.python.keras import Input
from tensorflow.python.keras import Model
from tensorflow.python.keras import Sequential
from tensorflow.tools.api.generator.api.keras import activations
from tensorflow.tools.api.generator.api.keras import applications
from tensorflow.tools.api.generator.api.keras import backend
from tensorflow.tools.api.generator.api.keras import callbacks
from tensorflow.tools.api.generator.api.keras import constraints
from tensorflow.tools.api.generator.api.keras import datasets
from tensorflow.tools.api.generator.api.keras import estimator
from tensorflow.tools.api.generator.api.keras import initializers
from tensorflow.tools.api.generator.api.keras import layers
from tensorflow.tools.api.generator.api.keras import losses
from tensorflow.tools.api.generator.api.keras import metrics
from tensorflow.tools.api.generator.api.keras import models
from tensorflow.tools.api.generator.api.keras import optimizers
from tensorflow.tools.api.generator.api.keras import preprocessing
from tensorflow.tools.api.generator.api.keras import regularizers
from tensorflow.tools.api.generator.api.keras import utils<|fim▁hole|><|fim▁end|> | from tensorflow.tools.api.generator.api.keras import wrappers |
<|file_name|>validators.py<|end_file_name|><|fim▁begin|>from rest_framework.serializers import ValidationError
from six import string_types
def has_id_field(value):
if value is None:
raise ValidationError('Nested object must contain an `id` attribute.')
if isinstance(value, string_types):<|fim▁hole|><|fim▁end|> | raise ValidationError(value) |
<|file_name|>wireshark_gen.py<|end_file_name|><|fim▁begin|># -*- python -*-
#
# wireshark_gen.py (part of idl2wrs)
#
# Author : Frank Singleton ([email protected])
#
# Copyright (C) 2001 Frank Singleton, Ericsson Inc.
#
# This file is a backend to "omniidl", used to generate "Wireshark"
# dissectors from CORBA IDL descriptions. The output language generated
# is "C". It will generate code to use the GIOP/IIOP get_CDR_XXX API.
#
# Please see packet-giop.h in Wireshark distro for API description.
# Wireshark is available at https://www.wireshark.org/
#
# Omniidl is part of the OmniOrb distribution, and is available at
# http://omniorb.sourceforge.net
#
# SPDX-License-Identifier: GPL-2.0-or-later
# Description:
#
# Omniidl Back-end which parses an IDL list of "Operation" nodes
# passed from wireshark_be2.py and generates "C" code for compiling
# as a dissector for Wireshark.
#
#
# Strategy (sneaky but ...)
#
# problem: I don't know what variables to declare until AFTER the helper functions
# have been built, so ...
#
# There are 2 passes through genHelpers, the first one is there just to
# make sure the fn_hash data struct is populated properly.
# The second pass is the real thing, generating code and declaring
# variables (from the 1st pass) properly.
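# Roughly, the expected driving sequence is (illustrative only; wireshark_be2.py
# is the real caller, "outstream" is a placeholder, and the literal names are the
# examples quoted in the __init__ comments further down):
#
#   gen = wireshark_gen_C(outstream, "ECHO", "echo", "Echo IDL Example")
#   gen.genCode(oplist, atlist, enlist, stlist, unlist)   # runs both passes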
"""Wireshark IDL compiler back-end."""
from __future__ import print_function
import collections
import tempfile
from omniidl import idlast, idltype, idlutil, output
# Output class, generates "C" src code for the sub-dissector
#
# in:
#
#
# self - me
# st - output stream
# node - a reference to an Operations object.
# name - scoped name (Module::Module::Interface:: .. ::Operation
# TODO -- FS
#
# 1. generate hf[] data for searchable fields (but what is searchable?) [done, could be improved]
# 2. add item instead of add_text() [done]
# 3. sequence handling [done]
# 4. User Exceptions [done]
# 5. Fix arrays, and structs containing arrays [done]
# 6. Handle pragmas.
# 7. Exception can be common to many operations, so handle them outside the
# operation helper functions [done]
# 8. Automatic variable declaration [done, improve, still get some collisions.add variable delegator function ]
# For example, multidimensional arrays.
# 9. wchar and wstring handling [giop API needs improving]
# 10. Support Fixed [done]
# 11. Support attributes (get/set) [started, needs language mapping option, perhaps wireshark GUI option
# to set the attribute function prefix or suffix ? ] For now the prefix is "_get" and "_set"
# eg: attribute string apple => _get_apple and _set_apple
#
# 12. Implement IDL "union" code [done]
# 13. Implement support for plugins [done]
# 14. Don't generate code for empty operations (cf: exceptions without members)
# 15. Generate code to display Enums numerically and symbolically [done]
# 16. Place structs/unions in subtrees
# 17. Recursive struct and union handling [done]
# 18. Improve variable naming for display (eg: structs, unions etc) [done]
#
# Also test, Test, TEST
# Strategy:
# For every operation and attribute do
# For return val and all parameters do
# find basic IDL type for each parameter
# output get_CDR_xxx
# output exception handling code
# output attribute handling code
class wireshark_gen_C:
# Some string constants for our templates
c_u_octet8 = "guint64 u_octet8;"
c_s_octet8 = "gint64 s_octet8;"
c_u_octet4 = "guint32 u_octet4;"
c_s_octet4 = "gint32 s_octet4;"
c_u_octet2 = "guint16 u_octet2;"
c_s_octet2 = "gint16 s_octet2;"
c_u_octet1 = "guint8 u_octet1;"
c_s_octet1 = "gint8 s_octet1;"
c_float = "gfloat my_float;"
c_double = "gdouble my_double;"
c_seq = "const gchar *seq = NULL;" # pointer to buffer of gchars
c_i = "guint32 i_" # loop index
c_i_lim = "guint32 u_octet4_loop_" # loop limit
c_u_disc = "guint32 disc_u_" # unsigned int union discriminant variable name (enum)
c_s_disc = "gint32 disc_s_" # signed int union discriminant variable name (other cases, except Enum)
def __init__(self, st, protocol_name, dissector_name, description, debug=False, aggressive=False):
self.DEBUG = debug
self.AGGRESSIVE = aggressive
self.st = output.Stream(tempfile.TemporaryFile(mode="w"), 4) # for first pass only
self.st_save = st # where 2nd pass should go
self.protoname = protocol_name # Protocol Name (eg: ECHO)
self.dissname = dissector_name # Dissector name (eg: echo)
self.description = description # Detailed Protocol description (eg: Echo IDL Example)
self.exlist = [] # list of exceptions used in operations.
#self.curr_sname # scoped name of current opnode or exnode I am visiting, used for generating "C" var declares
self.fn_hash = {} # top level hash to contain key = function/exception and val = list of variable declarations
# ie a hash of lists
self.fn_hash_built = 0 # flag to indicate the 1st pass is complete, and the fn_hash is correctly
# populated with operations/vars and exceptions/vars
def genCode(self, oplist, atlist, enlist, stlist, unlist): # operation, attribute, enums, struct and union lists
"""Main entry point, controls sequence of generated code."""
# sneaky .. call it now, to populate the fn_hash
# so when I come to that exception later, I have the variables to
# declare already.
self.genHelpers(oplist, stlist, unlist)
self.genExceptionHelpers(oplist)
self.genAttributeHelpers(atlist)
self.fn_hash_built = 1 # DONE, so now I know , see genOperation()
self.st = self.st_save
self.genHeader() # initial dissector comments
self.genWrsCopyright()
self.genGPL()
self.genIncludes()
self.genPrototype()
self.genProtocol()
self.genDeclares(oplist, atlist, enlist, stlist, unlist)
if len(atlist) > 0:
self.genAtList(atlist) # string constant declares for Attributes
if len(enlist) > 0:
self.genEnList(enlist) # string constant declares for Enums
self.genExceptionHelpers(oplist) # helper function to decode user exceptions that have members
self.genExceptionDelegator(oplist) # finds the helper function to decode a user exception
if len(atlist) > 0:
self.genAttributeHelpers(atlist) # helper function to decode "attributes"
self.genHelpers(oplist, stlist, unlist) # operation, struct and union decode helper functions
self.genMainEntryStart(oplist)
self.genOpDelegator(oplist)
self.genAtDelegator(atlist)
self.genMainEntryEnd()
self.gen_proto_register(oplist, atlist, stlist, unlist)
self.gen_proto_reg_handoff(oplist)
# All the dissectors are now built-in
#self.gen_plugin_register()
#self.dumpvars() # debug
self.genModelines()
def genHeader(self):
"""Generate Standard Wireshark Header Comments"""
self.st.out(self.template_Header, dissector_name=self.dissname)
if self.DEBUG:
print("//XXX genHeader")
def genWrsCopyright(self):
if self.DEBUG:
print("//XXX genWrsCopyright")
self.st.out(self.template_wireshark_copyright)
def genModelines(self):
if self.DEBUG:
print("//XXX genModelines")
self.st.out(self.template_Modelines)
def genGPL(self):
if self.DEBUG:
print("//XXX genGPL")
self.st.out(self.template_GPL)
def genIncludes(self):
if self.DEBUG:
print("//XXX genIncludes")
self.st.out(self.template_Includes)
def genOpDeclares(self, op):
"""Generate hf variables for operation filters
in: opnode ( an operation node)
"""
if self.DEBUG:
print("//XXX genOpDeclares")
print("//XXX return type = ", op.returnType().kind())
sname = self.namespace(op, "_")
rt = op.returnType()
if rt.kind() != idltype.tk_void:
if rt.kind() == idltype.tk_alias: # a typedef return val possibly ?
#self.get_CDR_alias(rt, rt.name())
if rt.unalias().kind() == idltype.tk_sequence:
self.st.out(self.template_hf, name=sname + "_return_loop")
if self.isSeqNativeType(rt.unalias().seqType()) or self.AGGRESSIVE:
self.st.out(self.template_hf, name=sname + "_return")
elif (rt.unalias().kind() != idltype.tk_struct and
rt.unalias().kind() != idltype.tk_objref and
rt.unalias().kind() != idltype.tk_any):
self.st.out(self.template_hf, name=sname + "_return")
elif (rt.kind() != idltype.tk_struct and
rt.kind() != idltype.tk_objref and
rt.kind() != idltype.tk_union and
rt.kind() != idltype.tk_any):
self.st.out(self.template_hf, name=sname + "_return")
for p in op.parameters():
if p.paramType().unalias().kind() == idltype.tk_sequence:
self.st.out(self.template_hf, name=sname + "_" + p.identifier() + "_loop")
if (self.isSeqNativeType(p.paramType().unalias().seqType())) or self.AGGRESSIVE:
self.st.out(self.template_hf, name=sname + "_" + p.identifier())
elif (p.paramType().unalias().kind() != idltype.tk_any and
p.paramType().unalias().kind() != idltype.tk_struct and
p.paramType().unalias().kind() != idltype.tk_objref and
p.paramType().unalias().kind() != idltype.tk_union):
if p.paramType().unalias().kind() == idltype.tk_wchar:
self.st.out(self.template_hf, name=sname + "_" + p.identifier() + "_len")
self.st.out(self.template_hf, name=sname + "_" + p.identifier())
def genAtDeclares(self, at):
"""Generate hf variables for attributes
in: at ( an attribute)
"""
if self.DEBUG:
print("//XXX genAtDeclares")
for decl in at.declarators():
sname = self.namespace(decl, "_")
self.st.out(self.template_hf, name="get" + "_" + sname + "_" + decl.identifier())
if self.AGGRESSIVE:
self.st.out(self.template_hf, name="get" + "_" + sname + "_" + decl.identifier()+"_loop")
if not at.readonly():
self.st.out(self.template_hf, name="set" + "_" + sname + "_" + decl.identifier())
if self.AGGRESSIVE:
self.st.out(self.template_hf, name="set" + "_" + sname + "_" + decl.identifier()+"_loop")
def genStDeclares(self, st):
"""Generate hf variables for structs
in: st ( a struct)
"""
if self.DEBUG:
print("//XXX genStDeclares")
sname = self.namespace(st, "_")
for m in st.members():
if (self.isSeqNativeType(m.memberType())
or m.memberType().unalias().kind() == idltype.tk_sequence
or m.memberType().unalias().kind() == idltype.tk_alias):
for decl in m.declarators():
if m.memberType().unalias().kind() == idltype.tk_sequence:
self.st.out(self.template_hf, name=sname + "_" + decl.identifier() + "_loop")
if (self.isSeqNativeType(m.memberType().unalias().seqType())) or self.AGGRESSIVE:
self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
else:
if m.memberType().unalias().kind() == idltype.tk_wchar:
self.st.out(self.template_hf, name=sname + "_" + decl.identifier() + "_len")
self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
def genExDeclares(self, ex):
"""Generate hf variables for user exception filters
in: exnode ( an exception node)
"""
if self.DEBUG:
print("//XXX genExDeclares")
sname = self.namespace(ex, "_")
for m in ex.members():
for decl in m.declarators():
if m.memberType().unalias().kind() == idltype.tk_sequence:
if self.isSeqNativeType(m.memberType().unalias().seqType()):
self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
self.st.out(self.template_hf, name=sname + "_" + decl.identifier() + "_loop")
elif m.memberType().unalias().kind() != idltype.tk_struct:
self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
def genUnionDeclares(self, un):
"""Generate hf variables for union filters
in: un ( an union)
"""
if self.DEBUG:
print("//XXX genUnionDeclares")
sname = self.namespace(un, "_")
self.st.out(self.template_hf, name=sname + "_" + un.identifier())
for uc in un.cases(): # for all UnionCase objects in this union
# TODO: Is this loop necessary? cl is not used
for cl in uc.labels(): # for all Caselabel objects in this UnionCase
if uc.caseType().unalias().kind() == idltype.tk_sequence:
self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier() + "_loop")
if self.isSeqNativeType(uc.caseType().unalias().seqType()):
self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier())
elif self.isSeqNativeType(uc.caseType()):
if uc.caseType().unalias().kind() == idltype.tk_wchar:
self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier() + "_len")
self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier())
def genExpertInfoDeclares(self):
"""Generate ei variables for expert info filters"""
if self.DEBUG:
print("//XXX genExpertInfoDeclares")
self.st.out(self.template_proto_register_ei_filters, dissector_name=self.dissname)
def genDeclares(self, oplist, atlist, enlist, stlist, unlist):
"""generate function prototypes if required
Currently this is used for struct and union helper function declarations.
"""
if self.DEBUG:
print("//XXX genDeclares")
# prototype for operation filters
self.st.out(self.template_hf_operations)
# operation specific filters
if len(oplist) > 0:
self.st.out(self.template_proto_register_op_filter_comment)
for op in oplist:
self.genOpDeclares(op)
# attribute filters
if len(atlist) > 0:
self.st.out(self.template_proto_register_at_filter_comment)
for at in atlist:
self.genAtDeclares(at)
# struct filters
if len(stlist) > 0:
self.st.out(self.template_proto_register_st_filter_comment)
for st in stlist:
self.genStDeclares(st)
# exception List filters
exlist = self.get_exceptionList(oplist) # grab list of exception nodes
if len(exlist) > 0:
self.st.out(self.template_proto_register_ex_filter_comment)
for ex in exlist:
if ex.members(): # only if has members
self.genExDeclares(ex)
# union filters
if len(unlist) > 0:
self.st.out(self.template_proto_register_un_filter_comment)
for un in unlist:
self.genUnionDeclares(un)
# expert info filters
self.genExpertInfoDeclares()
# prototype for start_dissecting()
self.st.out(self.template_prototype_start_dissecting)
# struct prototypes
if len(stlist):
self.st.out(self.template_prototype_struct_start)
for st in stlist:
#print st.repoId()
sname = self.namespace(st, "_")
self.st.out(self.template_prototype_struct_body, stname=st.repoId(), name=sname)
self.st.out(self.template_prototype_struct_end)
# union prototypes
if len(unlist):
self.st.out(self.template_prototype_union_start)
for un in unlist:
sname = self.namespace(un, "_")
self.st.out(self.template_prototype_union_body, unname=un.repoId(), name=sname)
self.st.out(self.template_prototype_union_end)
def genPrototype(self):
self.st.out(self.template_prototype, dissector_name=self.dissname)
def genProtocol(self):
self.st.out(self.template_protocol, dissector_name=self.dissname)
self.st.out(self.template_init_boundary)
def genMainEntryStart(self, oplist):
self.st.out(self.template_main_dissector_start, dissname=self.dissname, disprot=self.protoname)
self.st.inc_indent()
self.st.out(self.template_main_dissector_switch_msgtype_start)
self.st.out(self.template_main_dissector_switch_msgtype_start_request_reply)
self.st.inc_indent()
def genMainEntryEnd(self):
self.st.out(self.template_main_dissector_switch_msgtype_end_request_reply)
self.st.dec_indent()
self.st.out(self.template_main_dissector_switch_msgtype_all_other_msgtype)
self.st.dec_indent()
self.st.out(self.template_main_dissector_end)
# NOTE: Mapping of attributes to operation(function) names is tricky.
#
# The actual accessor function names are language-mapping specific. The attribute name
# is subject to OMG IDL's name scoping rules; the accessor function names are
# guaranteed not to collide with any legal operation names specifiable in OMG IDL.
#
# eg:
#
# static const char get_Penguin_Echo_get_width_at[] = "get_width" ;
# static const char set_Penguin_Echo_set_width_at[] = "set_width" ;
#
# or:
#
# static const char get_Penguin_Echo_get_width_at[] = "_get_width" ;
# static const char set_Penguin_Echo_set_width_at[] = "_set_width" ;
#
# TODO: Implement some language dependent templates to handle naming conventions
# language <=> attribute. for C, C++. Java etc
#
# OR, just add a runtime GUI option to select language binding for attributes -- FS
def genAtList(self, atlist):
"""in: atlist
out: C code for IDL attribute declarations.
ie: def genAtlist(self,atlist,language)
"""
self.st.out(self.template_comment_attributes_start)
for n in atlist:
for i in n.declarators(): #
sname = self.namespace(i, "_")
atname = i.identifier()
self.st.out(self.template_attributes_declare_Java_get, sname=sname, atname=atname)
if not n.readonly():
self.st.out(self.template_attributes_declare_Java_set, sname=sname, atname=atname)
self.st.out(self.template_comment_attributes_end)
def genEnList(self, enlist):
"""in: enlist
out: C code for IDL Enum declarations using "static const value_string" template
"""
self.st.out(self.template_comment_enums_start)
for enum in enlist:
sname = self.namespace(enum, "_")
self.st.out(self.template_comment_enum_comment, ename=enum.repoId())
self.st.out(self.template_value_string_start, valstringname=sname)
for enumerator in enum.enumerators():
self.st.out(self.template_value_string_entry,
intval=str(self.valFromEnum(enum, enumerator)),
description=enumerator.identifier())
#atname = n.identifier()
self.st.out(self.template_value_string_end, valstringname=sname)
self.st.out(self.template_comment_enums_end)
def genExceptionDelegator(self, oplist):
"""in: oplist
out: C code for User exception delegator
"""
self.st.out(self.template_main_exception_delegator_start)
self.st.inc_indent()
exlist = self.get_exceptionList(oplist) # grab list of ALL UNIQUE exception nodes
for ex in exlist:
if self.DEBUG:
print("//XXX Exception ", ex.repoId())
print("//XXX Exception Identifier", ex.identifier())
print("//XXX Exception Scoped Name", ex.scopedName())
if ex.members(): # only if has members
sname = self.namespace(ex, "_")
self.st.out(self.template_ex_delegate_code, sname=sname, exname=ex.repoId())
self.st.dec_indent()
self.st.out(self.template_main_exception_delegator_end)
def genAttributeHelpers(self, atlist):
"""Generate private helper functions to decode Attributes.
in: atlist
For readonly attribute - generate get_xxx()
If NOT readonly attribute - also generate set_xxx()
"""
if self.DEBUG:
print("//XXX genAttributeHelpers: atlist = ", atlist)
self.st.out(self.template_attribute_helpers_start)
for attrib in atlist:
for decl in attrib.declarators():
self.genAtHelper(attrib, decl, "get") # get accessor
if not attrib.readonly():
self.genAtHelper(attrib, decl, "set") # set accessor
self.st.out(self.template_attribute_helpers_end)
def genAtHelper(self, attrib, decl, order):
"""Generate private helper functions to decode an attribute
in: at - attribute node
in: decl - declarator belonging to this attribute
in: order - to generate a "get" or "set" helper
"""
if self.DEBUG:
print("//XXX genAtHelper")
sname = order + "_" + self.namespace(decl, "_") # must use set or get prefix to avoid collision
self.curr_sname = sname # update current opnode/exnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_attribute_helper_function_start, sname=sname, atname=decl.repoId())
self.st.inc_indent()
if len(self.fn_hash[sname]) > 0:
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end_item)
self.getCDR(attrib.attrType(), sname + "_" + decl.identifier())
self.st.dec_indent()
self.st.out(self.template_attribute_helper_function_end)
def genExceptionHelpers(self, oplist):
"""Generate private helper functions to decode Exceptions used
within operations
in: oplist
"""
exlist = self.get_exceptionList(oplist) # grab list of exception nodes
if self.DEBUG:
print("//XXX genExceptionHelpers: exlist = ", exlist)
self.st.out(self.template_exception_helpers_start)
for ex in exlist:
if ex.members(): # only if has members
#print("//XXX Exception = " + ex.identifier())
self.genExHelper(ex)
self.st.out(self.template_exception_helpers_end)
def genExHelper(self, ex):
"""Generate private helper functions to decode User Exceptions
in: exnode ( an exception node)
"""
if self.DEBUG:
print("//XXX genExHelper")
# check to see if we need an item
need_item = False
for m in ex.members():
if self.isItemVarType(m.memberType()):
need_item = True
break
sname = self.namespace(ex, "_")
self.curr_sname = sname # update current opnode/exnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
if need_item:
self.st.out(self.template_exception_helper_function_start_item, sname=sname, exname=ex.repoId())
else:
self.st.out(self.template_exception_helper_function_start_no_item, sname=sname, exname=ex.repoId())
self.st.inc_indent()
if len(self.fn_hash[sname]) > 0:
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
if need_item:
self.st.out(self.template_helper_function_vars_end_item)
else:
self.st.out(self.template_helper_function_vars_end)
for m in ex.members():
if self.DEBUG:
print("//XXX genExhelper, member = ", m, "member type = ", m.memberType())
for decl in m.declarators():
if self.DEBUG:
print("//XXX genExhelper, d = ", decl)
if decl.sizes(): # an array
indices = self.get_indices_from_sizes(decl.sizes())
string_indices = '%i ' % indices # convert int to string
self.st.out(self.template_get_CDR_array_comment, aname=decl.identifier(), asize=string_indices)
self.st.out(self.template_get_CDR_array_start, aname=decl.identifier(), aval=string_indices)
self.addvar(self.c_i + decl.identifier() + ";")
self.st.inc_indent()
self.getCDR(m.memberType(), sname + "_" + decl.identifier())
self.st.dec_indent()
self.st.out(self.template_get_CDR_array_end)
else:
self.getCDR(m.memberType(), sname + "_" + decl.identifier())
self.st.dec_indent()
self.st.out(self.template_exception_helper_function_end)
def genHelpers(self, oplist, stlist, unlist):
"""Generate private helper functions
Generate private helper functions for each IDL operation.
Generate private helper functions for each IDL struct.
Generate private helper functions for each IDL union.
in: oplist, stlist, unlist
"""
for op in oplist:
self.genOperation(op)
for st in stlist:
self.genStructHelper(st)
for un in unlist:
self.genUnionHelper(un)
def genOperation(self, opnode):
"""Generate private helper functions for a specific IDL operation.
in: opnode
"""
if self.DEBUG:
print("//XXX genOperation called")
sname = self.namespace(opnode, "_")
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.curr_sname = sname # update current opnode's scoped name
opname = opnode.identifier()
self.st.out(self.template_helper_function_comment, repoid=opnode.repoId())
self.st.out(self.template_helper_function_start, sname=sname)
self.st.inc_indent()
if len(self.fn_hash[sname]) > 0:
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end_item)
self.st.out(self.template_helper_switch_msgtype_start)
self.st.out(self.template_helper_switch_msgtype_request_start)
self.st.inc_indent()
self.genOperationRequest(opnode)
self.st.out(self.template_helper_switch_msgtype_request_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_reply_start)
self.st.inc_indent()
self.st.out(self.template_helper_switch_rep_status_start)
self.st.out(self.template_helper_switch_msgtype_reply_no_exception_start)
self.st.inc_indent()
self.genOperationReply(opnode)
self.st.out(self.template_helper_switch_msgtype_reply_no_exception_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_reply_user_exception_start)
self.st.inc_indent()
self.genOpExceptions(opnode)
self.st.out(self.template_helper_switch_msgtype_reply_user_exception_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_reply_default_start, dissector_name=self.dissname)
self.st.out(self.template_helper_switch_msgtype_reply_default_end)
self.st.out(self.template_helper_switch_rep_status_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_default_start, dissector_name=self.dissname)
self.st.out(self.template_helper_switch_msgtype_default_end)
self.st.out(self.template_helper_switch_msgtype_end)
self.st.dec_indent()
self.st.out(self.template_helper_function_end, sname=sname)
def genOperationRequest(self, opnode):
"""Decode function parameters for a GIOP request message"""
for p in opnode.parameters():
if p.is_in():
if self.DEBUG:
print("//XXX parameter = ", p)
print("//XXX parameter type = ", p.paramType())
print("//XXX parameter type kind = ", p.paramType().kind())
self.getCDR(p.paramType(), self.curr_sname + "_" + p.identifier())
def genOperationReply(self, opnode):
"""Decode function parameters for a GIOP reply message"""
rt = opnode.returnType() # get return type
if self.DEBUG:
print("//XXX genOperationReply")
print("//XXX opnode = ", opnode)
print("//XXX return type = ", rt)
print("//XXX return type.unalias = ", rt.unalias())
print("//XXX return type.kind() = ", rt.kind())
sname = self.namespace(opnode, "_")
if rt.kind() == idltype.tk_alias: # a typedef return val possibly ?
#self.getCDR(rt.decl().alias().aliasType(),"dummy") # return value maybe a typedef
self.get_CDR_alias(rt, sname + "_return")
#self.get_CDR_alias(rt, rt.name())
else:
self.getCDR(rt, sname + "_return") # return value is NOT an alias
for p in opnode.parameters():
if p.is_out(): # out or inout
self.getCDR(p.paramType(), self.curr_sname + "_" + p.identifier())
#self.st.dec_indent()
# TODO: this method seems unnecessary
def genOpExceptions(self, opnode):
for ex in opnode.raises():
if ex.members():
#print ex.members()
for m in ex.members():
t = 0
#print m.memberType(), m.memberType().kind()
def genOpDelegator(self, oplist):
"""Delegator for Operations"""
for op in oplist:
iname = "/".join(op.scopedName()[:-1])
opname = op.identifier()
sname = self.namespace(op, "_")
self.st.out(self.template_op_delegate_code, interface=iname, sname=sname, opname=opname)
def genAtDelegator(self, atlist):
"""Delegator for Attributes"""
for a in atlist:
for i in a.declarators():
sname = self.namespace(i, "_")
self.st.out(self.template_at_delegate_code_get, sname=sname)
if not a.readonly():
self.st.out(self.template_at_delegate_code_set, sname=sname)
def addvar(self, var):
"""Add a variable declaration to the hash of list"""
if var not in self.fn_hash[self.curr_sname]:
self.fn_hash[self.curr_sname].append(var)
def dumpvars(self):
"""Print the variable declaration from the hash of list"""
for fn in self.fn_hash.keys():
print("FN = " + fn)
for v in self.fn_hash[fn]:
print("-> " + v)
def dumpCvars(self, sname):
"""Print the "C" variable declaration from the hash of list
for a given scoped operation name (eg: tux_penguin_eat)"""
for v in self.fn_hash[sname]:
self.st.out(v)
def valFromEnum(self, enumNode, enumeratorNode):
"""Given an enum node, and a enumerator node, return the enumerator's numerical value.
eg: enum Color {red,green,blue} should return
val = 1 for green
"""
if self.DEBUG:
print("//XXX valFromEnum, enumNode = ", enumNode, " from ", enumNode.repoId())
print("//XXX valFromEnum, enumeratorNode = ", enumeratorNode, " from ", enumeratorNode.repoId())
if isinstance(enumeratorNode, idlast.Enumerator):
value = enumNode.enumerators().index(enumeratorNode)
return value
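        # Note: if enumeratorNode is not an idlast.Enumerator the method falls
        # through and implicitly returns None; callers only pass Enumerator
        # values obtained from CaseLabel.value().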
# tk_null = 0
# tk_void = 1
# tk_short = 2
# tk_long = 3
# tk_ushort = 4
# tk_ulong = 5
# tk_float = 6
# tk_double = 7
# tk_boolean = 8
# tk_char = 9
# tk_octet = 10
# tk_any = 11
# tk_TypeCode = 12
# tk_Principal = 13
# tk_objref = 14
# tk_struct = 15
# tk_union = 16
# tk_enum = 17
# tk_string = 18
# tk_sequence = 19
# tk_array = 20
# tk_alias = 21
# tk_except = 22
# tk_longlong = 23
# tk_ulonglong = 24
# tk_longdouble = 25
# tk_wchar = 26
# tk_wstring = 27
# tk_fixed = 28
# tk_value = 29
# tk_value_box = 30
# tk_native = 31
# tk_abstract_interface = 32
def isSeqNativeType(self, type):
"""Return true for "native" datatypes that will generate a direct proto_tree_add_xxx
call for a sequence. Used to determine if a separate hf variable is needed for
the loop over the sequence"""
pt = type.unalias().kind() # param CDR type
if self.DEBUG:
print("//XXX isSeqNativeType: kind = ", pt)
if pt == idltype.tk_ulong:
return 1
elif pt == idltype.tk_longlong:
return 1
elif pt == idltype.tk_ulonglong:
return 1
elif pt == idltype.tk_short:
return 1
elif pt == idltype.tk_long:
return 1
elif pt == idltype.tk_ushort:
return 1
elif pt == idltype.tk_float:
return 1
elif pt == idltype.tk_double:
return 1
elif pt == idltype.tk_boolean:
return 1
elif pt == idltype.tk_octet:
return 1
elif pt == idltype.tk_enum:
return 1
elif pt == idltype.tk_string:
return 1
elif pt == idltype.tk_wstring:
return 1
elif pt == idltype.tk_wchar:
return 1
elif pt == idltype.tk_char:
return 1
else:
return 0
def isItemVarType(self, type):
pt = type.unalias().kind() # param CDR type
if self.DEBUG:
print("//XXX isItemVarType: kind = ", pt)
if pt in [idltype.tk_fixed, idltype.tk_struct, idltype.tk_any, idltype.tk_sequence]:
return 1
return 0
def getCDR(self, type, name="fred"):
"""This is the main "iterator" function. It takes a node, and tries to output
a get_CDR_XXX accessor method(s). It can call itself multiple times
if it finds nested structures etc."""
pt = type.unalias().kind() # param CDR type
pn = name # param name
if self.DEBUG:
print("//XXX getCDR: kind = ", pt)
print("//XXX getCDR: name = ", pn)
if pt == idltype.tk_ulong:
self.get_CDR_ulong(pn)
elif pt == idltype.tk_longlong:
self.get_CDR_longlong(pn)
elif pt == idltype.tk_ulonglong:
self.get_CDR_ulonglong(pn)
elif pt == idltype.tk_void:
self.get_CDR_void(pn)
elif pt == idltype.tk_short:
self.get_CDR_short(pn)
elif pt == idltype.tk_long:
self.get_CDR_long(pn)
elif pt == idltype.tk_ushort:
self.get_CDR_ushort(pn)
elif pt == idltype.tk_float:
self.get_CDR_float(pn)
elif pt == idltype.tk_double:
self.get_CDR_double(pn)
elif pt == idltype.tk_fixed:
self.get_CDR_fixed(type.unalias(), pn)
elif pt == idltype.tk_boolean:
self.get_CDR_boolean(pn)
elif pt == idltype.tk_char:
self.get_CDR_char(pn)
elif pt == idltype.tk_octet:
self.get_CDR_octet(pn)
elif pt == idltype.tk_any:
self.get_CDR_any(pn)
elif pt == idltype.tk_string:
self.get_CDR_string(pn)
elif pt == idltype.tk_wstring:
self.get_CDR_wstring(pn)
elif pt == idltype.tk_wchar:
self.get_CDR_wchar(pn)
elif pt == idltype.tk_enum:
#print type.decl()
self.get_CDR_enum(pn, type)
#self.get_CDR_enum(pn)
elif pt == idltype.tk_struct:
self.get_CDR_struct(type, pn)
elif pt == idltype.tk_TypeCode: # will I ever get here ?
self.get_CDR_TypeCode(pn)
elif pt == idltype.tk_sequence:
if type.unalias().seqType().kind() == idltype.tk_octet:
self.get_CDR_sequence_octet(type, pn)
else:
self.get_CDR_sequence(type, pn)
elif pt == idltype.tk_objref:
self.get_CDR_objref(type, pn)
elif pt == idltype.tk_array:
pass # Supported elsewhere
elif pt == idltype.tk_union:
self.get_CDR_union(type, pn)
elif pt == idltype.tk_alias:
if self.DEBUG:
print("//XXXXX Alias type XXXXX ", type)
self.get_CDR_alias(type, pn)
else:
self.genWARNING("Unknown typecode = " + '%i ' % pt) # put comment in source code
def get_CDR_ulong(self, pn):
self.st.out(self.template_get_CDR_ulong, hfname=pn)
def get_CDR_short(self, pn):
self.st.out(self.template_get_CDR_short, hfname=pn)
def get_CDR_void(self, pn):
self.st.out(self.template_get_CDR_void, hfname=pn)
def get_CDR_long(self, pn):
self.st.out(self.template_get_CDR_long, hfname=pn)
def get_CDR_ushort(self, pn):
self.st.out(self.template_get_CDR_ushort, hfname=pn)
def get_CDR_float(self, pn):
self.st.out(self.template_get_CDR_float, hfname=pn)
def get_CDR_double(self, pn):
self.st.out(self.template_get_CDR_double, hfname=pn)
def get_CDR_longlong(self, pn):
self.st.out(self.template_get_CDR_longlong, hfname=pn)
def get_CDR_ulonglong(self, pn):
self.st.out(self.template_get_CDR_ulonglong, hfname=pn)
def get_CDR_boolean(self, pn):
self.st.out(self.template_get_CDR_boolean, hfname=pn)
def get_CDR_fixed(self, type, pn):
if self.DEBUG:
print("//XXXX calling get_CDR_fixed, type = ", type)
print("//XXXX calling get_CDR_fixed, type.digits() = ", type.digits())
print("//XXXX calling get_CDR_fixed, type.scale() = ", type.scale())
string_digits = '%i ' % type.digits() # convert int to string
string_scale = '%i ' % type.scale() # convert int to string
        string_length = '%i ' % self.dig_to_len(type.digits())  # how many octets to highlight for a number of digits
self.st.out(self.template_get_CDR_fixed, hfname=pn, digits=string_digits, scale=string_scale, length=string_length)
self.addvar(self.c_seq)
def get_CDR_char(self, pn):
self.st.out(self.template_get_CDR_char, hfname=pn)
def get_CDR_octet(self, pn):
self.st.out(self.template_get_CDR_octet, hfname=pn)
def get_CDR_any(self, pn):
self.st.out(self.template_get_CDR_any, varname=pn)
def get_CDR_enum(self, pn, type):
#self.st.out(self.template_get_CDR_enum, hfname=pn)
sname = self.namespace(type.unalias(), "_")
self.st.out(self.template_get_CDR_enum_symbolic, valstringarray=sname, hfname=pn)
self.addvar(self.c_u_octet4)
def get_CDR_string(self, pn):
self.st.out(self.template_get_CDR_string, hfname=pn)
def get_CDR_wstring(self, pn):
self.st.out(self.template_get_CDR_wstring, hfname=pn)
self.addvar(self.c_u_octet4)
self.addvar(self.c_seq)
def get_CDR_wchar(self, pn):
self.st.out(self.template_get_CDR_wchar, hfname=pn)
self.addvar(self.c_s_octet1)
self.addvar(self.c_seq)
def get_CDR_TypeCode(self, pn):
self.st.out(self.template_get_CDR_TypeCode, varname=pn)
self.addvar(self.c_u_octet4)
def get_CDR_objref(self, type, pn):
self.st.out(self.template_get_CDR_object)
def get_CDR_union(self, type, pn):
if self.DEBUG:
print("//XXX Union type =", type, " pn = ", pn)
print("//XXX Union type.decl()", type.decl())
print("//XXX Union Scoped Name", type.scopedName())
# If I am a typedef union {..}; node then find the union node
if isinstance(type.decl(), idlast.Declarator):
ntype = type.decl().alias().aliasType().decl()
else:
ntype = type.decl() # I am a union node
if self.DEBUG:
print("//XXX Union ntype =", ntype)
sname = self.namespace(ntype, "_")
self.st.out(self.template_union_start, name=sname)
# Output a call to the union helper function so I can handle recursive union also.
self.st.out(self.template_decode_union, name=sname)
self.st.out(self.template_union_end, name=sname)
def getCDR_hf(self, type, desc, filter, hf_name="fred"):
"""This takes a node, and tries to output the appropriate item for the
hf array."""
pt = type.unalias().kind() # param CDR type
pn = hf_name # param name
if self.DEBUG:
print("//XXX getCDR_hf: kind = ", pt)
print("//XXX getCDR_hf: name = ", pn)
if pt == idltype.tk_ulong:
self.get_CDR_ulong_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_longlong:
self.get_CDR_longlong_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_ulonglong:
self.get_CDR_ulonglong_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_void:
pass # no hf_ variables needed
elif pt == idltype.tk_short:
self.get_CDR_short_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_long:
self.get_CDR_long_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_ushort:
self.get_CDR_ushort_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_float:
self.get_CDR_float_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_double:
self.get_CDR_double_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_fixed:
self.get_CDR_fixed_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_boolean:
self.get_CDR_boolean_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_char:
self.get_CDR_char_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_octet:
self.get_CDR_octet_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_any:
pass # no hf_ variables needed
elif pt == idltype.tk_string:
self.get_CDR_string_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_wstring:
self.get_CDR_wstring_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_wchar:
self.get_CDR_wchar_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_enum:
self.get_CDR_enum_hf(pn, type, desc, filter, self.dissname)
elif pt == idltype.tk_struct:
pass # no hf_ variables needed (should be already contained in struct members)
elif pt == idltype.tk_TypeCode: # will I ever get here ?
self.get_CDR_TypeCode_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_sequence:
if type.unalias().seqType().kind() == idltype.tk_octet:
self.get_CDR_sequence_octet_hf(type, pn, desc, filter, self.dissname)
else:
self.get_CDR_sequence_hf(type, pn, desc, filter, self.dissname)
elif pt == idltype.tk_objref:
pass # no object specific hf_ variables used, use generic ones from giop dissector
elif pt == idltype.tk_array:
pass # Supported elsewhere
elif pt == idltype.tk_union:
pass # no hf_ variables needed (should be already contained in union members)
elif pt == idltype.tk_alias:
if self.DEBUG:
print("//XXXXX Alias type hf //XXXXX ", type)
self.get_CDR_alias_hf(type, desc, filter, pn)
else:
self.genWARNING("Unknown typecode = " + '%i ' % pt) # put comment in source code
def get_CDR_ulong_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_ulong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_short_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_short_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_long_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_long_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_ushort_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_ushort_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_float_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_float_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_double_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_double_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_fixed_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_fixed_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_longlong_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_longlong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_ulonglong_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_ulonglong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_boolean_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_boolean_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_char_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_char_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_octet_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_octet_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_enum_hf(self, pn, type, desc, filter, diss):
sname = self.namespace(type.unalias(), "_")
self.st.out(self.template_get_CDR_enum_symbolic_hf, valstringarray=sname, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_string_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_string_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_wstring_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_wstring_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
# self.addvar(self.c_u_octet4)
# self.addvar(self.c_seq)
def get_CDR_wchar_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_wchar_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
# self.addvar(self.c_s_octet1)
# self.addvar(self.c_seq)
def get_CDR_TypeCode_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_TypeCode_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_sequence_octet_hf(self, type, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_sequence_octet_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
    def get_CDR_sequence_hf(self, type, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_sequence_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
if self.isSeqNativeType(type.unalias().seqType()):
self.getCDR_hf(type.unalias().seqType(), desc, filter, pn)
def get_CDR_alias_hf(self, type, desc, filter, pn):
if self.DEBUG:
print("//XXX get_CDR_alias_hf, type = ", type, " pn = ", pn)
print("//XXX get_CDR_alias_hf, type.decl() = ", type.decl())
print("//XXX get_CDR_alias_hf, type.decl().alias() = ", type.decl().alias())
decl = type.decl() # get declarator object
if decl.sizes(): # a typedef array
#indices = self.get_indices_from_sizes(decl.sizes())
#string_indices = '%i ' % indices # convert int to string
#self.st.out(self.template_get_CDR_array_comment, aname=pn, asize=string_indices)
#self.st.out(self.template_get_CDR_array_start, aname=pn, aval=string_indices)
#self.addvar(self.c_i + pn + ";")
#self.st.inc_indent()
self.getCDR_hf(type.decl().alias().aliasType(), desc, filter, pn)
#self.st.dec_indent()
#self.st.out(self.template_get_CDR_array_end)
        else:  # a simple typedef
if self.DEBUG:
print("//XXX get_CDR_alias_hf, type = ", type, " pn = ", pn)
print("//XXX get_CDR_alias_hf, type.decl() = ", type.decl())
#self.getCDR_hf(type.unalias(), desc, filter, decl.identifier() )
self.getCDR_hf(type.unalias(), desc, filter, pn)
def genUnionHelper(self, un):
"""Code to generate Union Helper functions
in: un - a union node
"""
if self.DEBUG:
print("//XXX genUnionHelper called")
print("//XXX Union type =", un)
print("//XXX Union type.switchType()", un.switchType())
print("//XXX Union Scoped Name", un.scopedName())
print("//XXX Union switchType.unalias", un.switchType().unalias())
print("//XXX Union switchType.unalias.kind", un.switchType().unalias().kind())
# check to see if we need an item
un_need_item = False
if un.switchType().unalias().kind() == idltype.tk_enum:
for uc in un.cases(): # for all UnionCase objects in this union
if self.DEBUG:
print("//XXX checking", uc)
if self.isItemVarType(uc.caseType()):
if uc.caseType().unalias().kind() == idltype.tk_sequence:
if uc.caseType().unalias().seqType().kind() == idltype.tk_struct:
un_need_item = True
else:
un_need_item = True
if self.AGGRESSIVE:
un_need_item = True
if self.DEBUG:
print("//XXX need_item =", un_need_item)
sname = self.namespace(un, "_")
self.curr_sname = sname # update current opnode/exnode/stnode/unnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
if un_need_item:
self.st.out(self.template_union_helper_function_start_with_item, sname=sname, unname=un.repoId())
else:
self.st.out(self.template_union_helper_function_start, sname=sname, unname=un.repoId())
self.st.inc_indent()
if len(self.fn_hash[sname]) > 0:
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end_item)
st = un.switchType().unalias() # may be typedef switch type, so find real type
self.st.out(self.template_comment_union_code_start, uname=un.repoId())
self.getCDR(st, sname + "_" + un.identifier())
        # Depending on what kind of discriminant I come across (enum, integer, char,
# short, boolean), make sure I cast the return value of the get_XXX accessor
# to an appropriate value. Omniidl idlast.CaseLabel.value() accessor will
# return an integer, or an Enumerator object that is then converted to its
# integer equivalent.
#
#
# NOTE - May be able to skip some of this stuff, but leave it in for now -- FS
#
if st.kind() == idltype.tk_enum:
std = st.decl()
self.st.out(self.template_comment_union_code_discriminant, uname=std.repoId())
# count the number of cases to ensure variable is needed
num = 0
num_defaults = 0
for uc in un.cases(): # for all UnionCase objects in this union
num += len(uc.labels())
for cl in uc.labels():
if cl.default():
num_defaults += 1
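            # The saved discriminant can only be skipped in the degenerate case
            # of a union with exactly one label that is the default case, since
            # its value is then never compared against anything.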
if num != 1 or num_defaults != 1:
self.st.out(self.template_union_code_save_discriminant_enum, discname=un.identifier())
self.addvar(self.c_s_disc + un.identifier() + ";")
elif st.kind() == idltype.tk_long:
self.st.out(self.template_union_code_save_discriminant_long, discname=un.identifier())
self.addvar(self.c_s_disc + un.identifier() + ";")
elif st.kind() == idltype.tk_ulong:
self.st.out(self.template_union_code_save_discriminant_ulong, discname=un.identifier())
self.addvar(self.c_s_disc + un.identifier() + ";")
elif st.kind() == idltype.tk_short:
self.st.out(self.template_union_code_save_discriminant_short, discname=un.identifier())
self.addvar(self.c_s_disc + un.identifier() + ";")
elif st.kind() == idltype.tk_ushort:
self.st.out(self.template_union_code_save_discriminant_ushort, discname=un.identifier())
self.addvar(self.c_s_disc + un.identifier() + ";")
elif st.kind() == idltype.tk_boolean:
self.st.out(self.template_union_code_save_discriminant_boolean, discname=un.identifier())
self.addvar(self.c_s_disc + un.identifier() + ";")
elif st.kind() == idltype.tk_char:
self.st.out(self.template_union_code_save_discriminant_char, discname=un.identifier())
self.addvar(self.c_s_disc + un.identifier() + ";")
else:
print("//XXX Unknown st.kind() = ", st.kind())
# Loop over all cases in this union
for uc in un.cases(): # for all UnionCase objects in this union
for cl in uc.labels(): # for all Caselabel objects in this UnionCase
# get integer value, even if discriminant is
# an Enumerator node
if isinstance(cl.value(), idlast.Enumerator):
if self.DEBUG:
print("//XXX clv.identifier()", cl.value().identifier())
print("//XXX clv.repoId()", cl.value().repoId())
print("//XXX clv.scopedName()", cl.value().scopedName())
# find index of enumerator in enum declaration
# eg: RED is index 0 in enum Colors { RED, BLUE, GREEN }
clv = self.valFromEnum(std, cl.value())
else:
clv = cl.value()
#print "//XXX clv = ",clv
                # if char, don't convert to int, but put it inside single quotes so that it is understood by C.
# eg: if (disc == 'b')..
#
# TODO : handle \xxx chars generically from a function or table lookup rather than
# a whole bunch of "if" statements. -- FS
if st.kind() == idltype.tk_char:
if clv == '\n':
string_clv = "'\\n'"
elif clv == '\t':
string_clv = "'\\t'"
else:
string_clv = "'" + clv + "'"
else:
string_clv = '%i ' % clv
                # If default case, then skip comparison with discriminant
if not cl.default():
self.st.out(self.template_comment_union_code_label_compare_start,
discname=un.identifier(), labelval=string_clv)
self.st.inc_indent()
else:
self.st.out(self.template_comment_union_code_label_default_start)
self.getCDR(uc.caseType(), sname + "_" + uc.declarator().identifier())
if not cl.default():
self.st.dec_indent()
self.st.out(self.template_comment_union_code_label_compare_end)
else:
self.st.out(self.template_comment_union_code_label_default_end)
self.st.dec_indent()
self.st.out(self.template_union_helper_function_end)
def get_CDR_alias(self, type, pn):
"""Currently, get_CDR_alias is geared to finding typedef"""
if self.DEBUG:
print("//XXX get_CDR_alias, type = ", type, " pn = ", pn)
print("//XXX get_CDR_alias, type.decl() = ", type.decl())
print("//XXX get_CDR_alias, type.decl().alias() = ", type.decl().alias())
decl = type.decl() # get declarator object
if decl.sizes(): # a typedef array
indices = self.get_indices_from_sizes(decl.sizes())
string_indices = '%i ' % indices # convert int to string
self.st.out(self.template_get_CDR_array_comment, aname=pn, asize=string_indices)
self.st.out(self.template_get_CDR_array_start, aname=pn, aval=string_indices)
self.addvar(self.c_i + pn + ";")
self.st.inc_indent()
self.getCDR(type.decl().alias().aliasType(), pn)
self.st.dec_indent()
self.st.out(self.template_get_CDR_array_end)
        else:  # a simple typedef
if self.DEBUG:
print("//XXX type", type.__dict__)
print("//XXX type.unalias()", type.unalias().__dict__)
print("//XXX type.unalias().kind()", type.unalias().kind())
print("//XXX type.decl()", type.decl().__dict__)
self.getCDR(type.unalias(), pn)
def get_CDR_struct(self, type, pn):
"""Handle structs, including recursive"""
# If I am a typedef struct {..}; node then find the struct node
if isinstance(type.decl(), idlast.Declarator):
ntype = type.decl().alias().aliasType().decl()
else:
ntype = type.decl() # I am a struct node
sname = self.namespace(ntype, "_")
self.st.out(self.template_structure_start, name=sname)
# Output a call to the struct helper function so I can handle recursive structs also.
self.st.out(self.template_decode_struct, name=sname)
self.st.out(self.template_structure_end, name=sname)
def genStructHelper(self, st):
"""Generate private helper functions to decode a struct
in: stnode ( a struct node)
"""
if self.DEBUG:
print("//XXX genStructHelper")
sname = self.namespace(st, "_")
self.curr_sname = sname # update current opnode/exnode/stnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_struct_helper_function_start, sname=sname, stname=st.repoId())
self.st.inc_indent()
if len(self.fn_hash[sname]) > 0:
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end_item)
for m in st.members():
for decl in m.declarators():
if decl.sizes(): # an array
indices = self.get_indices_from_sizes(decl.sizes())
string_indices = '%i ' % indices # convert int to string
self.st.out(self.template_get_CDR_array_comment, aname=decl.identifier(), asize=string_indices)
self.st.out(self.template_get_CDR_array_start, aname=decl.identifier(), aval=string_indices)
self.addvar(self.c_i + decl.identifier() + ";")
self.st.inc_indent()
self.getCDR(m.memberType(), sname + "_" + decl.identifier())
self.st.dec_indent()
                    self.st.out(self.template_get_CDR_array_end)
                else:
self.st.dec_indent()
self.st.out(self.template_struct_helper_function_end)
    def get_CDR_sequence(self, type, pn):
"""Generate code to access a sequence of a type"""
if self.DEBUG:
print("//XXX get_CDR_sequence")
self.st.out(self.template_get_CDR_sequence_length, seqname=pn)
self.st.out(self.template_get_CDR_sequence_loop_start, seqname=pn)
self.addvar(self.c_i_lim + pn + ";")
self.addvar(self.c_i + pn + ";")
self.st.inc_indent()
self.getCDR(type.unalias().seqType(), pn) # and start all over with the type
self.st.dec_indent()
self.st.out(self.template_get_CDR_sequence_loop_end)
def get_CDR_sequence_octet(self, type, pn):
"""Generate code to access a sequence of octet"""
if self.DEBUG:
print("//XXX get_CDR_sequence_octet")
self.st.out(self.template_get_CDR_sequence_length, seqname=pn)
self.st.out(self.template_get_CDR_sequence_octet, seqname=pn)
self.addvar(self.c_i_lim + pn + ";")
self.addvar("const guint8 * binary_seq_" + pn + ";")
self.addvar("gchar * text_seq_" + pn + ";")
@staticmethod
def namespace(node, sep):
"""in - op node
out - scoped operation name, using sep character instead of "::"
eg: Penguin::Echo::echoWString => Penguin_Echo_echoWString if sep = "_"
"""
sname = idlutil.ccolonName(node.scopedName()).replace('::', sep)
#print("//XXX namespace: sname = " + sname)
return sname
def gen_plugin_register(self):
"""generate code for plugin initialisation"""
self.st.out(self.template_plugin_register, description=self.description,
protocol_name=self.protoname, dissector_name=self.dissname)
# TODO - make this a command line option
#
# -e explicit
# -h heuristic
def gen_proto_reg_handoff(self, oplist):
"""generate register_giop_user_module code, and register only
unique interfaces that contain operations. Also output
a heuristic register in case we want to use that."""
self.st.out(self.template_proto_reg_handoff_start, dissector_name=self.dissname)
self.st.inc_indent()
for iname in self.get_intlist(oplist):
self.st.out(self.template_proto_reg_handoff_body, dissector_name=self.dissname,
protocol_name=self.protoname, interface=iname)
self.st.out(self.template_proto_reg_handoff_heuristic, dissector_name=self.dissname,
protocol_name=self.protoname)
self.st.dec_indent()
self.st.out(self.template_proto_reg_handoff_end)
def genOp_hf(self, op):
"""generate hf_ array element for operation, attribute, enums, struct and union lists"""
sname = self.namespace(op, "_")
opname = sname[sname.find("_")+1:]
opname = opname[:opname.find("_")]
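        # e.g. sname "Penguin_Echo_echoString" yields opname "Echo"; the
        # interface component becomes the display-filter prefix for the hf
        # entries generated below.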
rt = op.returnType()
if rt.kind() != idltype.tk_void:
            if rt.kind() == idltype.tk_alias:  # a typedef return value, possibly?
self.getCDR_hf(rt, rt.name(),
opname + "." + op.identifier() + ".return", sname + "_return")
else:
self.getCDR_hf(rt, "Return value",
opname + "." + op.identifier() + ".return", sname + "_return")
for p in op.parameters():
self.getCDR_hf(p.paramType(),
p.identifier(),
opname + "." + op.identifier() + "." + p.identifier(),
sname + "_" + p.identifier())
def genAt_hf(self, at):
for decl in at.declarators():
sname = self.namespace(decl, "_")
atname = sname[sname.find("_")+1:]
atname = atname[:atname.find("_")]
self.getCDR_hf(at.attrType(), decl.identifier(),
atname + "." + decl.identifier() + ".get", "get" + "_" + sname + "_" + decl.identifier())
if not at.readonly():
self.getCDR_hf(at.attrType(), decl.identifier(),
atname + "." + decl.identifier() + ".set", "set" + "_" + sname + "_" + decl.identifier())
def genSt_hf(self, st):
sname = self.namespace(st, "_")
stname = sname[sname.find("_")+1:]
stname = stname[:stname.find("_")]
for m in st.members():
for decl in m.declarators():
self.getCDR_hf(m.memberType(), st.identifier() + "_" + decl.identifier(),
st.identifier() + "." + decl.identifier(), sname + "_" + decl.identifier())
def genEx_hf(self, ex):
sname = self.namespace(ex, "_")
exname = sname[sname.find("_")+1:]
exname = exname[:exname.find("_")]
for m in ex.members():
for decl in m.declarators():
self.getCDR_hf(m.memberType(), ex.identifier() + "_" + decl.identifier(),
exname + "." + ex.identifier() + "_" + decl.identifier(), sname + "_" + decl.identifier())
def genUnion_hf(self, un):
sname = self.namespace(un, "_")
unname = sname[:sname.rfind("_")]
unname = unname.replace("_", ".")
self.getCDR_hf(un.switchType().unalias(), un.identifier(),
unname + "." + un.identifier(), sname + "_" + un.identifier())
for uc in un.cases(): # for all UnionCase objects in this union
# TODO: is this loop necessary?
for cl in uc.labels(): # for all Caselabel objects in this UnionCase
self.getCDR_hf(uc.caseType(), un.identifier() + "_" + uc.declarator().identifier(),
unname + "." + un.identifier() + "." + uc.declarator().identifier(),
sname + "_" + uc.declarator().identifier())
def gen_proto_register(self, oplist, atlist, stlist, unlist):
"""generate proto_register_<protoname> code,
        in - oplist[], atlist[], stlist[], unlist[]
"""
self.st.out(self.template_proto_register_start, dissector_name=self.dissname)
# operation specific filters
self.st.out(self.template_proto_register_op_filter_comment)
for op in oplist:
self.genOp_hf(op)
# attribute filters
self.st.out(self.template_proto_register_at_filter_comment)
for at in atlist:
self.genAt_hf(at)
# struct filters
self.st.out(self.template_proto_register_st_filter_comment)
for st in stlist:
if st.members(): # only if has members
self.genSt_hf(st)
# exception List filters
exlist = self.get_exceptionList(oplist) # grab list of exception nodes
self.st.out(self.template_proto_register_ex_filter_comment)
for ex in exlist:
if ex.members(): # only if has members
self.genEx_hf(ex)
# Union filters
self.st.out(self.template_proto_register_un_filter_comment)
for un in unlist:
self.genUnion_hf(un)
self.st.out(self.template_proto_register_end, description=self.description,
protocol_name=self.protoname, dissector_name=self.dissname)
@staticmethod
def get_intlist(oplist):
"""in - oplist[]
out - a list of unique interface names. This will be used in
register_giop_user_module(dissect_giop_auto, "TEST IDL", "Penguin/Echo" ); so the operation
name must be removed from the scope. And we also only want unique interfaces.
"""
int_hash = {} # holds a hash of unique interfaces
for op in oplist:
sc = op.scopedName() # eg: penguin,tux,bite
sc1 = sc[:-1]
sn = idlutil.slashName(sc1) # penguin/tux
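            # e.g. operations Penguin::Tux::bite and Penguin::Tux::peck both
            # collapse to the single interface key "Penguin/Tux".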
if sn not in int_hash:
int_hash[sn] = 0 # dummy val, but at least key is unique
ret = list(int_hash.keys())
ret.sort()
return ret
def get_exceptionList(self, oplist):
"""in - oplist[]
        out - a list of exception nodes (unique). This will be used
        to generate dissect_exception_XXX functions.
"""
ex_hash = collections.OrderedDict() # holds a hash of unique exceptions.
for op in oplist:
for ex in op.raises():
if ex not in ex_hash:
ex_hash[ex] = 0 # dummy val, but at least key is unique
if self.DEBUG:
print("//XXX Exception = " + ex.identifier())
ret = list(ex_hash.keys())
return ret
@staticmethod
def get_indices_from_sizes(sizelist):
"""Simple function to take a list of array sizes and find the total number of elements
eg: temp[4][3] = 12 elements
"""
val = 1
for i in sizelist:
val = val * i
return val
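        # e.g. get_indices_from_sizes([4, 3]) == 12; the flattened count drives
        # the single for-loop emitted via template_get_CDR_array_start.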
@staticmethod
def dig_to_len(dignum):
"""Determine how many octets contain requested number
of digits for an "fixed" IDL type "on the wire" """
return (dignum/2) + 1
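        # Illustrative: a fixed with 5 digits occupies dig_to_len(5) == 3
        # octets on the wire, since CDR packs two decimal digits per octet
        # with the sign carried in the final half-octet.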
def genTODO(self, message):
self.st.out(self.template_debug_TODO, message=message)
def genWARNING(self, message):
self.st.out(self.template_debug_WARNING, message=message)
# Templates for C code
template_helper_function_comment = """\
/*
* @repoid@
*/"""
template_helper_function_vars_start = """\
/* Operation specific Variable declarations Begin */"""
template_helper_function_vars_end = """\
/* Operation specific Variable declarations End */
"""
template_helper_function_vars_end_item = """\
/* Operation specific Variable declarations End */
"""
template_helper_function_start = """\
static void
decode_@sname@(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{"""
template_helper_function_end = """\
}
"""
template_proto_reg_handoff_start = """\
/* register me as handler for these interfaces */
void proto_reg_handoff_giop_@dissector_name@(void)
{"""
template_proto_reg_handoff_body = """\
/* Register for Explicit Dissection */
register_giop_user_module(dissect_@dissector_name@, \"@protocol_name@\", \"@interface@\", proto_@dissector_name@ ); /* explicit dissector */
"""
template_proto_reg_handoff_heuristic = """\
/* Register for Heuristic Dissection */
register_giop_user(dissect_@dissector_name@, \"@protocol_name@\" ,proto_@dissector_name@); /* heuristic dissector */
"""
template_proto_reg_handoff_end = """\
}
"""
template_prototype = """
void proto_register_giop_@dissector_name@(void);
void proto_reg_handoff_giop_@dissector_name@(void);"""
# Initialize the protocol
template_protocol = """
/* Initialise the protocol and subtree pointers */
static int proto_@dissector_name@ = -1;
static gint ett_@dissector_name@ = -1;
"""
template_init_boundary = """
/* Initialise the initial Alignment */
static guint32 boundary = GIOP_HEADER_SIZE; /* initial value */"""
# plugin_register and plugin_reg_handoff templates
template_plugin_register = """
#if 0
WS_DLL_PUBLIC_DEF void
plugin_register(void)
{
if (proto_@dissector_name@ == -1) {
proto_register_giop_@dissector_name@();
}
}
WS_DLL_PUBLIC_DEF void
plugin_reg_handoff(void){
proto_register_handoff_giop_@dissector_name@();
}
#endif
"""
template_proto_register_start = """
/* Register the protocol with Wireshark */
void proto_register_giop_@dissector_name@(void)
{
/* setup list of header fields */
static hf_register_info hf[] = {
/* field that indicates the currently ongoing request/reply exchange */
{&hf_operationrequest, {"Request_Operation","giop-@[email protected]_Operation",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_proto_register_end = """
};
static ei_register_info ei[] = {
{ &ei_@dissector_name@_unknown_giop_msg, { "giop-@[email protected]_giop_msg", PI_PROTOCOL, PI_WARN, "Unknown GIOP message", EXPFILL }},
{ &ei_@dissector_name@_unknown_exception, { "giop-@[email protected]_exception", PI_PROTOCOL, PI_WARN, "Unknown exception", EXPFILL }},
{ &ei_@dissector_name@_unknown_reply_status, { "giop-@[email protected]_reply_status", PI_PROTOCOL, PI_WARN, "Unknown reply status", EXPFILL }},
};
/* setup protocol subtree array */
static gint *ett[] = {
&ett_@dissector_name@,
};
expert_module_t* expert_@dissector_name@;
/* Register the protocol name and description */
proto_@dissector_name@ = proto_register_protocol(\"@description@\" , \"@protocol_name@\", \"giop-@dissector_name@\" );
proto_register_field_array(proto_@dissector_name@, hf, array_length(hf));
proto_register_subtree_array(ett, array_length(ett));
expert_@dissector_name@ = expert_register_protocol(proto_@dissector_name@);
expert_register_field_array(expert_@dissector_name@, ei, array_length(ei));
}
"""
template_proto_register_op_filter_comment = """\
/* Operation filters */"""
template_proto_register_at_filter_comment = """\
/* Attribute filters */"""
template_proto_register_st_filter_comment = """\
/* Struct filters */"""
template_proto_register_ex_filter_comment = """\
/* User exception filters */"""
template_proto_register_un_filter_comment = """\
/* Union filters */"""
template_proto_register_ei_filters = """\
/* Expert info filters */
static expert_field ei_@dissector_name@_unknown_giop_msg = EI_INIT;
static expert_field ei_@dissector_name@_unknown_exception = EI_INIT;
static expert_field ei_@dissector_name@_unknown_reply_status = EI_INIT;
"""
# template for delegation code
template_op_delegate_code = """\
if (strcmp(operation, "@opname@") == 0
&& (!idlname || strcmp(idlname, \"@interface@\") == 0)) {
item = process_RequestOperation(tvb, pinfo, ptree, header, operation); /* fill-up Request_Operation field & info column */
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_@sname@(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);
return TRUE;
}
"""
# Templates for the helper functions
template_helper_switch_msgtype_start = """\
switch(header->message_type) {"""
template_helper_switch_msgtype_default_start = """\
default:
/* Unknown GIOP Message */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_giop_msg, "Unknown GIOP message %d", header->message_type);"""
template_helper_switch_msgtype_default_end = """\
break;"""
template_helper_switch_msgtype_end = """\
} /* switch(header->message_type) */"""
template_helper_switch_msgtype_request_start = """\
case Request:"""
template_helper_switch_msgtype_request_end = """\
break;"""
template_helper_switch_msgtype_reply_start = """\
case Reply:"""
template_helper_switch_msgtype_reply_no_exception_start = """\
case NO_EXCEPTION:"""
template_helper_switch_msgtype_reply_no_exception_end = """\
break;"""
template_helper_switch_msgtype_reply_user_exception_start = """\
case USER_EXCEPTION:"""
template_helper_switch_msgtype_reply_user_exception_end = """\
break;"""
template_helper_switch_msgtype_reply_default_start = """\
default:
/* Unknown Exception */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_exception, "Unknown exception %d", header->rep_status);"""
template_helper_switch_msgtype_reply_default_end = """\
break;"""
template_helper_switch_msgtype_reply_end = """\
break;"""
template_helper_switch_rep_status_start = """\
switch(header->rep_status) {"""
template_helper_switch_rep_status_default_start = """\
default:
/* Unknown Reply Status */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_reply_status, "Unknown reply status %d", header->rep_status);"""
template_helper_switch_rep_status_default_end = """\
break;"""
template_helper_switch_rep_status_end = """\
} /* switch(header->rep_status) */
break;"""
# Templates for get_CDR_xxx accessors
template_get_CDR_ulong = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_ulong(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_short = """\
proto_tree_add_int(tree, hf_@hfname@, tvb, *offset-2, 2, get_CDR_short(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_void = """\
/* Function returns void */
"""
template_get_CDR_long = """\
proto_tree_add_int(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_long(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_ushort = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-2, 2, get_CDR_ushort(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_float = """\
proto_tree_add_float(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_float(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_double = """\
proto_tree_add_double(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_double(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_longlong = """\
proto_tree_add_int64(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_long_long(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_ulonglong = """\
proto_tree_add_uint64(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_ulong_long(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_boolean = """\
proto_tree_add_boolean(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_boolean(tvb,offset));
"""
template_get_CDR_char = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_char(tvb,offset));
"""
template_get_CDR_octet = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_octet(tvb,offset));
"""
template_get_CDR_any = """\
get_CDR_any(tvb, pinfo, tree, item, offset, stream_is_big_endian, boundary, header);
"""
template_get_CDR_fixed = """\
get_CDR_fixed(tvb, pinfo, item, &seq, offset, @digits@, @scale@);
proto_tree_add_string_format_value(tree, hf_@hfname@, tvb, *offset-@length@, @length@, seq, "< @digits@, @scale@> = %s", seq);
"""
template_get_CDR_enum_symbolic = """\
u_octet4 = get_CDR_enum(tvb,offset,stream_is_big_endian, boundary);
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-4, 4, u_octet4);
"""
template_get_CDR_string = """\
giop_add_CDR_string(tree, tvb, offset, stream_is_big_endian, boundary, hf_@hfname@);
"""
template_get_CDR_wstring = """\
u_octet4 = get_CDR_wstring(tvb, &seq, offset, stream_is_big_endian, boundary, header);
proto_tree_add_string(tree, hf_@hfname@, tvb, *offset-u_octet4, u_octet4, (u_octet4 > 0) ? seq : \"\");
"""
template_get_CDR_wchar = """\
s_octet1 = get_CDR_wchar(tvb, &seq, offset, header);
if (tree) {
if (s_octet1 > 0)
proto_tree_add_uint(tree, hf_@hfname@_len, tvb, *offset-1-s_octet1, 1, s_octet1);
if (s_octet1 < 0)
s_octet1 = -s_octet1;
if (s_octet1 > 0)
proto_tree_add_string(tree, hf_@hfname@, tvb, *offset-s_octet1, s_octet1, seq);
}
"""
template_get_CDR_TypeCode = """\
u_octet4 = get_CDR_typeCode(tvb, pinfo, tree, offset, stream_is_big_endian, boundary, header);
"""
template_get_CDR_object = """\
get_CDR_object(tvb, pinfo, tree, offset, stream_is_big_endian, boundary);
"""
template_get_CDR_sequence_length = """\
u_octet4_loop_@seqname@ = get_CDR_ulong(tvb, offset, stream_is_big_endian, boundary);
proto_tree_add_uint(tree, hf_@seqname@_loop, tvb,*offset-4, 4, u_octet4_loop_@seqname@);
"""
template_get_CDR_sequence_length_item = """\
u_octet4_loop_@seqname@ = get_CDR_ulong(tvb, offset, stream_is_big_endian, boundary);
item = proto_tree_add_uint(tree, hf_@seqname@_loop, tvb,*offset-4, 4, u_octet4_loop_@seqname@);
"""
template_get_CDR_sequence_loop_start = """\
for (i_@seqname@=0; i_@seqname@ < u_octet4_loop_@seqname@; i_@seqname@++) {
"""
template_get_CDR_sequence_loop_end = """\
}
"""
template_get_CDR_sequence_octet = """\
if (u_octet4_loop_@seqname@ > 0 && tree) {
get_CDR_octet_seq(tvb, &binary_seq_@seqname@, offset,
u_octet4_loop_@seqname@);
text_seq_@seqname@ = make_printable_string(binary_seq_@seqname@,
u_octet4_loop_@seqname@);
proto_tree_add_bytes_format_value(tree, hf_@seqname@, tvb, *offset - u_octet4_loop_@seqname@,
u_octet4_loop_@seqname@, binary_seq_@seqname@, \"%s\", text_seq_@seqname@);
}
"""
template_get_CDR_array_start = """\
for (i_@aname@=0; i_@aname@ < @aval@; i_@aname@++) {
"""
template_get_CDR_array_end = """\
}
"""
template_get_CDR_array_comment = """\
/* Array: @aname@[ @asize@] */
"""
template_structure_start = """\
/* Begin struct \"@name@\" */"""
template_structure_end = """\
/* End struct \"@name@\" */"""
template_union_start = """\
/* Begin union \"@name@\" */"""
template_union_end = """\
/* End union \"@name@\" */"""
# Templates for get_CDR_xxx_hf accessors
template_get_CDR_ulong_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_short_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT16,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_long_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_ushort_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT16,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_float_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_FLOAT,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_double_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_DOUBLE,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_fixed_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_longlong_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT64,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_ulonglong_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT64,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_boolean_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_BOOLEAN,8,NULL,0x01,NULL,HFILL}},"""
template_get_CDR_char_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_octet_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_HEX,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_enum_symbolic_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
template_get_CDR_string_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_wstring_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_wchar_hf = """\
{&hf_@hfname@_len, {"@descname@ Length","giop-@dissector_name@.@[email protected]",FT_UINT8,BASE_DEC,NULL,0x0,NULL,HFILL}},
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_TypeCode_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_sequence_hf = """\
{&hf_@hfname@_loop, {"Seq length of @descname@","giop-@dissector_name@.@[email protected]",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_sequence_octet_hf = """\
{&hf_@hfname@_loop, {"Seq length of @descname@","giop-@dissector_name@.@[email protected]",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_BYTES,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_Header = """\
/* packet-@[email protected]
*
* Routines for IDL dissection
*
* Autogenerated from idl2wrs
* Copyright 2001 Frank Singleton <frank.singleton@@ericsson.com>
*/
"""
template_wireshark_copyright = """\
/*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@@wireshark.org>
* Copyright 1998 Gerald Combs
*/
"""
template_GPL = """\
/*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
"""
template_Modelines = """\
/*
* Editor modelines - https://www.wireshark.org/tools/modelines.html
*
* Local Variables:
* c-basic-offset: 4
* tab-width: 8
* indent-tabs-mode: nil
* End:
*
* ex: set shiftwidth=4 tabstop=8 expandtab:
* :indentSize=4:tabSize=8:noTabs=true:
*/"""
template_Includes = """\
#include "config.h"
#include <string.h>
#include <epan/packet.h>
#include <epan/proto.h>
#include <epan/dissectors/packet-giop.h>
#include <epan/expert.h>
#include "ws_diag_control.h"
#include "ws_compiler_tests.h"
#ifdef _MSC_VER
/* disable warning: "unreference local variable" */
#pragma warning(disable:4101)
#endif
/* XXX this should be autogenerated, or the warnings fixed in the generator */
DIAG_OFF(unused-function)
DIAG_OFF(unused-variable)
#if WS_IS_AT_LEAST_GNUC_VERSION(6,0)
DIAG_OFF(unused-const-variable)
#endif"""
template_main_dissector_start = """\
/*
* Called once we accept the packet as being for us; it sets the
* Protocol and Info columns and creates the top-level protocol
* tree item.
*/
static proto_tree *
start_dissecting(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset)
{
proto_item *ti = NULL;
proto_tree *tree = NULL; /* init later, inside if(tree) */
col_set_str(pinfo->cinfo, COL_PROTOCOL, \"@disprot@\");
/*
* Do not clear COL_INFO, as nothing is being written there by
* this dissector yet. So leave it as is from the GIOP dissector.
* TODO: add something useful to COL_INFO
* col_clear(pinfo->cinfo, COL_INFO);
*/
if (ptree) {
ti = proto_tree_add_item(ptree, proto_@dissname@, tvb, *offset, tvb_reported_length_remaining(tvb, *offset), ENC_NA);
tree = proto_item_add_subtree(ti, ett_@dissname@);
}
return tree;
}
static proto_item*
process_RequestOperation(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, MessageHeader *header, const gchar *operation)
{
proto_item *pi;
if(header->message_type == Reply) {
/* fill-up info column */
col_append_fstr(pinfo->cinfo, COL_INFO, " op = %s",operation);
}
/* fill-up the field */
pi=proto_tree_add_string(ptree, hf_operationrequest, tvb, 0, 0, operation);
proto_item_set_generated(pi);
return pi;
}
static gboolean
dissect_@dissname@(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset, MessageHeader *header, const gchar *operation, gchar *idlname)
{
proto_item *item _U_;
proto_tree *tree _U_;
    gboolean stream_is_big_endian = is_big_endian(header); /* get endianness */
/* If we have a USER Exception, then decode it and return */
if ((header->message_type == Reply) && (header->rep_status == USER_EXCEPTION)) {
return decode_user_exception(tvb, pinfo, ptree, offset, header, operation, stream_is_big_endian);
}
"""
template_main_dissector_switch_msgtype_start = """\
switch(header->message_type) {
"""
template_main_dissector_switch_msgtype_start_request_reply = """\
case Request:
case Reply:
"""
template_main_dissector_switch_msgtype_end_request_reply = """\
break;
"""
template_main_dissector_switch_msgtype_all_other_msgtype = """\
case CancelRequest:
case LocateRequest:
case LocateReply:
case CloseConnection:
case MessageError:
case Fragment:
return FALSE; /* not handled yet */
default:
return FALSE; /* not handled yet */
} /* switch */
"""
template_main_dissector_end = """\
return FALSE;
} /* End of main dissector */
"""
#-------------------------------------------------------------#
# Exception handling templates #
#-------------------------------------------------------------#
template_exception_helpers_start = """\
/* Begin Exception Helper Functions */
"""
template_exception_helpers_end = """\
/* End Exception Helper Functions */
"""
template_main_exception_delegator_start = """\
/*
* Main delegator for exception handling
*
*/
static gboolean
decode_user_exception(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *ptree _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_tree *tree _U_;
if (!header->exception_id)
return FALSE;
"""
template_ex_delegate_code = """\
if (strcmp(header->exception_id, "@exname@") == 0) {
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_ex_@sname@(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian); /* @exname@ */
return TRUE;
}
"""
template_main_exception_delegator_end = """
return FALSE; /* user exception not found */
}
"""
template_exception_helper_function_start_no_item = """\
/* Exception = @exname@ */
static void
decode_ex_@sname@(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_item *item _U_;
"""
template_exception_helper_function_start_item = """\
/* Exception = @exname@ */
static void
decode_ex_@sname@(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_item *item = NULL;
"""
template_exception_helper_function_end = """\
}
"""
template_struct_helper_function_start = """\
/* Struct = @stname@ */
static void
decode_@sname@_st(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
"""
template_struct_helper_function_end = """\
}
"""
template_union_helper_function_start = """\
/* Union = @unname@ */
static void
decode_@sname@_un(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
"""
template_union_helper_function_start_with_item = """\
/* Union = @unname@ */
static void
decode_@sname@_un(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_item* item = NULL;
"""
template_union_helper_function_end = """\
}
"""
#-------------------------------------------------------------#
# Value string templates #
#-------------------------------------------------------------#
template_value_string_start = """\
static const value_string @valstringname@[] = {
"""
template_value_string_entry = """\
{ @intval@, \"@description@\" },"""
template_value_string_end = """\
{ 0, NULL },
};
"""
#-------------------------------------------------------------#
# Enum handling templates #
#-------------------------------------------------------------#
template_comment_enums_start = """\
/*
* IDL Enums Start
*/
"""
template_comment_enums_end = """\
/*
* IDL Enums End
*/
"""
template_comment_enum_comment = """\
/*
* Enum = @ename@
*/"""
#-------------------------------------------------------------#
# Attribute handling templates #
#-------------------------------------------------------------#
template_comment_attributes_start = """\
/*
* IDL Attributes Start
*/
"""
# get/set accessor method names are language mapping dependent.
template_attributes_declare_Java_get = """static const char get_@sname@_at[] = \"_get_@atname@\" ;"""
template_attributes_declare_Java_set = """static const char set_@sname@_at[] = \"_set_@atname@\" ;"""
template_comment_attributes_end = """
/*
* IDL Attributes End
*/
"""
# template for Attribute delegation code
#
# Note: _get_xxx() should only be called for Reply with NO_EXCEPTION
# Note: _set_xxx() should only be called for Request
template_at_delegate_code_get = """\
if (strcmp(operation, get_@sname@_at) == 0 && (header->message_type == Reply) && (header->rep_status == NO_EXCEPTION) ) {
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_get_@sname@_at(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian);
return TRUE;
}
"""
template_at_delegate_code_set = """\
if (strcmp(operation, set_@sname@_at) == 0 && (header->message_type == Request) ) {
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_set_@sname@_at(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian);
return TRUE;
}
"""
template_attribute_helpers_start = """\
/* Begin Attribute Helper Functions */
"""
template_attribute_helpers_end = """\
/* End Attribute Helper Functions */
"""
template_attribute_helper_function_start = """\
/* Attribute = @atname@ */
static void
decode_@sname@_at(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_item* item _U_;
"""
template_attribute_helper_function_end = """\
}
"""
#-------------------------------------------------------------#
# Debugging templates #
#-------------------------------------------------------------#
# Template for outputting TODO "C" comments
# so user know I need to improve something.
template_debug_TODO = """\
/* TODO - @message@ */
"""
# Template for outputting WARNING "C" comments
# so user know if I have found a problem.
template_debug_WARNING = """\
/* WARNING - @message@ */
"""
#-------------------------------------------------------------#
# IDL Union templates #
#-------------------------------------------------------------#
template_comment_union_code_start = """\
/*
* IDL Union Start - @uname@
*/
"""
template_comment_union_code_end = """
/*
* IDL union End - @uname@
*/
"""
template_comment_union_code_discriminant = """\
/*
* IDL Union - Discriminant - @uname@
*/
"""
# Cast Unions types to something appropriate
# Enum value cast to guint32, all others cast to gint32
# as omniidl accessor returns integer or Enum.
template_union_code_save_discriminant_enum = """\
disc_s_@discname@ = (gint32) u_octet4; /* save Enum Value discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_long = """\
disc_s_@discname@ = (gint32) s_octet4; /* save gint32 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_ulong = """\
disc_s_@discname@ = (gint32) u_octet4; /* save guint32 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_short = """\
disc_s_@discname@ = (gint32) s_octet2; /* save gint16 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_ushort = """\
disc_s_@discname@ = (gint32) u_octet2; /* save guint16 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_char = """\
disc_s_@discname@ = (gint32) u_octet1; /* save guint1 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_boolean = """\
disc_s_@discname@ = (gint32) u_octet1; /* save guint1 discriminant and cast to gint32 */
"""
template_comment_union_code_label_compare_start = """\
if (disc_s_@discname@ == @labelval@) {
"""
template_comment_union_code_label_compare_end = """\
return; /* End Compare for this discriminant type */
}
"""
template_comment_union_code_label_default_start = """
/* Default Union Case Start */
"""
template_comment_union_code_label_default_end = """\
/* Default Union Case End */
"""
# Templates for function prototypes.
# This is used in genDeclares() for declaring function prototypes
# for structs and union helper functions.
template_hf_operations = """
static int hf_operationrequest = -1;/* Request_Operation field */
"""
template_hf = """\
static int hf_@name@ = -1;"""
template_prototype_start_dissecting = """
static proto_tree *start_dissecting(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset);
"""
template_prototype_struct_start = """\
/* Struct prototype declaration Start */
"""
template_prototype_struct_end = """\
/* Struct prototype declaration End */
"""
template_prototype_struct_body = """\
/* Struct = @stname@ */
static void decode_@name@_st(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_);
"""
template_decode_struct = """\
decode_@name@_st(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);"""
template_prototype_union_start = """\
/* Union prototype declaration Start */"""
template_prototype_union_end = """\
/* Union prototype declaration End */"""
template_prototype_union_body = """
/* Union = @unname@ */
static void decode_@name@_un(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_);
"""
template_decode_union = """\
decode_@name@_un(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian);
"""
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
#<|fim▁end|> |
else: |
<|file_name|>resource_loader.py<|end_file_name|><|fim▁begin|># Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""ResourceFinder is a helper class for finding resources given their name."""
import codecs
import os
from py_vulcanize import module
from py_vulcanize import style_sheet as style_sheet_module
from py_vulcanize import resource as resource_module
from py_vulcanize import html_module
from py_vulcanize import strip_js_comments
class ResourceLoader(object):
"""Manges loading modules and their dependencies from files.
Modules handle parsing and the construction of their individual dependency
pointers. The loader deals with bookkeeping of what has been loaded, and
mapping names to file resources.
"""
def __init__(self, project):
self.project = project
self.stripped_js_by_filename = {}
self.loaded_modules = {}
self.loaded_raw_scripts = {}
self.loaded_style_sheets = {}
self.loaded_images = {}
@property
def source_paths(self):
"""A list of base directories to search for modules under."""
return self.project.source_paths
def FindResource(self, some_path, binary=False):
"""Finds a Resource for the given path.
Args:
some_path: A relative or absolute path to a file.
Returns:
A Resource or None.
"""
if os.path.isabs(some_path):
return self.FindResourceGivenAbsolutePath(some_path, binary)
else:
return self.FindResourceGivenRelativePath(some_path, binary)
def FindResourceGivenAbsolutePath(self, absolute_path, binary=False):
"""Returns a Resource for the given absolute path."""
candidate_paths = []
for source_path in self.source_paths:
if absolute_path.startswith(source_path):
candidate_paths.append(source_path)
if len(candidate_paths) == 0:
return None
# Sort by length. Longest match wins.
candidate_paths.sort(lambda x, y: len(x) - len(y))
longest_candidate = candidate_paths[-1]
return resource_module.Resource(longest_candidate, absolute_path, binary)
def FindResourceGivenRelativePath(self, relative_path, binary=False):
"""Returns a Resource for the given relative path."""
absolute_path = None
for script_path in self.source_paths:
absolute_path = os.path.join(script_path, relative_path)
if os.path.exists(absolute_path):
return resource_module.Resource(script_path, absolute_path, binary)
return None
def _FindResourceGivenNameAndSuffix(
self, requested_name, extension, return_resource=False):
"""Searches for a file and reads its contents.
Args:
requested_name: The name of the resource that was requested.
extension: The extension for this requested resource.
Returns:
A (path, contents) pair.
"""
pathy_name = requested_name.replace('.', os.sep)
filename = pathy_name + extension
resource = self.FindResourceGivenRelativePath(filename)
if return_resource:
return resource
if not resource:
return None, None
return _read_file(resource.absolute_path)
def FindModuleResource(self, requested_module_name):
"""Finds a module javascript file and returns a Resource, or none."""
js_resource = self._FindResourceGivenNameAndSuffix(
requested_module_name, '.js', return_resource=True)
html_resource = self._FindResourceGivenNameAndSuffix(
requested_module_name, '.html', return_resource=True)
if js_resource and html_resource:
if html_module.IsHTMLResourceTheModuleGivenConflictingResourceNames(
js_resource, html_resource):
return html_resource
return js_resource
elif js_resource:
return js_resource
return html_resource
def LoadModule(self, module_name=None, module_filename=None,
excluded_scripts=None):
assert bool(module_name) ^ bool(module_filename), (
'Must provide either module_name or module_filename.')
if module_filename:
resource = self.FindResource(module_filename)
if not resource:
raise Exception('Could not find %s in %s' % (
module_filename, repr(self.source_paths)))
module_name = resource.name
else:
resource = None # Will be set if we end up needing to load.
if module_name in self.loaded_modules:
assert self.loaded_modules[module_name].contents
return self.loaded_modules[module_name]
if not resource: # happens when module_name was given
resource = self.FindModuleResource(module_name)
if not resource:
raise module.DepsException('No resource for module "%s"' % module_name)
m = html_module.HTMLModule(self, module_name, resource)
self.loaded_modules[module_name] = m
# Fake it, this is probably either polymer.min.js or platform.js which are
# actually .js files....
if resource.absolute_path.endswith('.js'):
return m
m.Parse(excluded_scripts)
m.Load(excluded_scripts)
return m
def LoadRawScript(self, relative_raw_script_path):
resource = None
for source_path in self.source_paths:
possible_absolute_path = os.path.join(
source_path, os.path.normpath(relative_raw_script_path))
if os.path.exists(possible_absolute_path):
resource = resource_module.Resource(
source_path, possible_absolute_path)
break<|fim▁hole|> assert relative_raw_script_path == resource.unix_style_relative_path, (
'Expected %s == %s' % (relative_raw_script_path,
resource.unix_style_relative_path))
if resource.absolute_path in self.loaded_raw_scripts:
return self.loaded_raw_scripts[resource.absolute_path]
raw_script = module.RawScript(resource)
self.loaded_raw_scripts[resource.absolute_path] = raw_script
return raw_script
def LoadStyleSheet(self, name):
if name in self.loaded_style_sheets:
return self.loaded_style_sheets[name]
resource = self._FindResourceGivenNameAndSuffix(
name, '.css', return_resource=True)
if not resource:
raise module.DepsException(
'Could not find a file for stylesheet %s' % name)
style_sheet = style_sheet_module.StyleSheet(self, name, resource)
style_sheet.load()
self.loaded_style_sheets[name] = style_sheet
return style_sheet
def LoadImage(self, abs_path):
if abs_path in self.loaded_images:
return self.loaded_images[abs_path]
if not os.path.exists(abs_path):
raise module.DepsException("url('%s') did not exist" % abs_path)
res = self.FindResourceGivenAbsolutePath(abs_path, binary=True)
if res is None:
raise module.DepsException("url('%s') was not in search path" % abs_path)
image = style_sheet_module.Image(res)
self.loaded_images[abs_path] = image
return image
def GetStrippedJSForFilename(self, filename, early_out_if_no_py_vulcanize):
if filename in self.stripped_js_by_filename:
return self.stripped_js_by_filename[filename]
with open(filename, 'r') as f:
contents = f.read(4096)
if early_out_if_no_py_vulcanize and ('py_vulcanize' not in contents):
return None
s = strip_js_comments.StripJSComments(contents)
self.stripped_js_by_filename[filename] = s
return s
def _read_file(absolute_path):
"""Reads a file and returns a (path, contents) pair.
Args:
absolute_path: Absolute path to a file.
Raises:
Exception: The given file doesn't exist.
IOError: There was a problem opening or reading the file.
"""
if not os.path.exists(absolute_path):
raise Exception('%s not found.' % absolute_path)
f = codecs.open(absolute_path, mode='r', encoding='utf-8')
contents = f.read()
f.close()
return absolute_path, contents<|fim▁end|> | if not resource:
raise module.DepsException(
'Could not find a file for raw script %s in %s' %
(relative_raw_script_path, self.source_paths)) |
<|file_name|>YAHOO.js<|end_file_name|><|fim▁begin|>/* Copyright (c) 2006 Yahoo! Inc. All rights reserved. */
/**
* @class The Yahoo global namespace
*/
var YAHOO = function() {
return {
/**
* Yahoo presentation platform utils namespace
*/
util: {},
/**
* Yahoo presentation platform widgets namespace
*/
widget: {},
<|fim▁hole|> */
example: {},
/**
* Returns the namespace specified and creates it if it doesn't exist
*
* YAHOO.namespace("property.package");
* YAHOO.namespace("YAHOO.property.package");
*
* Either of the above would create YAHOO.property, then
* YAHOO.property.package
*
* @param {String} sNameSpace String representation of the desired
* namespace
* @return {Object} A reference to the namespace object
*/
namespace: function( sNameSpace ) {
if (!sNameSpace || !sNameSpace.length) {
return null;
}
var levels = sNameSpace.split(".");
var currentNS = YAHOO;
// YAHOO is implied, so it is ignored if it is included
for (var i=(levels[0] == "YAHOO") ? 1 : 0; i<levels.length; ++i) {
currentNS[levels[i]] = currentNS[levels[i]] || {};
currentNS = currentNS[levels[i]];
}
return currentNS;
}
};
} ();<|fim▁end|> | /**
* Yahoo presentation platform examples namespace |
<|file_name|>gandi_iface.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# Copyright 2013 Gandi SAS
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gandi_iface
version_added: "2.0"
short_description: create, attach, detach or delete Gandi network interfaces
description:
- Manage Gandi network interfaces
options:
state:
description:
- desired state of the resource
required: false
default: "created"
choices: ["created", "deleted"]
aliases: []
datacenter:
description:
- datacenter location for servers
required: true
choices: ["Saint Denis", "Bissen", "Baltimore"]
  bandwidth:
    description:
      - bandwidth of the interface in bits/s (float)
required: false
vlan:
description:<|fim▁hole|> required: false
default: null
ip_address:
description:
      - CIDR IPv4|IPv6 address of the interface on the vlan (str)
required: false
default: null
ip_version:
description:
- ip version of the interface (str)
required: false
default: null
requirements: [ "libcloud" ]
author: Eric Garrigues <[email protected]>
'''
EXAMPLES = '''
# Basic provisioning example. Create a new iface on vlan mypvlan
# Luxembourg datacenter
- gandi_iface:
vlan: mypvlan
datacenter: "Bissen"
ip_address: 192.168.0.1
ip_version: 4
bandwidth: 50000.0
'''
import sys
USER_AGENT_PRODUCT = "Ansible-gandi"
USER_AGENT_VERSION = "v0.1"
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.gandi import GandiException
_ = Provider.GANDI
except ImportError:
print("failed=True " +
"msg='libcloud with Gandi support required for this module'")
sys.exit(1)
# Load in the libcloud secrets file
try:
import secrets
except ImportError:
secrets = None
ARGS = getattr(secrets, 'GANDI_PARAMS', ())
if not ARGS:
print("failed=True " +
"msg='Missing Gandi connection in libcloud secrets file.'")
sys.exit(1)
def unexpected_error_msg(error):
"""Create an error string based on passed in error."""
# XXX : better error management
return error
def _get_by_name(name, entities):
find = [x for x in entities if x.name == name]
return find[0] if find else None
def _get_by_id(id, entities):
find = [x for x in entities if x.id == id]
return find[0] if find else None
def get_datacenter(driver, name):
"""Get datacenter by name
"""
dcs = driver.list_locations()
return _get_by_name(name, dcs)
def get_pvlan(driver, name):
pvlans = driver.ex_list_pvlans()
return _get_by_name(name, pvlans)
def get_iface(driver, id):
ifaces = driver.ex_list_ifaces()
return _get_by_id(id, ifaces)
def get_iface_info(iface):
"""Retrieves interface information from an interace object and returns it
as a dictionary.
"""
return({
'vlan': not iface.vlan is None and iface.vlan.name or None,
'bandwidth': iface.extra.get('bandwidth'),
'datacenter_id': iface.extra.get('datacenter_id')
})
def create_iface(module, driver):
"""Creates a new pvlan.
module : AnsibleModule object
driver: authenticated libcloud driver on Gandi provider
Returns:
        A Dictionary with information about the interface that was created.
"""
iface = {}
ip_address = module.params.get('ip_address')
ip_version = module.params.get('ip_version')
pvlan_name = module.params.get('vlan')
bandwidth = module.params.get('bandwidth')
datacenter = module.params.get('datacenter')
changed = False
lc_location = get_datacenter(driver, datacenter)
if not lc_location:
module.fail_json(msg='Invalid datacenter %s' % datacenter,
changed=False)
pvlan = get_pvlan(driver, pvlan_name)
# module.fail_json(msg=pvlan, changed=False)
if not pvlan and not ip_version:
module.fail_json(msg='ip_version is mandatory when not a vlan',
changed=False)
try:
iface = driver.ex_create_iface(location=lc_location,
ip_version=ip_version,
ip_address=ip_address,
vlan=pvlan,
                                       bandwidth=bandwidth)
changed = True
except GandiException as e:
        module.fail_json(msg='Unexpected error attempting to create iface: %s' % e)
iface_json_data = get_iface_info(iface)
return (changed, iface_json_data)
def delete_iface(module, driver, iface_id):
"""Delete an interface.
module: Ansible module object
driver: authenticated Gandi connection object
iface_id: int id of the interface
    Returns a dictionary with the operation status.
"""
changed = False
pvlan = None
try:
iface = get_iface(driver, iface_id)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if iface:
driver.ex_delete_iface(iface)
changed = True
return (changed, iface_id)
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(choices=['created', 'deleted'],
default='created'),
datacenter=dict(default='Bissen'),
ip_version=dict(),
ip_address=dict(),
vlan=dict(),
            bandwidth=dict(),
            iface_id=dict()
)
)
ip_version = module.params.get('ip_version')
ip_address = module.params.get('ip_address')
vlan_name = module.params.get('vlan')
bandwidth = module.params.get('bandwidth')
state = module.params.get('state')
    dc = module.params.get('datacenter')
    iface_id = module.params.get('iface_id')  # interface id, used when state is 'deleted'
changed = False
try:
gandi = get_driver(Provider.GANDI)(*ARGS)
gandi.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if not dc and state in ['created']:
module.fail_json(msg='Must specify a "datacenter"', changed=False)
json_output = {'datacenter': dc}
if state in ['deleted']:
json_output['state'] = 'deleted'
(changed, iface_id) = delete_iface(module, gandi, iface_id)
json_output['iface_id'] = iface_id
elif state in ['created']:
json_output['state'] = 'created'
(changed, iface_data) = create_iface(module, gandi)
json_output['iface_data'] = iface_data
json_output['changed'] = changed
    print(json.dumps(json_output))
sys.exit(0)
from ansible.module_utils.basic import *
main()<|fim▁end|> | - private vlan name the interface belongs to (str) |
<|file_name|>main.js<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2011-2013 Lp digital system
*
* This file is part of BackBee.
*
* BackBee is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* BackBee is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of<|fim▁hole|> * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with BackBee. If not, see <http://www.gnu.org/licenses/>.
*/
require.config({
paths: {
'app.content': 'src/tb/apps/content',
//Controllers
'content.main.controller': 'src/tb/apps/content/controllers/main.controller',
//Routing
'content.routes': 'src/tb/apps/content/routes',
//Repositories
'content.repository': 'src/tb/apps/content/repository/content.repository',
'revision.repository': 'src/tb/apps/content/repository/revision.repository',
'resource.repository': 'src/tb/apps/content/repository/resource.repository',
'keyword.repository': 'src/tb/apps/content/repository/keyword.repository',
'crop.repository': 'src/tb/apps/content/repository/crop.repository',
//DataStore
'media.datastore': 'src/tb/component/medialibrary/datastore/media.datastore',
//Components
'content.manager': 'src/tb/apps/content/components/ContentManager',
'content.dnd.manager': 'src/tb/apps/content/components/DndManager',
'content.mouseevent.manager': 'src/tb/apps/content/components/MouseEventManager',
'content.save.manager': 'src/tb/apps/content/components/SaveManager',
'content.container': 'src/tb/apps/content/components/ContentContainer',
'definition.manager': 'src/tb/apps/content/components/DefinitionManager',
'content.breadcrumb': 'src/tb/apps/content/components/Breadcrumb',
'content.pluginmanager': 'src/tb/apps/content/components/PluginManager',
//Widgets
'content.widget.DialogContentsList': 'src/tb/apps/content/widgets/DialogContentsList',
'content.widget.Breadcrumb': 'src/tb/apps/content/widgets/Breadcrumb',
'content.widget.Edition': 'src/tb/apps/content/widgets/Edition',
'content.widget.DialogAddToMediaLibrary': 'src/tb/apps/content/widgets/DialogAddToMediaLibrary',
//Models
'content.models.AbstractContent': 'src/tb/apps/content/models/AbstractContent',
'content.models.Content': 'src/tb/apps/content/models/Content',
'content.models.ContentSet': 'src/tb/apps/content/models/ContentSet',
'content.models.ContentRevision': 'src/tb/apps/content/models/ContentRevision',
//Templates
'content/tpl/button': 'src/tb/apps/content/templates/button.twig',
'content/tpl/content_breadcrumb': 'src/tb/apps/content/templates/content-breadcrumb.twig',
'content/tpl/contribution/index': 'src/tb/apps/content/templates/contribution.index.twig',
'content/tpl/edit/contribution/index': 'src/tb/apps/content/templates/edit-contribution.index.twig',
'content/tpl/carousel_blocks': 'src/tb/apps/content/templates/carousel-blocks.twig',
'content/tpl/block_display': 'src/tb/apps/content/templates/block-display.twig',
'content/tpl/dropzone': 'src/tb/apps/content/templates/dropzone.twig',
'content/tpl/content-action': 'src/tb/apps/content/templates/content-action.twig',
'content/tpl/breadcrumb': 'src/tb/apps/content/templates/breadcrumb.twig',
'content/tpl/scrollzone': 'src/tb/apps/content/templates/scrollzone.twig',
'content/tpl/block_description': 'src/tb/apps/content/templates/block-description.twig',
'content/tpl/block_informations': 'src/tb/apps/content/templates/block-informations.twig',
'content/tpl/rollback_items': 'src/tb/apps/content/plugins/rollback/templates/items.twig',
//Views
'content.view.contribution.index': 'src/tb/apps/content/views/content.view.contribution.index',
'content.view.edit.contribution.index': 'src/tb/apps/content/views/content.view.edit_contribution.index'
}
});
define("app.content", ['Core', 'content.pluginmanager'], function (Core) {
'use strict';
Core.ApplicationManager.registerApplication('content', {
onInit: function () {
Core.ApplicationManager.launchApplication('contribution');
}
});
});<|fim▁end|> | |
<|file_name|>node2d.js<|end_file_name|><|fim▁begin|>/**
* Created with JetBrains WebStorm.
* User: gbox3d
* Date: 13. 3. 30.
 * Time: 3:35 PM
 * version : 0.8
 * it is part of the pig2d engine
 * this engine is based on html5 css3
 Last modified 2015.11.5
 - fixed: the background color was not being applied, 2015.11.5
*/
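/*
 * A rough usage sketch (the container element id and image path below are
 * assumptions for illustration, not part of the engine itself):
 *
 *   var scene = new Pig2d.SceneManager({
 *       container: document.getElementById('stage'),
 *       window_size: { width: 640, height: 480 }
 *   });
 *   var node = scene.addImageNode({ src: 'img/hero.png' });
 *   scene.updateAll(0.016); // flush transforms once per frame (delta time in seconds)
 */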
Pig2d = {
version : '1.0.0'
};
/////////////////////
///model
Pig2d.model = Backbone.Model.extend({
initialize: function() {
var element = document.createElement('div');
var name = this.get('name');
if(name != undefined) {
element.setAttribute('id',name);
}
        // for versions prior to 2.3
if(element.classList != undefined) {
element.classList.add('pig2d-node');
}
else {
$(element).addClass('pig2d-node');
}
this.attributes.element = element;
this.attributes.update_signal = 'none';
this.attributes.translation = new gbox3d.core.Vect2d(0,0);
this.attributes.scale = new gbox3d.core.Vect2d(1,1);
//this.attributes.matrix = mat2d.create();
//this.attributes.matrix = new WebKitCSSMatrix();
this.attributes.flipX = false;
this.attributes.flipY = false;
this.attributes.cssupdate = true;
this.attributes.cancelTransition = false;
this.attributes.offset = {
x:0,
y:0
};
},
defaults: {
rotation : 0
},
getPosition : function() {
//if(decompose == true) {
            // apply after decomposing the matrix
// this.decomposeCssMatrix(this.getCssMatrix());
//}
return this.attributes.translation;
},
getRotation : function() {
return this.attributes.rotation;
},
setPosition : function(x,y) {
this.attributes.translation.set(x,y);
return this;
},
setRotation : function(angle) {
this.attributes.rotation = angle;
return this;
},
rotate : function(angle_delata) {
this.attributes.rotation += angle_delata;
return this;
},
setScale : function(x,y) {
this.attributes.scale.set(x,y);
return this;
},
getScale : function() {
return this.attributes.scale;
},
translate: function () {
var v1 = new gbox3d.core.Vect2d();
var center = new gbox3d.core.Vect2d(0,0);
return function ( distance, axis ) {
// axis is assumed to be normalized
v1.copy( axis );
v1.multiply( distance );
v1.rotate( gbox3d.core.degToRad(-this.attributes.rotation),center);
if(this.attributes.flipX) {
v1.X *= -1;
}
if(this.attributes.flipY) {
v1.Y *= -1;
}
this.attributes.translation.addToThis( v1 );
return this;
};
}(),
show : function(visible) {
this.get('element').style.visibility = visible ? 'inherit' : 'hidden';
},
isVisible : function() {
return (this.get('element').style.visibility == 'hidden') ? false : true;
},
/////////////////////
    //// matrix-related
//////////////////
getCssMatrix : function() {
var el = this.get('element');
var computedStyle = window.getComputedStyle(el);
var trans = computedStyle.getPropertyValue('-webkit-transform');
var cssmat = new WebKitCSSMatrix(trans);
return cssmat;
},
    // decompose the given matrix and apply the result back to the node transform values
decomposeCssMatrix : function(cssmat) {
//var cssmat = this.getCssMatrix();
        // get the translation
this.attributes.translation.X = cssmat.e;
this.attributes.translation.Y = cssmat.f;
        // get the scale
var scalex = Math.sqrt(cssmat.a*cssmat.a + cssmat.b*cssmat.b);
var scaley = Math.sqrt(cssmat.c*cssmat.c + cssmat.d*cssmat.d);
this.attributes.scale.X = scalex;
this.attributes.scale.Y = scaley;
        // get the rotation
var angle = Math.round(Math.atan2(cssmat.b/scalex, cssmat.a/scalex) * (180/Math.PI));
this.attributes.rotation = angle;
},
getDecomposePosition : function() {
var cssmat = this.getCssMatrix();
return new gbox3d.core.Vect2d(cssmat.e,cssmat.f);
},
////////////// animator
setupTransition : function(param) {
var element = this.get('element');
element.style.WebkitTransition = '';
this.attributes.TransitionEndCallBack = param.TransitionEndCallBack;
if(this.attributes._TransitionEndCallBack != undefined) {
element.removeEventListener('webkitTransitionEnd',this.attributes._TransitionEndCallBack);
}
this.attributes._TransitionEndCallBack = function(event) {
if(this.attributes.cancelTransition == true) {
this.attributes.cancelTransition = false;
}
else {
this.attributes.cssupdate = true;
element.style.WebkitTransition = '';
if(this.attributes.TransitionEndCallBack != undefined) {
this.attributes.TransitionEndCallBack.apply(this);
}
}
            // stop event propagation
event.cancelBubble = true;
event.stopPropagation();
}.bind(this);
element.addEventListener('webkitTransitionEnd',this.attributes._TransitionEndCallBack,false);
// if(param.timing_function != undefined) {
// element.style.webkitTransitionTimingFunction = 'linear';
// }
return this;
},
transition : function(param) {
var element = this.get('element');
param.timing_function = param.timing_function ? param.timing_function : 'linear';
if(element.style.WebkitTransition !== '')
return;
if(param.position != undefined) {
if(param.position.X == this.attributes.translation.X && param.position.Y == this.attributes.translation.Y ) {
}
else {
if(element.style.WebkitTransition === '') {
element.style.WebkitTransition = '-webkit-transform ' + param.time + 's ' + param.timing_function;
this.setPosition(param.position.X,param.position.Y);
}
}
}
if(param.rotation != undefined) {
if(param.rotation == this.attributes.rotation) {
}
else {
if(element.style.WebkitTransition === '') {
element.style.WebkitTransition = '-webkit-transform ' + param.time + 's '+ param.timing_function;
}
this.setRotation(param.rotation);
}
}
if(param.scale != undefined) {
if(param.scale.X == this.attributes.scale.X && param.scale.Y == this.attributes.scale.Y) {
}
else {
if(element.style.WebkitTransition === '') {
element.style.WebkitTransition = '-webkit-transform ' + param.time + 's ' + param.timing_function;
}
this.setScale(param.scale.X,param.scale.Y);
}
}
},
stopTransition : function(param) {
this.attributes.update_signal = 'stop_transition';
this.attributes.cancelTransition = true;
return this;
},
clearTransition : function() {
var el = this.get('element');
el.removeEventListener('webkitTransitionEnd',this.attributes._TransitionEndCallBack);
this.attributes.update_signal = 'stop_transition';
},
////////////////////
updateCSS : function() {
//if(this.attributes.cssupdate == false) return;
var el = this.get('element');
switch (this.attributes.update_signal) {
case 'none':
(function() {
                // push the object's transform values to the element
if(this.attributes.cssupdate == true) {
var trans = this.attributes.translation;
var rot = this.attributes.rotation;
var scalex = this.attributes.scale.X;
var scaley = this.attributes.scale.Y;
                    // apply flip
if(this.attributes.flipX) {
scaley = -scaley;
}
if(this.attributes.flipY) {
scalex = -scalex;
}
var css_val = 'translate(' + trans.X + 'px,' + trans.Y +'px) ' +
'rotate(' + rot + 'deg) ' +
'scale(' + scalex + ',' + scaley + ')';
                    // for browser compatibility
el.style.WebkitTransform = css_val;
el.style.MozTransform = css_val;
el.style.oTransform = css_val;
el.style.transform = css_val;
                    // while a transition is active, stop further CSS updates
if(el.style.WebkitTransition !== '') {
this.attributes.cssupdate = false;
}
}
else {
                    // currently in a transition, so the cancel request is ignored here
this.attributes.cancelTransition = false;
}
}).bind(this)();
break;
case 'stop_transition':
(function() {
                // apply after decomposing the matrix
this.decomposeCssMatrix(this.getCssMatrix());
el.style.WebkitTransition = '';
this.attributes.update_signal = 'none';
this.attributes.cssupdate = true;
this.updateCSS();
}).bind(this)();
break;
}
return this;
},
///////////////////////////////////////////
setupCssAnimation : function(option) {
var element = this.get('element');
element.style.WebkitAnimationName = option.name;
element.style.WebkitAnimationDuration = option.duration;
if(option.timing_function) {
element.style.WebkitAnimationTimingFunction = option.timing_function;
}
if(option.delay) {
element.style.WebkitAnimationDelay = option.delay;
}
if(option.direction) {
element.style.WebkitAnimationDirection = option.direction;
}
if(option.iteration_count) {
element.style.WebkitAnimationIterationCount = option.iteration_count;
}
element.style.WebkitAnimationPlayState = 'running';
this.attributes.CssAnimationEndCallBack = option.EndCallBack;
if(this.attributes._CssAnimationEndCallBack != undefined) {
element.removeEventListener('webkitAnimationEnd',this.attributes._CssAnimationEndCallBack);
}
this.attributes._CssAnimationEndCallBack = function(event) {
element.style.WebkitAnimation = '';
if(this.attributes.CssAnimationEndCallBack != undefined) {
this.attributes.CssAnimationEndCallBack.apply(this);
}
            // stop event propagation
event.cancelBubble = true;
event.stopPropagation();
}.bind(this);
<|fim▁hole|> element.addEventListener('webkitAnimationEnd',this.attributes._CssAnimationEndCallBack,false);
return this;
},
//////////////////////////
    // used when removing this model completely from the node tree
destroy : function() {
var el = this.get('element');
//el.removeEventListener('webkitTransitionEnd');
this.clearTransition();
el.parentNode.removeChild(el);
},
clone : function() {
var model = Backbone.Model.prototype.clone.call(this);
// console.log(model);
model.set("element",this.get('element').cloneNode(true));
return model;
}
});
//end of base model
//////////////////////
Pig2d.SpriteModel = Pig2d.model.extend({
initialize: function(param) {
Pig2d.model.prototype.initialize.call(this);
this.attributes.currentFrame = 0;
        // animation timer handle
this.attributes.animationHID = null;
var sheet = document.createElement('canvas');
sheet.classList.add('pig2d-sheet');
sheet.style.position = 'absolute';
this.get('element').appendChild(sheet);
this.set('sheet',sheet);
this.set('sheetCTX',sheet.getContext('2d'));
this.attributes.currentTick = 0;
this.attributes.scaler = 1;
if(this.attributes.data.canvas_size) {
sheet.width = this.attributes.data.canvas_size.width;
sheet.height = this.attributes.data.canvas_size.height;
}
        // clear the canvas; on some Samsung phones an afterimage remains if it is not initialized
//this.get('sheetCTX').clearRect(0,0,sheet.width,sheet.height);
//this.setFrame(-1);
//this.attributes.AnimationStatus = 'ready';
},
setScaler : function(scale) {
this.attributes.scaler = scale;
if(this.attributes.data.canvas_size) {
var sheet = this.get('sheet');
this.attributes.data.canvas_size.width *= scale;
this.attributes.data.canvas_size.height *= scale;
sheet.width = this.attributes.data.canvas_size.width;
sheet.height = this.attributes.data.canvas_size.height;
}
},
changeDress : function(param) {
this.attributes.imgObj = param.texture;
this.attributes.data = param.animation;
var sheet = this.get('sheet');
if(this.attributes.data.canvas_size) {
sheet.width = this.attributes.data.canvas_size.width;
sheet.height = this.attributes.data.canvas_size.height;
}
this.setFrame(this.attributes.currentFrame);
},
clone : function() {
var model = Backbone.Model.prototype.clone.call(this);
console.log('SpriteModel clone');
//model.set("element",this.get('element').cloneNode(true));
return model;
},
updateCSS : function (deltaTime) {
deltaTime = deltaTime || 0;
this.applyAnimation(deltaTime);
return Pig2d.model.prototype.updateCSS.call(this);
},
//////////////////////////////////////////////
    // animation-related functionality
//////////////////////////////////////////////
setFrame : function(index) {
        // get the frame node
var imgObj = this.attributes.imgObj;
if(this.attributes.data.frames.length <= index) {
console.log('error exeed frame number : ' + index + ',' + this.attributes.data.frames.length);
index = 0;
}
if(imgObj != undefined) {
this.set('currentFrame',index);
var sheet = this.attributes.sheet;
var ctx = this.attributes.sheetCTX;
/*
            Why a blank frame exists:
            on some phones (notably Samsung), the first image rendered after a canvas is created
            is not cleared and remains on screen, so when the canvas is first created
            (changeDress, createSprite) a blank frame must be drawn to the screen once.
*/
            if(index < 0) { // if this is the blank frame..
if(this.attributes.data.canvas_size) {
sheet.width = this.attributes.data.canvas_size.width;
sheet.height = this.attributes.data.canvas_size.height;
ctx.clearRect(0,0,this.attributes.data.canvas_size.width,this.attributes.data.canvas_size.height);
}
}
else {
var frame = this.attributes.data.frames[this.attributes.currentFrame];
//console.log(this.attributes.currentFrame);
var sheet_data = frame.sheets[0];
var scaler = this.attributes.scaler;
if(this.attributes.data.canvas_size) {
ctx.clearRect(0,0,this.attributes.data.canvas_size.width,this.attributes.data.canvas_size.height);
//sheet.width = 1;
//sheet.width = this.attributes.data.canvas_size.width;
}
else {
sheet.width = sheet_data.width;
sheet.height = sheet_data.height;
}
var offsetX = sheet_data.centerOffset.x;
var offsetY = sheet_data.centerOffset.y;
var destW = sheet_data.width;
var destH = sheet_data.height;
var cutx = -sheet_data.bp_x;
var cuty = -sheet_data.bp_y;
var srcW = sheet_data.width;
var srcH = sheet_data.height;
if(scaler < 1.0) {
offsetX *= scaler;
offsetY *= scaler;
destW *= scaler;
destH *= scaler;
}
sheet.style.webkitTransform = "translate(" + offsetX + "px," + offsetY + "px)";
ctx.drawImage(
imgObj,
cutx,cuty,srcW,srcH,
0,0,destW,destH
);
}
}
return this;
},
/////////////////////////////////////////////
/////new animation system////////////////////
/////////////////////////////////////////////
setupAnimation : function(param) {
param = param ? param : {};
this.attributes.startFrame = param.startFrame ? param.startFrame : 0 ;
this.attributes.endFrame = param.endFrame ? param.endFrame : (this.get('data').frames.length-1);
if(param.isAnimationLoop !== undefined) {
this.attributes.isAnimationLoop = param.isAnimationLoop;
}
else {
this.attributes.isAnimationLoop = true;
}
this.attributes.AnimationEndCallback = param.AnimationEndCallback;
this.attributes.AnimationStatus = param.AnimationStatus ? param.AnimationStatus : 'stop';
this.setFrame(this.attributes.startFrame);
},
applyAnimation : function(delataTick) {
if(this.attributes.AnimationStatus == 'play') {
this.attributes.currentTick += delataTick;
var frameindex = this.attributes.currentFrame;
var Ani_data = this.get('data');
var delay = 300;
if(frameindex >= 0) {
delay = Ani_data.frames[frameindex].delay / 1000;
}
//var delay = Ani_data.frames[frameindex].delay / 1000;
if(this.attributes.currentTick > delay) {
this.attributes.currentTick = 0;
++frameindex;
                if(frameindex > this.attributes.endFrame) {// if this is the last frame
if(this.attributes.isAnimationLoop) {
frameindex = this.attributes.startFrame;
this.setFrame(frameindex);
}
else {
this.attributes.AnimationStatus = 'stop';
frameindex = this.attributes.endFrame;
}
if(this.attributes.AnimationEndCallback != undefined) {
this.attributes.AnimationEndCallback.bind(this)();
}
}
else {
this.setFrame(frameindex);
}
}
}
else if(this.attributes.AnimationStatus == 'ready') {
this.setFrame(-1);
this.attributes.AnimationStatus = 'play';
this.attributes.currentFrame = this.attributes.startFrame;
}
},
stopAnimation : function() {
this.attributes.AnimationStatus = 'stop';
},
////////////////////////
destroy : function() {
//this.stop_animate();
        // call the superclass
Pig2d.model.prototype.destroy.call(this);
}
});
//end of sprite model
///////////////////////
//////////////////node//
/////////////////////////
Pig2d.node = Backbone.Model.extend({
initialize: function() {
this.attributes.children = this.attributes.chiledren = new Array();
// _.bindAll(this,"update","clone");
},
traverse : function(callback,param) {
callback.bind(this)(param);
for(var index = 0;index < this.attributes.chiledren.length;index++ ) {
this.attributes.chiledren[index].traverse(callback,param);
}
},
update: function(applyChild,deltaTime) {
this.get('model').updateCSS(deltaTime);
if( applyChild == true) {
for(var index = 0;index < this.attributes.chiledren.length;index++ ) {
this.attributes.chiledren[index].update(applyChild,deltaTime);
}
}
return this;
},
clone : function() {
        // deep cloning
var node = Backbone.Model.prototype.clone.call(this);
if(node.get('model')) {
var model = node.get('model').clone();
node.set({model:model});
}
var chiledren = this.get('chiledren');
for(var i=0;i<chiledren.length;i++) {
node.add(chiledren[i].clone());
}
return node;
},
findByName : function(name) {
if(name == this.attributes.name) return this;
for(var index in this.attributes.chiledren ) {
var obj = this.attributes.chiledren[index].findByName(name);
if(obj != null)
return obj;
}
return null;
},
findByID : function(cid) {
if(cid == this.cid) return this;
for(var index in this.attributes.chiledren ) {
var obj = this.attributes.chiledren[index].findByID(cid);
if(obj != null)
return obj;
}
return null;
},
add : function(child_node,parents) {
if(parents == undefined || parents == null) {
parents = this;
}
parents.get('chiledren').push(child_node);
//child_node.setParent(parents);
        // if a model exists
if(parents.get('model')) {
var par_el = parents.get('model').get('element');
var child_el = child_node.get('model').get('element');
            par_el.appendChild(child_el);
        }
child_node.attributes.parent = parents;
return this;
},
    // change the parent node
setParent : function(parent) {
var old_parent = this.get('parent');
var chiledren = old_parent.get('chiledren');
for(var i= chiledren.length-1;i >= 0;i--) {
if(chiledren[i] === this) {
chiledren.splice(i,1);
parent.add(this);
}
}
},
removeChild : function(node) {
for(var i= this.attributes.chiledren.length-1;i >= 0;i--) {
var _node = this.attributes.chiledren[i];
if(_node === node) {
this.attributes.chiledren.splice(i,1);
node.get('model').destroy();
return true;
}
else {
                _node.removeChild(node); // also check the child nodes
}
}
return false;
},
removeChildAll : function() {
for(var i= this.attributes.chiledren.length-1;i >= 0;i--) {
this.removeChild(this.attributes.chiledren[i]);
}
return false;
},
show : function(visible) {
//console.log(this.get('model').get('element'));
//this.get('model').get('element').style.visibility = visible ? 'inherit' : 'hidden';
this.get('model').show(visible);
},
isVisible : function() {
//return (this.get('model').get('element').style.visibility == 'hidden') ? false : true;
return this.get('model').isVisible();
}
});
//end of node
///////////////
///
Pig2d.SceneManager = Backbone.Model.extend({
initialize: function(param) {
var rootNode = new Pig2d.node(
{
model : new Pig2d.model({
name : 'root_' + (new Date()).getTime() + '_'
})
}
);
rootNode.get('model').setPosition(0,0);
//this.attributes.container.append(rootNode.get('model').get('element'));
var rootElement = rootNode.get('model').get('element');
//console.log(rootElement);
if(param.window_size != undefined) {
rootElement.style.overflow = 'hidden';
rootElement.style.width = param.window_size.width + 'px' ;
rootElement.style.height = param.window_size.height + 'px' ;
}
if(param.bkg_color != undefined) {
            // modified 2015.11.5, fixed a bug where the background color was not applied
this.attributes.container.style.backgroundColor = param.bkg_color;
}
this.attributes.container.appendChild(rootElement);
this.attributes.rootNode = rootNode;
},
getRootNode : function() {
return this.attributes.rootNode;
},
updateAll : function(deltaTime) {
deltaTime = deltaTime ? deltaTime : 0.01;
this.attributes.rootNode.update(true,deltaTime);
},
add : function(node,parent) {
if(parent == undefined) {
this.attributes.rootNode.add(node);
}
else {
parent.add(node);
}
},
addImageNode : function(param) {
//var node = Pig2d.util.createImage(param.img_info);
//this.add(node,param.parent);
var center_x = param.center ? param.center.x : 0;
var center_y = param.center ? param.center.y : 0;
var node = Pig2d.util.createDummy();
var imgObj = new Image();
imgObj.onload = function(evt) {
//console.log(this.width);
imgObj.style.position = 'absolute';
imgObj.style.left = -this.width/2 + parseInt(center_x) + 'px';
imgObj.style.top = -this.height/2 + parseInt(center_y) + 'px';
var element = node.get('model').get('element');
element.appendChild(imgObj);
node.get('model').set('imgObj', imgObj);
if(param.onload) {
param.onload(node);
}
}
imgObj.src = param.src;
this.add(node,param.parent);
return node;
},
addSpriteSceneNode : function(param) {
var node = Pig2d.util.createSprite(param.spr_info);
node.show(true);
this.add(node,param.parent);
return node;
}
});
//end of scene manager<|fim▁end|> | |
<|file_name|>generate_pie_charts.py<|end_file_name|><|fim▁begin|>"""
Author: Sam Ginzburg
Description: This script reads in a blast2go sequence table output of GO Term mappings, and calculates frequencies of GO Terms at specific GO Levels
Example run:
python generate_pie_charts.py [blast2go_file.txt] [GO Level]
"""
import sys
from GeneOntologyLibrary import obo_parser
from GeneOntologyLibrary import go_term as gt
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
def parse_go_terms_by_go(go_counts, go, go_type, term_name):
if go_type == "molecular function":
if go_counts[0].get(go) is None:
go_counts[0][go] = 1
else:
go_counts[0][go] += 1
if go_type == "biological process":
if go_counts[1].get(go) is None:
go_counts[1][go] = 1
else:
go_counts[1][go] += 1
if go_type == "cellular component":
if go_counts[2].get(go) is None:
go_counts[2][go] = 1
else:
go_counts[2][go] += 1
def parse_go_mappped_file(go_counts, string):
#print (string)
if ";" in string:
string = string.split(";") # splits the column by ;
else:
string = [string]
#print("splitstring: " + str(split_string))
return_list = list()
for go_term in string:
go_term = go_term.strip()
if go_term == "-":
continue
if "P:" in go_term or "Biological Process:" in go_term:
go_term = go_term[2:]
if go_counts[0].get(go_term) is None:
go_counts[0][go_term] = 1
else:
go_counts[0][go_term] += 1
if "F:" in go_term or "Molecular Function:" in go_term:
go_term = go_term[2:]
if go_counts[1].get(go_term) is None:
go_counts[1][go_term] = 1<|fim▁hole|> else:
go_counts[1][go_term] += 1
if "C:" in go_term or "Cellular Component:" in go_term:
go_term = go_term[2:]
if go_counts[2].get(go_term) is None:
go_counts[2][go_term] = 1
else:
go_counts[2][go_term] += 1
#print (go_term)
return_list.append(go_term)
return return_list
"""
def filter_by_level(go_dict, level, parser):
for key in dict(go_dict):
go_term_object = parser.go_term_by_name_dict.get(key[2:])
if go_term_object is None:
print ("None -- error has occured:\t" + key[2:])
exit()
else:
print (key)
print ("level:\t" + str(go_term_object[0].calculate_level()))
if go_term_object[0].calculate_level() != int(level):
del go_dict[key]
"""
def filter_by_level(go_dict, level, parser, go_dict_type):
if go_dict_type == "biological_process":
filtered = [x for x in set(go_dict.keys()) & set([gterm.name for gterm in set(parser.get_biological_process_go_terms_by_level(int(level)))])]
if go_dict_type == "molecular_function":
filtered = [x for x in set(go_dict.keys()) & set([gterm.name for gterm in set(parser.get_molecular_function_go_terms_by_level(int(level)))])]
if go_dict_type == "cellular_component":
filtered = [x for x in set(go_dict.keys()) & set([gterm.name for gterm in set(parser.get_cellular_component_go_terms_by_level(int(level)))])]
#print ("filtered:\t" + str(filtered))
ret_dict = dict()
for key in filtered:
ret_dict[key] = go_dict[key]
return ret_dict
def generate_counts(go_dict, parser):
#print (sum(go_dict.values()))
#print (len(go_dict))
for key in dict(go_dict):
go_term_object = parser.go_term_by_name_dict.get(key)
if go_term_object is None:
print ("None -- error has occured:\t" + key)
exit()
else:
for x in range(0, go_dict[key]):
gt.propogate_go_term(go_term_object[0])
#exit()
def save_graph(go_dict, chart_type, level, parser):
fontP = FontProperties()
fontP.set_size('small')
# The slices will be ordered and plotted counter-clockwise.
figure = plt.figure(figsize=(10,10))
labels = go_dict.keys()
sizes = [parser.go_term_by_name_dict.get(x)[0].encountered_count for x in go_dict]
#sizes = go_dict.values()
#print (chart_type)
#print (zip(labels, sizes))
#print (sum(sizes))
plt.title('Graph Level %s Pie Chart [%s]' % (level, chart_type))
total = sum(sizes)
labels = [l+" "+str(float(s)/total * 100)[0:4]+"% ("+ str(s) + ")" for l,s in zip(labels, sizes)]
patches, texts = plt.pie(sizes, startangle=90)
plt.legend(patches, labels, prop = fontP, loc="best")
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
#plt.tight_layout()
#plt.show()
print (chart_type)
out = [str(x) + "\t" + str(parser.go_term_by_name_dict.get(x)[0].encountered_count) for x in go_dict]
for x in out:
print (x)
print ("\n")
figure.savefig(chart_type+"_level_"+level+'.png',aspect='auto',dpi=100)
if __name__ == '__main__':
args = sys.argv
args = args[1:]
# these dicts store the name of the GO term and the number of times it occurs
combined = dict()
biological_process = dict()
molecular_function = dict()
cellular_component = dict()
go_counts = [biological_process, molecular_function, cellular_component]
gene_go_term_dict = dict() # key = SeqName description, value = list of gene ontology terms corresponding to the gene
with open(args[0], "r") as f:
for line in f:
line = line.split("\t")
gene_go_term_dict[line[0]] = parse_go_mappped_file(go_counts, line[7])
"""
# remove all genes with no go terms at all
for key in dict(gene_go_term_dict):
if len(gene_go_term_dict[key]) < 1:
del gene_go_term_dict[key]
"""
#print (gene_go_term_dict)
#print (len(gene_go_term_dict))
print ("Number of unique biological processes go terms:\t" + str(len(biological_process)))
print ("Number of unique molecular function go terms:\t" + str(len(molecular_function)))
print ("Number of unique cellular compontent go terms:\t" + str(len(cellular_component)))
print ("Number of unique overall go terms:\t" + str(len(biological_process) + len(molecular_function) + len(cellular_component)))
print ("Number of molecular function go terms:\t" + str(sum(molecular_function.values())))
print ("Number of biological process go terms:\t" + str(sum(biological_process.values())))
print ("Number of cellular component go terms:\t" + str(sum(cellular_component.values())))
parser = obo_parser("go.obo")
parser.build_obo_file()
generate_counts(biological_process, parser)
generate_counts(molecular_function, parser)
generate_counts(cellular_component, parser)
#print (sum(biological_process.values()))
biological_process = filter_by_level(biological_process,args[1], parser, "biological_process")
molecular_function = filter_by_level(molecular_function,args[1], parser, "molecular_function")
cellular_component = filter_by_level(cellular_component,args[1], parser, "cellular_component")
"""
print (biological_process.keys())
print(parser.go_term_by_name_dict.get("biological_process")[0].encountered_count)
print (molecular_function.keys())
print(parser.go_term_by_name_dict.get("molecular_function")[0].encountered_count)
"""
#save_graph(molecular_function, "Molecular Function", str(2), parser)
combined = dict(biological_process)
combined.update(molecular_function)
combined.update(cellular_component)
print ("Number of unique biological processes go terms after filtering by level:\t" + str(len(biological_process)))
print ("Number of unique molecular function go terms after filtering by level:\t" + str(len(molecular_function)))
print ("Number of unique cellular compontent go terms after filtering by level:\t" + str(len(cellular_component)))
print ("Number of unique overall go terms after filtering by level:\t" + str(len(combined)))
print ("Number of molecular function go terms after filtering by level:\t" + str(sum(molecular_function.values())))
print ("Number of biological process go terms after filtering by level:\t" + str(sum(biological_process.values())))
print ("Number of cellular component go terms after filtering by level:\t" + str(sum(cellular_component.values())))
"""
out = [str(x) + "\t" + str(parser.go_term_by_name_dict.get(x)[0].encountered_count) for x in cellular_component]
for x in out:
print (x)
"""
save_graph(biological_process, "Biological Process", args[1], parser)
save_graph(molecular_function, "Molecular Function", args[1], parser)
save_graph(cellular_component, "Cellular Component", args[1], parser)
save_graph(combined, "All", args[1], parser)<|fim▁end|> | |
<|file_name|>jspack.js<|end_file_name|><|fim▁begin|>/*!
* Copyright © 2008 Fair Oaks Labs, Inc.
* All rights reserved.
*/
// Utility object: Encode/Decode C-style binary primitives to/from octet arrays
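// A short usage sketch (assumes an instance such as `var jspack = new JSPack();`,
// created elsewhere by the library; the values are illustrative only):
//   var bytes = jspack.Pack('>HHI', [1, 2, 3]);   // big-endian: two uint16 values and one uint32
//   var vals  = jspack.Unpack('>HHI', bytes, 0);  // -> [1, 2, 3]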
function JSPack()
{
// Module-level (private) variables
var el, bBE = false, m = this;
// Raw byte arrays
m._DeArray = function (a, p, l)
{
return [a.slice(p,p+l)];
};
m._EnArray = function (a, p, l, v)
{
for (var i = 0; i < l; a[p+i] = v[i]?v[i]:0, i++);
};
// ASCII characters
m._DeChar = function (a, p)
{
return String.fromCharCode(a[p]);
};
m._EnChar = function (a, p, v)
{
a[p] = v.charCodeAt(0);
};
// Little-endian (un)signed N-byte integers
m._DeInt = function (a, p)
{
var lsb = bBE?(el.len-1):0, nsb = bBE?-1:1, stop = lsb+nsb*el.len, rv, i, f;
for (rv = 0, i = lsb, f = 1; i != stop; rv+=(a[p+i]*f), i+=nsb, f*=256);
if (el.bSigned && (rv & Math.pow(2, el.len*8-1))) { rv -= Math.pow(2, el.len*8); }
return rv;
};
m._EnInt = function (a, p, v)
{
var lsb = bBE?(el.len-1):0, nsb = bBE?-1:1, stop = lsb+nsb*el.len, i;
v = (v<el.min)?el.min:(v>el.max)?el.max:v;
for (i = lsb; i != stop; a[p+i]=v&0xff, i+=nsb, v>>=8);
};
// ASCII character strings
m._DeString = function (a, p, l)
{
for (var rv = new Array(l), i = 0; i < l; rv[i] = String.fromCharCode(a[p+i]), i++);
return rv.join('');
};
m._EnString = function (a, p, l, v)
{
for (var t, i = 0; i < l; a[p+i] = (t=v.charCodeAt(i))?t:0, i++);
};
// Little-endian N-bit IEEE 754 floating point
m._De754 = function (a, p)
{
var s, e, m, i, d, nBits, mLen, eLen, eBias, eMax;
mLen = el.mLen, eLen = el.len*8-el.mLen-1, eMax = (1<<eLen)-1, eBias = eMax>>1;
i = bBE?0:(el.len-1); d = bBE?1:-1; s = a[p+i]; i+=d; nBits = -7;
for (e = s&((1<<(-nBits))-1), s>>=(-nBits), nBits += eLen; nBits > 0; e=e*256+a[p+i], i+=d, nBits-=8);
for (m = e&((1<<(-nBits))-1), e>>=(-nBits), nBits += mLen; nBits > 0; m=m*256+a[p+i], i+=d, nBits-=8);
switch (e)
{
case 0:
// Zero, or denormalized number
e = 1-eBias;
break;
case eMax:
// NaN, or +/-Infinity
return m?NaN:((s?-1:1)*Infinity);
default:
// Normalized number
m = m + Math.pow(2, mLen);
e = e - eBias;
break;
}
return (s?-1:1) * m * Math.pow(2, e-mLen);
};
m._En754 = function (a, p, v)
{
var s, e, m, i, d, c, mLen, eLen, eBias, eMax;
mLen = el.mLen, eLen = el.len*8-el.mLen-1, eMax = (1<<eLen)-1, eBias = eMax>>1;
s = v<0?1:0;
v = Math.abs(v);
if (isNaN(v) || (v == Infinity))
{
m = isNaN(v)?1:0;
e = eMax;
}
else
{
e = Math.floor(Math.log(v)/Math.LN2); // Calculate log2 of the value
if (v*(c = Math.pow(2, -e)) < 1) { e--; c*=2; } // Math.log() isn't 100% reliable
// Round by adding 1/2 the significand's LSD
if (e+eBias >= 1) { v += el.rt/c; } // Normalized: mLen significand digits
else { v += el.rt*Math.pow(2, 1-eBias); } // Denormalized: <= mLen significand digits
if (v*c >= 2) { e++; c/=2; } // Rounding can increment the exponent
if (e+eBias >= eMax)
{
// Overflow
m = 0;
e = eMax;
}
else if (e+eBias >= 1)
{
// Normalized - term order matters, as Math.pow(2, 52-e) and v*Math.pow(2, 52) can overflow
m = (v*c-1)*Math.pow(2, mLen);
e = e + eBias;
}
else
{
// Denormalized - also catches the '0' case, somewhat by chance
m = v*Math.pow(2, eBias-1)*Math.pow(2, mLen);
e = 0;
}
}
for (i = bBE?(el.len-1):0, d=bBE?-1:1; mLen >= 8; a[p+i]=m&0xff, i+=d, m/=256, mLen-=8);
for (e=(e<<mLen)|m, eLen+=mLen; eLen > 0; a[p+i]=e&0xff, i+=d, e/=256, eLen-=8);
a[p+i-d] |= s*128;
};
// Class data
m._sPattern = '(\\d+)?([AxcbBhHsfdiIlLqQ])';
m._lenLut = {'A':1, 'x':1, 'c':1, 'b':1, 'B':1, 'h':2, 'H':2, 's':1, 'f':4, 'd':8, 'i':4, 'I':4, 'l':4, 'L':4, 'q':8, 'Q':8};
m._elLut = { 'A': {en:m._EnArray, de:m._DeArray},
's': {en:m._EnString, de:m._DeString},
'c': {en:m._EnChar, de:m._DeChar},
'b': {en:m._EnInt, de:m._DeInt, len:1, bSigned:true, min:-Math.pow(2, 7), max:Math.pow(2, 7)-1},
'B': {en:m._EnInt, de:m._DeInt, len:1, bSigned:false, min:0, max:Math.pow(2, 8)-1},
'h': {en:m._EnInt, de:m._DeInt, len:2, bSigned:true, min:-Math.pow(2, 15), max:Math.pow(2, 15)-1},
'H': {en:m._EnInt, de:m._DeInt, len:2, bSigned:false, min:0, max:Math.pow(2, 16)-1},
'i': {en:m._EnInt, de:m._DeInt, len:4, bSigned:true, min:-Math.pow(2, 31), max:Math.pow(2, 31)-1},
'I': {en:m._EnInt, de:m._DeInt, len:4, bSigned:false, min:0, max:Math.pow(2, 32)-1},
'l': {en:m._EnInt, de:m._DeInt, len:4, bSigned:true, min:-Math.pow(2, 31), max:Math.pow(2, 31)-1},
'L': {en:m._EnInt, de:m._DeInt, len:4, bSigned:false, min:0, max:Math.pow(2, 32)-1},
'f': {en:m._En754, de:m._De754, len:4, mLen:23, rt:Math.pow(2, -24)-Math.pow(2, -77)},
'd': {en:m._En754, de:m._De754, len:8, mLen:52, rt:0},
'q': {en:m._EnInt, de:m._DeInt, len:8, bSigned:true, min:-Math.pow(2, 63), max:Math.pow(2, 63)-1},
'Q': {en:m._EnInt, de:m._DeInt, len:8, bSigned:false, min:0, max:Math.pow(2, 64)-1}};
// Unpack a series of n elements of size s from array a at offset p with fxn
m._UnpackSeries = function (n, s, a, p)
{
for (var fxn = el.de, rv = [], i = 0; i < n; rv.push(fxn(a, p+i*s)), i++);
return rv;
};
// Pack a series of n elements of size s from array v at offset i to array a at offset p with fxn
m._PackSeries = function (n, s, a, p, v, i)
{
for (var fxn = el.en, o = 0; o < n; fxn(a, p+o*s, v[i+o]), o++);
};
// Unpack the octet array a, beginning at offset p, according to the fmt string
m.Unpack = function (fmt, a, p)
{
// Set the private bBE flag based on the format string - assume big-endianness
bBE = (fmt.charAt(0) != '<');
p = p?p:0;
var re = new RegExp(this._sPattern, 'g'), m, n, s, rv = [];
while (m = re.exec(fmt))
{
n = ((m[1]==undefined)||(m[1]==''))?1:parseInt(m[1]);
s = this._lenLut[m[2]];
if ((p + n*s) > a.length)
{
return undefined;
}
switch (m[2])
{
case 'A': case 's':
rv.push(this._elLut[m[2]].de(a, p, n));
break;
case 'c': case 'b': case 'B': case 'h': case 'H':
case 'i': case 'I': case 'l': case 'L': case 'f': case 'd': case 'q': case 'Q':
el = this._elLut[m[2]];
rv.push(this._UnpackSeries(n, s, a, p));
break;
}
p += n*s;
}
return Array.prototype.concat.apply([], rv);
};
// Pack the supplied values into the octet array a, beginning at offset p, according to the fmt string
m.PackTo = function (fmt, a, p, values)
{
// Set the private bBE flag based on the format string - assume big-endianness
bBE = (fmt.charAt(0) != '<');
var re = new RegExp(this._sPattern, 'g'), m, n, s, i = 0, j;
while (m = re.exec(fmt))
{
n = ((m[1]==undefined)||(m[1]==''))?1:parseInt(m[1]);
s = this._lenLut[m[2]];
if ((p + n*s) > a.length)
{
return false;
}<|fim▁hole|> this._elLut[m[2]].en(a, p, n, values[i]);
i += 1;
break;
case 'c': case 'b': case 'B': case 'h': case 'H':
case 'i': case 'I': case 'l': case 'L': case 'f': case 'd': case 'q': case 'Q':
el = this._elLut[m[2]];
if ((i + n) > values.length) { return false; }
this._PackSeries(n, s, a, p, values, i);
i += n;
break;
case 'x':
for (j = 0; j < n; j++) { a[p+j] = 0; }
break;
}
p += n*s;
}
return a;
};
// Pack the supplied values into a new octet array, according to the fmt string
m.Pack = function (fmt, values)
{
return this.PackTo(fmt, new Array(this.CalcLength(fmt)), 0, values);
};
// Determine the number of bytes represented by the format string
m.CalcLength = function (fmt)
{
var re = new RegExp(this._sPattern, 'g'), m, sum = 0;
while (m = re.exec(fmt))
{
sum += (((m[1]==undefined)||(m[1]==''))?1:parseInt(m[1])) * this._lenLut[m[2]];
}
return sum;
};
};<|fim▁end|> | switch (m[2])
{
case 'A': case 's':
if ((i + 1) > values.length) { return false; } |
<|file_name|>main.ts<|end_file_name|><|fim▁begin|>import { platformBrowserDynamic } from '@angular/platform-browser-dynamic';
import { AppModule } from './app';
<|fim▁hole|>platformBrowserDynamic().bootstrapModule(AppModule).catch((error) => console.log("An error occurred in bootstrap :", error));<|fim▁end|>
<|file_name|>test_spin.py<|end_file_name|><|fim▁begin|># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import pytest
import math as m
import numpy as np
from sisl import Spin
pytestmark = [pytest.mark.physics, pytest.mark.spin]
def test_spin1():
for val in ['unpolarized', '', Spin.UNPOLARIZED,
'polarized', 'p', Spin.POLARIZED,
'non-collinear', 'nc', Spin.NONCOLINEAR,
'spin-orbit', 'so', Spin.SPINORBIT]:
s = Spin(val)
str(s)
s1 = s.copy()
assert s == s1
def test_spin2():
s1 = Spin()
s2 = Spin('p')
s3 = Spin('nc')
s4 = Spin('so')
assert s1.kind == Spin.UNPOLARIZED
assert s2.kind == Spin.POLARIZED
assert s3.kind == Spin.NONCOLINEAR
assert s4.kind == Spin.SPINORBIT
assert s1 == s1.copy()
assert s2 == s2.copy()
assert s3 == s3.copy()
assert s4 == s4.copy()
assert s1 < s2
assert s2 < s3
assert s3 < s4
assert s1 <= s2
assert s2 <= s3
assert s3 <= s4
assert s2 > s1
assert s3 > s2
assert s4 > s3
assert s2 >= s1
assert s3 >= s2
assert s4 >= s3
assert s1.is_unpolarized
assert not s1.is_polarized
assert not s1.is_noncolinear
assert not s1.is_spinorbit
assert not s2.is_unpolarized
assert s2.is_polarized
assert not s2.is_noncolinear
assert not s2.is_spinorbit
assert not s3.is_unpolarized
assert not s3.is_polarized
assert s3.is_noncolinear
assert not s3.is_spinorbit
assert not s4.is_unpolarized
assert not s4.is_polarized
assert not s4.is_noncolinear
assert s4.is_spinorbit
def test_spin3():
with pytest.raises(ValueError):
s = Spin('satoehus')
def test_spin4():
s1 = Spin(Spin.UNPOLARIZED)
S1 = Spin(Spin.UNPOLARIZED, np.complex64)
s2 = Spin(Spin.POLARIZED)
S2 = Spin(Spin.POLARIZED, np.complex64)
s3 = Spin(Spin.NONCOLINEAR)
S3 = Spin(Spin.NONCOLINEAR, np.complex64)
s4 = Spin(Spin.SPINORBIT)
S4 = Spin(Spin.SPINORBIT, np.complex64)
assert s1 == S1
assert s2 == S2
assert s3 == S3
assert s4 == S4
# real comparison
assert s1 < S2
assert s1 < S3
assert s1 < S4
assert s2 > S1
assert s2 < S3
assert s2 < S4
assert s3 > S1
assert s3 > S2
assert s3 < S4
assert s4 > S1
assert s4 > S2
assert s4 > S3
# complex complex
assert S1 < S2
assert S1 < S3
assert S1 < S4
assert S2 > S1
assert S2 < S3
assert S2 < S4<|fim▁hole|>
assert S3 > S1
assert S3 > S2
assert S3 < S4
assert S4 > S1
assert S4 > S2
assert S4 > S3
# real comparison
assert S1 < s2
assert S1 < s3
assert S1 < s4
assert S2 > s1
assert S2 < s3
assert S2 < s4
assert S3 > s1
assert S3 > s2
assert S3 < s4
assert S4 > s1
assert S4 > s2
assert S4 > s3
# complex complex
assert S1 < s2
assert S1 < s3
assert S1 < s4
assert S2 > s1
assert S2 < s3
assert S2 < s4
assert S3 > s1
assert S3 > s2
assert S3 < s4
assert S4 > s1
assert S4 > s2
assert S4 > s3
def test_pauli():
# just grab the default spin
S = Spin()
# Create a fictituous wave-function
sq2 = 2 ** .5
W = np.array([
[1/sq2, 1/sq2], # M_x = 1
[1/sq2, -1/sq2], # M_x = -1
[0.5 + 0.5j, 0.5 + 0.5j], # M_x = 1
[0.5 - 0.5j, -0.5 + 0.5j], # M_x = -1
[1/sq2, 1j/sq2], # M_y = 1
[1/sq2, -1j/sq2], # M_y = -1
[0.5 - 0.5j, 0.5 + 0.5j], # M_y = 1
[0.5 + 0.5j, 0.5 - 0.5j], # M_y = -1
[1, 0], # M_z = 1
[0, 1], # M_z = -1
])
x = np.array([1, -1, 1, -1, 0, 0, 0, 0, 0, 0])
assert np.allclose(x, (np.conj(W)*S.X.dot(W.T).T).sum(1).real)
y = np.array([0, 0, 0, 0, 1, -1, 1, -1, 0, 0])
assert np.allclose(y, (np.conj(W)*np.dot(S.Y, W.T).T).sum(1).real)
z = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, -1])
assert np.allclose(z, (np.conj(W)*np.dot(S.Z, W.T).T).sum(1).real)
def test_pickle():
import pickle as p
S = Spin('nc')
n = p.dumps(S)
s = p.loads(n)
assert S == s<|fim▁end|> | |
<|file_name|>location.go<|end_file_name|><|fim▁begin|>package reflectwalk
//go:generate stringer -type=Location location.go
type Location uint
const (
None Location = iota
Map
MapKey
MapValue<|fim▁hole|> Slice
SliceElem
Struct
StructField
WalkLoc
)<|fim▁end|> | |
<|file_name|>conf.go<|end_file_name|><|fim▁begin|>package conf
import (
"errors"
"flag"
"time"
"go-common/library/cache/redis"
"go-common/library/conf"
ecode "go-common/library/ecode/tip"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
"go-common/library/net/http/blademaster/middleware/auth"
"go-common/library/net/http/blademaster/middleware/verify"
"go-common/library/net/rpc"
"go-common/library/net/rpc/warden"
"go-common/library/net/trace"
"go-common/library/stat/prom"
xtime "go-common/library/time"
"github.com/BurntSushi/toml"
)
var (
confPath string
// Conf config .
Conf = &Config{}
)
// Config config .
type Config struct {
Log *log.Config
Ecode *ecode.Config
Tag *Tag
Supervision *Supervision
Host *Host<|fim▁hole|> Tracer *trace.Config
Auth *auth.Config
Verify *verify.Config
HTTPClient *bm.ClientConfig
HTTPSimilar *bm.ClientConfig
BM *bm.ServerConfig
GRPCServer *warden.ServerConfig
RPCServer *rpc.ServerConfig
Redis *Redis
ArchiveRPC *rpc.ClientConfig
TagDisRPC *rpc.ClientConfig
FigureRPC *rpc.ClientConfig
// Warden Client
TagGRPClient *warden.ClientConfig
AccGRPClient *warden.ClientConfig
}
// Host host config .
type Host struct {
APICo string
AI string
Account string
Archive string
BigDataURL string
}
// Redis redis config .
type Redis struct {
Tag *TagRedis
Rank *RankRedis
}
// TagRedis tag redis config .
type TagRedis struct {
Redis *redis.Config
Expire *TagExpire
}
// TagExpire expire config .
type TagExpire struct {
Sub xtime.Duration
ArcTag xtime.Duration
ArcTagOp xtime.Duration
AtLike xtime.Duration
AtHate xtime.Duration
}
// RankRedis rank redis config .
type RankRedis struct {
Redis *redis.Config
Expire *RankExpire
}
// RankExpire rang expire config .
type RankExpire struct {
TagNewArc xtime.Duration
}
// Tag tag config .
type Tag struct {
FeedBackMaxLen int
// user level
ArcTagAddLevel int
ArcTagDelLevel int
ArcTagRptLevel int
ArcTagLikeLevel int
ArcTagHateLevel int
SubArcMaxNum int
// arctag
ArcTagMaxNum int
ArcTagAddMaxNum int
ArcTagDelMaxNum int
ArcTagDelSomeNum int
ArcTagLikeMaxNum int
ArcTagHateMaxNum int
ArcTagRptMaxNum int
LikeLimitToLock int64
MaxArcsPageSize int
MaxArcsLimit int
// select tag number
MaxSelTagNum int
White []int64 // 用户账号白名单
ChannelRefreshTime xtime.Duration
AITimeout int
}
// Supervision supervision .
type Supervision struct {
SixFour *struct {
Button bool
Begin time.Time
End time.Time
}
RealName *struct {
Button bool
}
}
// PromError stat and log.
func PromError(name string, format string, args ...interface{}) {
prom.BusinessErrCount.Incr(name)
}
func init() {
flag.StringVar(&confPath, "conf", "", "config path")
}
// Init intt conf .
func Init() (err error) {
if confPath == "" {
return configCenter()
}
_, err = toml.DecodeFile(confPath, &Conf)
return
}
func configCenter() (err error) {
var (
client *conf.Client
value string
ok bool
)
if client, err = conf.New(); err != nil {
return
}
if value, ok = client.Toml2(); !ok {
return errors.New("load config center error")
}
_, err = toml.Decode(value, &Conf)
return
}<|fim▁end|> | |
<|file_name|>openshift_controller.go<|end_file_name|><|fim▁begin|>package kubeapiserver
import (
"io/ioutil"
"path"
"k8s.io/apimachinery/pkg/runtime"
"github.com/golang/glog"
configapi "github.com/openshift/origin/pkg/cmd/server/apis/config"
configapilatest "github.com/openshift/origin/pkg/cmd/server/apis/config/latest"
"github.com/openshift/origin/pkg/oc/clusterup/coreinstall/tmpformac"
)
func MakeOpenShiftControllerConfig(existingMasterConfig string, basedir string) (string, error) {
configDir := path.Join(basedir, OpenShiftControllerManagerDirName)
glog.V(1).Infof("Copying kube-apiserver config to local directory %s", OpenShiftControllerManagerDirName)
if err := tmpformac.CopyDirectory(existingMasterConfig, configDir); err != nil {
return "", err
}
// update some listen information to include starting the DNS server
masterconfigFilename := path.Join(configDir, "master-config.yaml")
originalBytes, err := ioutil.ReadFile(masterconfigFilename)
if err != nil {
return "", err
}
configObj, err := runtime.Decode(configapilatest.Codec, originalBytes)
if err != nil {
return "", err
}
masterconfig := configObj.(*configapi.MasterConfig)
masterconfig.ServingInfo.BindAddress = "0.0.0.0:8444"
configBytes, err := configapilatest.WriteYAML(masterconfig)
if err != nil {
return "", err<|fim▁hole|> }
if err := ioutil.WriteFile(masterconfigFilename, configBytes, 0644); err != nil {
return "", err
}
return configDir, nil
}<|fim▁end|> | |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import patterns, url
urlpatterns = patterns('',<|fim▁hole|><|fim▁end|> | url(r'^img1x1$', 'ses_analytics.views.img1x1', name='img1x1'), # used to trace email opening
# TODO: unsubscription and SNS feedback notifications
) |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// @adjivas - github.com/adjivas. See the LICENSE
// file at the top-level directory of this distribution and at
// https://github.com/adjivas/manlist/LICENCE.
//
// This file may not be copied, modified, or distributed
// except according to those terms.
pub mod mans {
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
use std::path::PathBuf;
extern crate glob;
/// The `command` structure is a name ("bc") and
/// a description ("An arbitrary precision...").
pub struct Command {
pub names: Vec<String>,
pub description: String,
}
impl Command {
/// The `new` constructor function returns a new Command.
fn new (
names: Vec<String>,
description: String,
) -> Self {
Command {
names: names,
description: description,
}
}
/// The `from_gnu` constructor function returns the name and the
/// description from the man's gnu command ("bc", "An arbitrary...").
fn from_gnu (
buff: &mut BufReader<&File>,
line: &mut String
) -> Result<Self, String> {
if buff.read_line(line).is_ok() {
let command: String = line.replace("\\- ", "- ");
if line.find("- ").is_some() {
let mut command = command.split("- ");
// Warning, this four match is a not "good practice", the
// method `from_gnu` must be revised for more efficiance.
match command.next() {
Some(line_name) => {
match Command::gnu_names(&mut line_name.to_string()) {
Ok(names) => {
match command.next() {
Some(line_description) => {
match Command::gnu_description(
&mut line_description.to_string()
) {
Ok(description) => {
return Ok(Command::new(
names,
description,
));
},
Err(_) => {},
}
},
None => {},
}
},
Err(_) => {},
}
},
None => {},
}
}
}
Err("invalid gnu's command".to_string())
}
/// The `from_unix` constructor function returns the name and the
/// description from the man's unix command ("bc", "An arbitrary...").
fn from_unix (
buff: &mut BufReader<&File>,
line: &mut String
) -> Result<Self, String> {
if line_clear_to(buff, line, ".Nm") {
match Command::unix_names(line) {
Ok(names) => {
if line_clear_to(buff, line, ".Nd") {
match Command::unix_description(line) {
Ok(description) => {
return Ok(Command::new(
names,
description,
));
},
Err(_) => {},
}
}
},
Err(_) => {},
}
}
Err("invalid unix's command".to_string())
}
/// The `gnu_names` function returns the command
/// names from the man's gnu ("bc", "echo", [...]).
fn gnu_names (
line: &mut String
) -> Result<Vec<String>, String> {
let mut lines: String = line.replace(",", "");
let mut names: Vec<String> = Vec::new();
if 92 == lines.as_bytes()[0] {
lines = lines.chars().skip(3).collect();
}
lines = lines.chars().take_while(|x| *x != '\\').collect();
for name in lines.split(" ") {
if !name.is_empty() {
names.push(name.to_string());
}
}
if names.len() > 0 {
return Ok(names);
}
Err(lines)
}
/// The `gnu_description` function returns the description
/// from the man's gnu ("An arbitrary...", [...]).
fn gnu_description (
line: &mut String
) -> Result<String, String> {
let mut description: String = line.replace(",", "").replace("\\", "");
description = description.trim().chars().collect();
if !description.is_empty() {
return Ok(description);
}
Err(description)
}
/// The `unix_names` function returns the command
/// names from the man's unix ("bc", "echo", [...]).
fn unix_names (
line: &mut String
) -> Result<Vec<String>, String> {
let lines: String = line.replace(",", "").trim().chars().skip(4).collect();
let mut names: Vec<String> = Vec::new();
for name in lines.split(" ") {
if !name.is_empty() {
names.push(name.to_string());
}
}
if names.len() > 0 {
return Ok(names);
}
Err(lines)
}
/// The `unix_description` function returns the description
/// from the man's unix ("An arbitrary...", [...]).
fn unix_description (
line: &mut String
) -> Result<String, String> {
let mut description: String = line.replace(",", "");
description = description.trim().chars().skip(4).collect();
description = description.chars().take_while(|x| *x != '\\').collect();
description = description.replace("\"", "");
if !description.is_empty() {
return Ok(description);
}
Err(description)
}
}
/// The `argument` structure is the first option ("-h") and
/// all comments ("Print the usage and exit.", ...).
pub struct Argument {
pub option: String,
pub comments: Vec<String>,
}
impl Argument {
/// The `Argument::new` Constructor function returns a new Argument.
fn new (
option: String,
comments: Vec<String>,
) -> Self {
Argument {
option: option,
comments: comments,
}
}
/// The `from_gnu` Constructor function returns all options and comments
/// from the man's gnu command (["-h", "Print..."], ["-i", "Force..."]).
fn from_gnu (<|fim▁hole|> ) -> Result<Vec<Self>, String> {
let mut arguments:Vec<Argument> = Vec::new();
if line_to_multy(buff, line, &["OPTIONS"]) > 0 {
line.clear();
while line_to_multy(buff, line, &[".TP"]) > 0 {
match Argument::gnu_option(buff, line) {
Ok(option) => {
match Argument::gnu_comment(buff, line) {
Ok(description) => {
arguments.push(Argument::new(
option,
description,
));
},
Err(_) => {},
}
},
Err(_) => {},
}
}
}
if arguments.len() > 0 {
return Ok(arguments);
}
Err("invalid gnu's argument".to_string())
}
/// The `from_unix` Constructor function returns all options and comments
/// from the man's unix command (["-h", "Print..."], ["-i", "Force..."]).
fn from_unix (
buff: &mut BufReader<&File>,
line: &mut String
) -> Result<Vec<Self>, String> {
let mut arguments:Vec<Argument> = Vec::new();
while line_clear_to(buff, line, ".It Fl ") {
match Argument::unix_option(line) {
Ok(option) => {
match Argument::unix_comment(buff, line) {
Ok(description) => {
arguments.push(Argument::new(
option,
description,
));
},
Err(_) => {},
}
},
Err(_) => {},
}
}
if arguments.len() <= 0 {
return Err("invalid unix's argument".to_string());
}
Ok(arguments)
}
/// The `unix_option` function returns a list of
/// option's arguments from the man's unix ([-h, --help], [[...], ...]).
fn unix_option (
line: &mut String
) -> Result<String, String> {
let mut option:String = "-".to_string();
let mut opt:String = line.trim().to_string();
opt = opt.replace("\\-", "-");
opt = opt.replace("\\&", "");
opt = opt.chars().skip(7).collect();
opt = opt.chars().take_while(
|x| *x != ' ' &&
*x != '\\' &&
*x != '"' &&
*x != ','
).collect();
if !opt.is_empty() {
option.push_str(&opt);
return Ok(option);
}
Err(line.to_string())
}
/// The `unix_comment` function returns a list of
/// comment's arguments from the man's unix ("Print the...", [...]).
fn unix_comment (
buff: &mut BufReader<&File>,
line: &mut String
) -> Result<Vec<String>, String> {
line.clear();
let mut descriptions:Vec<String> = Vec::new();
while buff.read_line(line).is_ok()
&& !line.is_empty()
&& 46 != line.as_bytes()[0] {
let mut description:String = line.to_string();
description = description.replace("\\fI", "");
description = description.replace("\\fR", "");
description = description.replace("\\fB", "");
description = description.replace("\\-", "-");
description = description.replace("\\(aa", "");
if !description.is_empty() {
descriptions.push(description);
}
line.clear();
}
if descriptions.len() > 0 {
return Ok(descriptions);
}
Err(line.to_string())
}
/// The `gnu_comment` function returns a list of
/// comment's arguments from the man's gnu ("Print the...", [...]).
fn gnu_comment (
buff: &mut BufReader<&File>,
line: &mut String
) -> Result<Vec<String>, String> {
let mut descriptions:Vec<String> = Vec::new();
line.clear();
while buff.read_line(line).is_ok() && !line.is_empty()
&& !line.find(".SH").is_some()
&& !line.find("\\-").is_some()
&& !line.find(".TP").is_some() {
let mut description:String = line.trim().to_string();
if !description.is_empty() && description.as_bytes()[0] == 46 {
description = description.chars().skip_while(
|x| *x != ' '
).collect();
}
description = description.replace("\\\\fP", "");
description = description.replace("\\\\fI", "");
description = description.replace("\\f", "");
description = description.replace("\\&", "");
description = description.replace("\\ ", " ");
description = description.replace("\\", "\\");
description = description.replace("\\\"", "\"");
description = description.replace("\\'", "'");
if !description.is_empty() {
descriptions.push(description);
}
line.clear();
}
if descriptions.len() <= 0 {
return Ok(descriptions);
}
Err(line.to_string())
}
/// The `gnu_option` function returns a list of
/// option's arguments from the man's gnu ([-h, --help], [[...], ...]).
fn gnu_option (
buff: &mut BufReader<&File>,
line: &mut String
) -> Result<String, String> {
let mut option:String = line.trim().to_string();
line.clear();
if buff.read_line(line).is_ok() {
option = option.chars().take_while(|x| *x != '"').collect();
option = option.chars().skip(3).collect();
if option.find("^").is_some() {
option = option.chars().skip_while(|x| *x != '^').collect();
option = option.chars().take_while(|x| *x != ' ').collect();
option = option.replace("^", "-");
}
else if option.find("-").is_some()
|| option.find("\\").is_some() {
option = option.replace("\\-", "-");
option = option.replace("\\\\fR", "\n");
option = option.chars().take_while(
|x| *x != '\n' || *x != ' '
).collect();
option = option.replace("*=", "--");
}
option = option.chars().take_while(|x| *x != '\\').collect();
option = option.replace(" ", "");
if !option.is_empty()
&& option.as_bytes()[0] == 45 {
return Ok(option);
}
}
Err(option)
}
}
/// The `mans` structure is a defined by the two
/// structures command and Argument
pub struct Man {
pub command: Command,
pub arguments: Vec<Argument>,
}
impl Man {
/// The `new` constructor function returns a new Man.
pub fn new (
command: Command,
arguments: Vec<Argument>,
) -> Self {
Man {
command: command,
arguments: arguments,
}
}
/// The `from_open` constructor function returns a new Man
/// according the path.
pub fn from_open (
path: &PathBuf,
) -> Result<Self, String> {
return match File::open(&path) {
Err(why) => Err(why.to_string()),
Ok(open) => Man::from_buff(open),
}
}
/// The `from_buff` constructor function returns a new Man
/// according to a file descriptor.
pub fn from_buff (
open: File,
) -> Result<Self, String> {
let mut buff = BufReader::new(&open);
let mut line:String = String::new();
return match line_to_multy(
&mut buff,
&mut line,
&[".Sh", ".SH"],
) {
104 => Man::read_unix(&mut buff, &mut line),
72 => Man::read_gnu(&mut buff, &mut line),
_ => Err("unknown man".to_string()),
}
}
/// The `read_gnu` function checks and parses the: name,
/// description, options and commants from a man's gnu.
fn read_gnu (
buff: &mut BufReader<&File>,
line: &mut String
) -> Result<Self, String> {
line.clear();
match Command::from_gnu(buff, line) {
Ok(command) => {
match Argument::from_gnu(buff, line) {
Ok(argument) => return Ok(Man::new(
command,
argument,
)),
Err(_) => {},
}
},
Err(_) => {},
}
Err("invalid gnu's man".to_string())
}
/// The `read_unix` function checks and parses the: name,
/// description, options and commants from a man's unix.
fn read_unix (
buff: &mut BufReader<&File>,
line: &mut String
) -> Result<Self, String> {
line.clear();
match Command::from_unix(buff, line) {
Ok(command) => {
match Argument::from_unix(buff, line) {
Ok(argument) => return Ok(Man::new(
command,
argument,
)),
Err(_) => {},
}
},
Err(_) => {},
}
Err("invalid unix's man".to_string())
}
}
/// The `new` constructor function returns a list of valid man
/// according to a list of possibely roots for .1's files.
pub fn from_env (
roots: &Vec<String>
) -> Vec<Man> {
let mut mans: Vec<Man> = Vec::with_capacity(roots.capacity());
for paths in roots {
let mut one:String = paths.clone();
one.push_str("/*/*.1");
for buff in glob::glob(&one).unwrap().filter_map(Result::ok) {
let name = &buff.file_name().unwrap();
match Man::from_open(&buff) {
Err(why) => {
println!("fail add {:?} because: {}", name, why);
},
Ok(man) => {
if !search_description(&mans, &man.command.description) {
println!("success add {:?}", name);
mans.push(man);
}
},
}
}
}
mans
}
/// The `search_description` function returns a boolean true if
/// description is a element from man's list.
fn search_description (
mans: &Vec<Man>,
find: &String
) -> bool {
for man in mans {
if man.command.description == *find {
return true;
}
}
false
}
/// The `line_clear_to` function first clears, moves
/// the `line` variable to `find` and returns a boolean.
fn line_clear_to (
buff: &mut BufReader<&File>,
line: &mut String,
find: &str
) -> bool {
line.clear();
while buff.read_line(line).is_ok()
&& !line.is_empty() {
if line.find(find).is_some() {
return true;
}
line.clear();
}
false
}
/// The `line_to_multy` function moves the `line` variable to `finds`,
/// founds the first egality for returns the two letters or returns zero.
fn line_to_multy (
buff: &mut BufReader<&File>,
line: &mut String,
finds: &[&str]
) -> u8 {
while buff.read_line(line).is_ok()
&& !line.is_empty() {
for find in finds {
if line.find(find).is_some() {
return line.as_bytes()[2];
}
}
line.clear();
}
0
}
/// The `display` function returns a man's text.
pub fn display (
mans: &Vec<Man>,
binary: &String,
) -> String {
let mut out:String = String::new();
'manual: for man in mans.iter() {
if man.command.names.contains(&binary) {
out.push_str(&man.command.description);
out.push_str("\n");
for argument in man.arguments.iter() {
out.push_str(&argument.option);
out.push_str("\n");
for comment in argument.comments.iter() {
out.push_str("\t");
out.push_str(&comment);
}
out.push_str("\n");
}
return out;
}
}
"command not found\n".to_string()
}
}<|fim▁end|> | buff: &mut BufReader<&File>,
line: &mut String |
<|file_name|>NavBar.js<|end_file_name|><|fim▁begin|>'use strict';
import React from 'react-native'
import {
AppRegistry,
Component,
Navigator,
ToolbarAndroid,
View,
} from 'react-native';
import globalStyle, { colors } from './globalStyle';
class NavBar extends Component {
onClickBackToHome() {
this.props.navigator.push({
name: 'home',
sceneConfig: Navigator.SceneConfigs.FloatFromLeft,
});
}
getToolbarActions(route) {
const actionsByRoute = {
'home': [],
'boop-view': [{
title: 'Back',
show: 'always'
}],
}
if (actionsByRoute[route.name]) {
return actionsByRoute[route.name];
}
return [];
}
render() {
const { navigator } = this.props;
const currentRoute = navigator.getCurrentRoutes()[navigator.getCurrentRoutes().length - 1];
const toolbarActions = this.getToolbarActions(currentRoute);
<|fim▁hole|> <ToolbarAndroid
style={globalStyle.toolbar}
title='boop'
titleColor={colors.darkTheme.text1}
actions={toolbarActions}
onActionSelected={this.onClickBackToHome.bind(this)}
/>
);
}
};
AppRegistry.registerComponent('NavBar', () => NavBar);
export default NavBar;<|fim▁end|> | return ( |
<|file_name|>palindrome.py<|end_file_name|><|fim▁begin|><|fim▁hole|># https://www.reddit.com/r/learnpython/comments/82ucgu/calling_an_input_inside_a_def_function/
def main():
while True:
word = raw_input('Enter a word: ')
if word == '-1':
break
not_ = '' if word[:] == word[::-1] else ' not'
print "Word '%s' is%s a palindrome" % (word, not_)
main()<|fim▁end|> | |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>import Globals
from Products.ZenModel.ZenPack import ZenPack as ZenPackBase
from Products.ZenUtils.Utils import unused, zenPath
import os
unused(Globals)
_plugins = [
'rig_host_app_transform1.py',
'copy_server_config_file.sh',
]
class ZenPack(ZenPackBase):
def install(self, app):<|fim▁hole|> libexec = os.path.join(os.environ.get('ZENHOME'), 'libexec')
if not os.path.isdir(libexec):
# Stack installs might not have a \$ZENHOME/libexec directory.
os.mkdir(libexec)
# Now get the path to the file in the ZenPack's libexec directory
filepath = __file__ # Get path to this file
(zpdir, tail) = os.path.split(filepath)
zp_libexec_dir = os.path.join(zpdir,'libexec')
for plugin in _plugins:
plugin_path = zenPath('libexec', plugin)
zp_plugin_path = os.path.join(zp_libexec_dir, plugin)
#os.system('ln -sf "%s" "%s"' % (self.path(plugin), plugin_path))
os.system('ln -sf "%s" "%s"' % (zp_plugin_path, plugin_path))
os.system('chmod 0755 %s' % plugin_path)
def remove_plugin_symlinks(self):
for plugin in _plugins:
os.system('rm -f "%s"' % zenPath('libexec', plugin))
def remove(self, app, leaveObjects=False):
if not leaveObjects:
self.remove_plugin_symlinks()
super(ZenPack, self).remove(app, leaveObjects=leaveObjects)<|fim▁end|> | super(ZenPack, self).install(app)
self.symlink_plugins()
def symlink_plugins(self): |
<|file_name|>doc.go<|end_file_name|><|fim▁begin|>// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
The present file format
Present files have the following format. The first non-blank non-comment
line is the title, so the header looks like
Title of document
Subtitle of document
15:04 2 Jan 2006
Tags: foo, bar, baz
<blank line>
Author Name
Job title, Company
[email protected]<|fim▁hole|>
The subtitle, date, and tags lines are optional.
The date line may be written without a time:
2 Jan 2006
In this case, the time will be interpreted as 10am UTC on that date.
The tags line is a comma-separated list of tags that may be used to categorize
the document.
The author section may contain a mixture of text, twitter names, and links.
For slide presentations, only the plain text lines will be displayed on the
first slide.
Multiple presenters may be specified, separated by a blank line.
After that come slides/sections, each after a blank line:
* Title of slide or section (must have asterisk)
Some Text
** Subsection
- bullets
- more bullets
- a bullet with
*** Sub-subsection
Some More text
Preformatted text
is indented (however you like)
Further Text, including invocations like:
.code x.go /^func main/,/^}/
.play y.go
.image image.jpg
.background image.jpg
.iframe http://foo
.link http://foo label
.html file.html
.caption _Gopher_ by [[http://www.reneefrench.com][Renée French]]
Again, more text
Blank lines are OK (not mandatory) after the title and after the
text. Text, bullets, and .code etc. are all optional; title is
not.
Lines starting with # in column 1 are commentary.
Fonts:
Within the input for plain text or lists, text bracketed by font
markers will be presented in italic, bold, or program font.
Marker characters are _ (italic), * (bold) and ` (program font).
Unmatched markers appear as plain text.
Within marked text, a single marker character becomes a space
and a doubled single marker quotes the marker character.
_italic_
*bold*
`program`
_this_is_all_italic_
_Why_use_scoped__ptr_? Use plain ***ptr* instead.
Inline links:
Links can be included in any text with the form [[url][label]], or
[[url]] to use the URL itself as the label.
Functions:
A number of template functions are available through invocations
in the input text. Each such invocation contains a period as the
first character on the line, followed immediately by the name of
the function, followed by any arguments. A typical invocation might
be
.play demo.go /^func show/,/^}/
(except that the ".play" must be at the beginning of the line and
not be indented like this.)
Here follows a description of the functions:
code:
Injects program source into the output by extracting code from files
and injecting them as HTML-escaped <pre> blocks. The argument is
a file name followed by an optional address that specifies what
section of the file to display. The address syntax is similar in
its simplest form to that of ed, but comes from sam and is more
general. See
http://plan9.bell-labs.com/sys/doc/sam/sam.html Table II
for full details. The displayed block is always rounded out to a
full line at both ends.
If no pattern is present, the entire file is displayed.
Any line in the program that ends with the four characters
OMIT
is deleted from the source before inclusion, making it easy
to write things like
.code test.go /START OMIT/,/END OMIT/
to find snippets like this
tedious_code = boring_function()
// START OMIT
interesting_code = fascinating_function()
// END OMIT
and see only this:
interesting_code = fascinating_function()
Also, inside the displayed text a line that ends
// HL
will be highlighted in the display; the 'h' key in the browser will
toggle extra emphasis of any highlighted lines. A highlighting mark
may have a suffix word, such as
// HLxxx
Such highlights are enabled only if the code invocation ends with
"HL" followed by the word:
.code test.go /^type Foo/,/^}/ HLxxx
The .code function may take one or more flags immediately preceding
the filename. This command shows test.go in an editable text area:
.code -edit test.go
This command shows test.go with line numbers:
.code -numbers test.go
play:
The function "play" is the same as "code" but puts a button
on the displayed source so the program can be run from the browser.
Although only the selected text is shown, all the source is included
in the HTML output so it can be presented to the compiler.
link:
Create a hyperlink. The syntax is 1 or 2 space-separated arguments.
The first argument is always the HTTP URL. If there is a second
argument, it is the text label to display for this link.
.link http://golang.org golang.org
image:
The template uses the function "image" to inject picture files.
The syntax is simple: 1 or 3 space-separated arguments.
The first argument is always the file name.
If there are more arguments, they are the height and width;
both must be present, or substituted with an underscore.
Replacing a dimension argument with the underscore parameter
preserves the aspect ratio of the image when scaling.
.image images/betsy.jpg 100 200
.image images/janet.jpg _ 300
video:
The template uses the function "video" to inject video files.
The syntax is simple: 2 or 4 space-separated arguments.
The first argument is always the file name.
The second argument is always the file content-type.
If there are more arguments, they are the height and width;
both must be present, or substituted with an underscore.
Replacing a dimension argument with the underscore parameter
preserves the aspect ratio of the video when scaling.
.video videos/evangeline.mp4 video/mp4 400 600
.video videos/mabel.ogg video/ogg 500 _
background:
The template uses the function "background" to set the background image for
a slide. The only argument is the file name of the image.
.background images/susan.jpg
caption:
The template uses the function "caption" to inject figure captions.
The text after ".caption" is embedded in a figcaption element after
processing styling and links as in standard text lines.
.caption _Gopher_ by [[http://www.reneefrench.com][Renée French]]
iframe:
The function "iframe" injects iframes (pages inside pages).
Its syntax is the same as that of image.
html:
The function html includes the contents of the specified file as
unescaped HTML. This is useful for including custom HTML elements
that cannot be created using only the slide format.
It is your responsibilty to make sure the included HTML is valid and safe.
.html file.html
*/
package present // import "golang.org/x/tools/present"<|fim▁end|> | http://url/
@twitter_name |
<|file_name|>recent.js<|end_file_name|><|fim▁begin|>/**
* Created by raj on 19/8/14.
*/
var fs = require('fs');
var content = fs.read ('animeEpisode.json');
console.log(JSON.stringify(JSON.parse(content)[1][0].title));
videolinks=JSON.parse(content);
links=[];
function pages(k) {
var page = new WebPage();
page.open('http://www.gogoanime.com/', function (status) {
console.log('opened gogoanime :++++ ', status);
if (status==fail){
page.close();
pages(k);
}
if (status == success) {
page.includeJs('http://ajax.googleapis.com/ajax/libs/jquery/1.8.2/jquery.min.js', function () {
console.log('jq included')
var data = page.evaluate(function (data) {<|fim▁hole|> for (var i = 0; i <$('.post div:eq(1) table tbody tr td:eq(0) ul').length; i = i + 1) {
data.links.push($('.post div:eq(1) table tbody tr td:eq(0) ul li a').attr('href'));
}
return JSON.stringify(data);
});
links[k][m] = JSON.parse(data);
console.log(data);
if (m < links[k].length - 1) {
page.close();
console.log('next episoide called');
pages(k, m + 1);
}
;
if (m == links[k].length - 1) {
page.close();
console.log('next anime called');
var path = 'links.json';
fs.write(path, links[k], 'w');
pages(k + 1, 1);
}
if (k == links.length - 1) {
var path = 'links.json';
fs.write(path, links, 'w');
}
});
}
});
}
pages(1,1);<|fim▁end|> | var tempdata=[]; |
<|file_name|>BackBeanProvider.java<|end_file_name|><|fim▁begin|>package com.aw.swing.mvp.action;
import com.aw.swing.mvp.Presenter;
import com.aw.swing.mvp.navigation.Flow;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.beans.BeanWrapper;
import org.springframework.beans.BeanWrapperImpl;
import java.util.Iterator;
import java.util.Map;
/**
* User: gmc
* Date: 16/05/2009
*/
public class BackBeanProvider {
protected Log logger = LogFactory.getLog(getClass());
public BackBeanProvider() {
}
/**
* Return the back bean that must be used.
*
* @return
*/
public Object getBackBean(Presenter targetPst,Flow flow) {
Object backBean = null;
backBean = flow.getTargetBackBeanAttr();
if (backBean == null) {
backBean = targetPst.createBackBean();
}
decorateBackBeanWithFlowAttributes(backBean, flow);
return backBean;
}
/**
* Decorate the back bean with the attributes sent in the flow
*
* @param backBean
* @param flow
*/
private void decorateBackBeanWithFlowAttributes(Object backBean, Flow flow) {
Map flowAttributes = flow.getAttributes();
BeanWrapper bwBackBean = new BeanWrapperImpl(backBean);
for (Iterator iterator = flowAttributes.keySet().iterator(); iterator.hasNext();) {<|fim▁hole|> if (bwBackBean.isWritableProperty(flowAttributeName)) {
bwBackBean.setPropertyValue(flowAttributeName, flowAttributes.get(flowAttributeName));
}
}
}
}<|fim▁end|> | String flowAttributeName = (String) iterator.next(); |
<|file_name|>mssip.rs<|end_file_name|><|fim▁begin|>// Copyright © 2015-2017 winapi-rs developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
//! Microsoft SIP Provider Prototypes and Definitions
use shared::guiddef::GUID;
use shared::minwindef::{BOOL, BYTE, DWORD, LPVOID};
use um::mscat::{CRYPTCATMEMBER, CRYPTCATSTORE};
use um::wincrypt::{
CRYPT_ALGORITHM_IDENTIFIER, CRYPT_ATTRIBUTE_TYPE_VALUE, CRYPT_HASH_BLOB, HCRYPTPROV,
};
use um::winnt::{HANDLE, LPCWSTR, PWSTR, WCHAR};
pub type CRYPT_DIGEST_DATA = CRYPT_HASH_BLOB;
pub const MSSIP_FLAGS_PROHIBIT_RESIZE_ON_CREATE: DWORD = 0x00010000;
pub const MSSIP_FLAGS_USE_CATALOG: DWORD = 0x00020000;
pub const MSSIP_FLAGS_MULTI_HASH: DWORD = 0x00040000;
pub const SPC_INC_PE_RESOURCES_FLAG: DWORD = 0x80;
pub const SPC_INC_PE_DEBUG_INFO_FLAG: DWORD = 0x40;
pub const SPC_INC_PE_IMPORT_ADDR_TABLE_FLAG: DWORD = 0x20;
pub const SPC_EXC_PE_PAGE_HASHES_FLAG: DWORD = 0x10;
pub const SPC_INC_PE_PAGE_HASHES_FLAG: DWORD = 0x100;
pub const SPC_DIGEST_GENERATE_FLAG: DWORD = 0x200;
pub const SPC_DIGEST_SIGN_FLAG: DWORD = 0x400;
pub const SPC_RELAXED_PE_MARKER_CHECK: DWORD = 0x800;
pub const SPC_MARKER_CHECK_SKIP_SIP_INDIRECT_DATA_FLAG: DWORD = 0x00000001;
pub const SPC_MARKER_CHECK_CURRENTLY_SUPPORTED_FLAGS: DWORD
= SPC_MARKER_CHECK_SKIP_SIP_INDIRECT_DATA_FLAG;
pub const MSSIP_ADDINFO_NONE: DWORD = 0;
pub const MSSIP_ADDINFO_FLAT: DWORD = 1;
pub const MSSIP_ADDINFO_CATMEMBER: DWORD = 2;
pub const MSSIP_ADDINFO_BLOB: DWORD = 3;
pub const MSSIP_ADDINFO_NONMSSIP: DWORD = 500;
UNION2!{union SIP_SUBJECTINFO_u {
[usize; 1],
psFlat psFlat_mut: *mut MS_ADDINFO_FLAT,
psCatMember psCatMember_mut: *mut MS_ADDINFO_CATALOGMEMBER,
psBlob psBlob_mut: *mut MS_ADDINFO_BLOB,
}}
STRUCT!{struct SIP_SUBJECTINFO {
cbSize: DWORD,
pgSubjectType: *mut GUID,
hFile: HANDLE,
pwsFileName: LPCWSTR,
pwsDisplayName: LPCWSTR,
dwReserved1: DWORD,
dwIntVersion: DWORD,
hProv: HCRYPTPROV,
DigestAlgorithm: CRYPT_ALGORITHM_IDENTIFIER,
dwFlags: DWORD,
dwEncodingType: DWORD,
dwReserved2: DWORD,
fdwCAPISettings: DWORD,
fdwSecuritySettings: DWORD,
dwIndex: DWORD,
dwUnionChoice: DWORD,
u: SIP_SUBJECTINFO_u,
pClientData: LPVOID,
}}
pub type LPSIP_SUBJECTINFO = *mut SIP_SUBJECTINFO;
STRUCT!{struct MS_ADDINFO_FLAT {
cbStruct: DWORD,
pIndirectData: *mut SIP_INDIRECT_DATA,
}}
pub type PMS_ADDINFO_FLAT = *mut MS_ADDINFO_FLAT;
STRUCT!{struct MS_ADDINFO_CATALOGMEMBER {
cbStruct: DWORD,
pStore: *mut CRYPTCATSTORE,
pMember: *mut CRYPTCATMEMBER,
}}
pub type PMS_ADDINFO_CATALOGMEMBER = *mut MS_ADDINFO_CATALOGMEMBER;
STRUCT!{struct MS_ADDINFO_BLOB {
cbStruct: DWORD,
cbMemObject: DWORD,
pbMemObject: *mut BYTE,
cbMemSignedMsg: DWORD,
pbMemSignedMsg: *mut BYTE,
}}
pub type PMS_ADDINFO_BLOB = *mut MS_ADDINFO_BLOB;
STRUCT!{struct SIP_CAP_SET_V2 {
cbSize: DWORD,
dwVersion: DWORD,
isMultiSign: BOOL,
dwReserved: DWORD,
}}
pub type PSIP_CAP_SET_V2 = *mut SIP_CAP_SET_V2;
UNION2!{union SIP_CAP_SET_V3_u {
[u32; 1],<|fim▁hole|>STRUCT!{struct SIP_CAP_SET_V3 {
cbSize: DWORD,
dwVersion: DWORD,
isMultiSign: BOOL,
u: SIP_CAP_SET_V3_u,
}}
pub type PSIP_CAP_SET_V3 = *mut SIP_CAP_SET_V3;
pub type SIP_CAP_SET = SIP_CAP_SET_V3;
pub type PSIP_CAP_SET = PSIP_CAP_SET_V3;
pub const SIP_CAP_SET_VERSION_2: DWORD = 2;
pub const SIP_CAP_SET_VERSION_3: DWORD = 3;
pub const SIP_CAP_SET_CUR_VER: DWORD = 3;
pub const SIP_CAP_FLAG_SEALING: DWORD = 0x00000001;
STRUCT!{struct SIP_INDIRECT_DATA {
Data: CRYPT_ATTRIBUTE_TYPE_VALUE,
DigestAlgorithm: CRYPT_ALGORITHM_IDENTIFIER,
Digest: CRYPT_HASH_BLOB,
}}
pub type PSIP_INDIRECT_DATA = *mut SIP_INDIRECT_DATA;
extern "system" {
pub fn CryptSIPGetSignedDataMsg(
pSubjectInfo: *mut SIP_SUBJECTINFO,
pdwEncodingType: *mut DWORD,
dwIndex: DWORD,
pcbSignedDataMsg: *mut DWORD,
pbSignedDataMsg: *mut BYTE,
) -> BOOL;
}
FN!{stdcall pCryptSIPGetSignedDataMsg(
pSubjectInfo: *mut SIP_SUBJECTINFO,
pdwEncodingType: *mut DWORD,
dwIndex: DWORD,
pcbSignedDataMsg: *mut DWORD,
pbSignedDataMsg: *mut BYTE,
) -> BOOL}
extern "system" {
pub fn CryptSIPPutSignedDataMsg(
pSubjectInfo: *mut SIP_SUBJECTINFO,
dwEncodingType: DWORD,
pdwIndex: *mut DWORD,
cbSignedDataMsg: DWORD,
pbSignedDataMsg: *mut BYTE,
) -> BOOL;
}
FN!{stdcall pCryptSIPPutSignedDataMsg(
pSubjectInfo: *mut SIP_SUBJECTINFO,
dwEncodingType: DWORD,
pdwIndex: *mut DWORD,
cbSignedDataMsg: DWORD,
pbSignedDataMsg: *mut BYTE,
) -> BOOL}
extern "system" {
pub fn CryptSIPCreateIndirectData(
pSubjectInfo: *mut SIP_SUBJECTINFO,
pcbIndirectData: *mut DWORD,
pIndirectData: *mut SIP_INDIRECT_DATA,
) -> BOOL;
}
FN!{stdcall pCryptSIPCreateIndirectData(
pSubjectInfo: *mut SIP_SUBJECTINFO,
pcbIndirectData: *mut DWORD,
pIndirectData: *mut SIP_INDIRECT_DATA,
) -> BOOL}
extern "system" {
pub fn CryptSIPVerifyIndirectData(
pSubjectInfo: *mut SIP_SUBJECTINFO,
pIndirectData: *mut SIP_INDIRECT_DATA,
) -> BOOL;
}
FN!{stdcall pCryptSIPVerifyIndirectData(
pSubjectInfo: *mut SIP_SUBJECTINFO,
pIndirectData: *mut SIP_INDIRECT_DATA,
) -> BOOL}
extern "system" {
pub fn CryptSIPRemoveSignedDataMsg(
pSubjectInfo: *mut SIP_SUBJECTINFO,
dwIndex: DWORD,
) -> BOOL;
}
FN!{stdcall pCryptSIPRemoveSignedDataMsg(
pSubjectInfo: *mut SIP_SUBJECTINFO,
dwIndex: DWORD,
) -> BOOL}
STRUCT!{struct SIP_DISPATCH_INFO {
cbSize: DWORD,
hSIP: HANDLE,
pfGet: pCryptSIPGetSignedDataMsg,
pfPut: pCryptSIPPutSignedDataMsg,
pfCreate: pCryptSIPCreateIndirectData,
pfVerify: pCryptSIPVerifyIndirectData,
pfRemove: pCryptSIPRemoveSignedDataMsg,
}}
pub type LPSIP_DISPATCH_INFO = *mut SIP_DISPATCH_INFO;
FN!{stdcall pfnIsFileSupported(
hFile: HANDLE,
pgSubject: *mut GUID,
) -> BOOL}
FN!{stdcall pfnIsFileSupportedName(
pwszFileName: *mut WCHAR,
pgSubject: *mut GUID,
) -> BOOL}
STRUCT!{struct SIP_ADD_NEWPROVIDER {
cbStruct: DWORD,
pgSubject: *mut GUID,
pwszDLLFileName: *mut WCHAR,
pwszMagicNumber: *mut WCHAR,
pwszIsFunctionName: *mut WCHAR,
pwszGetFuncName: *mut WCHAR,
pwszPutFuncName: *mut WCHAR,
pwszCreateFuncName: *mut WCHAR,
pwszVerifyFuncName: *mut WCHAR,
pwszRemoveFuncName: *mut WCHAR,
pwszIsFunctionNameFmt2: *mut WCHAR,
pwszGetCapFuncName: PWSTR,
}}
pub type PSIP_ADD_NEWPROVIDER = *mut SIP_ADD_NEWPROVIDER;
pub const SIP_MAX_MAGIC_NUMBER: DWORD = 4;
extern "system" {
pub fn CryptSIPLoad(
pgSubject: *const GUID,
dwFlags: DWORD,
pSipDispatch: *mut SIP_DISPATCH_INFO,
) -> BOOL;
pub fn CryptSIPRetrieveSubjectGuid(
FileName: LPCWSTR,
hFileIn: HANDLE,
pgSubject: *mut GUID,
) -> BOOL;
pub fn CryptSIPRetrieveSubjectGuidForCatalogFile(
FileName: LPCWSTR,
hFileIn: HANDLE,
pgSubject: *mut GUID,
) -> BOOL;
pub fn CryptSIPAddProvider(
psNewProv: *mut SIP_ADD_NEWPROVIDER,
) -> BOOL;
pub fn CryptSIPRemoveProvider(
pgProv: *mut GUID,
) -> BOOL;
pub fn CryptSIPGetCaps(
pSubjInfo: *mut SIP_SUBJECTINFO,
pCaps: *mut SIP_CAP_SET,
) -> BOOL;
}
FN!{stdcall pCryptSIPGetCaps(
pSubjInfo: *mut SIP_SUBJECTINFO,
pCaps: *mut SIP_CAP_SET,
) -> BOOL}
extern "system" {
pub fn CryptSIPGetSealedDigest(
pSubjectInfo: *mut SIP_SUBJECTINFO,
pSig: *const BYTE,
dwSig: DWORD,
pbDigest: *mut BYTE,
pcbDigest: *mut DWORD,
) -> BOOL;
}
FN!{stdcall pCryptSIPGetSealedDigest(
pSubjectInfo: *mut SIP_SUBJECTINFO,
pSig: *const BYTE,
dwSig: DWORD,
pbDigest: *mut BYTE,
pcbDigest: *mut DWORD,
) -> BOOL}<|fim▁end|> | dwFlags dwFlags_mut: DWORD,
dwReserved dwReserved_mut: DWORD,
}} |
<|file_name|>test_unit_outdated.py<|end_file_name|><|fim▁begin|>import datetime
import os
import sys
from contextlib import contextmanager
import freezegun
import pretend
import pytest
from pip._vendor import lockfile
from pip._internal.index import InstallationCandidate
from pip._internal.utils import outdated
class MockPackageFinder(object):
BASE_URL = 'https://pypi.python.org/simple/pip-{0}.tar.gz'
PIP_PROJECT_NAME = 'pip'
INSTALLATION_CANDIDATES = [
InstallationCandidate(PIP_PROJECT_NAME, '6.9.0',
BASE_URL.format('6.9.0')),
InstallationCandidate(PIP_PROJECT_NAME, '3.3.1',
BASE_URL.format('3.3.1')),
InstallationCandidate(PIP_PROJECT_NAME, '1.0',
BASE_URL.format('1.0')),
]
def __init__(self, *args, **kwargs):
pass
def find_all_candidates(self, project_name):
return self.INSTALLATION_CANDIDATES
def _options():
''' Some default options that we pass to outdated.pip_version_check '''
return pretend.stub(
find_links=False, extra_index_urls=[], index_url='default_url',
pre=False, trusted_hosts=False, process_dependency_links=False,
)
@pytest.mark.parametrize(
[
'stored_time',
'installed_ver',
'new_ver',
'check_if_upgrade_required',
'check_warn_logs',
],
[
# Test we return None when installed version is None
('1970-01-01T10:00:00Z', None, '1.0', False, False),
# Need an upgrade - upgrade warning should print
('1970-01-01T10:00:00Z', '1.0', '6.9.0', True, True),
# No upgrade - upgrade warning should not print
('1970-01-9T10:00:00Z', '6.9.0', '6.9.0', False, False),
]
)
def test_pip_version_check(monkeypatch, stored_time, installed_ver, new_ver,
check_if_upgrade_required, check_warn_logs):
monkeypatch.setattr(outdated, 'get_installed_version',
lambda name: installed_ver)
monkeypatch.setattr(outdated, 'PackageFinder', MockPackageFinder)
monkeypatch.setattr(outdated.logger, 'warning',
pretend.call_recorder(lambda *a, **kw: None))
monkeypatch.setattr(outdated.logger, 'debug',
pretend.call_recorder(lambda s, exc_info=None: None))
fake_state = pretend.stub(
state={"last_check": stored_time, 'pypi_version': installed_ver},<|fim▁hole|> )
monkeypatch.setattr(
outdated, 'load_selfcheck_statefile', lambda: fake_state
)
with freezegun.freeze_time(
"1970-01-09 10:00:00",
ignore=[
"six.moves",
"pip._vendor.six.moves",
"pip._vendor.requests.packages.urllib3.packages.six.moves",
]):
latest_pypi_version = outdated.pip_version_check(None, _options())
# See we return None if not installed_version
if not installed_ver:
assert not latest_pypi_version
# See that we saved the correct version
elif check_if_upgrade_required:
assert fake_state.save.calls == [
pretend.call(new_ver, datetime.datetime(1970, 1, 9, 10, 00, 00)),
]
else:
# Make sure no Exceptions
assert not outdated.logger.debug.calls
# See that save was not called
assert fake_state.save.calls == []
# Ensure we warn the user or not
if check_warn_logs:
assert len(outdated.logger.warning.calls) == 1
else:
assert len(outdated.logger.warning.calls) == 0
def test_virtualenv_state(monkeypatch):
CONTENT = '{"last_check": "1970-01-02T11:00:00Z", "pypi_version": "1.0"}'
fake_file = pretend.stub(
read=pretend.call_recorder(lambda: CONTENT),
write=pretend.call_recorder(lambda s: None),
)
@pretend.call_recorder
@contextmanager
def fake_open(filename, mode='r'):
yield fake_file
monkeypatch.setattr(outdated, 'open', fake_open, raising=False)
monkeypatch.setattr(outdated, 'running_under_virtualenv',
pretend.call_recorder(lambda: True))
monkeypatch.setattr(sys, 'prefix', 'virtually_env')
state = outdated.load_selfcheck_statefile()
state.save('2.0', datetime.datetime.utcnow())
assert len(outdated.running_under_virtualenv.calls) == 1
expected_path = os.path.join('virtually_env', 'pip-selfcheck.json')
assert fake_open.calls == [
pretend.call(expected_path),
pretend.call(expected_path, 'w'),
]
# json.dumps will call this a number of times
assert len(fake_file.write.calls)
def test_global_state(monkeypatch, tmpdir):
CONTENT = '''{"pip_prefix": {"last_check": "1970-01-02T11:00:00Z",
"pypi_version": "1.0"}}'''
fake_file = pretend.stub(
read=pretend.call_recorder(lambda: CONTENT),
write=pretend.call_recorder(lambda s: None),
)
@pretend.call_recorder
@contextmanager
def fake_open(filename, mode='r'):
yield fake_file
monkeypatch.setattr(outdated, 'open', fake_open, raising=False)
@pretend.call_recorder
@contextmanager
def fake_lock(filename):
yield
monkeypatch.setattr(outdated, "check_path_owner", lambda p: True)
monkeypatch.setattr(lockfile, 'LockFile', fake_lock)
monkeypatch.setattr(os.path, "exists", lambda p: True)
monkeypatch.setattr(outdated, 'running_under_virtualenv',
pretend.call_recorder(lambda: False))
cache_dir = tmpdir / 'cache_dir'
monkeypatch.setattr(outdated, 'USER_CACHE_DIR', cache_dir)
monkeypatch.setattr(sys, 'prefix', tmpdir / 'pip_prefix')
state = outdated.load_selfcheck_statefile()
state.save('2.0', datetime.datetime.utcnow())
assert len(outdated.running_under_virtualenv.calls) == 1
expected_path = cache_dir / 'selfcheck.json'
assert fake_lock.calls == [pretend.call(expected_path)]
assert fake_open.calls == [
pretend.call(expected_path),
pretend.call(expected_path),
pretend.call(expected_path, 'w'),
]
# json.dumps will call this a number of times
assert len(fake_file.write.calls)<|fim▁end|> | save=pretend.call_recorder(lambda v, t: None), |
<|file_name|>JSExceptionArgNoBool.cpp<|end_file_name|><|fim▁begin|>/*
* JSExceptionArgNoBool.cpp
*
* Created on: 15/gen/2015
* Author: Paolo Achdjian
*/
#include <sstream>
#include "JSExceptionArgNoBool.h"
namespace zigbee {
JSExceptionArgNoBool::JSExceptionArgNoBool() {
std::stringstream stream;
stream << "Invalid parameter: expected an argument of type boolean";
message = stream.str();
<|fim▁hole|>}
} /* namespace zigbee */<|fim▁end|> | }
JSExceptionArgNoBool::~JSExceptionArgNoBool() { |
<|file_name|>csnStandardModuleProject.py<|end_file_name|><|fim▁begin|>## @package csnStandardModuleProject
# Definition of the methods used for project configuration.
# This should be the only CSnake import in a project configuration.
import csnUtility
import csnProject
import csnBuild
import os.path
import inspect
from csnProject import GenericProject
class StandardModuleProject(GenericProject):
""" GenericProject with applications and modules in specific folders. """
def __init__(self, _name, _type, _sourceRootFolder = None, _categories = None):
if _sourceRootFolder is None:
filename = csnProject.FindFilename(1)
dirname = os.path.dirname(filename)
_sourceRootFolder = csnUtility.NormalizePath(dirname, _correctCase = False)
GenericProject.__init__(self, _name=_name, _type=_type, _sourceRootFolder=_sourceRootFolder, _categories=_categories, _context=csnProject.globalCurrentContext)
self.applicationsProject = None
def AddLibraryModules(self, _libModules):
"""
Adds source files (anything matching *.c??) and public include folders to self, using a set of libmodules.
It is assumed that the root folder of self has a subfolder called libmodules. The subfolders of libmodules should
contain a subfolder called src (e.g. for mymodule, this would be libmodules/mymodule/src).
If the src folder has a subfolder called 'stub', it is also added to the source tree.
_libModules - a list of subfolders of the libmodules folder that should be 'added' to self.
"""
# add sources
sourceRootFolder = self.GetSourceRootFolder()
includeFileExtensions = csnUtility.GetIncludeFileExtensions()
sourceFileExtensions = csnUtility.GetSourceFileExtensions()
for libModule in _libModules:
for stub in ("/stub", ""):
srcFolder = "libmodules/%s/src%s" % (libModule, stub)
srcFolderAbs = "%s/%s" % (sourceRootFolder, srcFolder)
if( os.path.exists(srcFolderAbs) ):
self.AddIncludeFolders([srcFolder])
for extension in sourceFileExtensions:
self.AddSources(["%s/*.%s" % (srcFolder, extension)], _checkExists = 0)
for extension in includeFileExtensions:
self.AddSources(["%s/*.%s" % (srcFolder, extension)], _checkExists = 0)
for libModule in _libModules:
for stub in ("/stub", ""):
includeFolder = "libmodules/%s/include%s" % (libModule, stub)
includeFolderAbs = "%s/%s" % (sourceRootFolder, includeFolder)
if( os.path.exists(includeFolderAbs) ):
self.AddIncludeFolders([includeFolder])
for extension in includeFileExtensions:
self.AddSources(["%s/*.%s" % (includeFolder, extension)], _checkExists = 0)
def AddApplications(self, _modules, _pch="", _applicationDependenciesList=None, _holderName=None, _properties = []):
"""
Creates extra CSnake projects, each project building one application in the 'Applications' subfolder of the current project.
_modules - List of the subfolders within the 'Applications' subfolder that must be scanned for applications.
_pch - If not "", this is the include file used to generate a precompiled header for each application.
"""
dependencies = [self]
if not _applicationDependenciesList is None:
dependencies.extend(_applicationDependenciesList)
if _holderName is None:
_holderName = "%sApplications" % self.name
csnProject.globalCurrentContext.SetSuperSubCategory("Applications", _holderName)
if self.applicationsProject is None:
self.applicationsProject = csnBuild.Project(self.name + "Applications", "container", _sourceRootFolder = self.GetSourceRootFolder(), _categories = [_holderName])
#self.applicationsProject.AddSources([csnUtility.GetDummyCppFilename()], _sourceGroup = "CSnakeGeneratedFiles")
self.applicationsProject.AddProjects([self])
self.AddProjects([self.applicationsProject], _dependency = 0)
# look for an 'applications' or 'Applications' folder
_modulesFolder = "%s/applications" % self.GetSourceRootFolder()
if not os.path.exists(_modulesFolder):
_modulesFolder = "%s/Applications" % self.GetSourceRootFolder()
self.__AddApplications(self.applicationsProject, dependencies, _modules, _modulesFolder, _pch, _holderName, _properties)<|fim▁hole|> project does not depend on these application projects.
It is assumed that _modules is a list containing subfolders of _modulesFolder.
Each subfolder in _modules should contain source files (.cpp, .cxx or .cc), where each source file corresponds to a single application.
Hence, each source file is used to create a new application project. For example, assuming that the _modulesFolder
is called 'Applications', the file 'Applications/Small/Tiny.cpp' will be used to build the 'Tiny' application.
_applicationDependenciesList - List of projects that each new application project is dependent on.
_modulesFolder - Folder containing subfolders with applications.
_modules = List of subfolders of _modulesFolder that should be processed.
_pch - If not "", this is the C++ include file which is used for building a precompiled header file for each application.
"""
for module in _modules:
moduleFolder = "%s/%s" % (_modulesFolder, module)
sourceFiles = []
headerFiles = []
for extension in csnUtility.GetSourceFileExtensions():
sourceFiles.extend(_holderProject.Glob("%s/*.%s" % (moduleFolder, extension)))
for extension in csnUtility.GetIncludeFileExtensions():
headerFiles.extend(_holderProject.Glob("%s/*.%s" % (moduleFolder, extension)))
for sourceFile in sourceFiles:
if os.path.isdir(sourceFile):
continue
name = os.path.splitext( os.path.basename(sourceFile) )[0]
name = name.replace(' ', '_')
if _holderName is None:
_holderName = _holderProject.name
app = csnBuild.Project("%s_%s" % (_holderName, name), "executable", _sourceRootFolder = _holderProject.GetSourceRootFolder())
app.AddIncludeFolders([moduleFolder])
app.AddProjects(_applicationDependenciesList)
app.AddSources([sourceFile])
app.AddProperties( _properties )
# add header files so that they appear in visual studio
app.AddSources(headerFiles)
if( _pch != "" ):
app.SetPrecompiledHeader(_pch)
_holderProject.AddProjects([app])<|fim▁end|> |
def __AddApplications(self, _holderProject, _applicationDependenciesList, _modules, _modulesFolder, _pch = "", _holderName=None, _properties = []):
"""
Creates application projects and adds them to _holderProject (using _holderProject.AddProject). The holder |
<|file_name|>macro-input-future-proofing.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
macro_rules! errors_everywhere {
($ty:ty <) => (); //~ ERROR `$ty:ty` is followed by `<`, which is not allowed for `ty`
($ty:ty < foo ,) => (); //~ ERROR `$ty:ty` is followed by `<`, which is not allowed for `ty`
($ty:ty , ) => ();
( ( $ty:ty ) ) => ();
( { $ty:ty } ) => ();
( [ $ty:ty ] ) => ();
($bl:block < ) => ();
($pa:pat >) => (); //~ ERROR `$pa:pat` is followed by `>`, which is not allowed for `pat`
($pa:pat , ) => ();
($pa:pat | ) => (); //~ ERROR `$pa:pat` is followed by `|`
($pa:pat $pb:pat $ty:ty ,) => ();
//~^ ERROR `$pa:pat` is followed by `$pb:pat`, which is not allowed
//~^^ ERROR `$pb:pat` is followed by `$ty:ty`, which is not allowed
($($ty:ty)* -) => (); //~ ERROR `$ty:ty` is followed by `-`
($($a:ty, $b:ty)* -) => (); //~ ERROR `$b:ty` is followed by `-`
($($ty:ty)-+) => (); //~ ERROR `$ty:ty` is followed by `-`, which is not allowed for `ty`
}
fn main() { }<|fim▁end|> |