| prompt (large_string, lengths 70 to 991k) | completion (large_string, lengths 0 to 1.02k) |
|---|---|
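Each row below pairs a prompt with a completion. The prompt wraps a single source file in fill-in-the-middle markers: `<|file_name|>…<|end_file_name|>` carries the file path, `<|fim▁begin|>` opens the visible prefix, `<|fim▁hole|>` marks the span to be filled in, and `<|fim▁end|>` closes the suffix; the completion column appears to hold the text that belongs in the hole. As a minimal sketch (the helper name and the plain string splits are assumptions, not part of any dataset tooling), one row could be taken apart like this:

def split_fim_row(prompt, completion):
    """Illustrative helper: recover (file_name, prefix, suffix, target) from one row."""
    header, body = prompt.split('<|fim▁begin|>', 1)
    file_name = header.split('<|file_name|>', 1)[1].split('<|end_file_name|>', 1)[0]
    prefix, rest = body.split('<|fim▁hole|>', 1)
    suffix = rest.split('<|fim▁end|>', 1)[0]
    return file_name, prefix, suffix, completion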
<|file_name|>args.rs<|end_file_name|><|fim▁begin|>use super::abi::usercalls::{alloc, raw::ByteBuffer};
use crate::ffi::OsString;
use crate::fmt;
use crate::slice;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::sys::os_str::Buf;
use crate::sys_common::FromInner;
#[cfg_attr(test, linkage = "available_externally")]
#[export_name = "_ZN16__rust_internals3std3sys3sgx4args4ARGSE"]
static ARGS: AtomicUsize = AtomicUsize::new(0);
type ArgsStore = Vec<OsString>;
#[cfg_attr(test, allow(dead_code))]
pub unsafe fn init(argc: isize, argv: *const *const u8) {
if argc != 0 {
let args = unsafe { alloc::User::<[ByteBuffer]>::from_raw_parts(argv as _, argc as _) };
let args = args
.iter()
.map(|a| OsString::from_inner(Buf { inner: a.copy_user_buffer() }))
.collect::<ArgsStore>();
ARGS.store(Box::into_raw(Box::new(args)) as _, Ordering::Relaxed);
}
}
pub fn args() -> Args {
let args = unsafe { (ARGS.load(Ordering::Relaxed) as *const ArgsStore).as_ref() };
if let Some(args) = args { Args(args.iter()) } else { Args([].iter()) }
}
pub struct Args(slice::Iter<'static, OsString>);
impl fmt::Debug for Args {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.as_slice().fmt(f)
}<|fim▁hole|>}
impl Iterator for Args {
type Item = OsString;
fn next(&mut self) -> Option<OsString> {
self.0.next().cloned()
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.0.size_hint()
}
}
impl ExactSizeIterator for Args {
fn len(&self) -> usize {
self.0.len()
}
}
impl DoubleEndedIterator for Args {
fn next_back(&mut self) -> Option<OsString> {
self.0.next_back().cloned()
}
}<|fim▁end|>
| |
<|file_name|>cabi_arm.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_upper_case_globals)]
use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
use llvm::{StructRetAttribute, ZExtAttribute};
use trans::cabi::{FnType, ArgType};
use trans::context::CrateContext;
use trans::type_::Type;
use std::cmp;
pub enum Flavor {
General,
Ios
}
type TyAlignFn = fn(ty: Type) -> uint;
fn align_up_to(off: uint, a: uint) -> uint {
return (off + a - 1) / a * a;
}
fn align(off: uint, ty: Type, align_fn: TyAlignFn) -> uint {
let a = align_fn(ty);
return align_up_to(off, a);
}
fn general_ty_align(ty: Type) -> uint {
match ty.kind() {
Integer => ((ty.int_width() as uint) + 7) / 8,
Pointer => 4,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, general_ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
general_ty_align(elt)
}
Vector => {
let len = ty.vector_length();
let elt = ty.element_type();
general_ty_align(elt) * len
}
_ => panic!("ty_align: unhandled type")
}
}
// For more information see:
// ARMv7
// https://developer.apple.com/library/ios/documentation/Xcode/Conceptual
// /iPhoneOSABIReference/Articles/ARMv7FunctionCallingConventions.html
// ARMv6
// https://developer.apple.com/library/ios/documentation/Xcode/Conceptual
// /iPhoneOSABIReference/Articles/ARMv6FunctionCallingConventions.html
fn ios_ty_align(ty: Type) -> uint {
match ty.kind() {
Integer => cmp::min(4, ((ty.int_width() as uint) + 7) / 8),
Pointer => 4,
Float => 4,
Double => 4,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ios_ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
ios_ty_align(elt)
}
Vector => {
let len = ty.vector_length();
let elt = ty.element_type();
ios_ty_align(elt) * len
}
_ => panic!("ty_align: unhandled type")
}
}
fn ty_size(ty: Type, align_fn: TyAlignFn) -> uint {
match ty.kind() {
Integer => ((ty.int_width() as uint) + 7) / 8,
Pointer => 4,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
let str_tys = ty.field_types();
str_tys.iter().fold(0, |s, t| s + ty_size(*t, align_fn))
} else {
let str_tys = ty.field_types();
let size = str_tys.iter()
.fold(0, |s, t| {
align(s, *t, align_fn) + ty_size(*t, align_fn)
});
align(size, ty, align_fn)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt, align_fn);
len * eltsz
}
Vector => {
let len = ty.vector_length();
let elt = ty.element_type();
let eltsz = ty_size(elt, align_fn);
len * eltsz
}
_ => panic!("ty_size: unhandled type")
}
}
fn classify_ret_ty(ccx: &CrateContext, ty: Type, align_fn: TyAlignFn) -> ArgType {
if is_reg_ty(ty) {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
return ArgType::direct(ty, None, None, attr);
}
let size = ty_size(ty, align_fn);
if size <= 4 {
let llty = if size <= 1 {
Type::i8(ccx)
} else if size <= 2 {
Type::i16(ccx)
} else {
Type::i32(ccx)
};
return ArgType::direct(ty, Some(llty), None, None);
}
ArgType::indirect(ty, Some(StructRetAttribute))
}
fn classify_arg_ty(ccx: &CrateContext, ty: Type, align_fn: TyAlignFn) -> ArgType {
if is_reg_ty(ty) {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
return ArgType::direct(ty, None, None, attr);
}
let align = align_fn(ty);
let size = ty_size(ty, align_fn);
let llty = if align <= 4 {
Type::array(&Type::i32(ccx), ((size + 3) / 4) as u64)
} else {
Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64)
};
ArgType::direct(ty, Some(llty), None, None)
}
fn is_reg_ty(ty: Type) -> bool {
match ty.kind() {
Integer
| Pointer
| Float
| Double
| Vector => true,
_ => false
}
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool,
flavor: Flavor) -> FnType {
let align_fn = match flavor {
Flavor::General => general_ty_align as TyAlignFn,
Flavor::Ios => ios_ty_align as TyAlignFn,
};
<|fim▁hole|> for &aty in atys {
let ty = classify_arg_ty(ccx, aty, align_fn);
arg_tys.push(ty);
}
let ret_ty = if ret_def {
classify_ret_ty(ccx, rty, align_fn)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
}<|fim▁end|>
|
let mut arg_tys = Vec::new();
|
<|file_name|>is_empty.rs<|end_file_name|><|fim▁begin|>use malachite_base_test_util::bench::bucketers::rational_sequence_len_bucketer;
use malachite_base_test_util::bench::{run_benchmark, BenchmarkType};
use malachite_base_test_util::generators::common::{GenConfig, GenMode};
use malachite_base_test_util::generators::unsigned_rational_sequence_gen;
use malachite_base_test_util::runner::Runner;
pub(crate) fn register(runner: &mut Runner) {
register_demo!(runner, demo_rational_sequence_is_empty);
register_bench!(runner, benchmark_rational_sequence_is_empty);
}
fn demo_rational_sequence_is_empty(gm: GenMode, config: GenConfig, limit: usize) {
for xs in unsigned_rational_sequence_gen::<u8>()
.get(gm, &config)
.take(limit)
{
if xs.is_empty() {
println!("{} is empty", xs);
} else {
println!("{} is not empty", xs);
}
}
}
fn benchmark_rational_sequence_is_empty(
gm: GenMode,
config: GenConfig,
limit: usize,
file_name: &str,<|fim▁hole|> "RationalSequence.is_empty()",
BenchmarkType::Single,
unsigned_rational_sequence_gen::<u8>().get(gm, &config),
gm.name(),
limit,
file_name,
&rational_sequence_len_bucketer("xs"),
&mut [("Malachite", &mut |xs| no_out!(xs.is_empty()))],
);
}<|fim▁end|>
|
) {
run_benchmark(
|
<|file_name|>Formatter.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2011 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0<|fim▁hole|> * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.longluo.demo.qrcode.zxing.client.android.encode;
/**
* Encapsulates some simple formatting logic, to aid refactoring in {@link ContactEncoder}.
*
* @author Sean Owen
*/
interface Formatter {
/**
* @param value value to format
* @param index index of value in a list of values to be formatted
* @return formatted value
*/
CharSequence format(CharSequence value, int index);
}<|fim▁end|>
|
*
|
<|file_name|>rmm_diis_old.py<|end_file_name|><|fim▁begin|>"""Module defining ``Eigensolver`` classes."""
import numpy as np
from gpaw.utilities.blas import axpy
from gpaw.eigensolvers.eigensolver import Eigensolver
from gpaw import extra_parameters
class RMM_DIIS(Eigensolver):
"""RMM-DIIS eigensolver
It is expected that the trial wave functions are orthonormal
and the integrals of projector functions and wave functions
``nucleus.P_uni`` are already calculated
Solution steps are:
* Subspace diagonalization
* Calculation of residuals
* Improvement of wave functions: psi' = psi + lambda PR + lambda PR'
* Orthonormalization"""
def __init__(self, keep_htpsit=True, blocksize=10,
fixed_trial_step=None):
self.fixed_trial_step = fixed_trial_step
Eigensolver.__init__(self, keep_htpsit, blocksize)
def iterate_one_k_point(self, hamiltonian, wfs, kpt):
"""Do a single RMM-DIIS iteration for the kpoint"""
psit_nG, R_nG = self.subspace_diagonalize(hamiltonian, wfs, kpt)
self.timer.start('RMM-DIIS')
if self.keep_htpsit:
self.calculate_residuals(kpt, wfs, hamiltonian, psit_nG,
kpt.P_ani, kpt.eps_n, R_nG)
def integrate(a_G, b_G):
return np.real(wfs.integrate(a_G, b_G, global_integral=False))
comm = wfs.gd.comm
B = self.blocksize
dR_xG = wfs.empty(B, q=kpt.q)
P_axi = wfs.pt.dict(B)
error = 0.0
for n1 in range(0, wfs.bd.mynbands, B):
n2 = n1 + B
if n2 > wfs.bd.mynbands:
n2 = wfs.bd.mynbands
B = n2 - n1
P_axi = dict((a, P_xi[:B]) for a, P_xi in P_axi.items())
dR_xG = dR_xG[:B]
n_x = range(n1, n2)
psit_xG = psit_nG[n1:n2]
if self.keep_htpsit:
R_xG = R_nG[n1:n2]
else:
R_xG = wfs.empty(B, q=kpt.q)
wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, psit_xG, R_xG)
wfs.pt.integrate(psit_xG, P_axi, kpt.q)
self.calculate_residuals(kpt, wfs, hamiltonian, psit_xG,
P_axi, kpt.eps_n[n_x], R_xG, n_x)
for n in n_x:
if kpt.f_n is None:
weight = kpt.weight
else:
weight = kpt.f_n[n]
if self.nbands_converge != 'occupied':
if wfs.bd.global_index(n) < self.nbands_converge:
weight = kpt.weight
else:
weight = 0.0
error += weight * integrate(R_xG[n - n1], R_xG[n - n1])
# Precondition the residual:
self.timer.start('precondition')
ekin_x = self.preconditioner.calculate_kinetic_energy(
psit_xG, kpt)
dpsit_xG = self.preconditioner(R_xG, kpt, ekin_x)
self.timer.stop('precondition')
# Calculate the residual of dpsit_G, dR_G = (H - e S) dpsit_G:
wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, dpsit_xG, dR_xG)
self.timer.start('projections')
wfs.pt.integrate(dpsit_xG, P_axi, kpt.q)
self.timer.stop('projections')
self.calculate_residuals(kpt, wfs, hamiltonian, dpsit_xG,
P_axi, kpt.eps_n[n_x], dR_xG, n_x,
calculate_change=True)
# Find lam that minimizes the norm of R'_G = R_G + lam dR_G
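# Minimising |R_G + lam*dR_G|^2 = <R|R> + 2*lam*<R|dR> + lam^2*<dR|dR>
# with respect to lam gives lam = -<R|dR> / <dR|dR>, which is evaluated below.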
RdR_x = np.array([integrate(dR_G, R_G)
for R_G, dR_G in zip(R_xG, dR_xG)])
dRdR_x = np.array([integrate(dR_G, dR_G) for dR_G in dR_xG])
comm.sum(RdR_x)
comm.sum(dRdR_x)
lam_x = -RdR_x / dRdR_x
if extra_parameters.get('PK', False):
lam_x[:] = np.where(lam_x>0.0, lam_x, 0.2)
# Calculate new psi'_G = psi_G + lam pR_G + lam2 pR'_G
# = psi_G + p((lam+lam2) R_G + lam*lam2 dR_G)
for lam, R_G, dR_G in zip(lam_x, R_xG, dR_xG):
if self.fixed_trial_step is None:
lam2 = lam
else:
lam2 = self.fixed_trial_step
R_G *= lam + lam2
axpy(lam * lam2, dR_G, R_G)<|fim▁hole|> self.timer.stop('precondition')
self.timer.stop('RMM-DIIS')
error = comm.sum(error)
return error, psit_nG<|fim▁end|>
|
self.timer.start('precondition')
psit_xG[:] += self.preconditioner(R_xG, kpt, ekin_x)
|
<|file_name|>actions.js<|end_file_name|><|fim▁begin|>export const UPDATE_TREE_FILTER = 'SIMPR_UPDATE_TREE_FILTER';
export const fireUpdateTreeFilter = (filter) => ({
type: UPDATE_TREE_FILTER,
payload: {<|fim▁hole|><|fim▁end|>
|
filter
},
});
|
<|file_name|>issue_user_test.go<|end_file_name|><|fim▁begin|>// Copyright 2017 The Gogs Authors. All rights reserved.<|fim▁hole|>// license that can be found in the LICENSE file.
package models
import (
"testing"
"code.gitea.io/gitea/models/db"
repo_model "code.gitea.io/gitea/models/repo"
"code.gitea.io/gitea/models/unittest"
"github.com/stretchr/testify/assert"
)
func Test_newIssueUsers(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}).(*repo_model.Repository)
newIssue := &Issue{
RepoID: repo.ID,
PosterID: 4,
Index: 6,
Title: "newTestIssueTitle",
Content: "newTestIssueContent",
}
// artificially insert new issue
unittest.AssertSuccessfulInsert(t, newIssue)
assert.NoError(t, newIssueUsers(db.DefaultContext, repo, newIssue))
// issue_user table should now have entries for new issue
unittest.AssertExistsAndLoadBean(t, &IssueUser{IssueID: newIssue.ID, UID: newIssue.PosterID})
unittest.AssertExistsAndLoadBean(t, &IssueUser{IssueID: newIssue.ID, UID: repo.OwnerID})
}
func TestUpdateIssueUserByRead(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
issue := unittest.AssertExistsAndLoadBean(t, &Issue{ID: 1}).(*Issue)
assert.NoError(t, UpdateIssueUserByRead(4, issue.ID))
unittest.AssertExistsAndLoadBean(t, &IssueUser{IssueID: issue.ID, UID: 4}, "is_read=1")
assert.NoError(t, UpdateIssueUserByRead(4, issue.ID))
unittest.AssertExistsAndLoadBean(t, &IssueUser{IssueID: issue.ID, UID: 4}, "is_read=1")
assert.NoError(t, UpdateIssueUserByRead(unittest.NonexistentID, unittest.NonexistentID))
}
func TestUpdateIssueUsersByMentions(t *testing.T) {
assert.NoError(t, unittest.PrepareTestDatabase())
issue := unittest.AssertExistsAndLoadBean(t, &Issue{ID: 1}).(*Issue)
uids := []int64{2, 5}
assert.NoError(t, UpdateIssueUsersByMentions(db.DefaultContext, issue.ID, uids))
for _, uid := range uids {
unittest.AssertExistsAndLoadBean(t, &IssueUser{IssueID: issue.ID, UID: uid}, "is_mentioned=1")
}
}<|fim▁end|>
|
// Use of this source code is governed by a MIT-style
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>#-----------------------------------------------------------------------------
# Copyright (c) 2008-2012, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
"""Routines for IPv4 and IPv6 addresses, subnets and ranges."""
import sys as _sys
import re as _re
from netaddr.core import AddrFormatError, AddrConversionError, num_bits, \
DictDotLookup, NOHOST, N, INET_PTON, P, ZEROFILL, Z
from netaddr.strategy import ipv4 as _ipv4, ipv6 as _ipv6
from netaddr.compat import _sys_maxint, _iter_range, _is_str, _int_type, \
_str_type
#-----------------------------------------------------------------------------
# Pre-compiled regexen used by cidr_merge() function.
RE_CIDR_ADJACENT = _re.compile(r'^([01]+)0 \1[1]$')
RE_CIDR_WITHIN = _re.compile(r'^([01]+) \1[10]+$')
RE_VALID_CIDR_BITS = _re.compile('^[01]+$')
#-----------------------------------------------------------------------------
class BaseIP(object):
"""
An abstract base class for common operations shared between various IP
related subclasses.
"""
__slots__ = ('_value', '_module')
def __init__(self):
"""Constructor."""
self._value = None
self._module = None
def _set_value(self, value):
if not isinstance(value, _int_type):
raise TypeError('int argument expected, not %s' % type(value))
if not 0 <= value <= self._module.max_int:
raise AddrFormatError('value out of bounds for an %s address!' \
% self._module.family_name)
self._value = value
value = property(lambda self: self._value, _set_value,
doc='a positive integer representing the value of IP address/subnet.')
def key(self):
"""
:return: a key tuple that uniquely identifies this IP address.
"""
return NotImplemented
def sort_key(self):
"""
:return: A key tuple used to compare and sort this `IPAddress`
correctly.
"""
return NotImplemented
def __hash__(self):
"""
:return: A hash value uniquely identifying this IP object.
"""
return hash(self.key())
def __eq__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
equivalent to ``other``, ``False`` otherwise.
"""
try:
return self.key() == other.key()
except (AttributeError, TypeError):
return NotImplemented
def __ne__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
not equivalent to ``other``, ``False`` otherwise.
"""
try:
return self.key() != other.key()
except (AttributeError, TypeError):
return NotImplemented
def __lt__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
less than ``other``, ``False`` otherwise.
"""
try:
return self.sort_key() < other.sort_key()
except (AttributeError, TypeError):
return NotImplemented
def __le__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
less than or equal to ``other``, ``False`` otherwise.
"""
try:
return self.sort_key() <= other.sort_key()
except (AttributeError, TypeError):
return NotImplemented
def __gt__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
greater than ``other``, ``False`` otherwise.
"""
try:
return self.sort_key() > other.sort_key()
except (AttributeError, TypeError):
return NotImplemented
def __ge__(self, other):
"""
:param other: an `IPAddress` or `IPNetwork` object.
:return: ``True`` if this `IPAddress` or `IPNetwork` object is
greater than or equal to ``other``, ``False`` otherwise.
"""
try:
return self.sort_key() >= other.sort_key()
except (AttributeError, TypeError):
return NotImplemented
def is_unicast(self):
""":return: ``True`` if this IP is unicast, ``False`` otherwise"""
return not self.is_multicast()
def is_multicast(self):
""":return: ``True`` if this IP is multicast, ``False`` otherwise"""
if self._module == _ipv4:
return self in IPV4_MULTICAST
elif self._module == _ipv6:
return self in IPV6_MULTICAST
def is_loopback(self):
"""
:return: ``True`` if this IP is loopback address (not for network
transmission), ``False`` otherwise.
References: RFC 3330 and 4291.
"""
if self.version == 4:
return self in IPV4_LOOPBACK
elif self.version == 6:
return self == IPV6_LOOPBACK
def is_private(self):
"""
:return: ``True`` if this IP is for internal/private use only
(i.e. non-public), ``False`` otherwise. Reference: RFCs 1918,
3330, 4193, 3879 and 2365.
"""
if self.version == 4:
for cidr in IPV4_PRIVATE:
if self in cidr:
return True
elif self.version == 6:
for cidr in IPV6_PRIVATE:
if self in cidr:
return True
if self.is_link_local():
return True
return False
def is_link_local(self):
"""
:return: ``True`` if this IP is link-local address ``False`` otherwise.
Reference: RFCs 3927 and 4291.
"""
if self.version == 4:
return self in IPV4_LINK_LOCAL
elif self.version == 6:
return self in IPV6_LINK_LOCAL
def is_reserved(self):
"""
:return: ``True`` if this IP is in IANA reserved range, ``False``
otherwise. Reference: RFCs 3330 and 3171.
"""
if self.version == 4:
for cidr in IPV4_RESERVED:
if self in cidr:
return True
elif self.version == 6:
for cidr in IPV6_RESERVED:
if self in cidr:
return True
return False
def is_ipv4_mapped(self):
"""
:return: ``True`` if this IP is an IPv4-mapped IPv6 address, ``False``
otherwise.
"""
return self.version == 6 and (self._value >> 32) == 0xffff
def is_ipv4_compat(self):
"""
:return: ``True`` if this IP is an IPv4-compatible IPv6 address, ``False``
otherwise.
"""
return self.version == 6 and (self._value >> 32) == 0
@property
def info(self):
"""
A record dict containing IANA registration details for this IP address
if available, None otherwise.
"""
# Lazy loading of IANA data structures.
from netaddr.ip.iana import query
return DictDotLookup(query(self))
@property
def version(self):
"""the IP protocol version represented by this IP object."""
return self._module.version
#-----------------------------------------------------------------------------
class IPAddress(BaseIP):
"""
An individual IPv4 or IPv6 address without a net mask or subnet prefix.
To support these and other network based operations, see `IPNetwork`.
"""
__slots__ = ()
def __init__(self, addr, version=None, flags=0):
"""
Constructor.
:param addr: an IPv4 or IPv6 address which may be represented in an
accepted string format, as an unsigned integer or as another
IPAddress object (copy construction).
:param version: (optional) optimizes version detection if specified
and distinguishes between IPv4 and IPv6 for addresses with an
equivalent integer value.
:param flags: (optional) decides which rules are applied to the
interpretation of the addr value. Supported constants are
INET_PTON and ZEROFILL. See the netaddr.core docs for further
details.
"""
super(IPAddress, self).__init__()
if isinstance(addr, BaseIP):
# Copy constructor.
if version is not None and version != addr._module.version:
raise ValueError('cannot switch IP versions using '
'copy constructor!')
self._value = addr._value
self._module = addr._module
else:
# Explicit IP address version.
if version is not None:
if version == 4:
self._module = _ipv4
elif version == 6:
self._module = _ipv6
else:
raise ValueError('%r is an invalid IP version!' % version)
has_upper = hasattr(addr, 'upper')
if has_upper and '/' in addr:
raise ValueError('%s() does not support netmasks or subnet' \
' prefixes! See documentation for details.'
% self.__class__.__name__)
if self._module is None:
# IP version is implicit, detect it from addr.
if isinstance(addr, _int_type):
try:
if 0 <= int(addr) <= _ipv4.max_int:
self._value = int(addr)
self._module = _ipv4
elif _ipv4.max_int < int(addr) <= _ipv6.max_int:
self._value = int(addr)
self._module = _ipv6
except ValueError:
pass
else:
for module in _ipv4, _ipv6:
try:
self._value = module.str_to_int(addr, flags)
except:
continue
else:
self._module = module
break
if self._module is None:
raise AddrFormatError('failed to detect a valid IP ' \
'address from %r' % addr)
else:
# IP version is explicit.
if has_upper:
try:
self._value = self._module.str_to_int(addr, flags)
except AddrFormatError:
raise AddrFormatError('base address %r is not IPv%d'
% (addr, self._module.version))
else:
if 0 <= int(addr) <= self._module.max_int:
self._value = int(addr)
else:
raise AddrFormatError('bad address format: %r' % addr)
def __getstate__(self):
""":returns: Pickled state of an `IPAddress` object."""
return self._value, self._module.version
def __setstate__(self, state):
"""
:param state: data used to unpickle a pickled `IPAddress` object.
"""
value, version = state
self._value = value
if version == 4:
self._module = _ipv4
elif version == 6:
self._module = _ipv6
else:
raise ValueError('unpickling failed for object state: %s' \
% str(state))
def is_hostmask(self):
"""
:return: ``True`` if this IP address is a host mask, ``False`` otherwise.
"""
int_val = self._value + 1
return (int_val & (int_val - 1) == 0)
def is_netmask(self):
"""
:return: ``True`` if this IP address is a network mask, ``False`` otherwise.
"""
int_val = (self._value ^ self._module.max_int) + 1
return (int_val & (int_val - 1) == 0)
def __iadd__(self, num):
"""
Increases the numerical value of this IPAddress by num.
An IndexError is raised if result exceeds maximum IP address value or
is less than zero.
:param num: size of IP address increment.
"""
new_value = self._value + num
if 0 <= new_value <= self._module.max_int:
self._value = new_value
return self
raise IndexError('result outside valid IP address boundary!')
def __isub__(self, num):
"""
Decreases the numerical value of this IPAddress by num.
An IndexError is raised if result is less than zero or exceeds maximum
IP address value.
:param num: size of IP address decrement.
"""
new_value = self._value - num
if 0 <= new_value <= self._module.max_int:
self._value = new_value
return self
raise IndexError('result outside valid IP address boundary!')
def __add__(self, num):
"""
Add the numerical value of this IP address to num and provide the
result as a new IPAddress object.
:param num: size of IP address increase.
:return: a new IPAddress object with its numerical value increased by num.
"""
new_value = self._value + num
if 0 <= new_value <= self._module.max_int:
return self.__class__(new_value, self.version)
raise IndexError('result outside valid IP address boundary!')
__radd__ = __add__
def __sub__(self, num):
"""
Subtract the numerical value of this IP address from num providing
the result as a new IPAddress object.
:param num: size of IP address decrease.
:return: a new IPAddress object with its numerical value decreased by num.
"""
new_value = self._value - num
if 0 <= new_value <= self._module.max_int:
return self.__class__(new_value, self.version)
raise IndexError('result outside valid IP address boundary!')
def __rsub__(self, num):
"""
Subtract num (lvalue) from the numerical value of this IP address
(rvalue) providing the result as a new IPAddress object.
:param num: size of IP address decrease.
:return: a new IPAddress object with its numerical value decreased by num.
"""
new_value = num - self._value
if 0 <= new_value <= self._module.max_int:
return self.__class__(new_value, self.version)
raise IndexError('result outside valid IP address boundary!')
def key(self):
"""
:return: a key tuple that uniquely identifies this IP address.
"""
# NB - we return the value here twice because this IP Address may
# be sorted with a list of networks and it should still end up
# in the expected order.
return self.version, self._value
def sort_key(self):
""":return: A key tuple used to compare and sort this `IPAddress` correctly."""
return self.version, self._value, self._module.width
def __int__(self):
""":return: the value of this IP address as an unsigned integer"""
return self._value
def __long__(self):
""":return: the value of this IP address as an unsigned integer"""
return self._value
def __oct__(self):
""":return: an octal string representation of this IP address."""
# Python 2.x
if self._value == 0:
return '0'
return '0%o' % self._value
def __hex__(self):
""":return: a hexadecimal string representation of this IP address."""
# Python 2.x
return '0x%x' % self._value
def __index__(self):
"""
:return: return the integer value of this IP address when called by \
hex(), oct() or bin().
"""
# Python 3.x
return self._value
def bits(self, word_sep=None):
"""
:param word_sep: (optional) the separator to insert between words.
Default: None - use default separator for address type.
:return: the value of this IP address as a binary digit string."""
return self._module.int_to_bits(self._value, word_sep)
@property
def packed(self):
"""The value of this IP address as a packed binary string."""
return self._module.int_to_packed(self._value)
@property
def words(self):
"""
A list of unsigned integer words (octets for IPv4, hextets for IPv6)
found in this IP address.
"""
return self._module.int_to_words(self._value)
@property
def bin(self):
"""
The value of this IP address in standard Python binary
representational form (0bxxx). A back port of the format provided by
the builtin bin() function found in Python 2.6.x and higher.
"""
return self._module.int_to_bin(self._value)
@property
def reverse_dns(self):
"""The reverse DNS lookup record for this IP address"""
return self._module.int_to_arpa(self._value)
def ipv4(self):
"""
Raises an `AddrConversionError` if IPv6 address cannot be converted
to IPv4.
:return: A numerically equivalent version 4 `IPAddress` object.
"""
ip = None
klass = self.__class__
if self.version == 4:
ip = klass(self._value, 4)
elif self.version == 6:
if 0 <= self._value <= _ipv4.max_int:
ip = klass(self._value, 4)
elif _ipv4.max_int <= self._value <= 0xffffffffffff:
ip = klass(self._value - 0xffff00000000, 4)
else:
raise AddrConversionError('IPv6 address %s unsuitable for ' \
'conversion to IPv4!' % self)
return ip
def ipv6(self, ipv4_compatible=False):
"""
.. note:: The IPv4-mapped IPv6 address format is now considered \
deprecated. See RFC 4291 or later for details.
:param ipv4_compatible: If ``True`` returns an IPv4-mapped address
(::ffff:x.x.x.x), an IPv4-compatible (::x.x.x.x) address
otherwise. Default: False (IPv4-mapped).
:return: A numerically equivalent version 6 `IPAddress` object.
"""
ip = None
klass = self.__class__
if self.version == 6:
if ipv4_compatible and \
(0xffff00000000 <= self._value <= 0xffffffffffff):
ip = klass(self._value - 0xffff00000000, 6)
else:
ip = klass(self._value, 6)
elif self.version == 4:
# IPv4-Compatible IPv6 address
ip = klass(self._value, 6)
if not ipv4_compatible:
# IPv4-Mapped IPv6 address
ip = klass(0xffff00000000 + self._value, 6)
return ip
def format(self, dialect=None):
"""
Only relevant for IPv6 addresses. Has no effect for IPv4.
:param dialect: An ipv6_* dialect class.
:return: an alternate string representation for this IP address.
"""
if dialect is not None:
if not hasattr(dialect, 'word_fmt'):
raise TypeError(
'custom dialects should subclass ipv6_verbose!')
return self._module.int_to_str(self._value, dialect=dialect)
def __or__(self, other):
"""
:param other: An `IPAddress` object (or other int-like object).
:return: bitwise OR (x | y) between the integer value of this IP
address and ``other``.
"""
return self.__class__(self._value | int(other), self.version)
def __and__(self, other):
"""
:param other: An `IPAddress` object (or other int-like object).
:return: bitwise AND (x & y) between the integer value of this IP
address and ``other``.
"""
return self.__class__(self._value & int(other), self.version)
def __xor__(self, other):
"""
:param other: An `IPAddress` object (or other int-like object).
:return: bitwise exclusive OR (x ^ y) between the integer value of
this IP address and ``other``.
"""
return self.__class__(self._value ^ int(other), self.version)
def __lshift__(self, numbits):
"""
:param numbits: size of bitwise shift.
:return: an `IPAddress` object based on this one with its integer
value left shifted by ``numbits``.
"""
return self.__class__(self._value << numbits, self.version)
def __rshift__(self, numbits):
"""
:param numbits: size of bitwise shift.
:return: an `IPAddress` object based on this one with its integer
value right shifted by ``numbits``.
"""
return self.__class__(self._value >> numbits, self.version)
def __nonzero__(self):
""":return: ``True`` if the numerical value of this IP address is not \
zero, ``False`` otherwise."""
# Python 2.x.
return bool(self._value)
__bool__ = __nonzero__ # Python 3.x.
def __str__(self):
""":return: IP address in presentational format"""
return self._module.int_to_str(self._value)
def __repr__(self):
""":return: Python statement to create an equivalent object"""
return "%s('%s')" % (self.__class__.__name__, self)
#-----------------------------------------------------------------------------
class IPListMixin(object):
"""
A mixin class providing shared list-like functionality to classes
representing groups of IP addresses.
"""
def __iter__(self):
"""
:return: An iterator providing access to all `IPAddress` objects
within range represented by this ranged IP object.
"""
start_ip = IPAddress(self.first, self.version)
end_ip = IPAddress(self.last, self.version)
return iter_iprange(start_ip, end_ip)
@property
def size(self):
"""
The total number of IP addresses within this ranged IP object.
"""
return int(self.last - self.first + 1)
def __len__(self):
"""
:return: the number of IP addresses in this ranged IP object. Raises
an `IndexError` if size > system max int (a Python 2.x
limitation). Use the .size property for subnets of any size.
"""
size = self.size
if size > _sys_maxint:
raise IndexError(("range contains more than %d (index size max) "
"IP addresses! Use the .size property instead." % _sys_maxint))
return size
def __getitem__(self, index):
"""
:return: The IP address(es) in this `IPNetwork` object referenced by
index or slice. As slicing can produce large sequences of objects
an iterator is returned instead of the more usual `list`.
"""
item = None
if hasattr(index, 'indices'):
if self._module.version == 6:
raise TypeError('IPv6 slices are not supported!')
(start, stop, step) = index.indices(self.size)
if (start + step < 0) or (step > stop):
# step value exceeds start and stop boundaries.
item = iter([IPAddress(self.first, self.version)])
else:
start_ip = IPAddress(self.first + start, self.version)
end_ip = IPAddress(self.first + stop - step, self.version)
item = iter_iprange(start_ip, end_ip, step)
else:
try:
index = int(index)
if (- self.size) <= index < 0:
# negative index.
item = IPAddress(self.last + index + 1, self.version)
elif 0 <= index <= (self.size - 1):
# Positive index or zero index.
item = IPAddress(self.first + index, self.version)
else:
raise IndexError('index out of range for address range size!')
except ValueError:
raise TypeError('unsupported index type %r!' % index)
return item
def __contains__(self, other):
"""
:param other: an `IPAddress` or ranged IP object.
:return: ``True`` if other falls within the boundary of this one,
``False`` otherwise.
"""
if self.version != other.version:
return False
if hasattr(other, '_value') and not hasattr(other, '_prefixlen'):
return other._value >= self.first and other._value <= self.last
return other.first >= self.first and other.last <= self.last
def __nonzero__(self):
"""
Ranged IP objects always represent a sequence of at least one IP
address and are therefore always True in the boolean context.
"""
# Python 2.x.
return True
__bool__ = __nonzero__ # Python 3.x.
#-----------------------------------------------------------------------------
def parse_ip_network(module, addr, implicit_prefix=False, flags=0):
if isinstance(addr, tuple):
# CIDR integer tuple
try:
val1, val2 = addr
except ValueError:
raise AddrFormatError('invalid %s tuple!' % module.family_name)
if 0 <= val1 <= module.max_int:
value = val1
if 0 <= val2 <= module.width:
prefixlen = val2
else:
raise AddrFormatError('invalid prefix for %s tuple!' \
% module.family_name)
else:
raise AddrFormatError('invalid address value for %s tuple!' \
% module.family_name)
elif isinstance(addr, _str_type):
# CIDR-like string subnet
if implicit_prefix:
#TODO: deprecate this option in netaddr 0.8.x
addr = cidr_abbrev_to_verbose(addr)
try:
if '/' in addr:
val1, val2 = addr.split('/', 1)
else:
val1 = addr
val2 = None
except ValueError:
raise AddrFormatError('invalid IPNetwork address %s!' % addr)
try:
ip = IPAddress(val1, module.version, flags=INET_PTON)
except AddrFormatError:
if module.version == 4:
# Try a partial IPv4 network address...
expanded_addr = _ipv4.expand_partial_address(val1)
ip = IPAddress(expanded_addr, module.version, flags=INET_PTON)
else:
raise AddrFormatError('invalid IPNetwork address %s!' % addr)
value = ip._value
try:
# Integer CIDR prefix.
prefixlen = int(val2)
except TypeError:
if val2 is None:
# No prefix was specified.
prefixlen = module.width
except ValueError:
# Not an integer prefix, try a netmask/hostmask prefix.
mask = IPAddress(val2, module.version, flags=INET_PTON)
if mask.is_netmask():
prefixlen = module.netmask_to_prefix[mask._value]
elif mask.is_hostmask():
prefixlen = module.hostmask_to_prefix[mask._value]
else:
raise AddrFormatError('addr %r is not a valid IPNetwork!' \
% addr)
if not 0 <= prefixlen <= module.width:
raise AddrFormatError('invalid prefix for %s address!' \
% module.family_name)
else:
raise TypeError('unexpected type %s for addr arg' % type(addr))
if flags & NOHOST:
# Remove host bits.
netmask = module.prefix_to_netmask[prefixlen]
value = value & netmask
return value, prefixlen
#-----------------------------------------------------------------------------
class IPNetwork(BaseIP, IPListMixin):
"""
An IPv4 or IPv6 network or subnet.
A combination of an IP address and a network mask.
Accepts CIDR and several related variants :
a) Standard CIDR::
x.x.x.x/y -> 192.0.2.0/24
x::/y -> fe80::/10
b) Hybrid CIDR format (netmask address instead of prefix), where 'y' \
address represents a valid netmask::
x.x.x.x/y.y.y.y -> 192.0.2.0/255.255.255.0
x::/y:: -> fe80::/ffc0::
c) ACL hybrid CIDR format (hostmask address instead of prefix like \
Cisco's ACL bitmasks), where the 'y' address represents a valid hostmask::
x.x.x.x/y.y.y.y -> 192.0.2.0/0.0.0.255
x::/y:: -> fe80::/3f:ffff:ffff:ffff:ffff:ffff:ffff:ffff
d) Abbreviated CIDR format (as of netaddr 0.7.x this requires the \
optional constructor argument ``implicit_prefix=True``)::
x -> 192
x/y -> 10/8
x.x/y -> 192.168/16
x.x.x/y -> 192.168.0/24
which are equivalent to::
x.0.0.0/y -> 192.0.0.0/24
x.0.0.0/y -> 10.0.0.0/8
x.x.0.0/y -> 192.168.0.0/16
x.x.x.0/y -> 192.168.0.0/24
"""
__slots__ = ('_prefixlen',)
def __init__(self, addr, implicit_prefix=False, version=None, flags=0):
"""
Constructor.
:param addr: an IPv4 or IPv6 address with optional CIDR prefix,
netmask or hostmask. May be an IP address in presentation
(string) format, a tuple containing an integer address and a
network prefix, or another IPAddress/IPNetwork object (copy
construction).
:param implicit_prefix: (optional) if True, the constructor uses
classful IPv4 rules to select a default prefix when one is not
provided. If False it uses the length of the IP address version.
(default: False)
:param version: (optional) optimizes version detection if specified
and distinguishes between IPv4 and IPv6 for addresses with an
equivalent integer value.
:param flags: (optional) decides which rules are applied to the
interpretation of the addr value. Currently only supports the
NOHOST option. See the netaddr.core docs for further details.
"""
super(IPNetwork, self).__init__()
<|fim▁hole|> if hasattr(addr, '_prefixlen'):
# IPNetwork object copy constructor
value = addr._value
module = addr._module
prefixlen = addr._prefixlen
elif hasattr(addr, '_value'):
# IPAddress object copy constructor
value = addr._value
module = addr._module
prefixlen = module.width
elif version == 4:
value, prefixlen = parse_ip_network(_ipv4, addr,
implicit_prefix=implicit_prefix, flags=flags)
module = _ipv4
elif version == 6:
value, prefixlen = parse_ip_network(_ipv6, addr,
implicit_prefix=implicit_prefix, flags=flags)
module = _ipv6
else:
if version is not None:
raise ValueError('%r is an invalid IP version!' % version)
try:
module = _ipv4
value, prefixlen = parse_ip_network(module, addr,
implicit_prefix, flags)
except AddrFormatError:
try:
module = _ipv6
value, prefixlen = parse_ip_network(module, addr,
implicit_prefix, flags)
except AddrFormatError:
pass
if value is None:
raise AddrFormatError('invalid IPNetwork %s' % addr)
self._value = value
self._prefixlen = prefixlen
self._module = module
def __getstate__(self):
""":return: Pickled state of an `IPNetwork` object."""
return self._value, self._prefixlen, self._module.version
def __setstate__(self, state):
"""
:param state: data used to unpickle a pickled `IPNetwork` object.
"""
value, prefixlen, version = state
self._value = value
if version == 4:
self._module = _ipv4
elif version == 6:
self._module = _ipv6
else:
raise ValueError('unpickling failed for object state %s' \
% str(state))
if 0 <= prefixlen <= self._module.width:
self._prefixlen = prefixlen
else:
raise ValueError('unpickling failed for object state %s' \
% str(state))
def _set_prefixlen(self, value):
if not isinstance(value, _int_type):
raise TypeError('int argument expected, not %s' % type(value))
if not 0 <= value <= self._module.width:
raise AddrFormatError('invalid prefix for an %s address!' \
% self._module.family_name)
self._prefixlen = value
prefixlen = property(lambda self: self._prefixlen, _set_prefixlen,
doc='size of the bitmask used to separate the network from the host bits')
@property
def ip(self):
"""
The IP address of this `IPNetwork` object. This may or may not be
the same as the network address, which varies according to the value
of the CIDR subnet prefix.
"""
return IPAddress(self._value, self.version)
@property
def network(self):
"""The network address of this `IPNetwork` object."""
return IPAddress(self._value & int(self.netmask), self.version)
@property
def broadcast(self):
"""The broadcast address of this `IPNetwork` object"""
return IPAddress(self._value | self.hostmask._value, self.version)
@property
def first(self):
"""
The integer value of first IP address found within this `IPNetwork`
object.
"""
return self._value & (self._module.max_int ^ self.hostmask._value)
@property
def last(self):
"""
The integer value of last IP address found within this `IPNetwork`
object.
"""
hostmask = (1 << (self._module.width - self._prefixlen)) - 1
return self._value | hostmask
@property
def netmask(self):
"""The subnet mask of this `IPNetwork` object."""
netmask = self._module.max_int ^ self.hostmask._value
return IPAddress(netmask, self.version)
@property
def hostmask(self):
"""The host mask of this `IPNetwork` object."""
hostmask = (1 << (self._module.width - self._prefixlen)) - 1
return IPAddress(hostmask, self.version)
@property
def cidr(self):
"""
The true CIDR address for this `IPNetwork` object which omits any
host bits to the right of the CIDR subnet prefix.
"""
ip = IPAddress(self._value & int(self.netmask), self.version)
cidr = IPNetwork("%s/%d" % (ip, self.prefixlen))
return cidr
def __iadd__(self, num):
"""
Increases the value of this `IPNetwork` object by the current size
multiplied by ``num``.
An `IndexError` is raised if result exceeds maximum IP address value
or is less than zero.
:param num: (optional) number of `IPNetwork` blocks to increment \
this IPNetwork's value by.
"""
new_value = int(self.network) + (self.size * num)
if (new_value + (self.size - 1)) > self._module.max_int:
raise IndexError('increment exceeds address boundary!')
if new_value < 0:
raise IndexError('increment is less than zero!')
self._value = new_value
return self
def __isub__(self, num):
"""
Decreases the value of this `IPNetwork` object by the current size
multiplied by ``num``.
An `IndexError` is raised if result is less than zero or exceeds
maximum IP address value.
:param num: (optional) number of `IPNetwork` blocks to decrement \
this IPNetwork's value by.
"""
new_value = int(self.network) - (self.size * num)
if new_value < 0:
raise IndexError('decrement is less than zero!')
if (new_value + (self.size - 1)) > self._module.max_int:
raise IndexError('decrement exceeds address boundary!')
self._value = new_value
return self
def key(self):
"""
:return: A key tuple used to uniquely identify this `IPNetwork`.
"""
return self.version, self.first, self.last
def sort_key(self):
"""
:return: A key tuple used to compare and sort this `IPNetwork` correctly.
"""
net_size_bits = self._module.width - num_bits(self.size)
host_bits = self._value - self.first
return self.version, self.first, net_size_bits, host_bits
def ipv4(self):
"""
:return: A numerically equivalent version 4 `IPNetwork` object. \
Raises an `AddrConversionError` if IPv6 address cannot be \
converted to IPv4.
"""
ip = None
klass = self.__class__
if self.version == 4:
ip = klass('%s/%d' % (self.ip, self.prefixlen))
elif self.version == 6:
if 0 <= self._value <= _ipv4.max_int:
addr = _ipv4.int_to_str(self._value)
ip = klass('%s/%d' % (addr, self.prefixlen - 96))
elif _ipv4.max_int <= self._value <= 0xffffffffffff:
addr = _ipv4.int_to_str(self._value - 0xffff00000000)
ip = klass('%s/%d' % (addr, self.prefixlen - 96))
else:
raise AddrConversionError('IPv6 address %s unsuitable for ' \
'conversion to IPv4!' % self)
return ip
def ipv6(self, ipv4_compatible=False):
"""
.. note:: the IPv4-mapped IPv6 address format is now considered \
deprecated. See RFC 4291 or later for details.
:param ipv4_compatible: If ``True`` returns an IPv4-mapped address
(::ffff:x.x.x.x), an IPv4-compatible (::x.x.x.x) address
otherwise. Default: False (IPv4-mapped).
:return: A numerically equivalent version 6 `IPNetwork` object.
"""
ip = None
klass = self.__class__
if self.version == 6:
if ipv4_compatible and \
(0xffff00000000 <= self._value <= 0xffffffffffff):
ip = klass((self._value - 0xffff00000000, self._prefixlen),
version=6)
else:
ip = klass((self._value, self._prefixlen), version=6)
elif self.version == 4:
if ipv4_compatible:
# IPv4-Compatible IPv6 address
ip = klass((self._value, self._prefixlen + 96), version=6)
else:
# IPv4-Mapped IPv6 address
ip = klass((0xffff00000000 + self._value,
self._prefixlen + 96), version=6)
return ip
def previous(self, step=1):
"""
:param step: the number of IP subnets between this `IPNetwork` object
and the expected subnet. Default: 1 (the previous IP subnet).
:return: The adjacent subnet preceding this `IPNetwork` object.
"""
ip_copy = self.__class__('%s/%d' % (self.network, self.prefixlen),
self.version)
ip_copy -= step
return ip_copy
def next(self, step=1):
"""
:param step: the number of IP subnets between this `IPNetwork` object
and the expected subnet. Default: 1 (the next IP subnet).
:return: The adjacent subnet succeeding this `IPNetwork` object.
"""
ip_copy = self.__class__('%s/%d' % (self.network, self.prefixlen),
self.version)
ip_copy += step
return ip_copy
def supernet(self, prefixlen=0):
"""
Provides a list of supernets for this `IPNetwork` object between the
size of the current prefix and (if specified) an endpoint prefix.
:param prefixlen: (optional) a CIDR prefix for the maximum supernet.
Default: 0 - returns all possible supernets.
:return: a tuple of supernet `IPNetwork` objects.
"""
if not 0 <= prefixlen <= self._module.width:
raise ValueError('CIDR prefix /%d invalid for IPv%d!' \
% (prefixlen, self.version))
# Use a copy of self as we'll be editing it.
supernet = self.cidr
supernets = []
while supernet.prefixlen > prefixlen:
supernet.prefixlen -= 1
supernets.append(supernet.cidr)
return list(reversed(supernets))
def subnet(self, prefixlen, count=None, fmt=None):
"""
A generator that divides up this IPNetwork's subnet into smaller
subnets based on a specified CIDR prefix.
:param prefixlen: a CIDR prefix indicating size of subnets to be
returned.
:param count: (optional) number of consecutive IP subnets to be
returned.
:return: an iterator containing IPNetwork subnet objects.
"""
if not 0 <= self.prefixlen <= self._module.width:
raise ValueError('CIDR prefix /%d invalid for IPv%d!' \
% (prefixlen, self.version))
if not self.prefixlen <= prefixlen:
# Don't return anything.
raise StopIteration
# Calculate number of subnets to be returned.
width = self._module.width
max_subnets = 2 ** (width - self.prefixlen) // 2 ** (width - prefixlen)
if count is None:
count = max_subnets
if not 1 <= count <= max_subnets:
raise ValueError('count outside of current IP subnet boundary!')
base_subnet = self._module.int_to_str(self.first)
i = 0
while(i < count):
subnet = self.__class__('%s/%d' % (base_subnet, prefixlen),
self.version)
subnet.value += (subnet.size * i)
subnet.prefixlen = prefixlen
i += 1
yield subnet
def iter_hosts(self):
"""
A generator that provides all the IP addresses that can be assigned
to hosts within the range of this IP object's subnet.
- for IPv4, the network and broadcast addresses are always excluded. \
Any subnet that contains less than 4 IP addresses yields an empty list.
- for IPv6, only the unspecified address '::' is excluded from any \
yielded IP addresses.
:return: an IPAddress iterator
"""
it_hosts = iter([])
if self.version == 4:
# IPv4 logic.
if self.size >= 4:
it_hosts = iter_iprange(IPAddress(self.first+1, self.version),
IPAddress(self.last-1, self.version))
else:
# IPv6 logic.
if self.first == 0:
if self.size != 1:
# Don't return '::'.
it_hosts = iter_iprange(
IPAddress(self.first+1, self.version),
IPAddress(self.last, self.version))
else:
it_hosts = iter(self)
return it_hosts
def __str__(self):
""":return: this IPNetwork in CIDR format"""
addr = self._module.int_to_str(self._value)
return "%s/%s" % (addr, self.prefixlen)
def __repr__(self):
""":return: Python statement to create an equivalent object"""
return "%s('%s')" % (self.__class__.__name__, self)
#-----------------------------------------------------------------------------
class IPRange(BaseIP, IPListMixin):
"""
An arbitrary IPv4 or IPv6 address range.
Formed from a lower and upper bound IP address. The upper bound IP cannot
be numerically smaller than the lower bound and the IP version of both
must match.
"""
__slots__ = ('_start', '_end')
def __init__(self, start, end, flags=0):
"""
Constructor.
:param start: an IPv4 or IPv6 address that forms the lower
boundary of this IP range.
:param end: an IPv4 or IPv6 address that forms the upper
boundary of this IP range.
:param flags: (optional) decides which rules are applied to the
interpretation of the start and end values. Supported constants
are INET_PTON and ZEROFILL. See the netaddr.core docs for further
details.
"""
self._start = IPAddress(start, flags=flags)
self._module = self._start._module
self._end = IPAddress(end, self._module.version, flags=flags)
if int(self._start) > int(self._end):
raise AddrFormatError('lower bound IP greater than upper bound!')
def __getstate__(self):
""":return: Pickled state of an `IPRange` object."""
return self._start.value, self._end.value, self._module.version
def __setstate__(self, state):
"""
:param state: data used to unpickle a pickled `IPRange` object.
"""
start, end, version = state
self._start = IPAddress(start, version)
self._module = self._start._module
self._end = IPAddress(end, version)
@property
def first(self):
"""The integer value of first IP address in this `IPRange` object."""
return int(self._start)
@property
def last(self):
"""The integer value of last IP address in this `IPRange` object."""
return int(self._end)
def key(self):
"""
:return: A key tuple used to uniquely identify this `IPRange`.
"""
return self.version, self.first, self.last
def sort_key(self):
"""
:return: A key tuple used to compare and sort this `IPRange` correctly.
"""
skey = self._module.width - num_bits(self.size)
return self.version, self.first, skey
def cidrs(self):
"""
The list of CIDR addresses found within the lower and upper bound
addresses of this `IPRange`.
"""
return iprange_to_cidrs(self._start, self._end)
def __str__(self):
""":return: this `IPRange` in a common representational format."""
return "%s-%s" % (self._start, self._end)
def __repr__(self):
""":return: Python statement to create an equivalent object"""
return "%s('%s', '%s')" % (self.__class__.__name__,
self._start, self._end)
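# A minimal usage sketch of the IPRange class defined above. The helper name and
# the literal bounds are illustrative assumptions; cidrs() relies on
# iprange_to_cidrs(), defined later in this module.
def _iprange_usage_sketch():
    r = IPRange('192.0.2.1', '192.0.2.15')
    assert r.size == 15                          # inclusive of both bounds
    assert r.first == int(IPAddress('192.0.2.1'))
    return r, r.cidrs()                          # equivalent list of CIDR subnets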
#-----------------------------------------------------------------------------
def iter_unique_ips(*args):
"""
:param args: A list of IP addresses and subnets passed in as arguments.
:return: A generator that flattens out IP subnets, yielding unique
individual IP addresses (no duplicates).
"""
for cidr in cidr_merge(args):
for ip in cidr:
yield ip
#-----------------------------------------------------------------------------
def cidr_abbrev_to_verbose(abbrev_cidr):
"""
A function that converts abbreviated IPv4 CIDRs to their more verbose
equivalent.
:param abbrev_cidr: an abbreviated CIDR.
Uses the old-style classful IP address rules to decide on a default
subnet prefix if one is not explicitly provided.
Only supports IPv4 addresses.
Examples ::
10 - 10.0.0.0/8
10/16 - 10.0.0.0/16
128 - 128.0.0.0/16
128/8 - 128.0.0.0/8
192.168 - 192.168.0.0/16
:return: A verbose CIDR from an abbreviated CIDR or old-style classful \
network address, or the original value if it was not recognised as a \
supported abbreviation.
"""
# Internal function that returns a prefix value based on the old IPv4
# classful network scheme that has been superseded (almost) by CIDR.
def classful_prefix(octet):
octet = int(octet)
if not 0 <= octet <= 255:
raise IndexError('Invalid octet: %r!' % octet)
if 0 <= octet <= 127: # Legacy class 'A' classification.
return 8
elif 128 <= octet <= 191: # Legacy class 'B' classification.
return 16
elif 192 <= octet <= 223: # Legacy class 'C' classification.
return 24
elif 224 <= octet <= 239: # Multicast address range.
return 4
return 32 # Default.
start = ''
tokens = []
prefix = None
if _is_str(abbrev_cidr):
if ':' in abbrev_cidr:
return abbrev_cidr
try:
# Single octet partial integer or string address.
i = int(abbrev_cidr)
tokens = [str(i), '0', '0', '0']
return "%s%s/%s" % (start, '.'.join(tokens), classful_prefix(i))
except ValueError:
# Multi octet partial string address with optional prefix.
part_addr = abbrev_cidr
tokens = []
if part_addr == '':
# Not a recognisable format.
return abbrev_cidr
if '/' in part_addr:
(part_addr, prefix) = part_addr.split('/', 1)
# Check prefix for validity.
if prefix is not None:
try:
if not 0 <= int(prefix) <= 32:
raise ValueError('prefixlen in address %r out of range' \
' for IPv4!' % abbrev_cidr)
except ValueError:
return abbrev_cidr
if '.' in part_addr:
tokens = part_addr.split('.')
else:
tokens = [part_addr]
if 1 <= len(tokens) <= 4:
for i in range(4 - len(tokens)):
tokens.append('0')
else:
# Not a recognisable format.
return abbrev_cidr
if prefix is None:
try:
prefix = classful_prefix(tokens[0])
except ValueError:
return abbrev_cidr
return "%s%s/%s" % (start, '.'.join(tokens), prefix)
except TypeError:
pass
except IndexError:
pass
# Not a recognisable format.
return abbrev_cidr
#-----------------------------------------------------------------------------
def cidr_merge(ip_addrs):
"""
A function that accepts an iterable sequence of IP addresses and subnets
merging them into the smallest possible list of CIDRs. It merges adjacent
subnets where possible, those contained within others and also removes
any duplicates.
:param ip_addrs: an iterable sequence of IP addresses and subnets.
:return: a summarized list of `IPNetwork` objects.
"""
if not hasattr(ip_addrs, '__iter__') or hasattr(ip_addrs, 'keys'):
raise ValueError('A sequence or iterator is expected!')
# Start off using set as we'll remove any duplicates at the start.
ipv4_bit_cidrs = set()
ipv6_bit_cidrs = set()
# Convert IP addresses and subnets into their CIDR bit strings.
ipv4_match_all_found = False
ipv6_match_all_found = False
for ip in ip_addrs:
cidr = IPNetwork(ip)
bits = cidr.network.bits(word_sep='')[0:cidr.prefixlen]
if cidr.version == 4:
if bits == '':
ipv4_match_all_found = True
ipv4_bit_cidrs = set(['']) # Clear all other IPv4 values.
if not ipv4_match_all_found:
ipv4_bit_cidrs.add(bits)
else:
if bits == '':
ipv6_match_all_found = True
ipv6_bit_cidrs = set(['']) # Clear all other IPv6 values.
if not ipv6_match_all_found:
ipv6_bit_cidrs.add(bits)
# Merge binary CIDR addresses where possible.
def _reduce_bit_cidrs(cidrs):
new_cidrs = []
cidrs.sort()
# Multiple passes are required to obtain precise results.
while 1:
finished = True
while (cidrs):
if not new_cidrs:
new_cidrs.append(cidrs.pop(0))
if not cidrs:
break
# lhs and rhs are same size and adjacent.
(new_cidr, subs) = RE_CIDR_ADJACENT.subn(
r'\1', '%s %s' % (new_cidrs[-1], cidrs[0]))
if subs:
# merge lhs with rhs.
new_cidrs[-1] = new_cidr
cidrs.pop(0)
finished = False
else:
# lhs contains rhs.
(new_cidr, subs) = RE_CIDR_WITHIN.subn(
r'\1', '%s %s' % (new_cidrs[-1], cidrs[0]))
if subs:
# keep lhs, discard rhs.
new_cidrs[-1] = new_cidr
cidrs.pop(0)
finished = False
else:
# no matches - accept rhs.
new_cidrs.append(cidrs.pop(0))
if finished:
break
else:
# still seeing matches, reset.
cidrs = new_cidrs
new_cidrs = []
if new_cidrs == ['0', '1']:
# Special case where summary CIDR result is '0.0.0.0/0' or
# '::/0' i.e. the whole IPv4 or IPv6 address space.
new_cidrs = ['']
return new_cidrs
new_cidrs = []
def _bits_to_cidr(bits, module):
if bits == '':
if module.version == 4:
return IPNetwork('0.0.0.0/0', 4)
else:
return IPNetwork('::/0', 6)
if RE_VALID_CIDR_BITS.match(bits) is None:
raise ValueError('%r is an invalid bit string!' % bits)
num_bits = len(bits)
if bits == '':
return IPAddress(module.int_to_str(0), module.version)
else:
bits = bits + '0' * (module.width - num_bits)
return IPNetwork((module.bits_to_int(bits), num_bits),
version=module.version)
# Reduce and format lists of reduced CIDRs.
for bits in _reduce_bit_cidrs(list(ipv4_bit_cidrs)):
new_cidrs.append(_bits_to_cidr(bits, _ipv4))
for bits in _reduce_bit_cidrs(list(ipv6_bit_cidrs)):
new_cidrs.append(_bits_to_cidr(bits, _ipv6))
return new_cidrs
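# A small illustration of cidr_merge() as documented above: two adjacent /25
# networks plus an address already contained in them collapse into a single
# /24. The helper name and literal addresses are illustrative assumptions.
def _cidr_merge_sketch():
    merged = cidr_merge(['192.0.2.0/25', '192.0.2.128/25', '192.0.2.1'])
    assert merged == [IPNetwork('192.0.2.0/24')]
    return merged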
#-----------------------------------------------------------------------------
def cidr_exclude(target, exclude):
"""
Removes an exclude IP address or subnet from target IP subnet.
:param target: the target IP address or subnet to be divided up.
:param exclude: the IP address or subnet to be removed from target.
:return: list of `IPNetwork` objects remaining after exclusion.
"""
cidrs = []
target = IPNetwork(target)
exclude = IPNetwork(exclude)
if exclude.last < target.first:
# Exclude subnet's upper bound address less than target
# subnet's lower bound.
return [target.cidr]
elif target.last < exclude.first:
# Exclude subnet's lower bound address greater than target
# subnet's upper bound.
return [target.cidr]
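    # The exclusion below works by repeatedly halving the target subnet: at
    # each step the half that does not contain `exclude` is kept as-is, while
    # the half that does contain it is split again, until the split reaches
    # the prefix length of `exclude` itself.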
new_prefixlen = target.prefixlen + 1
if new_prefixlen <= target._module.width:
i_lower = target.first
i_upper = target.first + (2 ** (target._module.width - new_prefixlen))
lower = IPNetwork((i_lower, new_prefixlen))
upper = IPNetwork((i_upper, new_prefixlen))
while exclude.prefixlen >= new_prefixlen:
if exclude in lower:
matched = i_lower
unmatched = i_upper
elif exclude in upper:
matched = i_upper
unmatched = i_lower
else:
# Exclude subnet not within target subnet.
cidrs.append(target.cidr)
break
ip = IPNetwork((unmatched, new_prefixlen))
cidrs.append(ip)
new_prefixlen += 1
if new_prefixlen > target._module.width:
break
i_lower = matched
i_upper = matched + (2 ** (target._module.width - new_prefixlen))
lower = IPNetwork((i_lower, new_prefixlen))
upper = IPNetwork((i_upper, new_prefixlen))
cidrs.sort()
return cidrs
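# Illustrative usage of cidr_exclude() (expected result):
#   >>> cidr_exclude('192.0.2.0/24', '192.0.2.0/25')
#   [IPNetwork('192.0.2.128/25')]
# Removing the lower /25 from the /24 leaves only its upper /25.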
#-----------------------------------------------------------------------------
def spanning_cidr(ip_addrs):
"""
    A function that accepts a sequence of IP addresses and subnets and
    returns a single `IPNetwork` subnet that is large enough to span the
    lower and upper bound IP addresses, possibly overlapping on either end.
:param ip_addrs: sequence of IP addresses and subnets.
:return: a single spanning `IPNetwork` subnet.
"""
sorted_ips = sorted(
[IPNetwork(ip) for ip in ip_addrs])
if not len(sorted_ips) > 1:
raise ValueError('IP sequence must contain at least 2 elements!')
lowest_ip = sorted_ips[0]
highest_ip = sorted_ips[-1]
if lowest_ip.version != highest_ip.version:
raise TypeError('IP sequence cannot contain both IPv4 and IPv6!')
ip = highest_ip.cidr
while ip.prefixlen > 0:
if highest_ip in ip and lowest_ip not in ip:
ip.prefixlen -= 1
else:
break
return ip.cidr
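# Illustrative usage of spanning_cidr() (expected result):
#   >>> spanning_cidr(['192.0.2.0', '192.0.2.100'])
#   IPNetwork('192.0.2.0/25')
# A /25 is the smallest single subnet containing both addresses; a /26 would
# only reach 192.0.2.63.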
#-----------------------------------------------------------------------------
def iter_iprange(start, end, step=1):
"""
A generator that produces IPAddress objects between an arbitrary start
    and stop IP address with intervals of step between them. Sequences
    produced are inclusive of the boundary IPs.
:param start: start IP address.
:param end: end IP address.
:param step: (optional) size of step between IP addresses. Default: 1
:return: an iterator of one or more `IPAddress` objects.
"""
start = IPAddress(start)
end = IPAddress(end)
if start.version != end.version:
raise TypeError('start and stop IP versions do not match!')
version = start.version
step = int(step)
if step == 0:
raise ValueError('step argument cannot be zero')
# We don't need objects from here, just integers.
start = int(start)
stop = int(end)
negative_step = False
if step < 0:
negative_step = True
index = start - step
while True:
index += step
if negative_step:
if not index >= stop:
break
else:
if not index <= stop:
break
yield IPAddress(index, version)
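# Illustrative usage of iter_iprange() (expected result):
#   >>> list(iter_iprange('192.0.2.1', '192.0.2.5'))
#   [IPAddress('192.0.2.1'), IPAddress('192.0.2.2'), IPAddress('192.0.2.3'),
#    IPAddress('192.0.2.4'), IPAddress('192.0.2.5')]
# Both boundary addresses are included, as the docstring above states.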
#-----------------------------------------------------------------------------
def iprange_to_cidrs(start, end):
"""
A function that accepts an arbitrary start and end IP address or subnet
and returns a list of CIDR subnets that fit exactly between the boundaries
of the two with no overlap.
:param start: the start IP address or subnet.
:param end: the end IP address or subnet.
:return: a list of one or more IP addresses and subnets.
"""
cidr_list = []
start = IPNetwork(start)
end = IPNetwork(end)
iprange = [start.first, end.last]
# Get spanning CIDR covering both addresses.
cidr_span = spanning_cidr([start, end])
if cidr_span.first == iprange[0] and cidr_span.last == iprange[-1]:
# Spanning CIDR matches start and end exactly.
cidr_list = [cidr_span]
elif cidr_span.last == iprange[-1]:
# Spanning CIDR matches end exactly.
ip = IPAddress(start)
first_int_val = int(ip)
ip -= 1
cidr_remainder = cidr_exclude(cidr_span, ip)
first_found = False
for cidr in cidr_remainder:
if cidr.first == first_int_val:
first_found = True
if first_found:
cidr_list.append(cidr)
elif cidr_span.first == iprange[0]:
# Spanning CIDR matches start exactly.
ip = IPAddress(end)
last_int_val = int(ip)
ip += 1
cidr_remainder = cidr_exclude(cidr_span, ip)
last_found = False
for cidr in cidr_remainder:
cidr_list.append(cidr)
if cidr.last == last_int_val:
break
elif cidr_span.first <= iprange[0] and cidr_span.last >= iprange[-1]:
# Spanning CIDR overlaps start and end.
ip = IPAddress(start)
first_int_val = int(ip)
ip -= 1
cidr_remainder = cidr_exclude(cidr_span, ip)
# Fix start.
first_found = False
for cidr in cidr_remainder:
if cidr.first == first_int_val:
first_found = True
if first_found:
cidr_list.append(cidr)
# Fix end.
ip = IPAddress(end)
last_int_val = int(ip)
ip += 1
cidr_remainder = cidr_exclude(cidr_list.pop(), ip)
last_found = False
for cidr in cidr_remainder:
cidr_list.append(cidr)
if cidr.last == last_int_val:
break
return cidr_list
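# Illustrative usage of iprange_to_cidrs() (expected result):
#   >>> iprange_to_cidrs('192.0.2.1', '192.0.2.15')
#   [IPNetwork('192.0.2.1/32'), IPNetwork('192.0.2.2/31'),
#    IPNetwork('192.0.2.4/30'), IPNetwork('192.0.2.8/29')]
# The subnets tile the requested range exactly, with no overlap and no
# addresses outside the boundaries.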
#-----------------------------------------------------------------------------
def smallest_matching_cidr(ip, cidrs):
"""
Matches an IP address or subnet against a given sequence of IP addresses
and subnets.
:param ip: a single IP address or subnet.
:param cidrs: a sequence of IP addresses and/or subnets.
:return: the smallest (most specific) matching IPAddress or IPNetwork
object from the provided sequence, None if there was no match.
"""
match = None
if not hasattr(cidrs, '__iter__'):
raise TypeError('IP address/subnet sequence expected, not %r!'
% cidrs)
ip = IPAddress(ip)
for cidr in sorted([IPNetwork(cidr) for cidr in cidrs]):
if ip in cidr:
match = cidr
else:
if match is not None:
break
return match
#-----------------------------------------------------------------------------
def largest_matching_cidr(ip, cidrs):
"""
Matches an IP address or subnet against a given sequence of IP addresses
and subnets.
:param ip: a single IP address or subnet.
:param cidrs: a sequence of IP addresses and/or subnets.
:return: the largest (least specific) matching IPAddress or IPNetwork
object from the provided sequence, None if there was no match.
"""
match = None
if not hasattr(cidrs, '__iter__'):
raise TypeError('IP address/subnet sequence expected, not %r!'
% cidrs)
ip = IPAddress(ip)
for cidr in sorted([IPNetwork(cidr) for cidr in cidrs]):
if ip in cidr:
match = cidr
break
return match
#-----------------------------------------------------------------------------
def all_matching_cidrs(ip, cidrs):
"""
Matches an IP address or subnet against a given sequence of IP addresses
and subnets.
:param ip: a single IP address.
:param cidrs: a sequence of IP addresses and/or subnets.
:return: all matching IPAddress and/or IPNetwork objects from the provided
sequence, an empty list if there was no match.
"""
matches = []
if not hasattr(cidrs, '__iter__'):
raise TypeError('IP address/subnet sequence expected, not %r!'
% cidrs)
ip = IPAddress(ip)
for cidr in sorted([IPNetwork(cidr) for cidr in cidrs]):
if ip in cidr:
matches.append(cidr)
else:
if matches:
break
return matches
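# Illustrative behaviour of the three matching helpers above (expected
# results for nested subnets):
#   >>> nets = ['10.0.0.0/8', '10.1.0.0/16', '10.1.1.0/24']
#   >>> smallest_matching_cidr('10.1.1.1', nets)
#   IPNetwork('10.1.1.0/24')
#   >>> largest_matching_cidr('10.1.1.1', nets)
#   IPNetwork('10.0.0.0/8')
#   >>> all_matching_cidrs('10.1.1.1', nets)
#   [IPNetwork('10.0.0.0/8'), IPNetwork('10.1.0.0/16'), IPNetwork('10.1.1.0/24')]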
#-----------------------------------------------------------------------------
# Cached IPv4 address range lookups.
#-----------------------------------------------------------------------------
IPV4_LOOPBACK = IPNetwork('127.0.0.0/8')
IPV4_PRIVATE = (
IPNetwork('10.0.0.0/8'), # Private-Use Networks
IPNetwork('172.16.0.0/12'), # Private-Use Networks
IPNetwork('192.0.2.0/24'), # Test-Net
IPNetwork('192.168.0.0/16'), # Private-Use Networks
IPRange('239.0.0.0', '239.255.255.255'), # Administrative Multicast
)
IPV4_LINK_LOCAL = IPNetwork('169.254.0.0/16')
IPV4_MULTICAST = IPNetwork('224.0.0.0/4')
IPV4_6TO4 = IPNetwork('192.88.99.0/24') # 6to4 Relay Anycast
IPV4_RESERVED = (
IPNetwork('128.0.0.0/16'), # Reserved but subject to allocation
IPNetwork('191.255.0.0/16'), # Reserved but subject to allocation
IPNetwork('192.0.0.0/24'), # Reserved but subject to allocation
IPNetwork('223.255.255.0/24'), # Reserved but subject to allocation
IPNetwork('240.0.0.0/4'), # Reserved for Future Use
# Reserved multicast
IPRange('234.0.0.0', '238.255.255.255'),
IPRange('225.0.0.0', '231.255.255.255'),
)
#-----------------------------------------------------------------------------
# Cached IPv6 address range lookups.
#-----------------------------------------------------------------------------
IPV6_LOOPBACK = IPAddress('::1')
IPV6_PRIVATE = (
IPNetwork('fc00::/7'), # Unique Local Addresses (ULA)
IPNetwork('fec0::/10'), # Site Local Addresses (deprecated - RFC 3879)
)
IPV6_LINK_LOCAL = IPNetwork('fe80::/10')
IPV6_MULTICAST = IPNetwork('ff00::/8')
IPV6_RESERVED = (
IPNetwork('ff00::/12'), IPNetwork('::/8'),
IPNetwork('0100::/8'), IPNetwork('0200::/7'),
IPNetwork('0400::/6'), IPNetwork('0800::/5'),
IPNetwork('1000::/4'), IPNetwork('4000::/3'),
IPNetwork('6000::/3'), IPNetwork('8000::/3'),
IPNetwork('A000::/3'), IPNetwork('C000::/3'),
IPNetwork('E000::/4'), IPNetwork('F000::/5'),
IPNetwork('F800::/6'), IPNetwork('FE00::/9'),
)<|fim▁end|>
|
value, prefixlen, module = None, None, None
|
<|file_name|>qa_repack_bits_bb.py<|end_file_name|><|fim▁begin|><|fim▁hole|>#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import random
from gnuradio import gr, gr_unittest, blocks
import pmt
class qa_repack_bits_bb (gr_unittest.TestCase):
def setUp (self):
random.seed(0)
self.tb = gr.top_block ()
self.tsb_key = "length"
def tearDown (self):
self.tb = None
def test_001_simple (self):
""" Very simple test, 2 bits -> 1 """
src_data = (0b11, 0b01, 0b10)
expected_data = (0b1, 0b1, 0b1, 0b0, 0b0, 0b1)
k = 2
l = 1
src = blocks.vector_source_b(src_data, False, 1)
repack = blocks.repack_bits_bb(k, l)
sink = blocks.vector_sink_b()
self.tb.connect(src, repack, sink)
self.tb.run ()
self.assertEqual(sink.data(), expected_data)
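    # In the default (LSB-first) packing above, each 2-bit input is emitted
    # low bit first: 0b11 -> 1,1; 0b01 -> 1,0; 0b10 -> 0,1, which is exactly
    # the expected_data sequence checked in test_001_simple.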
def test_001_simple_msb (self):
""" Very simple test, 2 bits -> 1 with MSB set """
src_data = (0b11, 0b01, 0b10)
expected_data = (0b1, 0b1, 0b0, 0b1, 0b1, 0b0)
k = 2
l = 1
src = blocks.vector_source_b(src_data, False, 1)
repack = blocks.repack_bits_bb(k, l, "", False, gr.GR_MSB_FIRST)
sink = blocks.vector_sink_b()
self.tb.connect(src, repack, sink)
self.tb.run ()
self.assertEqual(sink.data(), expected_data)
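    # With gr.GR_MSB_FIRST the same inputs are emitted high bit first:
    # 0b11 -> 1,1; 0b01 -> 0,1; 0b10 -> 1,0, matching expected_data in
    # test_001_simple_msb.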
def test_002_three (self):
""" 8 -> 3 """
src_data = (0b11111101, 0b11111111, 0b11111111)
expected_data = (0b101,) + (0b111,) * 7
k = 8
l = 3
src = blocks.vector_source_b(src_data, False, 1)
repack = blocks.repack_bits_bb(k, l)
sink = blocks.vector_sink_b()
self.tb.connect(src, repack, sink)
self.tb.run ()
self.assertEqual(sink.data(), expected_data)
def test_002_three_msb (self):
""" 8 -> 3 """
src_data = (0b11111101, 0b11111111, 0b11111111)
expected_data = (0b111,) + (0b111,) + (0b011,) + (0b111,) * 5
k = 8
l = 3
src = blocks.vector_source_b(src_data, False, 1)
repack = blocks.repack_bits_bb(k, l, "", False, gr.GR_MSB_FIRST)
sink = blocks.vector_sink_b()
self.tb.connect(src, repack, sink)
self.tb.run ()
self.assertEqual(sink.data(), expected_data)
def test_003_lots_of_bytes (self):
""" Lots and lots of bytes, multiple packer stages """
src_data = tuple([random.randint(0, 255) for x in range(3*5*7*8 * 10)])
src = blocks.vector_source_b(src_data, False, 1)
repack1 = blocks.repack_bits_bb(8, 3)
repack2 = blocks.repack_bits_bb(3, 5)
repack3 = blocks.repack_bits_bb(5, 7)
repack4 = blocks.repack_bits_bb(7, 8)
sink = blocks.vector_sink_b()
self.tb.connect(src, repack1, repack2, repack3, repack4, sink)
self.tb.run ()
self.assertEqual(sink.data(), src_data)
def test_003_lots_of_bytes_msb (self):
""" Lots and lots of bytes, multiple packer stages """
src_data = tuple([random.randint(0, 255) for x in range(3*5*7*8 * 10)])
src = blocks.vector_source_b(src_data, False, 1)
repack1 = blocks.repack_bits_bb(8, 3, "", False, gr.GR_MSB_FIRST)
repack2 = blocks.repack_bits_bb(3, 5, "", False, gr.GR_MSB_FIRST)
repack3 = blocks.repack_bits_bb(5, 7, "", False, gr.GR_MSB_FIRST)
repack4 = blocks.repack_bits_bb(7, 8, "", False, gr.GR_MSB_FIRST)
sink = blocks.vector_sink_b()
self.tb.connect(src, repack1, repack2, repack3, repack4, sink)
self.tb.run ()
self.assertEqual(sink.data(), src_data)
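    # In the two round-trip tests above the 3*5*7*8 factor keeps the total
    # bit count divisible by every intermediate width (8->3->5->7->8), so the
    # final stage reproduces the source bytes with no leftover bits.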
def test_004_three_with_tags (self):
""" 8 -> 3 """
src_data = (0b11111101, 0b11111111)
expected_data = (0b101,) + (0b111,) * 4 + (0b001,)
k = 8
l = 3
src = blocks.vector_source_b(src_data, False, 1)
repack = blocks.repack_bits_bb(k, l, self.tsb_key)
sink = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(gr.sizeof_char, 1, len(src_data), self.tsb_key),
repack,
sink
)
self.tb.run ()
self.assertEqual(len(sink.data()), 1)
self.assertEqual(sink.data()[0], expected_data)
def test_005_three_with_tags_trailing (self):
""" 3 -> 8, trailing bits """
src_data = (0b101,) + (0b111,) * 4 + (0b001,)
expected_data = (0b11111101, 0b11111111)
k = 3
l = 8
src = blocks.vector_source_b(src_data, False, 1)
repack = blocks.repack_bits_bb(k, l, self.tsb_key, True)
sink = blocks.tsb_vector_sink_b(tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(gr.sizeof_char, 1, len(src_data), self.tsb_key),
repack,
sink
)
self.tb.run ()
self.assertEqual(len(sink.data()), 1)
self.assertEqual(sink.data()[0], expected_data)
if __name__ == '__main__':
gr_unittest.run(qa_repack_bits_bb, "qa_repack_bits_bb.xml")<|fim▁end|>
|
#!/usr/bin/env python
|
<|file_name|>test_installer.py<|end_file_name|><|fim▁begin|>"""Smoke tests to check installation health
:Requirement: Installer
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: Installer
:Assignee: desingh
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import re
import pytest
from robottelo import ssh
from robottelo.config import settings
from robottelo.constants import RHEL_6_MAJOR_VERSION
from robottelo.constants import RHEL_7_MAJOR_VERSION
from robottelo.helpers import get_host_info
PREVIOUS_INSTALLER_OPTIONS = {
'--[no-]colors',
'--[no-]enable-certs',
'--[no-]enable-foreman',
'--[no-]enable-foreman-cli',
'--[no-]enable-foreman-compute-ec2',
'--[no-]enable-foreman-compute-gce',
'--[no-]enable-foreman-compute-libvirt',
'--[no-]enable-foreman-compute-openstack',
'--[no-]enable-foreman-compute-ovirt',
'--[no-]enable-foreman-compute-vmware',
'--[no-]enable-foreman-cli-kubevirt',
'--[no-]enable-foreman-cli-katello',
'--[no-]enable-foreman-cli-remote-execution',
'--[no-]enable-foreman-plugin-ansible',
'--[no-]enable-foreman-plugin-bootdisk',
'--[no-]enable-foreman-plugin-discovery',
'--[no-]enable-foreman-plugin-hooks',
'--[no-]enable-foreman-plugin-kubevirt',
'--[no-]enable-foreman-plugin-leapp',
'--[no-]enable-foreman-plugin-openscap',
'--[no-]enable-foreman-plugin-remote-execution',
'--[no-]enable-foreman-plugin-rh-cloud',
'--[no-]enable-foreman-plugin-tasks',
'--[no-]enable-foreman-plugin-templates',
'--[no-]enable-foreman-plugin-webhooks',
'--[no-]enable-foreman-proxy',
'--[no-]enable-foreman-proxy-content',
'--[no-]enable-foreman-proxy-plugin-ansible',
'--[no-]enable-foreman-proxy-plugin-dhcp-infoblox',
'--[no-]enable-foreman-proxy-plugin-dhcp-remote-isc',
'--[no-]enable-foreman-proxy-plugin-discovery',
'--[no-]enable-foreman-proxy-plugin-dns-infoblox',
'--[no-]enable-foreman-proxy-plugin-openscap',
'--[no-]enable-foreman-proxy-plugin-remote-execution-ssh',
'--[no-]enable-foreman-proxy-plugin-shellhooks',
'--[no-]enable-katello',
'--[no-]enable-puppet',
'--[no-]lock-package-versions',
'--[no-]parser-cache',
'--certs-ca-common-name',
'--certs-ca-expiration',
'--certs-city',
'--certs-cname',
'--certs-country',
'--certs-default-ca-name',
'--certs-deploy',
'--certs-expiration',
'--certs-generate',
'--certs-group',
'--certs-node-fqdn',
'--certs-org',
'--certs-org-unit',
'--certs-pki-dir',
'--certs-regenerate',
'--certs-reset',
'--certs-server-ca-cert',
'--certs-server-ca-name',
'--certs-server-cert',
'--certs-server-cert-req',
'--certs-server-key',
'--certs-skip-check',
'--certs-ssl-build-dir',
'--certs-state',
'--certs-tar-file',
'--certs-update-all',
'--certs-update-server',
'--certs-update-server-ca',
'--certs-user',
'--color-of-background',
'--compare-scenarios',
'--detailed-exitcodes',
'--disable-scenario',
'--disable-system-checks',
'--dont-save-answers',
'--enable-scenario',
'--force',
'--foreman-app-root',
'--foreman-apache',
'--foreman-cli-foreman-url',
'--foreman-cli-hammer-plugin-prefix',
'--foreman-cli-manage-root-config',
'--foreman-cli-password',
'--foreman-cli-refresh-cache',
'--foreman-cli-request-timeout',
'--foreman-cli-ssl-ca-file',
'--foreman-cli-username',
'--foreman-cli-use-sessions',
'--foreman-cli-version',
'--foreman-client-ssl-ca',
'--foreman-client-ssl-cert',
'--foreman-client-ssl-key',
'--foreman-compute-ec2-version',
'--foreman-compute-gce-version',
'--foreman-compute-libvirt-version',
'--foreman-compute-openstack-version',
'--foreman-compute-ovirt-version',
'--foreman-compute-vmware-version',
'--foreman-cors-domains',
'--foreman-db-database',
'--foreman-db-host',
'--foreman-db-manage',
'--foreman-db-manage-rake',
'--foreman-db-password',
'--foreman-db-pool',
'--foreman-db-port',
'--foreman-db-root-cert',
'--foreman-db-sslmode',
'--foreman-db-username',
'--foreman-dynflow-manage-services',
'--foreman-dynflow-orchestrator-ensure',
'--foreman-dynflow-redis-url',
'--foreman-dynflow-worker-concurrency',
'--foreman-dynflow-worker-instances',
'--foreman-email-delivery-method',
'--foreman-email-smtp-address',
'--foreman-email-smtp-authentication',
'--foreman-email-smtp-domain',
'--foreman-email-smtp-password',
'--foreman-email-smtp-port',
'--foreman-email-smtp-user-name',
'--foreman-foreman-service-puma-threads-max',
'--foreman-foreman-service-puma-threads-min',
'--foreman-foreman-service-puma-workers',
'--foreman-foreman-url',
'--foreman-group',
'--foreman-hsts-enabled',
'--foreman-http-keytab',
'--foreman-initial-admin-email',
'--foreman-initial-admin-first-name',
'--foreman-initial-admin-last-name',
'--foreman-initial-admin-locale',
'--foreman-initial-admin-password',
'--foreman-initial-admin-username',
'--foreman-initial-admin-timezone',
'--foreman-initial-location',
'--foreman-initial-organization',
'--foreman-ipa-authentication',
'--foreman-ipa-manage-sssd',
'--foreman-keycloak-realm',
'--foreman-keycloak',
'--foreman-keycloak-app-name',
'--foreman-loggers',
'--foreman-logging-layout',
'--foreman-logging-level',
'--foreman-logging-type',
'--foreman-manage-user',
'--foreman-oauth-active',
'--foreman-oauth-consumer-key',
'--foreman-oauth-consumer-secret',
'--foreman-oauth-map-users',
'--foreman-pam-service',
'--foreman-plugin-prefix',
'--foreman-plugin-tasks-automatic-cleanup',
'--foreman-plugin-tasks-cron-line',
'--foreman-plugin-tasks-backup',
'--foreman-plugin-version',
'--foreman-proxy-autosignfile',
'--foreman-proxy-bind-host',
'--foreman-proxy-bmc',
'--foreman-proxy-bmc-default-provider',
'--foreman-proxy-bmc-listen-on',
'--foreman-proxy-bmc-ssh-key',
'--foreman-proxy-bmc-ssh-powercycle',
'--foreman-proxy-bmc-ssh-poweroff',
'--foreman-proxy-bmc-ssh-poweron',
'--foreman-proxy-bmc-ssh-powerstatus',
'--foreman-proxy-bmc-ssh-user',
'--foreman-proxy-content-enable-ansible',
'--foreman-proxy-content-enable-deb',
'--foreman-proxy-content-enable-docker',
'--foreman-proxy-content-enable-file',
'--foreman-proxy-content-enable-katello-agent',
'--foreman-proxy-content-enable-yum',
'--foreman-proxy-content-pulpcore-allowed-content-checksums',
'--foreman-proxy-content-pulpcore-api-service-worker-timeout',
'--foreman-proxy-content-pulpcore-content-service-worker-timeout',
'--foreman-proxy-content-pulpcore-cache-enabled',
'--foreman-proxy-content-pulpcore-cache-expires-ttl',
'--foreman-proxy-content-pulpcore-django-secret-key',
'--foreman-proxy-content-pulpcore-mirror',
'--foreman-proxy-content-pulpcore-use-rq-tasking-system',
'--foreman-proxy-content-pulpcore-postgresql-db-name',
'--foreman-proxy-content-pulpcore-manage-postgresql',
'--foreman-proxy-content-pulpcore-postgresql-host',
'--foreman-proxy-content-pulpcore-postgresql-password',
'--foreman-proxy-content-pulpcore-postgresql-port',
'--foreman-proxy-content-pulpcore-postgresql-ssl',
'--foreman-proxy-content-pulpcore-postgresql-ssl-cert',
'--foreman-proxy-content-pulpcore-postgresql-ssl-key',
'--foreman-proxy-content-pulpcore-postgresql-ssl-require',
'--foreman-proxy-content-pulpcore-postgresql-ssl-root-ca',
'--foreman-proxy-content-pulpcore-postgresql-user',
'--foreman-rails-cache-store',
'--foreman-proxy-registration',
'--foreman-proxy-registration-listen-on',
'--foreman-server-ssl-verify-client',
'--puppet-server-ca-client-self-delete',
'--puppet-server-multithreaded',
'--puppet-server-storeconfigs',
'--puppet-server-trusted-external-command',
'--puppet-server-versioned-code-content',
'--puppet-server-versioned-code-id',
'--foreman-proxy-content-pulpcore-worker-count',
'--foreman-proxy-content-puppet',
'--foreman-proxy-content-qpid-router-agent-addr',
'--foreman-proxy-content-qpid-router-agent-port',
'--foreman-proxy-content-qpid-router-broker-addr',
'--foreman-proxy-content-qpid-router-broker-port',
'--foreman-proxy-content-qpid-router-hub-addr',
'--foreman-proxy-content-qpid-router-hub-port',
'--foreman-proxy-content-qpid-router-logging',
'--foreman-proxy-content-qpid-router-logging-level',
'--foreman-proxy-content-qpid-router-logging-path',
'--foreman-proxy-content-qpid-router-ssl-ciphers',
'--foreman-proxy-content-qpid-router-ssl-protocols',
'--foreman-proxy-content-reverse-proxy',
'--foreman-proxy-content-reverse-proxy-port',
'--foreman-proxy-dhcp',
'--foreman-proxy-dhcp-additional-interfaces',
'--foreman-proxy-dhcp-config',
'--foreman-proxy-dhcp-failover-address',
'--foreman-proxy-dhcp-failover-port',
'--foreman-proxy-dhcp-gateway',
'--foreman-proxy-dhcp-interface',
'--foreman-proxy-dhcp-key-name',
'--foreman-proxy-dhcp-key-secret',
'--foreman-proxy-dhcp-leases',
'--foreman-proxy-dhcp-listen-on',
'--foreman-proxy-dhcp-load-balance',
'--foreman-proxy-dhcp-load-split',
'--foreman-proxy-dhcp-manage-acls',
'--foreman-proxy-dhcp-managed',
'--foreman-proxy-dhcp-max-response-delay',
'--foreman-proxy-dhcp-max-unacked-updates',
'--foreman-proxy-dhcp-mclt',
'--foreman-proxy-dhcp-nameservers',
'--foreman-proxy-dhcp-netmask',
'--foreman-proxy-dhcp-network',
'--foreman-proxy-dhcp-node-type',
'--foreman-proxy-dhcp-omapi-port',
'--foreman-proxy-dhcp-option-domain',
'--foreman-proxy-dhcp-peer-address',
'--foreman-proxy-dhcp-ping-free-ip',
'--foreman-proxy-dhcp-provider',
'--foreman-proxy-dhcp-pxefilename',
'--foreman-proxy-dhcp-pxeserver',
'--foreman-proxy-dhcp-range',
'--foreman-proxy-dhcp-search-domains',
'--foreman-proxy-dhcp-server',
'--foreman-proxy-dhcp-subnets',
'--foreman-proxy-dns',
'--foreman-proxy-dns-forwarders',
'--foreman-proxy-dns-interface',
'--foreman-proxy-dns-listen-on',
'--foreman-proxy-dns-managed',
'--foreman-proxy-dns-provider',
'--foreman-proxy-dns-reverse',
'--foreman-proxy-dns-server',
'--foreman-proxy-dns-tsig-keytab',
'--foreman-proxy-dns-tsig-principal',
'--foreman-proxy-dns-ttl',
'--foreman-proxy-dns-zone',
'--foreman-proxy-ensure-packages-version',
'--foreman-proxy-foreman-base-url',
'--foreman-proxy-foreman-ssl-ca',
'--foreman-proxy-foreman-ssl-cert',
'--foreman-proxy-foreman-ssl-key',
'--foreman-proxy-freeipa-config',
'--foreman-proxy-freeipa-remove-dns',
'--foreman-proxy-gpgcheck',
'--foreman-proxy-groups',
'--foreman-proxy-http',
'--foreman-proxy-http-port',
'--foreman-proxy-httpboot',
'--foreman-proxy-httpboot-listen-on',
'--foreman-proxy-keyfile',
'--foreman-proxy-libvirt-connection',
'--foreman-proxy-libvirt-network',
'--foreman-proxy-log',
'--foreman-proxy-log-buffer',
'--foreman-proxy-log-buffer-errors',
'--foreman-proxy-log-level',
'--foreman-proxy-logs',
'--foreman-proxy-logs-listen-on',
'--foreman-proxy-manage-puppet-group',
'--foreman-proxy-manage-sudoersd',
'--foreman-proxy-oauth-consumer-key',
'--foreman-proxy-oauth-consumer-secret',
'--foreman-proxy-oauth-effective-user',
'--foreman-proxy-plugin-ansible-ansible-dir',
'--foreman-proxy-plugin-ansible-callback',
'--foreman-proxy-plugin-ansible-install-runner',
'--foreman-proxy-plugin-ansible-manage-runner-repo',
'--foreman-proxy-plugin-ansible-roles-path',
'--foreman-proxy-plugin-ansible-runner-package-name',
'--foreman-proxy-plugin-ansible-enabled',
'--foreman-proxy-plugin-ansible-host-key-checking',
'--foreman-proxy-plugin-ansible-listen-on',
'--foreman-proxy-plugin-ansible-stdout-callback',
'--foreman-proxy-plugin-ansible-working-dir',
'--foreman-proxy-plugin-ansible-ssh-args',
'--foreman-proxy-plugin-dhcp-infoblox-dns-view',
'--foreman-proxy-plugin-dhcp-infoblox-network-view',
'--foreman-proxy-plugin-dhcp-infoblox-password',
'--foreman-proxy-plugin-dhcp-infoblox-record-type',
'--foreman-proxy-plugin-dhcp-infoblox-username',
'--foreman-proxy-plugin-dhcp-remote-isc-dhcp-config',
'--foreman-proxy-plugin-dhcp-remote-isc-dhcp-leases',
'--foreman-proxy-plugin-dhcp-remote-isc-key-name',
'--foreman-proxy-plugin-dhcp-remote-isc-key-secret',
'--foreman-proxy-plugin-dhcp-remote-isc-omapi-port',
'--foreman-proxy-plugin-discovery-image-name',
'--foreman-proxy-plugin-discovery-install-images',
'--foreman-proxy-plugin-discovery-source-url',
'--foreman-proxy-plugin-discovery-tftp-root',
'--foreman-proxy-plugin-dns-infoblox-dns-server',
'--foreman-proxy-plugin-dns-infoblox-password',
'--foreman-proxy-plugin-dns-infoblox-username',
'--foreman-proxy-plugin-dns-infoblox-dns-view',
'--foreman-proxy-plugin-openscap-contentdir',
'--foreman-proxy-plugin-openscap-enabled',
'--foreman-proxy-plugin-openscap-failed-dir',
'--foreman-proxy-plugin-openscap-listen-on',
'--foreman-proxy-plugin-openscap-openscap-send-log-file',
'--foreman-proxy-plugin-openscap-proxy-name',
'--foreman-proxy-plugin-openscap-reportsdir',
'--foreman-proxy-plugin-openscap-spooldir',
'--foreman-proxy-plugin-openscap-timeout',
'--foreman-proxy-plugin-openscap-version',
'--foreman-proxy-plugin-openscap-corrupted-dir',
'--foreman-proxy-plugin-remote-execution-ssh-async-ssh',
'--foreman-proxy-plugin-remote-execution-ssh-enabled',
'--foreman-proxy-plugin-remote-execution-ssh-generate-keys',
'--foreman-proxy-plugin-remote-execution-ssh-install-key',
'--foreman-proxy-plugin-remote-execution-ssh-listen-on',
'--foreman-proxy-plugin-remote-execution-ssh-local-working-dir',
'--foreman-proxy-plugin-remote-execution-ssh-remote-working-dir',
'--foreman-proxy-plugin-remote-execution-ssh-ssh-identity-dir',
'--foreman-proxy-plugin-remote-execution-ssh-ssh-identity-file',
'--foreman-proxy-plugin-remote-execution-ssh-ssh-kerberos-auth',
'--foreman-proxy-plugin-remote-execution-ssh-ssh-keygen',
'--foreman-proxy-plugin-shellhooks-directory',
'--foreman-proxy-plugin-shellhooks-enabled',
'--foreman-proxy-plugin-shellhooks-listen-on',
'--foreman-proxy-plugin-shellhooks-version',
'--foreman-proxy-puppet',
'--foreman-proxy-puppet-api-timeout',
'--foreman-proxy-puppet-group',
'--foreman-proxy-puppet-listen-on',
'--foreman-proxy-puppet-ssl-ca',
'--foreman-proxy-puppet-ssl-cert',
'--foreman-proxy-puppet-ssl-key',
'--foreman-proxy-puppet-url',
'--foreman-proxy-puppetca',
'--foreman-proxy-puppetca-certificate',
'--foreman-proxy-puppetca-cmd',
'--foreman-proxy-puppetca-listen-on',
'--foreman-proxy-puppetca-provider',
'--foreman-proxy-puppetca-sign-all',
'--foreman-proxy-puppetca-token-ttl',
'--foreman-proxy-puppetca-tokens-file',
'--foreman-proxy-puppetdir',
'--foreman-proxy-realm',
'--foreman-proxy-realm-keytab',
'--foreman-proxy-realm-listen-on',
'--foreman-proxy-realm-principal',
'--foreman-proxy-realm-provider',
'--foreman-proxy-register-in-foreman',
'--foreman-proxy-registered-name',
'--foreman-proxy-registered-proxy-url',
'--foreman-proxy-repo',
'--foreman-proxy-ssl',
'--foreman-proxy-ssl-ca',
'--foreman-proxy-ssl-cert',
'--foreman-proxy-ssl-disabled-ciphers',
'--foreman-proxy-ssl-key',
'--foreman-proxy-ssl-port',
'--foreman-proxy-ssldir',
'--foreman-proxy-template-url',
'--foreman-proxy-templates',
'--foreman-proxy-templates-listen-on',
'--foreman-proxy-tftp',
'--foreman-proxy-tftp-dirs',
'--foreman-proxy-tftp-listen-on',
'--foreman-proxy-tftp-manage-wget',
'--foreman-proxy-tftp-managed',
'--foreman-proxy-tftp-replace-grub2-cfg',
'--foreman-proxy-tftp-root',
'--foreman-proxy-tftp-servername',
'--foreman-proxy-tftp-syslinux-filenames',
'--foreman-proxy-tls-disabled-versions',
'--foreman-proxy-trusted-hosts',
'--foreman-proxy-use-sudoers',
'--foreman-proxy-use-sudoersd',
'--foreman-proxy-version',
'--foreman-rails-env',
'--foreman-server-port',
'--foreman-server-ssl-ca',
'--foreman-server-ssl-cert',
'--foreman-server-ssl-certs-dir',
'--foreman-server-ssl-chain',
'--foreman-server-ssl-crl',
'--foreman-server-ssl-key',
'--foreman-server-ssl-port',
'--foreman-server-ssl-protocol',
'--foreman-serveraliases',
'--foreman-servername',
'--foreman-ssl',
'--foreman-telemetry-logger-enabled',
'--foreman-telemetry-logger-level',
'--foreman-telemetry-prefix',
'--foreman-telemetry-prometheus-enabled',
'--foreman-telemetry-statsd-enabled',
'--foreman-telemetry-statsd-host',
'--foreman-telemetry-statsd-protocol',
'--foreman-unattended',
'--foreman-unattended-url',
'--foreman-user',
'--foreman-user-groups',
'--foreman-version',
'--foreman-vhost-priority',
'--foreman-websockets-encrypt',
'--foreman-websockets-ssl-cert',
'--foreman-websockets-ssl-key',
'--full-help',
'--help',
'--ignore-undocumented',
'--interactive',
'--katello-candlepin-db-host',
'--katello-candlepin-db-name',
'--katello-candlepin-db-password',
'--katello-candlepin-db-port',
'--katello-candlepin-db-ssl',
'--katello-candlepin-db-ssl-verify',
'--katello-candlepin-db-user',
'--katello-candlepin-manage-db',
'--katello-candlepin-oauth-key',
'--katello-candlepin-oauth-secret',
'--katello-hosts-queue-workers',
'--katello-qpid-hostname',
'--katello-qpid-interface',
'--katello-qpid-wcache-page-size',
'--katello-rest-client-timeout',
'--list-scenarios',
'--log-level',
'--migrations-only',
'--noop',
'--[no-]enable-foreman-cli-ansible',
'--[no-]enable-foreman-cli-azure',
'--[no-]enable-foreman-cli-virt-who-configure',
'--[no-]enable-foreman-plugin-azure',
'--[no-]enable-foreman-plugin-remote-execution-cockpit',
'--[no-]enable-foreman-plugin-virt-who-configure',
'--profile',
'--puppet-additional-settings',
'--puppet-agent',
'--puppet-agent-additional-settings',
'--puppet-agent-noop',
'--puppet-agent-restart-command',
'--puppet-allow-any-crl-auth',
'--puppet-auth-allowed',
'--puppet-auth-template',
'--puppet-autosign',
'--puppet-autosign-content',
'--puppet-autosign-entries',
'--puppet-autosign-mode',
'--puppet-autosign-source',
'--puppet-ca-crl-filepath',
'--puppet-ca-port',
'--puppet-ca-server',
'--puppet-classfile',
'--puppet-client-certname',
'--puppet-client-package',
'--puppet-codedir',
'--puppet-cron-cmd',
'--puppet-dir',
'--puppet-dir-group',
'--puppet-dir-owner',
'--puppet-dns-alt-names',
'--puppet-environment',
'--puppet-group',
'--puppet-hiera-config',
'--puppet-http-connect-timeout',
'--puppet-http-read-timeout',
'--puppet-logdir',
'--puppet-manage-packages',
'--puppet-module-repository',
'--puppet-package-install-options',
'--puppet-package-provider',
'--puppet-package-source',
'--puppet-pluginfactsource',
'--puppet-pluginsource',
'--puppet-pluginsync',
'--puppet-port',
'--puppet-postrun-command',
'--puppet-prerun-command',
'--puppet-puppetmaster',
'--puppet-remove-lock',
'--puppet-report',
'--puppet-run-hour',
'--puppet-run-minute',
'--puppet-rundir',
'--puppet-runinterval',
'--puppet-runmode',
'--puppet-server',
'--puppet-server-acceptor-threads',
'--puppet-server-additional-settings',
'--puppet-server-admin-api-whitelist',
'--puppet-server-allow-header-cert-info',
'--puppet-server-ca',
'--puppet-server-ca-allow-auth-extensions',
'--puppet-server-ca-allow-sans',
'--puppet-server-ca-auth-required',
'--puppet-server-ca-client-whitelist',
'--puppet-server-ca-crl-sync',
'--puppet-server-ca-enable-infra-crl',
'--puppet-server-certname',
'--puppet-server-check-for-updates',
'--puppet-server-cipher-suites',
'--puppet-server-common-modules-path',
'--puppet-server-compile-mode',
'--puppet-server-config-version',
'--puppet-server-connect-timeout',
'--puppet-server-crl-enable',
'--puppet-server-custom-trusted-oid-mapping',
'--puppet-server-default-manifest',
'--puppet-server-default-manifest-content',
'--puppet-server-default-manifest-path',
'--puppet-server-dir',
'--puppet-server-environment-class-cache-enabled',
'--puppet-server-environment-timeout',
'--puppet-server-environments-group',
'--puppet-server-environments-mode',
'--puppet-server-environments-owner',
'--puppet-server-envs-dir',
'--puppet-server-envs-target',
'--puppet-server-external-nodes',
'--puppet-server-foreman',
'--puppet-server-foreman-facts',
'--puppet-server-foreman-ssl-ca',
'--puppet-server-foreman-ssl-cert',
'--puppet-server-foreman-ssl-key',
'--puppet-server-foreman-url',
'--puppet-server-git-branch-map',
'--puppet-server-git-repo',
'--puppet-server-git-repo-group',
'--puppet-server-git-repo-mode',
'--puppet-server-git-repo-path',
'--puppet-server-git-repo-user',
'--puppet-server-group',
'--puppet-server-http',
'--puppet-server-http-port',
'--puppet-server-idle-timeout',
'--puppet-server-ip',
'--puppet-server-jruby-gem-home',
'--puppet-server-jvm-cli-args',
'--puppet-server-jvm-config',
'--puppet-server-jvm-extra-args',
'--puppet-server-jvm-java-bin',
'--puppet-server-jvm-max-heap-size',
'--puppet-server-jvm-min-heap-size',
'--puppet-server-manage-user',
'--puppet-server-max-active-instances',
'--puppet-server-max-open-files',
'--puppet-server-max-queued-requests',
'--puppet-server-max-requests-per-instance',
'--puppet-server-max-retry-delay',
'--puppet-server-max-threads',
'--puppet-server-metrics-allowed',
'--puppet-server-metrics-graphite-enable',
'--puppet-server-metrics-graphite-host',
'--puppet-server-metrics-graphite-interval',
'--puppet-server-metrics-graphite-port',
'--puppet-server-metrics-jmx-enable',
'--puppet-server-metrics-server-id',
'--puppet-server-package',
'--puppet-server-parser',
'--puppet-server-port',
'--puppet-server-post-hook-content',
'--puppet-server-post-hook-name',
'--puppet-server-puppet-basedir',
'--puppet-server-puppetserver-dir',
'--puppet-server-puppetserver-experimental',
'--puppet-server-puppetserver-jruby9k',
'--puppet-server-puppetserver-logdir',
'--puppet-server-puppetserver-metrics',
'--puppet-server-puppetserver-profiler',
'--puppet-server-puppetserver-rundir',
'--puppet-server-puppetserver-trusted-agents',
'--puppet-server-puppetserver-trusted-certificate-extensions',
'--puppet-server-puppetserver-vardir',
'--puppet-server-puppetserver-version',
'--puppet-server-puppetserver-auth-template',
'--puppet-server-reports',
'--puppet-server-request-timeout',
'--puppet-server-ruby-load-paths',
'--puppet-server-selector-threads',
'--puppet-server-ssl-acceptor-threads',
'--puppet-server-ssl-chain-filepath',
'--puppet-server-ssl-dir',
'--puppet-server-ssl-dir-manage',
'--puppet-server-ssl-key-manage',
'--puppet-server-ssl-protocols',
'--puppet-server-ssl-selector-threads',
'--puppet-server-strict-variables',
'--puppet-server-use-legacy-auth-conf',
'--puppet-server-user',
'--puppet-server-version',
'--puppet-server-web-idle-timeout',
'--puppet-service-name',
'--puppet-sharedir',
'--puppet-show-diff',
'--puppet-splay',
'--puppet-splaylimit',
'--puppet-srv-domain',
'--puppet-ssldir',
'--puppet-syslogfacility',
'--puppet-systemd-cmd',
'--puppet-systemd-randomizeddelaysec',
'--puppet-systemd-unit-name',
'--puppet-unavailable-runmodes',
'--puppet-use-srv-records',
'--puppet-usecacheonfailure',
'--puppet-user',
'--puppet-vardir',
'--puppet-version',
'--register-with-insights',
'--reset-certs-ca-common-name',
'--reset-certs-ca-expiration',
'--reset-certs-city',
'--reset-certs-cname',
'--reset-certs-country',
'--reset-certs-default-ca-name',
'--reset-certs-deploy',
'--reset-certs-expiration',
'--reset-certs-generate',
'--reset-certs-group',
'--reset-certs-node-fqdn',
'--reset-certs-org',
'--reset-certs-org-unit',
'--reset-certs-pki-dir',
'--reset-certs-regenerate',
'--reset-certs-server-ca-cert',
'--reset-certs-server-ca-name',
'--reset-certs-server-cert',
'--reset-certs-server-cert-req',
'--reset-certs-server-key',
'--reset-certs-ssl-build-dir',
'--reset-certs-state',
'--reset-certs-tar-file',
'--reset-certs-user',
'--reset-data',
'--reset-foreman-apache',
'--reset-foreman-app-root',
'--reset-foreman-cli-foreman-url',
'--reset-foreman-cli-hammer-plugin-prefix',
'--reset-foreman-cli-manage-root-config',
'--reset-foreman-cli-password',
'--reset-foreman-cli-refresh-cache',
'--reset-foreman-cli-request-timeout',
'--reset-foreman-cli-ssl-ca-file',
'--reset-foreman-cli-username',
'--reset-foreman-cli-use-sessions',
'--reset-foreman-cli-version',
'--reset-foreman-client-ssl-ca',
'--reset-foreman-client-ssl-cert',
'--reset-foreman-client-ssl-key',
'--reset-foreman-compute-ec2-version',
'--reset-foreman-compute-gce-version',
'--reset-foreman-compute-libvirt-version',
'--reset-foreman-compute-openstack-version',
'--reset-foreman-compute-ovirt-version',
'--reset-foreman-compute-vmware-version',
'--reset-foreman-cors-domains',
'--reset-foreman-db-database',
'--reset-foreman-db-host',
'--reset-foreman-db-manage',
'--reset-foreman-db-manage-rake',
'--reset-foreman-db-password',
'--reset-foreman-db-pool',
'--reset-foreman-db-port',
'--reset-foreman-db-root-cert',
'--reset-foreman-db-sslmode',
'--reset-foreman-db-username',
'--reset-foreman-dynflow-manage-services',
'--reset-foreman-dynflow-orchestrator-ensure',
'--reset-foreman-dynflow-redis-url',
'--reset-foreman-dynflow-worker-concurrency',
'--reset-foreman-dynflow-worker-instances',
'--reset-foreman-email-delivery-method',
'--reset-foreman-email-smtp-address',
'--reset-foreman-email-smtp-authentication',
'--reset-foreman-email-smtp-domain',
'--reset-foreman-email-smtp-password',
'--reset-foreman-email-smtp-port',
'--reset-foreman-email-smtp-user-name',
'--reset-foreman-foreman-url',
'--reset-foreman-foreman-service-puma-threads-max',
'--reset-foreman-foreman-service-puma-threads-min',
'--reset-foreman-foreman-service-puma-workers',
'--reset-foreman-group',
'--reset-foreman-hsts-enabled',
'--reset-foreman-http-keytab',
'--reset-foreman-initial-admin-email',
'--reset-foreman-initial-admin-first-name',
'--reset-foreman-initial-admin-last-name',
'--reset-foreman-initial-admin-locale',
'--reset-foreman-initial-admin-password',
'--reset-foreman-initial-admin-timezone',
'--reset-foreman-initial-admin-username',
'--reset-foreman-initial-location',
'--reset-foreman-initial-organization',
'--reset-foreman-ipa-authentication',
'--reset-foreman-ipa-manage-sssd',
'--reset-foreman-keycloak',
'--reset-foreman-keycloak-realm',
'--reset-foreman-keycloak-app-name',
'--reset-foreman-loggers',
'--reset-foreman-logging-layout',
'--reset-foreman-logging-level',
'--reset-foreman-logging-type',
'--reset-foreman-manage-user',
'--reset-foreman-oauth-active',
'--reset-foreman-oauth-consumer-key',
'--reset-foreman-oauth-consumer-secret',
'--reset-foreman-oauth-map-users',
'--reset-foreman-pam-service',
'--reset-foreman-plugin-prefix',
'--reset-foreman-plugin-tasks-automatic-cleanup',
'--reset-foreman-plugin-tasks-backup',
'--reset-foreman-plugin-tasks-cron-line',
'--reset-foreman-plugin-version',
'--reset-foreman-proxy-autosignfile',
'--reset-foreman-proxy-bind-host',
'--reset-foreman-proxy-bmc',
'--reset-foreman-proxy-bmc-default-provider',
'--reset-foreman-proxy-bmc-listen-on',
'--reset-foreman-proxy-bmc-ssh-key',
'--reset-foreman-proxy-bmc-ssh-powercycle',
'--reset-foreman-proxy-bmc-ssh-poweroff',
'--reset-foreman-proxy-bmc-ssh-poweron',
'--reset-foreman-proxy-bmc-ssh-powerstatus',
'--reset-foreman-proxy-bmc-ssh-user',
'--reset-foreman-proxy-content-enable-ansible',
'--reset-foreman-proxy-content-enable-deb',
'--reset-foreman-proxy-content-enable-docker',
'--reset-foreman-proxy-content-enable-file',
'--reset-foreman-proxy-content-enable-katello-agent',
'--reset-foreman-proxy-content-enable-yum',
'--reset-foreman-proxy-content-pulpcore-mirror',
'--reset-foreman-proxy-content-pulpcore-allowed-content-checksums',
'--reset-foreman-proxy-content-pulpcore-api-service-worker-timeout',
'--reset-foreman-proxy-content-pulpcore-content-service-worker-timeout',
'--reset-foreman-proxy-content-pulpcore-django-secret-key',
'--reset-foreman-proxy-content-pulpcore-postgresql-db-name',
'--reset-foreman-proxy-content-pulpcore-manage-postgresql',
'--reset-foreman-proxy-content-pulpcore-postgresql-host',
'--reset-foreman-proxy-content-pulpcore-postgresql-password',
'--reset-foreman-proxy-content-pulpcore-postgresql-port',
'--reset-foreman-proxy-content-pulpcore-postgresql-ssl',
'--reset-foreman-proxy-content-pulpcore-postgresql-ssl-cert',
'--reset-foreman-proxy-content-pulpcore-postgresql-ssl-key',
'--reset-foreman-proxy-content-pulpcore-postgresql-ssl-require',
'--reset-foreman-proxy-content-pulpcore-postgresql-ssl-root-ca',
'--reset-foreman-proxy-content-pulpcore-postgresql-user',
'--reset-foreman-proxy-content-pulpcore-worker-count',
'--reset-foreman-proxy-content-pulpcore-cache-enabled',
'--reset-foreman-proxy-content-pulpcore-cache-expires-ttl',
'--reset-foreman-proxy-content-pulpcore-use-rq-tasking-system',
'--reset-foreman-proxy-content-puppet',
'--reset-foreman-proxy-content-qpid-router-agent-addr',
'--reset-foreman-proxy-content-qpid-router-agent-port',
'--reset-foreman-proxy-content-qpid-router-broker-addr',
'--reset-foreman-proxy-content-qpid-router-broker-port',
'--reset-foreman-proxy-content-qpid-router-hub-addr',
'--reset-foreman-proxy-content-qpid-router-hub-port',
'--reset-foreman-proxy-content-qpid-router-logging',
'--reset-foreman-proxy-content-qpid-router-logging-level',
'--reset-foreman-proxy-content-qpid-router-logging-path',
'--reset-foreman-proxy-content-qpid-router-ssl-ciphers',
'--reset-foreman-proxy-content-qpid-router-ssl-protocols',
'--reset-foreman-proxy-content-reverse-proxy',
'--reset-foreman-proxy-content-reverse-proxy-port',
'--reset-foreman-proxy-dhcp',
'--reset-foreman-proxy-dhcp-additional-interfaces',
'--reset-foreman-proxy-dhcp-config',
'--reset-foreman-proxy-dhcp-failover-address',
'--reset-foreman-proxy-dhcp-failover-port',
'--reset-foreman-proxy-dhcp-gateway',
'--reset-foreman-proxy-dhcp-interface',
'--reset-foreman-proxy-dhcp-key-name',
'--reset-foreman-proxy-dhcp-key-secret',
'--reset-foreman-proxy-dhcp-leases',
'--reset-foreman-proxy-dhcp-listen-on',
'--reset-foreman-proxy-dhcp-load-balance',
'--reset-foreman-proxy-dhcp-load-split',
'--reset-foreman-proxy-dhcp-manage-acls',
'--reset-foreman-proxy-dhcp-managed',
'--reset-foreman-proxy-dhcp-max-response-delay',
'--reset-foreman-proxy-dhcp-max-unacked-updates',
'--reset-foreman-proxy-dhcp-mclt',
'--reset-foreman-proxy-dhcp-nameservers',
'--reset-foreman-proxy-dhcp-netmask',
'--reset-foreman-proxy-dhcp-network',
'--reset-foreman-proxy-dhcp-node-type',
'--reset-foreman-proxy-dhcp-omapi-port',
'--reset-foreman-proxy-dhcp-option-domain',
'--reset-foreman-proxy-dhcp-peer-address',
'--reset-foreman-proxy-dhcp-ping-free-ip',
'--reset-foreman-proxy-dhcp-provider',
'--reset-foreman-proxy-dhcp-pxefilename',
'--reset-foreman-proxy-dhcp-pxeserver',
'--reset-foreman-proxy-dhcp-range',
'--reset-foreman-proxy-dhcp-search-domains',
'--reset-foreman-proxy-dhcp-server',
'--reset-foreman-proxy-dhcp-subnets',
'--reset-foreman-proxy-dns',
'--reset-foreman-proxy-dns-forwarders',
'--reset-foreman-proxy-dns-interface',
'--reset-foreman-proxy-dns-listen-on',
'--reset-foreman-proxy-dns-managed',<|fim▁hole|> '--reset-foreman-proxy-dns-tsig-principal',
'--reset-foreman-proxy-dns-ttl',
'--reset-foreman-proxy-dns-zone',
'--reset-foreman-proxy-ensure-packages-version',
'--reset-foreman-proxy-foreman-base-url',
'--reset-foreman-proxy-foreman-ssl-ca',
'--reset-foreman-proxy-foreman-ssl-cert',
'--reset-foreman-proxy-foreman-ssl-key',
'--reset-foreman-proxy-freeipa-config',
'--reset-foreman-proxy-freeipa-remove-dns',
'--reset-foreman-proxy-gpgcheck',
'--reset-foreman-proxy-groups',
'--reset-foreman-proxy-http',
'--reset-foreman-proxy-http-port',
'--reset-foreman-proxy-httpboot',
'--reset-foreman-proxy-httpboot-listen-on',
'--reset-foreman-proxy-keyfile',
'--reset-foreman-proxy-libvirt-connection',
'--reset-foreman-proxy-libvirt-network',
'--reset-foreman-proxy-log',
'--reset-foreman-proxy-log-buffer',
'--reset-foreman-proxy-log-buffer-errors',
'--reset-foreman-proxy-log-level',
'--reset-foreman-proxy-logs',
'--reset-foreman-proxy-logs-listen-on',
'--reset-foreman-proxy-manage-puppet-group',
'--reset-foreman-proxy-manage-sudoersd',
'--reset-foreman-proxy-oauth-consumer-key',
'--reset-foreman-proxy-oauth-consumer-secret',
'--reset-foreman-proxy-oauth-effective-user',
'--reset-foreman-proxy-plugin-ansible-ansible-dir',
'--reset-foreman-proxy-plugin-ansible-callback',
'--reset-foreman-proxy-plugin-ansible-enabled',
'--reset-foreman-proxy-plugin-ansible-host-key-checking',
'--reset-foreman-proxy-plugin-ansible-listen-on',
'--reset-foreman-proxy-plugin-ansible-install-runner',
'--reset-foreman-proxy-plugin-ansible-manage-runner-repo',
'--reset-foreman-proxy-plugin-ansible-roles-path',
'--reset-foreman-proxy-plugin-ansible-runner-package-name',
'--reset-foreman-proxy-plugin-ansible-ssh-args',
'--reset-foreman-proxy-plugin-ansible-stdout-callback',
'--reset-foreman-proxy-plugin-ansible-working-dir',
'--reset-foreman-proxy-plugin-dhcp-infoblox-dns-view',
'--reset-foreman-proxy-plugin-dhcp-infoblox-network-view',
'--reset-foreman-proxy-plugin-dhcp-infoblox-password',
'--reset-foreman-proxy-plugin-dhcp-infoblox-record-type',
'--reset-foreman-proxy-plugin-dhcp-infoblox-username',
'--reset-foreman-proxy-plugin-dhcp-remote-isc-dhcp-config',
'--reset-foreman-proxy-plugin-dhcp-remote-isc-dhcp-leases',
'--reset-foreman-proxy-plugin-dhcp-remote-isc-key-name',
'--reset-foreman-proxy-plugin-dhcp-remote-isc-key-secret',
'--reset-foreman-proxy-plugin-dhcp-remote-isc-omapi-port',
'--reset-foreman-proxy-plugin-discovery-image-name',
'--reset-foreman-proxy-plugin-discovery-install-images',
'--reset-foreman-proxy-plugin-discovery-source-url',
'--reset-foreman-proxy-plugin-discovery-tftp-root',
'--reset-foreman-proxy-plugin-dns-infoblox-dns-server',
'--reset-foreman-proxy-plugin-dns-infoblox-password',
'--reset-foreman-proxy-plugin-dns-infoblox-username',
'--reset-foreman-proxy-plugin-dns-infoblox-dns-view',
'--reset-foreman-proxy-plugin-openscap-contentdir',
'--reset-foreman-proxy-plugin-openscap-corrupted-dir',
'--reset-foreman-proxy-plugin-openscap-enabled',
'--reset-foreman-proxy-plugin-openscap-failed-dir',
'--reset-foreman-proxy-plugin-openscap-listen-on',
'--reset-foreman-proxy-plugin-openscap-openscap-send-log-file',
'--reset-foreman-proxy-plugin-openscap-proxy-name',
'--reset-foreman-proxy-plugin-openscap-reportsdir',
'--reset-foreman-proxy-plugin-openscap-spooldir',
'--reset-foreman-proxy-plugin-openscap-timeout',
'--reset-foreman-proxy-plugin-openscap-version',
'--reset-foreman-proxy-plugin-remote-execution-ssh-async-ssh',
'--reset-foreman-proxy-plugin-remote-execution-ssh-enabled',
'--reset-foreman-proxy-plugin-remote-execution-ssh-generate-keys',
'--reset-foreman-proxy-plugin-remote-execution-ssh-install-key',
'--reset-foreman-proxy-plugin-remote-execution-ssh-listen-on',
'--reset-foreman-proxy-plugin-remote-execution-ssh-local-working-dir',
'--reset-foreman-proxy-plugin-remote-execution-ssh-remote-working-dir',
'--reset-foreman-proxy-plugin-remote-execution-ssh-ssh-identity-dir',
'--reset-foreman-proxy-plugin-remote-execution-ssh-ssh-identity-file',
'--reset-foreman-proxy-plugin-remote-execution-ssh-ssh-kerberos-auth',
'--reset-foreman-proxy-plugin-remote-execution-ssh-ssh-keygen',
'--reset-foreman-proxy-plugin-shellhooks-directory',
'--reset-foreman-proxy-plugin-shellhooks-enabled',
'--reset-foreman-proxy-plugin-shellhooks-listen-on',
'--reset-foreman-proxy-plugin-shellhooks-version',
'--reset-foreman-proxy-puppet',
'--reset-foreman-proxy-puppet-api-timeout',
'--reset-foreman-proxy-puppet-group',
'--reset-foreman-proxy-puppet-listen-on',
'--reset-foreman-proxy-puppet-ssl-ca',
'--reset-foreman-proxy-puppet-ssl-cert',
'--reset-foreman-proxy-puppet-ssl-key',
'--reset-foreman-proxy-puppet-url',
'--reset-foreman-proxy-puppetca',
'--reset-foreman-proxy-puppetca-certificate',
'--reset-foreman-proxy-puppetca-cmd',
'--reset-foreman-proxy-puppetca-listen-on',
'--reset-foreman-proxy-puppetca-provider',
'--reset-foreman-proxy-puppetca-sign-all',
'--reset-foreman-proxy-puppetca-token-ttl',
'--reset-foreman-proxy-puppetca-tokens-file',
'--reset-foreman-proxy-puppetdir',
'--reset-foreman-proxy-realm',
'--reset-foreman-proxy-realm-keytab',
'--reset-foreman-proxy-realm-listen-on',
'--reset-foreman-proxy-realm-principal',
'--reset-foreman-proxy-realm-provider',
'--reset-foreman-proxy-register-in-foreman',
'--reset-foreman-proxy-registered-name',
'--reset-foreman-proxy-registered-proxy-url',
'--reset-foreman-proxy-registration',
'--reset-foreman-proxy-registration-listen-on',
'--reset-foreman-proxy-repo',
'--reset-foreman-proxy-ssl',
'--reset-foreman-proxy-ssl-ca',
'--reset-foreman-proxy-ssl-cert',
'--reset-foreman-proxy-ssl-disabled-ciphers',
'--reset-foreman-proxy-ssl-key',
'--reset-foreman-proxy-ssl-port',
'--reset-foreman-proxy-ssldir',
'--reset-foreman-proxy-template-url',
'--reset-foreman-proxy-templates',
'--reset-foreman-proxy-templates-listen-on',
'--reset-foreman-proxy-tftp',
'--reset-foreman-proxy-tftp-dirs',
'--reset-foreman-proxy-tftp-listen-on',
'--reset-foreman-proxy-tftp-manage-wget',
'--reset-foreman-proxy-tftp-managed',
'--reset-foreman-proxy-tftp-replace-grub2-cfg',
'--reset-foreman-proxy-tftp-root',
'--reset-foreman-proxy-tftp-servername',
'--reset-foreman-proxy-tftp-syslinux-filenames',
'--reset-foreman-proxy-tls-disabled-versions',
'--reset-foreman-proxy-trusted-hosts',
'--reset-foreman-proxy-use-sudoers',
'--reset-foreman-proxy-use-sudoersd',
'--reset-foreman-proxy-version',
'--reset-foreman-rails-cache-store',
'--reset-foreman-rails-env',
'--reset-puppet-server-ca-client-self-delete',
'--reset-foreman-server-port',
'--reset-foreman-server-ssl-ca',
'--reset-foreman-server-ssl-cert',
'--reset-foreman-server-ssl-certs-dir',
'--reset-foreman-server-ssl-chain',
'--reset-foreman-server-ssl-crl',
'--reset-foreman-server-ssl-key',
'--reset-foreman-server-ssl-port',
'--reset-foreman-server-ssl-protocol',
'--reset-foreman-server-ssl-verify-client',
'--reset-foreman-serveraliases',
'--reset-foreman-servername',
'--reset-foreman-ssl',
'--reset-foreman-telemetry-logger-enabled',
'--reset-foreman-telemetry-logger-level',
'--reset-foreman-telemetry-prefix',
'--reset-foreman-telemetry-prometheus-enabled',
'--reset-foreman-telemetry-statsd-enabled',
'--reset-foreman-telemetry-statsd-host',
'--reset-foreman-telemetry-statsd-protocol',
'--reset-foreman-unattended',
'--reset-foreman-unattended-url',
'--reset-foreman-user',
'--reset-foreman-user-groups',
'--reset-foreman-version',
'--reset-foreman-vhost-priority',
'--reset-foreman-websockets-encrypt',
'--reset-foreman-websockets-ssl-cert',
'--reset-foreman-websockets-ssl-key',
'--reset-katello-candlepin-db-host',
'--reset-katello-candlepin-db-name',
'--reset-katello-candlepin-db-password',
'--reset-katello-candlepin-db-port',
'--reset-katello-candlepin-db-ssl',
'--reset-katello-candlepin-db-ssl-verify',
'--reset-katello-candlepin-db-user',
'--reset-katello-candlepin-manage-db',
'--reset-katello-candlepin-oauth-key',
'--reset-katello-candlepin-oauth-secret',
'--reset-katello-hosts-queue-workers',
'--reset-katello-qpid-hostname',
'--reset-katello-qpid-interface',
'--reset-katello-qpid-wcache-page-size',
'--reset-katello-rest-client-timeout',
'--reset-puppet-additional-settings',
'--reset-puppet-agent',
'--reset-puppet-agent-additional-settings',
'--reset-puppet-agent-noop',
'--reset-puppet-agent-restart-command',
'--reset-puppet-allow-any-crl-auth',
'--reset-puppet-auth-allowed',
'--reset-puppet-auth-template',
'--reset-puppet-autosign',
'--reset-puppet-autosign-content',
'--reset-puppet-autosign-entries',
'--reset-puppet-autosign-mode',
'--reset-puppet-autosign-source',
'--reset-puppet-ca-crl-filepath',
'--reset-puppet-ca-port',
'--reset-puppet-ca-server',
'--reset-puppet-classfile',
'--reset-puppet-client-certname',
'--reset-puppet-client-package',
'--reset-puppet-codedir',
'--reset-puppet-cron-cmd',
'--reset-puppet-dir',
'--reset-puppet-dir-group',
'--reset-puppet-dir-owner',
'--reset-puppet-dns-alt-names',
'--reset-puppet-environment',
'--reset-puppet-group',
'--reset-puppet-hiera-config',
'--reset-puppet-http-connect-timeout',
'--reset-puppet-http-read-timeout',
'--reset-puppet-logdir',
'--reset-puppet-manage-packages',
'--reset-puppet-module-repository',
'--reset-puppet-package-provider',
'--reset-puppet-package-source',
'--reset-puppet-package-install-options',
'--reset-puppet-pluginfactsource',
'--reset-puppet-pluginsource',
'--reset-puppet-pluginsync',
'--reset-puppet-port',
'--reset-puppet-postrun-command',
'--reset-puppet-prerun-command',
'--reset-puppet-puppetmaster',
'--reset-puppet-remove-lock',
'--reset-puppet-report',
'--reset-puppet-run-hour',
'--reset-puppet-run-minute',
'--reset-puppet-rundir',
'--reset-puppet-runinterval',
'--reset-puppet-runmode',
'--reset-puppet-server',
'--reset-puppet-server-acceptor-threads',
'--reset-puppet-server-additional-settings',
'--reset-puppet-server-admin-api-whitelist',
'--reset-puppet-server-allow-header-cert-info',
'--reset-puppet-server-ca',
'--reset-puppet-server-ca-allow-auth-extensions',
'--reset-puppet-server-ca-allow-sans',
'--reset-puppet-server-ca-auth-required',
'--reset-puppet-server-ca-client-whitelist',
'--reset-puppet-server-ca-crl-sync',
'--reset-puppet-server-ca-enable-infra-crl',
'--reset-puppet-server-certname',
'--reset-puppet-server-check-for-updates',
'--reset-puppet-server-cipher-suites',
'--reset-puppet-server-common-modules-path',
'--reset-puppet-server-compile-mode',
'--reset-puppet-server-config-version',
'--reset-puppet-server-connect-timeout',
'--reset-puppet-server-crl-enable',
'--reset-puppet-server-custom-trusted-oid-mapping',
'--reset-puppet-server-default-manifest',
'--reset-puppet-server-default-manifest-content',
'--reset-puppet-server-default-manifest-path',
'--reset-puppet-server-dir',
'--reset-puppet-server-environment-class-cache-enabled',
'--reset-puppet-server-environment-timeout',
'--reset-puppet-server-environments-group',
'--reset-puppet-server-environments-mode',
'--reset-puppet-server-environments-owner',
'--reset-puppet-server-envs-dir',
'--reset-puppet-server-envs-target',
'--reset-puppet-server-external-nodes',
'--reset-puppet-server-foreman',
'--reset-puppet-server-foreman-facts',
'--reset-puppet-server-foreman-ssl-ca',
'--reset-puppet-server-foreman-ssl-cert',
'--reset-puppet-server-foreman-ssl-key',
'--reset-puppet-server-foreman-url',
'--reset-puppet-server-git-branch-map',
'--reset-puppet-server-git-repo',
'--reset-puppet-server-git-repo-group',
'--reset-puppet-server-git-repo-mode',
'--reset-puppet-server-git-repo-path',
'--reset-puppet-server-git-repo-user',
'--reset-puppet-server-group',
'--reset-puppet-server-http',
'--reset-puppet-server-http-port',
'--reset-puppet-server-idle-timeout',
'--reset-puppet-server-ip',
'--reset-puppet-server-jruby-gem-home',
'--reset-puppet-server-jvm-cli-args',
'--reset-puppet-server-jvm-config',
'--reset-puppet-server-jvm-extra-args',
'--reset-puppet-server-jvm-java-bin',
'--reset-puppet-server-jvm-max-heap-size',
'--reset-puppet-server-jvm-min-heap-size',
'--reset-puppet-server-manage-user',
'--reset-puppet-server-max-active-instances',
'--reset-puppet-server-max-open-files',
'--reset-puppet-server-max-queued-requests',
'--reset-puppet-server-max-requests-per-instance',
'--reset-puppet-server-max-retry-delay',
'--reset-puppet-server-max-threads',
'--reset-puppet-server-metrics-allowed',
'--reset-puppet-server-metrics-graphite-enable',
'--reset-puppet-server-metrics-graphite-host',
'--reset-puppet-server-metrics-graphite-interval',
'--reset-puppet-server-metrics-graphite-port',
'--reset-puppet-server-metrics-jmx-enable',
'--reset-puppet-server-metrics-server-id',
'--reset-puppet-server-multithreaded',
'--reset-puppet-server-package',
'--reset-puppet-server-parser',
'--reset-puppet-server-port',
'--reset-puppet-server-post-hook-content',
'--reset-puppet-server-post-hook-name',
'--reset-puppet-server-puppet-basedir',
'--reset-puppet-server-puppetserver-dir',
'--reset-puppet-server-puppetserver-experimental',
'--reset-puppet-server-puppetserver-jruby9k',
'--reset-puppet-server-puppetserver-logdir',
'--reset-puppet-server-puppetserver-metrics',
'--reset-puppet-server-puppetserver-rundir',
'--reset-puppet-server-puppetserver-trusted-agents',
'--reset-puppet-server-puppetserver-trusted-certificate-extensions',
'--reset-puppet-server-puppetserver-vardir',
'--reset-puppet-server-puppetserver-version',
'--reset-puppet-server-puppetserver-auth-template',
'--reset-puppet-server-puppetserver-profiler',
'--reset-puppet-server-reports',
'--reset-puppet-server-request-timeout',
'--reset-puppet-server-ruby-load-paths',
'--reset-puppet-server-storeconfigs',
'--reset-puppet-server-selector-threads',
'--reset-puppet-server-ssl-acceptor-threads',
'--reset-puppet-server-ssl-chain-filepath',
'--reset-puppet-server-ssl-dir',
'--reset-puppet-server-ssl-dir-manage',
'--reset-puppet-server-ssl-key-manage',
'--reset-puppet-server-ssl-protocols',
'--reset-puppet-server-ssl-selector-threads',
'--reset-puppet-server-strict-variables',
'--reset-puppet-server-trusted-external-command',
'--reset-puppet-server-use-legacy-auth-conf',
'--reset-puppet-server-user',
'--reset-puppet-server-versioned-code-content',
'--reset-puppet-server-versioned-code-id',
'--reset-puppet-server-version',
'--reset-puppet-server-web-idle-timeout',
'--reset-puppet-service-name',
'--reset-puppet-sharedir',
'--reset-puppet-show-diff',
'--reset-puppet-splay',
'--reset-puppet-splaylimit',
'--reset-puppet-srv-domain',
'--reset-puppet-ssldir',
'--reset-puppet-syslogfacility',
'--reset-puppet-systemd-cmd',
'--reset-puppet-systemd-randomizeddelaysec',
'--reset-puppet-systemd-unit-name',
'--reset-puppet-unavailable-runmodes',
'--reset-puppet-use-srv-records',
'--reset-puppet-usecacheonfailure',
'--reset-puppet-user',
'--reset-puppet-vardir',
'--reset-puppet-version',
'--scenario',
'--skip-checks-i-know-better',
'--skip-puppet-version-check',
'--tuning',
'--[no-]verbose',
'--verbose-log-level',
'-S',
'-h',
'-i',
'-l',
'-n',
'-p',
'-s',
'-v',
'-',
}
LAST_SAVED_SECTIONS = {
'= Generic:',
'= Module certs:',
'= Module foreman:',
'= Module foreman_cli:',
'= Module foreman_compute_ec2:',
'= Module foreman_compute_gce:',
'= Module foreman_compute_libvirt:',
'= Module foreman_compute_openstack:',
'= Module foreman_compute_ovirt:',
'= Module foreman_compute_vmware:',
'= Module foreman_plugin_tasks:',
'= Module foreman_proxy:',
'= Module foreman_proxy_content:',
'= Module foreman_proxy_plugin_ansible:',
'= Module foreman_proxy_plugin_dhcp_infoblox:',
'= Module foreman_proxy_plugin_dhcp_remote_isc:',
'= Module foreman_proxy_plugin_discovery:',
'= Module foreman_proxy_plugin_dns_infoblox:',
'= Module foreman_proxy_plugin_openscap:',
'= Module foreman_proxy_plugin_shellhooks:',
'= Module foreman_proxy_plugin_remote_execution_ssh:',
'= Module katello:',
'= Module puppet:',
}
SATELLITE_SERVICES = {
'dynflow-sidekiq@orchestrator',
'dynflow-sidekiq@worker-1',
'dynflow-sidekiq@worker-hosts-queue-1',
'foreman-proxy',
'foreman',
'httpd',
'postgresql',
'pulpcore-api',
'pulpcore-content',
'rh-redis5-redis',
'puppetserver',
}
def extract_help(filter='params'):
"""Generator function to extract satellite installer params and sections from lines of help text.
The help text is read from the stdout of `satellite-installer --full-help`
run on the host.
:param string filter: Filter `sections` or `params` in full help, default is params
:return: generator with params or sections, depending on the filter parameter
"""
stdout = ssh.command('satellite-installer --full-help').stdout
for line in stdout or []:
line = line.strip()
if filter == 'sections':
if line.startswith('= '):
yield line
else:
first_2_tokens = line.split()[:2]
for token in first_2_tokens:
if token[0] == '-':
yield token.replace(',', '')
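# Illustrative example (annotation, not part of the original module): for a typical
# line of `satellite-installer --full-help` output such as
#     "    --puppet-server-port    Puppet server port  (default: 8140)"
# the 'params' filter yields '--puppet-server-port', while a section header such as
# "= Module puppet:" is yielded unchanged by the 'sections' filter.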
@pytest.mark.upgrade
@pytest.mark.tier1
def test_positive_foreman_module():
"""Check if SELinux foreman module has the right version
:id: a0736b3a-3d42-4a09-a11a-28c1d58214a5
:steps:
1. Check "foreman-selinux" package availability on satellite.
2. Check SELinux foreman module on satellite.
:CaseImportance: Critical
:CaseLevel: System
:expectedresults: Foreman RPM and SELinux module versions match
"""
rpm_result = ssh.command('rpm -q foreman-selinux')
assert rpm_result.return_code == 0
semodule_result = ssh.command('semodule -l | grep foreman')
assert semodule_result.return_code == 0
# Sample rpm output: foreman-selinux-1.7.2.8-1.el7sat.noarch
version_regex = re.compile(r'((\d\.?)+[-.]\d)')
rpm_version = version_regex.search(''.join(rpm_result.stdout)).group(1)
# Sample semodule output: foreman 1.7.2.8
semodule_version = version_regex.search(''.join(semodule_result.stdout)).group(1)
rpm_version = rpm_version[:-2]
assert rpm_version.replace('-', '.') == semodule_version
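# Worked example of the version parsing above (illustrative):
#   rpm stdout      "foreman-selinux-1.7.2.8-1.el7sat.noarch" -> regex group "1.7.2.8-1"
#                   -> drop the release suffix with [:-2]     -> "1.7.2.8"
#   semodule stdout "foreman 1.7.2.8"                         -> regex group "1.7.2.8"
# so the assertion compares "1.7.2.8" == "1.7.2.8".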
@pytest.mark.skip_if_open('BZ:1987288')
@pytest.mark.upgrade
@pytest.mark.tier1
def test_positive_check_installer_services():
"""Check if services start correctly
:id: 85fd4388-6d94-42f5-bed2-24be38e9f104
:steps:
1. Run 'systemctl status <tomcat>' command to check tomcat service status on satellite.
2. Run 'foreman-maintain service status' command on satellite to check the satellite
services.
3. Run the 'hammer ping' command on satellite.
:BZ: 1987288
:expectedresults: All services are started
:CaseImportance: Critical
:CaseLevel: System
"""
major_version = get_host_info()[1]
service_name = 'tomcat6' if major_version == RHEL_6_MAJOR_VERSION else 'tomcat'
SATELLITE_SERVICES.add(service_name)
if major_version >= RHEL_7_MAJOR_VERSION:
status_format = "systemctl status {0}"
else:
status_format = "service {0} status"
for service in SATELLITE_SERVICES:
result = ssh.command(status_format.format(service))
assert result.return_code == 0
assert len(result.stderr) == 0
# check status reported by hammer ping command
username = settings.server.admin_username
password = settings.server.admin_password
result = ssh.command(f'hammer -u {username} -p {password} ping')
result_output = [
service.strip() for service in result.stdout if not re.search(r'message:', service)
]
# iterate over the lines grouping every 3 lines
# example [1, 2, 3, 4, 5, 6] will return [(1, 2, 3), (4, 5, 6)]
for service, status, response in zip(*[iter(result_output)] * 3):
service = service.replace(':', '').strip()
status = status.split(':')[1].strip().lower()
response = response.split(':', 1)[1].strip()
assert status == 'ok', f'{service} responded with {response}'
@pytest.mark.upgrade
@pytest.mark.tier3
@pytest.mark.parametrize('filter', ['params', 'sections'])
def test_installer_options_and_sections(filter):
"""Look for changes on installer sections and options/flags
:id: a51d3b9f-f347-4a96-a31a-770349db08c7
:parametrized: yes
:Steps:
1. parse installer sections and options/flags
2. compare with last saved data
:expectedresults: Ideally sections and options should not change on zstreams.
Documentation must be updated accordingly when such changes occur.
So when this test fails, QE can act on it, asking dev whether the
changes occurred on a zstream and checking that the docs are up to date.
:CaseImportance: Medium
"""
current = set(extract_help(filter=filter))
previous = PREVIOUS_INSTALLER_OPTIONS if filter == 'params' else LAST_SAVED_SECTIONS
removed = list(previous - current)
removed.sort()
added = list(current - previous)
added.sort()
msg = f"###Removed {filter}:\n{removed}\n###Added {filter}:\n{added}"
assert previous == current, msg
@pytest.mark.stubbed
@pytest.mark.tier3
def test_satellite_installation_on_ipv6():
"""
Check the satellite installation on ipv6 machine.
:id: 24fa5ef0-1673-427c-82ab-740758683cff
:steps:
1. Install satellite on ipv6 machine.
:expectedresults:
1. Installation should be successful.
2. After installation, all the services should be up and running.
3. Status of hammer ping should be ok.
4. Satellite service restart should work.
5. After system reboot, all the services come back up.
:CaseImportance: Critical
:CaseLevel: System
:CaseAutomation: NotAutomated
"""
@pytest.mark.stubbed
@pytest.mark.tier3
def test_capsule_installation_on_ipv6():
"""
Check the capsule installation over ipv6 machine
:id: 75341e29-342f-41fc-aaa8-cda013b7dfa1
:steps:
1. Install capsule on ipv6 machine.
:expectedresults:
1. Capsule installation should be successful.
2. After installation, all the services should be up and running.
3. Satellite service restart should work.
4. After system reboot, all the services come back up.
:CaseImportance: Critical
:CaseLevel: System
:CaseAutomation: NotAutomated
"""
@pytest.mark.stubbed
@pytest.mark.tier3
def test_installer_check_on_ipv6():
"""
Check the satellite-installer command execution with tuning options and updated config file.
:id: 411bbffb-027f-4df0-8566-1719d1d0651a
:steps:
1. Install satellite on ipv6 machine
2. Trigger the satellite-installer command with "--tuning medium" flag.
3. Update the custom-hiera.yaml file (add any supported config parameter).
4. Trigger the satellite-installer command with no option.
:expectedresults:
1. Tuning parameter set successfully for medium size.
2. custom-hiera.yaml related changes should be successfully applied.
:CaseImportance: Critical
:CaseLevel: System
:CaseAutomation: NotAutomated
"""
@pytest.mark.stubbed
@pytest.mark.tier1
def test_installer_verbose_stdout():
"""Look for Satellite installer verbose STDOUT
:id: 5d0fb30a-4a63-41b3-bc6f-c4057942ce3c
:steps:
1. Install satellite package.
2. Run Satellite installer
3. Observe installer STDOUT.
:expectedresults:
1. Installer prints the completion of the following hook groups to STDOUT:
pre_migrations, boot, init, pre_values, pre_validations, pre_commit, pre, post
2. Installer prints the completion of the system configuration to STDOUT.
3. Finally, the installer reports the running satellite URL, the credentials,
the external capsule installation prerequisite, the capsule upgrade instruction,
the running internal capsule URL, and the log file.
:CaseImportance: Critical
:CaseLevel: System
:CaseAutomation: NotAutomated
"""
@pytest.mark.stubbed
@pytest.mark.tier1
def test_installer_answers_file():
"""Answers file to configure plugins and hooks
:id: 5cb40e4b-1acb-49f9-a085-a7dead1664b5
:steps:
1. Install satellite package
2. Modify `/etc/foreman-installer/scenarios.d/satellite-answers.yaml` file to
configure hook/plugin on satellite
3. Run Satellite installer
:expectedresults: Installer configures plugins and hooks in answers file.
:CaseImportance: Critical
:CaseLevel: System
:CaseAutomation: NotAutomated
"""
@pytest.mark.stubbed
@pytest.mark.tier1
def test_capsule_installer_verbose_stdout():
"""Look for Capsule installer verbose STDOUT
:id: 323e85e3-2ad1-4018-aa35-1d51f1e7f5a2
:steps:
1. Install capsule package.
2. Run Satellite installer --scenario capsule
3. Observe installer STDOUT.
:expectedresults:
1. Installer prints the completion of the following hook groups to STDOUT:
pre_migrations, boot, init, pre_values, pre_validations, pre_commit, pre, post
2. Installer prints the completion of the system configuration to STDOUT.
3. Finally, the installer reports the running capsule URL and the log file.
:CaseImportance: Critical
:CaseLevel: System
:CaseAutomation: NotAutomated
"""
@pytest.mark.stubbed
@pytest.mark.tier3
def test_installer_timestamp_logs():
"""Look for Satellite installer timestamp based logs
:id: 9b4d32f6-d471-4bdb-8a79-9bb20ecb86aa
:steps:
1. Install satellite package.
2. Run Satellite installer
3. Observe installer log file `/var/log/foreman-installer/satellite.log`.
:expectedresults:
1. Installer logs satellite installation with timestamps in the following format:
YYYY-MM-DD HH:MM:SS
:CaseImportance: Critical
:CaseLevel: System
:CaseAutomation: NotAutomated
"""<|fim▁end|>
|
'--reset-foreman-proxy-dns-provider',
'--reset-foreman-proxy-dns-reverse',
'--reset-foreman-proxy-dns-server',
'--reset-foreman-proxy-dns-tsig-keytab',
|
<|file_name|>feature_extraction.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
'''
Description:
Extract features from text in English.
Version:
python3
'''
from sklearn.feature_extraction.text import CountVectorizer<|fim▁hole|>
VECTORIZER = CountVectorizer(min_df=1)
# The commented-out line below sets the feature extraction parameters
# (1-2 word n-grams as the sliding window, whitespace as the word separator, minimum document frequency of 1).
# See the API documentation for details:
# http://scikit-learn.org/stable/modules/feature_extraction.html#text-feature-extraction
# VECTORIZER = CountVectorizer(ngram_range=(1,2), token_pattern=r'\b\w+\b', min_df=1)
CORPUS = [
'This is the first document.',
'This is the second second document.',
'And the third one.',
'Is this the first document?'
]
X = VECTORIZER.fit_transform(CORPUS)
FEATURE_NAMES = VECTORIZER.get_feature_names()
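# For the CORPUS above, scikit-learn's default tokenization (words of two or more
# characters) should yield this vocabulary, in alphabetical order:
# ['and', 'document', 'first', 'is', 'one', 'second', 'the', 'third', 'this']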
print(FEATURE_NAMES)<|fim▁end|>
| |
<|file_name|>gamma.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
from future.utils import with_metaclass
import numpy as np
import scipy as sp
from abc import ABCMeta, abstractmethod
from scipy import integrate
import scipy.interpolate as interpolate
from . import core
from . import refstate
__all__ = ['GammaEos','GammaCalc']
#====================================================================
# Base Class
#====================================================================
def set_calculator(eos_mod, kind, kind_opts):
assert kind in kind_opts, (
kind + ' is not a valid thermal calculator. '+
'You must select one of: ' + str(kind_opts))
eos_mod._kind = kind
if kind=='GammaPowLaw':
calc = _GammaPowLaw(eos_mod)
elif kind=='GammaShiftPowLaw':
calc = _GammaShiftPowLaw(eos_mod)
elif kind=='GammaFiniteStrain':
calc = _GammaFiniteStrain(eos_mod)
else:
raise NotImplementedError(kind+' is not a valid '+
'GammaEos Calculator.')
eos_mod._add_calculator(calc, calc_type='gamma')
pass
#====================================================================
class GammaEos(with_metaclass(ABCMeta, core.Eos)):
"""
EOS model for compression dependence of Grüneisen parameter.
Notes
-----
Thermodynamic properties depend only on volume.
"""
_kind_opts = ['GammaPowLaw','GammaShiftPowLaw','GammaFiniteStrain']
def __init__(self, kind='GammaPowLaw', natom=1, model_state={}):
self._pre_init(natom=natom)
set_calculator(self, kind, self._kind_opts)
ref_compress_state='P0'
ref_thermal_state='T0'
ref_energy_type = 'E0'
refstate.set_calculator(self, ref_compress_state=ref_compress_state,
ref_thermal_state=ref_thermal_state,
ref_energy_type=ref_energy_type)
# self._set_ref_state()
self._post_init(model_state=model_state)
pass
def __repr__(self):
calc = self.calculators['gamma']
return ("GammaEos(kind={kind}, natom={natom}, "
"model_state={model_state}, "
")"
.format(kind=repr(calc.name),
natom=repr(self.natom),
model_state=self.model_state
)
)
def _set_ref_state(self):
calc = self.calculators['gamma']
path_const = calc.path_const
if path_const=='S':
param_ref_names = []
param_ref_units = []
param_ref_defaults = []
param_ref_scales = []
else:
raise NotImplementedError(
'path_const '+path_const+' is not valid for ThermalEos.')
self._path_const = calc.path_const
self._param_ref_names = param_ref_names
self._param_ref_units = param_ref_units
self._param_ref_defaults = param_ref_defaults
self._param_ref_scales = param_ref_scales
pass
def gamma(self, V_a):
gamma_a = self.calculators['gamma']._calc_gamma(V_a)
return gamma_a
def gamma_deriv(self, V_a):
gamma_deriv_a = self.calculators['gamma']._calc_gamma_deriv(V_a)
return gamma_deriv_a
def temp(self, V_a, T0=None):
temp_a = self.calculators['gamma']._calc_temp(V_a, T0=T0)
return temp_a
#====================================================================
class GammaCalc(with_metaclass(ABCMeta, core.Calculator)):
"""
Abstract Equation of State class for a reference compression path.
The path can be either isothermal (T=const) or adiabatic (S=const).
For this restricted path, thermodynamic properties depend only on volume.
"""
def __init__(self, eos_mod):
self._eos_mod = eos_mod
self._init_params()
self._path_const = 'S'
pass
@property
def path_const( self ):
return self._path_const
####################
# Required Methods #
####################
@abstractmethod
def _init_params( self ):
"""Initialize list of calculator parameter names."""
pass
@abstractmethod
def _calc_gamma(self, V_a):
pass
@abstractmethod
def _calc_gamma_deriv(self, V_a):
pass
@abstractmethod
def _calc_temp(self, V_a, T0=None):
pass
def _calc_theta(self, V_a):
theta0 = self.eos_mod.get_param_values(param_names=['theta0'])
theta = self._calc_temp(V_a, T0=theta0)
return theta
####################
# Optional Methods #
####################
# EOS property functions
def _calc_param_deriv(self, fname, paramname, V_a, dxfrac=1e-6):
scale_a, paramkey_a = self.get_param_scale(apply_expand_adj=True )
scale = scale_a[paramkey_a==paramname][0]
# print 'scale: ' + np.str(scale)
#if (paramname is 'E0') and (fname is 'energy'):
# return np.ones(V_a.shape)
try:
fun = getattr(self, fname)
# Note that self is implicitly included
val0_a = fun(V_a)
except:
assert False, 'That is not a valid function name ' + \
'(e.g. it should be press or energy)'
try:
param = core.get_params([paramname])[0]
dparam = scale*dxfrac
# print 'param: ' + np.str(param)
# print 'dparam: ' + np.str(dparam)
except:
assert False, 'This is not a valid parameter name'
# set param value in eos_d dict
core.set_params([paramname,], [param+dparam,])
# Note that self is implicitly included
dval_a = fun(V_a) - val0_a
# reset param to original value
core.set_params([paramname], [param])
deriv_a = dval_a/dxfrac
return deriv_a
def _calc_energy_perturb(self, V_a):
"""Returns Energy pertubation basis functions resulting from fractional changes to EOS params."""
fname = 'energy'
scale_a, paramkey_a = self.get_param_scale(
apply_expand_adj=self.expand_adj)
Eperturb_a = []
for paramname in paramkey_a:
iEperturb_a = self._calc_param_deriv(fname, paramname, V_a)
Eperturb_a.append(iEperturb_a)
Eperturb_a = np.array(Eperturb_a)
return Eperturb_a, scale_a, paramkey_a
#====================================================================
# Implementations
#====================================================================
class _GammaPowLaw(GammaCalc):
_path_opts=['S']
def __init__(self, eos_mod):
super(_GammaPowLaw, self).__init__(eos_mod)
pass
def _init_params(self):
"""Initialize list of calculator parameter names."""
V0 = 100
gamma0 = 1.0<|fim▁hole|> self._param_defaults = [V0, gamma0, q]
self._param_scales = [V0, gamma0, q]
pass
def _calc_gamma(self, V_a):
V0, gamma0, q = self.eos_mod.get_param_values(
param_names=['V0','gamma0','q'])
gamma_a = gamma0 *(V_a/V0)**q
return gamma_a
def _calc_gamma_deriv(self, V_a):
q, = self.eos_mod.get_param_values(param_names=['q'])
gamma_a = self._calc_gamma(V_a)
gamma_deriv_a = q*gamma_a/V_a
return gamma_deriv_a
def _calc_temp(self, V_a, T0=None):
if T0 is None:
T0 = self.eos_mod.refstate.ref_temp()
# T0, = self.eos_mod.get_param_values(param_names=['T0'], overrides=[T0])
gamma0, q = self.eos_mod.get_param_values(
param_names=['gamma0','q'])
gamma_a = self._calc_gamma(V_a)
T_a = T0*np.exp(-(gamma_a - gamma0)/q)
return T_a
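# Annotation (not original source): along an adiabat dlnT/dlnV = -gamma(V).
# With the power law gamma = gamma0*(V_a/V0)**q this integrates to
# ln(T/T0) = -(gamma - gamma0)/q, i.e. T = T0*exp(-(gamma - gamma0)/q),
# which is the expression returned above.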
#====================================================================
class _GammaShiftPowLaw(GammaCalc):
"""
Shifted Power Law description of Grüneisen Parameter (Al’tshuler, 1987)
"""
_path_opts=['S']
def __init__(self, eos_mod):
super(_GammaShiftPowLaw, self).__init__(eos_mod)
pass
def _init_params(self):
"""Initialize list of calculator parameter names."""
V0 = 100
gamma0 = 1.5
gamma_inf = 2/3
beta = 1.4
T0 = 300
self._param_names = ['V0', 'gamma0', 'gamma_inf', 'beta', 'T0']
self._param_units = ['ang^3', '1', '1', '1', 'K']
self._param_defaults = [V0, gamma0, gamma_inf, beta, T0]
self._param_scales = [V0, gamma0, gamma_inf, beta, T0]
pass
def _calc_gamma(self, V_a):
V0, gamma0, gamma_inf, beta = self.eos_mod.get_param_values(
param_names=['V0','gamma0','gamma_inf','beta'])
gamma_a = gamma_inf + (gamma0-gamma_inf)*(V_a/V0)**beta
return gamma_a
def _calc_gamma_deriv(self, V_a):
gamma_inf, beta = self.eos_mod.get_param_values(
param_names=['gamma_inf','beta'])
gamma_a = self._calc_gamma(V_a)
gamma_deriv_a = beta/V_a*(gamma_a-gamma_inf)
return gamma_deriv_a
def _calc_temp(self, V_a, T0=None):
T0, = self.eos_mod.get_param_values(param_names=['T0'], overrides=[T0])
V0, gamma0, gamma_inf, beta = self.eos_mod.get_param_values(
param_names=['V0','gamma0','gamma_inf','beta'])
gamma_a = self._calc_gamma(V_a)
x = V_a/V0
T_a = T0*x**(-gamma_inf)*np.exp((gamma0-gamma_inf)/beta*(1-x**beta))
return T_a
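# Annotation (not original source): integrating dlnT/dlnV = -gamma(V) with the
# shifted power law gamma = gamma_inf + (gamma0 - gamma_inf)*x**beta, x = V/V0,
# gives ln(T/T0) = -gamma_inf*ln(x) + (gamma0 - gamma_inf)/beta*(1 - x**beta),
# which is the expression returned above.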
#====================================================================
class _GammaFiniteStrain(GammaCalc):
_path_opts=['S']
def __init__(self, eos_mod):
super(_GammaFiniteStrain, self).__init__(eos_mod)
pass
def _init_params(self):
"""Initialize list of calculator parameter names."""
V0 = 100
gamma0 = 0.5
gammap0 = -2
self._param_names = ['V0', 'gamma0', 'gammap0']
self._param_units = ['ang^3', '1', '1']
self._param_defaults = [V0, gamma0, gammap0]
self._param_scales = [V0, gamma0, gammap0]
pass
def _calc_strain_coefs(self):
V0, gamma0, gammap0 = self.eos_mod.get_param_values(
param_names=['V0','gamma0','gammap0'])
a1 = 6*gamma0
a2 = -12*gamma0 +36*gamma0**2 -18*gammap0
return a1, a2
def _calc_fstrain(self, V_a, deriv=False):
V0, = self.eos_mod.get_param_values(param_names=['V0'])
x = V_a/V0
if deriv:
return -1/(3*V0)*x**(-5/3)
else:
return 1/2*(x**(-2/3)-1)
pass
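# Annotation (not original source): this is the Eulerian finite strain
# f = 1/2*((V0/V)**(2/3) - 1); the deriv=True branch returns
# df/dV = -1/(3*V0)*(V/V0)**(-5/3).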
def _calc_gamma(self, V_a):
a1, a2 = self._calc_strain_coefs()
fstr_a = self._calc_fstrain(V_a)
gamma_a = (2*fstr_a+1)*(a1+a2*fstr_a)/(6*(1+a1*fstr_a+0.5*a2*fstr_a**2))
return gamma_a
def _calc_gamma_deriv(self, V_a):
a1, a2 = self._calc_strain_coefs()
fstr_a = self._calc_fstrain(V_a)
fstr_deriv = self._calc_fstrain(V_a, deriv=True)
gamma_a = self._calc_gamma(V_a)
gamma_deriv_a = gamma_a*fstr_deriv*(
2/(2*fstr_a+1)+a2/(a1+a2*fstr_a)
-(a1+a2*fstr_a)/(1+a1*fstr_a+.5*a2*fstr_a**2))
return gamma_deriv_a
def _calc_temp(self, V_a, T0=None):
if T0 is None:
T0 = self.eos_mod.refstate.ref_temp()
a1, a2 = self._calc_strain_coefs()
fstr_a = self._calc_fstrain(V_a)
T_a = T0*np.sqrt(1 + a1*fstr_a + 0.5*a2*fstr_a**2)
return T_a
#====================================================================<|fim▁end|>
|
q = 1.0
self._param_names = ['V0', 'gamma0', 'q']
self._param_units = ['ang^3', '1', '1']
|
<|file_name|>persistent.ts<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
|
export abstract class Persistent {
public id: number;
}
|
<|file_name|>bnw_shell.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import os.path as path<|fim▁hole|>sys.path.insert(0,root)<|fim▁end|>
|
import sys
root=path.abspath(path.dirname(__file__))
|
<|file_name|>__main__.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3.9
# -*- coding: utf-8 -*-
"""Eris Codebase Monitor
Eris maintains an up-to-date set of reports for every file in a codebase.
A status indicator summarises the state of each report, and a report is viewed
by selecting this status indicator with the cursor.
The reports are cached in the codebase's root directory in a ".eris"
directory.
"""
import asyncio
import contextlib
import functools
import gzip
import importlib<|fim▁hole|>import math
import multiprocessing
import os
import pickle
import shutil
import signal
import subprocess
import sys
import time
import docopt
import pygments.styles
import pyinotify
import eris
import eris.fill3 as fill3
import eris.sorted_collection as sorted_collection
import eris.terminal as terminal
import eris.termstr as termstr
import eris.tools as tools
import eris.worker as worker
import eris.paged_list as paged_list
USAGE = """
Usage:
eris [options] <directory>
eris -h | --help
eris -i | --info
Example:
# eris my_project
Options:
-h, --help Show the full help.
-i, --info Show information about the available tools.
-w COUNT, --workers=COUNT The number of processes working in parallel.
By default it is the number of cpus minus 1.
-e "COMMAND", --editor="COMMAND" The command used to start the editor, in
the *edit command. It may contain options.
-t THEME, --theme=THEME The pygment theme used for syntax
highlighting. Defaults to "native".
-c TYPE, --compression=TYPE The type of compression used in the cache:
gzip, lzma, bz2, or none. Defaults to gzip.
"""
KEYS_DOC = """Keys:
arrow keys, page up/down, mouse - Move the cursor or scroll the result pane.
tab - Change the focus between summary and result pane.
q, esc - Quit.
h - Show the help screen. (toggle)
o - Orient the result pane as portrait or landscape. (toggle)
l - Show the activity log. (toggle)
e - Edit the current file with an editor defined by -e, $EDITOR or $VISUAL.
n - Move to the next issue.
N - Move to the next issue of the current tool.
s - Sort files by type, or by directory location. (toggle)
r - Refresh the currently selected report.
R - Refresh all reports of the current tool.
f - Resize the focused pane to the full screen. (toggle)
x - Open the current file with xdg-open.
"""
class Entry:
MAX_WIDTH = 0
def __init__(self, path, results, change_time, highlighted=None,
set_results=True):
self.path = path
self.change_time = change_time
self.highlighted = highlighted
self.results = results
if set_results:
# FIX: this is missed for entries appended later
for result in results:
result.entry = self
self.widget = fill3.Row(results)
self.appearance_cache = None
self.last_width = None
def __eq__(self, other):
return self.path == other.path
def __len__(self):
return len(self.results)
def __getitem__(self, index):
return self.results[index]
def appearance_min(self):
if self.appearance_cache is None \
or self.last_width != Entry.MAX_WIDTH:
self.last_width = Entry.MAX_WIDTH
if self.highlighted is not None:
self.results[self.highlighted].is_highlighted = True
row_appearance = self.widget.appearance_min()
path = tools.path_colored(self.path)
padding = " " * (self.last_width - len(self.results) + 1)
self.appearance_cache = [row_appearance[0] + padding + path]
if self.highlighted is not None:
self.results[self.highlighted].is_highlighted = False
return self.appearance_cache
def as_html(self):
html_parts = []
styles = set()
for result in self.widget:
result_html, result_styles = result.as_html()
html_parts.append(result_html)
styles.update(result_styles)
path = tools.path_colored(self.path)
padding = " " * (Entry.MAX_WIDTH - len(self.widget) + 1)
path_html, path_styles = termstr.TermStr(padding + path).as_html()
return "".join(html_parts) + path_html, styles.union(path_styles)
def is_path_excluded(path):
return any(part.startswith(".") for part in path.split(os.path.sep))
def codebase_files(path, skip_hidden_directories=True):
for (dirpath, dirnames, filenames) in os.walk(path):
if skip_hidden_directories:
filtered_dirnames = [dirname for dirname in dirnames
if not is_path_excluded(dirname)]
dirnames[:] = filtered_dirnames
for filename in filenames:
if not is_path_excluded(filename):
yield os.path.join(dirpath, filename)
def fix_paths(root_path, paths):
return (os.path.join(".", os.path.relpath(path, root_path))
for path in paths)
def blend_color(a_color, b_color, transparency):
a_r, a_g, a_b = a_color
b_r, b_g, b_b = b_color
complement = 1 - transparency
return (int(a_r * transparency + b_r * complement),
int(a_g * transparency + b_g * complement),
int(a_b * transparency + b_b * complement))
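# Example (illustrative): blending towards black with transparency 0.7 keeps 70% of
# the first colour, e.g. blend_color((255, 255, 255), (0, 0, 0), 0.7) == (178, 178, 178).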
def highlight_str(line, highlight_color, transparency):
@functools.lru_cache()
def blend_style(style):
fg_color = (style.fg_color if type(style.fg_color) == tuple
else termstr.xterm_color_to_rgb(style.fg_color))
bg_color = (style.bg_color if type(style.bg_color) == tuple
else termstr.xterm_color_to_rgb(style.bg_color))
return termstr.CharStyle(
blend_color(fg_color, highlight_color, transparency),
blend_color(bg_color, highlight_color, transparency),
is_bold=style.is_bold, is_italic=style.is_italic,
is_underlined=style.is_underlined)
return termstr.TermStr(line).transform_style(blend_style)
def in_green(str_):
return termstr.TermStr(str_, termstr.CharStyle(termstr.Color.lime))
_UP, _DOWN, _LEFT, _RIGHT = (0, -1), (0, 1), (-1, 0), (1, 0)
def directory_sort(entry):
path = entry.path
return (os.path.dirname(path), tools.splitext(path)[1],
os.path.basename(path))
def type_sort(entry):
path = entry.path
return (tools.splitext(path)[1], os.path.dirname(path),
os.path.basename(path))
class Summary:
def __init__(self, root_path, jobs_added_event):
self._root_path = root_path
self._jobs_added_event = jobs_added_event
self._view_widget = fill3.View.from_widget(self)
self.is_directory_sort = True
self._old_entries = []
self.__cursor_position = (0, 0)
self.reset()
def reset(self):
Entry.MAX_WIDTH = 0
self._max_path_length = 0
self.result_total = 0
self.completed_total = 0
self.is_loaded = False
self.closest_placeholder_generator = None
sort_func = directory_sort if self.is_directory_sort else type_sort
self._entries = sorted_collection.SortedCollection([], key=sort_func)
def __getstate__(self):
state = self.__dict__.copy()
state["closest_placeholder_generator"] = None
state["_jobs_added_event"] = None
summary_path = os.path.join(tools.CACHE_PATH, "summary_dir")
open_compressed = functools.partial(gzip.open, compresslevel=1)
x, y = self.cursor_position()
if y == 0:
entries = []
else:
entries = itertools.chain(
[self._entries[y]], itertools.islice(self._entries, y),
itertools.islice(self._entries, y+1, None))
state["_old_entries"] = paged_list.PagedList(
entries, summary_path, 2000, 1, exist_ok=True,
open_func=open_compressed)
state["_entries"] = None
state["__cursor_position"] = (x, 0)
return state
def __setstate__(self, state):
self.__dict__ = state
self.reset()
@property
def _cursor_position(self):
return self.__cursor_position
@_cursor_position.setter
def _cursor_position(self, new_position):
if new_position != self.__cursor_position:
self.__cursor_position = new_position
self.closest_placeholder_generator = None
def sort_entries(self):
key_func = directory_sort if self.is_directory_sort else type_sort
self._entries = sorted_collection.SortedCollection(
self._entries, key=key_func)
self.closest_placeholder_generator = None
def add_entry(self, entry):
if entry in self._entries:
return
for result in entry:
self.result_total += 1
if result.is_completed:
self.completed_total += 1
Entry.MAX_WIDTH = max(len(entry), Entry.MAX_WIDTH)
self._max_path_length = max(len(entry.path) - len("./"),
self._max_path_length)
entry_index = self._entries.insert(entry)
x, y = self._cursor_position
if entry_index <= y:
self.scroll(0, -1)
self._jobs_added_event.set()
if self.is_loaded:
self.closest_placeholder_generator = None
def on_file_added(self, path):
full_path = os.path.join(self._root_path, path)
try:
change_time = os.stat(full_path).st_ctime
except OSError:
return
row = [tools.Result(path, tool) for tool in tools.tools_for_path(path)]
entry = Entry(path, row, change_time)
self.add_entry(entry)
def on_file_deleted(self, path):
if os.path.exists(os.path.join(self._root_path, path)):
return
entry = Entry(path, [], None)
try:
index = self._entries.index(entry)
except ValueError:
return
x, y = self._cursor_position
if index < y:
self.scroll(0, 1)
for result in self._entries[index]:
if result.is_completed:
self.completed_total -= 1
self.result_total -= 1
result.delete()
row = self._entries[index]
del self._entries._keys[index]
del self._entries._items[index]
if len(row) == Entry.MAX_WIDTH:
Entry.MAX_WIDTH = max((len(entry) for entry in self._entries),
default=0)
if (len(path) - 2) == self._max_path_length:
self._max_path_length = max(((len(entry.path) - 2)
for entry in self._entries), default=0)
x, y = self._cursor_position
if y == len(self._entries):
self._cursor_position = x, y - 1
self.closest_placeholder_generator = None
def on_file_modified(self, path):
entry = Entry(path, [], None)
try:
entry_index = self._entries.index(entry)
except ValueError:
return
entry = self._entries[entry_index]
for result in entry:
self.refresh_result(result, only_completed=False)
self.closest_placeholder_generator = None
return entry
@contextlib.contextmanager
def keep_selection(self):
try:
cursor_path = self.get_selection().path
except AttributeError:
yield
return
x, y = self._cursor_position
yield
for index, row in enumerate(self._entries):
if row.path == cursor_path:
self._cursor_position = (x, index)
return
if y >= len(self._entries):
self._cursor_position = (x, len(self._entries) - 1)
async def sync_with_filesystem(self, appearance_changed_event, log=None):
start_time = time.time()
cache = {}
log.log_message("Started loading summary…")
for index, entry in enumerate(self._old_entries):
if index != 0 and index % 5000 == 0:
log.log_message(f"Loaded {index} files…")
await asyncio.sleep(0)
self.add_entry(entry)
if index % 1000 == 0:
appearance_changed_event.set()
cache[entry.path] = entry.change_time
duration = time.time() - start_time
log.log_message(f"Finished loading summary. {round(duration, 2)} secs")
self.is_loaded = True
self.closest_placeholder_generator = None
log.log_message("Started sync with filesystem…")
start_time = time.time()
all_paths = set()
for path in fix_paths(self._root_path, codebase_files(self._root_path)):
await asyncio.sleep(0)
all_paths.add(path)
if path in cache:
full_path = os.path.join(self._root_path, path)
change_time = os.stat(full_path).st_ctime
if change_time != cache[path]:
cache[path] = change_time
entry = self.on_file_modified(path)
entry.change_time = change_time
else:
self.on_file_added(path)
appearance_changed_event.set()
for path in cache.keys() - all_paths:
await asyncio.sleep(0)
self.on_file_deleted(path)
duration = time.time() - start_time
log.log_message(f"Finished sync with filesystem. {round(duration, 2)} secs")
def _sweep_up(self, x, y):
yield from reversed(self._entries[y][:x])
while True:
y = (y - 1) % len(self._entries)
yield from reversed(self._entries[y])
def _sweep_down(self, x, y):
yield from self._entries[y][x:]
while True:
y = (y + 1) % len(self._entries)
yield from self._entries[y]
def _sweep_combined(self, x, y):
for up_result, down_result in zip(self._sweep_up(x, y),
self._sweep_down(x, y)):
yield down_result
yield up_result
def _placeholder_sweep(self):
x, y = self.cursor_position()
for index, result in enumerate(self._sweep_combined(x, y)):
if index > self.result_total:
break
if result.status == tools.Status.pending:
yield result
async def get_closest_placeholder(self):
if self.closest_placeholder_generator is None:
self.closest_placeholder_generator = self._placeholder_sweep()
try:
return self.closest_placeholder_generator.send(None)
except StopIteration:
raise StopAsyncIteration
def appearance_dimensions(self):
return self._max_path_length + 1 + Entry.MAX_WIDTH, len(self._entries)
def appearance_interval(self, interval):
start_y, end_y = interval
x, y = self.cursor_position()
self._entries[y].highlighted = x
self._entries[y].appearance_cache = None
appearance = fill3.Column(self._entries).appearance_interval(interval)
self._entries[y].highlighted = None
self._entries[y].appearance_cache = None
return appearance
def _set_scroll_position(self, cursor_x, cursor_y, summary_height):
scroll_x, scroll_y = new_scroll_x, new_scroll_y = \
self._view_widget.position
if cursor_y < scroll_y:
new_scroll_y = max(cursor_y - summary_height + 1, 0)
if (scroll_y + summary_height - 1) < cursor_y:
new_scroll_y = cursor_y
self._view_widget.position = new_scroll_x, new_scroll_y
def _highlight_cursor_row(self, appearance, cursor_y):
scroll_x, scroll_y = self._view_widget.position
highlighted_y = cursor_y - scroll_y
appearance[highlighted_y] = (highlight_str(
appearance[highlighted_y][:-1], termstr.Color.white, 0.8)
+ appearance[highlighted_y][-1])
return appearance
def appearance(self, dimensions):
width, height = dimensions
if len(self._entries) == 0:
return [" " * width for row in range(height)]
cursor_x, cursor_y = self.cursor_position()
width, height = width - 1, height - 1 # Minus one for the scrollbars
self._set_scroll_position(cursor_x, cursor_y, height)
return self._highlight_cursor_row(
self._view_widget.appearance(dimensions), cursor_y)
def scroll(self, dx, dy):
scroll_x, scroll_y = self._view_widget.position
dy = min(dy, scroll_y)
self._view_widget.position = scroll_x, scroll_y - dy
self._move_cursor((0, -dy))
def cursor_position(self):
x, y = self._cursor_position
try:
return min(x, len(self._entries[y])-1), y
except IndexError:
return 0, 0
def get_selection(self):
x, y = self.cursor_position()
return self._entries[y][x]
def _move_cursor(self, vector):
if vector == (0, 0):
return
dx, dy = vector
if dy == 0:
x, y = self.cursor_position()
self._cursor_position = ((x + dx) % len(self._entries[y]), y)
elif dx == 0:
x, y = self._cursor_position
self._cursor_position = (x, (y + dy) % len(self._entries))
else:
raise ValueError
def cursor_right(self):
self._move_cursor(_RIGHT)
def cursor_left(self):
self._move_cursor(_LEFT)
def cursor_up(self):
self._move_cursor(_UP)
def cursor_down(self):
self._move_cursor(_DOWN)
def cursor_page_up(self):
view_width, view_height = self._view_widget.portal.last_dimensions
self.scroll(0, view_height)
def cursor_page_down(self):
view_width, view_height = self._view_widget.portal.last_dimensions
self.scroll(0, -view_height)
def cursor_home(self):
x, y = self._cursor_position
self._cursor_position = x, 0
def cursor_end(self):
x, y = self._cursor_position
self._cursor_position = x, len(self._entries) - 1
def _issue_generator(self):
x, y = self.cursor_position()
for index in range(len(self._entries) + 1):
row_index = (index + y) % len(self._entries)
row = self._entries[row_index]
for index_x, result in enumerate(row):
if (result.status == tools.Status.problem and
not (row_index == y and index_x <= x and
index != len(self._entries))):
yield result, (index_x, row_index)
def move_to_next_issue(self):
with contextlib.suppress(StopIteration):
issue, self._cursor_position = self._issue_generator().send(None)
def move_to_next_issue_of_tool(self):
current_tool = self.get_selection().tool
for issue, position in self._issue_generator():
if issue.tool == current_tool:
self._cursor_position = position
return
def refresh_result(self, result, only_completed=True):
if result.is_completed or not only_completed:
if result.is_completed:
self.completed_total -= 1
result.reset()
result.delete()
self.closest_placeholder_generator = None
self._jobs_added_event.set()
def refresh_tool(self, tool):
for row in self._entries:
for result in row:
if result.tool == tool:
self.refresh_result(result)
def clear_running(self):
for row in self._entries:
for result in row:
if result.status == tools.Status.running:
self.refresh_result(result)
def as_html(self):
html_parts = []
styles = set()
for row in self._entries:
html_row, styles_row = row.as_html()
html_parts.append(html_row)
styles.update(styles_row)
return ("<style>a { text-decoration:none; }</style><pre>" +
"<br>".join(html_parts) + "</pre>"), styles
class Log:
_GREY_BOLD_STYLE = termstr.CharStyle(termstr.Color.grey_100, is_bold=True)
_GREEN_STYLE = termstr.CharStyle(termstr.Color.lime)
def __init__(self, appearance_changed_event):
self._appearance_changed_event = appearance_changed_event
self.lines = []
self._appearance = None
def __getstate__(self):
state = self.__dict__.copy()
state["_appearance_changed_event"] = None
return state
def log_message(self, message, timestamp=None, char_style=None):
if isinstance(message, list):
message = [part[1] if isinstance(part, tuple) else part
for part in message]
message = fill3.join("", message)
if char_style is not None:
message = termstr.TermStr(message, char_style)
timestamp = (time.strftime("%H:%M:%S", time.localtime())
if timestamp is None else timestamp)
line = termstr.TermStr(timestamp, Log._GREY_BOLD_STYLE) + " " + message
if not sys.stdout.isatty():
print(line, flush=True)
return
self.lines.append(line)
self._appearance = None
self._appearance_changed_event.set()
def log_command(self, message, timestamp=None):
self.log_message(message, char_style=Log._GREEN_STYLE)
def appearance(self, dimensions):
if self._appearance is None or \
fill3.appearance_dimensions(self._appearance) != dimensions:
width, height = dimensions
del self.lines[:-height]
self._appearance = fill3.appearance_resize(self.lines, dimensions)
return self._appearance
def highlight_chars(str_, style, marker="*"):
parts = str_.split(marker)
highlighted_parts = [termstr.TermStr(part[0], style) + part[1:]
for part in parts[1:] if part != ""]
return fill3.join("", [parts[0]] + highlighted_parts)
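# Example (illustrative): highlight_chars(" *help *quit", style) applies the style to
# the first character after each "*" marker (the "h" of "help" and the "q" of "quit");
# this is how the key hints in the status bar are rendered.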
def get_status_help():
return fill3.join("\n", ["Statuses:"] +
[" " + tools.STATUS_TO_TERMSTR[status] + " " + meaning
for status, meaning in tools.STATUS_MEANINGS])
class Help:
def __init__(self, summary, screen):
self.summary = summary
self.screen = screen
help_text = fill3.join("\n", [__doc__, KEYS_DOC, get_status_help()])
self.view = fill3.View.from_widget(fill3.Text(help_text))
self.widget = fill3.Border(self.view, title="Help")
portal = self.view.portal
self.key_map = {"h": self._exit_help, terminal.UP_KEY: portal.scroll_up,
terminal.DOWN_KEY: portal.scroll_down,
terminal.LEFT_KEY: portal.scroll_left,
terminal.RIGHT_KEY: portal.scroll_right,
"q": self._exit_help, terminal.ESC: self._exit_help}
def _exit_help(self):
self.screen._is_help_visible = False
def on_mouse_input(self, term_code, appearance_changed_event):
event = terminal.decode_mouse_input(term_code)
if event[1] == terminal.WHEEL_UP_MOUSE:
self.view.portal.scroll_up()
appearance_changed_event.set()
elif event[1] == terminal.WHEEL_DOWN_MOUSE:
self.view.portal.scroll_down()
appearance_changed_event.set()
def on_keyboard_input(self, term_code, appearance_changed_event):
action = (self.key_map.get(term_code) or
self.key_map.get(term_code.lower()))
if action is not None:
action()
appearance_changed_event.set()
def appearance(self, dimensions):
return self.widget.appearance(dimensions)
class Listing:
def __init__(self, view):
self.view = view
self.last_dimensions = None
def appearance(self, dimensions):
self.last_dimensions = dimensions
return self.view.appearance(dimensions)
class Screen:
def __init__(self, summary, log, appearance_changed_event, main_loop):
self._summary = summary
self._log = log
self._appearance_changed_event = appearance_changed_event
self._main_loop = main_loop
self._is_summary_focused = True
self.workers = None
self._is_listing_portrait = True
self._is_log_visible = True
self._is_help_visible = False
self._is_fullscreen = False
self._make_widgets()
self._last_mouse_position = 0, 0
def __getstate__(self):
state = self.__dict__.copy()
state["_appearance_changed_event"] = None
state["_main_loop"] = None
state["workers"] = None
return state
def make_workers(self, worker_count, is_being_tested, compression):
workers = []
for index in range(worker_count):
worker_ = worker.Worker(is_being_tested, compression)
workers.append(worker_)
future = worker_.job_runner(self, self._summary, self._log,
self._summary._jobs_added_event,
self._appearance_changed_event)
worker_.future = future
self.workers = workers
def stop_workers(self):
for worker_ in self.workers:
if worker_.result is not None:
worker_.result.reset()
worker_.kill()
def _partition(self, percentage, widgets, length):
smaller_length = max(int(length * (percentage / 100)), 10)
return [smaller_length, length - smaller_length]
def _make_widgets(self):
self._help_widget = Help(self._summary, self)
root_path = os.path.basename(self._summary._root_path)
summary = fill3.Border(self._summary, title="Summary of " + root_path)
self._summary_border = summary
try:
selected_widget = self._summary.get_selection()
result_widget = selected_widget.result
except IndexError:
result_widget = fill3.Text("Nothing selected")
self._view = fill3.View.from_widget(result_widget)
self._listing = fill3.Border(Listing(self._view))
log = fill3.Border(self._log, title="Log",
characters=Screen._DIMMED_BORDER)
quarter_cut = functools.partial(self._partition, 25)
golden_cut = functools.partial(self._partition, 38.198)
three_quarter_cut = functools.partial(self._partition, 75)
port_log = fill3.Row([fill3.Column([summary, log], three_quarter_cut),
self._listing], golden_cut)
land_log = fill3.Column([fill3.Row([summary, log]), self._listing],
quarter_cut)
port_no_log = fill3.Row([summary, self._listing], golden_cut)
land_no_log = fill3.Column([summary, self._listing], quarter_cut)
self._layouts = [[land_no_log, port_no_log], [land_log, port_log]]
self._set_focus()
def toggle_help(self):
self._is_help_visible = not self._is_help_visible
def toggle_log(self):
self._is_log_visible = not self._is_log_visible
def toggle_window_orientation(self):
self._is_listing_portrait = not self._is_listing_portrait
def _move_listing(self, vector):
dx, dy = vector
selected_widget = self._summary.get_selection()
x, y = selected_widget.scroll_position
widget_width, widget_height = fill3.appearance_dimensions(
selected_widget.result.appearance_min())
listing_width, listing_height = (self._listing.widget.
last_dimensions)
listing_width -= 1 # scrollbars
listing_height -= 1
x = min(x + dx, max(widget_width - listing_width, 0))
y = min(y + dy, max(widget_height - listing_height, 0))
x = max(0, x)
y = max(0, y)
selected_widget.scroll_position = x, y
def cursor_up(self):
(self._summary.cursor_up() if self._is_summary_focused
else self._move_listing(_UP))
def cursor_down(self):
(self._summary.cursor_down() if self._is_summary_focused
else self._move_listing(_DOWN))
def cursor_right(self):
(self._summary.cursor_right() if self._is_summary_focused
else self._move_listing(_RIGHT))
def cursor_left(self):
(self._summary.cursor_left() if self._is_summary_focused
else self._move_listing(_LEFT))
def cursor_page_up(self):
(self._summary.cursor_page_up() if self._is_summary_focused
else self.listing_page_up())
def cursor_page_down(self):
(self._summary.cursor_page_down() if self._is_summary_focused
else self.listing_page_down())
def cursor_end(self):
(self._summary.cursor_end() if self._is_summary_focused
else self._page_listing(_RIGHT))
def cursor_home(self):
(self._summary.cursor_home() if self._is_summary_focused
else self._page_listing(_LEFT))
def _page_listing(self, vector):
dx, dy = vector
listing_width, listing_height = self._listing.widget.last_dimensions
self._move_listing((dx * (listing_width // 2),
dy * (listing_height // 2)))
def listing_page_up(self):
self._page_listing(_UP)
def listing_page_down(self):
self._page_listing(_DOWN)
def move_to_next_issue(self):
self._summary.move_to_next_issue()
def move_to_next_issue_of_tool(self):
self._summary.move_to_next_issue_of_tool()
def edit_file(self):
if self.editor_command is None:
self._log.log_message("An editor has not been defined. "
"See option -e.")
else:
path = self._summary.get_selection().path
path_colored = tools.path_colored(path)
line_num = (self._summary.get_selection().entry[0].
scroll_position[1] + 1)
self._log.log_message([in_green("Editing "), path_colored,
in_green(f" at line {line_num}…")])
subprocess.Popen(f"{self.editor_command} +{line_num} {path}",
shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def toggle_status_style(self):
self._summary.toggle_status_style(self._log)
def toggle_order(self):
self._summary.is_directory_sort = not self._summary.is_directory_sort
sort_order = ("directory then type" if self._summary.is_directory_sort
else "type then directory")
self._log.log_command(f"Sorting files by {sort_order}.")
with self._summary.keep_selection():
self._summary.sort_entries()
def quit_(self):
os.kill(os.getpid(), signal.SIGINT)
def refresh(self):
selection = self._summary.get_selection()
tool_name = tools.tool_name_colored(selection.tool, selection.path)
path_colored = tools.path_colored(selection.path)
self._log.log_message([in_green("Refreshing "), tool_name,
in_green(" result of "), path_colored,
in_green("…")])
self._summary.refresh_result(selection)
def refresh_tool(self):
selection = self._summary.get_selection()
tool_name = tools.tool_name_colored(selection.tool, selection.path)
self._log.log_message([in_green("Refreshing all results of "),
tool_name, in_green("…")])
self._summary.refresh_tool(selection.tool)
_DIMMED_BORDER = [termstr.TermStr(part).fg_color(termstr.Color.grey_100)
for part in fill3.Border.THIN]
def _set_focus(self):
focused, unfocused = fill3.Border.THICK, Screen._DIMMED_BORDER
self._summary_border.set_style(focused if self._is_summary_focused
else unfocused)
self._listing.set_style(unfocused if self._is_summary_focused
else focused)
def toggle_focus(self):
self._is_summary_focused = not self._is_summary_focused
self._set_focus()
def toggle_fullscreen(self):
self._is_fullscreen = not self._is_fullscreen
def xdg_open(self):
path = self._summary.get_selection().path
path_colored = tools.path_colored(path)
self._log.log_message([in_green("Opening "), path_colored,
in_green("…")])
subprocess.Popen(["xdg-open", path], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def save(self):
worker.Worker.unsaved_jobs_total = 0
pickle_path = os.path.join(tools.CACHE_PATH, "summary.pickle")
open_compressed = functools.partial(gzip.open, compresslevel=1)
tools.dump_pickle_safe(self, pickle_path, open=open_compressed)
def _select_entry_at_position(self, x, y, view_width, view_height):
border_width = 1
if x < border_width or y < border_width or x > view_width or \
y > view_height:
return
view_x, view_y = self._summary._view_widget.portal.position
column_index = x - border_width + view_x
row_index = y - border_width + view_y
if row_index >= len(self._summary._entries):
return
row = self._summary._entries[row_index]
if column_index < 0 or column_index >= len(row):
return
self._summary._cursor_position = column_index, row_index
def _is_switching_focus(self, x, y, view_width, view_height):
return (not self._is_fullscreen and
(self._is_listing_portrait and
(x > view_width and
self._is_summary_focused or x <= view_width and
not self._is_summary_focused) or
not self._is_listing_portrait and
(y > view_height and
self._is_summary_focused or y <= view_height and
not self._is_summary_focused)))
def on_mouse_input(self, term_code):
if self._is_help_visible:
self._help_widget.on_mouse_input(
term_code, self._appearance_changed_event)
return
event = terminal.decode_mouse_input(term_code)
if event[0] not in [terminal.PRESS_MOUSE, terminal.DRAG_MOUSE]:
return
x, y = event[2:4]
if event[0] == terminal.DRAG_MOUSE:
last_x, last_y = self._last_mouse_position
dx, dy = x - last_x, y - last_y
if self._is_summary_focused:
self._summary.scroll(dx, dy)
else:
self._move_listing((-dx, -dy))
else: # Mouse press
if event[1] == terminal.WHEEL_UP_MOUSE:
self.listing_page_up()
elif event[1] == terminal.WHEEL_DOWN_MOUSE:
self.listing_page_down()
else:
view_width, view_height = \
self._summary._view_widget.portal.last_dimensions
if self._is_switching_focus(x, y, view_width, view_height):
self.toggle_focus()
else:
self._select_entry_at_position(
x, y, view_width, view_height)
self._last_mouse_position = x, y
self._appearance_changed_event.set()
def on_keyboard_input(self, term_code):
if self._is_help_visible:
self._help_widget.on_keyboard_input(
term_code, self._appearance_changed_event)
return
action = (Screen._KEY_MAP.get(term_code) or
Screen._KEY_MAP.get(term_code.lower()))
if action is not None:
action(self)
self._appearance_changed_event.set()
def _fix_listing(self):
widget = self._summary.get_selection()
view = self._listing.widget.view
view.position = widget.scroll_position
x, y = view.position
view.widget = widget.result
tool_name = tools.tool_name_colored(widget.tool, widget.path)
divider = " " + self._listing.top * 2 + " "
self._listing.title = (
tools.path_colored(widget.path) + divider + tool_name + " " +
tools.STATUS_TO_TERMSTR[widget.status] + divider +
"line " + str(y+1))
_STATUS_BAR = highlight_chars(
" *help *quit *t*a*b:focus *orient *log *edit *next *sort"
" *refresh *fullscreen *xdg-open", Log._GREEN_STYLE)
@functools.lru_cache()
def _get_partial_bar_chars(self, bar_transparency):
bar_color = blend_color(termstr.Color.black, termstr.Color.white,
bar_transparency)
return [termstr.TermStr(char).fg_color(bar_color).
bg_color(termstr.Color.black)
for char in fill3.ScrollBar._PARTIAL_CHARS[1]]
@functools.lru_cache(maxsize=2)
def _get_status_bar_appearance(self, width, progress_bar_size):
bar_transparency = 0.7
bar = self._STATUS_BAR.center(width)[:width]
fraction, whole = math.modf(progress_bar_size)
whole = int(whole)
if whole < len(bar) and bar[whole].data == " ":
left_part = bar[:whole]
right_part = (self._get_partial_bar_chars(bar_transparency)
[int(fraction * 8)] + bar[whole+1:])
else:
progress_bar_size = round(progress_bar_size)
left_part = bar[:progress_bar_size]
right_part = bar[progress_bar_size:]
return [highlight_str(left_part, termstr.Color.white, bar_transparency)
+ right_part]
def _get_status_bar(self, width):
incomplete = self._summary.result_total - self._summary.completed_total
progress_bar_size = width if self._summary.result_total == 0 else \
max(0, width * incomplete / self._summary.result_total)
return self._get_status_bar_appearance(width, progress_bar_size)
def appearance(self, dimensions):
if len(self._summary._entries) > 0:
self._fix_listing()
if self._is_help_visible:
body = self._help_widget
elif self._is_fullscreen:
body = (self._summary_border if self._is_summary_focused
else self._listing)
else:
body = (self._layouts[self._is_log_visible]
[self._is_listing_portrait])
width, height = max(dimensions[0], 10), max(dimensions[1], 20)
result = (body.appearance((width, height-1)) +
self._get_status_bar(width))
return (result if (width, height) == dimensions
else fill3.appearance_resize(result, dimensions))
_KEY_MAP = {
"o": toggle_window_orientation, "l": toggle_log, "h": toggle_help,
terminal.UP_KEY: cursor_up, terminal.DOWN_KEY: cursor_down,
terminal.LEFT_KEY: cursor_left, terminal.RIGHT_KEY: cursor_right,
terminal.PAGE_DOWN_KEY: cursor_page_down,
terminal.PAGE_UP_KEY: cursor_page_up, "s": toggle_order,
terminal.HOME_KEY: cursor_home, terminal.END_KEY: cursor_end,
"n": move_to_next_issue, "N": move_to_next_issue_of_tool,
"e": edit_file, "q": quit_, terminal.ESC: quit_, "r": refresh,
"R": refresh_tool, "\t": toggle_focus, "f": toggle_fullscreen,
"x": xdg_open}
def setup_inotify(root_path, loop, on_filesystem_event, exclude_filter):
watch_manager = pyinotify.WatchManager()
event_mask = (pyinotify.IN_CREATE | pyinotify.IN_DELETE |
pyinotify.IN_CLOSE_WRITE | pyinotify.IN_ATTRIB |
pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO)
watch_manager.add_watch(root_path, event_mask, rec=True, auto_add=True,
proc_fun=on_filesystem_event,
exclude_filter=exclude_filter, quiet=False)
return pyinotify.AsyncioNotifier(watch_manager, loop,
callback=lambda notifier: None)
def load_state(pickle_path, jobs_added_event, appearance_changed_event,
root_path, loop):
is_first_run = True
try:
with gzip.open(pickle_path, "rb") as file_:
screen = pickle.load(file_)
except (FileNotFoundError, AttributeError):
summary = Summary(root_path, jobs_added_event)
log = Log(appearance_changed_event)
screen = Screen(summary, log, appearance_changed_event, loop)
else:
is_first_run = False
screen._appearance_changed_event = appearance_changed_event
screen._main_loop = loop
summary = screen._summary
summary._jobs_added_event = jobs_added_event
summary._root_path = root_path
summary.clear_running()
log = screen._log
log._appearance_changed_event = appearance_changed_event
return summary, screen, log, is_first_run
def on_filesystem_event(event, summary, root_path, appearance_changed_event):
path = list(fix_paths(root_path, [event.pathname]))[0]
if is_path_excluded(path[2:]):
return
inotify_actions = {pyinotify.IN_CREATE: summary.on_file_added,
pyinotify.IN_MOVED_TO: summary.on_file_added,
pyinotify.IN_DELETE: summary.on_file_deleted,
pyinotify.IN_MOVED_FROM: summary.on_file_deleted,
pyinotify.IN_ATTRIB: summary.on_file_modified,
pyinotify.IN_CLOSE_WRITE: summary.on_file_modified}
if event.mask not in inotify_actions:
return
try:
inotify_actions[event.mask](path)
except Exception:
tools.log_error()
raise KeyboardInterrupt
appearance_changed_event.set()
def main(root_path, loop, worker_count=None, editor_command=None, theme=None,
compression=None, is_being_tested=False):
if worker_count is None:
worker_count = max(multiprocessing.cpu_count() - 1, 1)
if theme is None:
theme = "native"
if compression is None:
compression = "gzip"
os.environ["PYGMENT_STYLE"] = theme
pickle_path = os.path.join(tools.CACHE_PATH, "summary.pickle")
jobs_added_event = asyncio.Event()
appearance_changed_event = asyncio.Event()
summary, screen, log, is_first_run = load_state(
pickle_path, jobs_added_event, appearance_changed_event, root_path,
loop)
screen.editor_command = editor_command
log.log_message("Program started.")
jobs_added_event.set()
callback = lambda event: on_filesystem_event(event, summary, root_path,
appearance_changed_event)
notifier = setup_inotify(root_path, loop, callback, is_path_excluded)
try:
log.log_message(f"Starting workers ({worker_count}) …")
screen.make_workers(worker_count, is_being_tested, compression)
def exit_loop():
log.log_command("Exiting…")
time.sleep(0.05)
screen.stop_workers()
loop.stop()
loop.create_task(summary.sync_with_filesystem(
appearance_changed_event, log))
for worker in screen.workers:
loop.create_task(worker.future)
if sys.stdout.isatty():
with fill3.context(loop, appearance_changed_event, screen,
exit_loop=exit_loop):
loop.run_forever()
log.log_message("Program stopped.")
else:
try:
loop.run_forever()
except KeyboardInterrupt:
screen.stop_workers()
loop.stop()
finally:
notifier.stop()
if summary.is_loaded:
screen.save()
@contextlib.contextmanager
def chdir(path):
old_cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(old_cwd)
def manage_cache(root_path):
cache_path = os.path.join(root_path, tools.CACHE_PATH)
timestamp_path = os.path.join(cache_path, "creation_time")
if os.path.exists(cache_path):
timestamp = os.stat(timestamp_path).st_mtime
for resource_path in ["__main__.py", "tools.py", "tools.toml"]:
with importlib.resources.path(eris, resource_path) as resource:
if resource.stat().st_mtime > timestamp:
print("Eris has been updated, so clearing the cache and"
" recalculating all results…")
shutil.rmtree(cache_path)
break
if not os.path.exists(cache_path):
os.mkdir(cache_path)
open(timestamp_path, "w").close()
def print_tool_info():
extensions_for_tool = {}
for extensions, tools_ in tools.TOOLS_FOR_EXTENSIONS:
for extension in extensions:
for tool in tools_:
extensions_for_tool.setdefault(
tool, {extension}).add(extension)
for tool in sorted(tools.tools_all(), key=lambda t: t.__name__):
print(termstr.TermStr(tool.__name__).bold()
if tools.is_tool_available(tool)
else termstr.TermStr(tool.__name__).fg_color(termstr.Color.red)
+ " (not available) ")
print("url:", tool.url)
extensions = list(extensions_for_tool.get(tool, {"*"}))
print("extensions:", ", ".join(extensions))
if hasattr(tool, "command"):
print(f"command: {tool.command} foo.{extensions[0]}")
else:
print("function:", "eris.tools." + tool.__name__)
print()
def check_arguments():
cmdline_help = __doc__ + USAGE.replace("*", "")
arguments = docopt.docopt(cmdline_help, help=False)
if arguments["--help"]:
print(cmdline_help)
sys.exit(0)
if arguments["--info"]:
print_tool_info()
sys.exit(0)
worker_count = None
try:
if arguments["--workers"] is not None:
worker_count = int(arguments["--workers"])
if worker_count == 0:
print("There must be at least one worker.")
sys.exit(1)
except ValueError:
print("--workers requires a number.")
sys.exit(1)
root_path = os.path.abspath(arguments["<directory>"])
if not os.path.exists(root_path):
print("File does not exist:", root_path)
sys.exit(1)
if not os.path.isdir(root_path):
print("File is not a directory:", root_path)
sys.exit(1)
if arguments["--theme"] is not None:
themes = list(pygments.styles.get_all_styles())
if arguments["--theme"] not in themes:
print("--theme must be one of:", " ".join(themes))
sys.exit(1)
if arguments["--compression"] is not None:
compressions = ["gzip", "lzma", "bz2", "none"]
if arguments["--compression"] not in compressions:
print("--compression must be one of:", " ".join(compressions))
sys.exit(1)
editor_command = arguments["--editor"] or os.environ.get("EDITOR", None)\
or os.environ.get("VISUAL", None)
return root_path, worker_count, editor_command, arguments["--theme"], \
arguments["--compression"]
def inotify_watches_exceeded():
print("Error: This codebase has too many directories to be monitored.")
print(" Fix by increasing the kernel parameter user.max_inotify_watches "
"to exceed the number of directories.")
print(" e.g. 'sudo sysctl user.max_inotify_watches=200000'")
def entry_point():
root_path, worker_count, editor_command, theme, compression = \
check_arguments()
manage_cache(root_path)
with terminal.terminal_title("eris: " + os.path.basename(root_path)):
with chdir(root_path): # FIX: Don't change directory if possible.
loop = asyncio.get_event_loop()
try:
main(root_path, loop, worker_count, editor_command, theme,
compression)
except pyinotify.WatchManagerError:
inotify_watches_exceeded()
if __name__ == "__main__":
entry_point()<|fim▁end|>
|
import importlib.resources
import itertools
|
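A minimal sketch of how a fill-in-the-middle row like the one above can be stitched back together, assuming only the begin/hole/end markers rendered in this dump; the helper name and the toy strings are illustrative, not values taken from the dataset.

FIM_BEGIN = "<|fim\u2581begin|>"  # assumed marker spellings, written with an escaped U+2581
FIM_HOLE = "<|fim\u2581hole|>"
FIM_END = "<|fim\u2581end|>"

def reassemble(prompt: str, completion: str) -> str:
    # The prompt carries the file with one hole; the completion is the text that fills it.
    body = prompt.split(FIM_BEGIN, 1)[-1]
    body = body.rsplit(FIM_END, 1)[0]
    prefix, _, suffix = body.partition(FIM_HOLE)
    return prefix + completion + suffix

# Toy example, not a dataset row:
full = reassemble(FIM_BEGIN + "a = 1\n" + FIM_HOLE + "c = 3\n" + FIM_END, "b = 2\n")
assert full == "a = 1\nb = 2\nc = 3\n"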
<|file_name|>go16.go<|end_file_name|><|fim▁begin|>// +build go1.6
package gotool
import (
"go/build"
"path/filepath"<|fim▁hole|> "runtime"
)
var gorootSrc = filepath.Join(runtime.GOROOT(), "src")
func shouldIgnoreImport(p *build.Package) bool {
return p == nil || len(p.InvalidGoFiles) == 0
}<|fim▁end|>
| |
<|file_name|>writer.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
#coding=utf-8
# Filename: writer.py
'''
Log recording.
@author: 1th
@date: 2017.2.28
'''
from time import sleep
import datetime
from simpleat.conf import settings, globalvar
from simpleat.core import exceptions
from .logger import write_log
_CMD_OUT = settings.CMD_OUT # Whether to also print output on the command line
_LOG_OUT = settings.LOG_OUT # Whether to write log output while the program is running
_LOG_DIR = settings.LOG_DIR # Folder where log files are stored
def log(logmsg, level, logstr=_LOG_DIR):
'''
    Record a log entry; the current timestamp is added automatically.
    Args:
        logmsg: the message to record
        level: log level
        logstr: folder that holds the log file
'''
fulllogmsg = ''.join(['[', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), \
'] - ', level, ' - ', unicode(logmsg)])
try:
write_log(fulllogmsg, level, logstr)
except exceptions.WriteLogException as wle:
if _CMD_OUT:
print unicode(wle)
<|fim▁hole|>def logger():
'''
    Automatically check whether LOG_MESSAGE holds log entries that still need to be written.
'''
while True:
if not globalvar.g_hold_lognote.empty():
content, level = globalvar.g_hold_lognote.get()
if _LOG_OUT:
log(content, level)
sleep(0.5)<|fim▁end|>
| |
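The writer module above polls a shared queue (g_hold_lognote) for pending log entries; the sketch below shows the same producer/consumer shape with the standard-library queue module in Python 3. The function names and the 0.5 second poll interval are assumptions for illustration, not part of the simpleat API.

import queue
import time

log_queue = queue.Queue()  # plays the role of the shared log buffer

def emit(message, level="INFO"):
    # Producer side: enqueue the entry instead of writing it directly.
    log_queue.put((message, level))

def drain_forever(poll_interval=0.5, write=print):
    # Consumer side: poll the queue and pass each entry to the writer callback.
    while True:
        try:
            message, level = log_queue.get_nowait()
        except queue.Empty:
            time.sleep(poll_interval)
            continue
        write(f"{level}: {message}")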
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>#![no_std]
#[macro_use]
extern crate std_artiq as std;
extern crate board;
extern crate byteorder;
mod proto;
#[cfg(has_drtio)]
mod crc32;
use std::io::{self, Read, Write};
#[cfg(has_drtio)]
use core::slice;
use proto::*;
#[derive(Debug)]
pub enum Packet {
EchoRequest,
EchoReply,
RtioErrorRequest,
RtioNoErrorReply,
RtioErrorCollisionReply,
RtioErrorBusyReply,
MonitorRequest { channel: u16, probe: u8 },
MonitorReply { value: u32 },
InjectionRequest { channel: u16, overrd: u8, value: u8 },
InjectionStatusRequest { channel: u16, overrd: u8 },
InjectionStatusReply { value: u8 },
I2cStartRequest { busno: u8 },
I2cRestartRequest { busno: u8 },
I2cStopRequest { busno: u8 },
I2cWriteRequest { busno: u8, data: u8 },
I2cWriteReply { succeeded: bool, ack: bool },
I2cReadRequest { busno: u8, ack: bool },
I2cReadReply { succeeded: bool, data: u8 },
I2cBasicReply { succeeded: bool },
SpiSetConfigRequest { busno: u8, flags: u8, write_div: u8, read_div: u8 },
SpiSetXferRequest { busno: u8, chip_select: u16, write_length: u8, read_length: u8 },
SpiWriteRequest { busno: u8, data: u32 },
SpiReadRequest { busno: u8 },
SpiReadReply { succeeded: bool, data: u32 },
SpiBasicReply { succeeded: bool },
}
impl Packet {
pub fn read_from(reader: &mut Read) -> io::Result<Packet> {
Ok(match read_u8(reader)? {
0x00 => Packet::EchoRequest,
0x01 => Packet::EchoReply,
0x20 => Packet::RtioErrorRequest,
0x21 => Packet::RtioNoErrorReply,
0x22 => Packet::RtioErrorCollisionReply,
0x23 => Packet::RtioErrorBusyReply,
0x40 => Packet::MonitorRequest {
channel: read_u16(reader)?,
probe: read_u8(reader)?
},
0x41 => Packet::MonitorReply {
value: read_u32(reader)?
},
0x50 => Packet::InjectionRequest {
channel: read_u16(reader)?,
overrd: read_u8(reader)?,
value: read_u8(reader)?
},
0x51 => Packet::InjectionStatusRequest {
channel: read_u16(reader)?,
overrd: read_u8(reader)?
},
0x52 => Packet::InjectionStatusReply {
value: read_u8(reader)?
},
0x80 => Packet::I2cStartRequest {
busno: read_u8(reader)?
},
0x81 => Packet::I2cRestartRequest {
busno: read_u8(reader)?
},
0x82 => Packet::I2cStopRequest {
busno: read_u8(reader)?
},
0x83 => Packet::I2cWriteRequest {
busno: read_u8(reader)?,
data: read_u8(reader)?
},
0x84 => Packet::I2cWriteReply {
succeeded: read_bool(reader)?,
ack: read_bool(reader)?
},
0x85 => Packet::I2cReadRequest {
busno: read_u8(reader)?,
ack: read_bool(reader)?
},
0x86 => Packet::I2cReadReply {
succeeded: read_bool(reader)?,
data: read_u8(reader)?
},
0x87 => Packet::I2cBasicReply {
succeeded: read_bool(reader)?
},
0x90 => Packet::SpiSetConfigRequest {
busno: read_u8(reader)?,
flags: read_u8(reader)?,
write_div: read_u8(reader)?,
read_div: read_u8(reader)?
},
0x91 => Packet::SpiSetXferRequest {
busno: read_u8(reader)?,
chip_select: read_u16(reader)?,
write_length: read_u8(reader)?,
read_length: read_u8(reader)?
},
0x92 => Packet::SpiWriteRequest {
busno: read_u8(reader)?,
data: read_u32(reader)?
},
0x93 => Packet::SpiReadRequest {
busno: read_u8(reader)?
},
0x94 => Packet::SpiReadReply {
succeeded: read_bool(reader)?,
data: read_u32(reader)?
},
0x95 => Packet::SpiBasicReply {
succeeded: read_bool(reader)?
},
_ => return Err(io::Error::new(io::ErrorKind::InvalidData, "unknown packet type"))
})
}
pub fn write_to(&self, writer: &mut Write) -> io::Result<()> {
match *self {
Packet::EchoRequest => write_u8(writer, 0x00)?,
Packet::EchoReply => write_u8(writer, 0x01)?,
Packet::RtioErrorRequest => write_u8(writer, 0x20)?,
Packet::RtioNoErrorReply => write_u8(writer, 0x21)?,
Packet::RtioErrorCollisionReply => write_u8(writer, 0x22)?,
Packet::RtioErrorBusyReply => write_u8(writer, 0x23)?,
Packet::MonitorRequest { channel, probe } => {
write_u8(writer, 0x40)?;
write_u16(writer, channel)?;
write_u8(writer, probe)?;
},
Packet::MonitorReply { value } => {
write_u8(writer, 0x41)?;
write_u32(writer, value)?;
},
Packet::InjectionRequest { channel, overrd, value } => {
write_u8(writer, 0x50)?;
write_u16(writer, channel)?;
write_u8(writer, overrd)?;
write_u8(writer, value)?;
},
Packet::InjectionStatusRequest { channel, overrd } => {
write_u8(writer, 0x51)?;
write_u16(writer, channel)?;
write_u8(writer, overrd)?;
},
Packet::InjectionStatusReply { value } => {
write_u8(writer, 0x52)?;
write_u8(writer, value)?;
},
Packet::I2cStartRequest { busno } => {
write_u8(writer, 0x80)?;
write_u8(writer, busno)?;
},
Packet::I2cRestartRequest { busno } => {
write_u8(writer, 0x81)?;
write_u8(writer, busno)?;
},
Packet::I2cStopRequest { busno } => {
write_u8(writer, 0x82)?;
write_u8(writer, busno)?;
},
Packet::I2cWriteRequest { busno, data } => {
write_u8(writer, 0x83)?;
write_u8(writer, busno)?;
write_u8(writer, data)?;
},
Packet::I2cWriteReply { succeeded, ack } => {
write_u8(writer, 0x84)?;
write_bool(writer, succeeded)?;
write_bool(writer, ack)?;
},
Packet::I2cReadRequest { busno, ack } => {
write_u8(writer, 0x85)?;
write_u8(writer, busno)?;
write_bool(writer, ack)?;
},
Packet::I2cReadReply { succeeded, data } => {
write_u8(writer, 0x86)?;
write_bool(writer, succeeded)?;
write_u8(writer, data)?;
},
Packet::I2cBasicReply { succeeded } => {
write_u8(writer, 0x87)?;
write_bool(writer, succeeded)?;
},
Packet::SpiSetConfigRequest { busno, flags, write_div, read_div } => {
write_u8(writer, 0x90)?;
write_u8(writer, busno)?;
write_u8(writer, flags)?;
write_u8(writer, write_div)?;
write_u8(writer, read_div)?;
},
Packet::SpiSetXferRequest { busno, chip_select, write_length, read_length } => {
write_u8(writer, 0x91)?;
write_u8(writer, busno)?;
write_u16(writer, chip_select)?;
write_u8(writer, write_length)?;
write_u8(writer, read_length)?;
},
Packet::SpiWriteRequest { busno, data } => {
write_u8(writer, 0x92)?;
write_u8(writer, busno)?;
write_u32(writer, data)?;
},
Packet::SpiReadRequest { busno } => {
write_u8(writer, 0x93)?;
write_u8(writer, busno)?;
},
Packet::SpiReadReply { succeeded, data } => {
write_u8(writer, 0x94)?;
write_bool(writer, succeeded)?;
write_u32(writer, data)?;
},
Packet::SpiBasicReply { succeeded } => {
write_u8(writer, 0x95)?;
write_bool(writer, succeeded)?;
},
}
Ok(())
}
}
#[cfg(has_drtio)]
pub mod hw {
use super::*;
use std::io::Cursor;
fn rx_has_error(linkno: u8) -> bool {
let linkno = linkno as usize;
unsafe {
let error = (board::csr::DRTIO[linkno].aux_rx_error_read)() != 0;
if error {
(board::csr::DRTIO[linkno].aux_rx_error_write)(1)
}
error
}
}
struct RxBuffer(u8, &'static [u8]);
impl Drop for RxBuffer {
fn drop(&mut self) {
unsafe {
(board::csr::DRTIO[self.0 as usize].aux_rx_present_write)(1);
}
}
}
fn rx_get_buffer(linkno: u8) -> Option<RxBuffer> {
let linkidx = linkno as usize;
unsafe {
if (board::csr::DRTIO[linkidx].aux_rx_present_read)() == 1 {
let length = (board::csr::DRTIO[linkidx].aux_rx_length_read)();
let base = board::mem::DRTIO_AUX[linkidx].base + board::mem::DRTIO_AUX[linkidx].size/2;
let sl = slice::from_raw_parts(base as *mut u8, length as usize);
Some(RxBuffer(linkno, sl))
} else {
None
}
}
}
pub fn recv_link(linkno: u8) -> io::Result<Option<Packet>> {
if rx_has_error(linkno) {
return Err(io::Error::new(io::ErrorKind::Other, "gateware reported error"))
}
let buffer = rx_get_buffer(linkno);
match buffer {
Some(rxb) => {
let slice = rxb.1;
let mut reader = Cursor::new(slice);
let len = slice.len();
if len < 8 {
return Err(io::Error::new(io::ErrorKind::InvalidData, "packet too short"))
}
let computed_crc = crc32::checksum_ieee(&reader.get_ref()[0..len-4]);
reader.set_position((len-4) as u64);
let crc = read_u32(&mut reader)?;
if crc != computed_crc {
return Err(io::Error::new(io::ErrorKind::InvalidData, "packet CRC failed"))
}
reader.set_position(0);
let packet_r = Packet::read_from(&mut reader);
match packet_r {
Ok(packet) => Ok(Some(packet)),
Err(e) => Err(e)
}
}
None => Ok(None)
}
}
pub fn recv_timeout_link(linkno: u8, timeout_ms: Option<u64>) -> io::Result<Packet> {
let timeout_ms = timeout_ms.unwrap_or(10);
let limit = board::clock::get_ms() + timeout_ms;
while board::clock::get_ms() < limit {
match recv_link(linkno) {
Ok(None) => (),
Ok(Some(packet)) => return Ok(packet),
Err(e) => return Err(e)
}
}
return Err(io::Error::new(io::ErrorKind::TimedOut, "timed out waiting for data"))
}
fn tx_get_buffer(linkno: u8) -> &'static mut [u8] {
let linkno = linkno as usize;
unsafe {
while (board::csr::DRTIO[linkno].aux_tx_read)() != 0 {}
let base = board::mem::DRTIO_AUX[linkno].base;
let size = board::mem::DRTIO_AUX[linkno].size/2;
slice::from_raw_parts_mut(base as *mut u8, size)
}
}
fn tx_ack_buffer(linkno: u8, length: u16) {
let linkno = linkno as usize;
unsafe {
(board::csr::DRTIO[linkno].aux_tx_length_write)(length);
(board::csr::DRTIO[linkno].aux_tx_write)(1)
}
}
pub fn send_link(linkno: u8, packet: &Packet) -> io::Result<()> {
let sl = tx_get_buffer(linkno);
let mut writer = Cursor::new(sl);
packet.write_to(&mut writer)?;
let mut len = writer.position();
let padding = 4 - (len % 4);
if padding != 4 {
for _ in 0..padding {
write_u8(&mut writer, 0)?;
}
len += padding;<|fim▁hole|> write_u32(&mut writer, crc)?;
len += 4;
tx_ack_buffer(linkno, len as u16);
Ok(())
}
// TODO: routing
fn get_linkno(nodeno: u8) -> io::Result<u8> {
if nodeno == 0 || nodeno as usize > board::csr::DRTIO.len() {
return Err(io::Error::new(io::ErrorKind::NotFound, "invalid node number"))
}
Ok(nodeno - 1)
}
pub fn recv(nodeno: u8) -> io::Result<Option<Packet>> {
let linkno = get_linkno(nodeno)?;
recv_link(linkno)
}
pub fn recv_timeout(nodeno: u8, timeout_ms: Option<u64>) -> io::Result<Packet> {
let linkno = get_linkno(nodeno)?;
recv_timeout_link(linkno, timeout_ms)
}
pub fn send(nodeno: u8, packet: &Packet) -> io::Result<()> {
let linkno = get_linkno(nodeno)?;
send_link(linkno, packet)
}
}<|fim▁end|>
|
}
let crc = crc32::checksum_ieee(&writer.get_ref()[0..len as usize]);
|
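send_link in the row above pads the serialized packet to a 4-byte boundary and appends a CRC32 that recv_link verifies before parsing. Below is a small Python rendering of that framing; zlib.crc32 stands in for the checksum_ieee call and the big-endian trailer is an assumption, so this is a sketch of the idea rather than a wire-compatible implementation.

import zlib

def frame(payload: bytes) -> bytes:
    # Pad to a multiple of 4 bytes, then append the CRC32 of the padded body.
    body = payload + b"\x00" * ((-len(payload)) % 4)
    crc = zlib.crc32(body) & 0xFFFFFFFF
    return body + crc.to_bytes(4, "big")

def unframe(data: bytes) -> bytes:
    # Check the minimum length and the trailing CRC32 before returning the body.
    if len(data) < 8:
        raise ValueError("packet too short")
    body, crc = data[:-4], int.from_bytes(data[-4:], "big")
    if zlib.crc32(body) & 0xFFFFFFFF != crc:
        raise ValueError("packet CRC failed")
    return body

assert unframe(frame(b"\x01\x02\x03")) == b"\x01\x02\x03\x00"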
<|file_name|>FileManager.cpp<|end_file_name|><|fim▁begin|>/*
* This file is part of Dune Legacy.
*
* Dune Legacy is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* Dune Legacy is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Dune Legacy. If not, see <http://www.gnu.org/licenses/>.
*/
#include <FileClasses/FileManager.h>
#include <globals.h>
#include <FileClasses/TextManager.h>
#include <misc/FileSystem.h>
#include <config.h>
#include <misc/fnkdat.h>
#include <misc/md5.h>
#include <misc/string_util.h>
#include <algorithm>
#include <stdexcept>
#include <sstream>
#include <iomanip>
FileManager::FileManager(bool saveMode) {
fprintf(stderr,"\n");
fprintf(stderr,"FileManager is loading PAK-Files...\n\n");
fprintf(stderr,"MD5-Checksum Filename\n");
std::vector<std::string> searchPath = getSearchPath();
std::vector<std::string> FileList = getNeededFiles();
std::vector<std::string>::const_iterator filenameIter;
for(filenameIter = FileList.begin(); filenameIter != FileList.end(); ++filenameIter) {
std::vector<std::string>::const_iterator searchPathIter;
for(searchPathIter = searchPath.begin(); searchPathIter != searchPath.end(); ++searchPathIter) {
std::string filepath = *searchPathIter + "/" + *filenameIter;
if(getCaseInsensitiveFilename(filepath) == true) {
try {
fprintf(stderr,"%s %s\n", md5FromFilename(filepath).c_str(), filepath.c_str());
pakFiles.push_back(new Pakfile(filepath));
} catch (std::exception &e) {
if(saveMode == false) {
                        while(!pakFiles.empty()) {
delete pakFiles.back();
pakFiles.pop_back();
}
throw std::runtime_error("FileManager::FileManager(): Error while opening " + filepath + ": " + e.what());
}
}
// break out of searchPath-loop because we have opened the file in one directory
break;
}
}
}
fprintf(stderr,"\n");
}
FileManager::~FileManager() {
std::vector<Pakfile*>::const_iterator iter;
for(iter = pakFiles.begin(); iter != pakFiles.end(); ++iter) {
delete *iter;
}
}
std::vector<std::string> FileManager::getSearchPath() {
std::vector<std::string> searchPath;
searchPath.push_back(DUNELEGACY_DATADIR);
char tmp[FILENAME_MAX];
fnkdat("data", tmp, FILENAME_MAX, FNKDAT_USER | FNKDAT_CREAT);
searchPath.push_back(tmp);
return searchPath;
}
std::vector<std::string> FileManager::getNeededFiles() {
std::vector<std::string> fileList;
#if 0
fileList.push_back("LEGACY.PAK");
fileList.push_back("OPENSD2.PAK");
fileList.push_back("DUNE.PAK");
fileList.push_back("SCENARIO.PAK");
fileList.push_back("MENTAT.PAK");
fileList.push_back("VOC.PAK");
fileList.push_back("MERC.PAK");
fileList.push_back("FINALE.PAK");
fileList.push_back("INTRO.PAK");
fileList.push_back("INTROVOC.PAK");
fileList.push_back("SOUND.PAK");
std::string LanguagePakFiles = (pTextManager != NULL) ? _("LanguagePakFiles") : "";
if(LanguagePakFiles.empty()) {
LanguagePakFiles = "ENGLISH.PAK,HARK.PAK,ATRE.PAK,ORDOS.PAK";
}
std::vector<std::string> additionalPakFiles = splitString(LanguagePakFiles);
std::vector<std::string>::iterator iter;
for(iter = additionalPakFiles.begin(); iter != additionalPakFiles.end(); ++iter) {
fileList.push_back(*iter);
}
std::sort(fileList.begin(), fileList.end());
#endif
return fileList;
}
std::vector<std::string> FileManager::getMissingFiles() {
std::vector<std::string> MissingFiles;
std::vector<std::string> searchPath = getSearchPath();
std::vector<std::string> FileList = getNeededFiles();
std::vector<std::string>::const_iterator filenameIter;
for(filenameIter = FileList.begin(); filenameIter != FileList.end(); ++filenameIter) {
bool bFound = false;
std::vector<std::string>::const_iterator searchPathIter;
for(searchPathIter = searchPath.begin(); searchPathIter != searchPath.end(); ++searchPathIter) {
std::string filepath = *searchPathIter + "/" + *filenameIter;
if(getCaseInsensitiveFilename(filepath) == true) {
bFound = true;
break;
}
}
if(bFound == false) {
MissingFiles.push_back(*filenameIter);
}
}
return MissingFiles;
}
SDL_RWops* FileManager::openFile(std::string filename) {
SDL_RWops* ret;
// try loading external file
std::vector<std::string> searchPath = getSearchPath();
std::vector<std::string>::const_iterator searchPathIter;
for(searchPathIter = searchPath.begin(); searchPathIter != searchPath.end(); ++searchPathIter) {
std::string externalFilename = *searchPathIter + "/" + filename;
if(getCaseInsensitiveFilename(externalFilename) == true) {
if((ret = SDL_RWFromFile(externalFilename.c_str(), "rb")) != NULL) {
return ret;
}
}
}
// now try loading from pak file
std::vector<Pakfile*>::const_iterator iter;
for(iter = pakFiles.begin(); iter != pakFiles.end(); ++iter) {
ret = (*iter)->openFile(filename);
if(ret != NULL) {
return ret;
}
}
throw std::runtime_error("FileManager::OpenFile(): Cannot find " + filename + "!");
}
bool FileManager::exists(std::string filename) const {
// try finding external file
std::vector<std::string> searchPath = getSearchPath();
std::vector<std::string>::const_iterator searchPathIter;
for(searchPathIter = searchPath.begin(); searchPathIter != searchPath.end(); ++searchPathIter) {
std::string externalFilename = *searchPathIter + "/" + filename;<|fim▁hole|> }
}
// now try finding in one pak file
std::vector<Pakfile*>::const_iterator iter;
for(iter = pakFiles.begin(); iter != pakFiles.end(); ++iter) {
if((*iter)->exists(filename) == true) {
return true;
}
}
return false;
}
std::string FileManager::md5FromFilename(std::string filename) {
unsigned char md5sum[16];
if(md5_file(filename.c_str(), md5sum) != 0) {
throw std::runtime_error("Cannot open or read " + filename + "!");
} else {
std::stringstream stream;
stream << std::setfill('0') << std::hex;
for(int i=0;i<16;i++) {
stream << std::setw(2) << (int) md5sum[i];
}
return stream.str();
}
}<|fim▁end|>
|
if(getCaseInsensitiveFilename(externalFilename) == true) {
return true;
|
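md5FromFilename in the file above builds a zero-padded hex string from the raw MD5 bytes by hand. The same result in Python, streaming the file in chunks so a large PAK file never has to sit in memory; the chunk size is an arbitrary choice.

import hashlib

def md5_from_filename(path, chunk_size=64 * 1024):
    # Stream the file through MD5 and return the 32-character lowercase hex digest.
    digest = hashlib.md5()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()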
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# Copyright (c) 2019 Red Hat, Inc.
# Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Molecule distribution package setuptools installer."""
import setuptools
HAS_DIST_INFO_CMD = False
try:
import setuptools.command.dist_info
HAS_DIST_INFO_CMD = True
except ImportError:
"""Setuptools version is too old."""
ALL_STRING_TYPES = tuple(map(type, ('', b'', u'')))
MIN_NATIVE_SETUPTOOLS_VERSION = 34, 4, 0
"""Minimal setuptools having good read_configuration implementation."""
RUNTIME_SETUPTOOLS_VERSION = tuple(map(int, setuptools.__version__.split('.')))
"""Setuptools imported now."""
READ_CONFIG_SHIM_NEEDED = (
RUNTIME_SETUPTOOLS_VERSION < MIN_NATIVE_SETUPTOOLS_VERSION
)
def str_if_nested_or_str(s):
"""Turn input into a native string if possible."""
if isinstance(s, ALL_STRING_TYPES):
return str(s)
if isinstance(s, (list, tuple)):
return type(s)(map(str_if_nested_or_str, s))
if isinstance(s, (dict, )):
return stringify_dict_contents(s)
return s
def stringify_dict_contents(dct):
"""Turn dict keys and values into native strings."""
return {
str_if_nested_or_str(k): str_if_nested_or_str(v)
for k, v in dct.items()
}
if not READ_CONFIG_SHIM_NEEDED:
from setuptools.config import read_configuration, ConfigOptionsHandler
import setuptools.config
import setuptools.dist
# Set default value for 'use_scm_version'
setattr(setuptools.dist.Distribution, 'use_scm_version', False)
# Attach bool parser to 'use_scm_version' option
class ShimConfigOptionsHandler(ConfigOptionsHandler):
"""Extension class for ConfigOptionsHandler."""
@property
def parsers(self):
"""Return an option mapping with default data type parsers."""
_orig_parsers = super(ShimConfigOptionsHandler, self).parsers
return dict(use_scm_version=self._parse_bool, **_orig_parsers)
def parse_section_packages__find(self, section_options):
find_kwargs = super(
ShimConfigOptionsHandler, self
).parse_section_packages__find(section_options)
return stringify_dict_contents(find_kwargs)
setuptools.config.ConfigOptionsHandler = ShimConfigOptionsHandler
else:
"""This is a shim for setuptools<required."""
import functools
import io
import json
import sys
import warnings
try:
import setuptools.config
def filter_out_unknown_section(i):
def chi(self, *args, **kwargs):
i(self, *args, **kwargs)
self.sections = {
s: v for s, v in self.sections.items()
if s != 'packages.find'
}
return chi
setuptools.config.ConfigHandler.__init__ = filter_out_unknown_section(
setuptools.config.ConfigHandler.__init__,
)
except ImportError:
pass
def ignore_unknown_options(s):
@functools.wraps(s)
def sw(**attrs):
try:
ignore_warning_regex = (
r"Unknown distribution option: "
r"'(license_file|project_urls|python_requires)'"
)
warnings.filterwarnings(
'ignore',
message=ignore_warning_regex,
category=UserWarning,
module='distutils.dist',
)
return s(**attrs)
finally:
warnings.resetwarnings()
return sw
def parse_predicates(python_requires):
import itertools
import operator
sorted_operators_map = tuple(sorted(
{
'>': operator.gt,
'<': operator.lt,
'>=': operator.ge,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'': operator.eq,
}.items(),
key=lambda i: len(i[0]),
reverse=True,
))
def is_decimal(s):
return type(u'')(s).isdecimal()
conditions = map(str.strip, python_requires.split(','))
for c in conditions:
for op_sign, op_func in sorted_operators_map:
if not c.startswith(op_sign):
continue
raw_ver = itertools.takewhile(
is_decimal,
c[len(op_sign):].strip().split('.'),
)
ver = tuple(map(int, raw_ver))
yield op_func, ver
break
def validate_required_python_or_fail(python_requires=None):
if python_requires is None:
return
python_version = sys.version_info
preds = parse_predicates(python_requires)
for op, v in preds:
py_ver_slug = python_version[:max(len(v), 3)]
condition_matches = op(py_ver_slug, v)
if not condition_matches:
raise RuntimeError(
"requires Python '{}' but the running Python is {}".
format(
python_requires,
'.'.join(map(str, python_version[:3])),
)
)
def verify_required_python_runtime(s):
@functools.wraps(s)
def sw(**attrs):
try:
validate_required_python_or_fail(attrs.get('python_requires'))
except RuntimeError as re:
sys.exit('{} {!s}'.format(attrs['name'], re))
return s(**attrs)
return sw
setuptools.setup = ignore_unknown_options(setuptools.setup)
setuptools.setup = verify_required_python_runtime(setuptools.setup)
try:
from configparser import ConfigParser, NoSectionError
except ImportError:
from ConfigParser import ConfigParser, NoSectionError
ConfigParser.read_file = ConfigParser.readfp
def maybe_read_files(d):
"""Read files if the string starts with `file:` marker."""
FILE_FUNC_MARKER = 'file:'
d = d.strip()
if not d.startswith(FILE_FUNC_MARKER):
return d
descs = []
for fname in map(str.strip, str(d[len(FILE_FUNC_MARKER):]).split(',')):
with io.open(fname, encoding='utf-8') as f:
descs.append(f.read())
return ''.join(descs)<|fim▁hole|> return list(filter(bool, map(str.strip, str(v).strip().splitlines())))
def cfg_val_to_dict(v):
"""Turn config val to dict and filter out empty lines."""
return dict(
map(lambda l: list(map(str.strip, l.split('=', 1))),
filter(bool, map(str.strip, str(v).strip().splitlines())))
)
def cfg_val_to_primitive(v):
"""Parse primitive config val to appropriate data type."""
return json.loads(v.strip().lower())
def read_configuration(filepath):
"""Read metadata and options from setup.cfg located at filepath."""
cfg = ConfigParser()
with io.open(filepath, encoding='utf-8') as f:
cfg.read_file(f)
md = dict(cfg.items('metadata'))
for list_key in 'classifiers', 'keywords', 'project_urls':
try:
md[list_key] = cfg_val_to_list(md[list_key])
except KeyError:
pass
try:
md['long_description'] = maybe_read_files(md['long_description'])
except KeyError:
pass
opt = dict(cfg.items('options'))
for list_key in 'include_package_data', 'use_scm_version', 'zip_safe':
try:
opt[list_key] = cfg_val_to_primitive(opt[list_key])
except KeyError:
pass
for list_key in 'scripts', 'install_requires', 'setup_requires':
try:
opt[list_key] = cfg_val_to_list(opt[list_key])
except KeyError:
pass
try:
opt['package_dir'] = cfg_val_to_dict(opt['package_dir'])
except KeyError:
pass
try:
opt_package_data = dict(cfg.items('options.package_data'))
if not opt_package_data.get('', '').strip():
opt_package_data[''] = opt_package_data['*']
del opt_package_data['*']
except (KeyError, NoSectionError):
opt_package_data = {}
try:
opt_extras_require = dict(cfg.items('options.extras_require'))
opt['extras_require'] = {}
for k, v in opt_extras_require.items():
opt['extras_require'][k] = cfg_val_to_list(v)
except NoSectionError:
pass
opt['package_data'] = {}
for k, v in opt_package_data.items():
opt['package_data'][k] = cfg_val_to_list(v)
try:
opt_exclude_package_data = dict(
cfg.items('options.exclude_package_data'),
)
if (
not opt_exclude_package_data.get('', '').strip()
and '*' in opt_exclude_package_data
):
opt_exclude_package_data[''] = opt_exclude_package_data['*']
del opt_exclude_package_data['*']
except NoSectionError:
pass
else:
opt['exclude_package_data'] = {}
for k, v in opt_exclude_package_data.items():
opt['exclude_package_data'][k] = cfg_val_to_list(v)
cur_pkgs = opt.get('packages', '').strip()
if '\n' in cur_pkgs:
opt['packages'] = cfg_val_to_list(opt['packages'])
elif cur_pkgs.startswith('find:'):
opt_packages_find = stringify_dict_contents(
dict(cfg.items('options.packages.find'))
)
opt['packages'] = setuptools.find_packages(**opt_packages_find)
return {'metadata': md, 'options': opt}
def cut_local_version_on_upload(version):
"""Generate a PEP440 local version if uploading to PyPI."""
import os
import setuptools_scm.version # only present during setup time
IS_PYPI_UPLOAD = os.getenv('PYPI_UPLOAD') == 'true' # set in tox.ini
return (
'' if IS_PYPI_UPLOAD
else setuptools_scm.version.get_local_node_and_date(version)
)
if HAS_DIST_INFO_CMD:
class patched_dist_info(setuptools.command.dist_info.dist_info):
def run(self):
self.egg_base = str_if_nested_or_str(self.egg_base)
return setuptools.command.dist_info.dist_info.run(self)
declarative_setup_params = read_configuration('setup.cfg')
"""Declarative metadata and options as read by setuptools."""
setup_params = {}
"""Explicit metadata for passing into setuptools.setup() call."""
setup_params = dict(setup_params, **declarative_setup_params['metadata'])
setup_params = dict(setup_params, **declarative_setup_params['options'])
if HAS_DIST_INFO_CMD:
setup_params['cmdclass'] = {
'dist_info': patched_dist_info,
}
setup_params['use_scm_version'] = {
'local_scheme': cut_local_version_on_upload,
}
# Patch incorrectly decoded package_dir option
# ``egg_info`` demands native strings failing with unicode under Python 2
# Ref https://github.com/pypa/setuptools/issues/1136
setup_params = stringify_dict_contents(setup_params)
__name__ == '__main__' and setuptools.setup(**setup_params)<|fim▁end|>
|
def cfg_val_to_list(v):
"""Turn config val to list and filter out empty lines."""
|
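The shim in the row above falls back to parsing setup.cfg by hand when setuptools is too old to provide read_configuration. A stripped-down illustration of that idea is below; it only handles a couple of list-valued keys and is a simplification for clarity, not the molecule shim itself.

import configparser

def read_minimal_configuration(path="setup.cfg"):
    # Parse [metadata] and [options], splitting newline-separated values into lists,
    # which is roughly what the cfg_val_to_list helper above does.
    parser = configparser.ConfigParser()
    with open(path, encoding="utf-8") as handle:
        parser.read_file(handle)
    metadata = dict(parser.items("metadata")) if parser.has_section("metadata") else {}
    options = dict(parser.items("options")) if parser.has_section("options") else {}
    for key in ("classifiers", "keywords"):
        if key in metadata:
            metadata[key] = [line.strip() for line in metadata[key].splitlines() if line.strip()]
    for key in ("install_requires", "setup_requires"):
        if key in options:
            options[key] = [line.strip() for line in options[key].splitlines() if line.strip()]
    return {"metadata": metadata, "options": options}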
<|file_name|>test_sns.py<|end_file_name|><|fim▁begin|># Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import json
from .common import BaseTest, functional
from c7n.resources.aws import shape_validate
from c7n.utils import yaml_load
class TestSNS(BaseTest):
@functional
def test_sns_remove_matched(self):
session_factory = self.replay_flight_data("test_sns_remove_matched")
client = session_factory().client("sns")
name = "test-sns-remove-matched"
topic_arn = client.create_topic(Name=name)["TopicArn"]
self.addCleanup(client.delete_topic, TopicArn=topic_arn)
client.set_topic_attributes(
TopicArn=topic_arn,
AttributeName="Policy",
AttributeValue=json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "SpecificAllow",
"Effect": "Allow",
"Principal": {"AWS": "arn:aws:iam::644160558196:root"},
"Action": ["SNS:Subscribe"],
"Resource": topic_arn,
},
{
"Sid": "Public",
"Effect": "Allow",
"Principal": "*",
"Action": ["SNS:GetTopicAttributes"],
"Resource": topic_arn,
},
],
}
),
)
p = self.load_policy(
{
"name": "sns-rm-matched",
"resource": "sns",
"filters": [
{"TopicArn": topic_arn},
{"type": "cross-account", "whitelist": ["123456789012"]},
],
"actions": [{"type": "remove-statements", "statement_ids": "matched"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual([r["TopicArn"] for r in resources], [topic_arn])
data = json.loads(
client.get_topic_attributes(TopicArn=resources[0]["TopicArn"])[
"Attributes"
][
"Policy"
]
)
self.assertEqual(
[s["Sid"] for s in data.get("Statement", ())], ["SpecificAllow"]
)
@functional
def test_sns_remove_named(self):
session_factory = self.replay_flight_data("test_sns_remove_named")
client = session_factory().client("sns")
name = "test-sns-remove-named"
topic_arn = client.create_topic(Name=name)["TopicArn"]
self.addCleanup(client.delete_topic, TopicArn=topic_arn)
client.set_topic_attributes(
TopicArn=topic_arn,
AttributeName="Policy",
AttributeValue=json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "SpecificAllow",
"Effect": "Allow",
"Principal": "*",
"Action": ["SNS:Subscribe"],
"Resource": topic_arn,
},
{
"Sid": "RemoveMe",
"Effect": "Allow",
"Principal": "*",
"Action": ["SNS:GetTopicAttributes"],
"Resource": topic_arn,
},
],
}
),
)
p = self.load_policy(
{
"name": "sns-rm-named",
"resource": "sns",
"filters": [{"TopicArn": topic_arn}],
"actions": [
{"type": "remove-statements", "statement_ids": ["RemoveMe"]}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
data = json.loads(
client.get_topic_attributes(TopicArn=resources[0]["TopicArn"])[
"Attributes"
][
"Policy"
]
)
self.assertTrue("RemoveMe" not in [s["Sid"] for s in data.get("Statement", ())])
@functional
def test_sns_modify_replace_policy(self):
session_factory = self.replay_flight_data("test_sns_modify_replace_policy")
client = session_factory().client("sns")
name = "test_sns_modify_replace_policy"
topic_arn = client.create_topic(Name=name)["TopicArn"]
self.addCleanup(client.delete_topic, TopicArn=topic_arn)
client.set_topic_attributes(
TopicArn=topic_arn,
AttributeName="Policy",
AttributeValue=json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "SpecificAllow",
"Effect": "Allow",
"Principal": "*",
"Action": ["SNS:Subscribe"],
"Resource": topic_arn,
}
],
}
),
)
p = self.load_policy(
{
"name": "sns-modify-replace-policy",
"resource": "sns",
"filters": [{"TopicArn": topic_arn}],
"actions": [
{
"type": "modify-policy",
"add-statements": [
{
"Sid": "ReplaceWithMe",
"Effect": "Allow",
"Principal": "*",
"Action": ["SNS:GetTopicAttributes"],
"Resource": topic_arn,
}
],
"remove-statements": "*",
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
data = json.loads(
client.get_topic_attributes(TopicArn=resources[0]["TopicArn"])[
"Attributes"
][
"Policy"
]
)
self.assertTrue(
"ReplaceWithMe" in [s["Sid"] for s in data.get("Statement", ())]
)
@functional
def test_sns_account_id_template(self):
session_factory = self.replay_flight_data("test_sns_account_id_template")
client = session_factory().client("sns")
name = "test_sns_account_id_template"
topic_arn = client.create_topic(Name=name)["TopicArn"]
self.addCleanup(client.delete_topic, TopicArn=topic_arn)
client.set_topic_attributes(
TopicArn=topic_arn,
AttributeName="Policy",
AttributeValue=json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "SpecificAllow",
"Effect": "Allow",
"Principal": "*",
"Action": ["SNS:Subscribe"],
"Resource": topic_arn,
}
],
}
),
)
p = self.load_policy(
{
"name": "sns-modify-replace-policy",
"resource": "sns",
"filters": [{"TopicArn": topic_arn}],
"actions": [
{
"type": "modify-policy",
"add-statements": [
{
"Sid": "__default_statement_ID_{account_id}",
"Effect": "Allow",
"Principal": {"Service": "s3.amazonaws.com"},
"Action": "SNS:Publish",
"Resource": topic_arn,
"Condition": {
"StringEquals": {
"AWS:SourceAccount": "{account_id}"
},
"ArnLike": {"aws:SourceArn": "arn:aws:s3:*:*:*"},
},
}
],
"remove-statements": "*",<|fim▁hole|> }
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
data = json.loads(
client.get_topic_attributes(TopicArn=resources[0]["TopicArn"])[
"Attributes"
][
"Policy"
]
)
self.assertTrue(
"__default_statement_ID_" +
self.account_id in [s["Sid"] for s in data.get("Statement", ())]
)
@functional
def test_sns_modify_remove_policy(self):
session_factory = self.replay_flight_data("test_sns_modify_remove_policy")
client = session_factory().client("sns")
name = "test_sns_modify_remove_policy"
topic_arn = client.create_topic(Name=name)["TopicArn"]
self.addCleanup(client.delete_topic, TopicArn=topic_arn)
client.set_topic_attributes(
TopicArn=topic_arn,
AttributeName="Policy",
AttributeValue=json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "SpecificAllow",
"Effect": "Allow",
"Principal": "*",
"Action": ["SNS:Subscribe"],
"Resource": topic_arn,
},
{
"Sid": "RemoveMe",
"Effect": "Allow",
"Principal": "*",
"Action": ["SNS:GetTopicAttributes"],
"Resource": topic_arn,
},
],
}
),
)
p = self.load_policy(
{
"name": "sns-modify-remove-policy",
"resource": "sns",
"filters": [{"TopicArn": topic_arn}],
"actions": [
{
"type": "modify-policy",
"add-statements": [],
"remove-statements": ["RemoveMe"],
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
data = json.loads(
client.get_topic_attributes(TopicArn=resources[0]["TopicArn"])[
"Attributes"
][
"Policy"
]
)
self.assertTrue("RemoveMe" not in [s["Sid"] for s in data.get("Statement", ())])
@functional
def test_sns_modify_add_policy(self):
session_factory = self.replay_flight_data("test_sns_modify_add_policy")
client = session_factory().client("sns")
name = "test_sns_modify_add_policy"
topic_arn = client.create_topic(Name=name)["TopicArn"]
self.addCleanup(client.delete_topic, TopicArn=topic_arn)
client.set_topic_attributes(
TopicArn=topic_arn,
AttributeName="Policy",
AttributeValue=json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "SpecificAllow",
"Effect": "Allow",
"Principal": "*",
"Action": ["SNS:Subscribe"],
"Resource": topic_arn,
}
],
}
),
)
p = self.load_policy(
{
"name": "sns-modify-add-policy",
"resource": "sns",
"filters": [{"TopicArn": topic_arn}],
"actions": [
{
"type": "modify-policy",
"add-statements": [
{
"Sid": "AddMe",
"Effect": "Allow",
"Principal": "*",
"Action": ["SNS:GetTopicAttributes"],
"Resource": topic_arn,
}
],
"remove-statements": [],
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
data = json.loads(
client.get_topic_attributes(TopicArn=resources[0]["TopicArn"])[
"Attributes"
][
"Policy"
]
)
self.assertTrue("AddMe" in [s["Sid"] for s in data.get("Statement", ())])
@functional
def test_sns_modify_add_and_remove_policy(self):
session_factory = self.replay_flight_data(
"test_sns_modify_add_and_remove_policy"
)
client = session_factory().client("sns")
name = "test_sns_modify_add_and_remove_policy"
topic_arn = client.create_topic(Name=name)["TopicArn"]
self.addCleanup(client.delete_topic, TopicArn=topic_arn)
client.set_topic_attributes(
TopicArn=topic_arn,
AttributeName="Policy",
AttributeValue=json.dumps(
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "SpecificAllow",
"Effect": "Allow",
"Principal": "*",
"Action": ["SNS:Subscribe"],
"Resource": topic_arn,
},
{
"Sid": "RemoveMe",
"Effect": "Allow",
"Principal": "*",
"Action": ["SNS:GetTopicAttributes"],
"Resource": topic_arn,
},
],
}
),
)
p = self.load_policy(
{
"name": "sns-modify-add-and-remove-policy",
"resource": "sns",
"filters": [{"TopicArn": topic_arn}],
"actions": [
{
"type": "modify-policy",
"add-statements": [
{
"Sid": "AddMe",
"Effect": "Allow",
"Principal": "*",
"Action": ["SNS:GetTopicAttributes"],
"Resource": topic_arn,
}
],
"remove-statements": ["RemoveMe"],
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
data = json.loads(
client.get_topic_attributes(TopicArn=resources[0]["TopicArn"])[
"Attributes"
][
"Policy"
]
)
statement_ids = {s["Sid"] for s in data.get("Statement", ())}
self.assertTrue("AddMe" in statement_ids)
self.assertTrue("RemoveMe" not in statement_ids)
self.assertTrue("SpecificAllow" in statement_ids)
def test_sns_topic_encryption(self):
session_factory = self.replay_flight_data('test_sns_kms_related_filter_test')
kms = session_factory().client('kms')
p = self.load_policy(
{
'name': 'test-sns-kms-related-filter',
'resource': 'sns',
'filters': [
{
'TopicArn': 'arn:aws:sns:us-east-1:644160558196:test'
},
{
'type': 'kms-key',
'key': 'c7n:AliasName',
'value': 'alias/skunk/trails'
}
]
},
session_factory=session_factory
)
resources = p.run()
        self.assertEqual(len(resources), 1)
aliases = kms.list_aliases(KeyId=resources[0]['KmsMasterKeyId'])
self.assertEqual(aliases['Aliases'][0]['AliasName'], 'alias/skunk/trails')
def test_set_sns_topic_encryption(self):
session_factory = self.replay_flight_data('test_sns_set_encryption')
topic = 'arn:aws:sns:us-west-1:644160558196:test'
p = self.load_policy(
{
'name': 'test-sns-kms-related-filter',
'resource': 'sns',
'filters': [
{
'TopicArn': topic
},
{
'KmsMasterKeyId': 'absent'
}
],
'actions': [
{
'type': 'set-encryption'
}
]
},
session_factory=session_factory
)
resources = p.run()
self.assertEqual(len(resources), 1)
sns = session_factory().client('sns')
attributes = sns.get_topic_attributes(TopicArn=topic)
        self.assertEqual(attributes['Attributes']['KmsMasterKeyId'], 'alias/aws/sns')
def test_sns_disable_encryption(self):
session_factory = self.replay_flight_data('test_sns_unset_encryption')
topic = 'arn:aws:sns:us-west-1:644160558196:test'
p = self.load_policy(
{
'name': 'test-sns-kms-related-filter',
'resource': 'sns',
'filters': [
{
'TopicArn': topic
},
{
'KmsMasterKeyId': 'alias/aws/sns'
}
],
'actions': [
{
'type': 'set-encryption',
'enabled': False
}
]
},
session_factory=session_factory
)
resources = p.run()
self.assertEqual(len(resources), 1)
sns = session_factory().client('sns')
attributes = sns.get_topic_attributes(TopicArn=topic)['Attributes']
self.assertFalse(attributes.get('KmsMasterKeyId'))
def test_sns_set_encryption_custom_key(self):
session_factory = self.replay_flight_data('test_sns_set_encryption_custom_key')
topic = 'arn:aws:sns:us-west-1:644160558196:test'
key_alias = 'alias/alias/test/key'
sns = session_factory().client('sns')
p = self.load_policy(
{
'name': 'test-sns-kms-related-filter-alias',
'resource': 'sns',
'filters': [
{
'TopicArn': topic
},
{
'KmsMasterKeyId': 'absent'
}
],
'actions': [
{
'type': 'set-encryption',
'key': key_alias
}
]
},
session_factory=session_factory
)
resources = p.run()
self.assertEqual(len(resources), 1)
attributes = sns.get_topic_attributes(TopicArn=topic)['Attributes']
self.assertEqual(attributes.get('KmsMasterKeyId'), key_alias)
def test_sns_delete(self):
session_factory = self.replay_flight_data('test_sns_delete_topic')
policy = """
name: delete-sns
resource: aws.sns
filters:
- TopicArn: arn:aws:sns:us-west-1:644160558196:test
actions:
- type: delete
"""
p = self.load_policy(yaml_load(policy), session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client('sns')
resources = client.list_topics()['Topics']
self.assertEqual(len(resources), 0)
def test_sns_tag(self):
session_factory = self.replay_flight_data("test_sns_tag")
p = self.load_policy(
{
"name": "tag-sns",
"resource": "sns",
"filters": [{"tag:Tagging": "absent"}],
"actions": [{"type": "tag", "key": "Tagging", "value": "added"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("sns")
tags = client.list_tags_for_resource(ResourceArn=resources[0]["TopicArn"])["Tags"]
self.assertEqual(tags[0]["Value"], "added")
def test_sns_remove_tag(self):
session_factory = self.replay_flight_data(
"test_sns_remove_tag")
p = self.load_policy(
{
"name": "untag-sns",
"resource": "sns",
"filters": [
{
"type": "marked-for-op",
"tag": "custodian_cleanup",
"op": "delete",
}
],
"actions": [{"type": "remove-tag", "tags": ["custodian_cleanup"]}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("sns")
tags = client.list_tags_for_resource(ResourceArn=resources[0]["TopicArn"])["Tags"]
self.assertEqual(len(tags), 0)
def test_sns_mark_for_op(self):
session_factory = self.replay_flight_data(
"test_sns_mark_for_op"
)
p = self.load_policy(
{
"name": "sns-untagged-delete",
"resource": "sns",
"filters": [
{"tag:Tagging": "absent"},
{"tag:custodian_cleanup": "absent"},
],
"actions": [
{
"type": "mark-for-op",
"tag": "custodian_cleanup",
"op": "delete",
"days": 1,
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory().client("sns")
tags = client.list_tags_for_resource(ResourceArn=resources[0]["TopicArn"])["Tags"]
self.assertTrue(tags[0]["Key"], "custodian_cleanup")
def test_sns_post_finding(self):
factory = self.replay_flight_data('test_sns_post_finding')
p = self.load_policy({
'name': 'sns',
'resource': 'aws.sns',
'actions': [
{'type': 'post-finding',
'types': [
'Software and Configuration Checks/OrgStandard/abc-123']}]},
session_factory=factory, config={'region': 'us-west-2'})
resources = p.resource_manager.get_resources([
'arn:aws:sns:us-west-2:644160558196:config-topic'])
rfinding = p.resource_manager.actions[0].format_resource(
resources[0])
self.assertEqual(
rfinding,
{'Details': {'AwsSnsTopic': {
'Owner': '644160558196',
'TopicName': 'config-topic'}},
'Id': 'arn:aws:sns:us-west-2:644160558196:config-topic',
'Partition': 'aws',
'Region': 'us-west-2',
'Type': 'AwsSnsTopic'})
shape_validate(
rfinding['Details']['AwsSnsTopic'],
'AwsSnsTopicDetails', 'securityhub')
def test_sns_config(self):
session_factory = self.replay_flight_data("test_sns_config")
p = self.load_policy(
{"name": "sns-config",
"source": "config",
"resource": "sns"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 2)
self.assertEqual(resources[0]['Tags'][0]['Value'], 'false')
class TestSubscription(BaseTest):
def test_subscription_delete(self):
factory = self.replay_flight_data("test_subscription_delete")
p = self.load_policy(
{
"name": "external-owner-delete",
"resource": "sns-subscription",
"filters": [
{
"type": "value",
"key": "Owner",
"value": "123456789099",
"op": "ne",
}
],
"actions": [{"type": "delete"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertNotEqual(resources[0]["Owner"], "123456789099")
client = factory().client("sns")
subs = client.list_subscriptions()
for s in subs.get("Subscriptions", []):
self.assertTrue("123456789099" == s.get("Owner"))<|fim▁end|>
| |
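The remove-statements tests above exercise a Cloud Custodian action whose core is a read-modify-write of the topic's Policy attribute. A hedged boto3 sketch of that cycle follows; the SNS client calls mirror the ones used in the tests, while the filtering logic is a simplification of what Custodian actually does.

import json
import boto3

def remove_statements(topic_arn, sids_to_remove, client=None):
    # Read the topic policy, drop statements whose Sid is listed, write the result back.
    client = client or boto3.client("sns")
    attributes = client.get_topic_attributes(TopicArn=topic_arn)["Attributes"]
    policy = json.loads(attributes["Policy"])
    statements = policy.get("Statement", [])
    kept = [s for s in statements if s.get("Sid") not in set(sids_to_remove)]
    if len(kept) != len(statements):
        policy["Statement"] = kept
        client.set_topic_attributes(
            TopicArn=topic_arn,
            AttributeName="Policy",
            AttributeValue=json.dumps(policy),
        )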
<|file_name|>template_depexpr_field_ref.cc<|end_file_name|><|fim▁begin|><|fim▁hole|>// Checks that we don't fall over on fields that depend on expressions.
//- @T defines/binding TyvarT
template <typename T> struct S {
T t;
//- @f ref DepF
//- DepF.node/kind lookup
//- DepF.text f
//- !{DepF param.0 Anything}
//- @thing ref DepThing
//- DepThing.node/kind lookup
//- DepThing param.0 TyvarT
int i = (t.thing(3) + 4).f;
};<|fim▁end|>
| |
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright © 2015, Peter Atashian
// Licensed under the MIT License <LICENSE.md>
//! FFI bindings to oledb.
#![no_std]
#![experimental]<|fim▁hole|>extern "system" {
}<|fim▁end|>
|
extern crate winapi;
use winapi::*;
|
<|file_name|>projects-contribution-report.js<|end_file_name|><|fim▁begin|>import m from 'mithril';
import _ from 'underscore';
import postgrest from 'mithril-postgrest';
import models from '../models';
import h from '../h';
import projectDashboardMenu from '../c/project-dashboard-menu';
import projectContributionReportHeader from '../c/project-contribution-report-header';
import projectContributionReportContent from '../c/project-contribution-report-content';
import projectsContributionReportVM from '../vms/projects-contribution-report-vm';
import FilterMain from '../c/filter-main';
import FilterDropdown from '../c/filter-dropdown';
import InfoProjectContributionLegend from '../c/info-project-contribution-legend';
import ProjectContributionStateLegendModal from '../c/project-contribution-state-legend-modal';
import ProjectContributionDeliveryLegendModal from '../c/project-contribution-delivery-legend-modal';
const projectContributionReport = {
controller(args) {
const listVM = postgrest.paginationVM(models.projectContribution, 'id.desc', {
Prefer: 'count=exact'
}),
filterVM = projectsContributionReportVM,
project = m.prop([{}]),
rewards = m.prop([]),
contributionStateOptions = m.prop([]),
reloadSelectOptions = (projectState) => {
let opts = [{
value: '',
option: 'Todos'
}];
const optionsMap = {
online: [{
value: 'paid',
option: 'Confirmado'
},
{
value: 'pending',
option: 'Iniciado'
},
{
value: 'refunded,chargeback,deleted,pending_refund',
option: 'Contestado'
},
],
waiting_funds: [{
value: 'paid',
option: 'Confirmado'
},
{
value: 'pending',
option: 'Iniciado'
},
{
value: 'refunded,chargeback,deleted,pending_refund',
option: 'Contestado'
},
],
failed: [{
value: 'pending_refund',
option: 'Reembolso em andamento'
},
{
value: 'refunded',
option: 'Reembolsado'
},
{
value: 'paid',
option: 'Reembolso não iniciado'
},
],
successful: [{
value: 'paid',
option: 'Confirmado'
},
{
value: 'refunded,chargeback,deleted,pending_refund',
option: 'Contestado'
},
]
};
opts = opts.concat(optionsMap[projectState] || []);
contributionStateOptions(opts);
},
submit = () => {
if (filterVM.reward_id() === 'null') {
listVM.firstPage(filterVM.withNullParameters()).then(null);
} else {
listVM.firstPage(filterVM.parameters()).then(null);
}
return false;
},
filterBuilder = [{
component: FilterMain,
data: {
inputWrapperClass: '.w-input.text-field',
btnClass: '.btn.btn-medium',
vm: filterVM.full_text_index,
placeholder: 'Busque por nome ou email do apoiador'
}
},
{
label: 'reward_filter',
component: FilterDropdown,
data: {
label: 'Recompensa selecionada',
onchange: submit,
name: 'reward_id',
vm: filterVM.reward_id,
wrapper_class: '.w-sub-col.w-col.w-col-4',
options: []
}<|fim▁hole|> {
label: 'delivery_filter',
component: FilterDropdown,
data: {
custom_label: [InfoProjectContributionLegend, {
content: [ProjectContributionDeliveryLegendModal],
text: 'Status da entrega'
}],
onchange: submit,
name: 'delivery_status',
vm: filterVM.delivery_status,
wrapper_class: '.w-col.w-col-4',
options: [{
value: '',
option: 'Todos'
},
{
value: 'undelivered',
option: 'Não enviada'
},
{
value: 'delivered',
option: 'Enviada'
},
{
value: 'error',
option: 'Erro no envio'
},
{
value: 'received',
option: 'Recebida'
}
]
}
},
{
label: 'payment_state',
component: FilterDropdown,
data: {
custom_label: [InfoProjectContributionLegend, {
text: 'Status do apoio',
content: [ProjectContributionStateLegendModal, {
project
}]
}],
name: 'state',
onchange: submit,
vm: filterVM.state,
wrapper_class: '.w-sub-col.w-col.w-col-4',
options: contributionStateOptions
}
}
];
filterVM.project_id(args.root.getAttribute('data-id'));
const lReward = postgrest.loaderWithToken(models.rewardDetail.getPageOptions({
project_id: `eq.${filterVM.project_id()}`
}));
const lProject = postgrest.loaderWithToken(models.projectDetail.getPageOptions({
project_id: `eq.${filterVM.project_id()}`
}));
lReward.load().then(rewards);
lProject.load().then((data) => {
project(data);
reloadSelectOptions(_.first(data).state);
});
const mapRewardsToOptions = () => {
let options = [];
if (!lReward()) {
options = _.map(rewards(), r => ({
value: r.id,
option: `R$ ${h.formatNumber(r.minimum_value, 2, 3)} - ${r.description.substring(0, 20)}`
}));
}
options.unshift({
value: null,
option: 'Sem recompensa'
});
options.unshift({
value: '',
option: 'Todas'
});
return options;
};
if (!listVM.collection().length) {
listVM.firstPage(filterVM.parameters());
}
return {
listVM,
filterVM,
filterBuilder,
submit,
lReward,
lProject,
rewards,
project,
mapRewardsToOptions
};
},
view(ctrl) {
const list = ctrl.listVM;
if (!ctrl.lProject()) {
return [
m.component(projectDashboardMenu, {
project: m.prop(_.first(ctrl.project()))
}),
m.component(projectContributionReportHeader, {
submit: ctrl.submit,
filterBuilder: ctrl.filterBuilder,
form: ctrl.filterVM.formDescriber,
mapRewardsToOptions: ctrl.mapRewardsToOptions,
filterVM: ctrl.filterVM
}),
m('.divider.u-margintop-30'),
m.component(projectContributionReportContent, {
submit: ctrl.submit,
list,
filterVM: ctrl.filterVM,
project: m.prop(_.first(ctrl.project()))
})
];
}
return h.loader();
}
};
export default projectContributionReport;<|fim▁end|>
|
},
|
<|file_name|>storage-gen.go<|end_file_name|><|fim▁begin|>// Package storage provides access to the Cloud Storage JSON API.
//
// See https://developers.google.com/storage/docs/json_api/
//
// Usage example:
//
// import "google.golang.org/api/storage/v1beta1"
// ...
// storageService, err := storage.New(oauthHttpClient)
package storage // import "google.golang.org/api/storage/v1beta1"
import (
"bytes"
"encoding/json"
"errors"
"fmt"
context "golang.org/x/net/context"
ctxhttp "golang.org/x/net/context/ctxhttp"
gensupport "google.golang.org/api/gensupport"
googleapi "google.golang.org/api/googleapi"
"io"
"net/http"
"net/url"
"strconv"
"strings"
)
// Always reference these packages, just in case the auto-generated code
// below doesn't.
var _ = bytes.NewBuffer
var _ = strconv.Itoa
var _ = fmt.Sprintf
var _ = json.NewDecoder
var _ = io.Copy
var _ = url.Parse
var _ = gensupport.MarshalJSON
var _ = googleapi.Version
var _ = errors.New
var _ = strings.Replace
var _ = context.Canceled
var _ = ctxhttp.Do
const apiId = "storage:v1beta1"
const apiName = "storage"
const apiVersion = "v1beta1"
const basePath = "https://www.googleapis.com/storage/v1beta1/"
// OAuth2 scopes used by this API.
const (
// Manage your data and permissions in Google Cloud Storage
DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control"
// View your data in Google Cloud Storage
DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only"
// Manage your data in Google Cloud Storage
DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write"
)
func New(client *http.Client) (*Service, error) {
if client == nil {
return nil, errors.New("client is nil")
}
s := &Service{client: client, BasePath: basePath}
s.BucketAccessControls = NewBucketAccessControlsService(s)
s.Buckets = NewBucketsService(s)
s.ObjectAccessControls = NewObjectAccessControlsService(s)
s.Objects = NewObjectsService(s)
return s, nil
}
type Service struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
BucketAccessControls *BucketAccessControlsService
Buckets *BucketsService
ObjectAccessControls *ObjectAccessControlsService
Objects *ObjectsService
}
func (s *Service) userAgent() string {
if s.UserAgent == "" {
return googleapi.UserAgent
}
return googleapi.UserAgent + " " + s.UserAgent
}
func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService {
rs := &BucketAccessControlsService{s: s}
return rs
}
type BucketAccessControlsService struct {
s *Service
}
func NewBucketsService(s *Service) *BucketsService {
rs := &BucketsService{s: s}
return rs
}
type BucketsService struct {
s *Service
}
func NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService {
rs := &ObjectAccessControlsService{s: s}
return rs
}
type ObjectAccessControlsService struct {
s *Service
}
func NewObjectsService(s *Service) *ObjectsService {
rs := &ObjectsService{s: s}
return rs
}
type ObjectsService struct {
s *Service
}
// Bucket: A bucket.
type Bucket struct {
// Acl: Access controls on the bucket.
Acl []*BucketAccessControl `json:"acl,omitempty"`
// DefaultObjectAcl: Default access controls to apply to new objects
// when no ACL is provided.
DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"`
// Id: The name of the bucket.
Id string `json:"id,omitempty"`
// Kind: The kind of item this is. For buckets, this is always
// storage#bucket.
Kind string `json:"kind,omitempty"`
// Location: The location of the bucket. Object data for objects in the
// bucket resides in physical storage in this location. Can be US or EU.
// Defaults to US.
Location string `json:"location,omitempty"`
// Owner: The owner of the bucket. This will always be the project
// team's owner group.
Owner *BucketOwner `json:"owner,omitempty"`
// ProjectId: The project the bucket belongs to.
ProjectId uint64 `json:"projectId,omitempty,string"`
// SelfLink: The URI of this bucket.
SelfLink string `json:"selfLink,omitempty"`
// TimeCreated: Creation time of the bucket in RFC 3339 format.
TimeCreated string `json:"timeCreated,omitempty"`
// Website: The bucket's website configuration.
Website *BucketWebsite `json:"website,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Acl") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Acl") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Bucket) MarshalJSON() ([]byte, error) {
type noMethod Bucket
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
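// Illustrative sketch of the ForceSendFields/NullFields mechanism documented
// above (field choices are hypothetical): to send an explicitly empty value,
// or an explicit JSON null, in a Patch-style body, list the field name rather
// than relying on omitempty.
//
//	b := &Bucket{
//		Location:        "",                   // empty value, normally omitted
//		ForceSendFields: []string{"Location"}, // force it into the JSON body
//		NullFields:      []string{"Website"},  // emit "website": null
//	}
//	data, err := b.MarshalJSON()
//	_, _ = data, err
//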
// BucketOwner: The owner of the bucket. This will always be the project
// team's owner group.
type BucketOwner struct {
// Entity: The entity, in the form group-groupId.
Entity string `json:"entity,omitempty"`
// EntityId: The ID for the entity.
EntityId string `json:"entityId,omitempty"`
// ForceSendFields is a list of field names (e.g. "Entity") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Entity") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *BucketOwner) MarshalJSON() ([]byte, error) {
type noMethod BucketOwner
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// BucketWebsite: The bucket's website configuration.
type BucketWebsite struct {
// MainPageSuffix: Behaves as the bucket's directory index where missing
// objects are treated as potential directories.
MainPageSuffix string `json:"mainPageSuffix,omitempty"`
// NotFoundPage: The custom object to return when a requested resource
// is not found.
NotFoundPage string `json:"notFoundPage,omitempty"`
// ForceSendFields is a list of field names (e.g. "MainPageSuffix") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "MainPageSuffix") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *BucketWebsite) MarshalJSON() ([]byte, error) {
type noMethod BucketWebsite
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// BucketAccessControl: An access-control entry.
type BucketAccessControl struct {
// Bucket: The name of the bucket.
Bucket string `json:"bucket,omitempty"`
// Domain: The domain associated with the entity, if any.
Domain string `json:"domain,omitempty"`
// Email: The email address associated with the entity, if any.
Email string `json:"email,omitempty"`
// Entity: The entity holding the permission, in one of the following
// forms:
// - user-userId
// - user-email
// - group-groupId
// - group-email
// - domain-domain
// - allUsers
// - allAuthenticatedUsers Examples:
// - The user liz@example.com would be user-liz@example.com.
// - The group example@googlegroups.com would be
// group-example@googlegroups.com.
// - To refer to all members of the Google Apps for Business domain
// example.com, the entity would be domain-example.com.
Entity string `json:"entity,omitempty"`
// EntityId: The ID for the entity, if any.
EntityId string `json:"entityId,omitempty"`
// Id: The ID of the access-control entry.
Id string `json:"id,omitempty"`
// Kind: The kind of item this is. For bucket access control entries,
// this is always storage#bucketAccessControl.
Kind string `json:"kind,omitempty"`
// Role: The access permission for the entity. Can be READER, WRITER, or
// OWNER.
Role string `json:"role,omitempty"`
// SelfLink: The link to this access-control entry.
SelfLink string `json:"selfLink,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Bucket") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Bucket") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *BucketAccessControl) MarshalJSON() ([]byte, error) {
type noMethod BucketAccessControl
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
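// Illustrative values for the Entity and Role fields described above; the
// addresses and domain are placeholders.
//
//	acl := &BucketAccessControl{
//		Entity: "user-liz@example.com", // or "group-...", "domain-example.com",
//		Role:   "READER",               // "allUsers", "allAuthenticatedUsers"
//	}
//	_ = acl
//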
// BucketAccessControls: An access-control list.
type BucketAccessControls struct {
// Items: The list of items.
Items []*BucketAccessControl `json:"items,omitempty"`
// Kind: The kind of item this is. For lists of bucket access control
// entries, this is always storage#bucketAccessControls.
Kind string `json:"kind,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Items") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Items") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *BucketAccessControls) MarshalJSON() ([]byte, error) {
type noMethod BucketAccessControls
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Buckets: A list of buckets.
type Buckets struct {
// Items: The list of items.
Items []*Bucket `json:"items,omitempty"`
// Kind: The kind of item this is. For lists of buckets, this is always
// storage#buckets.
Kind string `json:"kind,omitempty"`
// NextPageToken: The continuation token, used to page through large
// result sets. Provide this value in a subsequent request to return the
// next page of results.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Items") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Items") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Buckets) MarshalJSON() ([]byte, error) {
type noMethod Buckets
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Object: An object.
type Object struct {
// Acl: Access controls on the object.
Acl []*ObjectAccessControl `json:"acl,omitempty"`
// Bucket: The bucket containing this object.
Bucket string `json:"bucket,omitempty"`
// CacheControl: Cache-Control directive for the object data.
CacheControl string `json:"cacheControl,omitempty"`
// ContentDisposition: Content-Disposition of the object data.
ContentDisposition string `json:"contentDisposition,omitempty"`
// ContentEncoding: Content-Encoding of the object data.
ContentEncoding string `json:"contentEncoding,omitempty"`
// ContentLanguage: Content-Language of the object data.
ContentLanguage string `json:"contentLanguage,omitempty"`
// Id: The ID of the object.
Id string `json:"id,omitempty"`
// Kind: The kind of item this is. For objects, this is always
// storage#object.
Kind string `json:"kind,omitempty"`
// Media: Object media data. Provided on your behalf when uploading raw
// media or multipart/related with an auxiliary media part.
Media *ObjectMedia `json:"media,omitempty"`
// Metadata: User-provided metadata, in key/value pairs.
Metadata map[string]string `json:"metadata,omitempty"`
// Name: The name of this object. Required if not specified by URL
// parameter.
Name string `json:"name,omitempty"`
// Owner: The owner of the object. This will always be the uploader of
// the object.
Owner *ObjectOwner `json:"owner,omitempty"`
// SelfLink: The link to this object.
SelfLink string `json:"selfLink,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Acl") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Acl") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Object) MarshalJSON() ([]byte, error) {
type noMethod Object
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ObjectMedia: Object media data. Provided on your behalf when
// uploading raw media or multipart/related with an auxiliary media
// part.
type ObjectMedia struct {
// Algorithm: Hash algorithm used. Currently only MD5 is supported.
// Required if a hash is provided.
Algorithm string `json:"algorithm,omitempty"`
// ContentType: Content-Type of the object data.
ContentType string `json:"contentType,omitempty"`
// Data: URL-safe Base64-encoded data. This property can be used to
// insert objects under 64KB in size, and will only be returned in
// response to the get method for objects so created. When this resource
// is returned in response to the list method, this property is omitted.
Data string `json:"data,omitempty"`
// Hash: Hash of the data. Required if a hash algorithm is provided.
Hash string `json:"hash,omitempty"`
// Length: Content-Length of the data in bytes.
Length uint64 `json:"length,omitempty,string"`
// Link: Media download link.
Link string `json:"link,omitempty"`
// TimeCreated: Creation time of the data in RFC 3339 format.
TimeCreated string `json:"timeCreated,omitempty"`
// ForceSendFields is a list of field names (e.g. "Algorithm") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Algorithm") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ObjectMedia) MarshalJSON() ([]byte, error) {
type noMethod ObjectMedia
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
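// Sketch of populating the Data field described above for a small (<64KB)
// object. base64.URLEncoding (from encoding/base64) produces the URL-safe
// Base64 the field requires; all other values are illustrative.
//
//	payload := []byte("hello, world")
//	media := &ObjectMedia{
//		ContentType: "text/plain",
//		Data:        base64.URLEncoding.EncodeToString(payload),
//		Length:      uint64(len(payload)),
//	}
//	_ = media
//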
// ObjectOwner: The owner of the object. This will always be the
// uploader of the object.
type ObjectOwner struct {
// Entity: The entity, in the form user-userId.
Entity string `json:"entity,omitempty"`
// EntityId: The ID for the entity.
EntityId string `json:"entityId,omitempty"`
// ForceSendFields is a list of field names (e.g. "Entity") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Entity") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ObjectOwner) MarshalJSON() ([]byte, error) {
type noMethod ObjectOwner
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ObjectAccessControl: An access-control entry.
type ObjectAccessControl struct {
// Bucket: The name of the bucket.
Bucket string `json:"bucket,omitempty"`
// Domain: The domain associated with the entity, if any.
Domain string `json:"domain,omitempty"`
// Email: The email address associated with the entity, if any.
Email string `json:"email,omitempty"`
// Entity: The entity holding the permission, in one of the following
// forms:
// - user-userId
// - user-email
// - group-groupId
// - group-email
// - domain-domain
// - allUsers
// - allAuthenticatedUsers Examples:
// - The user liz@example.com would be user-liz@example.com.
// - The group example@googlegroups.com would be
// group-example@googlegroups.com.
// - To refer to all members of the Google Apps for Business domain
// example.com, the entity would be domain-example.com.
Entity string `json:"entity,omitempty"`
// EntityId: The ID for the entity, if any.
EntityId string `json:"entityId,omitempty"`
// Id: The ID of the access-control entry.
Id string `json:"id,omitempty"`
// Kind: The kind of item this is. For object access control entries,
// this is always storage#objectAccessControl.
Kind string `json:"kind,omitempty"`
// Object: The name of the object.
Object string `json:"object,omitempty"`
// Role: The access permission for the entity. Can be READER or OWNER.
Role string `json:"role,omitempty"`
// SelfLink: The link to this access-control entry.
SelfLink string `json:"selfLink,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Bucket") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Bucket") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) {
type noMethod ObjectAccessControl
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ObjectAccessControls: An access-control list.
type ObjectAccessControls struct {
// Items: The list of items.
Items []*ObjectAccessControl `json:"items,omitempty"`
// Kind: The kind of item this is. For lists of object access control
// entries, this is always storage#objectAccessControls.
Kind string `json:"kind,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Items") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Items") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) {
type noMethod ObjectAccessControls
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Objects: A list of objects.
type Objects struct {
// Items: The list of items.
Items []*Object `json:"items,omitempty"`
// Kind: The kind of item this is. For lists of objects, this is always
// storage#objects.
Kind string `json:"kind,omitempty"`
// NextPageToken: The continuation token, used to page through large
// result sets. Provide this value in a subsequent request to return the
// next page of results.
NextPageToken string `json:"nextPageToken,omitempty"`
// Prefixes: The list of prefixes of objects matching-but-not-listed up
// to and including the requested delimiter.
Prefixes []string `json:"prefixes,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Items") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Items") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Objects) MarshalJSON() ([]byte, error) {
type noMethod Objects
raw := noMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// method id "storage.bucketAccessControls.delete":
type BucketAccessControlsDeleteCall struct {
s *Service
bucket string
entity string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes the ACL entry for the specified entity on the
// specified bucket.
func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall {
c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.entity = entity
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BucketAccessControlsDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
"entity": c.entity,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.bucketAccessControls.delete" call.
func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if err != nil {
return err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return err
}
return nil
// {
// "description": "Deletes the ACL entry for the specified entity on the specified bucket.",
// "httpMethod": "DELETE",
// "id": "storage.bucketAccessControls.delete",
// "parameterOrder": [
// "bucket",
// "entity"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of a bucket.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "entity": {
// "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "b/{bucket}/acl/{entity}",
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control"
// ]
// }
}
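// Usage sketch for the delete call above (svc is a *Service from New; the
// bucket and entity names are placeholders):
//
//	ctx := context.Background()
//	err := svc.BucketAccessControls.Delete("my-bucket", "allUsers").
//		Context(ctx).
//		Do()
//	if err != nil {
//		// handle error
//	}
//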
// method id "storage.bucketAccessControls.get":
type BucketAccessControlsGetCall struct {
s *Service
bucket string
entity string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Returns the ACL entry for the specified entity on the specified
// bucket.
func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall {
c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.entity = entity
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BucketAccessControlsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
"entity": c.entity,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.bucketAccessControls.get" call.
// Exactly one of *BucketAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *BucketAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &BucketAccessControl{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Returns the ACL entry for the specified entity on the specified bucket.",
// "httpMethod": "GET",
// "id": "storage.bucketAccessControls.get",
// "parameterOrder": [
// "bucket",
// "entity"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of a bucket.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "entity": {
// "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "b/{bucket}/acl/{entity}",
// "response": {
// "$ref": "BucketAccessControl"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control"
// ]
// }
}
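// Usage sketch for conditional reads with the call above: pass a previously
// observed ETag via IfNoneMatch and detect the 304 with
// googleapi.IsNotModified (svc and lastETag are placeholders):
//
//	acl, err := svc.BucketAccessControls.Get("my-bucket", "allUsers").
//		IfNoneMatch(lastETag).
//		Do()
//	if googleapi.IsNotModified(err) {
//		// the cached copy is still current
//	} else if err != nil {
//		// handle other errors
//	}
//	_ = acl
//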
// method id "storage.bucketAccessControls.insert":
type BucketAccessControlsInsertCall struct {
s *Service
bucket string
bucketaccesscontrol *BucketAccessControl
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Insert: Creates a new ACL entry on the specified bucket.
func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall {
c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.bucketaccesscontrol = bucketaccesscontrol
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BucketAccessControlsInsertCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.bucketAccessControls.insert" call.
// Exactly one of *BucketAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *BucketAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &BucketAccessControl{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a new ACL entry on the specified bucket.",
// "httpMethod": "POST",
// "id": "storage.bucketAccessControls.insert",
// "parameterOrder": [
// "bucket"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of a bucket.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "b/{bucket}/acl",
// "request": {
// "$ref": "BucketAccessControl"
// },
// "response": {
// "$ref": "BucketAccessControl"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control"
// ]
// }
}
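// Usage sketch for the insert call above (svc and all names are placeholders):
//
//	created, err := svc.BucketAccessControls.Insert("my-bucket", &BucketAccessControl{
//		Entity: "domain-example.com",
//		Role:   "READER",
//	}).Do()
//	if err != nil {
//		// handle error
//	}
//	_ = created
//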
// method id "storage.bucketAccessControls.list":
type BucketAccessControlsListCall struct {
s *Service
bucket string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Retrieves ACL entries on the specified bucket.
func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall {
c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BucketAccessControlsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.bucketAccessControls.list" call.
// Exactly one of *BucketAccessControls or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *BucketAccessControls.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &BucketAccessControls{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Retrieves ACL entries on the specified bucket.",
// "httpMethod": "GET",
// "id": "storage.bucketAccessControls.list",
// "parameterOrder": [
// "bucket"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of a bucket.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "b/{bucket}/acl",
// "response": {
// "$ref": "BucketAccessControls"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control"
// ]
// }
}
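// Usage sketch for the list call above (svc and the bucket name are
// placeholders):
//
//	acls, err := svc.BucketAccessControls.List("my-bucket").Do()
//	if err != nil {
//		// handle error
//	}
//	for _, acl := range acls.Items {
//		_ = acl.Entity // inspect each entry
//	}
//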
// method id "storage.bucketAccessControls.patch":
type BucketAccessControlsPatchCall struct {
s *Service
bucket string
entity string
bucketaccesscontrol *BucketAccessControl
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Patch: Updates an ACL entry on the specified bucket. This method
// supports patch semantics.
func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall {
c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.entity = entity
c.bucketaccesscontrol = bucketaccesscontrol
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BucketAccessControlsPatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PATCH", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
"entity": c.entity,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.bucketAccessControls.patch" call.
// Exactly one of *BucketAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *BucketAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &BucketAccessControl{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.",
// "httpMethod": "PATCH",
// "id": "storage.bucketAccessControls.patch",
// "parameterOrder": [
// "bucket",
// "entity"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of a bucket.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "entity": {
// "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "b/{bucket}/acl/{entity}",
// "request": {
// "$ref": "BucketAccessControl"
// },
// "response": {
// "$ref": "BucketAccessControl"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control"
// ]
// }
}
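// Usage sketch for the patch call above, combined with a partial response via
// Fields (svc and all names are placeholders):
//
//	patched, err := svc.BucketAccessControls.Patch("my-bucket", "allUsers",
//		&BucketAccessControl{Role: "WRITER"}).
//		Fields("entity", "role").
//		Do()
//	if err != nil {
//		// handle error
//	}
//	_ = patched
//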
// method id "storage.bucketAccessControls.update":
type BucketAccessControlsUpdateCall struct {
s *Service
bucket string
entity string
bucketaccesscontrol *BucketAccessControl
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Update: Updates an ACL entry on the specified bucket.
func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall {
c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.entity = entity
c.bucketaccesscontrol = bucketaccesscontrol
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BucketAccessControlsUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PUT", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
"entity": c.entity,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.bucketAccessControls.update" call.
// Exactly one of *BucketAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *BucketAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &BucketAccessControl{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates an ACL entry on the specified bucket.",
// "httpMethod": "PUT",
// "id": "storage.bucketAccessControls.update",
// "parameterOrder": [
// "bucket",
// "entity"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of a bucket.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "entity": {
// "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "b/{bucket}/acl/{entity}",
// "request": {
// "$ref": "BucketAccessControl"
// },
// "response": {
// "$ref": "BucketAccessControl"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control"
// ]
// }
}
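// Usage sketch for the full update above; unlike Patch, Update replaces the
// whole entry (svc and all values are placeholders):
//
//	updated, err := svc.BucketAccessControls.Update("my-bucket", "allUsers",
//		&BucketAccessControl{Entity: "allUsers", Role: "READER"}).Do()
//	if err != nil {
//		// handle error
//	}
//	_ = updated
//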
// method id "storage.buckets.delete":
type BucketsDeleteCall struct {
s *Service
bucket string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes an empty bucket.
func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall {
c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BucketsDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.buckets.delete" call.
func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if err != nil {
return err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return err
}
return nil
// {
// "description": "Deletes an empty bucket.",
// "httpMethod": "DELETE",
// "id": "storage.buckets.delete",
// "parameterOrder": [
// "bucket"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of a bucket.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "b/{bucket}",
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control",
// "https://www.googleapis.com/auth/devstorage.read_write"
// ]
// }
}
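// Usage sketch for the bucket delete above; per the description the bucket
// must already be empty (svc and the name are placeholders):
//
//	if err := svc.Buckets.Delete("my-bucket").Do(); err != nil {
//		// handle error (for example, the bucket is not empty)
//	}
//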
// method id "storage.buckets.get":
type BucketsGetCall struct {
s *Service
bucket string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Returns metadata for the specified bucket.
func (r *BucketsService) Get(bucket string) *BucketsGetCall {
c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
return c
}
// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to no_acl.
//
// Possible values:
// "full" - Include all properties.
// "no_acl" - Omit acl and defaultObjectAcl properties.
func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall {
c.urlParams_.Set("projection", projection)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BucketsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.buckets.get" call.
// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Bucket.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Bucket{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Returns metadata for the specified bucket.",
// "httpMethod": "GET",
// "id": "storage.buckets.get",
// "parameterOrder": [
// "bucket"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of a bucket.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "projection": {
// "description": "Set of properties to return. Defaults to no_acl.",
// "enum": [
// "full",
// "no_acl"
// ],
// "enumDescriptions": [
// "Include all properties.",
// "Omit acl and defaultObjectAcl properties."
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "b/{bucket}",
// "response": {
// "$ref": "Bucket"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control",
// "https://www.googleapis.com/auth/devstorage.read_only",
// "https://www.googleapis.com/auth/devstorage.read_write"
// ]
// }
}
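// Usage sketch for the bucket get above, requesting the full projection so
// that acl and defaultObjectAcl are included (svc and the name are
// placeholders):
//
//	b, err := svc.Buckets.Get("my-bucket").Projection("full").Do()
//	if err != nil {
//		// handle error
//	}
//	_ = b.TimeCreated
//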
// method id "storage.buckets.insert":
type BucketsInsertCall struct {
s *Service
bucket *Bucket
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Insert: Creates a new bucket.
func (r *BucketsService) Insert(bucket *Bucket) *BucketsInsertCall {
c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
return c
}
// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to no_acl, unless the bucket resource
// specifies acl or defaultObjectAcl properties, when it defaults to
// full.
//
// Possible values:
// "full" - Include all properties.
// "no_acl" - Omit acl and defaultObjectAcl properties.
func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall {
c.urlParams_.Set("projection", projection)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BucketsInsertCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.buckets.insert" call.
// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Bucket.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Bucket{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a new bucket.",
// "httpMethod": "POST",
// "id": "storage.buckets.insert",
// "parameters": {
// "projection": {
// "description": "Set of properties to return. Defaults to no_acl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.",
// "enum": [
// "full",
// "no_acl"
// ],
// "enumDescriptions": [
// "Include all properties.",
// "Omit acl and defaultObjectAcl properties."
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "b",
// "request": {
// "$ref": "Bucket"
// },
// "response": {
// "$ref": "Bucket"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control",
// "https://www.googleapis.com/auth/devstorage.read_write"
// ]
// }
}
// method id "storage.buckets.list":
type BucketsListCall struct {
s *Service
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Retrieves a list of buckets for a given project.
func (r *BucketsService) List(projectId uint64) *BucketsListCall {
c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.urlParams_.Set("projectId", fmt.Sprint(projectId))
return c
}
// MaxResults sets the optional parameter "max-results": Maximum number
// of buckets to return.
func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall {
c.urlParams_.Set("max-results", fmt.Sprint(maxResults))
return c
}
// PageToken sets the optional parameter "pageToken": A
// previously-returned page token representing part of the larger set of
// results to view.
func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to no_acl.
//
// Possible values:
// "full" - Include all properties.
// "no_acl" - Omit acl and defaultObjectAcl properties.
func (c *BucketsListCall) Projection(projection string) *BucketsListCall {
c.urlParams_.Set("projection", projection)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
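// exampleListBucketNamesOnly is a hypothetical usage sketch, not part of the
// generated API surface. It assumes the generated Service value exposes a
// Buckets field (as these clients conventionally do) and shows how Fields
// trims the response to just the pieces the caller needs; the field selector
// strings are illustrative.
func exampleListBucketNamesOnly(ctx context.Context, svc *Service, projectID uint64) (*Buckets, error) {
	return svc.Buckets.List(projectID).
		Fields("items(name)", "nextPageToken").
		Context(ctx).
		Do()
}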
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BucketsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.buckets.list" call.
// Exactly one of *Buckets or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Buckets.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Buckets{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Retrieves a list of buckets for a given project.",
// "httpMethod": "GET",
// "id": "storage.buckets.list",
// "parameterOrder": [
// "projectId"
// ],
// "parameters": {
// "max-results": {
// "description": "Maximum number of buckets to return.",
// "format": "uint32",
// "location": "query",
// "minimum": "0",
// "type": "integer"
// },
// "pageToken": {
// "description": "A previously-returned page token representing part of the larger set of results to view.",
// "location": "query",
// "type": "string"
// },
// "projectId": {
// "description": "A valid API project identifier.",
// "format": "uint64",
// "location": "query",
// "required": true,
// "type": "string"
// },
// "projection": {
// "description": "Set of properties to return. Defaults to no_acl.",
// "enum": [
// "full",
// "no_acl"
// ],
// "enumDescriptions": [
// "Include all properties.",
// "Omit acl and defaultObjectAcl properties."
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "b",
// "response": {
// "$ref": "Buckets"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control",
// "https://www.googleapis.com/auth/devstorage.read_only",
// "https://www.googleapis.com/auth/devstorage.read_write"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
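// examplePageThroughBuckets is a hypothetical usage sketch, not generated
// code. It assumes the Service value exposes a Buckets field and shows how
// Pages drives the pageToken loop so the caller only supplies a per-page
// callback; returning a non-nil error from the callback stops the iteration.
func examplePageThroughBuckets(ctx context.Context, svc *Service, projectID uint64) (int, error) {
	pages := 0
	err := svc.Buckets.List(projectID).MaxResults(100).Pages(ctx, func(_ *Buckets) error {
		pages++ // inspect each *Buckets page here
		return nil
	})
	return pages, err
}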
// method id "storage.buckets.patch":
type BucketsPatchCall struct {
s *Service
bucket string
bucket2 *Bucket
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Patch: Updates a bucket. This method supports patch semantics.
func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall {
c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.bucket2 = bucket2
return c
}
// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to full.
//
// Possible values:
// "full" - Include all properties.
// "no_acl" - Omit acl and defaultObjectAcl properties.
func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall {
c.urlParams_.Set("projection", projection)
return c
}
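// examplePatchBucketFull is a hypothetical usage sketch, not generated code.
// It assumes the Service value exposes a Buckets field and shows the builder
// chain for a patch: only the fields set on the supplied *Bucket are changed,
// and Projection("full") asks for the complete resource, including ACL
// properties, in the response.
func examplePatchBucketFull(ctx context.Context, svc *Service, name string, update *Bucket) (*Bucket, error) {
	return svc.Buckets.Patch(name, update).Projection("full").Context(ctx).Do()
}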
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BucketsPatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PATCH", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.buckets.patch" call.
// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Bucket.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Bucket{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates a bucket. This method supports patch semantics.",
// "httpMethod": "PATCH",
// "id": "storage.buckets.patch",
// "parameterOrder": [
// "bucket"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of a bucket.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "projection": {
// "description": "Set of properties to return. Defaults to full.",
// "enum": [
// "full",
// "no_acl"
// ],
// "enumDescriptions": [
// "Include all properties.",
// "Omit acl and defaultObjectAcl properties."
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "b/{bucket}",
// "request": {
// "$ref": "Bucket"
// },
// "response": {
// "$ref": "Bucket"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control",
// "https://www.googleapis.com/auth/devstorage.read_write"
// ]
// }
}
// method id "storage.buckets.update":
type BucketsUpdateCall struct {
s *Service
bucket string
bucket2 *Bucket
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Update: Updates a bucket.
func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall {
c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.bucket2 = bucket2
return c
}
// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to full.
//
// Possible values:
// "full" - Include all properties.
// "no_acl" - Omit acl and defaultObjectAcl properties.
func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall {
c.urlParams_.Set("projection", projection)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *BucketsUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PUT", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.buckets.update" call.
// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Bucket.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Bucket{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates a bucket.",
// "httpMethod": "PUT",
// "id": "storage.buckets.update",
// "parameterOrder": [
// "bucket"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of a bucket.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "projection": {
// "description": "Set of properties to return. Defaults to full.",
// "enum": [
// "full",
// "no_acl"
// ],
// "enumDescriptions": [
// "Include all properties.",
// "Omit acl and defaultObjectAcl properties."
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "b/{bucket}",
// "request": {
// "$ref": "Bucket"
// },
// "response": {
// "$ref": "Bucket"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control",
// "https://www.googleapis.com/auth/devstorage.read_write"
// ]
// }
}
// method id "storage.objectAccessControls.delete":
type ObjectAccessControlsDeleteCall struct {
s *Service
bucket string
object string
entity string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes the ACL entry for the specified entity on the
// specified object.
func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall {
c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.object = object
c.entity = entity
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ObjectAccessControlsDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
"object": c.object,
"entity": c.entity,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.objectAccessControls.delete" call.
func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if err != nil {
return err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return err
}
return nil
// {
// "description": "Deletes the ACL entry for the specified entity on the specified object.",
// "httpMethod": "DELETE",
// "id": "storage.objectAccessControls.delete",
// "parameterOrder": [
// "bucket",
// "object",
// "entity"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of a bucket.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "entity": {
// "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "object": {
// "description": "Name of the object.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "b/{bucket}/o/{object}/acl/{entity}",
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control"
// ]
// }
}
// method id "storage.objectAccessControls.get":
type ObjectAccessControlsGetCall struct {
s *Service
bucket string
object string
entity string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Returns the ACL entry for the specified entity on the specified
// object.
func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall {
c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.object = object
c.entity = entity
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ObjectAccessControlsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
"object": c.object,
"entity": c.entity,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.objectAccessControls.get" call.
// Exactly one of *ObjectAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ObjectAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ObjectAccessControl{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Returns the ACL entry for the specified entity on the specified object.",
// "httpMethod": "GET",
// "id": "storage.objectAccessControls.get",
// "parameterOrder": [
// "bucket",
// "object",
// "entity"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of a bucket.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "entity": {
// "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "object": {
// "description": "Name of the object.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "b/{bucket}/o/{object}/acl/{entity}",
// "response": {
	//     "$ref": "ObjectAccessControl"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/devstorage.full_control"
	//   ]
	// }
}
// method id "storage.objectAccessControls.insert":
type ObjectAccessControlsInsertCall struct {
s *Service
bucket string
object string
objectaccesscontrol *ObjectAccessControl
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Insert: Creates a new ACL entry on the specified object.
func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall {
c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.object = object
c.objectaccesscontrol = objectaccesscontrol
return c
}
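// exampleGrantObjectReader is a hypothetical usage sketch, not generated
// code. It assumes the Service value exposes an ObjectAccessControls field
// and that the ObjectAccessControl resource carries Entity and Role fields
// (per the API's schema); the "allUsers"/"READER" values are illustrative.
func exampleGrantObjectReader(ctx context.Context, svc *Service, bucket, object string) (*ObjectAccessControl, error) {
	acl := &ObjectAccessControl{
		Entity: "allUsers", // any supported entity form works, e.g. user-emailAddress
		Role:   "READER",
	}
	return svc.ObjectAccessControls.Insert(bucket, object, acl).Context(ctx).Do()
}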
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ObjectAccessControlsInsertCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
"object": c.object,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.objectAccessControls.insert" call.
// Exactly one of *ObjectAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ObjectAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ObjectAccessControl{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a new ACL entry on the specified object.",
// "httpMethod": "POST",
// "id": "storage.objectAccessControls.insert",
// "parameterOrder": [
// "bucket",
// "object"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of a bucket.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "object": {
// "description": "Name of the object.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "b/{bucket}/o/{object}/acl",
// "request": {
// "$ref": "ObjectAccessControl"
// },
// "response": {
// "$ref": "ObjectAccessControl"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control"
// ]
// }
}
// method id "storage.objectAccessControls.list":
type ObjectAccessControlsListCall struct {
s *Service
bucket string
object string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Retrieves ACL entries on the specified object.
func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall {
c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.object = object
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ObjectAccessControlsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
"object": c.object,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.objectAccessControls.list" call.
// Exactly one of *ObjectAccessControls or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ObjectAccessControls.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ObjectAccessControls{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Retrieves ACL entries on the specified object.",
// "httpMethod": "GET",
// "id": "storage.objectAccessControls.list",
// "parameterOrder": [
// "bucket",
// "object"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of a bucket.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "object": {
// "description": "Name of the object.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "b/{bucket}/o/{object}/acl",
// "response": {
// "$ref": "ObjectAccessControls"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control"
// ]
// }
}
// method id "storage.objectAccessControls.patch":
type ObjectAccessControlsPatchCall struct {
s *Service
bucket string
object string
entity string
objectaccesscontrol *ObjectAccessControl
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Patch: Updates an ACL entry on the specified object. This method
// supports patch semantics.
func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall {
c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.object = object
c.entity = entity
c.objectaccesscontrol = objectaccesscontrol
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ObjectAccessControlsPatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PATCH", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
"object": c.object,
"entity": c.entity,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.objectAccessControls.patch" call.
// Exactly one of *ObjectAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ObjectAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ObjectAccessControl{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates an ACL entry on the specified object. This method supports patch semantics.",
// "httpMethod": "PATCH",
// "id": "storage.objectAccessControls.patch",
// "parameterOrder": [
// "bucket",
// "object",
// "entity"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of a bucket.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "entity": {
// "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "object": {
// "description": "Name of the object.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "b/{bucket}/o/{object}/acl/{entity}",
// "request": {
// "$ref": "ObjectAccessControl"
// },
// "response": {
// "$ref": "ObjectAccessControl"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control"
// ]
// }
}
// method id "storage.objectAccessControls.update":
type ObjectAccessControlsUpdateCall struct {
s *Service
bucket string
object string
entity string
objectaccesscontrol *ObjectAccessControl
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Update: Updates an ACL entry on the specified object.
func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall {
c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.object = object
c.entity = entity
c.objectaccesscontrol = objectaccesscontrol
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ObjectAccessControlsUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PUT", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
"object": c.object,
"entity": c.entity,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.objectAccessControls.update" call.
// Exactly one of *ObjectAccessControl or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ObjectAccessControl.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ObjectAccessControl{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates an ACL entry on the specified object.",
// "httpMethod": "PUT",
// "id": "storage.objectAccessControls.update",
// "parameterOrder": [
// "bucket",
// "object",
// "entity"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of a bucket.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "entity": {
// "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "object": {
// "description": "Name of the object.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "b/{bucket}/o/{object}/acl/{entity}",
// "request": {
// "$ref": "ObjectAccessControl"
// },
// "response": {
// "$ref": "ObjectAccessControl"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control"
// ]
// }
}
// method id "storage.objects.delete":
type ObjectsDeleteCall struct {
s *Service
bucket string
object string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes data blobs and associated metadata.
func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall {
c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.object = object
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ObjectsDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("DELETE", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
"object": c.object,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.objects.delete" call.
func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if err != nil {
return err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return err
}
return nil
// {
// "description": "Deletes data blobs and associated metadata.",
// "httpMethod": "DELETE",
// "id": "storage.objects.delete",
// "parameterOrder": [
// "bucket",
// "object"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of the bucket in which the object resides.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "object": {
// "description": "Name of the object.",
// "location": "path",
// "required": true,
// "type": "string"
// }
// },
// "path": "b/{bucket}/o/{object}",
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control",
// "https://www.googleapis.com/auth/devstorage.read_write"
// ]
// }
}
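// exampleDeleteObjectIgnoreMissing is a hypothetical usage sketch, not
// generated code. It assumes the Service value exposes an Objects field and
// shows how a non-2xx status surfaces as a *googleapi.Error whose Code can be
// inspected; treating 404 as success is an illustrative policy, not API
// behaviour.
func exampleDeleteObjectIgnoreMissing(ctx context.Context, svc *Service, bucket, object string) error {
	err := svc.Objects.Delete(bucket, object).Context(ctx).Do()
	if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotFound {
		return nil // object was already gone
	}
	return err
}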
// method id "storage.objects.get":
type ObjectsGetCall struct {
s *Service
bucket string
object string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Retrieves objects or their associated metadata.
func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall {
c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.object = object
return c
}
// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to no_acl.
//
// Possible values:
// "full" - Include all properties.
// "no_acl" - Omit the acl property.
func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall {
c.urlParams_.Set("projection", projection)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
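// exampleConditionalGetObjectMetadata is a hypothetical usage sketch, not
// generated code. It assumes the Service value exposes an Objects field and
// shows how IfNoneMatch pairs with googleapi.IsNotModified: when the cached
// ETag still matches, Do reports an error that IsNotModified recognises and
// the caller can keep using its cached copy.
func exampleConditionalGetObjectMetadata(ctx context.Context, svc *Service, bucket, object, cachedETag string) (*Object, bool, error) {
	obj, err := svc.Objects.Get(bucket, object).IfNoneMatch(cachedETag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		return nil, true, nil // cached metadata is still current
	}
	if err != nil {
		return nil, false, err
	}
	return obj, false, nil
}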
// Context sets the context to be used in this call's Do and Download
// methods. Any pending HTTP request will be aborted if the provided
// context is canceled.
func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ObjectsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
"object": c.object,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Download fetches the API endpoint's "media" value, instead of the normal
// API response value. If the returned error is nil, the Response is guaranteed to
// have a 2xx status code. Callers must close the Response.Body as usual.
func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("media")
if err != nil {
return nil, err
}
if err := googleapi.CheckMediaResponse(res); err != nil {
res.Body.Close()
return nil, err
}
return res, nil
}
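// exampleDownloadObjectMedia is a hypothetical usage sketch, not generated
// code. It assumes the Service value exposes an Objects field and shows the
// Download path: the caller owns the returned body and must close it after
// copying the payload out.
func exampleDownloadObjectMedia(ctx context.Context, svc *Service, bucket, object string, w io.Writer) (int64, error) {
	res, err := svc.Objects.Get(bucket, object).Context(ctx).Download()
	if err != nil {
		return 0, err
	}
	defer res.Body.Close()
	return io.Copy(w, res.Body)
}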
// Do executes the "storage.objects.get" call.
// Exactly one of *Object or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Object.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Object{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Retrieves objects or their associated metadata.",
// "httpMethod": "GET",
// "id": "storage.objects.get",
// "parameterOrder": [
// "bucket",
// "object"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of the bucket in which the object resides.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "object": {
// "description": "Name of the object.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "projection": {
// "description": "Set of properties to return. Defaults to no_acl.",
// "enum": [
// "full",
// "no_acl"
// ],
// "enumDescriptions": [
// "Include all properties.",
// "Omit the acl property."
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "b/{bucket}/o/{object}",
// "response": {
// "$ref": "Object"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control",
// "https://www.googleapis.com/auth/devstorage.read_only",
// "https://www.googleapis.com/auth/devstorage.read_write"
// ],
// "supportsMediaDownload": true
// }
}
// method id "storage.objects.insert":
type ObjectsInsertCall struct {
s *Service
bucket string
object *Object
urlParams_ gensupport.URLParams
media_ io.Reader
mediaBuffer_ *gensupport.MediaBuffer
singleChunk_ bool
mediaType_ string
mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_.
progressUpdater_ googleapi.ProgressUpdater
ctx_ context.Context
header_ http.Header
}
// Insert: Stores new data blobs and associated metadata.
func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall {
c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.object = object
return c
}
// Name sets the optional parameter "name": Name of the object. Required
// when the object metadata is not otherwise provided. Overrides the
// object metadata's name value, if any.
func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall {
c.urlParams_.Set("name", name)
return c
}
// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to no_acl, unless the object resource
// specifies the acl property, when it defaults to full.
//
// Possible values:
// "full" - Include all properties.
// "no_acl" - Omit the acl property.
func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall {
c.urlParams_.Set("projection", projection)
return c
}
// Media specifies the media to upload in one or more chunks. The chunk
// size may be controlled by supplying a MediaOption generated by
// googleapi.ChunkSize. The chunk size defaults to
// googleapi.DefaultUploadChunkSize. The Content-Type header used in the
// upload request will be determined by sniffing the contents of r,
// unless a MediaOption generated by googleapi.ContentType is
// supplied.
// At most one of Media and ResumableMedia may be set.
func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall {
opts := googleapi.ProcessMediaOptions(options)
chunkSize := opts.ChunkSize
if !opts.ForceEmptyContentType {
r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType)
}
c.media_, c.mediaBuffer_, c.singleChunk_ = gensupport.PrepareUpload(r, chunkSize)
return c
}
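// exampleUploadObjectMultipart is a hypothetical usage sketch, not generated
// code. It assumes the Service value exposes an Objects field and shows a
// small upload via Media: with the default chunk size a short payload goes up
// as a single multipart request, and the googleapi.ContentType option skips
// content sniffing; the MIME type shown is illustrative.
func exampleUploadObjectMultipart(ctx context.Context, svc *Service, bucket, name string, data io.Reader) (*Object, error) {
	return svc.Objects.Insert(bucket, &Object{}).
		Name(name).
		Media(data, googleapi.ContentType("application/octet-stream")).
		Context(ctx).
		Do()
}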
// ResumableMedia specifies the media to upload in chunks and can be
// canceled with ctx.
//
// Deprecated: use Media instead.
//
// At most one of Media and ResumableMedia may be set. mediaType
// identifies the MIME media type of the upload, such as "image/png". If
// mediaType is "", it will be auto-detected. The provided ctx will
// supersede any context previously provided to the Context method.
func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall {
c.ctx_ = ctx
rdr := gensupport.ReaderAtToReader(r, size)
rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType)
c.mediaBuffer_ = gensupport.NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize)
c.media_ = nil
c.mediaSize_ = size
c.singleChunk_ = false
return c
}
// ProgressUpdater provides a callback function that will be called
// after every chunk. It should be a low-latency function in order to
// not slow down the upload operation. This should only be called when
// using ResumableMedia (as opposed to Media).
func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall {
c.progressUpdater_ = pu
return c
}
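// exampleUploadObjectResumable is a hypothetical usage sketch, not generated
// code. It assumes the Service value exposes an Objects field and shows the
// deprecated ResumableMedia path together with ProgressUpdater, whose
// callback receives the bytes sent so far and the total size after each
// chunk; the MIME type shown is illustrative.
func exampleUploadObjectResumable(ctx context.Context, svc *Service, bucket, name string, r io.ReaderAt, size int64) (*Object, error) {
	return svc.Objects.Insert(bucket, &Object{}).
		Name(name).
		ResumableMedia(ctx, r, size, "application/octet-stream").
		ProgressUpdater(func(current, total int64) {
			fmt.Printf("uploaded %d of %d bytes\n", current, total)
		}).
		Do()
}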
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
// This context will supersede any context previously provided to the
// ResumableMedia method.
func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ObjectsInsertCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.object)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o")
if c.media_ != nil || c.mediaBuffer_ != nil {
urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1)
protocol := "multipart"
if !c.singleChunk_ {
protocol = "resumable"
}
c.urlParams_.Set("uploadType", protocol)
}
if body == nil {
body = new(bytes.Buffer)
reqHeaders.Set("Content-Type", "application/json")
}
var media io.Reader
if c.media_ != nil {
media = c.media_
} else if c.singleChunk_ {
media, _, _, _ = c.mediaBuffer_.Chunk()
}
if media != nil {
combined, ctype := gensupport.CombineBodyMedia(body, "application/json", media, c.mediaType_)
defer combined.Close()
reqHeaders.Set("Content-Type", ctype)
body = combined
}
if c.mediaBuffer_ != nil && c.mediaType_ != "" && !c.singleChunk_ {
reqHeaders.Set("X-Upload-Content-Type", c.mediaType_)
}
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("POST", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.objects.insert" call.
// Exactly one of *Object or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Object.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
if c.mediaBuffer_ != nil && !c.singleChunk_ {
loc := res.Header.Get("Location")
rx := &gensupport.ResumableUpload{
Client: c.s.client,
UserAgent: c.s.userAgent(),
URI: loc,
Media: c.mediaBuffer_,
MediaType: c.mediaType_,
Callback: func(curr int64) {
if c.progressUpdater_ != nil {
c.progressUpdater_(curr, c.mediaSize_)
}
},
}
ctx := c.ctx_
if ctx == nil {
ctx = context.TODO()
}
res, err = rx.Upload(ctx)
if err != nil {
return nil, err
}
defer res.Body.Close()
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
}
ret := &Object{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Stores new data blobs and associated metadata.",
// "httpMethod": "POST",
// "id": "storage.objects.insert",
// "mediaUpload": {
// "accept": [
// "*/*"
// ],
// "protocols": {
// "resumable": {
// "multipart": true,
// "path": "/resumable/upload/storage/v1beta1/b/{bucket}/o"
// },
// "simple": {
// "multipart": true,
// "path": "/upload/storage/v1beta1/b/{bucket}/o"
// }
// }
// },
// "parameterOrder": [
// "bucket"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "name": {
// "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.",
// "location": "query",
// "type": "string"
// },
// "projection": {
// "description": "Set of properties to return. Defaults to no_acl, unless the object resource specifies the acl property, when it defaults to full.",
// "enum": [
// "full",
// "no_acl"
// ],
// "enumDescriptions": [
// "Include all properties.",
// "Omit the acl property."
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "b/{bucket}/o",
// "request": {
// "$ref": "Object"
// },
// "response": {
// "$ref": "Object"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control",
// "https://www.googleapis.com/auth/devstorage.read_write"
// ],
// "supportsMediaDownload": true,
// "supportsMediaUpload": true
// }
}
// method id "storage.objects.list":
type ObjectsListCall struct {
s *Service
bucket string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Retrieves a list of objects matching the criteria.
func (r *ObjectsService) List(bucket string) *ObjectsListCall {
c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
return c
}
// Delimiter sets the optional parameter "delimiter": Returns results in
// a directory-like mode. items will contain only objects whose names,
// aside from the prefix, do not contain delimiter. Objects whose names,
// aside from the prefix, contain delimiter will have their name,
// truncated after the delimiter, returned in prefixes. Duplicate
// prefixes are omitted.
func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall {
c.urlParams_.Set("delimiter", delimiter)
return c
}
// MaxResults sets the optional parameter "max-results": Maximum number
// of items plus prefixes to return. As duplicate prefixes are omitted,
// fewer total results may be returned than requested.
func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall {
c.urlParams_.Set("max-results", fmt.Sprint(maxResults))
return c
}
// PageToken sets the optional parameter "pageToken": A
// previously-returned page token representing part of the larger set of
// results to view.
func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Prefix sets the optional parameter "prefix": Filter results to
// objects whose names begin with this prefix.
func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall {
c.urlParams_.Set("prefix", prefix)
return c
}
// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to no_acl.
//
// Possible values:
// "full" - Include all properties.
// "no_acl" - Omit the acl property.
func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall {
c.urlParams_.Set("projection", projection)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ObjectsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("GET", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.objects.list" call.
// Exactly one of *Objects or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Objects.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Objects{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Retrieves a list of objects matching the criteria.",
// "httpMethod": "GET",
// "id": "storage.objects.list",
// "parameterOrder": [
// "bucket"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of the bucket in which to look for objects.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "delimiter": {
// "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
// "location": "query",
// "type": "string"
// },
// "max-results": {
// "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested.",
// "format": "uint32",
// "location": "query",
// "minimum": "0",
// "type": "integer"
// },
// "pageToken": {
// "description": "A previously-returned page token representing part of the larger set of results to view.",
// "location": "query",
// "type": "string"
// },
// "prefix": {
// "description": "Filter results to objects whose names begin with this prefix.",
// "location": "query",
// "type": "string"
// },
// "projection": {
// "description": "Set of properties to return. Defaults to no_acl.",
// "enum": [
// "full",
// "no_acl"
// ],
// "enumDescriptions": [
// "Include all properties.",
// "Omit the acl property."
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "b/{bucket}/o",
// "response": {
// "$ref": "Objects"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control",
// "https://www.googleapis.com/auth/devstorage.read_only",
// "https://www.googleapis.com/auth/devstorage.read_write"
// ],
// "supportsSubscription": true
// }
}
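// Illustrative sketch (not generated code): the If-None-Match flow mentioned in
// the Do documentation can be combined with googleapi.IsNotModified. The names
// svc and previousEtag are assumptions standing in for a constructed *Service
// and an ETag saved from an earlier response.
//
//	objs, err := svc.Objects.List("my-bucket").IfNoneMatch(previousEtag).Do()
//	if googleapi.IsNotModified(err) {
//		// The cached copy is still current; reuse it instead of objs.
//	}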
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
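// Illustrative sketch (not generated code): iterating every page of results with
// Pages. The names svc and ctx are assumptions standing in for a constructed
// *Service and a context.Context.
//
//	err := svc.Objects.List("my-bucket").Prefix("logs/").Pages(ctx, func(page *Objects) error {
//		// Process the page here; returning a non-nil error stops the iteration.
//		return nil
//	})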
// method id "storage.objects.patch":
type ObjectsPatchCall struct {
s *Service
bucket string
object string
object2 *Object
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Patch: Updates a data blob's associated metadata. This method
// supports patch semantics.
func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall {
c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.object = object
c.object2 = object2
return c
}
// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to full.
//
// Possible values:
// "full" - Include all properties.
// "no_acl" - Omit the acl property.
func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall {
c.urlParams_.Set("projection", projection)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ObjectsPatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PATCH", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
"object": c.object,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "storage.objects.patch" call.
// Exactly one of *Object or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Object.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Object{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates a data blob's associated metadata. This method supports patch semantics.",
// "httpMethod": "PATCH",
// "id": "storage.objects.patch",
// "parameterOrder": [
// "bucket",
// "object"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of the bucket in which the object resides.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "object": {
// "description": "Name of the object.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "projection": {
// "description": "Set of properties to return. Defaults to full.",
// "enum": [
// "full",
// "no_acl"
// ],
// "enumDescriptions": [
// "Include all properties.",
// "Omit the acl property."
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "b/{bucket}/o/{object}",
// "request": {
// "$ref": "Object"
// },
// "response": {
// "$ref": "Object"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control",
// "https://www.googleapis.com/auth/devstorage.read_write"
// ]
// }
}
// method id "storage.objects.update":
type ObjectsUpdateCall struct {
s *Service
bucket string
object string
object2 *Object
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Update: Updates a data blob's associated metadata.
func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall {
c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.object = object
c.object2 = object2
return c
}
// Projection sets the optional parameter "projection": Set of
// properties to return. Defaults to full.
//
// Possible values:
// "full" - Include all properties.
// "no_acl" - Omit the acl property.
func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall {
c.urlParams_.Set("projection", projection)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do and Download
// methods. Any pending HTTP request will be aborted if the provided
// context is canceled.
func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ObjectsUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
urls += "?" + c.urlParams_.Encode()
req, _ := http.NewRequest("PUT", urls, body)
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"bucket": c.bucket,
"object": c.object,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Download fetches the API endpoint's "media" value, instead of the normal
// API response value. If the returned error is nil, the Response is guaranteed to
// have a 2xx status code. Callers must close the Response.Body as usual.
func (c *ObjectsUpdateCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("media")
if err != nil {
return nil, err
}
if err := googleapi.CheckMediaResponse(res); err != nil {
res.Body.Close()
return nil, err
}
return res, nil
}
// Do executes the "storage.objects.update" call.
// Exactly one of *Object or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Object.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Object{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := json.NewDecoder(res.Body).Decode(target); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates a data blob's associated metadata.",
// "httpMethod": "PUT",
// "id": "storage.objects.update",
// "parameterOrder": [
// "bucket",
// "object"
// ],
// "parameters": {
// "bucket": {
// "description": "Name of the bucket in which the object resides.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "object": {
// "description": "Name of the object.",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "projection": {
// "description": "Set of properties to return. Defaults to full.",
// "enum": [
// "full",
// "no_acl"
// ],
// "enumDescriptions": [
// "Include all properties.",
// "Omit the acl property."
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "b/{bucket}/o/{object}",
// "request": {
// "$ref": "Object"
// },
// "response": {
// "$ref": "Object"
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control",
// "https://www.googleapis.com/auth/devstorage.read_write"
// ],
// "supportsMediaDownload": true
// }
}<|fim▁end|>
|
// },
// "scopes": [
// "https://www.googleapis.com/auth/devstorage.full_control"
// ]
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from dogpile.cache.region import register_backend
register_backend(
"dogpile.cache.null", "dogpile.cache.backends.null", "NullBackend")
register_backend(
"dogpile.cache.dbm", "dogpile.cache.backends.file", "DBMBackend")
register_backend(
"dogpile.cache.pylibmc", "dogpile.cache.backends.memcached",
"PylibmcBackend")
register_backend(
"dogpile.cache.bmemcached", "dogpile.cache.backends.memcached",
"BMemcachedBackend")
register_backend(
"dogpile.cache.memcached", "dogpile.cache.backends.memcached",
"MemcachedBackend")
register_backend(<|fim▁hole|> "MemoryPickleBackend")
register_backend(
"dogpile.cache.redis", "dogpile.cache.backends.redis", "RedisBackend")<|fim▁end|>
|
"dogpile.cache.memory", "dogpile.cache.backends.memory", "MemoryBackend")
register_backend(
"dogpile.cache.memory_pickle", "dogpile.cache.backends.memory",
|
<|file_name|>optimizer.py<|end_file_name|><|fim▁begin|>class Optimizer:
def __init__(self, model, params=None):
self.model = model
if params:
self.model.set_params(**params)
self.params = self.model.get_params()
self.__chain = list()
def step(self, name, values, skipped=False):
if not skipped:
self.__chain.append({
'pname': name,
'pvalues': values
})
return self<|fim▁hole|> def solve(self, evaluator):
score = -1
for param in self.__chain:
self.model.set_params(**self.params) # set previous best param
results = [(evaluator(self.model.set_params(**{param['pname']: value})), value)
for value in param['pvalues']]
results = sorted(results, lambda a, b: -1 if a[0] < b[0] else 1)
print param['pname']
for result in results:
print result[1], ' : ', result[0]
# update best params
self.params[param['pname']] = results[0][1]
score = results[0][0]
return score<|fim▁end|>
| |
<|file_name|>calendar-coverage.js<|end_file_name|><|fim▁begin|>version https://git-lfs.github.com/spec/v1
oid sha256:7076c06d1c5b0c8a10633ae2837be365f999203218d8484d9c4dd3bcb53c7e95<|fim▁hole|>size 49060<|fim▁end|>
| |
<|file_name|>v3_server.go<|end_file_name|><|fim▁begin|>// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"bytes"
"context"
"encoding/base64"
"encoding/binary"
"strconv"
"time"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/membershippb"
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/server/v3/auth"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/lease/leasehttp"
"go.etcd.io/etcd/server/v3/mvcc"
"github.com/gogo/protobuf/proto"
"go.uber.org/zap"
"golang.org/x/crypto/bcrypt"
)
const (
	// In the healthy case, there might be a small gap (10s of entries) between
	// the applied index and the committed index.
	// However, if the committed entries are very heavy to apply, the gap might grow.
	// We should stop accepting new proposals if the gap grows beyond a certain point.
maxGapBetweenApplyAndCommitIndex = 5000
traceThreshold = 100 * time.Millisecond
readIndexRetryTime = 500 * time.Millisecond
)
type RaftKV interface {
Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error)
Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error)
DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error)
Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error)
}
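// Illustrative sketch (not part of etcd): a caller that only needs key-value
// access can depend on the narrow RaftKV interface instead of the full
// *EtcdServer. The names srv and ctx are assumptions.
//
//	var kv RaftKV = srv // *EtcdServer satisfies RaftKV
//	resp, err := kv.Range(ctx, &pb.RangeRequest{Key: []byte("foo")})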
type Lessor interface {
// LeaseGrant sends LeaseGrant request to raft and apply it after committed.
LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
// LeaseRevoke sends LeaseRevoke request to raft and apply it after committed.
LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
// LeaseRenew renews the lease with given ID. The renewed TTL is returned. Or an error
// is returned.
LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error)
// LeaseTimeToLive retrieves lease information.
LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error)
// LeaseLeases lists all leases.
LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error)
}
type Authenticator interface {
AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error)
AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error)
AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error)
Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error)
UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
}
func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
trace := traceutil.New("range",
s.Logger(),
traceutil.Field{Key: "range_begin", Value: string(r.Key)},
traceutil.Field{Key: "range_end", Value: string(r.RangeEnd)},
)
ctx = context.WithValue(ctx, traceutil.TraceKey, trace)
var resp *pb.RangeResponse
var err error
defer func(start time.Time) {
warnOfExpensiveReadOnlyRangeRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err)
if resp != nil {
trace.AddField(
traceutil.Field{Key: "response_count", Value: len(resp.Kvs)},
traceutil.Field{Key: "response_revision", Value: resp.Header.Revision},
)
}
trace.LogIfLong(traceThreshold)
}(time.Now())
if !r.Serializable {
err = s.linearizableReadNotify(ctx)
trace.Step("agreement among raft nodes before linearized reading")
if err != nil {
return nil, err
}
}
chk := func(ai *auth.AuthInfo) error {
return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
}
get := func() { resp, err = s.applyV3Base.Range(ctx, nil, r) }
if serr := s.doSerialize(ctx, chk, get); serr != nil {
err = serr
return nil, err
}
return resp, err
}
func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
ctx = context.WithValue(ctx, traceutil.StartTimeKey, time.Now())
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r})
if err != nil {
return nil, err
}
return resp.(*pb.PutResponse), nil
}
func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r})
if err != nil {
return nil, err
}
return resp.(*pb.DeleteRangeResponse), nil
}
func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
if isTxnReadonly(r) {
trace := traceutil.New("transaction",
s.Logger(),
traceutil.Field{Key: "read_only", Value: true},
)
ctx = context.WithValue(ctx, traceutil.TraceKey, trace)
if !isTxnSerializable(r) {
err := s.linearizableReadNotify(ctx)
trace.Step("agreement among raft nodes before linearized reading")
if err != nil {
return nil, err
}
}
var resp *pb.TxnResponse
var err error
chk := func(ai *auth.AuthInfo) error {
return checkTxnAuth(s.authStore, ai, r)
}
defer func(start time.Time) {
warnOfExpensiveReadOnlyTxnRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err)
trace.LogIfLong(traceThreshold)
}(time.Now())
get := func() { resp, _, err = s.applyV3Base.Txn(ctx, r) }
if serr := s.doSerialize(ctx, chk, get); serr != nil {
return nil, serr
}
return resp, err
}
ctx = context.WithValue(ctx, traceutil.StartTimeKey, time.Now())
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r})
if err != nil {
return nil, err
}
return resp.(*pb.TxnResponse), nil
}
func isTxnSerializable(r *pb.TxnRequest) bool {
for _, u := range r.Success {
if r := u.GetRequestRange(); r == nil || !r.Serializable {
return false
}
}
for _, u := range r.Failure {
if r := u.GetRequestRange(); r == nil || !r.Serializable {
return false
}
}
return true
}
func isTxnReadonly(r *pb.TxnRequest) bool {
for _, u := range r.Success {
if r := u.GetRequestRange(); r == nil {
return false
}
}
for _, u := range r.Failure {
if r := u.GetRequestRange(); r == nil {
return false
}
}
return true
}
func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
startTime := time.Now()
result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r})
trace := traceutil.TODO()
if result != nil && result.trace != nil {
trace = result.trace
defer func() {
trace.LogIfLong(traceThreshold)
}()
applyStart := result.trace.GetStartTime()
result.trace.SetStartTime(startTime)
trace.InsertStep(0, applyStart, "process raft request")
}
if r.Physical && result != nil && result.physc != nil {
<-result.physc
// The compaction is done deleting keys; the hash is now settled
// but the data is not necessarily committed. If there's a crash,
// the hash may revert to a hash prior to compaction completing
// if the compaction resumes. Force the finished compaction to
// commit so it won't resume following a crash.
s.be.ForceCommit()
trace.Step("physically apply compaction")
}
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
resp := result.resp.(*pb.CompactionResponse)
if resp == nil {
resp = &pb.CompactionResponse{}
}
if resp.Header == nil {<|fim▁hole|> resp.Header = &pb.ResponseHeader{}
}
resp.Header.Revision = s.kv.Rev()
trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision})
return resp, nil
}
func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
// no id given? choose one
for r.ID == int64(lease.NoLease) {
// only use positive int64 id's
r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1))
}
resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r})
if err != nil {
return nil, err
}
return resp.(*pb.LeaseGrantResponse), nil
}
func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
if err != nil {
return nil, err
}
return resp.(*pb.LeaseRevokeResponse), nil
}
func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) {
ttl, err := s.lessor.Renew(id)
if err == nil { // already requested to primary lessor(leader)
return ttl, nil
}
if err != lease.ErrNotPrimary {
return -1, err
}
cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
defer cancel()
// renewals don't go through raft; forward to leader manually
for cctx.Err() == nil && err != nil {
leader, lerr := s.waitLeader(cctx)
if lerr != nil {
return -1, lerr
}
for _, url := range leader.PeerURLs {
lurl := url + leasehttp.LeasePrefix
ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt)
if err == nil || err == lease.ErrLeaseNotFound {
return ttl, err
}
}
// Throttle in case of e.g. connection problems.
time.Sleep(50 * time.Millisecond)
}
if cctx.Err() == context.DeadlineExceeded {
return -1, ErrTimeout
}
return -1, ErrCanceled
}
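// Illustrative sketch (not part of etcd): a caller could keep a lease alive by
// renewing it periodically through LeaseRenew. The names srv, ctx and leaseID
// are assumptions.
//
//	ticker := time.NewTicker(time.Second)
//	defer ticker.Stop()
//	for range ticker.C {
//		if _, err := srv.LeaseRenew(ctx, leaseID); err != nil {
//			break // e.g. lease.ErrLeaseNotFound once the lease is gone
//		}
//	}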
func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
if s.Leader() == s.ID() {
// primary; timetolive directly from leader
le := s.lessor.Lookup(lease.LeaseID(r.ID))
if le == nil {
return nil, lease.ErrLeaseNotFound
}
// TODO: fill out ResponseHeader
resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()}
if r.Keys {
ks := le.Keys()
kbs := make([][]byte, len(ks))
for i := range ks {
kbs[i] = []byte(ks[i])
}
resp.Keys = kbs
}
return resp, nil
}
cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
defer cancel()
// forward to leader
for cctx.Err() == nil {
leader, err := s.waitLeader(cctx)
if err != nil {
return nil, err
}
for _, url := range leader.PeerURLs {
lurl := url + leasehttp.LeaseInternalPrefix
resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt)
if err == nil {
return resp.LeaseTimeToLiveResponse, nil
}
if err == lease.ErrLeaseNotFound {
return nil, err
}
}
}
if cctx.Err() == context.DeadlineExceeded {
return nil, ErrTimeout
}
return nil, ErrCanceled
}
func (s *EtcdServer) LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
ls := s.lessor.Leases()
lss := make([]*pb.LeaseStatus, len(ls))
for i := range ls {
lss[i] = &pb.LeaseStatus{ID: int64(ls[i].ID)}
}
return &pb.LeaseLeasesResponse{Header: newHeader(s), Leases: lss}, nil
}
func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) {
leader := s.cluster.Member(s.Leader())
for leader == nil {
// wait an election
dur := time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond
select {
case <-time.After(dur):
leader = s.cluster.Member(s.Leader())
case <-s.stopping:
return nil, ErrStopped
case <-ctx.Done():
return nil, ErrNoLeader
}
}
if leader == nil || len(leader.PeerURLs) == 0 {
return nil, ErrNoLeader
}
return leader, nil
}
func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) {
resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r})
if err != nil {
return nil, err
}
return resp.(*pb.AlarmResponse), nil
}
func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthEnableResponse), nil
}
func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthDisableResponse), nil
}
func (s *EtcdServer) AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error) {
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthStatus: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthStatusResponse), nil
}
func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
if err := s.linearizableReadNotify(ctx); err != nil {
return nil, err
}
lg := s.Logger()
var resp proto.Message
for {
checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password)
if err != nil {
if err != auth.ErrAuthNotEnabled {
lg.Warn(
"invalid authentication was requested",
zap.String("user", r.Name),
zap.Error(err),
)
}
return nil, err
}
st, err := s.AuthStore().GenTokenPrefix()
if err != nil {
return nil, err
}
		// internalReq doesn't need to carry Password because s.AuthStore().CheckPassword() above already verified it.
		// This also keeps the plaintext password out of the WAL entry.
internalReq := &pb.InternalAuthenticateRequest{
Name: r.Name,
SimpleToken: st,
}
resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
if err != nil {
return nil, err
}
if checkedRevision == s.AuthStore().Revision() {
break
}
lg.Info("revision when password checked became stale; retrying")
}
return resp.(*pb.AuthenticateResponse), nil
}
func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
if r.Options == nil || !r.Options.NoPassword {
hashedPassword, err := bcrypt.GenerateFromPassword([]byte(r.Password), s.authStore.BcryptCost())
if err != nil {
return nil, err
}
r.HashedPassword = base64.StdEncoding.EncodeToString(hashedPassword)
r.Password = ""
}
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthUserAddResponse), nil
}
func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthUserDeleteResponse), nil
}
func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
if r.Password != "" {
hashedPassword, err := bcrypt.GenerateFromPassword([]byte(r.Password), s.authStore.BcryptCost())
if err != nil {
return nil, err
}
r.HashedPassword = base64.StdEncoding.EncodeToString(hashedPassword)
r.Password = ""
}
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthUserChangePasswordResponse), nil
}
func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthUserGrantRoleResponse), nil
}
func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthUserGetResponse), nil
}
func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthUserListResponse), nil
}
func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthUserRevokeRoleResponse), nil
}
func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthRoleAddResponse), nil
}
func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthRoleGrantPermissionResponse), nil
}
func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthRoleGetResponse), nil
}
func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthRoleListResponse), nil
}
func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthRoleRevokePermissionResponse), nil
}
func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r})
if err != nil {
return nil, err
}
return resp.(*pb.AuthRoleDeleteResponse), nil
}
func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
result, err := s.processInternalRaftRequestOnce(ctx, r)
if err != nil {
return nil, err
}
if result.err != nil {
return nil, result.err
}
if startTime, ok := ctx.Value(traceutil.StartTimeKey).(time.Time); ok && result.trace != nil {
applyStart := result.trace.GetStartTime()
		// The trace object is created in apply. Reset its start time to the request
		// start time so the trace also covers the raft request phase, i.e. the gap
		// between the request start time and the apply start time.
result.trace.SetStartTime(startTime)
result.trace.InsertStep(0, applyStart, "process raft request")
result.trace.LogIfLong(traceThreshold)
}
return result.resp, nil
}
func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
return s.raftRequestOnce(ctx, r)
}
// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure.
func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error {
trace := traceutil.Get(ctx)
ai, err := s.AuthInfoFromCtx(ctx)
if err != nil {
return err
}
if ai == nil {
// chk expects non-nil AuthInfo; use empty credentials
ai = &auth.AuthInfo{}
}
if err = chk(ai); err != nil {
return err
}
trace.Step("get authentication metadata")
// fetch response for serialized request
get()
	// check for a stale token revision in case the auth store was updated while
	// the request was being handled.
if ai.Revision != 0 && ai.Revision != s.authStore.Revision() {
return auth.ErrAuthOldRevision
}
return nil
}
func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
ai := s.getAppliedIndex()
ci := s.getCommittedIndex()
if ci > ai+maxGapBetweenApplyAndCommitIndex {
return nil, ErrTooManyRequests
}
r.Header = &pb.RequestHeader{
ID: s.reqIDGen.Next(),
}
// check authinfo if it is not InternalAuthenticateRequest
if r.Authenticate == nil {
authInfo, err := s.AuthInfoFromCtx(ctx)
if err != nil {
return nil, err
}
if authInfo != nil {
r.Header.Username = authInfo.Username
r.Header.AuthRevision = authInfo.Revision
}
}
data, err := r.Marshal()
if err != nil {
return nil, err
}
if len(data) > int(s.Cfg.MaxRequestBytes) {
return nil, ErrRequestTooLarge
}
id := r.ID
if id == 0 {
id = r.Header.ID
}
ch := s.w.Register(id)
cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
defer cancel()
start := time.Now()
err = s.r.Propose(cctx, data)
if err != nil {
proposalsFailed.Inc()
s.w.Trigger(id, nil) // GC wait
return nil, err
}
proposalsPending.Inc()
defer proposalsPending.Dec()
select {
case x := <-ch:
return x.(*applyResult), nil
case <-cctx.Done():
proposalsFailed.Inc()
s.w.Trigger(id, nil) // GC wait
return nil, s.parseProposeCtxErr(cctx.Err(), start)
case <-s.done:
return nil, ErrStopped
}
}
// Watchable returns a watchable interface attached to the etcdserver.
func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() }
func (s *EtcdServer) linearizableReadLoop() {
for {
requestId := s.reqIDGen.Next()
leaderChangedNotifier := s.LeaderChangedNotify()
select {
case <-leaderChangedNotifier:
continue
case <-s.readwaitc:
case <-s.stopping:
return
}
		// as a single loop can unlock multiple reads, it is not very useful
		// to propagate the trace from Txn or Range.
trace := traceutil.New("linearizableReadLoop", s.Logger())
nextnr := newNotifier()
s.readMu.Lock()
nr := s.readNotifier
s.readNotifier = nextnr
s.readMu.Unlock()
confirmedIndex, err := s.requestCurrentIndex(leaderChangedNotifier, requestId)
if isStopped(err) {
return
}
if err != nil {
nr.notify(err)
continue
}
trace.Step("read index received")
trace.AddField(traceutil.Field{Key: "readStateIndex", Value: confirmedIndex})
appliedIndex := s.getAppliedIndex()
trace.AddField(traceutil.Field{Key: "appliedIndex", Value: strconv.FormatUint(appliedIndex, 10)})
if appliedIndex < confirmedIndex {
select {
case <-s.applyWait.Wait(confirmedIndex):
case <-s.stopping:
return
}
}
// unblock all l-reads requested at indices before confirmedIndex
nr.notify(nil)
trace.Step("applied index is now lower than readState.Index")
trace.LogAllStepsIfLong(traceThreshold)
}
}
func isStopped(err error) bool {
return err == raft.ErrStopped || err == ErrStopped
}
func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, requestId uint64) (uint64, error) {
err := s.sendReadIndex(requestId)
if err != nil {
return 0, err
}
lg := s.Logger()
errorTimer := time.NewTimer(s.Cfg.ReqTimeout())
defer errorTimer.Stop()
retryTimer := time.NewTimer(readIndexRetryTime)
defer retryTimer.Stop()
firstCommitInTermNotifier := s.FirstCommitInTermNotify()
for {
select {
case rs := <-s.r.readStateC:
requestIdBytes := uint64ToBigEndianBytes(requestId)
gotOwnResponse := bytes.Equal(rs.RequestCtx, requestIdBytes)
if !gotOwnResponse {
				// A previous request might have timed out. Ignore its response and
				// continue waiting for the response to the current request.
responseId := uint64(0)
if len(rs.RequestCtx) == 8 {
responseId = binary.BigEndian.Uint64(rs.RequestCtx)
}
lg.Warn(
"ignored out-of-date read index response; local node read indexes queueing up and waiting to be in sync with leader",
zap.Uint64("sent-request-id", requestId),
zap.Uint64("received-request-id", responseId),
)
slowReadIndex.Inc()
continue
}
return rs.Index, nil
case <-leaderChangedNotifier:
readIndexFailed.Inc()
// return a retryable error.
return 0, ErrLeaderChanged
case <-firstCommitInTermNotifier:
firstCommitInTermNotifier = s.FirstCommitInTermNotify()
lg.Info("first commit in current term: resending ReadIndex request")
err := s.sendReadIndex(requestId)
if err != nil {
return 0, err
}
retryTimer.Reset(readIndexRetryTime)
continue
case <-retryTimer.C:
lg.Warn(
"waiting for ReadIndex response took too long, retrying",
zap.Uint64("sent-request-id", requestId),
zap.Duration("retry-timeout", readIndexRetryTime),
)
err := s.sendReadIndex(requestId)
if err != nil {
return 0, err
}
retryTimer.Reset(readIndexRetryTime)
continue
case <-errorTimer.C:
lg.Warn(
"timed out waiting for read index response (local node might have slow network)",
zap.Duration("timeout", s.Cfg.ReqTimeout()),
)
slowReadIndex.Inc()
return 0, ErrTimeout
case <-s.stopping:
return 0, ErrStopped
}
}
}
func uint64ToBigEndianBytes(number uint64) []byte {
byteResult := make([]byte, 8)
binary.BigEndian.PutUint64(byteResult, number)
return byteResult
}
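// Illustrative note: the 8-byte big-endian encoding round-trips with
// binary.BigEndian.Uint64, which is how read index responses are matched to
// requests in requestCurrentIndex above.
//
//	buf := uint64ToBigEndianBytes(42)
//	id := binary.BigEndian.Uint64(buf) // id == 42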
func (s *EtcdServer) sendReadIndex(requestIndex uint64) error {
ctxToSend := uint64ToBigEndianBytes(requestIndex)
cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
err := s.r.ReadIndex(cctx, ctxToSend)
cancel()
if err == raft.ErrStopped {
return err
}
if err != nil {
lg := s.Logger()
lg.Warn("failed to get read index from Raft", zap.Error(err))
readIndexFailed.Inc()
return err
}
return nil
}
func (s *EtcdServer) LinearizableReadNotify(ctx context.Context) error {
return s.linearizableReadNotify(ctx)
}
func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error {
s.readMu.RLock()
nc := s.readNotifier
s.readMu.RUnlock()
// signal linearizable loop for current notify if it hasn't been already
select {
case s.readwaitc <- struct{}{}:
default:
}
// wait for read state notification
select {
case <-nc.c:
return nc.err
case <-ctx.Done():
return ctx.Err()
case <-s.done:
return ErrStopped
}
}
func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) {
authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx)
if authInfo != nil || err != nil {
return authInfo, err
}
if !s.Cfg.ClientCertAuthEnabled {
return nil, nil
}
authInfo = s.AuthStore().AuthInfoFromTLS(ctx)
return authInfo, nil
}
func (s *EtcdServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
switch r.Action {
case pb.DowngradeRequest_VALIDATE:
return s.downgradeValidate(ctx, r.Version)
case pb.DowngradeRequest_ENABLE:
return s.downgradeEnable(ctx, r)
case pb.DowngradeRequest_CANCEL:
return s.downgradeCancel(ctx)
default:
return nil, ErrUnknownMethod
}
}
func (s *EtcdServer) downgradeValidate(ctx context.Context, v string) (*pb.DowngradeResponse, error) {
resp := &pb.DowngradeResponse{}
targetVersion, err := convertToClusterVersion(v)
if err != nil {
return nil, err
}
	// gets the leader's commit index and waits for the local store to finish applying
	// up to that index, to avoid using stale downgrade information
err = s.linearizableReadNotify(ctx)
if err != nil {
return nil, err
}
cv := s.ClusterVersion()
if cv == nil {
return nil, ErrClusterVersionUnavailable
}
resp.Version = cv.String()
allowedTargetVersion := membership.AllowedDowngradeVersion(cv)
if !targetVersion.Equal(*allowedTargetVersion) {
return nil, ErrInvalidDowngradeTargetVersion
}
downgradeInfo := s.cluster.DowngradeInfo()
if downgradeInfo.Enabled {
// Todo: return the downgrade status along with the error msg
return nil, ErrDowngradeInProcess
}
return resp, nil
}
func (s *EtcdServer) downgradeEnable(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
// validate downgrade capability before starting downgrade
v := r.Version
lg := s.Logger()
if resp, err := s.downgradeValidate(ctx, v); err != nil {
lg.Warn("reject downgrade request", zap.Error(err))
return resp, err
}
targetVersion, err := convertToClusterVersion(v)
if err != nil {
lg.Warn("reject downgrade request", zap.Error(err))
return nil, err
}
raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: true, Ver: targetVersion.String()}
_, err = s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest})
if err != nil {
lg.Warn("reject downgrade request", zap.Error(err))
return nil, err
}
resp := pb.DowngradeResponse{Version: s.ClusterVersion().String()}
return &resp, nil
}
func (s *EtcdServer) downgradeCancel(ctx context.Context) (*pb.DowngradeResponse, error) {
	// gets the leader's commit index and waits for the local store to finish applying
	// up to that index, to avoid using stale downgrade information
if err := s.linearizableReadNotify(ctx); err != nil {
return nil, err
}
downgradeInfo := s.cluster.DowngradeInfo()
if !downgradeInfo.Enabled {
return nil, ErrNoInflightDowngrade
}
raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: false}
_, err := s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest})
if err != nil {
return nil, err
}
resp := pb.DowngradeResponse{Version: s.ClusterVersion().String()}
return &resp, nil
}<|fim▁end|>
| |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>//! Dedicated single thread actor-ref implementations
use std::thread;
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex};
use std::fmt::{self, Debug, Formatter};
use {Actor, ActorSpawner};
use {ActorRef, ActorRefImpl, ActorRefEnum};
use {SendError};
#[cfg(test)]
mod tests;
/// A simplistic environment to run an actor in<|fim▁hole|> tx: Sender<Message>,
actor: Arc<Mutex<Box<Actor<Message>>>>,
}
/// An ActorSpawner which spawns a dedicated thread for every
/// actor.
pub struct DedicatedThreadSpawner;
impl ActorSpawner for DedicatedThreadSpawner {
    /// Create an ActorCell for the given actor.
fn spawn<Message, A>(&self, actor: A) -> ActorRef<Message>
where Message: Send + 'static, A: Actor<Message> + 'static
{
let (tx, rx) = channel();
let actor_box: Box<Actor<Message>> = Box::new(actor);
let actor = Arc::new(Mutex::new(actor_box));
let actor_for_thread = actor.clone();
thread::spawn( move|| {
let mut actor = actor_for_thread.lock().unwrap();
loop {
match rx.recv() {
Ok(msg) => {
debug!("Processing");
actor.process(msg);
},
Err(error) => {
debug!("Quitting: {:?}", error);
break;
},
}
}
});
ActorRef(
ActorRefEnum::DedicatedThread(
ActorCell {
tx: tx,
actor: actor
}
)
)
}
}
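// Illustrative sketch (not part of this module): spawning an actor on its own
// thread and sending it a message. `Printer` is an assumed type implementing
// `Actor<String>`.
//
//     let actor_ref = DedicatedThreadSpawner.spawn(Printer);
//     actor_ref.send("hello".to_string()).unwrap();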
impl<Message: Send + 'static> ActorRefImpl<Message> for ActorCell<Message> {
fn send(&self, msg: Message) -> Result<(), SendError<Message>> {
Ok(try!(self.tx.send(msg)))
}
}
impl<Message: Send + 'static> Debug for ActorCell<Message> {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
write!(f, "ActorCell")
}
}
impl<Message: Send + 'static> Clone for ActorCell<Message> {
fn clone(&self) -> ActorCell<Message> {
ActorCell {
tx: self.tx.clone(),
actor: self.actor.clone(),
}
}
}<|fim▁end|>
|
/// which can act as ActorRef.
///
/// It uses one thread per actor.
pub struct ActorCell<Message: Send> {
|
<|file_name|>tessellation.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright 2019 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "gm/gm.h"
#include "src/core/SkCanvasPriv.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGeometryProcessor.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrOpsRenderPass.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrProgramInfo.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrShaderCaps.h"
#include "src/gpu/GrShaderVar.h"
#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
#include "src/gpu/glsl/GrGLSLVarying.h"
#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
#include "src/gpu/ops/GrDrawOp.h"
#include "src/gpu/v1/SurfaceDrawContext_v1.h"
namespace skiagm {
constexpr static GrGeometryProcessor::Attribute kPositionAttrib =
{"position", kFloat3_GrVertexAttribType, kFloat3_GrSLType};
constexpr static std::array<float, 3> kTri1[3] = {
{20.5f,20.5f,1}, {170.5f,280.5f,4}, {320.5f,20.5f,1}};
constexpr static std::array<float, 3> kTri2[3] = {
{640.5f,280.5f,3}, {490.5f,20.5f,1}, {340.5f,280.5f,6}};
constexpr static SkRect kRect = {20.5f, 340.5f, 640.5f, 480.5f};
constexpr static int kWidth = (int)kRect.fRight + 21;
constexpr static int kHeight = (int)kRect.fBottom + 21;
/**
* This is a GPU-backend specific test. It ensures that tessellation works as expected by drawing
* several triangles. The test passes as long as the triangle tessellations match the reference
* images on gold.
*/
class TessellationGM : public GpuGM {
SkString onShortName() override { return SkString("tessellation"); }
SkISize onISize() override { return {kWidth, kHeight}; }
DrawResult onDraw(GrRecordingContext*, SkCanvas*, SkString*) override;
};
class TessellationTestTriShader : public GrGeometryProcessor {
public:
TessellationTestTriShader(const SkMatrix& viewMatrix)
: GrGeometryProcessor(kTessellationTestTriShader_ClassID), fViewMatrix(viewMatrix) {
this->setVertexAttributes(&kPositionAttrib, 1);
this->setWillUseTessellationShaders();
}
private:
const char* name() const final { return "TessellationTestTriShader"; }
void addToKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const final {}
class Impl : public ProgramImpl {
public:
void setData(const GrGLSLProgramDataManager& pdman,
const GrShaderCaps&,
const GrGeometryProcessor& geomProc) override {
pdman.setSkMatrix(fViewMatrixUniform,
geomProc.cast<TessellationTestTriShader>().fViewMatrix);
}
private:
void onEmitCode(EmitArgs& args, GrGPArgs*) override {
args.fVaryingHandler->emitAttributes(args.fGeomProc.cast<TessellationTestTriShader>());
const char* viewMatrix;
fViewMatrixUniform = args.fUniformHandler->addUniform(
nullptr, kVertex_GrShaderFlag, kFloat3x3_GrSLType, "view_matrix", &viewMatrix);
args.fVertBuilder->declareGlobal(
GrShaderVar("P_", kFloat3_GrSLType, GrShaderVar::TypeModifier::Out));
args.fVertBuilder->codeAppendf(R"(
P_.xy = (%s * float3(position.xy, 1)).xy;
P_.z = position.z;)", viewMatrix);
// GrGLProgramBuilder will call writeTess*ShaderGLSL when it is compiling.
this->writeFragmentShader(args.fFragBuilder, args.fOutputColor, args.fOutputCoverage);
}
SkString getTessControlShaderGLSL(const GrGeometryProcessor&,
const char* versionAndExtensionDecls,
const GrGLSLUniformHandler&,
const GrShaderCaps&) const override {
SkString code(versionAndExtensionDecls);
code.append(R"(
layout(vertices = 3) out;
in vec3 P_[];
out vec3 P[];
void main() {
P[gl_InvocationID] = P_[gl_InvocationID];
gl_TessLevelOuter[gl_InvocationID] = P_[gl_InvocationID].z;
gl_TessLevelInner[0] = 2.0;
})");
return code;
}
SkString getTessEvaluationShaderGLSL(const GrGeometryProcessor&,
const char* versionAndExtensionDecls,
const GrGLSLUniformHandler&,
const GrShaderCaps&) const override {
SkString code(versionAndExtensionDecls);
code.append(R"(
layout(triangles, equal_spacing, cw) in;
uniform vec4 sk_RTAdjust;
in vec3 P[];
out vec3 barycentric_coord;
void main() {
vec2 devcoord = mat3x2(P[0].xy, P[1].xy, P[2].xy) * gl_TessCoord.xyz;
devcoord = round(devcoord - .5) + .5; // Make horz and vert lines on px bounds.
gl_Position = vec4(devcoord.xy * sk_RTAdjust.xz + sk_RTAdjust.yw, 0.0, 1.0);
float i = 0.0;
if (gl_TessCoord.y == 0.0) {
i += gl_TessCoord.z * P[1].z;
} else {
i += P[1].z;
if (gl_TessCoord.x == 0.0) {
i += gl_TessCoord.y * P[0].z;
} else {
i += P[0].z;
if (gl_TessCoord.z == 0.0) {
i += gl_TessCoord.x * P[2].z;
} else {
barycentric_coord = vec3(0, 1, 0);
return;
}
}
}
i = abs(mod(i, 2.0) - 1.0);
barycentric_coord = vec3(i, 0, 1.0 - i);
})");
return code;
}
<|fim▁hole|> const char* coverage) {
f->declareGlobal(GrShaderVar("barycentric_coord", kFloat3_GrSLType,
GrShaderVar::TypeModifier::In));
f->codeAppendf(R"(
half3 d = half3(1 - barycentric_coord/fwidth(barycentric_coord));
half coverage = max(max(d.x, d.y), d.z);
half4 %s = half4(0, coverage, coverage, 1);
const half4 %s = half4(1);)", color, coverage);
}
GrGLSLUniformHandler::UniformHandle fViewMatrixUniform;
};
std::unique_ptr<ProgramImpl> makeProgramImpl(const GrShaderCaps&) const final {
return std::make_unique<Impl>();
}
const SkMatrix fViewMatrix;
};
class TessellationTestRectShader : public GrGeometryProcessor {
public:
TessellationTestRectShader(const SkMatrix& viewMatrix)
: GrGeometryProcessor(kTessellationTestTriShader_ClassID), fViewMatrix(viewMatrix) {
this->setWillUseTessellationShaders();
}
const char* name() const final { return "TessellationTestRectShader"; }
void addToKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const final {}
private:
class Impl : public ProgramImpl {
public:
void setData(const GrGLSLProgramDataManager& pdman,
const GrShaderCaps&,
const GrGeometryProcessor& geomProc) override {
pdman.setSkMatrix(fViewMatrixUniform,
geomProc.cast<TessellationTestRectShader>().fViewMatrix);
}
private:
void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
const char* viewMatrix;
fViewMatrixUniform = args.fUniformHandler->addUniform(
nullptr, kVertex_GrShaderFlag, kFloat3x3_GrSLType, "view_matrix", &viewMatrix);
args.fVertBuilder->declareGlobal(
GrShaderVar("M_", kFloat3x3_GrSLType, GrShaderVar::TypeModifier::Out));
args.fVertBuilder->codeAppendf("M_ = %s;", viewMatrix);
// GrGLProgramBuilder will call writeTess*ShaderGLSL when it is compiling.
this->writeFragmentShader(args.fFragBuilder, args.fOutputColor, args.fOutputCoverage);
}
SkString getTessControlShaderGLSL(const GrGeometryProcessor&,
const char* versionAndExtensionDecls,
const GrGLSLUniformHandler&,
const GrShaderCaps&) const override {
SkString code(versionAndExtensionDecls);
code.append(R"(
layout(vertices = 1) out;
in mat3 M_[];
out mat3 M[];
void main() {
M[gl_InvocationID] = M_[gl_InvocationID];
gl_TessLevelInner[0] = 8.0;
gl_TessLevelInner[1] = 2.0;
gl_TessLevelOuter[0] = 2.0;
gl_TessLevelOuter[1] = 8.0;
gl_TessLevelOuter[2] = 2.0;
gl_TessLevelOuter[3] = 8.0;
})");
return code;
}
SkString getTessEvaluationShaderGLSL(const GrGeometryProcessor&,
const char* versionAndExtensionDecls,
const GrGLSLUniformHandler&,
const GrShaderCaps&) const override {
SkString code(versionAndExtensionDecls);
code.appendf(R"(
layout(quads, equal_spacing, cw) in;
uniform vec4 sk_RTAdjust;
in mat3 M[];
out vec4 barycentric_coord;
void main() {
vec4 R = vec4(%f, %f, %f, %f);
vec2 localcoord = mix(R.xy, R.zw, gl_TessCoord.xy);
vec2 devcoord = (M[0] * vec3(localcoord, 1)).xy;
devcoord = round(devcoord - .5) + .5; // Make horz and vert lines on px bounds.
gl_Position = vec4(devcoord.xy * sk_RTAdjust.xz + sk_RTAdjust.yw, 0.0, 1.0);
float i = gl_TessCoord.x * 8.0;
i = abs(mod(i, 2.0) - 1.0);
if (gl_TessCoord.y == 0.0 || gl_TessCoord.y == 1.0) {
barycentric_coord = vec4(i, 1.0 - i, 0, 0);
} else {
barycentric_coord = vec4(0, 0, i, 1.0 - i);
}
})", kRect.left(), kRect.top(), kRect.right(), kRect.bottom());
return code;
}
void writeFragmentShader(GrGLSLFPFragmentBuilder* f, const char* color,
const char* coverage) {
f->declareGlobal(GrShaderVar("barycentric_coord", kFloat4_GrSLType,
GrShaderVar::TypeModifier::In));
f->codeAppendf(R"(
float4 fwidths = fwidth(barycentric_coord);
half coverage = 0;
for (int i = 0; i < 4; ++i) {
if (fwidths[i] != 0) {
coverage = half(max(coverage, 1 - barycentric_coord[i]/fwidths[i]));
}
}
half4 %s = half4(coverage, 0, coverage, 1);
const half4 %s = half4(1);)", color, coverage);
}
GrGLSLUniformHandler::UniformHandle fViewMatrixUniform;
};
std::unique_ptr<ProgramImpl> makeProgramImpl(const GrShaderCaps&) const final {
return std::make_unique<Impl>();
}
const SkMatrix fViewMatrix;
};
class TessellationTestOp : public GrDrawOp {
DEFINE_OP_CLASS_ID
public:
TessellationTestOp(const SkMatrix& viewMatrix, const std::array<float, 3>* triPositions)
: GrDrawOp(ClassID()), fViewMatrix(viewMatrix), fTriPositions(triPositions) {
this->setBounds(SkRect::MakeIWH(kWidth, kHeight), HasAABloat::kNo, IsHairline::kNo);
}
private:
const char* name() const override { return "TessellationTestOp"; }
FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*, GrClampType) override {
return GrProcessorSet::EmptySetAnalysis();
}
void onPrePrepare(GrRecordingContext*,
const GrSurfaceProxyView& writeView,
GrAppliedClip*,
const GrDstProxyView&,
GrXferBarrierFlags renderPassXferBarriers,
GrLoadOp colorLoadOp) override {}
void onPrepare(GrOpFlushState* flushState) override {
if (fTriPositions) {
if (void* vertexData = flushState->makeVertexSpace(sizeof(float) * 3, 3, &fVertexBuffer,
&fBaseVertex)) {
memcpy(vertexData, fTriPositions, sizeof(float) * 3 * 3);
}
}
}
void onExecute(GrOpFlushState* state, const SkRect& chainBounds) override {
GrPipeline pipeline(GrScissorTest::kDisabled, SkBlendMode::kSrc,
state->drawOpArgs().writeView().swizzle());
int tessellationPatchVertexCount;
std::unique_ptr<GrGeometryProcessor> shader;
if (fTriPositions) {
if (!fVertexBuffer) {
return;
}
tessellationPatchVertexCount = 3;
shader = std::make_unique<TessellationTestTriShader>(fViewMatrix);
} else {
// Use a mismatched number of vertices in the input patch vs output.
// (The tessellation control shader will output one vertex per patch.)
tessellationPatchVertexCount = 5;
shader = std::make_unique<TessellationTestRectShader>(fViewMatrix);
}
GrProgramInfo programInfo(state->caps(), state->writeView(), state->usesMSAASurface(),
&pipeline, &GrUserStencilSettings::kUnused,
shader.get(), GrPrimitiveType::kPatches,
tessellationPatchVertexCount, state->renderPassBarriers(),
state->colorLoadOp());
state->bindPipeline(programInfo, SkRect::MakeIWH(kWidth, kHeight));
state->bindBuffers(nullptr, nullptr, std::move(fVertexBuffer));
state->draw(tessellationPatchVertexCount, fBaseVertex);
}
const SkMatrix fViewMatrix;
const std::array<float, 3>* const fTriPositions;
sk_sp<const GrBuffer> fVertexBuffer;
int fBaseVertex = 0;
};
static SkPath build_outset_triangle(const std::array<float, 3>* tri) {
SkPath outset;
for (int i = 0; i < 3; ++i) {
SkPoint p = {tri[i][0], tri[i][1]};
SkPoint left = {tri[(i + 2) % 3][0], tri[(i + 2) % 3][1]};
SkPoint right = {tri[(i + 1) % 3][0], tri[(i + 1) % 3][1]};
SkPoint n0, n1;
n0.setNormalize(left.y() - p.y(), p.x() - left.x());
n1.setNormalize(p.y() - right.y(), right.x() - p.x());
p += (n0 + n1) * 3;
if (0 == i) {
outset.moveTo(p);
} else {
outset.lineTo(p);
}
}
return outset;
}
DrawResult TessellationGM::onDraw(GrRecordingContext* rContext,
SkCanvas* canvas,
SkString* errorMsg) {
auto sdc = SkCanvasPriv::TopDeviceSurfaceDrawContext(canvas);
if (!sdc) {
*errorMsg = kErrorMsg_DrawSkippedGpuOnly;
return DrawResult::kSkip;
}
if (!rContext->priv().caps()->shaderCaps()->tessellationSupport()) {
*errorMsg = "Requires GPU tessellation support.";
return DrawResult::kSkip;
}
if (!rContext->priv().caps()->shaderCaps()->shaderDerivativeSupport()) {
*errorMsg = "Requires shader derivatives."
"(These are expected to always be present when there is tessellation!!)";
return DrawResult::kFail;
}
canvas->clear(SK_ColorBLACK);
SkPaint borderPaint;
borderPaint.setColor4f({0,1,1,1});
borderPaint.setAntiAlias(true);
canvas->drawPath(build_outset_triangle(kTri1), borderPaint);
canvas->drawPath(build_outset_triangle(kTri2), borderPaint);
borderPaint.setColor4f({1,0,1,1});
canvas->drawRect(kRect.makeOutset(1.5f, 1.5f), borderPaint);
sdc->addDrawOp(GrOp::Make<TessellationTestOp>(rContext, canvas->getTotalMatrix(), kTri1));
sdc->addDrawOp(GrOp::Make<TessellationTestOp>(rContext, canvas->getTotalMatrix(), kTri2));
sdc->addDrawOp(GrOp::Make<TessellationTestOp>(rContext, canvas->getTotalMatrix(), nullptr));
return skiagm::DrawResult::kOk;
}
DEF_GM( return new TessellationGM(); )
} // namespace skiagm<|fim▁end|>
|
void writeFragmentShader(GrGLSLFPFragmentBuilder* f, const char* color,
|
<|file_name|>wsgi.py<|end_file_name|><|fim▁begin|>"""
WSGI config for antibiobank project.
<|fim▁hole|>
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "antibiobank.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()<|fim▁end|>
|
It exposes the WSGI callable as a module-level variable named ``application``.
|
<|file_name|>replicationcontroller.go<|end_file_name|><|fim▁begin|>/*
Copyright 2021 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package filtered
import (
context "context"
apicorev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
v1 "k8s.io/client-go/informers/core/v1"
kubernetes "k8s.io/client-go/kubernetes"
corev1 "k8s.io/client-go/listers/core/v1"
cache "k8s.io/client-go/tools/cache"<|fim▁hole|> controller "knative.dev/pkg/controller"
injection "knative.dev/pkg/injection"
logging "knative.dev/pkg/logging"
)
func init() {
injection.Default.RegisterFilteredInformers(withInformer)
injection.Dynamic.RegisterDynamicInformer(withDynamicInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct {
Selector string
}
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
untyped := ctx.Value(filtered.LabelKey{})
if untyped == nil {
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
infs := []controller.Informer{}
for _, selector := range labelSelectors {
f := filtered.Get(ctx, selector)
inf := f.Core().V1().ReplicationControllers()
ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
infs = append(infs, inf.Informer())
}
return ctx, infs
}
func withDynamicInformer(ctx context.Context) context.Context {
untyped := ctx.Value(filtered.LabelKey{})
if untyped == nil {
logging.FromContext(ctx).Panic(
"Unable to fetch labelkey from context.")
}
labelSelectors := untyped.([]string)
for _, selector := range labelSelectors {
inf := &wrapper{client: client.Get(ctx), selector: selector}
ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
}
return ctx
}
// Get extracts the typed informer from the context.
func Get(ctx context.Context, selector string) v1.ReplicationControllerInformer {
untyped := ctx.Value(Key{Selector: selector})
if untyped == nil {
logging.FromContext(ctx).Panicf(
"Unable to fetch k8s.io/client-go/informers/core/v1.ReplicationControllerInformer with selector %s from context.", selector)
}
return untyped.(v1.ReplicationControllerInformer)
}
type wrapper struct {
client kubernetes.Interface
namespace string
selector string
}
var _ v1.ReplicationControllerInformer = (*wrapper)(nil)
var _ corev1.ReplicationControllerLister = (*wrapper)(nil)
func (w *wrapper) Informer() cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(nil, &apicorev1.ReplicationController{}, 0, nil)
}
func (w *wrapper) Lister() corev1.ReplicationControllerLister {
return w
}
func (w *wrapper) ReplicationControllers(namespace string) corev1.ReplicationControllerNamespaceLister {
return &wrapper{client: w.client, namespace: namespace, selector: w.selector}
}
func (w *wrapper) List(selector labels.Selector) (ret []*apicorev1.ReplicationController, err error) {
reqs, err := labels.ParseToRequirements(w.selector)
if err != nil {
return nil, err
}
selector = selector.Add(reqs...)
lo, err := w.client.CoreV1().ReplicationControllers(w.namespace).List(context.TODO(), metav1.ListOptions{
LabelSelector: selector.String(),
// TODO(mattmoor): Incorporate resourceVersion bounds based on staleness criteria.
})
if err != nil {
return nil, err
}
for idx := range lo.Items {
ret = append(ret, &lo.Items[idx])
}
return ret, nil
}
func (w *wrapper) Get(name string) (*apicorev1.ReplicationController, error) {
// TODO(mattmoor): Check that the fetched object matches the selector.
return w.client.CoreV1().ReplicationControllers(w.namespace).Get(context.TODO(), name, metav1.GetOptions{
// TODO(mattmoor): Incorporate resourceVersion bounds based on staleness criteria.
})
}<|fim▁end|>
|
client "knative.dev/pkg/client/injection/kube/client"
filtered "knative.dev/pkg/client/injection/kube/informers/factory/filtered"
|
<|file_name|>components.py<|end_file_name|><|fim▁begin|>####################################################################################################
#
# GroupedPurchaseOrder - A Django Application.
# Copyright (C) 2014 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
from django.core.urlresolvers import reverse, NoReverseMatch
from django.forms.utils import flatatt
from django.utils.html import escape, format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
####################################################################################################
from .html import join_text, merge_new_words, render_tag
####################################################################################################
#
# Notes:
#   - How to concatenate in {% %} ? #deleteModal{{ supplier.pk }}
# - url 'suppliers.update' supplier.pk
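#   - (illustrative note, not from the original author) both of the above work as-is in a Django
#     template: data-target="#deleteModal{{ supplier.pk }}" interpolates the pk inside an attribute
#     value, and {% url 'suppliers.update' supplier.pk %} reverses the named URL with the pk argument.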
#
####################################################################################################
####################################################################################################
def render_icon(icon, title=''):
"""Render a glyphicon.
"""
#? escape ?
# attrs = {'class': 'glyphicon glyphicon-{}'.format(icon)}
attrs = {'class': 'glyphicon glyphicon-' + icon}
if title:
attrs['title'] = _(title)
return format_html('<span{0}></span>', flatatt(attrs))
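# A minimal usage sketch (illustrative only, not part of the original module):
#   render_icon('remove', title='Delete')
# is expected to produce markup along the lines of
#   <span class="glyphicon glyphicon-remove" title="Delete"></span>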
####################################################################################################
def render_button(content, icon=None, style='default', size='', href='', title='', button_class='', attrs=None):
"""Render a button with content
"""
# <button type="button" class="btn btn-default">Default</button>
# <button type="button" class="btn btn-primary">Primary</button>
# <button type="button" class="btn btn-success">Success</button>
# <button type="button" class="btn btn-info">Info</button>
# <button type="button" class="btn btn-warning">Warning</button>
# <button type="button" class="btn btn-danger">Danger</button>
# <button type="button" class="btn btn-link">Link</button>
#
# size : btn-lg, btn-sm, btn-xs
# <button type="button" class="btn btn-primary btn-lg">Large button</button>
#
# btn-block
# <button type="button" class="btn btn-primary btn-lg btn-block">Block level button</button>
# <button type="button" class="btn btn-default btn-lg btn-block">Block level button</button>
#
# active
# <button type="button" class="btn btn-primary btn-lg active">Primary button</button>
# <a href="#" class="btn btn-default btn-lg active" role="button">Link</a>
#
# disabled="disabled"
# <button type="button" class="btn btn-lg btn-primary" disabled="disabled">Primary button</button>
# <a href="#" class="btn btn-default btn-lg disabled" role="button">Link</a>
#
# <a class="btn btn-default" href="#" role="button">Link</a>
# <button class="btn btn-default" type="submit">Button</button>
# <input class="btn btn-default" type="button" value="Input">
# <input class="btn btn-default" type="submit" value="Submit">
if attrs is None:
attrs = {}
classes = ['btn']
button_styles = ('default', 'primary', 'success', 'info', 'warning', 'danger', 'link')
if style in button_styles:
classes.append('btn-' + style)
else:
        raise ValueError('Parameter style must be {} ("{}" given)'.format(
                         ', '.join(button_styles), style))
# size = text_value(size).lower().strip()
if size:<|fim▁hole|> classes.append('btn-sm')
elif size == 'lg' or size == 'large':
classes.append('btn-lg')
else:
raise ValueError('Parameter "size" should be "xs", "sm", "lg" or empty ("{}" given)',
format(size))
attrs['class'] = merge_new_words(button_class, classes)
if href:
try:
# current_app = context['request'].resolver_match.namespace
# viewname=viewname, args=view_args, kwargs=view_kwargs, current_app=current_app
url = reverse(href)
except NoReverseMatch:
url = href
attrs['href'] = url
tag = 'a'
else:
tag = 'button'
if title:
attrs['title'] = escape(_(title))
icon_content = render_icon(icon) if icon else ''
if content:
content = join_text((icon_content, escape(_(content))), separator=' ')
else:
content = icon_content
return render_tag(tag, mark_safe(content), attrs=attrs)
####################################################################################################
def render_icon_button(icon, **kwargs):
return render_button(None, icon=icon, **kwargs)
####################################################################################################
def render_modal_icon_button(icon, *args, **kwargs):
attrs = {'data-toggle':'modal', 'data-target':join_text(args)}
return render_button(None, icon=icon, attrs=attrs, **kwargs)
####################################################################################################
def render_dismiss_button(title, **kwargs):
attrs = {'type':'button', 'data-dismiss':'modal'}
return render_button(title, attrs=attrs, **kwargs)
####################################################################################################
def render_close_button(*args, **kwargs):
# '<button type="button" class="close" data-dismiss="modal">'
# '</button>'
attrs = {'type':'button', 'class':'close', 'data-dismiss':'modal'}
title = escape(_('Close'))
content = ('<span aria-hidden="true">×</span>'
'<span class="sr-only">{0}</span>'.format(title))
return render_tag('button', mark_safe(content), attrs=attrs)
####################################################################################################
#
# End
#
####################################################################################################<|fim▁end|>
|
if size == 'xs':
classes.append('btn-xs')
elif size == 'sm' or size == 'small':
|
<|file_name|>span.rs<|end_file_name|><|fim▁begin|>//! Types representing positions and spans inside source file.
use super::Source;
use std::rc::Rc;
use std::cmp;
use std::usize;
/// Represent a unique position within all source managed files.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Pos(pub usize);
impl Pos {
/// Create a span between current position and target
pub fn span_to(self, to: Pos) -> Span {
Span::new(self, to)
}
}
/// Represent a span within all source managed files.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Span {
pub start: Pos,
pub end: Pos
}
impl Span {
    /// Create a new span
pub fn new(a: Pos, b: Pos) -> Span {
Span {
start: a,
end: b,
}
}
/// Create a dummy span.
pub fn none() -> Span {
Span {
start: Pos(usize::MAX),
end: Pos(usize::MAX),
}
}
/// Check if this span is meaningful.
pub fn is_none(&self) -> bool {
self.start.0 == usize::MAX
}
/// Merge with another span
pub fn merge(self, other: Span) -> Span {
let start = cmp::min(self.start.0, other.start.0);
let end = cmp::max(self.end.0, other.end.0);
Span::new(Pos(start), Pos(end))
}
}
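// Illustrative usage of the span API above (hypothetical positions, not part of the original file):
//   let s = Pos(3).span_to(Pos(7)).merge(Span::new(Pos(1), Pos(5)));
//   assert_eq!((s.start.0, s.end.0), (1, 7));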
/// Represent a position within a single source file.
pub struct FatPos {
pub source: Rc<Source>,
pub pos: usize,
}
impl FatPos {
pub fn new(src: Rc<Source>, pos: usize) -> FatPos {
FatPos {
source: src,
pos: pos
}
}
}
/// Represent a span within a single source file.
pub struct FatSpan {
pub source: Rc<Source>,<|fim▁hole|> pub end: usize,
}
impl FatSpan {
pub fn new(src: Rc<Source>, start: usize, end: usize) -> FatSpan {
FatSpan {
source: src,
start: start,
end: end,
}
}
}<|fim▁end|>
|
pub start: usize,
|
<|file_name|>cusolverOperations.cpp<|end_file_name|><|fim▁begin|>#include <boost\math\special_functions.hpp>
#include "common.h"
#include "cusolverOperations.h"
namespace matCUDA
{
template< typename TElement>
cusolverStatus_t cusolverOperations<TElement>::ls( Array<TElement> *A, Array<TElement> *x, Array<TElement> *C )
{
cusolverDnHandle_t handle;
CUSOLVER_CALL( cusolverDnCreate(&handle) );
TElement *d_A, *Workspace, *d_C;
int INFOh = 2;
CUDA_CALL( cudaMalloc(&d_A, A->getDim(0) * A->getDim(1) * sizeof(TElement)) );
CUDA_CALL( cudaMalloc(&d_C, C->getDim(0) * C->getDim(1) * sizeof(TElement)) );
CUDA_CALL( cudaMemcpy(d_A, A->data(), A->getDim(0) * A->getDim(1) * sizeof(TElement), cudaMemcpyHostToDevice) );
CUDA_CALL( cudaMemcpy(d_C, C->data(), C->getDim(0) * C->getDim(1) * sizeof(TElement), cudaMemcpyHostToDevice) );
int Lwork = 0;
CUSOLVER_CALL( cusolverDnTgetrf_bufferSize(&handle, A->getDim(0), A->getDim(1), d_A, A->getDim(0), &Lwork) );
CUDA_CALL( cudaMalloc( &Workspace, Lwork * sizeof(TElement) ) );
int *devIpiv, *devInfo;
size_t size_pivot = std::min(C->getDim(0),C->getDim(1));
CUDA_CALL( cudaMalloc( &devIpiv, size_pivot * sizeof(int) ) );
CUDA_CALL( cudaMalloc( &devInfo, sizeof(int) ) );
/////***** performance test *****/////
//CUDA_CALL( cudaDeviceSynchronize() );
//tic();
//for( int i = 0; i < 10; i++ ) {
CUSOLVER_CALL( cusolverDnTgetrf( &handle, A->getDim(0), A->getDim(1), d_A, A->getDim(0), Workspace, devIpiv, devInfo ) );
CUDA_CALL( cudaDeviceSynchronize() );
// copy from GPU
CUDA_CALL( cudaMemcpy( &INFOh, devInfo, sizeof( int ), cudaMemcpyDeviceToHost ) );
if( INFOh > 0 )
{
printf("Factorization Failed: Matrix is singular\n");
return CUSOLVER_STATUS_EXECUTION_FAILED;
}
CUSOLVER_CALL( cusolverDnTgetrs( &handle, CUBLAS_OP_N, std::min(A->getDim(0),A->getDim(1)), C->getDim(1), d_A, A->getDim(0), devIpiv, d_C, C->getDim(0), devInfo ) );
//}
//CUDA_CALL( cudaDeviceSynchronize() );
//toc();
////***** end of performance test *****/////
CUDA_CALL( cudaDeviceSynchronize() );
// copy from GPU
INFOh = 2;
CUDA_CALL( cudaMemcpy( &INFOh, devInfo, sizeof( int ), cudaMemcpyDeviceToHost ) );
if( INFOh > 0 )
{
printf("Inversion Failed: Matrix is singular\n");
return CUSOLVER_STATUS_EXECUTION_FAILED;
}
CUDA_CALL( cudaMemcpy( C->data(), d_C, C->getNElements()*sizeof( TElement ), cudaMemcpyDeviceToHost ) );
for( int i = 0; i< x->GetDescriptor().GetDim( 0 ); i++ ) {
for( int j = 0; j < x->GetDescriptor().GetDim( 1 ); j++ )
(*x)( i, j ) = (*C)( i, j );
}
// free memory
CUDA_CALL( cudaFree( d_A ) );
CUDA_CALL( cudaFree( d_C ) );
CUDA_CALL( cudaFree( Workspace ) );
CUDA_CALL( cudaFree( devIpiv ) );
CUDA_CALL( cudaFree( devInfo ) );
// Destroy the handle
CUSOLVER_CALL( cusolverDnDestroy(handle) );
return CUSOLVER_STATUS_SUCCESS;
}
template cusolverStatus_t cusolverOperations<int>::ls( Array<int> *A, Array<int> *x, Array<int> *C );
template cusolverStatus_t cusolverOperations<float>::ls( Array<float> *A, Array<float> *x, Array<float> *C );
template cusolverStatus_t cusolverOperations<double>::ls( Array<double> *A, Array<double> *x, Array<double> *C );
template cusolverStatus_t cusolverOperations<ComplexFloat>::ls( Array<ComplexFloat> *A, Array<ComplexFloat> *x, Array<ComplexFloat> *C );
template cusolverStatus_t cusolverOperations<ComplexDouble>::ls( Array<ComplexDouble> *A, Array<ComplexDouble> *x, Array<ComplexDouble> *C );
template< typename TElement>
cusolverStatus_t cusolverOperations<TElement>::invert( Array<TElement> *result, Array<TElement> *data )
{
cusolverDnHandle_t handle;
CUSOLVER_CALL( cusolverDnCreate(&handle) );
size_t M = data->getDim(0);
size_t N = data->getDim(1);
size_t minMN = std::min(M,N);
TElement *d_A, *Workspace, *d_B;
int INFOh = 2;
CUDA_CALL( cudaMalloc(&d_A, M * N * sizeof(TElement)) );
CUDA_CALL( cudaMalloc(&d_B, M * N * sizeof(TElement)) );
CUDA_CALL( cudaMemcpy(d_A, data->data(), M * N * sizeof(TElement), cudaMemcpyHostToDevice) );
cuda_eye<TElement>( d_B, minMN );
int Lwork = 0;
CUSOLVER_CALL( cusolverDnTgetrf_bufferSize(&handle, M, N, d_A, M, &Lwork) );
CUDA_CALL( cudaMalloc( &Workspace, Lwork * sizeof(TElement) ) );
int *devIpiv, *devInfo;
size_t size_pivot = std::min(data->getDim(0),data->getDim(1));
CUDA_CALL( cudaMalloc( &devIpiv, size_pivot * sizeof(int) ) );
CUDA_CALL( cudaMalloc( &devInfo, sizeof(int) ) );
/////***** performance test *****/////
//CUDA_CALL( cudaDeviceSynchronize() );
//tic();
//for( int i = 0; i < 10; i++ ) {
CUSOLVER_CALL( cusolverDnTgetrf( &handle, M, N, d_A, M, Workspace, devIpiv, devInfo ) );
CUDA_CALL( cudaDeviceSynchronize() );
// copy from GPU
CUDA_CALL( cudaMemcpy( &INFOh, devInfo, sizeof( int ), cudaMemcpyDeviceToHost ) );
if( INFOh > 0 )
{
printf("Factorization Failed: Matrix is singular\n");
return CUSOLVER_STATUS_EXECUTION_FAILED;
}
CUSOLVER_CALL( cusolverDnTgetrs( &handle, CUBLAS_OP_N, data->getDim(0), data->getDim(1), d_A, data->getDim(0), devIpiv, d_B, data->getDim(0), devInfo ) );
//}
//CUDA_CALL( cudaDeviceSynchronize() );
//toc();
////***** end of performance test *****/////
CUDA_CALL( cudaDeviceSynchronize() );
// copy from GPU
INFOh = 2;
CUDA_CALL( cudaMemcpy( &INFOh, devInfo, sizeof( int ), cudaMemcpyDeviceToHost ) );
if( INFOh > 0 )
{
printf("Inversion Failed: Matrix is singular\n");
return CUSOLVER_STATUS_EXECUTION_FAILED;
}
CUDA_CALL( cudaMemcpy( result->data(), d_B, result->getNElements()*sizeof( TElement ), cudaMemcpyDeviceToHost ) );
// free memory
CUDA_CALL( cudaFree( d_A ) );
CUDA_CALL( cudaFree( d_B ) );
CUDA_CALL( cudaFree( Workspace ) );
CUDA_CALL( cudaFree( devIpiv ) );
CUDA_CALL( cudaFree( devInfo ) );
// Destroy the handle
CUSOLVER_CALL( cusolverDnDestroy(handle) );
return CUSOLVER_STATUS_SUCCESS;
}
template cusolverStatus_t cusolverOperations<int>::invert( Array<int> *result, Array<int> *data );
template cusolverStatus_t cusolverOperations<float>::invert( Array<float> *result, Array<float> *data );
template cusolverStatus_t cusolverOperations<double>::invert( Array<double> *result, Array<double> *data );
template cusolverStatus_t cusolverOperations<ComplexFloat>::invert( Array<ComplexFloat> *result, Array<ComplexFloat> *data );
template cusolverStatus_t cusolverOperations<ComplexDouble>::invert( Array<ComplexDouble> *result, Array<ComplexDouble> *data );
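	// Illustrative usage (hypothetical sizes, not part of the original file): given a square
	// Array<double> A(n,n) and an output Array<double> Ainv(n,n), calling invert( &Ainv, &A ) on a
	// cusolverOperations<double> instance LU-factorizes A on the GPU (getrf) and then solves
	// A * Ainv = I (getrs against an identity right-hand side), leaving the inverse in Ainv.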
template< typename TElement>
cusolverStatus_t cusolverOperations<TElement>::invert_zerocopy( Array<TElement> *result, Array<TElement> *data )
{
cusolverDnHandle_t handle;
CUSOLVER_CALL( cusolverDnCreate(&handle) );
size_t M = data->getDim(0);
size_t N = data->getDim(1);
size_t minMN = std::min(M,N);
TElement *d_A, *Workspace, *d_B;
CUDA_CALL( cudaMalloc(&d_A, M * N * sizeof(TElement)) );
// pass host pointer to device
CUDA_CALL( cudaHostGetDevicePointer( &d_B, result->data(), 0 ) );
CUDA_CALL( cudaMemcpy(d_A, data->data(), M * N * sizeof(TElement), cudaMemcpyHostToDevice) );
cuda_eye<TElement>( d_B, minMN );
int Lwork = 0;
CUSOLVER_CALL( cusolverDnTgetrf_bufferSize(&handle, M, N, d_A, M, &Lwork) );
CUDA_CALL( cudaMalloc( &Workspace, Lwork * sizeof(TElement) ) );
int *devIpiv, *devInfo;
size_t size_pivot = std::min(data->getDim(0),data->getDim(1));
CUDA_CALL( cudaMalloc( &devIpiv, size_pivot * sizeof(int) ) );
CUDA_CALL( cudaMalloc( &devInfo, sizeof(int) ) );
CUSOLVER_CALL( cusolverDnTgetrf( &handle, M, N, d_A, M, Workspace, devIpiv, devInfo ) );
CUDA_CALL( cudaDeviceSynchronize() );
// copy from GPU
int INFOh = 2;
CUDA_CALL( cudaMemcpy( &INFOh, devInfo, sizeof( int ), cudaMemcpyDeviceToHost ) );
if( INFOh > 0 )
{
printf("Factorization Failed: Matrix is singular\n");
return CUSOLVER_STATUS_EXECUTION_FAILED;
}
CUSOLVER_CALL( cusolverDnTgetrs( &handle, CUBLAS_OP_N, data->getDim(0), data->getDim(1), d_A, data->getDim(0), devIpiv, d_B, data->getDim(0), devInfo ) );
CUDA_CALL( cudaDeviceSynchronize() );
// copy from GPU
INFOh = 2;
CUDA_CALL( cudaMemcpy( &INFOh, devInfo, sizeof( int ), cudaMemcpyDeviceToHost ) );
if( INFOh > 0 )
{
printf("Inversion Failed: Matrix is singular\n");
return CUSOLVER_STATUS_EXECUTION_FAILED;
}
// free memory
CUDA_CALL( cudaFree( d_A ) );
CUDA_CALL( cudaFree( Workspace ) );
CUDA_CALL( cudaFree( devIpiv ) );
CUDA_CALL( cudaFree( devInfo ) );
// Destroy the handle
CUSOLVER_CALL( cusolverDnDestroy(handle) );
return CUSOLVER_STATUS_SUCCESS;
}
template cusolverStatus_t cusolverOperations<int>::invert_zerocopy( Array<int> *result, Array<int> *data );
template cusolverStatus_t cusolverOperations<float>::invert_zerocopy( Array<float> *result, Array<float> *data );
template cusolverStatus_t cusolverOperations<double>::invert_zerocopy( Array<double> *result, Array<double> *data );
template cusolverStatus_t cusolverOperations<ComplexFloat>::invert_zerocopy( Array<ComplexFloat> *result, Array<ComplexFloat> *data );
template cusolverStatus_t cusolverOperations<ComplexDouble>::invert_zerocopy( Array<ComplexDouble> *result, Array<ComplexDouble> *data );
template< typename TElement>
cusolverStatus_t cusolverOperations<TElement>::lu( Array<TElement> *A, Array<TElement> *lu, Array<TElement> *Pivot )
{
cusolverDnHandle_t handle;
CUSOLVER_CALL( cusolverDnCreate(&handle) );
size_t M = A->getDim(0);
size_t N = A->getDim(1);
size_t minMN = std::min(M,N);
TElement *d_A, *Workspace;
CUDA_CALL( cudaMalloc(&d_A, M * N * sizeof(TElement)) );
CUDA_CALL( cudaMemcpy(d_A, A->data(), M * N * sizeof(TElement), cudaMemcpyHostToDevice) );
int Lwork = 0;
CUSOLVER_CALL( cusolverDnTgetrf_bufferSize(&handle, M, N, d_A, M, &Lwork) );
CUDA_CALL( cudaMalloc( &Workspace, Lwork * sizeof(TElement) ) );
int *devIpiv, *devInfo;
size_t size_pivot = std::min(A->getDim(0),A->getDim(1));
CUDA_CALL( cudaMalloc( &devIpiv, size_pivot * sizeof(int) ) );
CUDA_CALL( cudaMalloc( &devInfo, sizeof(int) ) );
CUSOLVER_CALL( cusolverDnTgetrf( &handle, M, N, d_A, M, Workspace, devIpiv, devInfo ) );
CUDA_CALL( cudaDeviceSynchronize() );
// copy from GPU
int INFOh = 2;
CUDA_CALL( cudaMemcpy( &INFOh, devInfo, sizeof( int ), cudaMemcpyDeviceToHost ) );
if( INFOh > 0 )
{
printf("Factorization Failed: Matrix is singular\n");
return CUSOLVER_STATUS_EXECUTION_FAILED;
}
Array<int> pivotVector( size_pivot );
CUDA_CALL( cudaMemcpy( lu->data(), d_A, lu->getNElements()*sizeof( TElement ), cudaMemcpyDeviceToHost ) );
CUDA_CALL( cudaMemcpy( pivotVector.data(), devIpiv, size_pivot*sizeof( int ), cudaMemcpyDeviceToHost ) );
from_permutation_vector_to_permutation_matrix( Pivot, &pivotVector );
// free memory
CUDA_CALL( cudaFree( d_A ) );
CUDA_CALL( cudaFree( Workspace ) );
CUDA_CALL( cudaFree( devIpiv ) );
CUDA_CALL( cudaFree( devInfo ) );
// Destroy the handle
CUSOLVER_CALL( cusolverDnDestroy(handle) );
return CUSOLVER_STATUS_SUCCESS;
}
template cusolverStatus_t cusolverOperations<int>::lu( Array<int> *A, Array<int> *lu, Array<int> *Pivot );
template cusolverStatus_t cusolverOperations<float>::lu( Array<float> *A, Array<float> *lu, Array<float> *Pivot );
template cusolverStatus_t cusolverOperations<double>::lu( Array<double> *A, Array<double> *lu, Array<double> *Pivot );
template cusolverStatus_t cusolverOperations<ComplexFloat>::lu( Array<ComplexFloat> *A, Array<ComplexFloat> *lu, Array<ComplexFloat> *Pivot );
template cusolverStatus_t cusolverOperations<ComplexDouble>::lu( Array<ComplexDouble> *A, Array<ComplexDouble> *lu, Array<ComplexDouble> *Pivot );
template< typename TElement>
cusolverStatus_t cusolverOperations<TElement>::lu( Array<TElement> *A, Array<TElement> *lu )
{
cusolverDnHandle_t handle;
CUSOLVER_CALL( cusolverDnCreate(&handle) );
size_t M = A->getDim(0);
size_t N = A->getDim(1);
size_t minMN = std::min(M,N);
TElement *d_A, *Workspace;
CUDA_CALL( cudaMalloc(&d_A, M * N * sizeof(TElement)) );
CUDA_CALL( cudaMemcpy(d_A, A->data(), M * N * sizeof(TElement), cudaMemcpyHostToDevice) );
int Lwork = 0;
CUSOLVER_CALL( cusolverDnTgetrf_bufferSize(&handle, M, N, d_A, M, &Lwork) );
CUDA_CALL( cudaMalloc( &Workspace, Lwork * sizeof(TElement) ) );
int *devIpiv, *devInfo;
size_t size_pivot = std::min(A->getDim(0),A->getDim(1));
CUDA_CALL( cudaMalloc( &devIpiv, size_pivot * sizeof(int) ) );
CUDA_CALL( cudaMalloc( &devInfo, sizeof(int) ) );
/////***** performance test *****/////
//CUDA_CALL( cudaDeviceSynchronize() );
//tic();
//for( int i = 0; i < 10; i++ ) {
CUSOLVER_CALL( cusolverDnTgetrf( &handle, M, N, d_A, M, Workspace, devIpiv, devInfo ) );
//}
//CUDA_CALL( cudaDeviceSynchronize() );
//toc();
////***** end of performance test *****/////
CUDA_CALL( cudaDeviceSynchronize() );
// copy from GPU
int INFOh = 2;
CUDA_CALL( cudaMemcpy( &INFOh, devInfo, sizeof( int ), cudaMemcpyDeviceToHost ) );
if( INFOh > 0 )
{
printf("Factorization Failed: Matrix is singular\n");
return CUSOLVER_STATUS_EXECUTION_FAILED;
}
CUDA_CALL( cudaMemcpy( lu->data(), d_A, lu->getNElements()*sizeof( TElement ), cudaMemcpyDeviceToHost ) );
// free memory
CUDA_CALL( cudaFree( d_A ) );
CUDA_CALL( cudaFree( Workspace ) );
CUDA_CALL( cudaFree( devIpiv ) );
CUDA_CALL( cudaFree( devInfo ) );
// Destroy the handle
CUSOLVER_CALL( cusolverDnDestroy(handle) );
return CUSOLVER_STATUS_SUCCESS;
}
template cusolverStatus_t cusolverOperations<int>::lu( Array<int> *A, Array<int> *lu );
template cusolverStatus_t cusolverOperations<float>::lu( Array<float> *A, Array<float> *lu );
template cusolverStatus_t cusolverOperations<double>::lu( Array<double> *A, Array<double> *lu );
template cusolverStatus_t cusolverOperations<ComplexFloat>::lu( Array<ComplexFloat> *A, Array<ComplexFloat> *lu );
template cusolverStatus_t cusolverOperations<ComplexDouble>::lu( Array<ComplexDouble> *A, Array<ComplexDouble> *lu );
template <typename TElement>
cusolverStatus_t cusolverOperations<TElement>::qr( Array<TElement> *A, Array<TElement> *Q, Array<TElement> *R )
{
cusolverDnHandle_t handle;
CUSOLVER_CALL( cusolverDnCreate(&handle) );
int M = A->getDim(0);
int N = A->getDim(1);
int minMN = std::min(M,N);
TElement *d_A, *h_A, *TAU, *Workspace, *d_Q;
h_A = A->data();
CUDA_CALL( cudaMalloc(&d_A, M * N * sizeof(TElement)) );
CUDA_CALL( cudaMemcpy(d_A, h_A, M * N * sizeof(TElement), cudaMemcpyHostToDevice) );
int Lwork = 0;
CUSOLVER_CALL( cusolverDnTgeqrf_bufferSize(&handle, M, N, d_A, M, &Lwork) );
CUDA_CALL( cudaMalloc(&TAU, minMN * sizeof(TElement)) );
CUDA_CALL(cudaMalloc(&Workspace, Lwork * sizeof(TElement)));
int *devInfo;
CUDA_CALL( cudaMalloc(&devInfo, sizeof(int)) );
CUDA_CALL( cudaMemset( (void*)devInfo, 0, 1 ) );
CUSOLVER_CALL( cusolverDnTgeqrf(&handle, M, N, d_A, M, TAU, Workspace, Lwork, devInfo) );
int devInfo_h = 0;
CUDA_CALL( cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost) );
if (devInfo_h != 0)
return CUSOLVER_STATUS_INTERNAL_ERROR;
// CALL CUDA FUNCTION
CUDA_CALL( cudaMemcpy( R->data(), d_A, R->getNElements()*sizeof( TElement ), cudaMemcpyDeviceToHost ) );
for(int j = 0; j < M; j++)
for(int i = j + 1; i < N; i++)
(*R)(i,j) = 0;
// --- Initializing the output Q matrix (Of course, this step could be done by a kernel function directly on the device)
//*Q = eye<TElement> ( std::min(Q->getDim(0),Q->getDim(1)) );
CUDA_CALL( cudaMalloc(&d_Q, M*M*sizeof(TElement)) );
cuda_eye<TElement>( d_Q, std::min(Q->getDim(0),Q->getDim(1)) );
// --- CUDA qr execution
CUSOLVER_CALL( cusolverDnTormqr(&handle, CUBLAS_SIDE_LEFT, CUBLAS_OP_N, M, N, std::min(M, N), d_A, M, TAU, d_Q, M, Workspace, Lwork, devInfo) );
// --- At this point, d_Q contains the elements of Q. Showing this.
CUDA_CALL( cudaMemcpy(Q->data(), d_Q, M*M*sizeof(TElement), cudaMemcpyDeviceToHost) );
CUDA_CALL( cudaDeviceSynchronize() );
CUSOLVER_CALL( cusolverDnDestroy(handle) );
return CUSOLVER_STATUS_SUCCESS;
}
template cusolverStatus_t cusolverOperations<int>::qr( Array<int> *A, Array<int> *Q, Array<int> *R );
template cusolverStatus_t cusolverOperations<float>::qr( Array<float> *A, Array<float> *Q, Array<float> *R );
template cusolverStatus_t cusolverOperations<double>::qr( Array<double> *A, Array<double> *Q, Array<double> *R );
template cusolverStatus_t cusolverOperations<ComplexFloat>::qr( Array<ComplexFloat> *A, Array<ComplexFloat> *Q, Array<ComplexFloat> *R );
template cusolverStatus_t cusolverOperations<ComplexDouble>::qr( Array<ComplexDouble> *A, Array<ComplexDouble> *Q, Array<ComplexDouble> *R );
template<> cusolverStatus_t cusolverOperations<ComplexFloat>::dpss( Array<ComplexFloat> *eigenvector, index_t N, double NW, index_t degree )
{
return CUSOLVER_STATUS_NOT_INITIALIZED;
}
template<> cusolverStatus_t cusolverOperations<ComplexDouble>::dpss( Array<ComplexDouble> *eigenvector, index_t N, double NW, index_t degree )
{
return CUSOLVER_STATUS_NOT_INITIALIZED;
}
template <typename TElement>
cusolverStatus_t cusolverOperations<TElement>::dpss( Array<TElement> *eigenvector, index_t N, double NW, index_t degree )
{
// define matrix T (NxN)
TElement** T = new TElement*[ N ];
for(int i = 0; i < N; ++i)
T[ i ] = new TElement[ N ];
// fill in T as function of ( N, W )
		// T is a tridiagonal matrix, i.e. it has a diagonal, a subdiagonal and a superdiagonal;
		// all other elements are 0
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
if( j == i - 1 ) // subdiagonal
T[ i ][ j ] = ( (TElement)N - i )*i/2;
else if( j == i ) // diagonal
T[ i ][ j ] = pow( (TElement)(N-1)/2 - i, 2 )*boost::math::cos_pi( 2*NW/(TElement)N/boost::math::constants::pi<TElement>() );
else if( j == i + 1 ) // superdiagonal
T[ i ][ j ] = ( i + 1 )*( (TElement)N - 1 - i )/2*( j == i + 1 );
				else // other elements
T[ i ][ j ] = 0;
}
}
// declarations needed
cusolverStatus_t statCusolver = CUSOLVER_STATUS_SUCCESS;
cusolverSpHandle_t handleCusolver = NULL;
cusparseHandle_t handleCusparse = NULL;
cusparseMatDescr_t descrA = NULL;
int *h_cooRowIndex = NULL, *h_cooColIndex = NULL;
TElement *h_cooVal = NULL;
int *d_cooRowIndex = NULL, *d_cooColIndex = NULL, *d_csrRowPtr = NULL;
TElement *d_cooVal = NULL;
int nnz;
TElement *h_eigenvector0 = NULL, *d_eigenvector0 = NULL, *d_eigenvector = NULL;
int maxite = 1e6; // number of maximum iteration
TElement tol = 1; // tolerance
TElement mu, *d_mu;
TElement max_lambda;
// define interval of eigenvalues of T
// interval is [-max_lambda,max_lambda]
max_lambda = ( N - 1 )*( N + 2 ) + N*( N + 1 )/8 + 0.25;
// amount of nonzero elements of T
nnz = 3*N - 2;
// allocate host memory
h_cooRowIndex = new int[ nnz*sizeof( int ) ];
h_cooColIndex = new int[ nnz*sizeof( int ) ];
h_cooVal = new TElement[ nnz*sizeof( TElement ) ];
h_eigenvector0 = new TElement[ N*sizeof( TElement ) ];
// fill in vectors that describe T as a sparse matrix
int counter = 0;
for (int i = 0; i < N; i++ ) {
for( int j = 0; j < N; j++ ) {
if( T[ i ][ j ] != 0 ) {
h_cooRowIndex[counter] = i;
h_cooColIndex[counter] = j;
h_cooVal[counter++] = T[ i ][ j ];
}
}
}
// fill in initial eigenvector guess
for( int i = 0; i < N; i++ )
h_eigenvector0[ i ] = 1/( abs( i - N/2 ) + 1 );
// allocate device memory
CUDA_CALL( cudaMalloc((void**)&d_cooRowIndex,nnz*sizeof( int )) );
CUDA_CALL( cudaMalloc((void**)&d_cooColIndex,nnz*sizeof( int )) );
CUDA_CALL( cudaMalloc((void**)&d_cooVal, nnz*sizeof( TElement )) );
CUDA_CALL( cudaMalloc((void**)&d_csrRowPtr, (N+1)*sizeof( int )) );
CUDA_CALL( cudaMalloc((void**)&d_eigenvector0, N*sizeof( TElement )) );
CUDA_CALL( cudaMalloc((void**)&d_eigenvector, N*sizeof( TElement )) );
CUDA_CALL( cudaMalloc( &d_mu, sizeof( TElement ) ) );
CUDA_CALL( cudaMemset( d_mu, -max_lambda, sizeof( TElement ) ) );
// copy data to device
CUDA_CALL( cudaMemcpy( d_cooRowIndex, h_cooRowIndex, (size_t)(nnz*sizeof( int )), cudaMemcpyHostToDevice ) );
CUDA_CALL( cudaMemcpy( d_cooColIndex, h_cooColIndex, (size_t)(nnz*sizeof( int )), cudaMemcpyHostToDevice ) );
CUDA_CALL( cudaMemcpy( d_cooVal, h_cooVal, (size_t)(nnz*sizeof( TElement )), cudaMemcpyHostToDevice ) );
CUDA_CALL( cudaMemcpy( d_eigenvector0, h_eigenvector0, (size_t)(N*sizeof( TElement )), cudaMemcpyHostToDevice ) );
CUDA_CALL( cudaMemcpy( &mu, d_mu, sizeof( TElement ), cudaMemcpyDeviceToHost ) );
// initialize cusparse and cusolver
CUSOLVER_CALL( cusolverSpCreate( &handleCusolver ) );
CUSPARSE_CALL( cusparseCreate( &handleCusparse ) );
// create and define cusparse matrix descriptor
CUSPARSE_CALL( cusparseCreateMatDescr(&descrA) );
CUSPARSE_CALL( cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL ) );
CUSPARSE_CALL( cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO ) );
// transform from coordinates (COO) values to compressed row pointers (CSR) values
CUSPARSE_CALL( cusparseXcoo2csr( handleCusparse, d_cooRowIndex, nnz, N, d_csrRowPtr, CUSPARSE_INDEX_BASE_ZERO ) );
// call cusolverSp<type>csreigvsi
CUSOLVER_CALL( cusolverSpTcsreigvsi( &handleCusolver, N, nnz, &descrA, d_cooVal, d_csrRowPtr, d_cooColIndex, max_lambda, d_eigenvector0, maxite, tol, d_mu, d_eigenvector ) );
cudaDeviceSynchronize();
CUDA_CALL( cudaGetLastError() );
// copy from device to host
CUDA_CALL( cudaMemcpy( &mu, d_mu, (size_t)sizeof( TElement ), cudaMemcpyDeviceToHost ) );
CUDA_CALL( cudaMemcpy( eigenvector->data(), d_eigenvector, (size_t)(N*sizeof( TElement )), cudaMemcpyDeviceToHost ) );
// destroy and free stuff
CUSPARSE_CALL( cusparseDestroyMatDescr( descrA ) );
CUSPARSE_CALL( cusparseDestroy( handleCusparse ) );
CUSOLVER_CALL( cusolverSpDestroy( handleCusolver ) );
CUDA_CALL( cudaFree( d_cooRowIndex ) );
CUDA_CALL( cudaFree( d_cooColIndex ) );
CUDA_CALL( cudaFree( d_cooVal ) );
CUDA_CALL( cudaFree( d_csrRowPtr ) );
CUDA_CALL( cudaFree( d_eigenvector0 ) );
CUDA_CALL( cudaFree( d_eigenvector ) );
CUDA_CALL( cudaFree( d_mu ) );
delete[] h_eigenvector0;
delete[] h_cooRowIndex;
delete[] h_cooColIndex;
delete[] h_cooVal;
return CUSOLVER_STATUS_SUCCESS;
}
template cusolverStatus_t cusolverOperations<float>::dpss( Array<float> *eigenvector, index_t N, double NW, index_t degree );
template cusolverStatus_t cusolverOperations<double>::dpss( Array<double> *eigenvector, index_t N, double NW, index_t degree );
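	// Illustrative call (hypothetical sizes, not part of the original file): for a length-64 Slepian
	// window with time-bandwidth product NW = 4, dpss( &eigenvector, 64, 4.0, 0 ) assembles the
	// tridiagonal matrix T described above and uses inverse iteration (csreigvsi, seeded near
	// max_lambda) to return the dominant discrete prolate spheroidal sequence in `eigenvector`.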
template <typename TElement>
cusolverStatus_t cusolverOperations<TElement>::qr_zerocopy( Array<TElement> *A, Array<TElement> *Q, Array<TElement> *R )
{
cusolverDnHandle_t handle;
CUSOLVER_CALL( cusolverDnCreate(&handle) );
int M = A->getDim(0);
int N = A->getDim(1);
int minMN = std::min(M,N);
TElement *d_A, *h_A, *TAU, *Workspace, *d_Q, *d_R;
h_A = A->data();
CUDA_CALL( cudaMalloc(&d_A, M * N * sizeof(TElement)) );
CUDA_CALL( cudaMemcpy(d_A, h_A, M * N * sizeof(TElement), cudaMemcpyHostToDevice) );
int Lwork = 0;
CUSOLVER_CALL( cusolverDnTgeqrf_bufferSize(&handle, M, N, d_A, M, &Lwork) );
CUDA_CALL( cudaMalloc(&TAU, minMN * sizeof(TElement)) );
CUDA_CALL(cudaMalloc(&Workspace, Lwork * sizeof(TElement)));
int *devInfo;
CUDA_CALL( cudaMalloc(&devInfo, sizeof(int)) );
CUDA_CALL( cudaMemset( (void*)devInfo, 0, 1 ) );
CUSOLVER_CALL( cusolverDnTgeqrf(&handle, M, N, d_A, M, TAU, Workspace, Lwork, devInfo) );
int devInfo_h = 0;
CUDA_CALL( cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost) );
if (devInfo_h != 0)
return CUSOLVER_STATUS_INTERNAL_ERROR;
// CALL CUDA FUNCTION
CUDA_CALL( cudaMemcpy( R->data(), d_A, R->getNElements()*sizeof( TElement ), cudaMemcpyDeviceToHost ) );
// pass host pointer to device
CUDA_CALL( cudaHostGetDevicePointer( &d_R, R->data(), 0 ) );
CUDA_CALL( cudaHostGetDevicePointer( &d_Q, Q->data(), 0 ) );
zeros_under_diag<TElement>( d_R, std::min(R->getDim(0),R->getDim(1)) );
//for(int j = 0; j < M; j++)
// for(int i = j + 1; i < N; i++)
// (*R)(i,j) = 0;
// --- Initializing the output Q matrix (Of course, this step could be done by a kernel function directly on the device)
//*Q = eye<TElement> ( std::min(Q->getDim(0),Q->getDim(1)) );
cuda_eye<TElement>( d_Q, std::min(Q->getDim(0),Q->getDim(1)) );
CUDA_CALL( cudaDeviceSynchronize() );
// --- CUDA qr_zerocopy execution
CUSOLVER_CALL( cusolverDnTormqr(&handle, CUBLAS_SIDE_LEFT, CUBLAS_OP_N, M, N, std::min(M, N), d_A, M, TAU, d_Q, M, Workspace, Lwork, devInfo) );
CUDA_CALL( cudaDeviceSynchronize() );
CUSOLVER_CALL( cusolverDnDestroy(handle) );
return CUSOLVER_STATUS_SUCCESS;
}
template cusolverStatus_t cusolverOperations<int>::qr_zerocopy( Array<int> *A, Array<int> *Q, Array<int> *R );
template cusolverStatus_t cusolverOperations<float>::qr_zerocopy( Array<float> *A, Array<float> *Q, Array<float> *R );
template cusolverStatus_t cusolverOperations<double>::qr_zerocopy( Array<double> *A, Array<double> *Q, Array<double> *R );
template cusolverStatus_t cusolverOperations<ComplexFloat>::qr_zerocopy( Array<ComplexFloat> *A, Array<ComplexFloat> *Q, Array<ComplexFloat> *R );
	template cusolverStatus_t cusolverOperations<ComplexDouble>::qr_zerocopy( Array<ComplexDouble> *A, Array<ComplexDouble> *Q, Array<ComplexDouble> *R );
	// transform permutation vector into permutation matrix
// TODO (or redo) - too slow!!!
template <typename TElement>
void cusolverOperations<TElement>::from_permutation_vector_to_permutation_matrix( Array<TElement> *pivotMatrix, Array<int> *pivotVector )
{
//pivotVector->print();
//*pivotMatrix = eye<TElement>(pivotVector->getDim(0));
//index_t idx1, idx2;
//for( int i = 0; i < pivotVector->GetDescriptor().GetDim(0); i++ ) {
// if( i + 1 == (*pivotVector)(i) )
// continue;
// else
// {
// idx1 = i;
// idx2 = (*pivotVector)(i)-1;
// (*pivotMatrix)( idx1, idx1 ) = 0;
// (*pivotMatrix)( idx2, idx2 ) = 0;
// (*pivotMatrix)( idx1, idx2 ) = 1;
// (*pivotMatrix)( idx2, idx1 ) = 1;
// }
// pivotMatrix->print();
//}
//pivotMatrix->print();
//
//*pivotMatrix = eye<TElement>(pivotVector->getDim(0));
//pivotVector->print();
//eye<double>(pivotVector->getDim(0)).print();
Array<TElement> pivotAux = eye<TElement>(pivotVector->GetDescriptor().GetDim(0));
index_t idx1, idx2;
for( int i = 0; i < pivotVector->GetDescriptor().GetDim(0); i++ ) {
idx1 = i;
idx2 = (*pivotVector)(i)-1;
pivotAux( idx1, idx1 ) = 0;
pivotAux( idx2, idx2 ) = 0;
pivotAux( idx1, idx2 ) = 1;
pivotAux( idx2, idx1 ) = 1;
(*pivotMatrix) = pivotAux*(*pivotMatrix);
pivotAux = eye<TElement>(pivotVector->GetDescriptor().GetDim(0));
//pivotMatrix->print();
}
//pivotMatrix->print();
}
cusolverStatus_t cusolverOperations<float>::cusolverSpTcsreigvsi( cusolverSpHandle_t *handle, int m, int nnz, cusparseMatDescr_t *descrA, const float *csrValA, const int *csrRowPtrA, const int *csrColIndA, float mu0, const float *x0, int maxite, float tol, float *mu, float *x )
{
return cusolverSpScsreigvsi( *handle, m ,nnz, *descrA, csrValA, csrRowPtrA, csrColIndA, mu0, x0, maxite, tol, mu, x );;
}
cusolverStatus_t cusolverOperations<double>::cusolverSpTcsreigvsi( cusolverSpHandle_t *handle, int m, int nnz, cusparseMatDescr_t *descrA, const double *csrValA, const int *csrRowPtrA, const int *csrColIndA, double mu0, const double *x0, int maxite, double tol, double *mu, double *x )
{
return cusolverSpDcsreigvsi( *handle, m ,nnz, *descrA, csrValA, csrRowPtrA, csrColIndA, mu0, x0, maxite, tol, mu, x );
}
cusolverStatus_t cusolverOperations<ComplexFloat>::cusolverSpTcsreigvsi( cusolverSpHandle_t *handle, int m, int nnz, cusparseMatDescr_t *descrA, const ComplexFloat *csrValA, const int *csrRowPtrA, const int *csrColIndA, ComplexFloat mu0, const ComplexFloat *x0, int maxite, ComplexFloat tol, ComplexFloat *mu, ComplexFloat *x )
{
cuFloatComplex mu02 = make_cuFloatComplex( mu0.real(), mu0.imag() );
return cusolverSpCcsreigvsi( *handle, m ,nnz, *descrA, (const cuFloatComplex*)csrValA, csrRowPtrA, csrColIndA, mu02, (const cuFloatComplex*)x0, maxite, tol.real(), (cuFloatComplex*)mu, (cuFloatComplex*)x );
}
cusolverStatus_t cusolverOperations<ComplexDouble>::cusolverSpTcsreigvsi( cusolverSpHandle_t *handle, int m, int nnz, cusparseMatDescr_t *descrA, const ComplexDouble *csrValA, const int *csrRowPtrA, const int *csrColIndA, ComplexDouble mu0, const ComplexDouble *x0, int maxite, ComplexDouble tol, ComplexDouble *mu, ComplexDouble *x )
{
cuDoubleComplex mu02 = make_cuDoubleComplex( mu0.real(), mu0.imag() );
return cusolverSpZcsreigvsi( *handle, m ,nnz, *descrA, (const cuDoubleComplex*)csrValA, csrRowPtrA, csrColIndA, mu02, (const cuDoubleComplex*)x0, maxite, tol.real(), (cuDoubleComplex*)mu, (cuDoubleComplex*)x );
}
cusolverStatus_t cusolverOperations<int>::cusolverDnTgeqrf_bufferSize( cusolverDnHandle_t *handle, int m, int n, int *A, int lda, int *Lwork )
{
return CUSOLVER_STATUS_SUCCESS;
}
cusolverStatus_t cusolverOperations<float>::cusolverDnTgeqrf_bufferSize( cusolverDnHandle_t *handle, int m, int n, float *A, int lda, int *Lwork )
{
return cusolverDnSgeqrf_bufferSize( *handle, m, n, A, lda, Lwork );
} <|fim▁hole|>
cusolverStatus_t cusolverOperations<double>::cusolverDnTgeqrf_bufferSize( cusolverDnHandle_t *handle, int m, int n, double *A, int lda, int *Lwork )
{
return cusolverDnDgeqrf_bufferSize( *handle, m, n, A, lda, Lwork );
}
cusolverStatus_t cusolverOperations<ComplexFloat>::cusolverDnTgeqrf_bufferSize( cusolverDnHandle_t *handle, int m, int n, ComplexFloat *A, int lda, int *Lwork )
{
return cusolverDnCgeqrf_bufferSize( *handle, m, n, (cuFloatComplex*)A, lda, Lwork );
}
cusolverStatus_t cusolverOperations<ComplexDouble>::cusolverDnTgeqrf_bufferSize( cusolverDnHandle_t *handle, int m, int n, ComplexDouble *A, int lda, int *Lwork )
{
return cusolverDnZgeqrf_bufferSize( *handle, m, n, (cuDoubleComplex*)A, lda, Lwork );
}
cusolverStatus_t cusolverOperations<int>::cusolverDnTgeqrf( cusolverDnHandle_t *handle, int m, int n, int *A, int lda, int *TAU, int *Workspace, int Lwork, int *devInfo )
{
return CUSOLVER_STATUS_SUCCESS;
}
cusolverStatus_t cusolverOperations<float>::cusolverDnTgeqrf( cusolverDnHandle_t *handle, int m, int n, float *A, int lda, float *TAU, float *Workspace, int Lwork, int *devInfo )
{
return cusolverDnSgeqrf( *handle, m, n, A, lda, TAU, Workspace, Lwork, devInfo );
}
cusolverStatus_t cusolverOperations<double>::cusolverDnTgeqrf( cusolverDnHandle_t *handle, int m, int n, double *A, int lda, double *TAU, double *Workspace, int Lwork, int *devInfo )
{
return cusolverDnDgeqrf( *handle, m, n, A, lda, TAU, Workspace, Lwork, devInfo );
}
cusolverStatus_t cusolverOperations<ComplexFloat>::cusolverDnTgeqrf( cusolverDnHandle_t *handle, int m, int n, ComplexFloat *A, int lda, ComplexFloat *TAU, ComplexFloat *Workspace, int Lwork, int *devInfo )
{
return cusolverDnCgeqrf( *handle, m, n, (cuFloatComplex*)A, lda, (cuFloatComplex*)TAU, (cuFloatComplex*)Workspace, Lwork, devInfo );
}
cusolverStatus_t cusolverOperations<ComplexDouble>::cusolverDnTgeqrf( cusolverDnHandle_t *handle, int m, int n, ComplexDouble *A, int lda, ComplexDouble *TAU, ComplexDouble *Workspace, int Lwork, int *devInfo )
{
return cusolverDnZgeqrf( *handle, m, n, (cuDoubleComplex*)A, lda, (cuDoubleComplex*)TAU, (cuDoubleComplex*)Workspace, Lwork, devInfo );
}
cusolverStatus_t cusolverOperations<int>::cusolverDnTormqr( cusolverDnHandle_t *handle, cublasSideMode_t side, cublasOperation_t trans, int m, int n, int k, const int *A, int lda, const int *tau, int *C, int ldc, int *work, int lwork, int *devInfo )
{
return CUSOLVER_STATUS_SUCCESS;
}
cusolverStatus_t cusolverOperations<float>::cusolverDnTormqr( cusolverDnHandle_t *handle, cublasSideMode_t side, cublasOperation_t trans, int m, int n, int k, const float *A, int lda, const float *tau, float *C, int ldc, float *work, int lwork, int *devInfo )
{
return cusolverDnSormqr( *handle, side, trans, m, n, k, A, lda, tau, C, ldc, work, lwork, devInfo );
}
cusolverStatus_t cusolverOperations<double>::cusolverDnTormqr( cusolverDnHandle_t *handle, cublasSideMode_t side, cublasOperation_t trans, int m, int n, int k, const double *A, int lda, const double *tau, double *C, int ldc, double *work, int lwork, int *devInfo )
{
return cusolverDnDormqr( *handle, side, trans, m, n, k, A, lda, tau, C, ldc, work, lwork, devInfo );
}
cusolverStatus_t cusolverOperations<ComplexFloat>::cusolverDnTormqr( cusolverDnHandle_t *handle, cublasSideMode_t side, cublasOperation_t trans, int m, int n, int k, const ComplexFloat *A, int lda, const ComplexFloat *tau, ComplexFloat *C, int ldc, ComplexFloat *work, int lwork, int *devInfo )
{
return cusolverDnCunmqr( *handle, side, trans, m, n, k, (const cuFloatComplex*)A, lda, (const cuFloatComplex*)tau, (cuFloatComplex*)C, ldc, (cuFloatComplex*)work, lwork, devInfo );
}
cusolverStatus_t cusolverOperations<ComplexDouble>::cusolverDnTormqr( cusolverDnHandle_t *handle, cublasSideMode_t side, cublasOperation_t trans, int m, int n, int k, const ComplexDouble *A, int lda, const ComplexDouble *tau, ComplexDouble *C, int ldc, ComplexDouble *work, int lwork, int *devInfo )
{
return cusolverDnZunmqr( *handle, side, trans, m, n, k, (const cuDoubleComplex*)A, lda, (const cuDoubleComplex*)tau, (cuDoubleComplex*)C, ldc, (cuDoubleComplex*)work, lwork, devInfo );
}
cusolverStatus_t cusolverOperations<int>::cusolverDnTgetrf_bufferSize( cusolverDnHandle_t *handle, int m, int n, int *A, int lda, int *Lwork )
{
return CUSOLVER_STATUS_SUCCESS;
}
cusolverStatus_t cusolverOperations<float>::cusolverDnTgetrf_bufferSize( cusolverDnHandle_t *handle, int m, int n, float *A, int lda, int *Lwork )
{
return cusolverDnSgetrf_bufferSize( *handle, m, n, A, lda, Lwork );
}
cusolverStatus_t cusolverOperations<double>::cusolverDnTgetrf_bufferSize( cusolverDnHandle_t *handle, int m, int n, double *A, int lda, int *Lwork )
{
return cusolverDnDgetrf_bufferSize( *handle, m, n, A, lda, Lwork );
}
cusolverStatus_t cusolverOperations<ComplexFloat>::cusolverDnTgetrf_bufferSize( cusolverDnHandle_t *handle, int m, int n, ComplexFloat *A, int lda, int *Lwork )
{
return cusolverDnCgetrf_bufferSize( *handle, m, n, (cuFloatComplex*)A, lda, Lwork );
}
cusolverStatus_t cusolverOperations<ComplexDouble>::cusolverDnTgetrf_bufferSize( cusolverDnHandle_t *handle, int m, int n, ComplexDouble *A, int lda, int *Lwork )
{
return cusolverDnZgetrf_bufferSize( *handle, m, n, (cuDoubleComplex*)A, lda, Lwork );
}
cusolverStatus_t cusolverOperations<int>::cusolverDnTgetrf( cusolverDnHandle_t *handle, int m, int n, int *A, int lda, int *Workspace, int *devIpiv, int *devInfo )
{
return CUSOLVER_STATUS_SUCCESS;
}
cusolverStatus_t cusolverOperations<float>::cusolverDnTgetrf( cusolverDnHandle_t *handle, int m, int n, float *A, int lda, float *Workspace, int *devIpiv, int *devInfo )
{
return cusolverDnSgetrf( *handle, m, n, A, lda, Workspace, devIpiv, devInfo );
}
cusolverStatus_t cusolverOperations<double>::cusolverDnTgetrf( cusolverDnHandle_t *handle, int m, int n, double *A, int lda, double *Workspace, int *devIpiv, int *devInfo )
{
return cusolverDnDgetrf( *handle, m, n, A, lda, Workspace, devIpiv, devInfo );
}
cusolverStatus_t cusolverOperations<ComplexFloat>::cusolverDnTgetrf( cusolverDnHandle_t *handle, int m, int n, ComplexFloat *A, int lda, ComplexFloat *Workspace, int *devIpiv, int *devInfo )
{
return cusolverDnCgetrf( *handle, m, n, (cuFloatComplex*)A, lda, (cuFloatComplex*)Workspace, devIpiv, devInfo );
}
cusolverStatus_t cusolverOperations<ComplexDouble>::cusolverDnTgetrf( cusolverDnHandle_t *handle, int m, int n, ComplexDouble *A, int lda, ComplexDouble *Workspace, int *devIpiv, int *devInfo )
{
return cusolverDnZgetrf( *handle, m, n, (cuDoubleComplex*)A, lda, (cuDoubleComplex*)Workspace, devIpiv, devInfo );
}
cusolverStatus_t cusolverOperations<int>::cusolverDnTgetrs( cusolverDnHandle_t *handle, cublasOperation_t trans, int n, int nrhs, const int *A, int lda, const int *devIpiv, int *B, int ldb, int *devInfo )
{
return CUSOLVER_STATUS_SUCCESS;
}
cusolverStatus_t cusolverOperations<float>::cusolverDnTgetrs( cusolverDnHandle_t *handle, cublasOperation_t trans, int n, int nrhs, const float *A, int lda, const int *devIpiv, float *B, int ldb, int *devInfo )
{
return cusolverDnSgetrs( *handle, trans, n, nrhs, A, lda, devIpiv, B, ldb, devInfo );
}
cusolverStatus_t cusolverOperations<double>::cusolverDnTgetrs( cusolverDnHandle_t *handle, cublasOperation_t trans, int n, int nrhs, const double *A, int lda, const int *devIpiv, double *B, int ldb, int *devInfo )
{
return cusolverDnDgetrs( *handle, trans, n, nrhs, A, lda, devIpiv, B, ldb, devInfo );
}
cusolverStatus_t cusolverOperations<ComplexFloat>::cusolverDnTgetrs( cusolverDnHandle_t *handle, cublasOperation_t trans, int n, int nrhs, const ComplexFloat *A, int lda, const int *devIpiv, ComplexFloat *B, int ldb, int *devInfo )
{
return cusolverDnCgetrs( *handle, trans, n, nrhs, (const cuFloatComplex*)A, lda, devIpiv, (cuFloatComplex*)B, ldb, devInfo );
}
cusolverStatus_t cusolverOperations<ComplexDouble>::cusolverDnTgetrs( cusolverDnHandle_t *handle, cublasOperation_t trans, int n, int nrhs, const ComplexDouble *A, int lda, const int *devIpiv, ComplexDouble *B, int ldb, int *devInfo )
{
return cusolverDnZgetrs( *handle, trans, n, nrhs, (const cuDoubleComplex*)A, lda, devIpiv, (cuDoubleComplex*)B, ldb, devInfo );
}
}<|fim▁end|>
| |
<|file_name|>dstr-array-elem-trlg-iter-list-nrml-close-skip.js<|end_file_name|><|fim▁begin|>// This file was procedurally generated from the following sources:
// - src/dstr-assignment/array-elem-trlg-iter-list-nrml-close-skip.case
// - src/dstr-assignment/default/for-of.template
/*---
description: IteratorClose is not invoked when evaluation of AssignmentElementList exhausts the iterator (For..of statement)
esid: sec-for-in-and-for-of-statements-runtime-semantics-labelledevaluation
es6id: 13.7.5.11
features: [Symbol.iterator, destructuring-binding]
flags: [generated]
info: |
IterationStatement :
for ( LeftHandSideExpression of AssignmentExpression ) Statement
1. Let keyResult be the result of performing ? ForIn/OfHeadEvaluation(« »,
AssignmentExpression, iterate).
2. Return ? ForIn/OfBodyEvaluation(LeftHandSideExpression, Statement,
keyResult, assignment, labelSet).
13.7.5.13 Runtime Semantics: ForIn/OfBodyEvaluation
[...]
4. If destructuring is true and if lhsKind is assignment, then
a. Assert: lhs is a LeftHandSideExpression.
b. Let assignmentPattern be the parse of the source text corresponding to
lhs using AssignmentPattern as the goal symbol.
[...]
ArrayAssignmentPattern :
[ AssignmentElementList , Elisionopt AssignmentRestElementopt ]
[...]
3. Let iteratorRecord be Record {[[iterator]]: iterator, [[done]]: false}.
4. Let status be the result of performing
IteratorDestructuringAssignmentEvaluation of AssignmentElementList using
iteratorRecord as the argument.
5. If status is an abrupt completion, then
a. If iteratorRecord.[[done]] is false, return IteratorClose(iterator,
status).
b. Return Completion(status).
---*/
var nextCount = 0;
var returnCount = 0;
var iterable = {};
var thrower = function() {
throw new Test262Error();
};
var x;
var iterator = {
next: function() {
nextCount += 1;
return { done: true };
},
return: function() {
returnCount += 1;
}
};<|fim▁hole|> return iterator;
};
var counter = 0;
for ([ x , ] of [iterable]) {
assert.sameValue(nextCount, 1);
assert.sameValue(returnCount, 0);
counter += 1;
}
assert.sameValue(counter, 1);<|fim▁end|>
|
iterable[Symbol.iterator] = function() {
|
<|file_name|>funcionJS.js<|end_file_name|><|fim▁begin|>//Function to insert a piece of HTML code
function loadXMLDoc(url)
{
<|fim▁hole|> var xmlhttp;
if (window.XMLHttpRequest)
{// code for IE7+, Firefox, Chrome, Opera, Safari
xmlhttp=new XMLHttpRequest();
}
else
{// code for IE6, IE5
xmlhttp=new ActiveXObject("Microsoft.XMLHTTP");
}
xmlhttp.onreadystatechange=function()
{
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
document.getElementById("principal").innerHTML=xmlhttp.responseText;
}
}
xmlhttp.open("GET",url,true);
xmlhttp.send();
}<|fim▁end|>
| |
<|file_name|>exercise-19.js<|end_file_name|><|fim▁begin|>// 19. Write a JavaScript function that returns array elements larger than a number.
//two args - an array and a number to be larger than
function isGreater(arr, num) {<|fim▁hole|> var resultArray = [];
//iterate through based on length of the arr
for(var i = 0; i < arr.length; i++) {
//if current arr value is greater than num
if(arr[i] > num) {
//push result to resultArray
resultArray.push(arr[i]);
}
}
//log results
console.log(resultArray);
}<|fim▁end|>
|
//set up an array to contain the results
|
<|file_name|>multifiles_local_test.go<|end_file_name|><|fim▁begin|>/*
Real-time Online/Offline Charging System (OCS) for Telecom & ISP environments
Copyright (C) ITsysCOM GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
*/
package config
import (
"flag"
"github.com/cgrates/cgrates/utils"
"testing"
)
var testLocal = flag.Bool("local", false, "Perform the tests only on local test environment, disabled by default.") // This flag will be passed here via "go test -local" args
var mfCgrCfg *CGRConfig
func TestMfInitConfig(t *testing.T) {
if !*testLocal {
return
}
var err error
if mfCgrCfg, err = NewCGRConfigFromFolder("/usr/share/cgrates/conf/samples/multifiles"); err != nil {
t.Fatal("Got config error: ", err.Error())
}
}
func TestMfGeneralItems(t *testing.T) {
if !*testLocal {
return
}
if mfCgrCfg.DefaultReqType != utils.META_PSEUDOPREPAID { // Twice reconfigured
t.Error("DefaultReqType: ", mfCgrCfg.DefaultReqType)
}
	if mfCgrCfg.DefaultCategory != "call" { // Not configured, should be inherited from default
t.Error("DefaultCategory: ", mfCgrCfg.DefaultCategory)
}
}
func TestMfCdreDefaultInstance(t *testing.T) {
if !*testLocal {
return
}
for _, prflName := range []string{"*default", "export1"} {
if _, hasIt := mfCgrCfg.CdreProfiles[prflName]; !hasIt {
t.Error("Cdre does not contain profile ", prflName)
}
}
prfl := "*default"
if mfCgrCfg.CdreProfiles[prfl].CdrFormat != "csv" {
t.Error("Default instance has cdrFormat: ", mfCgrCfg.CdreProfiles[prfl].CdrFormat)
}
	if mfCgrCfg.CdreProfiles[prfl].DataUsageMultiplyFactor != 1024.0 {
		t.Error("Default instance has DataUsageMultiplyFactor: ", mfCgrCfg.CdreProfiles[prfl].DataUsageMultiplyFactor)
}
if len(mfCgrCfg.CdreProfiles[prfl].HeaderFields) != 0 {
t.Error("Default instance has number of header fields: ", len(mfCgrCfg.CdreProfiles[prfl].HeaderFields))<|fim▁hole|> if mfCgrCfg.CdreProfiles[prfl].ContentFields[2].Tag != "Direction" {
t.Error("Unexpected headerField value: ", mfCgrCfg.CdreProfiles[prfl].ContentFields[2].Tag)
}
}
func TestMfCdreExport1Instance(t *testing.T) {
if !*testLocal {
return
}
prfl := "export1"
if mfCgrCfg.CdreProfiles[prfl].CdrFormat != "csv" {
t.Error("Export1 instance has cdrFormat: ", mfCgrCfg.CdreProfiles[prfl].CdrFormat)
}
	if mfCgrCfg.CdreProfiles[prfl].DataUsageMultiplyFactor != 1.0 {
		t.Error("Export1 instance has DataUsageMultiplyFactor: ", mfCgrCfg.CdreProfiles[prfl].DataUsageMultiplyFactor)
}
	if mfCgrCfg.CdreProfiles[prfl].CostRoundingDecimals != 3.0 {
		t.Error("Export1 instance has CostRoundingDecimals: ", mfCgrCfg.CdreProfiles[prfl].CostRoundingDecimals)
}
if len(mfCgrCfg.CdreProfiles[prfl].HeaderFields) != 2 {
t.Error("Export1 instance has number of header fields: ", len(mfCgrCfg.CdreProfiles[prfl].HeaderFields))
}
if mfCgrCfg.CdreProfiles[prfl].HeaderFields[1].Tag != "RunId" {
t.Error("Unexpected headerField value: ", mfCgrCfg.CdreProfiles[prfl].HeaderFields[1].Tag)
}
if len(mfCgrCfg.CdreProfiles[prfl].ContentFields) != 9 {
t.Error("Export1 instance has number of content fields: ", len(mfCgrCfg.CdreProfiles[prfl].ContentFields))
}
if mfCgrCfg.CdreProfiles[prfl].ContentFields[2].Tag != "Account" {
t.Error("Unexpected headerField value: ", mfCgrCfg.CdreProfiles[prfl].ContentFields[2].Tag)
}
}<|fim▁end|>
|
}
if len(mfCgrCfg.CdreProfiles[prfl].ContentFields) != 12 {
t.Error("Default instance has number of content fields: ", len(mfCgrCfg.CdreProfiles[prfl].ContentFields))
}
|
<|file_name|>connect.py<|end_file_name|><|fim▁begin|>import requests
import os
def post_var(token, ubi_source, ubi_var, value, timestamp=None, context=None):
'''
This function posts data to ubidots
Input parameters:
token: The Ubidots token
ubi_source: The name of the user's Ubidots datasource
ubi_var: The name of the user's Ubidots variable
value: Value to be sent
timestamp: Optional, for custom timestamp
context: Optional, for custom context
'''
try:
url = os.getenv("UBIDOTS_URL") if os.getenv("UBIDOTS_URL") is not None else "http://things.ubidots.com/"
url = url + "api/v1.6/devices/" + ubi_source + "/"
headers = {"X-Auth-Token": token,
"Content-Type":"application/json"}
data = {ubi_var:{"value": value}}
if (timestamp!=None):
data[ubi_var]["timestamp"]=timestamp
if (context!=None):
data[ubi_var]["context"]=context
r = requests.post(url=url, headers=headers, json= data)
except Exception as e:
return e
def get_var(token, ubi_source, ubi_var, value, timestamp=None, context=None):
'''<|fim▁hole|> This function gets data from ubidots
Input parameters:
token: The Ubidots token
ubi_source: The name of the user's Ubidots datasource
ubi_var: The name of the user's Ubidots variable
Return:
Returns in this order the next parameters: value, timestamp, context
'''
try:
url = os.getenv("UBIDOTS_URL") if os.getenv("UBIDOTS_URL") is not None else "http://things.ubidots.com/"
url = url + "api/v1.6/devices/" + ubi_source + "/" + ubi_var + "/values?page_size=1"
headers = {"X-Auth-Token": token,
"Content-Type":"application/json"}
r = requests.get(url=url, headers=headers)
return r.json()[0]['value'], r.json()[0]['timestamp'], r.json()[0]['context']
except Exception as e:
return e<|fim▁end|>
| |
<|file_name|>HexStats.py<|end_file_name|><|fim▁begin|><|fim▁hole|>#Based on Weechat's Weestats: https://weechat.org/scripts/source/weestats.py.html/
#By Filip H.F. 'FiXato' Slagter <fixato [at] gmail [dot] com>
__module_name__ = 'HexStats'
__module_version__ = '0.0.1'
__module_description__ = 'Displays HexChat-wide User Statistics'
__module_author__ = 'Vlek'
def stats(word, word_to_eol, userdata):
print( getstats() )
return hexchat.EAT_ALL
def printstats(word, word_to_eol, userdata):
hexchat.command('say {}'.format( getstats() ))
return hexchat.EAT_ALL
def check_opped(ctx, nickprefixes):
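    # nickprefixes is ordered from highest to lowest privilege, so any prefix at or before '@' means op or higher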
op_idx = nickprefixes.index('@')
nick = ctx.get_info('nick')
me = [user for user in ctx.get_list('users') if hexchat.nickcmp(user.nick, nick) == 0][0]
if me.prefix and nickprefixes.index(me.prefix[0]) <= op_idx:
return True
return False
def getstats():
contexts = hexchat.get_list('channels')
channels = 0
servers = 0
queries = 0
ops = 0
for ctx in contexts:
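        # context types: 1 = server, 2 = channel, 3 = query/dialog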
if ctx.type == 1:
servers += 1
elif ctx.type == 2:
channels += 1
if check_opped(ctx.context, ctx.nickprefixes):
ops += 1
elif ctx.type == 3:
queries += 1
return 'Stats: {} channels ({} OPs), {} servers, {} queries'.format( channels, ops,
servers, queries )
hexchat.hook_command("stats", stats, help="/stats displays HexChat user statistics")
hexchat.hook_command("printstats", printstats, help="/printstats Says HexChat user statistics in current context")<|fim▁end|>
|
import hexchat
|
<|file_name|>header.component.js<|end_file_name|><|fim▁begin|>(function() {
'use strict';
/**
* idHeader
*
* This component renders the application header.
*/
angular
.module('app.components')
.component('idHeader', header());
/**
* Internal function that returns the component.
* @returns {object} the angular component
*/
function header() {
return {
templateUrl: 'app/components/header.component.html',
controller: HeaderCtrl,
controllerAs: 'vm'
};
}
/**
* Constructor function for the component's controller.
* @constructor
*/
HeaderCtrl.$inject = ['$state', 'eventingService', 'authService'];
function HeaderCtrl($state, eventingService, authService) {
var vm = this;
// lifecycle hooks
vm.$onInit = onInit;
// scope functions
vm.executeSearch = executeSearch;
vm.goToIdeas = goToIdeas;
vm.goToPeople = goToPeople;
vm.goToAccount = goToAccount;
vm.logout = logout;
vm.clearSearchValue = clearSearchValue;
/////////////////////
/**
* Initializes the component.
*/<|fim▁hole|> }
vm.headerVisible = true;
vm.searchValue = '';
vm.searchResultsVisible = false;
eventingService.registerListener('accountChange', 'header', function(user) {
vm.currentUserName = !user ? '' : user.firstName + ' ' + user.lastName;
});
}
/**
* Executes a search.
*/
function executeSearch() {
vm.searchResultsVisible = true;
}
/**
* Navigates to the Ideas view.
*/
function goToIdeas() {
$state.go('ideas');
}
/**
* Navigates to the People view.
*/
function goToPeople() {
$state.go('people');
}
/**
* Navigates to the Account view.
*/
function goToAccount() {
$state.go('account');
}
/**
* Logs the current user out of the application.
*/
function logout() {
authService.logout()
.then(function() {
$state.go('login');
});
}
/**
* Clears the search text field.
*/
function clearSearchValue() {
vm.searchValue = '';
}
}
})();<|fim▁end|>
|
function onInit() {
var currentUser = authService.currentUser();
if (currentUser) {
vm.currentUserName = currentUser.firstName + ' ' + currentUser.lastName;
|
<|file_name|>trafficDownloader.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
__author__ = "Devin Kelly"
import pymongo
import time
import json
import re
import os
import daemon
from datetime import datetime
from tornado import httpclient, ioloop
def parseHTML(htmlData):
expr = re.compile("In current traffic: [0-9]{0,2} mins")
matches = re.finditer(expr, htmlData)
trafficData = []
for ii in matches:
tmpData = {}
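        # take a fixed 180-character window of HTML starting at the match and strip the tags, leaving "In current traffic: N mins <route>"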
s = re.sub('<[^<]+?>', '', htmlData[ii.start(0): ii.start(0) + 180])
s = re.sub("<.*$", '', s)
(travelTime, route) = s.split('mins')
route = re.sub("^\s*", "", route)
route = re.sub("\s*$", "", route)
tmpData["route"] = route
travelTime = re.sub("^.*:\s*", "", travelTime)
tmpData["time"] = travelTime<|fim▁hole|>
return trafficData
def insertData(coll, data):
timestamp = time.time()
for trip in data:
coll.insert({"commuteTime": trip['time'], "timestamp": timestamp, "route": trip['route']})
def getWeekdayCommuteTimeFunction(coll, toAddr, fromAddr, startHour, endHour):
toAddr = toAddr.replace(" ", "+")
fromAddr = fromAddr.replace(" ", "+")
url = "https://maps.google.com/maps?saddr={0}&daddr={1}&hl=en".format(toAddr, fromAddr)
def weekdayCommuteTime():
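        # only fetch on weekdays (Mon=0 .. Fri=4) and within the [startHour, endHour] window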
now = time.time()
dt = datetime.fromtimestamp(now)
if dt.weekday() > 4:
return
if dt.hour < startHour or dt.hour > endHour:
return
http_client = httpclient.HTTPClient()
print 'fetching'
try:
response = http_client.fetch(url)
trafficData = parseHTML(response.body)
print trafficData
insertData(coll, trafficData)
except httpclient.HTTPError as e:
print "Error:", e
http_client.close()
return weekdayCommuteTime
def main():
# Setup DB
dbName = "traffic"
cli = pymongo.MongoClient()
db = cli[dbName]
# Read Config File
with open("trafficConfig.json") as fd:
config = json.loads(fd.read())
home = config["home"]
work = config["work"]
interval = config["interval_ms"]
# Setup IO Loop
callbacks = []
io_loop = ioloop.IOLoop.instance()
# morning commute
startHour = 6
endHour = 11
coll = db["morning"]
F1 = getWeekdayCommuteTimeFunction(coll, home, work, startHour, endHour)
callbacks.append(ioloop.PeriodicCallback(F1, interval, io_loop))
# afternoon commute
startHour = 15
endHour = 23
coll = db["afternoon"]
F2 = getWeekdayCommuteTimeFunction(coll, work, home, startHour, endHour)
callbacks.append(ioloop.PeriodicCallback(F2, interval, io_loop))
# Start callbacks
[ii.start() for ii in callbacks]
# Start IO Loop
io_loop.start()
return
if __name__ == "__main__":
pwd = os.getcwd()
with daemon.DaemonContext(working_directory=pwd):
main()<|fim▁end|>
|
trafficData.append(tmpData)
|
<|file_name|>main.cpp<|end_file_name|><|fim▁begin|>/*
This source file is part of KBEngine
For the latest info, see http://www.kbengine.org/
Copyright (c) 2008-2012 KBEngine.
KBEngine is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
KBEngine is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with KBEngine. If not, see <http://www.gnu.org/licenses/>.
*/
#include "server/kbemain.hpp"
#include "cellappmgr.hpp"
#include "machine/machine_interface.hpp"
#define DEFINE_IN_INTERFACE
#include "machine/machine_interface.hpp"
#undef DEFINE_IN_INTERFACE
<|fim▁hole|>#include "baseappmgr/baseappmgr_interface.hpp"
#define DEFINE_IN_INTERFACE
#include "baseappmgr/baseappmgr_interface.hpp"
#undef DEFINE_IN_INTERFACE
#include "cellapp/cellapp_interface.hpp"
#define DEFINE_IN_INTERFACE
#include "cellapp/cellapp_interface.hpp"
#undef DEFINE_IN_INTERFACE
#include "baseapp/baseapp_interface.hpp"
#define DEFINE_IN_INTERFACE
#include "baseapp/baseapp_interface.hpp"
#undef DEFINE_IN_INTERFACE
#include "dbmgr/dbmgr_interface.hpp"
#define DEFINE_IN_INTERFACE
#include "dbmgr/dbmgr_interface.hpp"
#undef DEFINE_IN_INTERFACE
#include "loginapp/loginapp_interface.hpp"
#define DEFINE_IN_INTERFACE
#include "loginapp/loginapp_interface.hpp"
#undef DEFINE_IN_INTERFACE
#include "resourcemgr/resourcemgr_interface.hpp"
#define DEFINE_IN_INTERFACE
#include "resourcemgr/resourcemgr_interface.hpp"
#undef DEFINE_IN_INTERFACE
#include "tools/message_log/messagelog_interface.hpp"
#define DEFINE_IN_INTERFACE
#include "tools/message_log/messagelog_interface.hpp"
#undef DEFINE_IN_INTERFACE
#include "tools/bots/bots_interface.hpp"
#define DEFINE_IN_INTERFACE
#include "tools/bots/bots_interface.hpp"
#undef DEFINE_IN_INTERFACE
#include "tools/billing_system/billingsystem_interface.hpp"
#define DEFINE_IN_INTERFACE
#include "tools/billing_system/billingsystem_interface.hpp"
using namespace KBEngine;
int KBENGINE_MAIN(int argc, char* argv[])
{
ENGINE_COMPONENT_INFO& info = g_kbeSrvConfig.getCellAppMgr();
return kbeMainT<Cellappmgr>(argc, argv, CELLAPPMGR_TYPE, -1, -1, "", 0, info.internalInterface);
}<|fim▁end|>
| |
<|file_name|>index.js<|end_file_name|><|fim▁begin|>/**
* @fileoverview Defines compressors utility methods.
*
* @see https://google.github.io/styleguide/javascriptguide.xml
* @see https://developers.google.com/closure/compiler/docs/js-for-compiler
* @module glize/compressors
*/
import * as lzw from 'lzw-compressor';
/**
* Enumeration of available compression types.
* @enum {string}
*/
export const TYPE = {
LZW: 'lzw'
};
/**
* Compress data string using specified compression type.
* @param {string} data Data to compress.
* @param {string=} [opt_type=TYPE.LZW] Optional compression type.
* @return {string} Returns compressed data.
* @method
* @example
* const result = compress(
* 'Any string of any length. Any string of any length. Any string of any length.');
* console.log(result);
* //> Any string of aā leĈth. ĀĂĄĆĈĊČĎĂđēĕėďĚćĉċčďġgĔ.
*/
export const compress = (data, opt_type = TYPE.LZW) => {
let result = '';
if (TYPE.LZW === opt_type) {
result = lzw.compress(data);
}<|fim▁hole|>
/**
 * Decompress data string using specified compression type.
 * @param {string} data Data to decompress.
 * @param {string=} [opt_type=TYPE.LZW] Optional compression type.
 * @return {string} Returns decompressed data.
* @method
* @example
* const result = decompress('Any string of aā leĈth. ĀĂĄĆĈĊČĎĂđēĕėďĚćĉċčďġgĔ.');
* console.log(result);
* //> Any string of any length. Any string of any length. Any string of any length.
*/
export const decompress = (data, opt_type = TYPE.LZW) => {
let result = '';
if (TYPE.LZW === opt_type) {
result = lzw.decompress(data);
}
return result;
};<|fim▁end|>
|
return result;
};
|
<|file_name|>configProject.py<|end_file_name|><|fim▁begin|>from PyQt5 import QtCore
from src.business.configuration.constants import project as p
from src.ui.commons.verification import cb
class ConfigProject:
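    """Persists and retrieves project settings in an INI file via QSettings."""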
def __init__(self):
self._settings = QtCore.QSettings(p.CONFIG_FILE, QtCore.QSettings.IniFormat)
def get_value(self, menu, value):
return self._settings.value(menu + '/' + value)
def set_site_settings(self, name, site_id, imager_id):
self._settings.beginGroup(p.SITE_TITLE)
self._settings.setValue(p.NAME, name)
self._settings.setValue(p.SITE_ID, site_id)
self._settings.setValue(p.IMAGER_ID, imager_id)
self._settings.endGroup()
def set_geographic_settings(self, lat, long, elev, press, temp):
self._settings.beginGroup(p.GEOGRAPHIC_TITLE)
self._settings.setValue(p.LATITUDE, lat)
self._settings.setValue(p.LONGITUDE, long)
self._settings.setValue(p.ELEVATION, elev)
self._settings.setValue(p.PRESSURE, press)
self._settings.setValue(p.TEMPERATURE, temp)
self._settings.endGroup()
def set_moonsun_settings(self, solarelev, ignoreLunar, lunarph, lunarpos):
self._settings.beginGroup(p.SUN_MOON_TITLE)
self._settings.setValue(p.MAX_SOLAR_ELEVATION, solarelev)
self._settings.setValue(p.IGNORE_LUNAR_POSITION, ignoreLunar)
self._settings.setValue(p.MAX_LUNAR_PHASE, lunarph)
self._settings.setValue(p.MAX_LUNAR_ELEVATION, lunarpos)
self._settings.endGroup()
def save_settings(self):
self._settings.sync()
def get_site_settings(self):
return self.get_value(p.SITE_TITLE, p.NAME),\
self.get_value(p.SITE_TITLE, p.SITE_ID),\
self.get_value(p.SITE_TITLE, p.IMAGER_ID)
def get_geographic_settings(self):
m = p.GEOGRAPHIC_TITLE
return self.get_value(m, p.LATITUDE),\
self.get_value(m, p.LONGITUDE),\
self.get_value(m, p.ELEVATION),\
self.get_value(m, p.PRESSURE),\<|fim▁hole|>
def get_moonsun_settings(self):
m = p.SUN_MOON_TITLE
return self.get_value(m, p.MAX_SOLAR_ELEVATION),\
cb(self.get_value(m, p.IGNORE_LUNAR_POSITION)),\
self.get_value(m, p.MAX_LUNAR_PHASE),\
self.get_value(m, p.MAX_LUNAR_ELEVATION)<|fim▁end|>
|
self.get_value(m, p.TEMPERATURE)
|
<|file_name|>test_subscription.py<|end_file_name|><|fim▁begin|>"""Test class for Subscriptions
:Requirement: Subscription
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: CLI
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import tempfile
import csv
import os
from robottelo import manifests
from robottelo.cli.activationkey import ActivationKey
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.csv_ import CSV_
from robottelo.cli.factory import (
activationkey_add_subscription_to_repo,
make_activation_key,
make_lifecycle_environment,
make_org,
setup_org_for_a_rh_repo,
)
from robottelo.cli.host import Host
from robottelo.cli.repository import Repository
from robottelo.cli.repository_set import RepositorySet
from robottelo.cli.subscription import Subscription
from robottelo.constants import (
PRDS,
REPOS,
REPOSET,
DEFAULT_SUBSCRIPTION_NAME,
SATELLITE_SUBSCRIPTION_NAME,
)
from robottelo.decorators import (
run_in_one_thread,
skip_if_bug_open,
tier1,
tier2,
tier3,
upgrade
)
from robottelo.ssh import download_file, upload_file
from robottelo.test import CLITestCase
from robottelo.vm import VirtualMachine
@run_in_one_thread
class SubscriptionTestCase(CLITestCase):
"""Manifest CLI tests"""
def setUp(self):
"""Tests for content-view via Hammer CLI"""
super(SubscriptionTestCase, self).setUp()
self.org = make_org()
# pylint: disable=no-self-use
def _upload_manifest(self, org_id, manifest=None):
"""Uploads a manifest into an organization.
A cloned manifest will be used if ``manifest`` is None.
"""
if manifest is None:
manifest = manifests.clone()
self.upload_manifest(org_id, manifest)
@staticmethod
def _read_csv_file(file_path):
"""Read a csv file as a dictionary
:param str file_path: The file location path to read as csv
:returns a tuple (list, list[dict]) that represent field_names, data
"""
csv_data = []
with open(file_path, 'r') as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=',')
field_names = csv_reader.fieldnames
for csv_row in csv_reader:
csv_data.append(csv_row)
return field_names, csv_data
@staticmethod
def _write_csv_file(file_path, filed_names, csv_data):
"""Write to csv file
:param str file_path: The file location path to write as csv
:param list filed_names: The field names to be written
:param list[dict] csv_data: the list dict data to be saved
"""
with open(file_path, 'w') as csv_file:
csv_writer = csv.DictWriter(csv_file, filed_names, delimiter=',')
csv_writer.writeheader()
for csv_row in csv_data:
csv_writer.writerow(csv_row)
@tier1
def test_positive_manifest_upload(self):
"""upload manifest
:id: e5a0e4f8-fed9-4896-87a0-ac33f6baa227
:expectedresults: Manifest are uploaded properly
:CaseImportance: Critical
"""
self._upload_manifest(self.org['id'])
Subscription.list(
{'organization-id': self.org['id']},
per_page=False,
)
@tier1
@upgrade
def test_positive_manifest_delete(self):
"""Delete uploaded manifest
:id: 01539c07-00d5-47e2-95eb-c0fd4f39090f
:expectedresults: Manifest are deleted properly
:CaseImportance: Critical
"""
self._upload_manifest(self.org['id'])
Subscription.list(
{'organization-id': self.org['id']},
per_page=False,
)
Subscription.delete_manifest({
'organization-id': self.org['id'],
})
Subscription.list(
{'organization-id': self.org['id']},
per_page=False,
)
@tier2
@upgrade
def test_positive_enable_manifest_reposet(self):
"""enable repository set
:id: cc0f8f40-5ea6-4fa7-8154-acdc2cb56b45
:expectedresults: you are able to enable and synchronize repository
contained in a manifest
:CaseLevel: Integration
:CaseImportance: Critical
"""
self._upload_manifest(self.org['id'])
Subscription.list(
{'organization-id': self.org['id']},
per_page=False,
)
RepositorySet.enable({
'basearch': 'x86_64',
'name': REPOSET['rhva6'],
'organization-id': self.org['id'],
'product': PRDS['rhel'],
'releasever': '6Server',
})
Repository.synchronize({
'name': REPOS['rhva6']['name'],
'organization-id': self.org['id'],
'product': PRDS['rhel'],
})
@tier1
def test_positive_manifest_history(self):
"""upload manifest and check history
:id: 000ab0a0-ec1b-497a-84ff-3969a965b52c
:expectedresults: Manifest history is shown properly
:CaseImportance: Critical
"""
self._upload_manifest(self.org['id'])
Subscription.list(
{'organization-id': self.org['id']},
per_page=None,
)
history = Subscription.manifest_history({
'organization-id': self.org['id'],
})
self.assertIn(
'{0} file imported successfully.'.format(self.org['name']),
''.join(history),
)
@tier1
@upgrade
def test_positive_manifest_refresh(self):
"""upload manifest and refresh
:id: 579bbbf7-11cf-4d78-a3b1-16d73bd4ca57
:expectedresults: Manifests can be refreshed
:CaseImportance: Critical
"""
self._upload_manifest(
self.org['id'], manifests.original_manifest())
Subscription.list(
{'organization-id': self.org['id']},
per_page=False,
)
Subscription.refresh_manifest({
'organization-id': self.org['id'],
})
Subscription.delete_manifest({
'organization-id': self.org['id'],
})
@skip_if_bug_open('bugzilla', 1226425)
@tier1
def test_negative_manifest_refresh(self):
"""manifest refresh must fail with a cloned manifest
:id: 7f40795f-7841-4063-8a43-de0325c92b1f
:expectedresults: the refresh command returns a non-zero return code
:BZ: 1226425
:CaseImportance: Critical
"""
self._upload_manifest(self.org['id'])
Subscription.list(
{'organization-id': self.org['id']},
per_page=False,
)
with self.assertRaises(CLIReturnCodeError):
Subscription.refresh_manifest({
'organization-id': self.org['id'],
})
@tier3
def test_positive_restore_ak_and_content_hosts_subscriptions(self):
"""Restore activation key and content hosts subscriptions
:id: a44fdeda-9c8c-4316-85b4-a9b6b9f1ffdb
:customerscenario: true
:steps:
1. Setup activation key , lifecycle environment and content view
with RH repository
2. Add RH subscription to activation key
3. Setup hosts (minimum two) and subscribe them to activation key
4. Attach RH subscription to the created content hosts
5. export the activation key and content hosts subscriptions
6. Delete the subscription manifest
7. Ensure that the activation key and content hosts subscriptions
does not exist
8. Upload the subscription manifest
9. Ensure the activation key and content hosts subscriptions does
not exist
10. Restore the activation key and content hosts subscriptions
:expectedresults: activation key and content hosts subscriptions
restored
:CaseImportance: Critical
"""
lce = make_lifecycle_environment({'organization-id': self.org['id']})
activation_key = make_activation_key({
'organization-id': self.org['id'],
'lifecycle-environment-id': lce['id'],
})
ActivationKey.update({
'organization-id': self.org['id'],
'id': activation_key['id'],
'auto-attach': 'false',
})
setup_org_for_a_rh_repo({
'product': PRDS['rhel'],
'repository-set': REPOSET['rhst7'],
'repository': REPOS['rhst7']['name'],
'organization-id': self.org['id'],
'lifecycle-environment-id': lce['id'],
'activationkey-id': activation_key['id'],
}, force_use_cdn=True)
org_subs = Subscription.list({u'organization-id': self.org['id']})
default_subscription_id = None
for sub in org_subs:
if sub['name'] == DEFAULT_SUBSCRIPTION_NAME:
default_subscription_id = sub['id']
break
self.assertIsNotNone(
default_subscription_id, msg='Default subscription not found')
ak_subs = ActivationKey.subscriptions({
'organization-id': self.org['id'],
'id': activation_key['id'],
}, output_format='json')
self.assertIn(
DEFAULT_SUBSCRIPTION_NAME, [sub['name'] for sub in ak_subs])
with VirtualMachine() as client1, VirtualMachine() as client2:
hosts = []
for client in [client1, client2]:
client.install_katello_ca()
client.register_contenthost(
self.org['label'], activation_key=activation_key['name'])
self.assertTrue(client.subscribed)
host = Host.info({'name': client.hostname})
hosts.append(host)
Host.subscription_attach({
'host-id': host['id'],
'subscription-id': default_subscription_id,
})
host_subscriptions = ActivationKey.subscriptions({
'organization-id': self.org['id'],
'id': activation_key['id'],
'host-id': host['id'],
}, output_format='json')
self.assertIn(
DEFAULT_SUBSCRIPTION_NAME,
[sub['name'] for sub in host_subscriptions]
)
# export the current activations and content hosts subscriptions
ak_file_path = '/tmp/ak_{0}.csv'.format(self.org['label'])
ch_file_path = '/tmp/content_hosts_{0}.csv'.format(
self.org['label'])
CSV_.activation_keys({
'export': True,
'file': ak_file_path,
'organization': self.org['name'],
'itemized-subscriptions': True,
})
CSV_.content_hosts({
'export': True,
'file': ch_file_path,
'organization': self.org['name'],
'itemized-subscriptions': True,
})
# delete the manifest
Subscription.delete_manifest({'organization-id': self.org['id']})
# ensure that the subscription does not exist any more
ak_subs = ActivationKey.subscriptions({
'organization-id': self.org['id'],
'id': activation_key['id'],
}, output_format='json')
self.assertNotIn(
DEFAULT_SUBSCRIPTION_NAME, [sub['name'] for sub in ak_subs])
for host in hosts:
host_subscriptions = ActivationKey.subscriptions({
'organization-id': self.org['id'],
'id': activation_key['id'],
'host-id': host['id'],
}, output_format='json')
self.assertNotIn(
DEFAULT_SUBSCRIPTION_NAME,
[sub['name'] for sub in host_subscriptions]
)
# upload the manifest again
self._upload_manifest(self.org['id'])
# ensure that the subscription was not auto attached
ak_subs = ActivationKey.subscriptions({
'organization-id': self.org['id'],
'id': activation_key['id'],
}, output_format='json')
self.assertNotIn(
DEFAULT_SUBSCRIPTION_NAME, [sub['name'] for sub in ak_subs])
for host in hosts:
host_subscriptions = ActivationKey.subscriptions({
'organization-id': self.org['id'],
'id': activation_key['id'],
'host-id': host['id'],
}, output_format='json')
self.assertNotIn(
DEFAULT_SUBSCRIPTION_NAME,
[sub['name'] for sub in host_subscriptions]
)
# restore from the saved activation key and content hosts
# subscriptions
CSV_.activation_keys({
'file': ak_file_path,
'organization': self.org['name'],
'itemized-subscriptions': True,
})
CSV_.content_hosts({
'file': ch_file_path,
'organization': self.org['name'],
'itemized-subscriptions': True,
})
# ensure that the subscriptions has been restored
ak_subs = ActivationKey.subscriptions({
'organization-id': self.org['id'],
'id': activation_key['id'],
}, output_format='json')
self.assertIn(
DEFAULT_SUBSCRIPTION_NAME, [sub['name'] for sub in ak_subs])
for host in hosts:
host_subscriptions = ActivationKey.subscriptions({
'organization-id': self.org['id'],
'id': activation_key['id'],
'host-id': host['id'],
}, output_format='json')
self.assertIn(
DEFAULT_SUBSCRIPTION_NAME,
[sub['name'] for sub in host_subscriptions]
)
@tier3
def test_positive_restore_content_hosts_with_modified_subscription(self):
"""Restore content hosts subscription from an exported content host csv
file with modified subscription.
:id: d8ac08fe-24e0-41e7-b3d8-0ca13a702a64
:customerscenario: true
:steps:
1. Setup activation key , lifecycle environment and content view
with RH tools repository
2. Setup hosts (minimum two) and subscribe them to activation key
3. Attach RH subscription to the created content hosts
4. Export the organization content hosts to a csv file
5. Create a new csv file and modify the subscription with an other
one (the new subscription must have other data than the default
one)
6. Import the new csv file to organization content hosts
:expectedresults: content hosts restored with the new subscription
:BZ: 1296978
:CaseImportance: Critical
"""
lce = make_lifecycle_environment({'organization-id': self.org['id']})
activation_key = make_activation_key({
'organization-id': self.org['id'],
'lifecycle-environment-id': lce['id'],
})
ActivationKey.update({
'organization-id': self.org['id'],
'id': activation_key['id'],
'auto-attach': 'false',
})
# Create RH tools repository and contents, this step should upload
# the default manifest
setup_org_for_a_rh_repo({
'product': PRDS['rhel'],
'repository-set': REPOSET['rhst7'],
'repository': REPOS['rhst7']['name'],
'organization-id': self.org['id'],
'lifecycle-environment-id': lce['id'],
'activationkey-id': activation_key['id'],
}, force_use_cdn=True)
# Export and download the organization subscriptions to prepare the new
# subscription (The replacement of the default subscription)
org_subs_csv_filename = 'subs_{0}.csv'.format(self.org['name'])
org_subs_csv_remote_file_path = '/tmp/{0}'.format(
org_subs_csv_filename)
# export organization subscription to csv file
CSV_.subscriptions({
'export': True,
'file': org_subs_csv_remote_file_path,
'organization': self.org['name'],
})
# download the organization subscriptions
org_subs_csv_local_file_path = os.path.join(
tempfile.gettempdir(), org_subs_csv_filename)
download_file(
org_subs_csv_remote_file_path, org_subs_csv_local_file_path)
_, org_subscriptions = self._read_csv_file(
org_subs_csv_local_file_path)
new_subscription = None
for sub in org_subscriptions:
if sub['Subscription Name'] == SATELLITE_SUBSCRIPTION_NAME:
new_subscription = sub
break
self.assertIsNotNone(new_subscription)
# retrieve the default subscription id
org_subs = Subscription.list({u'organization-id': self.org['id']})
default_subscription_id = None
for sub in org_subs:
if sub['name'] == DEFAULT_SUBSCRIPTION_NAME:
default_subscription_id = sub['id']
break
self.assertIsNotNone(
default_subscription_id, msg='Default subscription not found')
# create 2 Virtual machines
with VirtualMachine() as client1, VirtualMachine() as client2:
hosts = []
for client in [client1, client2]:
client.install_katello_ca()
client.register_contenthost(
self.org['label'], activation_key=activation_key['name'])
self.assertTrue(client.subscribed)
host = Host.info({'name': client.hostname})
hosts.append(host)
Host.subscription_attach({
'host-id': host['id'],
'subscription-id': default_subscription_id,
})
host_subscriptions = ActivationKey.subscriptions({
'organization-id': self.org['id'],
'id': activation_key['id'],
'host-id': host['id'],
}, output_format='json')
self.assertEqual(len(host_subscriptions), 1)
self.assertEqual(
host_subscriptions[0]['name'], DEFAULT_SUBSCRIPTION_NAME)
# export the content host data to csv file
chs_export_file_name = 'chs_export_{0}.csv'.format(
self.org['label'])
chs_export_remote_file_path = (
'/tmp/{0}'.format(chs_export_file_name)
)
CSV_.content_hosts({
'export': True,
'file': chs_export_remote_file_path,
'organization': self.org['name'],
})
# download the csv file
chs_export_local_file_path = os.path.join(
tempfile.gettempdir(), chs_export_file_name)
download_file(
chs_export_remote_file_path, chs_export_local_file_path)
# modify the content hosts subscription
field_names, csv_data = self._read_csv_file(
chs_export_local_file_path)
# each client is represented by one row of data
self.assertEqual(len(csv_data), 2)
for row_data in csv_data:
# The subscription is saved in the following format:
# """<quantity>|<sku>|<name>|<contract>|<account>"""
subscription_data = row_data['Subscriptions'].strip(
'"').split('|')
# change the subscription SKU (looks like RH00001)
subscription_data[1] = new_subscription['Subscription SKU']
# change the name
subscription_data[2] = new_subscription['Subscription Name']
# change the contract number
subscription_data[3] = new_subscription[
'Subscription Contract']
# change the subscription account
subscription_data[4] = new_subscription[
'Subscription Account']
# modify the subscription data
row_data['Subscriptions'] = '"{0}"'.format(
'|'.join(subscription_data))
# generate a new csv file
chs_import_file_name = 'chs_import_{0}.csv'.format(
self.org['name'])
chs_import_local_file_path = os.path.join(
tempfile.gettempdir(), chs_import_file_name)
self._write_csv_file(
chs_import_local_file_path, field_names, csv_data)
# upload the file
chs_import_remote_file_path = (
'/tmp/{0}'.format(chs_import_file_name)
)
upload_file(
chs_import_local_file_path, chs_import_remote_file_path)
# import content hosts data from csv file
CSV_.content_hosts({
'file': chs_import_remote_file_path,
'organization': self.org['name'],
})
for host in hosts:
host_subscriptions = ActivationKey.subscriptions({
'organization-id': self.org['id'],
'id': activation_key['id'],
'host-id': host['id'],
}, output_format='json')
self.assertEqual(len(host_subscriptions), 1)
self.assertEqual(
host_subscriptions[0]['name'], SATELLITE_SUBSCRIPTION_NAME)
self.assertEqual(
host_subscriptions[0]['contract'],
new_subscription['Subscription Contract'])<|fim▁hole|> self.assertEqual(
host_subscriptions[0]['account'],
new_subscription['Subscription Account'])
@tier3
def test_positive_restore_ak_with_modified_subscription(self):
"""Restore activation key subscription from an exported activation key
csv file with modified subscription.
:id: 40b86d1c-88f8-451c-bf19-c5bf11223cb6
:steps:
1. Upload a manifest
2. Create an activation key
3. Attach RH subscription to the created activation key
4. Export the organization activation keys to a csv file
5. Create a new csv file and modify the subscription with an other
one (the new subscription must have other data than the default
one)
6. Import the new csv file to organization activation keys
:expectedresults: activation key restored with the new subscription
:BZ: 1296978
:CaseImportance: Critical
"""
# upload the organization default manifest
self._upload_manifest(self.org['id'])
# Export and download the organization subscriptions to prepare the new
# subscription (The replacement of the default subscription)
org_subs_csv_filename = 'subs_{0}.csv'.format(self.org['name'])
org_subs_csv_remote_file_path = '/tmp/{0}'.format(
org_subs_csv_filename)
# export organization subscription to csv file
CSV_.subscriptions({
'export': True,
'file': org_subs_csv_remote_file_path,
'organization': self.org['name'],
})
# download the organization subscriptions
org_subs_csv_local_file_path = os.path.join(
tempfile.gettempdir(), org_subs_csv_filename)
download_file(
org_subs_csv_remote_file_path, org_subs_csv_local_file_path)
_, org_subscriptions = self._read_csv_file(
org_subs_csv_local_file_path)
new_subscription = None
for sub in org_subscriptions:
if sub['Subscription Name'] == SATELLITE_SUBSCRIPTION_NAME:
new_subscription = sub
break
self.assertIsNotNone(new_subscription)
# Create an activation key and add the default subscription
activation_key = make_activation_key({
'organization-id': self.org['id'],
})
activationkey_add_subscription_to_repo({
'organization-id': self.org['id'],
'activationkey-id': activation_key['id'],
'subscription': DEFAULT_SUBSCRIPTION_NAME,
})
org_subs = Subscription.list({u'organization-id': self.org['id']})
default_subscription_id = None
for sub in org_subs:
if sub['name'] == DEFAULT_SUBSCRIPTION_NAME:
default_subscription_id = sub['id']
break
self.assertIsNotNone(
default_subscription_id, msg='Default subscription not found')
ak_subs = ActivationKey.subscriptions({
'organization-id': self.org['id'],
'id': activation_key['id'],
}, output_format='json')
self.assertEqual(len(ak_subs), 1)
self.assertEqual(
ak_subs[0]['name'], DEFAULT_SUBSCRIPTION_NAME)
# export activation key data to csv file
ak_export_file_name = 'ak_{0}_{1}_export.csv'.format(
self.org['name'], activation_key['name'])
ak_remote_export_file_path = '/tmp/{0}'.format(ak_export_file_name)
CSV_.activation_keys({
'export': True,
'file': ak_remote_export_file_path,
'organization': self.org['name'],
})
# download the file to local temp dir
ak_local_export_file_path = os.path.join(
tempfile.gettempdir(), ak_export_file_name)
download_file(
ak_remote_export_file_path, local_file=ak_local_export_file_path)
# modify the file with new subscription data and upload it
field_names, csv_ak_data = self._read_csv_file(
ak_local_export_file_path)
self.assertEqual(len(csv_ak_data), 1)
csv_ak_data = csv_ak_data[0]
field_names = csv_ak_data.keys()
self.assertIn('Subscriptions', field_names)
self.assertIn('Subscriptions', csv_ak_data)
# The subscription is saved in the following format:
# """<quantity>|<sku>|<name>|<contract>|<account>"""
subscription_data = csv_ak_data['Subscriptions'].strip('"').split('|')
# change the subscription SKU (looks like RH00001)
subscription_data[1] = new_subscription['Subscription SKU']
# change the name
subscription_data[2] = new_subscription['Subscription Name']
# change the contract number
subscription_data[3] = new_subscription['Subscription Contract']
# change the subscription account
subscription_data[4] = new_subscription['Subscription Account']
# modify the subscription data and generate a new csv file
csv_ak_data['Subscriptions'] = '"{0}"'.format(
'|'.join(subscription_data))
ak_import_file_name = 'ak_{0}_{1}_import.csv'.format(
self.org['name'], activation_key['name'])
ak_local_import_file_path = os.path.join(
tempfile.gettempdir(), ak_import_file_name)
self._write_csv_file(
ak_local_import_file_path, field_names, [csv_ak_data])
# upload the generated file
ak_remote_import_file_path = '/tmp/{0}'.format(ak_import_file_name)
upload_file(ak_local_import_file_path, ak_remote_import_file_path)
# import the generated csv file
CSV_.activation_keys({
'file': ak_remote_import_file_path,
'organization': self.org['name'],
})
ak_subs = ActivationKey.subscriptions({
'organization-id': self.org['id'],
'id': activation_key['id'],
}, output_format='json')
self.assertEqual(len(ak_subs), 1)
self.assertEqual(
ak_subs[0]['name'], SATELLITE_SUBSCRIPTION_NAME)
self.assertEqual(
ak_subs[0]['contract'],
new_subscription['Subscription Contract'])
self.assertEqual(
ak_subs[0]['account'], new_subscription['Subscription Account'])<|fim▁end|>
| |
<|file_name|>test_utils.py<|end_file_name|><|fim▁begin|>import teca.utils as tecautils
import teca.ConfigHandler as tecaconf
import unittest
class TestFileFilter(unittest.TestCase):
def setUp(self):
self.conf = tecaconf.ConfigHandler(
"tests/test_data/configuration.json",
{"starting_path": "tests/test_data/images"}
)
self.files_list = [
"foo.doc",
"yukinon.jpg",
"cuteflushadoingflushathings.webm"
]
def test_dothefiltering(self):
self.assertTrue("foo.doc" not in<|fim▁hole|> self.conf))
self.assertTrue("yukinon.jpg" in
tecautils.filterImages(self.files_list,
self.conf))
def test_nofiles(self):
self.assertEqual(0, len(tecautils.filterImages([], self.conf)))<|fim▁end|>
|
tecautils.filterImages(self.files_list,
|
<|file_name|>Statement.after.py<|end_file_name|><|fim▁begin|>def f():
a = 1<|fim▁hole|>
def foo(a_new, b_new):
print(a_new + b_new * 123)<|fim▁end|>
|
b = 1
foo(a, b)
|
<|file_name|>test_models_speaker_contact.py<|end_file_name|><|fim▁begin|># coding: utf-8
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.test import TestCase
from django.core.exceptions import ValidationError
from eventex.core.models import Speaker, Contact
class SpeakerModelTest(TestCase):
"""
Test class.
"""
def setUp(self):
"""
Test initialization.
"""
self.speaker = Speaker(
name='Davi Garcia',
slug='davi-garcia',<|fim▁hole|> self.speaker.save()
def test_create(self):
"""
Speaker instance must be saved.
"""
self.assertEqual(1, self.speaker.pk)
def test_unicode(self):
"""
Speaker string representation should be the name.
"""
self.assertEqual(u'Davi Garcia', unicode(self.speaker))
class ContactModelTest(TestCase):
"""
Test class.
"""
def setUp(self):
"""
Test initialization.
"""
self.speaker = Speaker.objects.create(
name='Davi Garcia',
slug='davi-garcia',
url='http://www.davigarcia.com.br',
description='Passionate software developer!'
)
def test_email(self):
"""
Speaker should have email contact.
"""
contact = Contact.objects.create(
speaker=self.speaker,
kind='E',
value='[email protected]'
)
self.assertEqual(1, contact.pk)
    def test_phone(self):
        """
        Speaker should have phone contact.
        """
contact = Contact.objects.create(
speaker=self.speaker,
kind='P',
value='21-987654321'
)
self.assertEqual(1, contact.pk)
    def test_fax(self):
        """
        Speaker should have fax contact.
        """
contact = Contact.objects.create(
speaker=self.speaker,
kind='F',
value='21-123456789'
)
self.assertEqual(1, contact.pk)
def test_kind(self):
"""
Contact kind must be limited to E, P or F.
"""
contact = Contact(speaker=self.speaker, kind='A', value='B')
self.assertRaises(ValidationError, contact.full_clean)
def test_unicode(self):
"""
Contact string representation should be value.
"""
contact = Contact(
speaker=self.speaker,
kind='E',
value='[email protected]')
self.assertEqual(u'[email protected]', unicode(contact))<|fim▁end|>
|
url='http://www.davigarcia.com.br',
description='Passionate software developer!'
)
|
<|file_name|>send_event_to_device.rs<|end_file_name|><|fim▁begin|>//! `PUT /_matrix/client/*/sendToDevice/{eventType}/{txnId}`
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#put_matrixclientv3sendtodeviceeventtypetxnid
use std::collections::BTreeMap;
use ruma_common::{
api::ruma_api, events::AnyToDeviceEventContent, to_device::DeviceIdOrAllDevices,
TransactionId, UserId,
};
use ruma_serde::Raw;
ruma_api! {
metadata: {
description: "Send an event to a device or devices.",
method: PUT,
name: "send_event_to_device",
r0_path: "/_matrix/client/r0/sendToDevice/:event_type/:txn_id",
stable_path: "/_matrix/client/v3/sendToDevice/:event_type/:txn_id",
rate_limited: false,
authentication: AccessToken,
added: 1.0,
}
request: {
/// Type of event being sent to each device.
#[ruma_api(path)]
pub event_type: &'a str,
/// A request identifier unique to the access token used to send the request.<|fim▁hole|> #[ruma_api(path)]
pub txn_id: &'a TransactionId,
/// Messages to send.
///
/// Different message events can be sent to different devices in the same request, but all
/// events within one request must be of the same type.
pub messages: Messages,
}
#[derive(Default)]
response: {}
error: crate::Error
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given event type, transaction ID and raw messages.
pub fn new_raw(event_type: &'a str, txn_id: &'a TransactionId, messages: Messages) -> Self {
Self { event_type, txn_id, messages }
}
}
impl Response {
/// Creates an empty `Response`.
pub fn new() -> Self {
Self {}
}
}
/// Messages to send in a send-to-device request.
///
/// Represented as a map of `{ user-ids => { device-ids => message-content } }`.
pub type Messages =
BTreeMap<Box<UserId>, BTreeMap<DeviceIdOrAllDevices, Raw<AnyToDeviceEventContent>>>;
}<|fim▁end|>
| |
<|file_name|>test2.js<|end_file_name|><|fim▁begin|>var objc = require('../')
<|fim▁hole|>console.log(NSApplication);
var sharedApplication = objc.sel_registerName('sharedApplication');
var app = objc.objc_msgSend(NSApplication, sharedApplication);
console.log(app);<|fim▁end|>
|
objc.dlopen('/System/Library/Frameworks/AppKit.framework/AppKit');
NSApplication = objc.objc_getClass('NSApplication');
|
<|file_name|>glfs.rs<|end_file_name|><|fim▁begin|>#![allow(non_camel_case_types)]
use libc::{c_char, c_int, c_long, c_void, dev_t, dirent, gid_t, flock, mode_t, off_t, size_t,
stat, ssize_t, statvfs, timespec, uid_t};
pub enum Struct_glfs { }
pub type glfs_t = Struct_glfs;
pub enum Struct_glfs_fd { }
pub type glfs_fd_t = Struct_glfs_fd;
pub type glfs_io_cbk = ::std::option::Option<
extern "C" fn(fd: *mut glfs_fd_t,
ret: ssize_t,
data: *mut c_void)
-> (),
>;
#[repr(C)]
pub struct iovec {
pub iov_base: *const c_void,
pub iov_len: size_t,
}
#[link(name = "gfapi")]
extern "C" {
/// Create a new 'virtual mount' object.
/// This is most likely the very first function you will use. This function
/// will create a new glfs_t (virtual mount) object in memory.
/// On this newly created glfs_t, you need to be either set a volfile path
/// (glfs_set_volfile) or a volfile server (glfs_set_volfile_server).
/// The glfs_t object needs to be initialized with glfs_init() before you
/// can start issuing file operations on it.
pub fn glfs_new(volname: *const c_char) -> *mut glfs_t;
/// Specify the path to the volume specification file.
/// If you are using a static volume specification file (without dynamic
/// volume management abilities from the CLI), then specify the path to
/// the volume specification file.
/// This is incompatible with glfs_set_volfile_server().
pub fn glfs_set_volfile(fs: *mut glfs_t, volfile: *const c_char) -> c_int;
/// Specify the list of addresses for management server.
/// This function specifies the list of addresses for the management server
/// (glusterd) to connect, and establish the volume configuration. The @volname
/// parameter passed to glfs_new() is the volume which will be virtually
/// mounted as the glfs_t object. All operations performed by the CLI at
/// the management server will automatically be reflected in the 'virtual
/// mount' object as it maintains a connection to glusterd and polls on
/// configuration change notifications.
/// This is incompatible with glfs_set_volfile().
pub fn glfs_set_volfile_server(
fs: *mut glfs_t,
transport: *const c_char,
host: *const c_char,
port: c_int,
) -> c_int;
pub fn glfs_unset_volfile_server(
fs: *mut glfs_t,
transport: *const c_char,
host: *const c_char,
port: c_int,
) -> c_int;
/// This function specifies logging parameters for the virtual mount.
/// Default log file is /dev/null.
pub fn glfs_set_logging(fs: *mut glfs_t, logfile: *const c_char, loglevel: c_int) -> c_int;
/// This function initializes the glfs_t object. This consists of many steps:
/// Spawn a poll-loop thread.
/// Establish connection to management daemon and receive volume specification.
/// Construct translator graph and initialize graph.
/// Wait for initialization (connecting to all bricks) to complete.
pub fn glfs_init(fs: *mut glfs_t) -> c_int;
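    // Typical call sequence (sketch): glfs_new() -> glfs_set_volfile_server() ->
    // glfs_init() -> file operations -> glfs_fini().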
/// This function attempts to gracefully destroy glfs_t object. An attempt is
/// made to wait for all background processing to complete before returning.
/// glfs_fini() must be called after all operations on glfs_t is finished.
pub fn glfs_fini(fs: *mut glfs_t) -> c_int;
/// Get the volfile associated with the virtual mount
/// Sometimes it's useful e.g. for scripts to see the volfile, so that they
/// can parse it and find subvolumes to do things like split-brain resolution
/// or custom layouts. The API here was specifically intended to make access
/// e.g. from Python as simple as possible.
/// Note that the volume must be started (not necessarily mounted) for this
/// to work.
pub fn glfs_get_volfile(fs: *mut glfs_t, buf: *mut c_void, len: size_t) -> ssize_t;
/// This function when invoked for the first time sends RPC call to the
/// the management server (glusterd) to fetch volume uuid and stores it
/// in the glusterfs_context linked to the glfs object fs which can be used
/// in the subsequent calls. Later it parses that UUID to convert it from
    /// canonical string format into an opaque byte array and copy it into
    /// the volid array. In case either of the input parameters, volid or size,
/// is NULL, number of bytes required to copy the volume UUID is returned.
pub fn glfs_get_volumeid(fs: *mut Struct_glfs, volid: *mut c_char, size: size_t) -> c_int;
pub fn glfs_setfsuid(fsuid: uid_t) -> c_int;
pub fn glfs_setfsgid(fsgid: gid_t) -> c_int;
pub fn glfs_setfsgroups(size: size_t, list: *const gid_t) -> c_int;
/// This function opens a file on a virtual mount.
pub fn glfs_open(fs: *mut glfs_t, path: *const c_char, flags: c_int) -> *mut glfs_fd_t;
/// This function opens a file on a virtual mount.
pub fn glfs_creat(
fs: *mut glfs_t,
path: *const c_char,
flags: c_int,
mode: mode_t,
) -> *mut glfs_fd_t;
pub fn glfs_close(fd: *mut glfs_fd_t) -> c_int;
pub fn glfs_from_glfd(fd: *mut glfs_fd_t) -> *mut glfs_t;
pub fn glfs_set_xlator_option(
fs: *mut glfs_t,
xlator: *const c_char,
key: *const c_char,
value: *const c_char,
) -> c_int;
pub fn glfs_read(fd: *mut glfs_fd_t, buf: *mut c_void, count: size_t, flags: c_int) -> ssize_t;
pub fn glfs_write(
fd: *mut glfs_fd_t,
buf: *const c_void,
count: size_t,
flags: c_int,
) -> ssize_t;
pub fn glfs_read_async(
fd: *mut glfs_fd_t,
buf: *mut c_void,
count: size_t,
flags: c_int,
_fn: glfs_io_cbk,
data: *mut c_void,
) -> c_int;
pub fn glfs_write_async(
fd: *mut glfs_fd_t,
buf: *const c_void,
count: size_t,
flags: c_int,
_fn: glfs_io_cbk,
data: *mut c_void,
) -> c_int;
pub fn glfs_readv(
fd: *mut glfs_fd_t,
iov: *const iovec,
iovcnt: c_int,
flags: c_int,
) -> ssize_t;
pub fn glfs_writev(
fd: *mut glfs_fd_t,
iov: *const iovec,
iovcnt: c_int,
flags: c_int,
) -> ssize_t;
pub fn glfs_readv_async(
fd: *mut glfs_fd_t,
iov: *const iovec,
count: c_int,
flags: c_int,
_fn: glfs_io_cbk,
data: *mut c_void,
) -> c_int;
pub fn glfs_writev_async(
fd: *mut glfs_fd_t,
iov: *const iovec,
count: c_int,
flags: c_int,
_fn: glfs_io_cbk,
data: *mut c_void,
) -> c_int;
pub fn glfs_pread(
fd: *mut glfs_fd_t,
buf: *mut c_void,
count: size_t,
offset: off_t,
flags: c_int,
) -> ssize_t;
pub fn glfs_pwrite(
fd: *mut glfs_fd_t,
buf: *const c_void,
count: size_t,
offset: off_t,
flags: c_int,
) -> ssize_t;
pub fn glfs_pread_async(
fd: *mut glfs_fd_t,
buf: *mut c_void,
count: size_t,
offset: off_t,
flags: c_int,
_fn: glfs_io_cbk,
data: *mut c_void,
) -> c_int;
pub fn glfs_pwrite_async(
fd: *mut glfs_fd_t,
buf: *const c_void,
count: c_int,
offset: off_t,
flags: c_int,
_fn: glfs_io_cbk,
data: *mut c_void,
) -> c_int;
pub fn glfs_preadv(
fd: *mut glfs_fd_t,
iov: *const iovec,
iovcnt: c_int,
offset: off_t,
flags: c_int,
) -> ssize_t;
pub fn glfs_pwritev(
fd: *mut glfs_fd_t,
iov: *const iovec,
iovcnt: c_int,
offset: off_t,
flags: c_int,
) -> ssize_t;
pub fn glfs_preadv_async(
fd: *mut glfs_fd_t,
iov: *const iovec,
count: c_int,
offset: off_t,
flags: c_int,
_fn: glfs_io_cbk,
data: *mut c_void,
) -> c_int;
pub fn glfs_pwritev_async(
fd: *mut glfs_fd_t,
iov: *const iovec,
count: c_int,
offset: off_t,
flags: c_int,
_fn: glfs_io_cbk,
data: *mut c_void,
) -> c_int;
pub fn glfs_lseek(fd: *mut glfs_fd_t, offset: off_t, whence: c_int) -> off_t;
pub fn glfs_truncate(fs: *mut glfs_t, path: *const c_char, length: off_t) -> c_int;
pub fn glfs_ftruncate(fd: *mut glfs_fd_t, length: off_t) -> c_int;
pub fn glfs_ftruncate_async(
fd: *mut glfs_fd_t,
length: off_t,
_fn: glfs_io_cbk,
data: *mut c_void,
) -> c_int;
pub fn glfs_lstat(fs: *mut glfs_t, path: *const c_char, buf: *mut stat) -> c_int;
pub fn glfs_stat(fs: *mut glfs_t, path: *const c_char, buf: *mut stat) -> c_int;
pub fn glfs_fstat(fd: *mut glfs_fd_t, buf: *mut stat) -> c_int;
pub fn glfs_fsync(fd: *mut glfs_fd_t) -> c_int;
pub fn glfs_fsync_async(fd: *mut glfs_fd_t, _fn: glfs_io_cbk, data: *mut c_void) -> c_int;
pub fn glfs_fdatasync(fd: *mut glfs_fd_t) -> c_int;
pub fn glfs_fdatasync_async(fd: *mut glfs_fd_t, _fn: glfs_io_cbk, data: *mut c_void) -> c_int;
pub fn glfs_access(fs: *mut glfs_t, path: *const c_char, mode: c_int) -> c_int;
pub fn glfs_symlink(fs: *mut glfs_t, oldpath: *const c_char, newpath: *const c_char) -> c_int;
pub fn glfs_readlink(
fs: *mut glfs_t,
path: *const c_char,
buf: *mut c_char,
bufsiz: size_t,
) -> c_int;
pub fn glfs_mknod(fs: *mut glfs_t, path: *const c_char, mode: mode_t, dev: dev_t) -> c_int;
pub fn glfs_mkdir(fs: *mut glfs_t, path: *const c_char, mode: mode_t) -> c_int;
pub fn glfs_unlink(fs: *mut glfs_t, path: *const c_char) -> c_int;
pub fn glfs_rmdir(fs: *mut glfs_t, path: *const c_char) -> c_int;
pub fn glfs_rename(fs: *mut glfs_t, oldpath: *const c_char, newpath: *const c_char) -> c_int;
pub fn glfs_link(fs: *mut glfs_t, oldpath: *const c_char, newpath: *const c_char) -> c_int;
pub fn glfs_opendir(fs: *mut glfs_t, path: *const c_char) -> *mut glfs_fd_t;
/// glfs_readdir_r and glfs_readdirplus_r ARE thread safe AND re-entrant,
/// but the interface has ambiguity about the size of dirent to be allocated
/// before calling the APIs. 512 byte buffer (for dirent) is sufficient for
    /// all known systems which are tested against glusterfs/gfapi, but may be
/// insufficient in the future.
pub fn glfs_readdir_r(
fd: *mut glfs_fd_t,
dirent: *mut dirent,
result: *mut *mut dirent,
) -> c_int;
/// glfs_readdir_r and glfs_readdirplus_r ARE thread safe AND re-entrant,
/// but the interface has ambiguity about the size of dirent to be allocated
/// before calling the APIs. 512 byte buffer (for dirent) is sufficient for
    /// all known systems which are tested against glusterfs/gfapi, but may be
/// insufficient in the future.
pub fn glfs_readdirplus_r(
fd: *mut glfs_fd_t,
stat: *mut stat,
dirent: *mut dirent,
result: *mut *mut dirent,
) -> c_int;
/// glfs_readdir and glfs_readdirplus are NEITHER thread safe NOR re-entrant
/// when called on the same directory handle. However they ARE thread safe
/// AND re-entrant when called on different directory handles (which may be
/// referring to the same directory too.)
pub fn glfs_readdir(fd: *mut glfs_fd_t) -> *mut dirent;
pub fn glfs_readdirplus(fd: *mut glfs_fd_t, stat: *mut stat) -> *mut dirent;
pub fn glfs_telldir(fd: *mut glfs_fd_t) -> c_long;
pub fn glfs_seekdir(fd: *mut glfs_fd_t, offset: c_long) -> ();
pub fn glfs_closedir(fd: *mut glfs_fd_t) -> c_int;
pub fn glfs_statvfs(fs: *mut glfs_t, path: *const c_char, buf: *mut statvfs) -> c_int;
pub fn glfs_chmod(fs: *mut glfs_t, path: *const c_char, mode: mode_t) -> c_int;
pub fn glfs_fchmod(fd: *mut glfs_fd_t, mode: mode_t) -> c_int;
pub fn glfs_chown(fs: *mut glfs_t, path: *const c_char, uid: uid_t, gid: gid_t) -> c_int;
pub fn glfs_lchown(fs: *mut glfs_t, path: *const c_char, uid: uid_t, gid: gid_t) -> c_int;
pub fn glfs_fchown(fd: *mut glfs_fd_t, uid: uid_t, gid: gid_t) -> c_int;
pub fn glfs_utimens(fs: *mut glfs_t, path: *const c_char, times: *const timespec) -> c_int;
pub fn glfs_lutimens(fs: *mut glfs_t, path: *const c_char, times: *const timespec) -> c_int;
pub fn glfs_futimens(fd: *mut glfs_fd_t, times: *const timespec) -> c_int;
pub fn glfs_getxattr(
fs: *mut glfs_t,
path: *const c_char,
name: *const c_char,
value: *mut c_void,
size: size_t,
) -> ssize_t;
pub fn glfs_lgetxattr(
fs: *mut glfs_t,
path: *const c_char,
name: *const c_char,
value: *mut c_void,
size: size_t,
) -> ssize_t;
pub fn glfs_fgetxattr(
fd: *mut glfs_fd_t,
name: *const c_char,
value: *mut c_void,
size: size_t,
) -> ssize_t;
pub fn glfs_listxattr(
fs: *mut glfs_t,
path: *const c_char,
value: *mut c_void,
size: size_t,
) -> ssize_t;
pub fn glfs_llistxattr(
fs: *mut glfs_t,
path: *const c_char,
value: *mut c_void,
size: size_t,
) -> ssize_t;
pub fn glfs_flistxattr(fd: *mut glfs_fd_t, value: *mut c_void, size: size_t) -> ssize_t;
pub fn glfs_setxattr(
fs: *mut glfs_t,
path: *const c_char,
name: *const c_char,
value: *const c_void,
size: size_t,
flags: c_int,
) -> c_int;
pub fn glfs_lsetxattr(
fs: *mut glfs_t,
path: *const c_char,
name: *const c_char,
value: *const c_void,
size: size_t,
flags: c_int,
) -> c_int;
pub fn glfs_fsetxattr(
fd: *mut glfs_fd_t,
name: *const c_char,
value: *const c_void,
size: size_t,
flags: c_int,
) -> c_int;
pub fn glfs_removexattr(fs: *mut glfs_t, path: *const c_char, name: *const c_char) -> c_int;
pub fn glfs_lremovexattr(fs: *mut glfs_t, path: *const c_char, name: *const c_char) -> c_int;
pub fn glfs_fremovexattr(fd: *mut glfs_fd_t, name: *const c_char) -> c_int;
pub fn glfs_fallocate(
fd: *mut glfs_fd_t,
keep_size: c_int,
offset: off_t,
len: size_t,
) -> c_int;
pub fn glfs_discard(fd: *mut glfs_fd_t, offset: off_t, len: size_t) -> c_int;
pub fn glfs_discard_async(
fd: *mut glfs_fd_t,
length: off_t,
lent: size_t,
_fn: glfs_io_cbk,
data: *mut c_void,
) -> c_int;
pub fn glfs_zerofill(fd: *mut glfs_fd_t, offset: off_t, len: off_t) -> c_int;<|fim▁hole|> pub fn glfs_zerofill_async(
fd: *mut glfs_fd_t,
length: off_t,
len: off_t,
_fn: glfs_io_cbk,
data: *mut c_void,
) -> c_int;
pub fn glfs_getcwd(fs: *mut glfs_t, buf: *mut c_char, size: size_t) -> *mut c_char;
pub fn glfs_chdir(fs: *mut glfs_t, path: *const c_char) -> c_int;
pub fn glfs_fchdir(fd: *mut glfs_fd_t) -> c_int;
pub fn glfs_realpath(
fs: *mut glfs_t,
path: *const c_char,
resolved_path: *mut c_char,
) -> *mut c_char;
pub fn glfs_posix_lock(fd: *mut glfs_fd_t, cmd: c_int, flock: *mut flock) -> c_int;
pub fn glfs_dup(fd: *mut glfs_fd_t) -> *mut glfs_fd_t;
}<|fim▁end|>
| |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import url<|fim▁hole|>urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^post/(?P<pk>[0-9]+)/$', views.detail, name='detail'),
url(r'^archive/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/$', views.archives, name='archive'),
url(r'^category/(?P<pk>[0-9]+)/$', views.categories, name='category'),
url(r'^tag/(?P<pk>[0-9]+)/$', views.get_posts_by_tag, name='tag'),
]<|fim▁end|>
|
from . import views
|
<|file_name|>test_query.py<|end_file_name|><|fim▁begin|># Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestQuery(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.datastore.query import Query
return Query
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_defaults_wo_implicit_dataset_id(self):
self.assertRaises(ValueError, self._makeOne)
def test_ctor_defaults_w_implicit_dataset_id(self):
from gcloud._testing import _Monkey
from gcloud.datastore import _implicit_environ
_DATASET = 'DATASET'
with _Monkey(_implicit_environ, DATASET_ID=_DATASET):
query = self._makeOne()
self.assertEqual(query.dataset_id, _DATASET)
self.assertEqual(query.kind, None)
self.assertEqual(query.namespace, None)
self.assertEqual(query.ancestor, None)
self.assertEqual(query.filters, [])
self.assertEqual(query.projection, [])
self.assertEqual(query.order, [])
self.assertEqual(query.group_by, [])
def test_ctor_explicit(self):
from gcloud.datastore.key import Key
_DATASET = 'DATASET'
_KIND = 'KIND'
_NAMESPACE = 'NAMESPACE'
ancestor = Key('ANCESTOR', 123, dataset_id=_DATASET)
FILTERS = [('foo', '=', 'Qux'), ('bar', '<', 17)]
PROJECTION = ['foo', 'bar', 'baz']
ORDER = ['foo', 'bar']
GROUP_BY = ['foo']
query = self._makeOne(
dataset_id=_DATASET,
kind=_KIND,
namespace=_NAMESPACE,
ancestor=ancestor,
filters=FILTERS,
projection=PROJECTION,
order=ORDER,
group_by=GROUP_BY,
)
self.assertEqual(query.dataset_id, _DATASET)
self.assertEqual(query.kind, _KIND)
self.assertEqual(query.namespace, _NAMESPACE)
self.assertEqual(query.ancestor.path, ancestor.path)
self.assertEqual(query.filters, FILTERS)
self.assertEqual(query.projection, PROJECTION)
self.assertEqual(query.order, ORDER)
self.assertEqual(query.group_by, GROUP_BY)
def test_namespace_setter_w_non_string(self):
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
def _assign(val):
query.namespace = val
self.assertRaises(ValueError, _assign, object())
def test_namespace_setter(self):
_DATASET = 'DATASET'
_NAMESPACE = 'NAMESPACE'
query = self._makeOne(_DATASET)
query.namespace = _NAMESPACE
self.assertEqual(query.dataset_id, _DATASET)
self.assertEqual(query.namespace, _NAMESPACE)
def test_kind_setter_w_non_string(self):
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
def _assign(val):
query.kind = val
self.assertRaises(TypeError, _assign, object())
def test_kind_setter_wo_existing(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
query = self._makeOne(_DATASET)
query.kind = _KIND
self.assertEqual(query.dataset_id, _DATASET)
self.assertEqual(query.kind, _KIND)
def test_kind_setter_w_existing(self):
_DATASET = 'DATASET'
_KIND_BEFORE = 'KIND_BEFORE'
_KIND_AFTER = 'KIND_AFTER'
query = self._makeOne(_DATASET, _KIND_BEFORE)
self.assertEqual(query.kind, _KIND_BEFORE)
query.kind = _KIND_AFTER
self.assertEqual(query.dataset_id, _DATASET)
self.assertEqual(query.kind, _KIND_AFTER)
def test_ancestor_setter_w_non_key(self):
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
def _assign(val):
query.ancestor = val
self.assertRaises(TypeError, _assign, object())
self.assertRaises(TypeError, _assign, ['KIND', 'NAME'])
def test_ancestor_setter_w_key(self):
from gcloud.datastore.key import Key
_DATASET = 'DATASET'
_NAME = u'NAME'
key = Key('KIND', 123, dataset_id='DATASET')
query = self._makeOne(_DATASET)
query.add_filter('name', '=', _NAME)
query.ancestor = key
self.assertEqual(query.ancestor.path, key.path)
def test_ancestor_deleter_w_key(self):
from gcloud.datastore.key import Key
_DATASET = 'DATASET'
key = Key('KIND', 123, dataset_id='DATASET')
query = self._makeOne(_DATASET, ancestor=key)
del query.ancestor
self.assertTrue(query.ancestor is None)
def test_add_filter_setter_w_unknown_operator(self):
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
self.assertRaises(ValueError, query.add_filter,
'firstname', '~~', 'John')
def test_add_filter_w_known_operator(self):
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
query.add_filter('firstname', '=', u'John')
self.assertEqual(query.filters, [('firstname', '=', u'John')])
def test_add_filter_w_all_operators(self):
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
query.add_filter('leq_prop', '<=', u'val1')
query.add_filter('geq_prop', '>=', u'val2')
query.add_filter('lt_prop', '<', u'val3')
query.add_filter('gt_prop', '>', u'val4')
query.add_filter('eq_prop', '=', u'val5')
self.assertEqual(len(query.filters), 5)
self.assertEqual(query.filters[0], ('leq_prop', '<=', u'val1'))
self.assertEqual(query.filters[1], ('geq_prop', '>=', u'val2'))
self.assertEqual(query.filters[2], ('lt_prop', '<', u'val3'))
self.assertEqual(query.filters[3], ('gt_prop', '>', u'val4'))
self.assertEqual(query.filters[4], ('eq_prop', '=', u'val5'))
def test_add_filter_w_known_operator_and_entity(self):
from gcloud.datastore.entity import Entity
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
other = Entity()
other['firstname'] = u'John'
other['lastname'] = u'Smith'
query.add_filter('other', '=', other)
self.assertEqual(query.filters, [('other', '=', other)])<|fim▁hole|> def test_add_filter_w_whitespace_property_name(self):
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
PROPERTY_NAME = ' property with lots of space '
query.add_filter(PROPERTY_NAME, '=', u'John')
self.assertEqual(query.filters, [(PROPERTY_NAME, '=', u'John')])
def test_add_filter___key__valid_key(self):
from gcloud.datastore.key import Key
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
key = Key('Foo', dataset_id='DATASET')
query.add_filter('__key__', '=', key)
self.assertEqual(query.filters, [('__key__', '=', key)])
def test_filter___key__invalid_operator(self):
from gcloud.datastore.key import Key
_DATASET = 'DATASET'
key = Key('Foo', dataset_id='DATASET')
query = self._makeOne(_DATASET)
self.assertRaises(ValueError, query.add_filter, '__key__', '<', key)
def test_filter___key__invalid_value(self):
_DATASET = 'DATASET'
query = self._makeOne(_DATASET)
self.assertRaises(ValueError, query.add_filter, '__key__', '=', None)
def test_projection_setter_empty(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
query = self._makeOne(_DATASET, _KIND)
query.projection = []
self.assertEqual(query.projection, [])
def test_projection_setter_string(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
query = self._makeOne(_DATASET, _KIND)
query.projection = 'field1'
self.assertEqual(query.projection, ['field1'])
def test_projection_setter_non_empty(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
query = self._makeOne(_DATASET, _KIND)
query.projection = ['field1', 'field2']
self.assertEqual(query.projection, ['field1', 'field2'])
def test_projection_setter_multiple_calls(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
_PROJECTION1 = ['field1', 'field2']
_PROJECTION2 = ['field3']
query = self._makeOne(_DATASET, _KIND)
query.projection = _PROJECTION1
self.assertEqual(query.projection, _PROJECTION1)
query.projection = _PROJECTION2
self.assertEqual(query.projection, _PROJECTION2)
def test_keys_only(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
query = self._makeOne(_DATASET, _KIND)
query.keys_only()
self.assertEqual(query.projection, ['__key__'])
def test_order_setter_empty(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
query = self._makeOne(_DATASET, _KIND, order=['foo', '-bar'])
query.order = []
self.assertEqual(query.order, [])
def test_order_setter_string(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
query = self._makeOne(_DATASET, _KIND)
query.order = 'field'
self.assertEqual(query.order, ['field'])
def test_order_setter_single_item_list_desc(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
query = self._makeOne(_DATASET, _KIND)
query.order = ['-field']
self.assertEqual(query.order, ['-field'])
def test_order_setter_multiple(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
query = self._makeOne(_DATASET, _KIND)
query.order = ['foo', '-bar']
self.assertEqual(query.order, ['foo', '-bar'])
def test_group_by_setter_empty(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
query = self._makeOne(_DATASET, _KIND, group_by=['foo', 'bar'])
query.group_by = []
self.assertEqual(query.group_by, [])
def test_group_by_setter_string(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
query = self._makeOne(_DATASET, _KIND)
query.group_by = 'field1'
self.assertEqual(query.group_by, ['field1'])
def test_group_by_setter_non_empty(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
query = self._makeOne(_DATASET, _KIND)
query.group_by = ['field1', 'field2']
self.assertEqual(query.group_by, ['field1', 'field2'])
def test_group_by_multiple_calls(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
_GROUP_BY1 = ['field1', 'field2']
_GROUP_BY2 = ['field3']
query = self._makeOne(_DATASET, _KIND)
query.group_by = _GROUP_BY1
self.assertEqual(query.group_by, _GROUP_BY1)
query.group_by = _GROUP_BY2
self.assertEqual(query.group_by, _GROUP_BY2)
def test_fetch_defaults_wo_implicit_connection(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
query = self._makeOne(_DATASET, _KIND)
self.assertRaises(ValueError, query.fetch)
def test_fetch_defaults_w_implicit_connection(self):
from gcloud._testing import _Monkey
from gcloud.datastore import _implicit_environ
_DATASET = 'DATASET'
_KIND = 'KIND'
connection = _Connection()
query = self._makeOne(_DATASET, _KIND)
with _Monkey(_implicit_environ, CONNECTION=connection):
iterator = query.fetch()
self.assertTrue(iterator._query is query)
self.assertEqual(iterator._limit, None)
self.assertEqual(iterator._offset, 0)
def test_fetch_explicit(self):
_DATASET = 'DATASET'
_KIND = 'KIND'
connection = _Connection()
query = self._makeOne(_DATASET, _KIND)
iterator = query.fetch(limit=7, offset=8, connection=connection)
self.assertTrue(iterator._query is query)
self.assertEqual(iterator._limit, 7)
self.assertEqual(iterator._offset, 8)
class TestIterator(unittest2.TestCase):
_DATASET = 'DATASET'
_NAMESPACE = 'NAMESPACE'
_KIND = 'KIND'
_ID = 123
_START = b'\x00'
_END = b'\xFF'
def _getTargetClass(self):
from gcloud.datastore.query import Iterator
return Iterator
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def _addQueryResults(self, connection, cursor=_END, more=False):
from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
MORE = datastore_pb.QueryResultBatch.NOT_FINISHED
NO_MORE = datastore_pb.QueryResultBatch.MORE_RESULTS_AFTER_LIMIT
_ID = 123
entity_pb = datastore_pb.Entity()
entity_pb.key.partition_id.dataset_id = self._DATASET
path_element = entity_pb.key.path_element.add()
path_element.kind = self._KIND
path_element.id = _ID
prop = entity_pb.property.add()
prop.name = 'foo'
prop.value.string_value = u'Foo'
connection._results.append(
([entity_pb], cursor, MORE if more else NO_MORE))
def test_ctor_defaults(self):
connection = _Connection()
query = object()
iterator = self._makeOne(query, connection)
self.assertTrue(iterator._query is query)
self.assertEqual(iterator._limit, None)
self.assertEqual(iterator._offset, 0)
def test_ctor_explicit(self):
connection = _Connection()
query = _Query()
iterator = self._makeOne(query, connection, 13, 29)
self.assertTrue(iterator._query is query)
self.assertEqual(iterator._limit, 13)
self.assertEqual(iterator._offset, 29)
def test_next_page_no_cursors_no_more(self):
from base64 import b64encode
from gcloud.datastore.query import _pb_from_query
connection = _Connection()
query = _Query(self._DATASET, self._KIND, self._NAMESPACE)
self._addQueryResults(connection)
iterator = self._makeOne(query, connection)
entities, more_results, cursor = iterator.next_page()
self.assertEqual(cursor, b64encode(self._END))
self.assertFalse(more_results)
self.assertFalse(iterator._more_results)
self.assertEqual(len(entities), 1)
self.assertEqual(entities[0].key.path,
[{'kind': self._KIND, 'id': self._ID}])
self.assertEqual(entities[0]['foo'], u'Foo')
qpb = _pb_from_query(query)
qpb.offset = 0
EXPECTED = {
'dataset_id': self._DATASET,
'query_pb': qpb,
'namespace': self._NAMESPACE,
'transaction_id': None,
}
self.assertEqual(connection._called_with, [EXPECTED])
def test_next_page_no_cursors_no_more_w_offset_and_limit(self):
from base64 import b64encode
from gcloud.datastore.query import _pb_from_query
connection = _Connection()
query = _Query(self._DATASET, self._KIND, self._NAMESPACE)
self._addQueryResults(connection)
iterator = self._makeOne(query, connection, 13, 29)
entities, more_results, cursor = iterator.next_page()
self.assertEqual(cursor, b64encode(self._END))
self.assertFalse(more_results)
self.assertFalse(iterator._more_results)
self.assertEqual(len(entities), 1)
self.assertEqual(entities[0].key.path,
[{'kind': self._KIND, 'id': self._ID}])
self.assertEqual(entities[0]['foo'], u'Foo')
qpb = _pb_from_query(query)
qpb.limit = 13
qpb.offset = 29
EXPECTED = {
'dataset_id': self._DATASET,
'query_pb': qpb,
'namespace': self._NAMESPACE,
'transaction_id': None,
}
self.assertEqual(connection._called_with, [EXPECTED])
def test_next_page_w_cursors_w_more(self):
from base64 import b64decode
from base64 import b64encode
from gcloud.datastore.query import _pb_from_query
connection = _Connection()
query = _Query(self._DATASET, self._KIND, self._NAMESPACE)
self._addQueryResults(connection, cursor=self._END, more=True)
iterator = self._makeOne(query, connection)
iterator._start_cursor = self._START
iterator._end_cursor = self._END
entities, more_results, cursor = iterator.next_page()
self.assertEqual(cursor, b64encode(self._END))
self.assertTrue(more_results)
self.assertTrue(iterator._more_results)
self.assertEqual(iterator._end_cursor, None)
self.assertEqual(b64decode(iterator._start_cursor), self._END)
self.assertEqual(len(entities), 1)
self.assertEqual(entities[0].key.path,
[{'kind': self._KIND, 'id': self._ID}])
self.assertEqual(entities[0]['foo'], u'Foo')
qpb = _pb_from_query(query)
qpb.offset = 0
qpb.start_cursor = b64decode(self._START)
qpb.end_cursor = b64decode(self._END)
EXPECTED = {
'dataset_id': self._DATASET,
'query_pb': qpb,
'namespace': self._NAMESPACE,
'transaction_id': None,
}
self.assertEqual(connection._called_with, [EXPECTED])
def test_next_page_w_cursors_w_bogus_more(self):
connection = _Connection()
query = _Query(self._DATASET, self._KIND, self._NAMESPACE)
self._addQueryResults(connection, cursor=self._END, more=True)
epb, cursor, _ = connection._results.pop()
connection._results.append((epb, cursor, 4)) # invalid enum
iterator = self._makeOne(query, connection)
self.assertRaises(ValueError, iterator.next_page)
def test___iter___no_more(self):
from gcloud.datastore.query import _pb_from_query
connection = _Connection()
query = _Query(self._DATASET, self._KIND, self._NAMESPACE)
self._addQueryResults(connection)
iterator = self._makeOne(query, connection)
entities = list(iterator)
self.assertFalse(iterator._more_results)
self.assertEqual(len(entities), 1)
self.assertEqual(entities[0].key.path,
[{'kind': self._KIND, 'id': self._ID}])
self.assertEqual(entities[0]['foo'], u'Foo')
qpb = _pb_from_query(query)
qpb.offset = 0
EXPECTED = {
'dataset_id': self._DATASET,
'query_pb': qpb,
'namespace': self._NAMESPACE,
'transaction_id': None,
}
self.assertEqual(connection._called_with, [EXPECTED])
def test___iter___w_more(self):
from gcloud.datastore.query import _pb_from_query
connection = _Connection()
query = _Query(self._DATASET, self._KIND, self._NAMESPACE)
self._addQueryResults(connection, cursor=self._END, more=True)
self._addQueryResults(connection)
iterator = self._makeOne(query, connection)
entities = list(iterator)
self.assertFalse(iterator._more_results)
self.assertEqual(len(entities), 2)
for entity in entities:
self.assertEqual(
entity.key.path,
[{'kind': self._KIND, 'id': self._ID}])
self.assertEqual(entities[1]['foo'], u'Foo')
qpb1 = _pb_from_query(query)
qpb1.offset = 0
qpb2 = _pb_from_query(query)
qpb2.offset = 0
qpb2.start_cursor = self._END
EXPECTED1 = {
'dataset_id': self._DATASET,
'query_pb': qpb1,
'namespace': self._NAMESPACE,
'transaction_id': None,
}
EXPECTED2 = {
'dataset_id': self._DATASET,
'query_pb': qpb2,
'namespace': self._NAMESPACE,
'transaction_id': None,
}
self.assertEqual(len(connection._called_with), 2)
self.assertEqual(connection._called_with[0], EXPECTED1)
self.assertEqual(connection._called_with[1], EXPECTED2)
class Test__pb_from_query(unittest2.TestCase):
def _callFUT(self, query):
from gcloud.datastore.query import _pb_from_query
return _pb_from_query(query)
def test_empty(self):
from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
pb = self._callFUT(_Query())
self.assertEqual(list(pb.projection), [])
self.assertEqual(list(pb.kind), [])
self.assertEqual(list(pb.order), [])
self.assertEqual(list(pb.group_by), [])
self.assertEqual(pb.filter.property_filter.property.name, '')
cfilter = pb.filter.composite_filter
self.assertEqual(cfilter.operator, datastore_pb.CompositeFilter.AND)
self.assertEqual(list(cfilter.filter), [])
self.assertEqual(pb.start_cursor, b'')
self.assertEqual(pb.end_cursor, b'')
self.assertEqual(pb.limit, 0)
self.assertEqual(pb.offset, 0)
def test_projection(self):
pb = self._callFUT(_Query(projection=['a', 'b', 'c']))
self.assertEqual([item.property.name for item in pb.projection],
['a', 'b', 'c'])
def test_kind(self):
pb = self._callFUT(_Query(kind='KIND'))
self.assertEqual([item.name for item in pb.kind], ['KIND'])
def test_ancestor(self):
from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
from gcloud.datastore.key import Key
from gcloud.datastore.helpers import _prepare_key_for_request
ancestor = Key('Ancestor', 123, dataset_id='DATASET')
pb = self._callFUT(_Query(ancestor=ancestor))
cfilter = pb.filter.composite_filter
self.assertEqual(cfilter.operator, datastore_pb.CompositeFilter.AND)
self.assertEqual(len(cfilter.filter), 1)
pfilter = cfilter.filter[0].property_filter
self.assertEqual(pfilter.property.name, '__key__')
ancestor_pb = _prepare_key_for_request(ancestor.to_protobuf())
self.assertEqual(pfilter.value.key_value, ancestor_pb)
def test_filter(self):
from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
query = _Query(filters=[('name', '=', u'John')])
query.OPERATORS = {
'=': datastore_pb.PropertyFilter.EQUAL,
}
pb = self._callFUT(query)
cfilter = pb.filter.composite_filter
self.assertEqual(cfilter.operator, datastore_pb.CompositeFilter.AND)
self.assertEqual(len(cfilter.filter), 1)
pfilter = cfilter.filter[0].property_filter
self.assertEqual(pfilter.property.name, 'name')
self.assertEqual(pfilter.value.string_value, u'John')
def test_filter_key(self):
from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
from gcloud.datastore.key import Key
from gcloud.datastore.helpers import _prepare_key_for_request
key = Key('Kind', 123, dataset_id='DATASET')
query = _Query(filters=[('__key__', '=', key)])
query.OPERATORS = {
'=': datastore_pb.PropertyFilter.EQUAL,
}
pb = self._callFUT(query)
cfilter = pb.filter.composite_filter
self.assertEqual(cfilter.operator, datastore_pb.CompositeFilter.AND)
self.assertEqual(len(cfilter.filter), 1)
pfilter = cfilter.filter[0].property_filter
self.assertEqual(pfilter.property.name, '__key__')
key_pb = _prepare_key_for_request(key.to_protobuf())
self.assertEqual(pfilter.value.key_value, key_pb)
def test_order(self):
from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
pb = self._callFUT(_Query(order=['a', '-b', 'c']))
self.assertEqual([item.property.name for item in pb.order],
['a', 'b', 'c'])
self.assertEqual([item.direction for item in pb.order],
[datastore_pb.PropertyOrder.ASCENDING,
datastore_pb.PropertyOrder.DESCENDING,
datastore_pb.PropertyOrder.ASCENDING])
def test_group_by(self):
pb = self._callFUT(_Query(group_by=['a', 'b', 'c']))
self.assertEqual([item.name for item in pb.group_by],
['a', 'b', 'c'])
class _Query(object):
def __init__(self,
dataset_id=None,
kind=None,
namespace=None,
ancestor=None,
filters=(),
projection=(),
order=(),
group_by=()):
self.dataset_id = dataset_id
self.kind = kind
self.namespace = namespace
self.ancestor = ancestor
self.filters = filters
self.projection = projection
self.order = order
self.group_by = group_by
class _Connection(object):
_called_with = None
_cursor = b'\x00'
_skipped = 0
def __init__(self):
self._results = []
self._called_with = []
def run_query(self, **kw):
self._called_with.append(kw)
result, self._results = self._results[0], self._results[1:]
return result<|fim▁end|>
| |
<|file_name|>matching.rs<|end_file_name|><|fim▁begin|>use std::collections::HashMap;
use std::cmp::Ordering;
use std::cmp;
use std::iter::Iterator;
use fancy_regex::Regex as FancyRegex;
use regex::Regex;
use chrono::{NaiveDate, Datelike, Local};
use scoring;
use keygraph_rs::*;
include!(concat!(env!("OUT_DIR"), "/frequency_data.rs"));
lazy_static! {
/// This map goes the other way in the original implementation.
/// However, this complicates the logic and requires another map to be made
/// inside the l33t_dictionary_match. This was deemed a cleaner and simpler
/// implementation.
static ref L33T_TABLE: HashMap<char, &'static str> = {
let mut m = HashMap::new();
m.insert('4', "a");
m.insert('@', "a");
m.insert('8', "b");
m.insert('(', "c");
m.insert('{', "c");
m.insert('[', "c");
m.insert('<', "c");
m.insert('3', "e");
m.insert('6', "g");
m.insert('9', "g");
m.insert('1', "il");
m.insert('!', "il");
m.insert('|', "i");
m.insert('7', "lt");
m.insert('0', "o");
m.insert('$', "s");
m.insert('5', "s");
m.insert('+', "t");
m.insert('%', "x");
m.insert('2', "z");
m
};
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct L33tData {
/// Hashmap containing a key of l33t characters and a string of the characters
/// they replace
pub l33t_subs: HashMap<char, String>,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum MatchData {
/// Used for matches which don't require metadata.
Plain,
Dictionary {
matched_word: String,
rank: usize,
dictionary_name: String,
reversed: bool,
l33t: Option<L33tData>,
},
Spatial {
graph: String,
turns: usize,
shifted_count: usize,
},
Repeat {
base_token: String,
base_guesses: u64,
repeat_count: usize,
},
Sequence {
name: String,
space: u32,
ascending: bool,
},
Regex {
name: String,
},
Date {
separator: char,
date: NaiveDate,
},
}
#[derive(Clone, Debug, Eq)]
pub struct BaseMatch {
pub pattern: String,
pub start: usize,
pub end: usize,
pub token: String,
pub data: MatchData,
}
impl Ord for BaseMatch {
fn cmp(&self, other: &BaseMatch) -> Ordering {
let t1 = (self.start, self.end);
let t2 = (other.start, other.end);
t1.cmp(&t2)
}
}
impl PartialOrd for BaseMatch {
fn partial_cmp(&self, other: &BaseMatch) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for BaseMatch {
fn eq(&self, other: &BaseMatch) -> bool {
(self.start, self.end) == (other.start, other.end)
}
}
pub fn matches_from_all_dicts(password: &str,
matcher: &Fn(&str, &str, &[&str])->Vec<BaseMatch>) -> Vec<BaseMatch> {
let dicts:HashMap<&str, &[&str]> = {
let mut m = HashMap::new();
m.insert("Female names", FEMALE_NAMES);
m.insert("Male names", MALE_NAMES);
m.insert("Surnames", SURNAMES);
m.insert("Passwords", PASSWORDS);
m.insert("Wikipedia", ENGLISH_WIKIPEDIA);
m.insert("TV and Film", US_TV_AND_FILM);
m
};
dicts.iter()
.map(|(&k, &v)| matcher(password, k, v))
.flat_map(|x| x.into_iter())
.collect::<Vec<BaseMatch>>()
}
/// Matches the password against every matcher and returns all of the matches found.
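/// A minimal usage sketch (illustrative only; the exact matches returned
/// depend on the bundled frequency data and keyboard graphs):
///
/// ```ignore
/// for m in omnimatch("pa$$w0rd1987") {
///     println!("{} [{}..{}] {}", m.pattern, m.start, m.end, m.token);
/// }
/// ```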
pub fn omnimatch(password: &str) -> Vec<BaseMatch> {
let default_regex:HashMap<String, Regex> = {
let mut m = HashMap::new();
m.insert(String::from("recent year"),
Regex::new(r"19\d\d|200\d|201\d").unwrap());
m
};
let mut result:Vec<BaseMatch> = Vec::new();
result.append(&mut matches_from_all_dicts(password, &dictionary_match));
result.append(&mut matches_from_all_dicts(password, &reverse_dictionary_match));
result.append(&mut matches_from_all_dicts(password, &l33t_match));
result.append(&mut sequence_match(password));
result.append(&mut regex_match(password, default_regex));
result.append(&mut date_match(password));
result.append(&mut repeat_match(password));
result.append(&mut spatial_match(password));
result.sort();
result
}
fn dictionary_match(password: &str,
dictionary_name: &str,
dictionary: &[&str]) -> Vec<BaseMatch> {
let mut matches: Vec<BaseMatch> = Vec::new();
let lower = password.to_lowercase();
for i in 0..password.len() {
for j in i..password.len() {
let slice = &lower[i..j + 1];
if let Some(pass) = dictionary.iter().position(|&x| x == slice) {
let dict = MatchData::Dictionary {
matched_word: slice.to_string(),
rank: pass + 1,
dictionary_name: dictionary_name.to_string(),
reversed: false,
l33t: None,
};
matches.push(BaseMatch {
pattern: String::from("Dictionary"),
start: i,
end: j,
token: password[i..j+1].to_string(),
data: dict,
});
}
}
}
matches.sort();
matches
}
#[test]
fn dictionary_test() {
let m = dictionary_match("password", "test", &["pass", "password", "dave"]);
assert_eq!(m.len(), 2);
for temp in m.iter() {
match temp.data {
// Simple test
MatchData::Dictionary{ref matched_word, ..} => assert!(matched_word != "dave"),
_ => assert!(false),
}
}
}
pub fn reverse_dictionary_match(password: &str,
dictionary_name: &str,
dictionary: &[&str]) -> Vec<BaseMatch> {
let length = password.chars().count();
let reversed = password.chars().rev().collect::<String>();
let mut matches = dictionary_match(reversed.as_ref(),
dictionary_name,
dictionary);
for m in matches.iter_mut() {
m.token = m.token.chars().rev().collect::<String>();
let (start, end) = (length - 1 - m.end, length - 1 - m.start);
m.start = start;
m.end = end;
match m.data {
MatchData::Dictionary{ref mut reversed, ..} => *reversed = true,
_ => {}
}
}
matches.sort();
matches
}
#[test]
fn reverse_test() {
let m = reverse_dictionary_match("drowssap", "test", &["password"]);
assert_eq!(m.len(), 1);
let ref temp = m[0];
assert_eq!("drowssap", temp.token);
match temp.data {
MatchData::Dictionary{ref matched_word, ref rank,
ref reversed, ref l33t, ..} => {
assert_eq!(*reversed, true);
assert_eq!(*matched_word, "password");
assert_eq!(*rank, 1);
assert_eq!(*l33t, None);
}
_ => assert!(false),
}
}
fn replace_single_l33t_char(c: &char) -> char {
let res = L33T_TABLE.get(c);
match res {
Some(s) => {
if s.chars().count() == 1 {
s.chars().nth(0).unwrap_or(*c)
} else {
*c
}
}
None => *c,
}
}
fn check_l33t_sub(password: &str,
sub: &str,
dictionary_name: &str,
dictionary: &[&str]) -> Vec<BaseMatch> {
let mut tm = dictionary_match(sub, dictionary_name, dictionary);
for m in tm.iter_mut() {
m.token = password[m.start..(m.end + 1)].to_string();
match m.data {
MatchData::Dictionary{ref mut l33t, ref matched_word, ..} => {
let mut tmap:HashMap<char, String> = HashMap::new();
for (k, v) in m.token.chars().zip(matched_word.chars()) {
if k == v {
continue;
}
if tmap.contains_key(&k) {
let c_as_s = v.to_string();
let ref mut value = tmap.get_mut(&k).unwrap();
if false == value.contains(v) {
value.push_str(c_as_s.as_ref());
}
} else {
tmap.insert(k, v.to_string());
}
}
*l33t = Some(L33tData { l33t_subs: tmap });
},
_ => {},
}
}
tm
}
/// l33t match assumes that a symbol which can mean multiple letters will only
/// be used for one of those letters during a match.
/// Behaviour currently differs slightly from the Dropbox reference implementation here.
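/// A usage sketch mirroring the unit test below (the dictionary name and word
/// list are arbitrary examples, not a bundled dictionary):
///
/// ```ignore
/// let hits = l33t_match("pa$$w0rd", "demo", &["password", "pass"]);
/// assert_eq!(hits.len(), 2); // '$' -> 's' and '0' -> 'o' are substituted consistently
/// ```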
pub fn l33t_match(password: &str,
dictionary_name: &str,
dictionary: &[&str]) -> Vec<BaseMatch> {
let mut matches: Vec<BaseMatch> = Vec::new();
// First we do all the simple subs. Then go through permutations
let partial_sub: String = password.chars()
.map(|c| replace_single_l33t_char(&c))
.collect();
let remaining_l33ts = partial_sub.chars()
.fold(0u32, |acc, c| acc + L33T_TABLE.contains_key(&c) as u32);
if remaining_l33ts == 0 && partial_sub != password {
let mut tm = check_l33t_sub(password, partial_sub.as_ref(),
dictionary_name, dictionary);
matches.append(&mut tm);
} else if remaining_l33ts > 0 {
let subtable = L33T_TABLE.iter()
.filter(|&(k, _)| partial_sub.contains(*k))
.map(|(k, v)| (*k, *v))
.collect::<Vec<(char, &str)>>();
let sizes = subtable.iter()
.map(|&(_, v)| (*v).chars().count())
.collect::<Vec<usize>>();
let mut current = 0;
let mut indexes: Vec<usize> = vec![0; sizes.len()];
while current != sizes.len() {
let sub = subtable.iter()
.enumerate()
.map(|(i, &(k, v))| (v.chars().nth(indexes[i]).unwrap(), k))
.collect::<HashMap<char, char>>();
if sub.len() == sizes.len() {
let sub = sub.iter()
.map(|(k, v)| (*v, *k))
.collect::<HashMap<char, char>>();
let full_sub = partial_sub.chars()
.map(|c| {
match sub.get(&c) {
Some(v) => *v,
None => c,
}
})
.collect::<String>();
let mut tm = check_l33t_sub(password, full_sub.as_ref(),
dictionary_name, dictionary);
matches.append(&mut tm);
}
indexes[current] += 1;
if indexes[current] == sizes[current] {
indexes[current] = 0;
current += 1;
if current < sizes.len() {
indexes[current] += 1;
}
}
}
}
matches.sort();
matches
}
#[test]
fn l33t_match_test() {
let m = l33t_match("pa$$w0rd", "t3st", &["password", "pass"]);
assert_eq!(m.len(), 2);
for temp in m.iter() {
println!("{:?}\n", temp);
match temp.data {<|fim▁hole|> MatchData::Dictionary{ref l33t, ..} => {
assert!(l33t.is_some());
},
_ => assert!(false),
}
}
let m = l33t_match("!llus1on", "t3st", &["illusion"]);
assert_eq!(m.len(), 0);
}
fn sequence_update(token:&str,
i:usize,
j:usize,
delta:i16) -> Option<BaseMatch> {
let mut result:Option<BaseMatch> = None;
let max_delta = 5;
if (j as i32 - i as i32) > 1 || delta.abs() == 1 {
if 0 < delta.abs() && delta.abs() <= max_delta {
let lower = Regex::new(r"^[a-z]+$").unwrap();
let upper = Regex::new(r"^[A-Z]+$").unwrap();
let digits = Regex::new(r"^\d+$").unwrap();
let(name, space) = if lower.is_match(token) {
("lower".to_string(), 26u32)
} else if upper.is_match(token) {
("upper".to_string(), 26u32)
} else if digits.is_match(token) {
("digits".to_string(), 10u32)
} else {
("unicode".to_string(), 26u32)
};
let data = MatchData::Sequence {
name:name,
space:space,
ascending: delta>0
};
let res = BaseMatch{
pattern: String::from("Sequence"),
start: i,
end: j,
token: token.to_string(),
data: data
};
result = Some(res);
}
}
result
}
pub fn sequence_match(password: &str) -> Vec<BaseMatch> {
let mut matches:Vec<BaseMatch> = Vec::new();
let mut i = 0;
let mut last_delta:Option<i16> = None;
let length = password.chars().count();
for k in 1..length {
let mut chars = password[(k-1)..(k+1)].chars();
// Prevent overflow/underflow
let delta = - (chars.next().unwrap() as i16) +
(chars.next().unwrap() as i16);
if last_delta.is_none() {
last_delta = Some(delta);
}
match last_delta {
Some(ld) if ld == delta => continue,
_ => {},
}
let j = k - 1;
match sequence_update(&password[i..j+1], i, j, last_delta.unwrap()) {
Some(r) => matches.push(r),
None => {},
}
i = j;
last_delta = Some(delta);
}
if let Some(ld) = last_delta {
match sequence_update(&password[i..length], i, length, ld) {
Some(r) => matches.push(r),
None => {},
}
}
matches
}
#[test]
fn sequence_test() {
let pass = "123456789";
let matches = sequence_match(pass);
assert_eq!(1, matches.len());
let m = matches.iter().nth(0).unwrap();
assert_eq!(m.pattern, "Sequence");
assert_eq!(m.start, 0);
assert_eq!(m.end, 9);
assert_eq!(m.token, "123456789");
match m.data {
MatchData::Sequence{ref name, ref space, ref ascending} => {
assert_eq!(*name, "digits");
assert_eq!(*space, 10);
assert_eq!(*ascending, true);
},
_ => assert!(false),
}
}
pub fn regex_match(password: &str,
regexes: HashMap<String, Regex>) -> Vec<BaseMatch> {
let mut result: Vec<BaseMatch> = Vec::new();
for (name, reg) in regexes.iter() {
if let Some(mat) = reg.find(password) {
let metadata = MatchData::Regex{ name:name.clone() };
let rmatch = BaseMatch {
pattern: String::from("Regex"),
start: mat.start(),
end: mat.end(),
token: password[mat.start()..mat.end()].to_string(),
data: metadata,
};
result.push(rmatch);
}
}
result
}
fn map_ints_to_dmy(vals: &[i32; 3]) -> Option<NaiveDate> {
let mut result:Option<NaiveDate> = None;
const MIN_YEAR:i32 = 1000;
const MAX_YEAR:i32 = 2050;
    if vals[1] < 32 && vals[1] > 0 {
let mut in_range = true;
let mut over_12 = 0;
let mut over_31 = 0;
let mut under_1 = 0;
for i in vals.into_iter() {
match *i {
// Relies on fact ints have been parsed into valid magnitudes
99 ... MIN_YEAR | MAX_YEAR ... 9999 => {
in_range = false;
},
_ if *i > 31 => over_31 += 1,
_ if *i > 12 => over_12 += 1,
_ if *i < 1 => under_1 += 1,
_ => {},
}
}
        if in_range && over_31 < 2 && over_12 != 3 && under_1 < 2 {
let possible_splits = [(vals[2], (vals[0], vals[1])),
(vals[0], (vals[1], vals[2]))];
for &(year, dm) in possible_splits.into_iter() {
if MIN_YEAR <= year && year <= MAX_YEAR {
if let Some(date) = map_ints_to_dm(&dm) {
result = date.with_year(year);
}
}
}
if result.is_none() {
for &(year, dm) in possible_splits.into_iter() {
if let Some(date) = map_ints_to_dm(&dm) {
result = date.with_year(two_to_four_digit_year(year));
}
}
}
}
}
result
}
fn map_ints_to_dm(i:&(i32, i32)) -> Option<NaiveDate> {
let year = Local::now().year() as i32;
// TODO Change to (1..32).contains() etc. when stable
if 1 <= i.0 && i.0 <= 31 && 1 <= i.1 && i.1 <= 12 {
NaiveDate::from_ymd_opt(year, i.1 as u32, i.0 as u32)
} else if 1 <= i.1 && i.1 <= 31 && 1 <= i.0 && i.0 <= 12 {
NaiveDate::from_ymd_opt(year, i.0 as u32, i.1 as u32)
} else {
None
}
}
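// Illustrative expansions (hypothetical inputs, mirroring the branches below):
// 1987 -> 1987 (already four digits), 87 -> 1987 (> 50), 16 -> 2016 (<= 50).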
fn two_to_four_digit_year(year: i32) -> i32 {
if year > 99 {
year
} else if year > 50 {
year + 1900
} else {
year + 2000
}
}
pub fn date_match(password: &str) -> Vec<BaseMatch> {
let mut result: Vec<BaseMatch> = Vec::new();
let password_len = password.chars().count();
let date_splits:HashMap<usize, Vec<(usize, usize)>> = {
let mut m = HashMap::new();
m.insert(4, vec![(1, 2), (2, 3)]);
m.insert(5, vec![(1, 3), (2, 3)]);
m.insert(6, vec![(1, 2), (2, 4), (4, 5)]);
m.insert(7, vec![(1, 3), (2, 3), (4, 5), (4, 6)]);
m.insert(8, vec![(2, 4), (4, 6)]);
m
};
let maybe_date_no_sep = Regex::new(r"^\d{4,8}$").unwrap();
// Can't access previous captures in matches
let maybe_date_with_sep = Regex::new(r"^(\d{1,4})([\s/\\_.-])(\d{1,2})([\s/\\_.-])(\d{1,4})$")
.unwrap();
let ref_year = Local::now().year() as i32;
for i in 0..(cmp::max(password_len, 3)-3) {
for j in (i+3)..(i+8) {
if j >= password_len {
break;
}
let token = &password[i..j+1];
if !maybe_date_no_sep.is_match(&token) {
continue;
}
let mut candidates:Vec<NaiveDate> = Vec::new();
for &(k, l) in date_splits.get(&token.chars().count()).unwrap().iter() {
let a = token[0..k].parse();
let b = token[k..l].parse();
let c = token[l..].parse();
if a.is_err() || b.is_err() || c.is_err() {
break;
}
if let Some(d) = map_ints_to_dmy(&[a.unwrap(), b.unwrap(),c.unwrap()]) {
candidates.push(d);
}
}
if candidates.is_empty() {
continue;
}
let mut best:usize = 0;
let mut min_distance = i32::max_value();
for (index, cand) in candidates.iter().enumerate() {
let distance = (cand.year() - ref_year).abs();
if distance < min_distance {
best = index;
min_distance = distance;
}
}
let metadata = MatchData::Date {
separator:'\0',
date:*candidates.iter().nth(best).unwrap()
};
let mat = BaseMatch {
pattern: String::from("Date"),
token: token.to_string(),
start: i,
end: j,
data: metadata,
};
result.push(mat);
}
}
for i in 0..cmp::max(password_len, 6)-6 {
for j in (i+5)..(i+10) {
if j >= password_len {
break;
}
let token = &password[i..j+1];
if let Some(cap) = maybe_date_with_sep.captures(token)
{
// Thanks to regex we know these are strings hence the lack of checks
let dmy = &[
cap.get(1).unwrap().as_str().parse().unwrap(),
cap.get(3).unwrap().as_str().parse().unwrap(),
cap.get(5).unwrap().as_str().parse().unwrap()
];
if let Some(d) = map_ints_to_dmy(dmy) {
let sep= cap.get(2)
.unwrap()
.as_str()
.chars()
.next()
.unwrap();
let metadata = MatchData::Date {
separator: sep,
date: d,
};
let mat = BaseMatch {
pattern: String::from("Date"),
token: token.to_string(),
start: i,
end: j,
data: metadata,
};
result.push(mat);
}
}
}
}
result
}
#[test]
fn date_match_test() {
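    // Minimal smoke check (added here as a sketch; the original stub was empty).
    // Exact match counts depend on the current year, so only a lower bound and
    // the pattern name are asserted.
    let matches = date_match("13.05.1998");
    assert!(!matches.is_empty());
    assert!(matches.iter().any(|m| m.pattern == "Date"));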
}
pub fn repeat_match(password: &str) -> Vec<BaseMatch> {
let mut result:Vec<BaseMatch> = Vec::new();
let count = password.chars().count();
let greedy = FancyRegex::new(r"(.+)\1+").unwrap();
let lazy = FancyRegex::new(r"(.+?)\1+").unwrap();
let lazy_anchored = FancyRegex::new(r"^(.+?)\1+$").unwrap();
let mut last_index = 0;
while last_index < count {
let gmatch = greedy.captures(&password[last_index..]).unwrap();
if let Some(gcap) = gmatch {
let (gstart, gend) = gcap.pos(0).unwrap();
// if greedy matches lazy will
let lcap = lazy.captures(&password[last_index..]).unwrap().unwrap();
let (lstart, lend) = lcap.pos(0).unwrap();
let base: String;
let mut start = gstart + last_index;
let mut end = gend + last_index;
if gend - gstart > lend - lstart {
let lamatch = lazy_anchored.captures(&password[gstart..gend]).unwrap().unwrap();
base = lamatch.at(1).unwrap().to_string();
} else {
start = lstart + last_index;
end = lend + last_index;
base = lcap.at(1).unwrap().to_string();
}
let base_analysis = scoring::most_guessable_match_sequence(base.clone(),
omnimatch(base.as_ref()),
false);
let repeat_count = (end - start) / base.chars().count();
let metadata = MatchData::Repeat {
base_token: base,
base_guesses: base_analysis.guesses,
repeat_count: repeat_count
};
let data = BaseMatch {
pattern: String::from("Repeat"),
start: start,
end: end,
token: gcap.at(0).unwrap().to_string(),
data: metadata
};
result.push(data);
last_index += 1;
} else {
break;
}
}
result
}
#[test]
fn repeat_match_test() {
let test = "aabaabaabaab";
let result = repeat_match(test);
assert_eq!(result.len(), 10);
let first = result.iter().nth(0).unwrap();
assert_eq!(first.pattern, "Repeat");
assert_eq!(first.start, 0);
assert_eq!(first.end, test.chars().count());
assert_eq!(first.token, test);
match first.data {
MatchData::Repeat{ref base_token, ref repeat_count, ..} => {
assert_eq!(*repeat_count, 4);
assert_eq!(*base_token, "aab");
},
_=> assert!(false),
};
//let result = repeat_match("abcdefghijklmnopqrstuvwxyz");
}
pub fn spatial_match(password: &str) -> Vec<BaseMatch> {
let mut result:Vec<BaseMatch> = Vec::new();
let graphs = vec![
&*QWERTY_US,
&*DVORAK,
&*STANDARD_NUMPAD,
&*MAC_NUMPAD
];
let names = vec![
"qwerty",
"dvorak",
"Keypad",
"Mac keypad"
];
for (name, graph) in names.iter().zip(graphs.iter()) {
result.append(&mut spatial_helper(password, name, graph));
}
result.sort();
result
}
fn spatial_helper(password: &str,
graph_name: &str,
graph: &Keyboard) -> Vec<BaseMatch> {
let mut result:Vec<BaseMatch> = Vec::new();
let password_len = password.chars().count();
let mut i = 0;
while i < password_len {
let mut j = i + 1;
let mut turns = 0;
let mut previous_direction: Option<&Edge> = None;
let current_char = password.chars().nth(i).unwrap();
let current_key = graph.find_key(current_char);
if current_key.is_none() {
i = i +1;
continue;
}
let current_key = current_key.unwrap();
let mut previous_key = current_key;
let mut shift_count = current_key.is_shifted(current_char) as usize;
loop {
let mut found = false;
if j < password_len {
let current_char = password.chars().nth(j).unwrap();
let current_key = graph.find_key(current_char);
if current_key.is_some() {
let current_key = current_key.unwrap();
if let Some(dir) = graph.edge_weight(previous_key, current_key) {
found = true;
shift_count += current_key.is_shifted(current_char) as usize;
if Some(dir) != previous_direction {
turns += 1;
previous_direction = Some(dir);
}
}
previous_key = current_key;
}
}
if found {
j += 1;
} else {
if j - i > 2 {
let data = MatchData::Spatial {
graph: graph_name.to_string(),
turns: turns,
shifted_count: shift_count,
};
let mat = BaseMatch {
pattern: String::from("Spatial"),
start: i,
end: j - 1,
token: password[i..j].to_string(),
data: data,
};
result.push(mat);
}
i = j;
break;
}
}
}
result
}
#[test]
fn test_spatial_match() {
let password = "mNbVcvBnM,.?";
let matches = spatial_match(password);
assert_eq!(matches.len(), 1);
let mat = matches.iter().nth(0).unwrap();
match mat.data {
MatchData::Spatial{ref graph, ref turns, ref shifted_count} => {
assert_eq!(*graph, "qwerty");
assert_eq!(*turns, 2);
assert_eq!(*shifted_count, 5);
},
_ => assert!(false),
}
}<|fim▁end|>
| |
<|file_name|>app_event.test.js<|end_file_name|><|fim▁begin|>'use strict';
const path = require('path');
const request = require('supertest');
const pedding = require('pedding');
const assert = require('assert');
const sleep = require('ko-sleep');
const mm = require('..');
const fixtures = path.join(__dirname, 'fixtures');
const baseDir = path.join(fixtures, 'app-event');
describe('test/app_event.test.js', () => {
afterEach(mm.restore);
describe('after ready', () => {
let app;
before(() => {
app = mm.app({<|fim▁hole|> });
return app.ready();
});
after(() => app.close());
it('should listen by eventByRequest', done => {
done = pedding(3, done);
app.once('eventByRequest', done);
app.on('eventByRequest', done);
request(app.callback())
.get('/event')
.expect(200)
.expect('done', done);
});
});
describe('before ready', () => {
let app;
beforeEach(() => {
app = mm.app({
baseDir,
cache: false,
});
});
afterEach(() => app.ready());
afterEach(() => app.close());
it('should listen after app ready', done => {
done = pedding(2, done);
app.once('appReady', done);
app.on('appReady', done);
});
it('should listen after app instantiate', done => {
done = pedding(2, done);
app.once('appInstantiated', done);
app.on('appInstantiated', done);
});
});
describe('throw before app init', () => {
let app;
beforeEach(() => {
const baseDir = path.join(fixtures, 'app');
const customEgg = path.join(fixtures, 'error-framework');
app = mm.app({
baseDir,
customEgg,
cache: false,
});
});
afterEach(() => app.close());
it('should listen using app.on', done => {
app.on('error', err => {
assert(err.message === 'start error');
done();
});
});
it('should listen using app.once', done => {
app.once('error', err => {
assert(err.message === 'start error');
done();
});
});
it('should throw error from ready', function* () {
try {
yield app.ready();
} catch (err) {
assert(err.message === 'start error');
}
});
it('should close when app init failed', function* () {
app.once('error', () => {});
yield sleep(1000);
// app._app is undefined
yield app.close();
});
});
});<|fim▁end|>
|
baseDir,
cache: false,
|
<|file_name|>S15.4.4.9_A5.1.js<|end_file_name|><|fim▁begin|>// Copyright 2009 the Sputnik authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
info: The length property of shift has the attribute DontEnum
es5id: 15.4.4.9_A5.1
description: Checking use propertyIsEnumerable, for-in
---*/
//CHECK#1
if (Array.prototype.shift.propertyIsEnumerable('length') !== false) {<|fim▁hole|>
//CHECK#2
var result = true;
for (var p in Array.prototype.shift){
if (p === "length") {
result = false;
}
}
if (result !== true) {
$ERROR('#2: result = true; for (p in Array.prototype.shift) { if (p === "length") result = false; } result === true;');
}<|fim▁end|>
|
$ERROR('#1: Array.prototype.shift.propertyIsEnumerable(\'length\') === false. Actual: ' + (Array.prototype.shift.propertyIsEnumerable('length')));
}
|
<|file_name|>wait_job.go<|end_file_name|><|fim▁begin|>// Copyright 2017 <chaishushan{AT}gmail.com>. All rights reserved.
// Use of this source code is governed by a Apache
// license that can be found in the LICENSE file.
package wait
import (
"fmt"
"time"
pb "github.com/chai2010/qingcloud-go/pkg/api"
statuspkg "github.com/chai2010/qingcloud-go/pkg/status"
)
func WaitJob(server *pb.ServerInfo, jobId string, timeout time.Duration) error {
return WaitForIntervalWorkDone(
fmt.Sprintf("job:%v", jobId), timeout, func() (done bool, err error) {
return waitJob(server, jobId)
},
)
}
func waitJob(server *pb.ServerInfo, jobId string) (done bool, err error) {
reply, err := pb.NewJobService(server).DescribeJobs(&pb.DescribeJobsInput{
Jobs: []string{jobId},
})
if err != nil {
return false, err
}
if len(reply.GetJobSet()) != 1 {
return false, fmt.Errorf("can not find job [%s]", jobId)
}
status := statuspkg.JobStatus(reply.GetJobSet()[0].GetStatus())
switch status {
case statuspkg.JobStatus_Successful:<|fim▁hole|> return false, fmt.Errorf("job [%s] failed", jobId)
default:
return false, fmt.Errorf("unknow status [%s] for job [%s]", status, jobId)
}
}<|fim▁end|>
|
return true, nil // OK
case statuspkg.JobStatus_Pending, statuspkg.JobStatus_Working:
return false, nil
case statuspkg.JobStatus_Failed:
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient.auth.base import * # noqa
from keystoneclient.auth.cli import * # noqa
from keystoneclient.auth.conf import * # noqa
__all__ = [
# auth.base
'AUTH_INTERFACE',
'BaseAuthPlugin',<|fim▁hole|>
# auth.cli
'load_from_argparse_arguments',
'register_argparse_arguments',
# auth.conf
'get_common_conf_options',
'get_plugin_options',
'load_from_conf_options',
'register_conf_options',
]<|fim▁end|>
|
'get_plugin_class',
'PLUGIN_NAMESPACE',
|
<|file_name|>circular_queue_test.cc<|end_file_name|><|fim▁begin|>/* Copyright 2017 Istio Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/istio/prefetch/circular_queue.h"
#include "gtest/gtest.h"
namespace istio {
namespace prefetch {
namespace {
void ASSERT_RESULT(CircularQueue<int>& q, const std::vector<int>& expected) {
std::vector<int> v;
q.Iterate([&](int& i) -> bool {
v.push_back(i);
return true;
});
ASSERT_EQ(v, expected);
}
TEST(CircularQueueTest, TestNotResize) {
CircularQueue<int> q(5);
q.Push(1);
q.Push(2);
q.Push(3);
ASSERT_RESULT(q, {1, 2, 3});
q.Pop();
q.Pop();
q.Push(4);<|fim▁hole|> q.Push(5);
q.Push(6);
ASSERT_RESULT(q, {3, 4, 5, 6});
}
TEST(CircularQueueTest, TestResize1) {
CircularQueue<int> q(3);
for (int i = 1; i < 6; i++) {
q.Push(i);
}
ASSERT_RESULT(q, {1, 2, 3, 4, 5});
}
TEST(CircularQueueTest, TestResize2) {
CircularQueue<int> q(3);
// move head and tail
q.Push(1);
q.Push(2);
q.Push(3);
q.Pop();
q.Pop();
for (int i = 4; i < 10; i++) {
q.Push(i);
}
ASSERT_RESULT(q, {3, 4, 5, 6, 7, 8, 9});
}
} // namespace
} // namespace prefetch
} // namespace istio<|fim▁end|>
| |
<|file_name|>AuthenticationServiceImpl.java<|end_file_name|><|fim▁begin|>package org.sagebionetworks.auth.services;
import org.sagebionetworks.repo.manager.AuthenticationManager;
import org.sagebionetworks.repo.manager.MessageManager;
import org.sagebionetworks.repo.manager.UserManager;
import org.sagebionetworks.repo.manager.authentication.PersonalAccessTokenManager;
import org.sagebionetworks.repo.manager.oauth.AliasAndType;
import org.sagebionetworks.repo.manager.oauth.OAuthManager;
import org.sagebionetworks.repo.manager.oauth.OpenIDConnectManager;
import org.sagebionetworks.repo.model.AuthorizationUtils;
import org.sagebionetworks.repo.model.UnauthorizedException;
import org.sagebionetworks.repo.model.UserInfo;
import org.sagebionetworks.repo.model.auth.AccessToken;
import org.sagebionetworks.repo.model.auth.AccessTokenGenerationRequest;
import org.sagebionetworks.repo.model.auth.AccessTokenGenerationResponse;
import org.sagebionetworks.repo.model.auth.AccessTokenRecord;
import org.sagebionetworks.repo.model.auth.AccessTokenRecordList;
import org.sagebionetworks.repo.model.auth.AuthenticatedOn;
import org.sagebionetworks.repo.model.auth.ChangePasswordInterface;
import org.sagebionetworks.repo.model.auth.LoginRequest;
import org.sagebionetworks.repo.model.auth.LoginResponse;
import org.sagebionetworks.repo.model.auth.NewUser;
import org.sagebionetworks.repo.model.auth.PasswordResetSignedToken;
import org.sagebionetworks.repo.model.oauth.OAuthAccountCreationRequest;
import org.sagebionetworks.repo.model.oauth.OAuthProvider;
import org.sagebionetworks.repo.model.oauth.OAuthUrlRequest;
import org.sagebionetworks.repo.model.oauth.OAuthUrlResponse;
import org.sagebionetworks.repo.model.oauth.OAuthValidationRequest;
import org.sagebionetworks.repo.model.oauth.ProvidedUserInfo;
import org.sagebionetworks.repo.model.principal.AliasType;
import org.sagebionetworks.repo.model.principal.PrincipalAlias;
import org.sagebionetworks.repo.transactions.WriteTransaction;
import org.sagebionetworks.repo.web.NotFoundException;
import org.sagebionetworks.util.ValidateArgument;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
@Service
public class AuthenticationServiceImpl implements AuthenticationService {
@Autowired
private UserManager userManager;
@Autowired
private AuthenticationManager authManager;
@Autowired
private OAuthManager oauthManager;
<|fim▁hole|> private OpenIDConnectManager oidcManager;
@Autowired
private MessageManager messageManager;
@Autowired
private PersonalAccessTokenManager personalAccessTokenManager;
@WriteTransaction
@Override
public void changePassword(ChangePasswordInterface request) throws NotFoundException {
final long userId = authManager.changePassword(request);
messageManager.sendPasswordChangeConfirmationEmail(userId);
}
@Override
@WriteTransaction
public void signTermsOfUse(AccessToken accessToken) throws NotFoundException {
ValidateArgument.required(accessToken, "Access token");
ValidateArgument.required(accessToken.getAccessToken(), "Access token contents");
Long principalId = Long.parseLong(oidcManager.validateAccessToken(accessToken.getAccessToken()));
// Save the state of acceptance
authManager.setTermsOfUseAcceptance(principalId, true);
}
@Override
public String getSecretKey(Long principalId) throws NotFoundException {
return authManager.getSecretKey(principalId);
}
@Override
@WriteTransaction
public void deleteSecretKey(Long principalId) throws NotFoundException {
authManager.changeSecretKey(principalId);
}
@Override
public boolean hasUserAcceptedTermsOfUse(Long userId) throws NotFoundException {
return authManager.hasUserAcceptedTermsOfUse(userId);
}
@Override
public void sendPasswordResetEmail(String passwordResetUrlPrefix, String usernameOrEmail) {
try {
PrincipalAlias principalAlias = userManager.lookupUserByUsernameOrEmail(usernameOrEmail);
PasswordResetSignedToken passwordRestToken = authManager.createPasswordResetToken(principalAlias.getPrincipalId());
messageManager.sendNewPasswordResetEmail(passwordResetUrlPrefix, passwordRestToken, principalAlias);
} catch (NotFoundException e) {
// should not indicate that a email/user could not be found
}
}
@Override
public OAuthUrlResponse getOAuthAuthenticationUrl(OAuthUrlRequest request) {
String url = oauthManager.getAuthorizationUrl(request.getProvider(), request.getRedirectUrl(), request.getState());
OAuthUrlResponse response = new OAuthUrlResponse();
response.setAuthorizationUrl(url);
return response;
}
@Override
public LoginResponse validateOAuthAuthenticationCodeAndLogin(
OAuthValidationRequest request, String tokenIssuer) throws NotFoundException {
// Use the authentication code to lookup the user's information.
ProvidedUserInfo providedInfo = oauthManager.validateUserWithProvider(
request.getProvider(), request.getAuthenticationCode(), request.getRedirectUrl());
if(providedInfo.getUsersVerifiedEmail() == null){
throw new IllegalArgumentException("OAuthProvider: "+request.getProvider().name()+" did not provide a user email");
}
// This is the ID of the user within the provider's system.
PrincipalAlias emailAlias = userManager.lookupUserByUsernameOrEmail(providedInfo.getUsersVerifiedEmail());
// Return the user's access token
return authManager.loginWithNoPasswordCheck(emailAlias.getPrincipalId(), tokenIssuer);
}
@WriteTransaction
public LoginResponse createAccountViaOauth(OAuthAccountCreationRequest request, String tokenIssuer) {
// Use the authentication code to lookup the user's information.
ProvidedUserInfo providedInfo = oauthManager.validateUserWithProvider(
request.getProvider(), request.getAuthenticationCode(), request.getRedirectUrl());
if(providedInfo.getUsersVerifiedEmail() == null){
throw new IllegalArgumentException("OAuthProvider: "+request.getProvider().name()+" did not provide a user email");
}
// create account with the returned user info.
NewUser newUser = new NewUser();
newUser.setEmail(providedInfo.getUsersVerifiedEmail());
newUser.setFirstName(providedInfo.getFirstName());
newUser.setLastName(providedInfo.getLastName());
newUser.setUserName(request.getUserName());
long newPrincipalId = userManager.createUser(newUser);
return authManager.loginWithNoPasswordCheck(newPrincipalId, tokenIssuer);
}
@Override
public PrincipalAlias bindExternalID(Long userId, OAuthValidationRequest validationRequest) {
if (AuthorizationUtils.isUserAnonymous(userId)) throw new UnauthorizedException("User ID is required.");
AliasAndType providersUserId = oauthManager.retrieveProvidersId(
validationRequest.getProvider(),
validationRequest.getAuthenticationCode(),
validationRequest.getRedirectUrl());
// now bind the ID to the user account
return userManager.bindAlias(providersUserId.getAlias(), providersUserId.getType(), userId);
}
@Override
public void unbindExternalID(Long userId, OAuthProvider provider, String aliasName) {
if (AuthorizationUtils.isUserAnonymous(userId)) throw new UnauthorizedException("User ID is required.");
AliasType aliasType = oauthManager.getAliasTypeForProvider(provider);
userManager.unbindAlias(aliasName, aliasType, userId);
}
@Override
public LoginResponse login(LoginRequest request, String tokenIssuer) {
return authManager.login(request, tokenIssuer);
}
@Override
public AuthenticatedOn getAuthenticatedOn(long userId) {
UserInfo userInfo = userManager.getUserInfo(userId);
return authManager.getAuthenticatedOn(userInfo);
}
@Override
public PrincipalAlias lookupUserForAuthentication(String alias) {
return userManager.lookupUserByUsernameOrEmail(alias);
}
@Override
public AccessTokenGenerationResponse createPersonalAccessToken(Long userId, String accessToken, AccessTokenGenerationRequest request, String oauthEndpoint) {
UserInfo userInfo = userManager.getUserInfo(userId);
return personalAccessTokenManager.issueToken(userInfo, accessToken, request, oauthEndpoint);
}
@Override
public AccessTokenRecordList getPersonalAccessTokenRecords(Long userId, String nextPageToken) {
UserInfo userInfo = userManager.getUserInfo(userId);
return personalAccessTokenManager.getTokenRecords(userInfo, nextPageToken);
}
@Override
public AccessTokenRecord getPersonalAccessTokenRecord(Long userId, Long tokenId) {
UserInfo userInfo = userManager.getUserInfo(userId);
return personalAccessTokenManager.getTokenRecord(userInfo, tokenId.toString());
}
@Override
public void revokePersonalAccessToken(Long userId, Long tokenId) {
UserInfo userInfo = userManager.getUserInfo(userId);
personalAccessTokenManager.revokeToken(userInfo, tokenId.toString());
}
}<|fim▁end|>
|
@Autowired
|
<|file_name|>test_hooks.py<|end_file_name|><|fim▁begin|>"""Tests for `cookiecutter.hooks` module."""
import os
import errno
import stat
import sys
import textwrap
import pytest
from cookiecutter import hooks, utils, exceptions
def make_test_repo(name, multiple_hooks=False):
"""Create test repository for test setup methods."""
hook_dir = os.path.join(name, 'hooks')
template = os.path.join(name, 'input{{hooks}}')
os.mkdir(name)
os.mkdir(hook_dir)
os.mkdir(template)
with open(os.path.join(template, 'README.rst'), 'w') as f:
f.write("foo\n===\n\nbar\n")
with open(os.path.join(hook_dir, 'pre_gen_project.py'), 'w') as f:
f.write("#!/usr/bin/env python\n")
f.write("# -*- coding: utf-8 -*-\n")
f.write("from __future__ import print_function\n")
f.write("\n")
f.write("print('pre generation hook')\n")
f.write("f = open('python_pre.txt', 'w')\n")
f.write("f.close()\n")
if sys.platform.startswith('win'):
post = 'post_gen_project.bat'
with open(os.path.join(hook_dir, post), 'w') as f:
f.write("@echo off\n")
f.write("\n")
f.write("echo post generation hook\n")
f.write("echo. >shell_post.txt\n")
else:
post = 'post_gen_project.sh'
filename = os.path.join(hook_dir, post)
with open(filename, 'w') as f:
f.write("#!/bin/bash\n")
f.write("\n")
f.write("echo 'post generation hook';\n")
f.write("touch 'shell_post.txt'\n")
# Set the execute bit
os.chmod(filename, os.stat(filename).st_mode | stat.S_IXUSR)
# Adding an additional pre script
if multiple_hooks:
if sys.platform.startswith('win'):
pre = 'pre_gen_project.bat'
with open(os.path.join(hook_dir, pre), 'w') as f:
f.write("@echo off\n")
f.write("\n")
f.write("echo post generation hook\n")
f.write("echo. >shell_pre.txt\n")
else:
pre = 'pre_gen_project.sh'
filename = os.path.join(hook_dir, pre)
with open(filename, 'w') as f:
f.write("#!/bin/bash\n")
f.write("\n")
f.write("echo 'post generation hook';\n")
f.write("touch 'shell_pre.txt'\n")
# Set the execute bit
os.chmod(filename, os.stat(filename).st_mode | stat.S_IXUSR)
return post
class TestFindHooks(object):
"""Class to unite find hooks related tests in one place."""
repo_path = 'tests/test-hooks'
def setup_method(self, method):
"""Find hooks related tests setup fixture."""
self.post_hook = make_test_repo(self.repo_path)
def teardown_method(self, method):
"""Find hooks related tests teardown fixture."""
utils.rmtree(self.repo_path)
def test_find_hook(self):
"""Finds the specified hook."""
with utils.work_in(self.repo_path):
expected_pre = os.path.abspath('hooks/pre_gen_project.py')
actual_hook_path = hooks.find_hook('pre_gen_project')
assert expected_pre == actual_hook_path[0]
expected_post = os.path.abspath('hooks/{}'.format(self.post_hook))
actual_hook_path = hooks.find_hook('post_gen_project')
assert expected_post == actual_hook_path[0]
def test_no_hooks(self):
"""`find_hooks` should return None if the hook could not be found."""
with utils.work_in('tests/fake-repo'):
assert None is hooks.find_hook('pre_gen_project')
def test_unknown_hooks_dir(self):
"""`find_hooks` should return None if hook directory not found."""
with utils.work_in(self.repo_path):
assert hooks.find_hook('pre_gen_project', hooks_dir='hooks_dir') is None
def test_hook_not_found(self):
"""`find_hooks` should return None if the hook could not be found."""
with utils.work_in(self.repo_path):
assert hooks.find_hook('unknown_hook') is None
class TestExternalHooks(object):
"""Class to unite tests for hooks with different project paths."""
repo_path = os.path.abspath('tests/test-hooks/')
hooks_path = os.path.abspath('tests/test-hooks/hooks')
def setup_method(self, method):
"""External hooks related tests setup fixture."""
self.post_hook = make_test_repo(self.repo_path, multiple_hooks=True)
def teardown_method(self, method):
"""External hooks related tests teardown fixture."""
utils.rmtree(self.repo_path)
if os.path.exists('python_pre.txt'):
os.remove('python_pre.txt')
if os.path.exists('shell_post.txt'):
os.remove('shell_post.txt')
if os.path.exists('shell_pre.txt'):
os.remove('shell_pre.txt')
if os.path.exists('tests/shell_post.txt'):
os.remove('tests/shell_post.txt')
if os.path.exists('tests/test-hooks/input{{hooks}}/python_pre.txt'):
os.remove('tests/test-hooks/input{{hooks}}/python_pre.txt')
if os.path.exists('tests/test-hooks/input{{hooks}}/shell_post.txt'):
os.remove('tests/test-hooks/input{{hooks}}/shell_post.txt')
if os.path.exists('tests/context_post.txt'):
os.remove('tests/context_post.txt')
def test_run_script(self):
"""Execute a hook script, independently of project generation."""
hooks.run_script(os.path.join(self.hooks_path, self.post_hook))
assert os.path.isfile('shell_post.txt')
def test_run_failing_script(self, mocker):
"""Test correct exception raise if run_script fails."""
err = OSError()
prompt = mocker.patch('subprocess.Popen')<|fim▁hole|> prompt.side_effect = err
with pytest.raises(exceptions.FailedHookException) as excinfo:
hooks.run_script(os.path.join(self.hooks_path, self.post_hook))
assert 'Hook script failed (error: {})'.format(err) in str(excinfo.value)
def test_run_failing_script_enoexec(self, mocker):
"""Test correct exception raise if run_script fails."""
err = OSError()
err.errno = errno.ENOEXEC
prompt = mocker.patch('subprocess.Popen')
prompt.side_effect = err
with pytest.raises(exceptions.FailedHookException) as excinfo:
hooks.run_script(os.path.join(self.hooks_path, self.post_hook))
assert 'Hook script failed, might be an empty file or missing a shebang' in str(
excinfo.value
)
def test_run_script_cwd(self):
"""Change directory before running hook."""
hooks.run_script(os.path.join(self.hooks_path, self.post_hook), 'tests')
assert os.path.isfile('tests/shell_post.txt')
assert 'tests' not in os.getcwd()
def test_run_script_with_context(self):
"""Execute a hook script, passing a context."""
hook_path = os.path.join(self.hooks_path, 'post_gen_project.sh')
if sys.platform.startswith('win'):
post = 'post_gen_project.bat'
with open(os.path.join(self.hooks_path, post), 'w') as f:
f.write("@echo off\n")
f.write("\n")
f.write("echo post generation hook\n")
f.write("echo. >{{cookiecutter.file}}\n")
else:
with open(hook_path, 'w') as fh:
fh.write("#!/bin/bash\n")
fh.write("\n")
fh.write("echo 'post generation hook';\n")
fh.write("touch 'shell_post.txt'\n")
fh.write("touch '{{cookiecutter.file}}'\n")
os.chmod(hook_path, os.stat(hook_path).st_mode | stat.S_IXUSR)
hooks.run_script_with_context(
os.path.join(self.hooks_path, self.post_hook),
'tests',
{'cookiecutter': {'file': 'context_post.txt'}},
)
assert os.path.isfile('tests/context_post.txt')
assert 'tests' not in os.getcwd()
def test_run_hook(self):
"""Execute hook from specified template in specified output \
directory."""
tests_dir = os.path.join(self.repo_path, 'input{{hooks}}')
with utils.work_in(self.repo_path):
hooks.run_hook('pre_gen_project', tests_dir, {})
assert os.path.isfile(os.path.join(tests_dir, 'python_pre.txt'))
assert os.path.isfile(os.path.join(tests_dir, 'shell_pre.txt'))
hooks.run_hook('post_gen_project', tests_dir, {})
assert os.path.isfile(os.path.join(tests_dir, 'shell_post.txt'))
def test_run_failing_hook(self):
"""Test correct exception raise if hook exit code is not zero."""
hook_path = os.path.join(self.hooks_path, 'pre_gen_project.py')
tests_dir = os.path.join(self.repo_path, 'input{{hooks}}')
with open(hook_path, 'w') as f:
f.write("#!/usr/bin/env python\n")
f.write("import sys; sys.exit(1)\n")
with utils.work_in(self.repo_path):
with pytest.raises(exceptions.FailedHookException) as excinfo:
hooks.run_hook('pre_gen_project', tests_dir, {})
assert 'Hook script failed' in str(excinfo.value)
@pytest.fixture()
def dir_with_hooks(tmp_path):
"""Yield a directory that contains hook backup files."""
hooks_dir = tmp_path.joinpath('hooks')
hooks_dir.mkdir()
pre_hook_content = textwrap.dedent(
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
print('pre_gen_project.py~')
"""
)
pre_gen_hook_file = hooks_dir.joinpath('pre_gen_project.py~')
pre_gen_hook_file.write_text(pre_hook_content, encoding='utf8')
post_hook_content = textwrap.dedent(
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
print('post_gen_project.py~')
"""
)
post_gen_hook_file = hooks_dir.joinpath('post_gen_project.py~')
post_gen_hook_file.write_text(post_hook_content, encoding='utf8')
# Make sure to yield the parent directory as `find_hooks()`
# looks into `hooks/` in the current working directory
yield str(tmp_path)
pre_gen_hook_file.unlink()
post_gen_hook_file.unlink()
def test_ignore_hook_backup_files(monkeypatch, dir_with_hooks):
"""Test `find_hook` correctly use `valid_hook` verification function."""
# Change the current working directory that contains `hooks/`
monkeypatch.chdir(dir_with_hooks)
assert hooks.find_hook('pre_gen_project') is None
assert hooks.find_hook('post_gen_project') is None<|fim▁end|>
| |
<|file_name|>less.js<|end_file_name|><|fim▁begin|>if (typeof window !== 'undefined') {
var less = require('npm:less/lib/less-browser/index')(window, window.less || {})
var head = document.getElementsByTagName('head')[0];
// get all injected style tags in the page
var styles = document.getElementsByTagName('style');
var styleIds = [];
for (var i = 0; i < styles.length; i++) {
if (!styles[i].hasAttribute("data-href")) continue;
styleIds.push(styles[i].getAttribute("data-href"));
}
var loadStyle = function (url) {
return new Promise(function (resolve, reject) {
var request = new XMLHttpRequest();
request.open('GET', url, true);
request.onload = function () {
if (request.status >= 200 && request.status < 400) {
// Success!
var data = request.responseText;
var options = window.less || {};
options.filename = url;
options.rootpath = url.replace(/[^\/]*$/, '');
//render it using less
less.render(data, options).then(function (data) {
//inject it into the head as a style tag
var style = document.createElement('style');
style.textContent = '\r\n' + data.css;
style.setAttribute('type', 'text/css');
//store original type in the data-type attribute
style.setAttribute('data-type', 'text/less');
//store the url in the data-href attribute
style.setAttribute('data-href', url);
head.appendChild(style);
resolve('');
});
} else {
// We reached our target server, but it returned an error
reject()
}
};
request.onerror = function (e) {
reject(e)
};
<|fim▁hole|> }
exports.fetch = function (load) {
// don't reload styles loaded in the head
for (var i = 0; i < styleIds.length; i++)
if (load.address == styleIds[i])
return '';
return loadStyle(load.address);
}
}
else {
exports.translate = function (load) {
// setting format = 'defined' means we're managing our own output
load.metadata.format = 'defined';
};
exports.bundle = function (loads, opts) {
var loader = this;
if (loader.buildCSS === false)
return '';
return loader.import('./less-builder', {name: module.id}).then(function (builder) {
return builder.call(loader, loads, opts);
});
}
}<|fim▁end|>
|
request.send();
});
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>pub mod data_cache;
use std::collections::HashMap;
use std::option::Option;
use self::data_cache::base_data::City;
struct DfaState {
name:String,
path:HashMap<char, DfaState>,
}
impl DfaState {
fn new(n:String) -> DfaState {
DfaState{name:n, path:HashMap::default()}
}
fn add_path(&mut self, c:char, name:&str) -> &mut DfaState {
let mut res = self.path.entry(c).or_insert(DfaState::new(name.to_string()));
if res.name == "" && name != "" {
res.name = name.to_string();
}
res
}
fn add_path_by_name(&mut self, name:&str) {
let chars:Vec<char> = name.chars().collect();
self.add_path_by_chars(&chars, 0, name);
}
fn add_path_by_chars(&mut self, chars:&Vec<char>, idx:usize, name:&str) {
if idx < chars.len() { unsafe {
let c = chars.get_unchecked(idx);
<|fim▁hole|> if idx == chars.len() - 1 {
self.add_path(*c, name);
} else {
let state = self.add_path(*c, "");
state.add_path_by_chars(chars, idx+1, name);
}
}}
}
fn is_accepted(&self) -> bool {
self.name != ""
}
fn tran(&self, c:&char) -> Option<&DfaState> {
self.path.get(c)
}
fn to_vv_string(&self) -> Vec<Vec<String>> {
let mut vv:Vec<Vec<String>> = Vec::new();
let mut v = Vec::new();
if self.name == "" {
v.push("~".to_string());
} else {
v.push(self.name.to_string());
}
vv.push(v);
for (c, s) in &self.path {
let mut v2 = Vec::new();
v2.push("".to_string());
let mut cs = String::new();
cs.push(*c);
v2.push(cs);
vv.push(v2);
let svv = s.to_vv_string();
for sv in svv {
let mut sv2 = Vec::new();
sv2.push("".to_string());
sv2.push("".to_string());
sv2.extend(sv.iter().cloned());
// for s in sv {
// sv2.push(s.to_string());
// }
vv.push(sv2);
}
}
vv
}
}
pub struct DFA {
start_state:DfaState,
pub citys:Vec<City>,
pub name_map:HashMap<String, Vec<usize>>
}
impl DFA {
pub fn new() -> DFA {
let (citys, name_map) = data_cache::assemble_data();
let mut dfa = DFA{start_state:DfaState::new("".to_string()), citys:citys, name_map: name_map};
for name in dfa.name_map.keys() {
dfa.start_state.add_path_by_name(name);
}
dfa
}
pub fn print_states(&self) {
println!("++++++++++++++++");
for v in self.start_state.to_vv_string() {
for s in v {
print!("{}\t", s);
}
println!("");
};
println!("++++++++++++++++");
}
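    /// Scans `chars` for occurrences of the names loaded into the DFA and
    /// returns each matched name once, in the order it is first found
    /// (behaviour summarised here from `scan_recur` below).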
pub fn scan(&self, chars:&Vec<char>) -> Vec<String> {
let mut res = Vec::new();
self.scan_recur(chars, 0, 0, 0, &self.start_state, &self.start_state, &mut res);
res
}
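    // Recursive matcher behind `scan`: `from_idx` is where the current attempt
    // started, `current_idx` is the read cursor, and `currect_accepted_idx` /
    // `current_accepted` remember the last position and state at which a whole
    // name was accepted, so the scan can restart just past a confirmed match
    // when it hits a dead end.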
fn scan_recur(&self,
chars:&Vec<char>, from_idx:usize, current_idx:usize, currect_accepted_idx:usize,
current_state:&DfaState, current_accepted:&DfaState, res:&mut Vec<String>) {
let len = chars.len();
if current_idx < len { unsafe {
let ch = chars.get_unchecked(current_idx);
// println!("{}", ch);
match current_state.tran(&ch) {
None => if (current_accepted as *const DfaState) != (&self.start_state as * const DfaState) {
if !res.contains(¤t_accepted.name) {
res.push(current_accepted.name.to_string());
}
self.scan_recur(chars, currect_accepted_idx + 1, currect_accepted_idx+1, 0, &self.start_state, &self.start_state, res);
} else {
self.scan_recur(chars, from_idx + 1, from_idx + 1, currect_accepted_idx, &self.start_state, current_accepted, res);
},
Some(cs) => if current_idx + 1 == len {
if cs.is_accepted() {
if !res.contains(&cs.name) {
res.push(cs.name.to_string());
}
self.scan_recur(chars, from_idx, current_idx+1, currect_accepted_idx, &self.start_state, current_accepted, res);
} if (current_accepted as *const DfaState) != (&self.start_state as * const DfaState) {
if !res.contains(¤t_accepted.name) {
res.push(current_accepted.name.to_string());
}
self.scan_recur(chars, currect_accepted_idx + 1, currect_accepted_idx+1, 0, &self.start_state, &self.start_state, res);
} else {
self.scan_recur(chars, from_idx + 1, from_idx, currect_accepted_idx+1, &self.start_state, current_accepted, res);
}
} else if cs.is_accepted() {
self.scan_recur(chars, from_idx, current_idx + 1, current_idx, cs, cs, res);
} else {
self.scan_recur(chars, from_idx, current_idx + 1, currect_accepted_idx, cs, current_accepted, res);
}
}
}}
}
}<|fim▁end|>
| |
<|file_name|>lex.py<|end_file_name|><|fim▁begin|>import operator
import ply.lex as lex
from jpp.parser.operation import Operation
from jpp.parser.expression import SimpleExpression
reserved = {
'extends': 'EXTENDS',
'import': 'IMPORT',
'local': 'LOCAL',
'imported': 'IMPORTED',
'user_input': 'USER_INPUT',
}
NAME_TOK = 'NAME'
tokens = [
'INTEGER',
'STRING_LITERAL',
'COLON',
NAME_TOK,
'COMMA',
'LCURL',
'RCURL',
'LBRAC',
'RBRAC',
'LPAREN',
'RPAREN',
'DOT',
'SEMICOLON',
'BOOLEAN',
'MINUS',
'COMPARISON_OP',
'PLUS',
'MUL_OP',
'BIT_SHIFT_OPS',
'BITWISE_OPS',
'INVERT',
'POW',
'FUNC',
]
tokens.extend(reserved.values())
t_DOT = r'\.'
t_LCURL = r'\{'
t_RCURL = r'\}'
t_COLON = r'\:'
t_LBRAC = r'\['
t_RBRAC = r'\]'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_COMMA = ','
t_SEMICOLON = ';'
def _create_operation_token(t):
t.value = Operation(t.value)
return t
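# Note: in ply.lex the docstring of each t_* function is used as that token's
# regular expression, so the triple-quoted strings below are patterns, not docs.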
def t_BIT_SHIFT_OPS(t):
"""
<<|>>
"""
return _create_operation_token(t)
def t_COMPARISON_OP(t):
"""
<|<=|==|!=|>=
"""
return _create_operation_token(t)
def t_BITWISE_OPS(t):
r"""
&|\^|\|
"""
return _create_operation_token(t)
def t_PLUS(t):
r"""
\+
"""
return _create_operation_token(t)
def t_MINUS(t):
r"""
-
"""
t.value = Operation(t.value, operator.sub)
return t
def t_POW(t):
r"""
\*\*
"""
return _create_operation_token(t)
def t_MUL_OP(t):
r"""
\*|//|/|%
"""
return _create_operation_token(t)
def t_INVERT(t):
"""
~
"""
return _create_operation_token(t)
def t_FUNC(t):
"""
bool|abs
"""
return _create_operation_token(t)
def t_INTEGER(t):
r"""
\d+
"""
t.value = SimpleExpression(int(t.value))
return t
def t_STRING_LITERAL(t):
"""
"[^"\n]*"
"""
t.value = SimpleExpression(str(t.value).strip('"'))
return t
def t_BOOLEAN(t):
"""
true|false
"""
t.value = SimpleExpression(t.value == 'true')
return t
def t_NAME(t):
"""
[a-zA-Z_][a-zA-Z_0-9]*
"""<|fim▁hole|>
def t_COMMENT(t):
r"""
\#.*
"""
# No return value. Token discarded
pass
def t_newline(t):
r"""
\n+
"""
t.lexer.lineno += len(t.value)
def t_error(_):
return
t_ignore = ' \t'
def create_lexer():
return lex.lex(debug=False)<|fim▁end|>
|
t.type = reserved.get(t.value, NAME_TOK) # Check for reserved words
return t
|
<|file_name|>messages.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import struct
class BBType(object):
command = 1
command_return = 2
consolemsg = 3
ping = 4
pong = 5
getenv = 6
getenv_return = 7
fs = 8
fs_return = 9
class BBPacket(object):
def __init__(self, p_type=0, p_flags=0, payload="", raw=None):
self.p_type = p_type
self.p_flags = p_flags
if raw is not None:
self.unpack(raw)
else:
self.payload = payload
def __repr__(self):
return "BBPacket(%i, %i)" % (self.p_type, self.p_flags)
def _unpack_payload(self, data):
self.payload = data
def _pack_payload(self):
return self.payload
def unpack(self, data):
self.p_type, self.p_flags = struct.unpack("!HH", data[:4])
self._unpack_payload(data[4:])
def pack(self):
return struct.pack("!HH", self.p_type, self.p_flags) + \
self._pack_payload()
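# Wire format: a 4-byte header packed as "!HH" (big-endian packet type and
# flags) followed by the type-specific payload from _pack_payload().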
class BBPacketCommand(BBPacket):
def __init__(self, raw=None, cmd=None):
self.cmd = cmd
super(BBPacketCommand, self).__init__(BBType.command, raw=raw)
def __repr__(self):
return "BBPacketCommand(cmd=%r)" % self.cmd
def _unpack_payload(self, payload):
self.cmd = payload
def _pack_payload(self):
return self.cmd
class BBPacketCommandReturn(BBPacket):
def __init__(self, raw=None, exit_code=None):
self.exit_code = exit_code
super(BBPacketCommandReturn, self).__init__(BBType.command_return,
raw=raw)
def __repr__(self):
return "BBPacketCommandReturn(exit_code=%i)" % self.exit_code
def _unpack_payload(self, data):
self.exit_code, = struct.unpack("!L", data[:4])
def _pack_payload(self):
return struct.pack("!L", self.exit_code)
class BBPacketConsoleMsg(BBPacket):
def __init__(self, raw=None, text=None):
self.text = text
super(BBPacketConsoleMsg, self).__init__(BBType.consolemsg, raw=raw)
def __repr__(self):
return "BBPacketConsoleMsg(text=%r)" % self.text
def _unpack_payload(self, payload):
self.text = payload
def _pack_payload(self):
return self.text
class BBPacketPing(BBPacket):
def __init__(self, raw=None):
super(BBPacketPing, self).__init__(BBType.ping, raw=raw)
def __repr__(self):
return "BBPacketPing()"
class BBPacketPong(BBPacket):
def __init__(self, raw=None):
super(BBPacketPong, self).__init__(BBType.pong, raw=raw)
def __repr__(self):
return "BBPacketPong()"
class BBPacketGetenv(BBPacket):
def __init__(self, raw=None, varname=None):<|fim▁hole|> return "BBPacketGetenv(varname=%r)" % self.varname
def _unpack_payload(self, payload):
self.varname = payload
def _pack_payload(self):
return self.varname
class BBPacketGetenvReturn(BBPacket):
def __init__(self, raw=None, text=None):
self.text = text
super(BBPacketGetenvReturn, self).__init__(BBType.getenv_return,
raw=raw)
def __repr__(self):
return "BBPacketGetenvReturn(varvalue=%s)" % self.text
def _unpack_payload(self, payload):
self.text = payload
def _pack_payload(self):
return self.text
class BBPacketFS(BBPacket):
def __init__(self, raw=None, payload=None):
super(BBPacketFS, self).__init__(BBType.fs, payload=payload, raw=raw)
def __repr__(self):
return "BBPacketFS(payload=%r)" % self.payload
class BBPacketFSReturn(BBPacket):
def __init__(self, raw=None, payload=None):
super(BBPacketFSReturn, self).__init__(BBType.fs_return, payload=payload, raw=raw)
def __repr__(self):
return "BBPacketFSReturn(payload=%r)" % self.payload<|fim▁end|>
|
self.varname = varname
super(BBPacketGetenv, self).__init__(BBType.getenv, raw=raw)
def __repr__(self):
|
<|file_name|>gui.py<|end_file_name|><|fim▁begin|>#*************************************************************************************************
#
# File Name: gui.py
# Project: Erebus Labs Sensor
# Revision Date: 04/13/2014
# Description: This file contains the main Erebus Sensor gui class
#
#*************************************************************************************************
# System Imports
import tkinter as tk
import tkinter.messagebox as mb
import time
import copy
from datetime import datetime
import subprocess
# Project Imports
import erebus_sensor.interface as interface
class ErebusGUI(tk.Frame):
"""
Provides the user interface for the Erebus Labs sensor.
Members:
<Too Many To List> all GUI attributes
sensorHandle serial.Serial object that communicates with the sensor
displayedSettings the collection of sensor settings displayed in the user
interface
Methods:
configureWindow initializes the tkinter window
"""
def __init__(self, master=None):
"""
Initializes the ErebusGUI object and displays the user interface.
Arguments:
master tkinter.Tk root object
"""
# Configure GUI-wide variables
self.sensorHandle = None
self.displayedSettings = interface.Settings()
# Initialize Window
tk.Frame.__init__(self, master)
self.configureWindow()
master.wm_title('Erebus Labs Sensor')
def configureWindow(self):
"""
Configures and applies attributes of the GUI window.
Arguments:
<None>
"""
# *** Configure Top-Level Window ***
self.grid(sticky=tk.N+tk.S+tk.E+tk.W)
self.pack()
top = self.winfo_toplevel()
top.rowconfigure(0, weight=1)
top.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self['padx'] = 10
self['pady'] = 10
# *** Create Top-level Menu ***
self.menuBar = tk.Menu(top, tearoff=0)
top['menu'] = self.menuBar
# Create File Menu Entry
self.subMenuFile = tk.Menu(self.menuBar, tearoff=0)
self.menuBar.add_cascade(label='File', menu=self.subMenuFile)
self.subMenuFile.add_command(label="Exit", command=self.quit)
# Create Sensor Menu Entry
self.subMenuSensor = tk.Menu(self.menuBar, tearoff=0)
self.menuBar.add_cascade(label="Sensor", menu=self.subMenuSensor)
self.subMenuSensor.add_command(label="Get Data",
command=self.getData)
self.subMenuSensor.add_command(label="Get Current Configuration",
command=self.getSettings)
self.subMenuSensor.add_command(label="Apply Current Configuration",
command=self.applySettings)
self.subMenuSensor.add_command(label="Reset Sensor",
command=self.resetSensor)
# Create Help Menu Entry
self.subMenuHelp = tk.Menu(self.menuBar, tearoff=0)
self.menuBar.add_cascade(label="Help", menu=self.subMenuHelp)
self.subMenuHelp.add_command(label="User Manual",
command=self.openManual)
self.subMenuHelp.add_command(label="Technical Manual",
command=self.openTRM)
self.subMenuHelp.add_command(label="About",
command=self.showAbout)
# *** Create Sensor Button and Status Box ***
# Create the Frame
self.fStatus = tk.Frame(self)
self.fStatus['borderwidth'] = 3
self.fStatus['padx'] = 10
self.fStatus['pady'] = 10
self.fStatus['relief'] = tk.GROOVE
self.fStatus.grid(row=0, sticky=tk.E+tk.W)
# Create Sensor Status Box
self.lStatus = tk.Label(self.fStatus)
self.lStatus['font'] = ('Helvetica', 16, 'bold')
self.lStatus['width'] = 24
self.lStatus['height'] = 3
self.lStatus['foreground'] = 'black'
self.lStatus.grid(row=0, column=0, padx=10, pady=10, sticky=tk.E+tk.W)
# Create Connect Button
self.bConnectSensor = tk.Button(self.fStatus)
self.bConnectSensor['font'] = ('Helvetica', 12, 'bold')
self.bConnectSensor['command'] = self.toggleSensor
self.bConnectSensor['relief'] = tk.RAISED<|fim▁hole|> # Initialize Connected and Button Text
self._disconnectedMessage()
# *** Create Settings Area ***
self.fSettings = tk.Frame(self)
self.fSettings['borderwidth'] = 3
self.fSettings['padx'] = 10
self.fSettings['pady'] = 10
self.fSettings['relief'] = tk.GROOVE
self.fSettings.grid(row=1, sticky=tk.E+tk.W)
# Create Header
self.lStatusHeader = tk.Label(self.fSettings, text="Data Collection Settings")
self.lStatusHeader['font'] = ('Helvetica', 14, 'bold')
self.lStatusHeader.grid(row=0, column=0, columnspan=3, sticky=tk.W)
# Create Section Labels
self.lSettingSensor = tk.Label(self.fSettings, text="Sensor:")
self.lSettingSensor['font'] = ('Helvetica', 12)
self.lSettingSensor.grid(row=1, column=0, sticky=tk.E, padx=10)
self.lSettingInterval = tk.Label(self.fSettings, text="Interval:")
self.lSettingInterval['font'] = ('Helvetica', 12)
self.lSettingInterval.grid(row=2, column=0, sticky=tk.E, padx=10)
self.lSettingUnit = tk.Label(self.fSettings, text="Unit:")
self.lSettingUnit['font'] = ('Helvetica', 12)
self.lSettingUnit.grid(row=3, column=0, sticky=tk.E, padx=10)
# Create Sensor Selection Dropdown
self.sensorOptions = tk.StringVar()
self.sensorOptions.set(self.displayedSettings.SENSOR)
self.dSettingSensor = tk.OptionMenu(self.fSettings,
self.sensorOptions,
*interface.sensorOptions)
self.dSettingSensor.grid(row=1, column=1, sticky=tk.E+tk.W, padx=20)
# Create Interval Edit Box
self.eSettingInterval = tk.Entry(self.fSettings)
self.eSettingInterval["width"] =25
self.iSettingInterval = tk.StringVar()
self.eSettingInterval["textvariable"] = self.iSettingInterval
self.eSettingInterval["justify"] = tk.RIGHT
self.eSettingInterval.insert(0, str(self.displayedSettings.SAMPLE_INTERVAL))
self.eSettingInterval.grid(row=2, column=1, sticky=tk.E+tk.W, padx=20)
self.lSettingInterval = tk.Label(self.fSettings)
self.lSettingInterval["text"] = ("Interval must be between 0 and {}"
.format(interface.maxInterval))
self.lSettingInterval["font"] = ('Helvetica', 10)
self.lSettingInterval.grid(row=2, column=2, padx=20)
# Create Unit Selection Dropdown
self.unitOptions = tk.StringVar()
self.unitOptions.set(self.displayedSettings.SAMPLE_UNIT)
self.dSettingUnit = tk.OptionMenu(self.fSettings,
self.unitOptions,
*interface.unitOptions)
self.dSettingUnit.grid(row=3, column=1, sticky=tk.E+tk.W, padx=20)
return
def toggleSensor(self):
"""
Toggles the sensor status box based on sensor connectivity state.
Status window possible states:
Red Not Connected
Yellow Connecting
Green Connected
Arguments:
<None>
"""
if self.sensorHandle != None:
self.disconnectSensor()
self._disconnectedMessage()
else:
self.lStatus['background'] = 'yellow'
self.lStatus['text'] = "Connecting..."
proceed = mb.askokcancel("Connecting...",
"This may take up to 30 seconds and cannot be "
"interrupted. Click OK to continue.")
if proceed and not self.connectSensor():
self._connectedMessage()
if self.sensorHandle.update_RTC():
mb.showwarning("RTC Not Updated",
"Warning: The sensor is connected, but the "
"device's time could not be updated. Consider "
"disconnecting and reconnecting the sensor.")
elif proceed:
self._disconnectedMessage(warning="Could not establish communication "
"with Erebus Sensor.")
self.sensorHandle = None
else:
self._disconnectedMessage()
self.sensorHandle = None
return
def _connectedMessage(self):
"""
Sets the status and connection buttons to the connected state.
Arguments:
<None>
"""
self.lStatus['background'] = 'green'
self.lStatus['text'] = "Sensor Connected"
self.bConnectSensor["text"] = "Disconnect Sensor"
return
def _disconnectedMessage(self, warning=None):
"""
Sets the status and connection buttons to the disconnected state and displays a
warning dialog box if necessary.
Arguments:
warning string to display as a warning message to the user
"""
self.lStatus['background'] = 'red'
self.lStatus['text'] = "Sensor Not Connected"
self.bConnectSensor["text"] = "Connect to Erebus Sensor"
if warning:
mb.showwarning("Failure", warning)
return
def _showNotConnected(self):
"""
Displays an error message to the user indicating that their command failed
because the sensor is not currently connected.
Arguments:
<None>
"""
mb.showerror("", "Erebus sensor is not currently connected. Please connect "
"the sensor and try again.")
return
def connectSensor(self):
"""
Creates the ErebusSensor object, attempts to connect to the sensor, and reports the
result.
Arguments:
<None>
"""
connectLimit = 12
status = 1
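        # Poll for the device up to connectLimit times, sleeping 3 seconds
        # between attempts (roughly the 30-second window quoted in the dialog).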
for x in range(connectLimit):
self.sensorHandle = interface.ErebusSensor()
if self.sensorHandle.isConnected():
status = 0
break
time.sleep(3)
return status
def disconnectSensor(self):
"""
Severs the USBUART connection with the sensor.
Note: This does not send any commands to the device. The sensor does not know the
connection has been severed. It only considers itself disconnected when the
USB cable is unplugged.
Arguments:
<None>
"""
if self.sensorHandle != None:
self.sensorHandle.close()
self.sensorHandle = None
return
def getSettings(self):
"""
Retrives the current sample settings from the device and displays them for the user.
Arguments:
<None>
"""
if self.sensorHandle == None:
self._showNotConnected()
return
sensorSettings = self.sensorHandle.getSettings()
if sensorSettings == -1:
mb.showerror("", "Settings could not be retrieved. Please try again.")
return
self.sensorOptions.set(sensorSettings.SENSOR)
self.eSettingInterval.delete(0, tk.END)
self.eSettingInterval.insert(0, str(sensorSettings.SAMPLE_INTERVAL))
self.unitOptions.set(sensorSettings.SAMPLE_UNIT)
return
def applySettings(self):
"""
Retrieves the current settings from the GUI window that the user has selected and
sends them to the sensor.
Arguments:
<None>
"""
if self.sensorHandle == None:
self._showNotConnected()
return
if self.sensorHandle.applySettings(self.sensorOptions.get(),
self.unitOptions.get(),
int(self.eSettingInterval.get().strip(' ,.'))):
mb.showerror("", "Settings update failed. Please try again.")
else:
mb.showinfo("", "Settings update successful.")
return
def getData(self):
"""
Initiates a dump of stored samples from the sensor and outputs the result in a test
file called "datadump.txt".
Arguments:
<None>
"""
if self.sensorHandle == None:
self._showNotConnected()
return
dataBlocks = self.sensorHandle.getData()
if isinstance(dataBlocks, list) and dataBlocks:
with open('datadump.txt', 'a') as fo:
fo.write("".join(["\n\n", "*"*30,
"\nErebus Sensor Data Dump",
"\n{} Blocks of data samples".format(len(dataBlocks)),
"\nDump Time: {}".format(str(datetime.now())),
"\n", "*"*30]))
for block in dataBlocks:
fo.write(str(block))
mb.showinfo("", "Data export successful.")
elif dataBlocks == []:
mb.showinfo("", "There were no data samples to retrieve from the device.")
else:
mb.showerror("", "There was an error retrieving data from the device. "
"Please try again.")
return
def resetSensor(self):
"""
Initiates a reset of the sample block pointers on the device. This function MUST
be called after the device is reprogrammed, before any samples are collected.
Arguments:
<None>
"""
if self.sensorHandle == None:
self._showNotConnected()
return
if self.sensorHandle.hard_reset():
mb.showerror("", "There was an erorr resetting the device. Please try "
"again.")
else:
mb.showinfo("", "Reset successful.")
return
def showAbout(self):
"""
Displays an information box with the GUI version
Arguments:
<None>
"""
mb.showinfo("About", "Erebus Labs Stem Sensor\n"
"User Application Version 1.0\n"
"(C) 2014 Erebus Labs")
return
def openTRM(self):
"""
Opens the Erebus Labs Sensor Technical Reference Manual
This routine is not cross-platform. It must be modified to enable Windows
support.
Arguments:
<None>
"""
subprocess.call(['xdg-open', './documents/Technical_Reference_Manual.pdf'],
stderr=None,
stdout=None)
return
def openManual(self):
"""
Opens the Erebus Labs Sensor User Manual
This routine is not cross-platform. It must be modified to enable Windows
support.
Arguments:
<None>
"""
subprocess.call(['xdg-open', './documents/User_Manual.pdf'],
stderr=None,
stdout=None)
return<|fim▁end|>
|
self.bConnectSensor.grid(row=0, column=1, padx=10, pady=10)
|
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># Django settings for test_remote_project project.
import os.path
import posixpath
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, 'fixtures'),
]
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'db.sqlite', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'mzdvd*#0=$g(-!v_vj_7^(=zrh3klia(u&cqd3nr7p^khh^ui#'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'test_remote_project.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'test_remote_project.wsgi.application'
TEMPLATE_DIRS = (<|fim▁hole|> # Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
'cities_light',
'djangorestframework',
'south',
'autocomplete_light',
'remote_autocomplete',
'remote_autocomplete_inline',
'navigation_autocomplete',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers':['console'],
'propagate': True,
'level':'DEBUG',
},
'cities_light': {
'handlers':['console'],
'propagate': True,
'level':'DEBUG',
},
}
}<|fim▁end|>
|
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
|
<|file_name|>svgs_2_pngs.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Basic exporter for svg icons
from os import listdir
from os.path import isfile, join, dirname, realpath
import subprocess
import sys
import rsvg
import cairo
last_svg_path = None
last_svg_data = None
SCRIPT_FOLDER = dirname(realpath(__file__)) + '/'
theme_dir_base = SCRIPT_FOLDER + '../../scene/resources/default_theme/'
theme_dir_source = theme_dir_base + 'source/'
icons_dir_base = SCRIPT_FOLDER + '../editor/icons/'
icons_dir_2x = icons_dir_base + '2x/'
icons_dir_source = icons_dir_base + 'source/'
def svg_to_png(svg_path, png_path, dpi):
global last_svg_path, last_svg_data
zoom = int(dpi / 90)
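    # Scale factor relative to a 90 DPI baseline (presumably librsvg's default),
    # so the 90/180 DPI calls below produce the 1x and 2x raster sizes.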
if last_svg_path != svg_path:
last_svg_data = open(svg_path, 'r').read()
last_svg_path = svg_path
svg = rsvg.Handle(data=last_svg_data)
img = cairo.ImageSurface(
cairo.FORMAT_ARGB32,
svg.props.width * zoom,
svg.props.height * zoom
)
ctx = cairo.Context(img)
ctx.set_antialias(cairo.ANTIALIAS_DEFAULT)
ctx.scale(zoom, zoom)
svg.render_cairo(ctx)
img.write_to_png('%s.png' % png_path)
svg.close()
def export_icons():
svgs_path = icons_dir_source
file_names = [f for f in listdir(svgs_path) if isfile(join(svgs_path, f))]
for file_name in file_names:
# name without extensions
name_only = file_name.replace('.svg', '')
out_icon_names = [name_only] # export to a png with the same file name<|fim▁hole|> # special cases
if special_icons.has_key(name_only):
special_icon = special_icons[name_only]
if type(special_icon) is dict:
if special_icon.get('avoid_self'):
out_icon_names = []
if special_icon.has_key('output_names'):
out_icon_names += special_icon['output_names']
if special_icon.has_key('theme_output_names'):
theme_out_icon_names += special_icon['theme_output_names']
source_path = '%s%s.svg' % (svgs_path, name_only)
for out_icon_name in out_icon_names:
svg_to_png(source_path, icons_dir_base + out_icon_name, 90)
svg_to_png(source_path, icons_dir_2x + out_icon_name, 180)
for theme_out_icon_name in theme_out_icon_names:
svg_to_png(source_path, theme_dir_base + theme_out_icon_name, 90)
def export_theme():
svgs_path = theme_dir_source
file_names = [f for f in listdir(svgs_path) if isfile(join(svgs_path, f))]
for file_name in file_names:
# name without extensions
name_only = file_name.replace('.svg', '')
out_icon_names = [name_only] # export to a png with the same file name
# special cases
if theme_icons.has_key(name_only):
special_icon = theme_icons[name_only]
if type(special_icon) is dict:
if special_icon.has_key('output_names'):
out_icon_names += special_icon['output_names']
source_path = '%s%s.svg' % (svgs_path, name_only)
for out_icon_name in out_icon_names:
svg_to_png(source_path, theme_dir_base + out_icon_name, 90)
# special cases for icons that will be exported to multiple target pngs or that require transforms.
special_icons = {
'icon_add_track': dict(
output_names=['icon_add'],
theme_output_names=['icon_add', 'icon_zoom_more']
),
'icon_new': dict(output_names=['icon_file']),
'icon_animation_tree_player': dict(output_names=['icon_animation_tree']),
'icon_tool_rotate': dict(
output_names=['icon_reload'],
theme_output_names=['icon_reload']
),
'icon_multi_edit': dict(output_names=['icon_multi_node_edit']),
'icon_folder': dict(
output_names=['icon_load', 'icon_open'],
theme_output_names=['icon_folder']
),
'icon_file_list': dict(output_names=['icon_enum']),
'icon_collision_2d': dict(output_names=['icon_collision_polygon_2d', 'icon_polygon_2d']),
'icon_class_list': dict(output_names=['icon_filesystem']),
'icon_color_ramp': dict(output_names=['icon_graph_color_ramp']),
'icon_translation': dict(output_names=['icon_p_hash_translation']),
'icon_shader': dict(output_names=['icon_shader_material', 'icon_material_shader']),
'icon_canvas_item_shader_graph': dict(output_names=['icon_material_shader_graph']),
'icon_color_pick': dict(theme_output_names=['icon_color_pick'], avoid_self=True),
'icon_play': dict(theme_output_names=['icon_play']),
'icon_stop': dict(theme_output_names=['icon_stop']),
'icon_zoom_less': dict(theme_output_names=['icon_zoom_less'], avoid_self=True),
'icon_zoom_reset': dict(theme_output_names=['icon_zoom_reset'], avoid_self=True),
'icon_snap': dict(theme_output_names=['icon_snap'])
}
theme_icons = {
'icon_close': dict(output_names=['close', 'close_hl']),
'tab_menu': dict(output_names=['tab_menu_hl'])
}
export_icons()
export_theme()<|fim▁end|>
|
theme_out_icon_names = []
|
<|file_name|>EventListener.java<|end_file_name|><|fim▁begin|>/**
* Copyright (c) 2010-2018 by the respective copyright holders.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*/
package org.openhab.binding.innogysmarthome.internal.listener;
import org.openhab.binding.innogysmarthome.internal.InnogyWebSocket;
/**
* The {@link EventListener} is called by the {@link InnogyWebSocket} on new Events and if the {@link InnogyWebSocket}
* closed the connection.
*
* @author Oliver Kuhl - Initial contribution
*/
public interface EventListener {
/**
     * This method is called whenever a new event comes from the innogy service (for example, a device change).
*
* @param msg
*/
public void onEvent(String msg);
/**
     * This method is called when the event runner stops abnormally (status code != 1000).
*/<|fim▁hole|><|fim▁end|>
|
public void connectionClosed();
}
|
<|file_name|>comp-4615.component.spec.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { Comp4615Component } from './comp-4615.component';
describe('Comp4615Component', () => {
let component: Comp4615Component;
let fixture: ComponentFixture<Comp4615Component>;
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [ Comp4615Component ]
})
.compileComponents();
}));
beforeEach(() => {
fixture = TestBed.createComponent(Comp4615Component);
component = fixture.componentInstance;<|fim▁hole|> expect(component).toBeTruthy();
});
});<|fim▁end|>
|
fixture.detectChanges();
});
it('should create', () => {
|
<|file_name|>settings.py<|end_file_name|><|fim▁begin|># Django settings for babynames project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'data_monging', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'mark',
'PASSWORD': 'blah',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Detroit'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '_p-f$nh91u39((laogm+tx^b37=$)g*023pk5tp!8bih74a0u!'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',<|fim▁hole|> # Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'babynames.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'babynames.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'web',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}<|fim▁end|>
|
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
|
<|file_name|>Button.js<|end_file_name|><|fim▁begin|>/* ************************************************************************
qooxdoo - the new era of web development
http://qooxdoo.org
Copyright:
2004-2008 1&1 Internet AG, Germany, http://www.1und1.de
License:
LGPL: http://www.gnu.org/licenses/lgpl.html
EPL: http://www.eclipse.org/org/documents/epl-v10.php
See the LICENSE file in the project's top-level directory for details.
Authors:
* Sebastian Werner (wpbasti)
* Andreas Ecker (ecker)
************************************************************************ */
/**
* @appearance toolbar-button
*/
qx.Class.define("qx.legacy.ui.toolbar.Button",
{
extend : qx.legacy.ui.form.Button,
/*
*****************************************************************************
PROPERTIES
*****************************************************************************
*/
properties :
{
// Omit focus
tabIndex :
{
refine : true,
init : -1
},
<|fim▁hole|> init : "toolbar-button"
},
show :
{
refine : true,
init : "inherit"
},
height :
{
refine : true,
init : null
},
allowStretchY :
{
refine : true,
init : true
}
},
/*
*****************************************************************************
MEMBERS
*****************************************************************************
*/
members :
{
/*
---------------------------------------------------------------------------
EVENT HANDLER
---------------------------------------------------------------------------
*/
/**
* @signature function()
*/
_onkeydown : qx.lang.Function.returnTrue,
/**
* @signature function()
*/
_onkeyup : qx.lang.Function.returnTrue
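    // Both key handlers are replaced with no-ops returning true, presumably so
    // toolbar buttons are not triggered from the keyboard the way form buttons are.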
}
});<|fim▁end|>
|
appearance :
{
refine : true,
|
<|file_name|>Encoding.hpp<|end_file_name|><|fim▁begin|>// Copyright Benoit Blanchon 2014-2015
// MIT License
//
// Arduino JSON library
// https://github.com/bblanchon/ArduinoJson
#pragma once
#include "../Arduino/Print.hpp"
namespace ArduinoJson {
namespace Internals {
class Encoding {
public:
// Optimized for code size on a 8-bit AVR
static char escapeChar(char c) {
const char *p = _escapeTable;
while (p[0] && p[1] != c) {
p += 2;
}
return p[0];
}
<|fim▁hole|> const char *p = _escapeTable + 4;
for (;;) {
if (p[0] == '\0') return c;
if (p[0] == c) return p[1];
p += 2;
}
}
private:
static const char _escapeTable[];
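  // Inferred from escapeChar/unescapeChar above: a NUL-terminated list of
  // (escape letter, raw character) pairs; unescapeChar starts at +4, skipping
  // what are presumably the quote and backslash entries.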
};
}
}<|fim▁end|>
|
// Optimized for code size on a 8-bit AVR
static char unescapeChar(char c) {
|
<|file_name|>efficientnet.py<|end_file_name|><|fim▁begin|># Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
"""EfficientNet models for Keras.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](
https://arxiv.org/abs/1905.11946) (ICML 2019)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
from tensorflow.python.keras import backend<|fim▁hole|>from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import VersionAwareLayers
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHTS_PATH = 'https://storage.googleapis.com/keras-applications/'
WEIGHTS_HASHES = {
'b0': ('902e53a9f72be733fc0bcb005b3ebbac',
'50bc09e76180e00e4465e1a485ddc09d'),
'b1': ('1d254153d4ab51201f1646940f018540',
'74c4e6b3e1f6a1eea24c589628592432'),
'b2': ('b15cce36ff4dcbd00b6dd88e7857a6ad',
'111f8e2ac8aa800a7a99e3239f7bfb39'),
'b3': ('ffd1fdc53d0ce67064dc6a9c7960ede0',
'af6d107764bb5b1abb91932881670226'),
'b4': ('18c95ad55216b8f92d7e70b3a046e2fc',
'ebc24e6d6c33eaebbd558eafbeedf1ba'),
'b5': ('ace28f2a6363774853a83a0b21b9421a',
'38879255a25d3c92d5e44e04ae6cec6f'),
'b6': ('165f6e37dce68623721b423839de8be5',
'9ecce42647a20130c1f39a5d4cb75743'),
'b7': ('8c03f828fec3ef71311cd463b6759d99',
'cbcfe4450ddf6f3ad90b1b398090fe4a'),
}
DEFAULT_BLOCKS_ARGS = [{
'kernel_size': 3,
'repeats': 1,
'filters_in': 32,
'filters_out': 16,
'expand_ratio': 1,
'id_skip': True,
'strides': 1,
'se_ratio': 0.25
}, {
'kernel_size': 3,
'repeats': 2,
'filters_in': 16,
'filters_out': 24,
'expand_ratio': 6,
'id_skip': True,
'strides': 2,
'se_ratio': 0.25
}, {
'kernel_size': 5,
'repeats': 2,
'filters_in': 24,
'filters_out': 40,
'expand_ratio': 6,
'id_skip': True,
'strides': 2,
'se_ratio': 0.25
}, {
'kernel_size': 3,
'repeats': 3,
'filters_in': 40,
'filters_out': 80,
'expand_ratio': 6,
'id_skip': True,
'strides': 2,
'se_ratio': 0.25
}, {
'kernel_size': 5,
'repeats': 3,
'filters_in': 80,
'filters_out': 112,
'expand_ratio': 6,
'id_skip': True,
'strides': 1,
'se_ratio': 0.25
}, {
'kernel_size': 5,
'repeats': 4,
'filters_in': 112,
'filters_out': 192,
'expand_ratio': 6,
'id_skip': True,
'strides': 2,
'se_ratio': 0.25
}, {
'kernel_size': 3,
'repeats': 1,
'filters_in': 192,
'filters_out': 320,
'expand_ratio': 6,
'id_skip': True,
'strides': 1,
'se_ratio': 0.25
}]
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_out',
'distribution': 'truncated_normal'
}
}
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1. / 3.,
'mode': 'fan_out',
'distribution': 'uniform'
}
}
layers = VersionAwareLayers()
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](
https://arxiv.org/abs/1905.11946) (ICML 2019)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
If you have never configured it, it defaults to `"channels_last"`.
Arguments:
include_top: Whether to include the fully-connected
layer at the top of the network. Defaults to True.
weights: One of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded. Defaults to 'imagenet'.
input_tensor: Optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: Optional shape tuple, only to be specified
if `include_top` is False.
    It should have exactly 3 input channels.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`. Defaults to None.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified. Defaults to 1000 (number of
ImageNet classes).
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Defaults to 'softmax'.
Returns:
A `keras.Model` instance.
"""
def EfficientNet(
width_coefficient,
depth_coefficient,
default_size,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
activation='swish',
blocks_args='default',
model_name='efficientnet',
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax'):
"""Instantiates the EfficientNet architecture using given scaling coefficients.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](
https://arxiv.org/abs/1905.11946) (ICML 2019)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Arguments:
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
default_size: integer, default input image size.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections.
depth_divisor: integer, a unit of network width.
activation: activation function.
blocks_args: list of dicts, parameters to construct block modules.
model_name: string, model name.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False.
      It should have exactly 3 input channels.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
if blocks_args == 'default':
blocks_args = DEFAULT_BLOCKS_ARGS
if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
def round_filters(filters, divisor=depth_divisor):
"""Round number of filters based on depth multiplier."""
filters *= width_coefficient
new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
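  # Illustrative worked example (added commentary, not part of the upstream file):
  # with B2-style coefficients width_coefficient=1.1, depth_coefficient=1.2 and
  # depth_divisor=8,
  #   round_filters(32) -> 35.2 -> nearest multiple of 8 -> 32
  #   round_filters(40) -> 44.0 -> rounds up to 48
  #   round_repeats(2)  -> ceil(2.4) -> 3
  #   round_repeats(3)  -> ceil(3.6) -> 4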
# Build stem
x = img_input
x = layers.Rescaling(1. / 255.)(x)
x = layers.Normalization(axis=bn_axis)(x)
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, 3),
name='stem_conv_pad')(x)
x = layers.Conv2D(
round_filters(32),
3,
strides=2,
padding='valid',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name='stem_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, name='stem_bn')(x)
x = layers.Activation(activation, name='stem_activation')(x)
# Build blocks
blocks_args = copy.deepcopy(blocks_args)
b = 0
blocks = float(sum(round_repeats(args['repeats']) for args in blocks_args))
for (i, args) in enumerate(blocks_args):
assert args['repeats'] > 0
# Update block input and output filters based on depth multiplier.
args['filters_in'] = round_filters(args['filters_in'])
args['filters_out'] = round_filters(args['filters_out'])
for j in range(round_repeats(args.pop('repeats'))):
# The first block needs to take care of stride and filter size increase.
if j > 0:
args['strides'] = 1
args['filters_in'] = args['filters_out']
x = block(
x,
activation,
drop_connect_rate * b / blocks,
name='block{}{}_'.format(i + 1, chr(j + 97)),
**args)
b += 1
# Build top
x = layers.Conv2D(
round_filters(1280),
1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name='top_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, name='top_bn')(x)
x = layers.Activation(activation, name='top_activation')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
if dropout_rate > 0:
x = layers.Dropout(dropout_rate, name='top_dropout')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(
classes,
activation=classifier_activation,
kernel_initializer=DENSE_KERNEL_INITIALIZER,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D(name='max_pool')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name=model_name)
# Load weights.
if weights == 'imagenet':
if include_top:
file_suffix = '.h5'
file_hash = WEIGHTS_HASHES[model_name[-2:]][0]
else:
file_suffix = '_notop.h5'
file_hash = WEIGHTS_HASHES[model_name[-2:]][1]
file_name = model_name + file_suffix
weights_path = data_utils.get_file(
file_name,
BASE_WEIGHTS_PATH + file_name,
cache_subdir='models',
file_hash=file_hash)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def block(inputs,
activation='swish',
drop_rate=0.,
name='',
filters_in=32,
filters_out=16,
kernel_size=3,
strides=1,
expand_ratio=1,
se_ratio=0.,
id_skip=True):
"""An inverted residual block.
Arguments:
inputs: input tensor.
activation: activation function.
drop_rate: float between 0 and 1, fraction of the input units to drop.
name: string, block label.
filters_in: integer, the number of input filters.
filters_out: integer, the number of output filters.
kernel_size: integer, the dimension of the convolution window.
strides: integer, the stride of the convolution.
expand_ratio: integer, scaling coefficient for the input filters.
se_ratio: float between 0 and 1, fraction to squeeze the input filters.
id_skip: boolean.
Returns:
output tensor for the block.
"""
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
# Expansion phase
filters = filters_in * expand_ratio
if expand_ratio != 1:
x = layers.Conv2D(
filters,
1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'expand_conv')(
inputs)
x = layers.BatchNormalization(axis=bn_axis, name=name + 'expand_bn')(x)
x = layers.Activation(activation, name=name + 'expand_activation')(x)
else:
x = inputs
# Depthwise Convolution
if strides == 2:
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, kernel_size),
name=name + 'dwconv_pad')(x)
conv_pad = 'valid'
else:
conv_pad = 'same'
x = layers.DepthwiseConv2D(
kernel_size,
strides=strides,
padding=conv_pad,
use_bias=False,
depthwise_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'dwconv')(x)
x = layers.BatchNormalization(axis=bn_axis, name=name + 'bn')(x)
x = layers.Activation(activation, name=name + 'activation')(x)
# Squeeze and Excitation phase
if 0 < se_ratio <= 1:
filters_se = max(1, int(filters_in * se_ratio))
se = layers.GlobalAveragePooling2D(name=name + 'se_squeeze')(x)
se = layers.Reshape((1, 1, filters), name=name + 'se_reshape')(se)
se = layers.Conv2D(
filters_se,
1,
padding='same',
activation=activation,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'se_reduce')(
se)
se = layers.Conv2D(
filters,
1,
padding='same',
activation='sigmoid',
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'se_expand')(se)
x = layers.multiply([x, se], name=name + 'se_excite')
# Output phase
x = layers.Conv2D(
filters_out,
1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + 'project_conv')(x)
x = layers.BatchNormalization(axis=bn_axis, name=name + 'project_bn')(x)
if id_skip and strides == 1 and filters_in == filters_out:
if drop_rate > 0:
x = layers.Dropout(
drop_rate, noise_shape=(None, 1, 1, 1), name=name + 'drop')(x)
x = layers.add([x, inputs], name=name + 'add')
return x
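# Added note: inside EfficientNet() the per-block `drop_rate` passed to this
# function grows linearly with the block index (drop_connect_rate * b / blocks),
# so for B0's 16 block repeats with drop_connect_rate=0.2 the first block uses
# 0.0 and the last one roughly 0.19.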
@keras_export('keras.applications.efficientnet.EfficientNetB0',
'keras.applications.EfficientNetB0')
def EfficientNetB0(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.0,
1.0,
224,
0.2,
model_name='efficientnetb0',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
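# Minimal usage sketch (added for illustration; the argument values are examples only):
#   model = EfficientNetB0(weights=None, classes=10)              # randomly initialised classifier
#   backbone = EfficientNetB0(include_top=False, pooling='avg')   # 1280-d feature extractor
# Inputs are expected in the raw 0-255 range because the Rescaling and
# Normalization layers are built into the model's stem.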
@keras_export('keras.applications.efficientnet.EfficientNetB1',
'keras.applications.EfficientNetB1')
def EfficientNetB1(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.0,
1.1,
240,
0.2,
model_name='efficientnetb1',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB2',
'keras.applications.EfficientNetB2')
def EfficientNetB2(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.1,
1.2,
260,
0.3,
model_name='efficientnetb2',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB3',
'keras.applications.EfficientNetB3')
def EfficientNetB3(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.2,
1.4,
300,
0.3,
model_name='efficientnetb3',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB4',
'keras.applications.EfficientNetB4')
def EfficientNetB4(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.4,
1.8,
380,
0.4,
model_name='efficientnetb4',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB5',
'keras.applications.EfficientNetB5')
def EfficientNetB5(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.6,
2.2,
456,
0.4,
model_name='efficientnetb5',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB6',
'keras.applications.EfficientNetB6')
def EfficientNetB6(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
1.8,
2.6,
528,
0.5,
model_name='efficientnetb6',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
@keras_export('keras.applications.efficientnet.EfficientNetB7',
'keras.applications.EfficientNetB7')
def EfficientNetB7(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
return EfficientNet(
2.0,
3.1,
600,
0.5,
model_name='efficientnetb7',
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
**kwargs)
EfficientNetB0.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB0')
EfficientNetB1.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB1')
EfficientNetB2.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB2')
EfficientNetB3.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB3')
EfficientNetB4.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB4')
EfficientNetB5.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB5')
EfficientNetB6.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB6')
EfficientNetB7.__doc__ = BASE_DOCSTRING.format(name='EfficientNetB7')
@keras_export('keras.applications.efficientnet.preprocess_input')
def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
return x
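# Added note: this is intentionally a pass-through. Unlike most Keras
# applications, EfficientNet performs its own Rescaling(1/255) and
# Normalization inside the model stem, so no external preprocessing is needed.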
@keras_export('keras.applications.efficientnet.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__<|fim▁end|>
|
from tensorflow.python.keras.applications import imagenet_utils
|
<|file_name|>state.rs<|end_file_name|><|fim▁begin|>// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use super::test_common::*;
use pod_state::PodState;
use trace;
use client::{EvmTestClient, EvmTestError, TransactResult};
use ethjson;
use transaction::SignedTransaction;
use vm::EnvInfo;
pub fn json_chain_test(json_data: &[u8]) -> Vec<String> {
::ethcore_logger::init_log();
let tests = ethjson::state::test::Test::load(json_data).unwrap();
let mut failed = Vec::new();
for (name, test) in tests.into_iter() {
{
let multitransaction = test.transaction;
let env: EnvInfo = test.env.into();
let pre: PodState = test.pre_state.into();
for (spec_name, states) in test.post_states {
let total = states.len();
let spec = match EvmTestClient::spec_from_json(&spec_name) {
Some(spec) => spec,
None => {
println!(" - {} | {:?} Ignoring tests because of missing spec", name, spec_name);
continue;
}
};
for (i, state) in states.into_iter().enumerate() {
let info = format!(" - {} | {:?} ({}/{}) ...", name, spec_name, i + 1, total);
let post_root: H256 = state.hash.into();
let transaction: SignedTransaction = multitransaction.select(&state.indexes).into();
let result = || -> Result<_, EvmTestError> {
Ok(EvmTestClient::from_pod_state(spec, pre.clone())?
.transact(&env, transaction, trace::NoopVMTracer))
};
match result() {
Err(err) => {
println!("{} !!! Unexpected internal error: {:?}", info, err);
flushln!("{} fail", info);
failed.push(name.clone());
},
Ok(TransactResult::Ok { state_root, .. }) if state_root != post_root => {
println!("{} !!! State mismatch (got: {}, expect: {}", info, state_root, post_root);
flushln!("{} fail", info);
failed.push(name.clone());
},
Ok(TransactResult::Err { state_root, ref error }) if state_root != post_root => {
println!("{} !!! State mismatch (got: {}, expect: {}", info, state_root, post_root);
println!("{} !!! Execution error: {:?}", info, error);
flushln!("{} fail", info);
failed.push(name.clone());
},
Ok(TransactResult::Err { error, .. }) => {
flushln!("{} ok ({:?})", info, error);
},
Ok(_) => {
flushln!("{} ok", info);
},
}
}
}
}
}
if !failed.is_empty() {
println!("!!! {:?} tests failed.", failed.len());
}
failed
}
mod state_tests {
use super::json_chain_test;
fn do_json_test(json_data: &[u8]) -> Vec<String> {
json_chain_test(json_data)
}
declare_test!{GeneralStateTest_stAttackTest, "GeneralStateTests/stAttackTest/"}
declare_test!{GeneralStateTest_stBadOpcodeTest, "GeneralStateTests/stBadOpcode/"}
declare_test!{GeneralStateTest_stCallCodes, "GeneralStateTests/stCallCodes/"}
declare_test!{GeneralStateTest_stCallDelegateCodesCallCodeHomestead, "GeneralStateTests/stCallDelegateCodesCallCodeHomestead/"}
declare_test!{GeneralStateTest_stCallDelegateCodesHomestead, "GeneralStateTests/stCallDelegateCodesHomestead/"}
declare_test!{GeneralStateTest_stChangedEIP150, "GeneralStateTests/stChangedEIP150/"}
declare_test!{GeneralStateTest_stCodeSizeLimit, "GeneralStateTests/stCodeSizeLimit/"}
declare_test!{GeneralStateTest_stCreateTest, "GeneralStateTests/stCreateTest/"}
declare_test!{GeneralStateTest_stDelegatecallTestHomestead, "GeneralStateTests/stDelegatecallTestHomestead/"}
declare_test!{GeneralStateTest_stEIP150singleCodeGasPrices, "GeneralStateTests/stEIP150singleCodeGasPrices/"}
declare_test!{GeneralStateTest_stEIP150Specific, "GeneralStateTests/stEIP150Specific/"}
declare_test!{GeneralStateTest_stEIP158Specific, "GeneralStateTests/stEIP158Specific/"}
declare_test!{GeneralStateTest_stExample, "GeneralStateTests/stExample/"}
declare_test!{GeneralStateTest_stHomesteadSpecific, "GeneralStateTests/stHomesteadSpecific/"}
declare_test!{GeneralStateTest_stInitCodeTest, "GeneralStateTests/stInitCodeTest/"}
declare_test!{GeneralStateTest_stLogTests, "GeneralStateTests/stLogTests/"}
declare_test!{GeneralStateTest_stMemExpandingEIP150Calls, "GeneralStateTests/stMemExpandingEIP150Calls/"}
declare_test!{heavy => GeneralStateTest_stMemoryStressTest, "GeneralStateTests/stMemoryStressTest/"}
declare_test!{GeneralStateTest_stMemoryTest, "GeneralStateTests/stMemoryTest/"}
declare_test!{GeneralStateTest_stNonZeroCallsTest, "GeneralStateTests/stNonZeroCallsTest/"}
declare_test!{GeneralStateTest_stPreCompiledContracts, "GeneralStateTests/stPreCompiledContracts/"}
declare_test!{heavy => GeneralStateTest_stQuadraticComplexityTest, "GeneralStateTests/stQuadraticComplexityTest/"}
declare_test!{GeneralStateTest_stRandom, "GeneralStateTests/stRandom/"}
declare_test!{GeneralStateTest_stRecursiveCreate, "GeneralStateTests/stRecursiveCreate/"}
declare_test!{GeneralStateTest_stRefundTest, "GeneralStateTests/stRefundTest/"}
declare_test!{GeneralStateTest_stReturnDataTest, "GeneralStateTests/stReturnDataTest/"}
declare_test!{GeneralStateTest_stRevertTest, "GeneralStateTests/stRevertTest/"}
declare_test!{GeneralStateTest_stSolidityTest, "GeneralStateTests/stSolidityTest/"}
declare_test!{GeneralStateTest_stSpecialTest, "GeneralStateTests/stSpecialTest/"}
declare_test!{GeneralStateTest_stStackTests, "GeneralStateTests/stStackTests/"}
declare_test!{GeneralStateTest_stStaticCall, "GeneralStateTests/stStaticCall/"}<|fim▁hole|> declare_test!{GeneralStateTest_stWalletTest, "GeneralStateTests/stWalletTest/"}
declare_test!{GeneralStateTest_stZeroCallsRevert, "GeneralStateTests/stZeroCallsRevert/"}
declare_test!{GeneralStateTest_stZeroCallsTest, "GeneralStateTests/stZeroCallsTest/"}
declare_test!{GeneralStateTest_stZeroKnowledge, "GeneralStateTests/stZeroKnowledge/"}
}<|fim▁end|>
|
declare_test!{GeneralStateTest_stSystemOperationsTest, "GeneralStateTests/stSystemOperationsTest/"}
declare_test!{GeneralStateTest_stTransactionTest, "GeneralStateTests/stTransactionTest/"}
declare_test!{GeneralStateTest_stTransitionTest, "GeneralStateTests/stTransitionTest/"}
|
<|file_name|>libstd-re-export.rs<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
|
#![deny(broken_intra_doc_links)]
#![feature(intra_doc_pointers)]
pub use std::*;
|
<|file_name|>container.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Anne Archibald <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
class ContainerError(ValueError):
"""Error signaling something went wrong with container handling"""
pass
class Container(object):
"""A container is an object that manages objects it contains.
The objects in a container each have a .container attribute that
points to the container. This attribute is managed by the container
itself.
This class is a base class that provides common container functionality,
to be used to simplify implementation of list and dict containers.
"""
def _set_container(self, item):
if hasattr( item, "container" ) and item.container not in (None,self):
# raise ContainerError("Item %s was added to container %s but was already in container %s" % (item, self, item.container))
item.container.remove( item )
item.container = self
def _unset_container(self, item):
if item.container is not self:
raise ContainerError("Item %s was removed from container %s but was not in it" % (item, self))
item.container = None
def _set_container_multi(self, items):
"""Put items in the container in an all-or-nothing way"""
r = []
try:
for i in items:
self._set_container(i)
r.append(i)
r = None
finally: # Make sure items don't get added to this if any fail
if r is not None:
for i in r:
try:
self._unset_container(i)
except ContainerError:
pass
def _unset_container_multi(self, items):
"""Remove items from the container in an all-or-nothing way"""
r = []
try:
for i in items:
self._unset_container(i)
r.append(i)
r = None
finally:
if r is not None:
for i in r:
try:
self._set_container(i)
except ContainerError:
pass
class ContainerList(list,Container):
"""A ContainerList is a list whose children know they're in it.
Each element in the ContainerList has a .container attribute which points
to the ContainerList itself. This container pointer is maintained automatically.
"""
def __init__(self, items=[], owner=None):
list.__init__(self, items)
self._set_container_multi(items)
self.owner = owner
def __repr__(self):
return "<CL %s>" % list.__repr__(self)
def append(self, item):
self._set_container(item)
list.append(self,item)
def extend(self, items):
self._set_container_multi(items)
list.extend(self,items)
def insert(self, i, item):
self._set_container(item)
list.insert(self,i,item)
def remove(self, item):
self._unset_container(item)
list.remove(self,item)
def pop(self, i=-1):
self._unset_container(self[i])
return list.pop(self,i)
# These don't work because they make the elements part of more than one list, or one list more than once
def __add__(self, other):
raise NotImplementedError
def __radd__(self, other):
raise NotImplementedError
def __imul__(self,other):
raise NotImplementedError
def __mul__(self, other):
raise NotImplementedError
def __rmul__(self,other):
raise NotImplementedError
# only works if other is not also a Container
def __iadd__(self, other):
self.extend(other)
return self
def __setitem__(self, key, value):
# FIXME: check slices work okay
if isinstance(key, slice):
self._unset_container_multi(self[key])
try:
self._set_container_multi(value)
except ContainerError:
self._set_container_multi(self[key])
raise
else:<|fim▁hole|> self._set_container(self[key])
raise
list.__setitem__(self,key,value)
def __delitem__(self, key):
# FIXME: check slices work okay
if isinstance(key, slice):
self._unset_container_multi(self[key])
else:
self._unset_container(self[key])
list.__delitem__(self,key)
# Needed for python2, forbidden for python3
def __delslice__(self,i,j):
del self[slice(i,j,None)]
class ContainerDict(dict,Container):
"""A ContainerDict is a dict whose children know they're in it.
Each element in the ContainerDict has a .container attribute which points
to the ContainerDict itself. This container pointer is maintained automatically.
"""
def __init__(self, contents=None, **kwargs):
if contents is None:
dict.__init__(self, **kwargs)
else:
dict.__init__(self, contents, **kwargs)
self._set_container_multi(list(self.values()))
def __repr__(self):
return "<CD %s>" % dict.__repr__(self)
def __setitem__(self, key, value):
if key in self:
self._unset_container(self[key])
try:
self._set_container(value)
except ContainerError:
if key in self:
self._set_container(self[key])
raise
dict.__setitem__(self,key,value)
def __delitem__(self, key):
if key in self:
self._unset_container(self[key])
dict.__delitem__(self,key)
def pop(self, key):
if key in self:
self._unset_container(self[key])
return dict.pop(self,key)
def popitem(self):
key, value = dict.popitem(self)
self._unset_container(value)
return key, value
def setdefault(self, key, default=None):
if key not in self:
self._set_container(default)
dict.setdefault(self, key, default)
def update(self, other):
for (k,v) in list(other.items()):
self[k] = v
if __name__=='__main__':
class Gear(object):
def __init__(self, name, container=None):
self.name = name
self.container = container
def __repr__(self):
return "<G "+str(self.name)+">"
gears = [Gear(n) for n in range(10)]
a = Gear("A")
b = Gear("B")
c = Gear("C")
d = Gear("D")
e = Gear("E")
p = ContainerList([a,b,c])
print(p)
try:
p.append(a)
except ContainerError as err:
print(err)
else:
raise AssertionError
print(p[1])
print(p[::2])
p[1] = d
print(p)
p[1] = b
p[::2] = [d,e]
print(p)
del p[:]
p2 = ContainerList([a,b,c])
print(p2)
p2.extend([d,e])
print(p2)
print(p2.pop())
print(p2)
p2.remove(d)
print(p2)
p2 += [d,e]
print(p2)
try:
d = ContainerDict(a=a, b=b, c=c)
except ContainerError as err:
print(err)
else:
raise AssertionError
del p2[:]
d = ContainerDict(a=a, b=b, c=c)
print(d)
print(d["a"])
d["a"] = a
try:
d["a"] = b
except ContainerError as err:
print(err)
else:
raise AssertionError
del d["a"]
d["a"] = a
d.pop("a")
print(d)
d["a"] = a
k,v = d.popitem()
d[k] = v
d.setdefault("e",e)
d.setdefault("e",e)
print(d)
del d["e"]
d.update(dict(e=e))
print(d)<|fim▁end|>
|
self._unset_container(self[key])
try:
self._set_container(value)
except ContainerError:
|
<|file_name|>mpq.rs<|end_file_name|><|fim▁begin|>use std::fs::File;
use std::io::Read;
use std::ptr::copy_nonoverlapping;
<|fim▁hole|>// 11011010100010101000001001101
// 1101101010001
fn sp(){
let bytes: [u8; 4] = [77, 80, 81, 27];
let buf_a: [u8; 2] = [77, 80];
let buf_b: [u8; 2] = [81, 27];
let mut num_a: u32 = 0;
let mut num_b: u32 = 0;
unsafe {
copy_nonoverlapping(buf_a.as_ptr(), &mut num_a as *mut u32 as *mut u8, 2);
copy_nonoverlapping(buf_b.as_ptr(), &mut num_b as *mut u32 as *mut u8, 2);
}
println!("SP Bits: {:16b} {:16b}", num_a.to_le(), num_b.to_le());
}
fn main() {
sp();
let mut f: File = File::open("test.replay").unwrap();
let mut buf = [0u8; 4];
let size = f.read(&mut buf).unwrap();
let mut data: u32 = 0;
unsafe {
copy_nonoverlapping(buf.as_ptr(), &mut data as *mut u32 as *mut u8, 4)
}
let bits = data.to_le();
let _string = std::str::from_utf8(&buf).unwrap().to_owned();
println!("String: {:?} ", _string );
println!("Bytes: {:?} Size: {:?}", buf, size);
println!("U32: {:?} Bits: {:b}", bits, bits );
}<|fim▁end|>
|
// Result:
|
<|file_name|>login_offline_test.go<|end_file_name|><|fim▁begin|>package engine
import (
"os"
"testing"
"time"
"golang.org/x/net/context"
"github.com/keybase/client/go/libkb"
"github.com/keybase/clockwork"
"github.com/stretchr/testify/require"
)
func TestLoginOffline(t *testing.T) {
tc := SetupEngineTest(t, "login")
defer tc.Cleanup()
u1 := CreateAndSignupFakeUser(tc, "login")
Logout(tc)
u1.LoginOrBust(tc)
// do a upak load to make sure it is cached
arg := libkb.NewLoadUserByUIDArg(context.TODO(), tc.G, u1.UID())
_, _, err := tc.G.GetUPAKLoader().Load(arg)
require.NoError(t, err)
// Simulate restarting the service by wiping out the
// passphrase stream cache and cached secret keys
clearCaches(tc.G)
tc.G.GetUPAKLoader().ClearMemory()
// set server uri to nonexistent ip so api calls will fail
prev := os.Getenv("KEYBASE_SERVER_URI")
os.Setenv("KEYBASE_SERVER_URI", "http://127.0.0.127:3333")
defer os.Setenv("KEYBASE_SERVER_URI", prev)
err = tc.G.ConfigureAPI()
require.NoError(t, err)
eng := NewLoginOffline(tc.G)
m := NewMetaContextForTest(tc)
if err := RunEngine2(m, eng); err != nil {
t.Fatal(err)
}
uv, deviceID, deviceName, skey, ekey := tc.G.ActiveDevice.AllFields()
if uv.IsNil() {
t.Errorf("uid is nil, expected it to exist")
}
if !uv.Uid.Equal(u1.UID()) {
t.Errorf("uid: %v, expected %v", uv, u1)
}
if deviceID.IsNil() {
t.Errorf("deviceID is nil, expected it to exist")
}
if deviceName != defaultDeviceName {
t.Errorf("device name: %q, expected %q", deviceName, defaultDeviceName)
}
if skey == nil {
t.Errorf("signing key is nil, expected it to exist")
}
if ekey == nil {
t.Errorf("encryption key is nil, expected it to exist")
}
if tc.G.ActiveDevice.Name() != defaultDeviceName {
t.Errorf("device name: %q, expected %q", tc.G.ActiveDevice.Name(), defaultDeviceName)
}
}
// Use fake clock to test login offline after significant delay
// (make sure upak loader won't use network)
func TestLoginOfflineDelay(t *testing.T) {
tc := SetupEngineTest(t, "login")
defer tc.Cleanup()
fakeClock := clockwork.NewFakeClockAt(time.Now())
tc.G.SetClock(fakeClock)
u1 := CreateAndSignupFakeUser(tc, "login")
Logout(tc)
u1.LoginOrBust(tc)
// do a upak load to make sure it is cached
arg := libkb.NewLoadUserByUIDArg(context.TODO(), tc.G, u1.UID())
_, _, err := tc.G.GetUPAKLoader().Load(arg)
require.NoError(t, err)
// Simulate restarting the service by wiping out the
// passphrase stream cache and cached secret keys
clearCaches(tc.G)
tc.G.GetUPAKLoader().ClearMemory()
// set server uri to nonexistent ip so api calls will fail
prev := os.Getenv("KEYBASE_SERVER_URI")
os.Setenv("KEYBASE_SERVER_URI", "http://127.0.0.127:3333")
defer os.Setenv("KEYBASE_SERVER_URI", prev)
err = tc.G.ConfigureAPI()
require.NoError(t, err)
// advance the clock past the cache timeout
fakeClock.Advance(libkb.CachedUserTimeout * 10)
eng := NewLoginOffline(tc.G)
m := NewMetaContextForTest(tc)
if err := RunEngine2(m, eng); err != nil {
t.Fatal(err)
}
uv, deviceID, deviceName, skey, ekey := tc.G.ActiveDevice.AllFields()
if uv.IsNil() {
t.Errorf("uid is nil, expected it to exist")
}
if !uv.Uid.Equal(u1.UID()) {
t.Errorf("uid: %v, expected %v", uv, u1.UID())
}
if deviceID.IsNil() {
t.Errorf("deviceID is nil, expected it to exist")
}
if deviceName != defaultDeviceName {
t.Errorf("device name: %q, expected %q", deviceName, defaultDeviceName)
}
if skey == nil {
t.Errorf("signing key is nil, expected it to exist")
}
if ekey == nil {
t.Errorf("encryption key is nil, expected it to exist")
}
}
<|fim▁hole|> defer tc.Cleanup()
u1 := CreateAndSignupFakeUser(tc, "login")
Logout(tc)
u1.LoginOrBust(tc)
// Simulate restarting the service by wiping out the
// passphrase stream cache and cached secret keys
tc.SimulateServiceRestart()
tc.G.GetUPAKLoader().ClearMemory()
// invalidate the cache for uid
tc.G.GetUPAKLoader().Invalidate(context.Background(), u1.UID())
// set server uri to nonexistent ip so api calls will fail
prev := os.Getenv("KEYBASE_SERVER_URI")
os.Setenv("KEYBASE_SERVER_URI", "http://127.0.0.127:3333")
defer os.Setenv("KEYBASE_SERVER_URI", prev)
err := tc.G.ConfigureAPI()
require.NoError(t, err)
eng := NewLoginOffline(tc.G)
m := NewMetaContextForTest(tc)
err = RunEngine2(m, eng)
if err != nil {
t.Fatalf("LoginOffline should still work after upak cache invalidation; got %s", err)
}
}<|fim▁end|>
|
// Login offline with nothing in upak cache for self user.
func TestLoginOfflineNoUpak(t *testing.T) {
tc := SetupEngineTest(t, "login")
|
<|file_name|>neato.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# ROS node for the Neato Robot Vacuum
# Copyright (c) 2010 University at Albany. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University at Albany nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
ROS node for Neato XV-11 Robot Vacuum.
"""
__author__ = "[email protected] (Michael Ferguson)"
import roslib; roslib.load_manifest("neato_node")
import rospy
from math import sin,cos
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from tf.broadcaster import TransformBroadcaster
from neato_node.msg import NeatoDropSensor
from neato_driver import xv11, BASE_WIDTH, MAX_SPEED
class NeatoNode:
def __init__(self):
""" Start up connection to the Neato Robot. """
rospy.init_node('neato')
self.port = rospy.get_param('~port', "/dev/ttyACM0")
rospy.loginfo("Using port: %s"%(self.port))
self.robot = xv11(self.port)
rospy.Subscriber("cmd_vel", Twist, self.cmdVelCb)
self.scanPub = rospy.Publisher('base_scan', LaserScan)
self.odomPub = rospy.Publisher('odom',Odometry)
self.dropPub = rospy.Publisher('neato_drop',NeatoDropSensor)
self.odomBroadcaster = TransformBroadcaster()
self.cmd_vel = [0,0]
def spin(self):
encoders = [0,0]
self.x = 0 # position in xy plane
self.y = 0
self.th = 0
then = rospy.Time.now()
# things that don't ever change
scan_link = rospy.get_param('~frame_id','base_laser_link')
scan = LaserScan(header=rospy.Header(frame_id=scan_link))
scan.angle_min = 0
scan.angle_max = 6.26
scan.angle_increment = 0.017437326
scan.range_min = 0.020
scan.range_max = 5.0
odom = Odometry(header=rospy.Header(frame_id="odom"), child_frame_id='base_link')
# main loop of driver
r = rospy.Rate(5)
self.robot.requestScan()
while not rospy.is_shutdown():
# prepare laser scan
scan.header.stamp = rospy.Time.now()
#self.robot.requestScan()
scan.ranges = self.robot.getScanRanges()
# get motor encoder values
left, right = self.robot.getMotors()
# get analog sensors
self.robot.getAnalogSensors()
# get drop sensors<|fim▁hole|> self.robot.setMotors(self.cmd_vel[0], self.cmd_vel[1], max(abs(self.cmd_vel[0]),abs(self.cmd_vel[1])))
# ask for the next scan while we finish processing stuff
self.robot.requestScan()
# now update position information
dt = (scan.header.stamp - then).to_sec()
then = scan.header.stamp
d_left = (left - encoders[0])/1000.0
d_right = (right - encoders[1])/1000.0
encoders = [left, right]
dx = (d_left+d_right)/2
dth = (d_right-d_left)/(BASE_WIDTH/1000.0)
x = cos(dth)*dx
y = -sin(dth)*dx
self.x += cos(self.th)*x - sin(self.th)*y
self.y += sin(self.th)*x + cos(self.th)*y
self.th += dth
# prepare tf from base_link to odom
quaternion = Quaternion()
quaternion.z = sin(self.th/2.0)
quaternion.w = cos(self.th/2.0)
# prepare odometry
odom.header.stamp = rospy.Time.now()
odom.pose.pose.position.x = self.x
odom.pose.pose.position.y = self.y
odom.pose.pose.position.z = 0
odom.pose.pose.orientation = quaternion
odom.twist.twist.linear.x = dx/dt
odom.twist.twist.angular.z = dth/dt
# prepare drop
drop = NeatoDropSensor()
drop.header.stamp = rospy.Time.now()
drop.left = left_drop
drop.right = right_drop
# publish everything
self.odomBroadcaster.sendTransform( (self.x, self.y, 0), (quaternion.x, quaternion.y, quaternion.z, quaternion.w),
then, "base_link", "odom" )
self.scanPub.publish(scan)
self.odomPub.publish(odom)
self.dropPub.publish(drop)
# wait, then do it again
r.sleep()
# shut down
self.robot.setLDS("off")
self.robot.setTestMode("off")
def cmdVelCb(self,req):
x = req.linear.x * 1000
th = req.angular.z * (BASE_WIDTH/2)
k = max(abs(x-th),abs(x+th))
# sending commands higher than max speed will fail
if k > MAX_SPEED:
x = x*MAX_SPEED/k; th = th*MAX_SPEED/k
self.cmd_vel = [ int(x-th) , int(x+th) ]
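        # Worked example (added commentary, numbers illustrative):
        #   linear.x=0.2 m/s, angular.z=0        -> x=200 mm/s, th=0  -> cmd_vel=[200, 200]
        #   linear.x=0,      angular.z=1.0 rad/s -> th=BASE_WIDTH/2   -> wheels spin in opposite directions
        # If the faster wheel would exceed MAX_SPEED, both wheel speeds are scaled down together.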
if __name__ == "__main__":
robot = NeatoNode()
robot.spin()<|fim▁end|>
|
left_drop = self.robot.state["LeftDropInMM"]
right_drop = self.robot.state["RightDropInMM"]
# send updated movement commands
|
<|file_name|>webpack.config.js<|end_file_name|><|fim▁begin|>var path = require("path");
var webpack = require("webpack");
module.exports = function(entries, release) {
var config = {
// entry file to start from
entry: entries,
output: {
// directory to output to
path: path.resolve("./lib"),
// output file name
filename: "[name].js"
},
plugins: [
/*
* This makes the left side variable available in every module and assigned to the right side module
*/
new webpack.ProvidePlugin({
$: "jquery",
jQuery: "jquery",
"window.jQuery": "jquery",<|fim▁hole|>
if (release) {
var uglify = new webpack.optimize.UglifyJsPlugin({
beautify: false,
mangle: {
screw_ie8: true,
keep_fnames: true
},
compress: {
screw_ie8: true
},
comments: false
});
config.plugins.push(uglify);
}else{
//config.devtool = 'cheap-eval-source-map';
}
return config;
};<|fim▁end|>
|
"window.$": "jquery"
})
]
};
|
<|file_name|>test_deprecations.py<|end_file_name|><|fim▁begin|>import sqlalchemy as sa
from sqlalchemy import inspect
from sqlalchemy.ext import declarative as legacy_decl
from sqlalchemy.ext.declarative import instrument_declarative
from sqlalchemy.orm import Mapper
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_deprecated_20
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_true
class TestInstrumentDeclarative(fixtures.TestBase):
def test_ok(self):
class Foo(object):
__tablename__ = "foo"
id = sa.Column(sa.Integer, primary_key=True)
meta = sa.MetaData()
reg = {}
with expect_deprecated_20(
"the instrument_declarative function is deprecated"
):
instrument_declarative(Foo, reg, meta)
mapper = sa.inspect(Foo)
is_true(isinstance(mapper, Mapper))
is_(mapper.class_, Foo)
<|fim▁hole|> def _expect_warning(self, name):
return expect_deprecated_20(
r"The ``%s\(\)`` function is now available as "
r"sqlalchemy.orm.%s\(\)" % (name, name)
)
def test_declarative_base(self):
with self._expect_warning("declarative_base"):
Base = legacy_decl.declarative_base()
class Foo(Base):
__tablename__ = "foo"
id = sa.Column(sa.Integer, primary_key=True)
assert inspect(Foo).mapper
def test_as_declarative(self):
with self._expect_warning("as_declarative"):
@legacy_decl.as_declarative()
class Base(object):
pass
class Foo(Base):
__tablename__ = "foo"
id = sa.Column(sa.Integer, primary_key=True)
assert inspect(Foo).mapper
def test_has_inherited_table(self, registry):
@registry.mapped
class Foo(object):
__tablename__ = "foo"
id = sa.Column(sa.Integer, primary_key=True)
@registry.mapped
class Bar(Foo):
__tablename__ = "bar"
id = sa.Column(sa.ForeignKey("foo.id"), primary_key=True)
with self._expect_warning("has_inherited_table"):
is_true(legacy_decl.has_inherited_table(Bar))
with self._expect_warning("has_inherited_table"):
is_false(legacy_decl.has_inherited_table(Foo))
def test_synonym_for(self, registry):
with self._expect_warning("synonym_for"):
@registry.mapped
class Foo(object):
__tablename__ = "foo"
id = sa.Column(sa.Integer, primary_key=True)
@legacy_decl.synonym_for("id")
@property
def id_prop(self):
return self.id
f1 = Foo(id=5)
eq_(f1.id_prop, 5)<|fim▁end|>
|
class DeprecatedImportsTest(fixtures.TestBase):
|
<|file_name|>serialize.py<|end_file_name|><|fim▁begin|># encoding=utf-8
from .types.compound import (
ModelType, EMPTY_LIST, EMPTY_DICT, MultiType
)
import collections
import itertools
###
### Field ACL's
###
class Role(collections.Set):
"""A Role object can be used to filter specific fields against a sequence.
The Role has a set of names and one function that the specific field is
filtered with.
<|fim▁hole|> A Role can be operated on as a Set object representing its fields. It's
important to note that when combining multiple roles using these operations
only the function of the first role is kept on the resulting role.
"""
def __init__(self, function, fields):
self.function = function
self.fields = set(fields)
def _from_iterable(self, iterable):
return Role(self.function, iterable)
def __contains__(self, value):
return value in self.fields
def __iter__(self):
return iter(self.fields)
def __len__(self):
return len(self.fields)
def __eq__(self, other):
return (self.function.func_name == other.function.func_name and
self.fields == other.fields)
def __str__(self):
return '%s(%s)' % (self.function.func_name,
', '.join("'%s'" % f for f in self.fields))
def __repr__(self):
return '<Role %s>' % str(self)
# edit role fields
def __add__(self, other):
fields = self.fields.union(other)
return self._from_iterable(fields)
def __sub__(self, other):
fields = self.fields.difference(other)
return self._from_iterable(fields)
# apply role to field
def __call__(self, k, v):
return self.function(k, v, self.fields)
# static filter functions
@staticmethod
def wholelist(k, v, seq):
return False
@staticmethod
def whitelist(k, v, seq):
if seq is not None and len(seq) > 0:
return k not in seq
# Default to rejecting the value
return True
@staticmethod
def blacklist(k, v, seq):
if seq is not None and len(seq) > 0:
return k in seq
# Default to not rejecting the value
return False
def wholelist(*field_list):
"""Returns a function that evicts nothing. Exists mainly to be an explicit
    allowance of all fields instead of using an empty blacklist.
"""
return Role(Role.wholelist, field_list)
def whitelist(*field_list):
"""Returns a function that operates as a whitelist for the provided list of
fields.
A whitelist is a list of fields explicitly named that are allowed.
"""
return Role(Role.whitelist, field_list)
def blacklist(*field_list):
"""Returns a function that operates as a blacklist for the provided list of
fields.
A blacklist is a list of fields explicitly named that are not allowed.
"""
return Role(Role.blacklist, field_list)
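# Illustrative sketch of the factories above (added, not part of the original
# module). Calling a Role returns True when the field should be *skipped*:
#   r = whitelist('id', 'name')
#   r('id', 123)       # -> False (field is kept)
#   r('secret', 'x')   # -> True  (field is filtered out)
#   r + ['email']      # -> whitelist Role over {'id', 'name', 'email'}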
###
### Serialization
###
def filter_roles_instance(fields, roles):
"Skipping field not requested"
if roles:
return [i for i in fields.iteritems() if i[0] in roles.fields]
else:
return fields.iteritems()
def atoms(cls, instance_or_dict, include_serializables=True, roles=None):
"""
Iterator for the atomic components of a model definition and relevant data
    that creates a threeple of the field's name, the instance of its type, and
    its value.
"""
if include_serializables:
all_fields = itertools.chain(filter_roles_instance(cls._fields, roles),
filter_roles_instance(
cls._serializables, roles))
else:
all_fields = filter_roles_instance(cls._fields, roles)
return ((field_name, field, instance_or_dict[field_name])
for field_name, field in all_fields)
def allow_none(cls, field):
"""
Inspects a field and class for ``serialize_when_none`` setting.
The setting defaults to the value of the class. A field can override the
    class setting with its own ``serialize_when_none`` setting.
"""
allowed = cls._options.serialize_when_none
if field.serialize_when_none is not None:
allowed = field.serialize_when_none
return allowed
def apply_shape(cls, instance_or_dict, role, field_converter, model_converter,
raise_error_on_role=False, include_serializables=True):
"""
The apply shape function is intended to be a general loop definition that
can be used for any form of data shaping, such as application of roles or
how a field is transformed.
"""
data = {}
### Translate `role` into `gottago` function
gottago = wholelist()
if role in cls._options.roles:
gottago = cls._options.roles[role]
elif role and raise_error_on_role:
error_msg = u'%s has no role "%s"'
raise ValueError(error_msg % (cls, role))
### Transformation loop
attr_gen = atoms(cls, instance_or_dict, include_serializables, gottago)
for field_name, field, value in attr_gen:
serialized_name = field.serialized_name or field_name
### Value found, convert and store it.
if value is not None:
if isinstance(field, MultiType):
if isinstance(field, ModelType):
primitive_value = model_converter(field, value)
primitive_value = field.filter_by_role(value, primitive_value,
role,
include_serializables=include_serializables)
else:
primitive_value = field_converter(field, value)
primitive_value = field.filter_by_role(value, primitive_value,
role,
raise_error_on_role=raise_error_on_role)
else:
primitive_value = field_converter(field, value)
if primitive_value is not None or allow_none(cls, field):
data[serialized_name] = primitive_value
        ### Store None if requested
elif allow_none(cls, field):
data[serialized_name] = value
return data
def serialize(instance, role, raise_error_on_role=True):
"""
Implements serialization as a mechanism to convert ``Model`` instances into
dictionaries that represent the field_names => converted data.
The conversion is done by calling ``to_primitive`` on both model and field
instances.
"""
field_converter = lambda field, value: field.to_primitive(value)
model_converter = lambda f, v: f.to_primitive(v, raise_error_on_role)
data = apply_shape(instance, instance, role, field_converter,
model_converter, raise_error_on_role)
return data
def expand(data, context=None):
expanded_dict = {}
if context is None:
context = expanded_dict
for k, v in data.iteritems():
try:
key, remaining = k.split(".", 1)
except ValueError:
if not (v in (EMPTY_DICT, EMPTY_LIST) and k in expanded_dict):
expanded_dict[k] = v
else:
current_context = context.setdefault(key, {})
if current_context in (EMPTY_DICT, EMPTY_LIST):
current_context = {}
context[key] = current_context
current_context.update(expand({remaining: v}, current_context))
return expanded_dict
def flatten_to_dict(o, prefix=None, ignore_none=True):
if hasattr(o, "iteritems"):
iterator = o.iteritems()
else:
iterator = enumerate(o)
flat_dict = {}
for k, v in iterator:
if prefix:
key = ".".join(map(unicode, (prefix, k)))
else:
key = k
if v == []:
v = EMPTY_LIST
elif v == {}:
v = EMPTY_DICT
if isinstance(v, (dict, list)):
flat_dict.update(flatten_to_dict(v, prefix=key))
elif v is not None:
flat_dict[key] = v
elif not ignore_none:
flat_dict[key] = None
return flat_dict
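# Example round-trip (added for illustration):
#   expand({"a.b": 1, "a.c": 2, "d": 3})          -> {"a": {"b": 1, "c": 2}, "d": 3}
#   flatten_to_dict({"a": {"b": 1}}, prefix="x")  -> {"x.a.b": 1}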
def flatten(instance, role, raise_error_on_role=True, ignore_none=True,
prefix=None, include_serializables=False, **kwargs):
i = include_serializables
field_converter = lambda field, value: field.to_primitive(value)
model_converter = lambda f, v: f.to_primitive(v, include_serializables=i)
data = apply_shape(instance, instance, role, field_converter,
model_converter,
include_serializables=include_serializables)
return flatten_to_dict(data, prefix=prefix, ignore_none=ignore_none)<|fim▁end|>
| |
<|file_name|>runtime.py<|end_file_name|><|fim▁begin|><|fim▁hole|>#
# Copyright (C) 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Module implementing configuration details at runtime.
"""
import grp
import pwd
import threading
from ganeti import constants
from ganeti import errors
_priv = None
_priv_lock = threading.Lock()
def GetUid(user, _getpwnam):
"""Retrieve the uid from the database.
@type user: string
@param user: The username to retrieve
@return: The resolved uid
"""
try:
return _getpwnam(user).pw_uid
except KeyError, err:
raise errors.ConfigurationError("User '%s' not found (%s)" % (user, err))
def GetGid(group, _getgrnam):
"""Retrieve the gid from the database.
@type group: string
@param group: The group name to retrieve
@return: The resolved gid
"""
try:
return _getgrnam(group).gr_gid
except KeyError, err:
raise errors.ConfigurationError("Group '%s' not found (%s)" % (group, err))
class GetentResolver:
"""Resolves Ganeti uids and gids by name.
@ivar masterd_uid: The resolved uid of the masterd user
@ivar masterd_gid: The resolved gid of the masterd group
@ivar confd_uid: The resolved uid of the confd user
@ivar confd_gid: The resolved gid of the confd group
@ivar rapi_uid: The resolved uid of the rapi user
@ivar rapi_gid: The resolved gid of the rapi group
@ivar noded_uid: The resolved uid of the noded user
@ivar daemons_gid: The resolved gid of the daemons group
@ivar admin_gid: The resolved gid of the admin group
"""
def __init__(self, _getpwnam=pwd.getpwnam, _getgrnam=grp.getgrnam):
"""Initialize the resolver.
"""
# Daemon pairs
self.masterd_uid = GetUid(constants.MASTERD_USER, _getpwnam)
self.masterd_gid = GetGid(constants.MASTERD_GROUP, _getgrnam)
self.confd_uid = GetUid(constants.CONFD_USER, _getpwnam)
self.confd_gid = GetGid(constants.CONFD_GROUP, _getgrnam)
self.rapi_uid = GetUid(constants.RAPI_USER, _getpwnam)
self.rapi_gid = GetGid(constants.RAPI_GROUP, _getgrnam)
self.noded_uid = GetUid(constants.NODED_USER, _getpwnam)
# Misc Ganeti groups
self.daemons_gid = GetGid(constants.DAEMONS_GROUP, _getgrnam)
self.admin_gid = GetGid(constants.ADMIN_GROUP, _getgrnam)
def GetEnts(resolver=GetentResolver):
"""Singleton wrapper around resolver instance.
  As this method may be accessed by multiple threads at the same time,
  we need to handle thread-safety carefully
"""
# We need to use the global keyword here
global _priv # pylint: disable-msg=W0603
if not _priv:
_priv_lock.acquire()
try:
if not _priv:
# W0621: Redefine '_priv' from outer scope (used for singleton)
_priv = resolver() # pylint: disable-msg=W0621
finally:
_priv_lock.release()
return _priv<|fim▁end|>
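# Usage sketch (added for illustration):
#   ents = GetEnts()       # first call constructs and caches a GetentResolver
#   ents is GetEnts()      # -> True, later calls reuse the cached instance
#   ents.masterd_uid, ents.daemons_gid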
| |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from flask import Blueprint
from flask import current_app
from flask import request
from flask import jsonify
from flask import abort
from flask import render_template
from flask import redirect
from flask import url_for
from flask import flash
from werkzeug.exceptions import NotFound
from printus.web.models import Report
from printus.web.models import User
from printus.web.forms import UserForm
from printus.web.forms import ReportForm
from printus.web.forms import LoginForm
from printus.web.forms import SignupForm
from printus.web.forms import ContactForm
from printus.web.extensions import login_manager
from flask.ext.login import login_required, current_user, login_user, logout_user
bp = Blueprint('general', __name__, template_folder='templates')
@bp.route('/')
@login_required
def index():
try:
page = long(request.args.get('page', 1))
except Exception:
page = 1
try:
pagination = current_user.reports.order_by('created_at desc').paginate(page, 10)
except NotFound:
page = 1
pagination = current_user.reports.order_by('created_at desc').paginate(page, 10)
return render_template('reports.index.html', pagination=pagination)
@bp.route('/reports/new', methods=['GET', 'POST'])<|fim▁hole|> if form.validate_on_submit():
flash('Report created')
return redirect(url_for('general.index'))
return render_template('reports.new.html', form=form)
@bp.route('/profile', methods=['GET', 'POST'])
@login_required
def profile():
form = UserForm(obj=current_user)
if form.validate_on_submit():
form.populate_obj(current_user)
db.session.add(current_user)
db.session.commit()
return render_template('profile.html', form=form)
@bp.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
signupForm = SignupForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data, password=form.password.data).first()
if not user:
return render_template("login.html", form=form, signupForm=signupForm)
else:
login_user(user)
return redirect(request.args.get("next") or url_for("general.index"))
return render_template("login.html", form=form, signupForm=signupForm)
@bp.route('/signup', methods=['GET', 'POST'])
def signup():
form = SignupForm()
if form.validate_on_submit():
return redirect(request.args.get('next') or url_for('general.index'))
return render_template("signup.html", form=form)
@bp.route('/logout')
@login_required
def logout():
logout_user()
flash('Logged out.')
return redirect(url_for('general.index'))
@bp.route('/contact_us')
@login_required
def contact_us():
form = ContactForm()
if form.validate_on_submit():
return redirect(url_for('general.index'))
return render_template('contact_us.html', form=form)<|fim▁end|>
|
@login_required
def reports_new():
form = ReportForm()
|
<|file_name|>RequestMultipleSecurityTokens.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# papyon - a python client library for Msn
#
# Copyright (C) 2005-2006 Ali Sabil <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import xml.sax.saxutils as xml
class LiveService(object):
CONTACTS = ("contacts.msn.com", "MBI")
MESSENGER = ("messenger.msn.com", "?id=507")
MESSENGER_CLEAR = ("messengerclear.live.com", "MBI_KEY_OLD")
MESSENGER_SECURE = ("messengersecure.live.com", "MBI_SSL")
SPACES = ("spaces.live.com", "MBI")
STORAGE = ("storage.msn.com", "MBI")
TB = ("http://Passport.NET/tb", None)
VOICE = ("voice.messenger.msn.com", "?id=69264")
@classmethod
def url_to_service(cls, url):
for attr_name in dir(cls):
if attr_name.startswith('_'):
continue
attr = getattr(cls, attr_name)<|fim▁hole|> return attr
return None
def transport_headers():
"""Returns a dictionary, containing transport (http) headers
to use for the request"""
return {}
def soap_action():
"""Returns the SOAPAction value to pass to the transport
or None if no SOAPAction needs to be specified"""
return None
def soap_header(account, password):
"""Returns the SOAP xml header"""
return """
<ps:AuthInfo xmlns:ps="http://schemas.microsoft.com/Passport/SoapServices/PPCRL" Id="PPAuthInfo">
<ps:HostingApp>{7108E71A-9926-4FCB-BCC9-9A9D3F32E423}</ps:HostingApp>
<ps:BinaryVersion>4</ps:BinaryVersion>
<ps:UIVersion>1</ps:UIVersion>
<ps:Cookies/>
<ps:RequestParams>AQAAAAIAAABsYwQAAAAxMDMz</ps:RequestParams>
</ps:AuthInfo>
<wsse:Security xmlns:wsse="http://schemas.xmlsoap.org/ws/2003/06/secext">
<wsse:UsernameToken Id="user">
<wsse:Username>%(account)s</wsse:Username>
<wsse:Password>%(password)s</wsse:Password>
</wsse:UsernameToken>
</wsse:Security>""" % {'account': xml.escape(account),
'password': xml.escape(password)}
def soap_body(*tokens):
"""Returns the SOAP xml body"""
token_template = """
<wst:RequestSecurityToken xmlns:wst="http://schemas.xmlsoap.org/ws/2004/04/trust" Id="RST%(id)d">
<wst:RequestType>http://schemas.xmlsoap.org/ws/2004/04/security/trust/Issue</wst:RequestType>
<wsp:AppliesTo xmlns:wsp="http://schemas.xmlsoap.org/ws/2002/12/policy">
<wsa:EndpointReference xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/03/addressing">
<wsa:Address>%(address)s</wsa:Address>
</wsa:EndpointReference>
</wsp:AppliesTo>
%(policy_reference)s
</wst:RequestSecurityToken>"""
policy_reference_template = """
<wsse:PolicyReference xmlns:wsse="http://schemas.xmlsoap.org/ws/2003/06/secext" URI=%(uri)s/>"""
tokens = list(tokens)
if LiveService.TB in tokens:
tokens.remove(LiveService.TB)
assert(len(tokens) >= 1)
body = token_template % \
{'id': 0,
'address': xml.escape(LiveService.TB[0]),
'policy_reference': ''}
for id, token in enumerate(tokens):
if token[1] is not None:
policy_reference = policy_reference_template % \
{'uri': xml.quoteattr(token[1])}
else:
policy_reference = ""
t = token_template % \
{'id': id + 1,
'address': xml.escape(token[0]),
'policy_reference': policy_reference}
body += t
return '<ps:RequestMultipleSecurityTokens ' \
'xmlns:ps="http://schemas.microsoft.com/Passport/SoapServices/PPCRL" ' \
'Id="RSTS">%s</ps:RequestMultipleSecurityTokens>' % body
def process_response(soap_response):
body = soap_response.body
return body.findall("./wst:RequestSecurityTokenResponseCollection/" \
"wst:RequestSecurityTokenResponse")<|fim▁end|>
|
if isinstance(attr, tuple) and attr[0] == url:
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""The initialization file for the Pywikibot framework."""
#
# (C) Pywikibot team, 2008-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__release__ = '2.0rc4'
__version__ = '$Id: e26392a530582f286edf2d99e729218b2e93405e $'
import datetime
import math
import re
import sys
import threading
import json
if sys.version_info[0] > 2:
from queue import Queue
long = int
else:
from Queue import Queue
from warnings import warn
# Use pywikibot. prefix for all in-package imports; this is to prevent
# confusion with similarly-named modules in version 1 framework, for users
# who want to continue using both
from pywikibot import config2 as config
from pywikibot.bot import (
output, warning, error, critical, debug, stdout, exception,
input, input_choice, input_yn, inputChoice, handle_args, showHelp, ui, log,
calledModuleName, Bot, CurrentPageBot, WikidataBot, QuitKeyboardInterrupt,
# the following are flagged as deprecated on usage
handleArgs,
)
from pywikibot.exceptions import (
Error, InvalidTitle, BadTitle, NoPage, SectionError,
SiteDefinitionError, NoSuchSite, UnknownSite, UnknownFamily,
UnknownExtension,
NoUsername, UserBlocked,
PageRelatedError, IsRedirectPage, IsNotRedirectPage,
PageSaveRelatedError, PageNotSaved, OtherPageSaveError,
LockedPage, CascadeLockedPage, LockedNoPage, NoCreateError,
EditConflict, PageDeletedConflict, PageCreatedConflict,
ServerError, FatalServerError, Server504Error,
CaptchaError, SpamfilterError, CircularRedirect, InterwikiRedirectPage,
WikiBaseError, CoordinateGlobeUnknownException,
)
from pywikibot.tools import UnicodeMixin, redirect_func
from pywikibot.i18n import translate
from pywikibot.data.api import UploadWarning
from pywikibot.diff import PatchManager
import pywikibot.textlib as textlib
import pywikibot.tools
textlib_methods = (
'unescape', 'replaceExcept', 'removeDisabledParts', 'removeHTMLParts',
'isDisabled', 'interwikiFormat', 'interwikiSort',
'getLanguageLinks', 'replaceLanguageLinks',
'removeLanguageLinks', 'removeLanguageLinksAndSeparator',
'getCategoryLinks', 'categoryFormat', 'replaceCategoryLinks',
'removeCategoryLinks', 'removeCategoryLinksAndSeparator',
'replaceCategoryInPlace', 'compileLinkR', 'extract_templates_and_params',
'TimeStripper',
)
# pep257 doesn't understand when the first entry is on the next line
__all__ = ('config', 'ui', 'UnicodeMixin', 'translate',
'Page', 'FilePage', 'Category', 'Link', 'User',
'ItemPage', 'PropertyPage', 'Claim',
'html2unicode', 'url2unicode', 'unicode2html',
'stdout', 'output', 'warning', 'error', 'critical', 'debug',
'exception', 'input_choice', 'input', 'input_yn', 'inputChoice',
'handle_args', 'handleArgs', 'showHelp', 'ui', 'log',
'calledModuleName', 'Bot', 'CurrentPageBot', 'WikidataBot',
'Error', 'InvalidTitle', 'BadTitle', 'NoPage', 'SectionError',
'SiteDefinitionError', 'NoSuchSite', 'UnknownSite', 'UnknownFamily',
'UnknownExtension',
'NoUsername', 'UserBlocked', 'UserActionRefuse',
'PageRelatedError', 'IsRedirectPage', 'IsNotRedirectPage',
'PageSaveRelatedError', 'PageNotSaved', 'OtherPageSaveError',
'LockedPage', 'CascadeLockedPage', 'LockedNoPage', 'NoCreateError',
'EditConflict', 'PageDeletedConflict', 'PageCreatedConflict',
'UploadWarning',
'ServerError', 'FatalServerError', 'Server504Error',
'CaptchaError', 'SpamfilterError', 'CircularRedirect',
'InterwikiRedirectPage',
'WikiBaseError', 'CoordinateGlobeUnknownException',
'QuitKeyboardInterrupt',
)
# flake8 is unable to detect concatenation in the same operation
# like:
# ) + textlib_methods
# pep257 also doesn't support __all__ multiple times in a document
# so instead use this trick
globals()['__all__'] = globals()['__all__'] + textlib_methods
if sys.version_info[0] == 2:
# T111615: Python 2 requires __all__ is bytes
globals()['__all__'] = tuple(bytes(item) for item in __all__)
for _name in textlib_methods:
target = getattr(textlib, _name)
wrapped_func = redirect_func(target)
globals()[_name] = wrapped_func
deprecated = redirect_func(pywikibot.tools.deprecated)
deprecate_arg = redirect_func(pywikibot.tools.deprecate_arg)
class Timestamp(datetime.datetime):
"""Class for handling MediaWiki timestamps.
This inherits from datetime.datetime, so it can use all of the methods
and operations of a datetime object. To ensure that the results of any
operation are also a Timestamp object, be sure to use only Timestamp
objects (and datetime.timedeltas) in any operation.
Use Timestamp.fromISOformat() and Timestamp.fromtimestampformat() to
create Timestamp objects from MediaWiki string formats.
As these constructors are typically used to create objects using data
    provided by site and page methods, some of which return a Timestamp
when previously they returned a MediaWiki string representation, these
methods also accept a Timestamp object, in which case they return a clone.
Use Site.getcurrenttime() for the current time; this is more reliable
than using Timestamp.utcnow().
"""
mediawikiTSFormat = "%Y%m%d%H%M%S"
ISO8601Format = "%Y-%m-%dT%H:%M:%SZ"
def clone(self):
"""Clone this instance."""
return self.replace(microsecond=self.microsecond)
@classmethod
def fromISOformat(cls, ts):
"""Convert an ISO 8601 timestamp to a Timestamp object."""
        # If inadvertently passed a Timestamp object, use replace()
# to create a clone.
if isinstance(ts, cls):
return ts.clone()
return cls.strptime(ts, cls.ISO8601Format)
@classmethod
def fromtimestampformat(cls, ts):
"""Convert a MediaWiki internal timestamp to a Timestamp object."""
        # If inadvertently passed a Timestamp object, use replace()
# to create a clone.
if isinstance(ts, cls):
return ts.clone()
return cls.strptime(ts, cls.mediawikiTSFormat)
def isoformat(self):
"""
Convert object to an ISO 8601 timestamp accepted by MediaWiki.
datetime.datetime.isoformat does not postfix the ISO formatted date
with a 'Z' unless a timezone is included, which causes MediaWiki
~1.19 and earlier to fail.
"""
return self.strftime(self.ISO8601Format)
toISOformat = redirect_func(isoformat, old_name='toISOformat',
class_name='Timestamp')
def totimestampformat(self):
"""Convert object to a MediaWiki internal timestamp."""
return self.strftime(self.mediawikiTSFormat)
def __str__(self):
"""Return a string format recognized by the API."""
return self.isoformat()
def __add__(self, other):
"""Perform addition, returning a Timestamp instead of datetime."""
newdt = super(Timestamp, self).__add__(other)
if isinstance(newdt, datetime.datetime):
return Timestamp(newdt.year, newdt.month, newdt.day, newdt.hour,
newdt.minute, newdt.second, newdt.microsecond,
newdt.tzinfo)
else:
return newdt
def __sub__(self, other):
"""Perform substraction, returning a Timestamp instead of datetime."""
newdt = super(Timestamp, self).__sub__(other)
if isinstance(newdt, datetime.datetime):
return Timestamp(newdt.year, newdt.month, newdt.day, newdt.hour,
newdt.minute, newdt.second, newdt.microsecond,
newdt.tzinfo)
else:
return newdt
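# A small illustration of the Timestamp helpers above (hypothetical values,
# not part of the original module):
#   ts = Timestamp.fromISOformat('2014-02-03T04:05:06Z')
#   ts.totimestampformat()                 # -> '20140203040506'
#   str(ts + datetime.timedelta(hours=1))  # -> '2014-02-03T05:05:06Z'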
class Coordinate(object):
"""
Class for handling and storing Coordinates.
For now its just being used for DataSite, but
in the future we can use it for the GeoData extension.
"""
def __init__(self, lat, lon, alt=None, precision=None, globe='earth',
typ="", name="", dim=None, site=None, entity=''):
"""
Represent a geo coordinate.
@param lat: Latitude
@type lat: float
@param lon: Longitude
@type lon: float
        @param alt: Altitude? TODO FIXME
@param precision: precision
@type precision: float
@param globe: Which globe the point is on
@type globe: str
@param typ: The type of coordinate point
@type typ: str
@param name: The name
@type name: str
@param dim: Dimension (in meters)
@type dim: int
@param entity: The URL entity of a Wikibase item
@type entity: str
"""
self.lat = lat
self.lon = lon
self.alt = alt
self._precision = precision
if globe:
globe = globe.lower()
self.globe = globe
self._entity = entity
self.type = typ
self.name = name
self._dim = dim
if not site:
self.site = Site().data_repository()
else:
self.site = site
def __repr__(self):
string = 'Coordinate(%s, %s' % (self.lat, self.lon)
if self.globe != 'earth':
string += ', globe="%s"' % self.globe
string += ')'
return string
@property
def entity(self):
if self._entity:
return self._entity
return self.site.globes()[self.globe]
def toWikibase(self):
"""
Export the data to a JSON object for the Wikibase API.
FIXME: Should this be in the DataSite object?
"""
if self.globe not in self.site.globes():
raise CoordinateGlobeUnknownException(
u"%s is not supported in Wikibase yet."
% self.globe)
return {'latitude': self.lat,
'longitude': self.lon,
'altitude': self.alt,
'globe': self.entity,
'precision': self.precision,
}
@classmethod
def fromWikibase(cls, data, site):
"""Constructor to create an object from Wikibase's JSON output."""
globes = {}
for k in site.globes():
globes[site.globes()[k]] = k
globekey = data['globe']
if globekey:
globe = globes.get(data['globe'])
else:
# Default to earth or should we use None here?
globe = 'earth'
return cls(data['latitude'], data['longitude'],
data['altitude'], data['precision'],
globe, site=site, entity=data['globe'])
@property
def precision(self):
u"""
Return the precision of the geo coordinate.
The biggest error (in degrees) will be given by the longitudinal error;
the same error in meters becomes larger (in degrees) further up north.
We can thus ignore the latitudinal error.
The longitudinal can be derived as follows:
In small angle approximation (and thus in radians):
M{Δλ ≈ Δpos / r_φ}, where r_φ is the radius of earth at the given latitude.
Δλ is the error in longitude.
M{r_φ = r cos φ}, where r is the radius of earth, φ the latitude
Therefore::
precision = math.degrees(self._dim/(radius*math.cos(math.radians(self.lat))))
"""
if not self._precision:
radius = 6378137 # TODO: Support other globes
self._precision = math.degrees(
self._dim / (radius * math.cos(math.radians(self.lat))))
return self._precision
def precisionToDim(self):
"""Convert precision from Wikibase to GeoData's dim."""
raise NotImplementedError
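# Worked example of the precision formula documented above (illustrative, not
# part of the original module): dim = 1000 m at the equator gives roughly
# math.degrees(1000 / 6378137) ~= 0.009 degrees of longitude.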
class WbTime(object):
"""A Wikibase time representation."""
PRECISION = {'1000000000': 0,
'100000000': 1,
'10000000': 2,
'1000000': 3,
'100000': 4,
'10000': 5,
'millenia': 6,
'century': 7,
'decade': 8,
'year': 9,
'month': 10,
'day': 11,
'hour': 12,
'minute': 13,
'second': 14
}
FORMATSTR = '{0:+012d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}Z'
def __init__(self, year=None, month=None, day=None,
hour=None, minute=None, second=None,
precision=None, before=0, after=0,
timezone=0, calendarmodel=None, site=None):
"""
Create a new WbTime object.
The precision can be set by the Wikibase int value (0-14) or by a human
readable string, e.g., 'hour'. If no precision is given, it is set
according to the given time units.
"""
if year is None:
raise ValueError('no year given')
self.precision = self.PRECISION['second']
if second is None:
self.precision = self.PRECISION['minute']
second = 0
if minute is None:
self.precision = self.PRECISION['hour']
minute = 0
if hour is None:
self.precision = self.PRECISION['day']
hour = 0
if day is None:
self.precision = self.PRECISION['month']
day = 1
if month is None:
self.precision = self.PRECISION['year']
month = 1
self.year = long(year)
self.month = month
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.after = after
self.before = before
self.timezone = timezone
if calendarmodel is None:
if site is None:
site = Site().data_repository()
calendarmodel = site.calendarmodel()
self.calendarmodel = calendarmodel
# if precision is given it overwrites the autodetection above
if precision is not None:
if (isinstance(precision, int) and
precision in self.PRECISION.values()):
self.precision = precision
elif precision in self.PRECISION:
self.precision = self.PRECISION[precision]
else:
raise ValueError('Invalid precision: "%s"' % precision)
@classmethod
def fromTimestr(cls, datetimestr, precision=14, before=0, after=0,
timezone=0, calendarmodel=None, site=None):
match = re.match(r'([-+]?\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)Z',
datetimestr)
if not match:
raise ValueError(u"Invalid format: '%s'" % datetimestr)
t = match.groups()
return cls(long(t[0]), int(t[1]), int(t[2]),
int(t[3]), int(t[4]), int(t[5]),
precision, before, after, timezone, calendarmodel, site)
def toTimestr(self):
"""
Convert the data to a UTC date/time string.
@return: str
"""
return self.FORMATSTR.format(self.year, self.month, self.day,
self.hour, self.minute, self.second)
def toWikibase(self):
"""
Convert the data to a JSON object for the Wikibase API.
@return: dict
"""
json = {'time': self.toTimestr(),
'precision': self.precision,
'after': self.after,
'before': self.before,
'timezone': self.timezone,
'calendarmodel': self.calendarmodel
}
return json
@classmethod
def fromWikibase(cls, ts):
return cls.fromTimestr(ts[u'time'], ts[u'precision'],
ts[u'before'], ts[u'after'],
ts[u'timezone'], ts[u'calendarmodel'])
def __str__(self):
return json.dumps(self.toWikibase(), indent=4, sort_keys=True,
separators=(',', ': '))
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return u"WbTime(year=%(year)d, month=%(month)d, day=%(day)d, " \
u"hour=%(hour)d, minute=%(minute)d, second=%(second)d, " \
u"precision=%(precision)d, before=%(before)d, after=%(after)d, " \
u"timezone=%(timezone)d, calendarmodel='%(calendarmodel)s')" \
% self.__dict__
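# Illustrative example (not part of the original module; assumes a configured
# data repository for the default calendar model):
#   WbTime(year=2010, month=2, day=3).toTimestr()
#   # -> '+00000002010-02-03T00:00:00Z'  (precision autodetected as 'day')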
class WbQuantity(object):
"""A Wikibase quantity representation."""
def __init__(self, amount, unit=None, error=None):
u"""
Create a new WbQuantity object.
@param amount: number representing this quantity
@type amount: float
@param unit: not used (only unit-less quantities are supported)
@param error: the uncertainty of the amount (e.g. ±1)
@type error: float, or tuple of two floats, where the first value is
the upper error and the second is the lower error value.
"""
if amount is None:
raise ValueError('no amount given')
if unit is None:
unit = '1'
self.amount = amount
self.unit = unit
upperError = lowerError = 0
if isinstance(error, tuple):
upperError, lowerError = error
elif error is not None:
upperError = lowerError = error
self.upperBound = self.amount + upperError
self.lowerBound = self.amount - lowerError
def toWikibase(self):
"""Convert the data to a JSON object for the Wikibase API."""
json = {'amount': self.amount,
'upperBound': self.upperBound,
'lowerBound': self.lowerBound,
'unit': self.unit
}
return json
@classmethod
def fromWikibase(cls, wb):
"""
        Create a WbQuantity from the JSON data given by the Wikibase API.
@param wb: Wikibase JSON
"""
amount = eval(wb['amount'])
upperBound = eval(wb['upperBound'])
lowerBound = eval(wb['lowerBound'])
error = (upperBound - amount, amount - lowerBound)
return cls(amount, wb['unit'], error)
def __str__(self):
return json.dumps(self.toWikibase(), indent=4, sort_keys=True,
separators=(',', ': '))
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return (u"WbQuantity(amount=%(amount)s, upperBound=%(upperBound)s, "
u"lowerBound=%(lowerBound)s, unit=%(unit)s)" % self.__dict__)
_sites = {}
_url_cache = {} # The code/fam pair for each URL
def Site(code=None, fam=None, user=None, sysop=None, interface=None, url=None):
"""A factory method to obtain a Site object.
Site objects are cached and reused by this method.
By default rely on config settings. These defaults may all be overridden
using the method parameters.
@param code: language code (override config.mylang)
@type code: string
@param fam: family name or object (override config.family)
@type fam: string or Family
@param user: bot user name to use on this site (override config.usernames)
@type user: unicode
@param sysop: sysop user to use on this site (override config.sysopnames)
@type sysop: unicode
@param interface: site class or name of class in pywikibot.site
(override config.site_interface)
@type interface: subclass of L{pywikibot.site.BaseSite} or string
@param url: Instead of code and fam, does try to get a Site based on the
URL. Still requires that the family supporting that URL exists.
@type url: string
"""
# Either code and fam or only url
assert(not url or (not code and not fam))
_logger = "wiki"
if url:
if url in _url_cache:
cached = _url_cache[url]
if cached:
code = cached[0]
fam = cached[1]
else:
raise SiteDefinitionError("Unknown URL '{0}'.".format(url))
else:
# Iterate through all families and look, which does apply to
# the given URL
for fam in config.family_files:
try:
family = pywikibot.family.Family.load(fam)
code = family.from_url(url)
if code:
_url_cache[url] = (code, fam)
break
except Exception as e:
pywikibot.warning('Error in Family(%s).from_url: %s'
% (fam, e))
else:
_url_cache[url] = None
# TODO: As soon as AutoFamily is ready, try and use an
# AutoFamily
raise SiteDefinitionError("Unknown URL '{0}'.".format(url))
else:
# Fallback to config defaults
code = code or config.mylang
fam = fam or config.family
interface = interface or config.site_interface
# config.usernames is initialised with a dict for each family name
family_name = str(fam)
if family_name in config.usernames:
user = user or config.usernames[family_name].get(code) \
or config.usernames[family_name].get('*')
sysop = sysop or config.sysopnames[family_name].get(code) \
or config.sysopnames[family_name].get('*')
if not isinstance(interface, type):
        # If it isn't a class, assume it is a string
try:
tmp = __import__('pywikibot.site', fromlist=[interface])
interface = getattr(tmp, interface)
except ImportError:
raise ValueError("Invalid interface name '%(interface)s'" % locals())
if not issubclass(interface, pywikibot.site.BaseSite):
warning('Site called with interface=%s' % interface.__name__)
user = pywikibot.tools.normalize_username(user)
key = '%s:%s:%s:%s' % (interface.__name__, fam, code, user)
if key not in _sites or not isinstance(_sites[key], interface):
_sites[key] = interface(code=code, fam=fam, user=user, sysop=sysop)
debug(u"Instantiated %s object '%s'"
% (interface.__name__, _sites[key]), _logger)
if _sites[key].code != code:
warn('Site %s instantiated using different code "%s"'
% (_sites[key], code), UserWarning, 2)
return _sites[key]
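# Illustrative calls of the factory above (not part of the original module;
# assume a normal user-config.py is present):
#   Site('en', 'wikipedia')                        # by code and family
#   Site(url='https://en.wikipedia.org/wiki/Foo')  # resolved from a URL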
# alias for backwards-compatibility
getSite = pywikibot.tools.redirect_func(Site, old_name='getSite')
from .page import (
Page,
FilePage,
Category,
Link,
User,
ItemPage,
PropertyPage,
Claim,
)
from .page import html2unicode, url2unicode, unicode2html
link_regex = re.compile(r'\[\[(?P<title>[^\]|[<>{}]*)(\|.*?)?\]\]')
@pywikibot.tools.deprecated("comment parameter for page saving method")
def setAction(s):
"""Set a summary to use for changed page submissions."""
config.default_edit_summary = s
def showDiff(oldtext, newtext, context=0):
"""
Output a string showing the differences between oldtext and newtext.
The differences are highlighted (only on compatible systems) to show which
changes were made.
"""
PatchManager(oldtext, newtext, context=context).print_hunks()
# Throttle and thread handling
stopped = False
def stopme():
"""Drop this process from the throttle log, after pending threads finish.
Can be called manually if desired, but if not, will be called automatically
at Python exit.
"""
global stopped
_logger = "wiki"
if not stopped:
debug(u"stopme() called", _logger)
def remaining():
remainingPages = page_put_queue.qsize() - 1
# -1 because we added a None element to stop the queue
remainingSeconds = datetime.timedelta(
seconds=(remainingPages * config.put_throttle))
return (remainingPages, remainingSeconds)
page_put_queue.put((None, [], {}))
stopped = True
if page_put_queue.qsize() > 1:
num, sec = remaining()
format_values = dict(num=num, sec=sec)
output(u'\03{lightblue}'
u'Waiting for %(num)i pages to be put. '
u'Estimated time remaining: %(sec)s'
u'\03{default}' % format_values)
while(_putthread.isAlive()):
try:
_putthread.join(1)
except KeyboardInterrupt:
if input_yn('There are %i pages remaining in the queue. '
'Estimated time remaining: %s\nReally exit?'
% remaining(), default=False, automatic_quit=False):
return
# only need one drop() call because all throttles use the same global pid
try:
list(_sites.values())[0].throttle.drop()
log(u"Dropped throttle(s).")
except IndexError:
pass
import atexit
atexit.register(stopme)
# Create a separate thread for asynchronous page saves (and other requests)
def async_manager():
"""Daemon; take requests from the queue and execute them in background."""
while True:<|fim▁hole|> if request is None:
break
request(*args, **kwargs)
page_put_queue.task_done()
def async_request(request, *args, **kwargs):
"""Put a request on the queue, and start the daemon if necessary."""
if not _putthread.isAlive():
try:
page_put_queue.mutex.acquire()
try:
_putthread.start()
except (AssertionError, RuntimeError):
pass
finally:
page_put_queue.mutex.release()
page_put_queue.put((request, args, kwargs))
# queue to hold pending requests
page_put_queue = Queue(config.max_queue_size)
# set up the background thread
_putthread = threading.Thread(target=async_manager)
# identification for debugging purposes
_putthread.setName('Put-Thread')
_putthread.setDaemon(True)
wrapper = pywikibot.tools.ModuleDeprecationWrapper(__name__)
wrapper._add_deprecated_attr('ImagePage', FilePage)
wrapper._add_deprecated_attr(
'PageNotFound', pywikibot.exceptions.DeprecatedPageNotFoundError,
warning_message=('{0}.{1} is deprecated, and no longer '
'used by pywikibot; use http.fetch() instead.'))
wrapper._add_deprecated_attr(
'UserActionRefuse', pywikibot.exceptions._EmailUserError,
warning_message='UserActionRefuse is deprecated; '
'use UserRightsError and/or NotEmailableError')<|fim▁end|>
|
(request, args, kwargs) = page_put_queue.get()
|
<|file_name|>connect_dots.js<|end_file_name|><|fim▁begin|>class Color {
constructor () {
this.r = Color.value();
this.g = Color.value();
this.b = Color.value();
this.style = "rgba(" + this.r + "," + this.g + "," + this.b + ",1)";
//this.style = "rgba(233,72,152,1)";
}
static value () {
return Math.floor(Math.random() * 255);
}
}
class Dot {
constructor (parent) {
this.parent = parent;
this.x = Math.random() * this.parent.parent.width;
this.y = Math.random() * this.parent.parent.height;
// Speed of Dots (+- 0.5)
this.vx = Math.random() - 0.5;
this.vy = Math.random() - 0.5;
this.radius = Math.random() * 2;
this.color = new Color();
}
draw () {
this.parent.context.beginPath();
this.parent.context.fillStyle = this.color.style;
this.parent.context.arc(this.x, this.y, this.radius, 0, 2 * Math.PI, false);
this.parent.context.fill();
}
}
class ConnectDots {
constructor(parent) {
let interval = 70;
let nb_num = 250;
let radius_num = 60;<|fim▁hole|>
this.interval = interval;
this.nb_num = nb_num;
this.radius_num = radius_num;
this.parent = parent;
this.updateSize();
this.connectArea = {
x: 50 * this.parent.width / 100,
y: 50 * this.parent.height / 100
};
this.dots = {
nb: this.nb_num,
distMax: 100,
//connectAreaRadius: canvas.width/4,
connectAreaRadius: this.radius_num,
array: []
};
        $(window).resize(() => this.updateSize());
$(this.parent).on ("mousemove", (e) => {
this.connectArea.x = e.pageX;
this.connectArea.y = e.pageY;
});
}
static mixComponents(comp1, comp2, weight1, weight2) {
return (comp1 * weight1 + comp2 * weight2) / (weight1 + weight2);
}
updateSize () {
this.parent.width = Math.min ($(this.parent).parent().width(), window.innerWidth);
this.parent.height = Math.min ($(this.parent).parent().height(), window.innerHeight);
this.parent.style.display = 'block';
this.context = this.parent.getContext("2d");
this.context.lineWidth = 0.2;
}
gradient(dot1, dot2, midColor) {
let grad = this.context.createLinearGradient(
Math.floor(dot1.x), Math.floor(dot1.y),
Math.floor(dot2.x), Math.floor(dot2.y));
grad.addColorStop(0, dot1.color.style);
grad.addColorStop(Math.floor(dot1.radius / (dot1.radius / dot2.radius)), midColor);
grad.addColorStop(1, dot2.color.style);
return grad;
}
lineStyle(dot1, dot2) {
let r = ConnectDots.mixComponents(dot1.color.r, dot2.color.r, dot1.radius, dot2.radius);
let g = ConnectDots.mixComponents(dot1.color.g, dot2.color.g, dot1.radius, dot2.radius);
let b = ConnectDots.mixComponents(dot1.color.b, dot2.color.b, dot1.radius, dot2.radius);
let midColor = 'rgba(' + Math.floor(r) + ',' + Math.floor(g) + ',' + Math.floor(b) + ', 0.8)';
r = g = b = null;
return this.gradient(dot1, dot2, midColor);
}
moveDots() {
for (let i = 0; i < this.dots.nb; i++) {
let dot = this.dots.array[i];
if (dot.y < 0 || dot.y > this.parent.height)
dot.vy = -dot.vy;
else if (dot.x < 0 || dot.x > this.parent.width)
dot.vx = -dot.vx;
dot.x += dot.vx;
dot.y += dot.vy;
dot = null;
}
}
connectDots() {
for (let i = 0; i < this.dots.nb; i++) {
for (let j = 0; j < this.dots.nb; j++) {
if (i === j) continue;
let dot1 = this.dots.array[i];
let dot2 = this.dots.array[j];
let xDiff = dot1.x - dot2.x;
let yDiff = dot1.y - dot2.y;
let xCoreDiff = dot1.x - this.connectArea.x;
let yCoreDiff = dot1.y - this.connectArea.y;
if ((xDiff < this.dots.distMax && xDiff > -this.dots.distMax)
&& (yDiff < this.dots.distMax && yDiff > -this.dots.distMax)
&& (xCoreDiff < this.dots.connectAreaRadius && xCoreDiff > -this.dots.connectAreaRadius)
&& (yCoreDiff < this.dots.connectAreaRadius && yCoreDiff > -this.dots.connectAreaRadius)) {
this.context.beginPath();
this.context.strokeStyle = this.lineStyle(dot1, dot2);
this.context.moveTo(dot1.x, dot1.y);
this.context.lineTo(dot2.x, dot2.y);
this.context.stroke();
this.context.closePath();
}
dot1 = null;
dot2 = null;
xDiff = null;
yDiff = null;
xCoreDiff = null;
yCoreDiff = null;
}
}
}
createDots() {
for (let i = 0; i < this.dots.nb; i++)
this.dots.array.push(new Dot(this));
}
drawDots() {
for (let i = 0; i < this.dots.nb; i++)
this.dots.array[i].draw();
}
animateDots() {
this.context.clearRect(0, 0, this.parent.width, this.parent.height);
this.moveDots();
this.connectDots();
this.drawDots();
requestAnimationFrame(() => {this.animateDots ()});
}
run () {
this.createDots();
this.animateDots();
}
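    // Illustrative usage (not part of the original file):
    //   new ConnectDots(document.querySelector('canvas')).run();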
}<|fim▁end|>
| |
<|file_name|>resource_dns_record_set_test.go<|end_file_name|><|fim▁begin|>package google
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccDnsRecordSet_basic(t *testing.T) {
t.Parallel()
zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDnsRecordSetDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300),
Check: resource.ComposeTestCheckFunc(
testAccCheckDnsRecordSetExists(
"google_dns_record_set.foobar", zoneName),
),
},
},
})
}
func TestAccDnsRecordSet_modify(t *testing.T) {
t.Parallel()
zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDnsRecordSetDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300),
Check: resource.ComposeTestCheckFunc(
testAccCheckDnsRecordSetExists(
"google_dns_record_set.foobar", zoneName),
),
},
resource.TestStep{
Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.11", 300),
Check: resource.ComposeTestCheckFunc(
testAccCheckDnsRecordSetExists(
"google_dns_record_set.foobar", zoneName),
),
},
resource.TestStep{
Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.11", 600),
Check: resource.ComposeTestCheckFunc(
testAccCheckDnsRecordSetExists(
"google_dns_record_set.foobar", zoneName),
),
},
},
})
}
func TestAccDnsRecordSet_changeType(t *testing.T) {
t.Parallel()<|fim▁hole|>
zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDnsRecordSetDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300),
Check: resource.ComposeTestCheckFunc(
testAccCheckDnsRecordSetExists(
"google_dns_record_set.foobar", zoneName),
),
},
resource.TestStep{
Config: testAccDnsRecordSet_bigChange(zoneName, 600),
Check: resource.ComposeTestCheckFunc(
testAccCheckDnsRecordSetExists(
"google_dns_record_set.foobar", zoneName),
),
},
},
})
}
func TestAccDnsRecordSet_ns(t *testing.T) {
zoneName := fmt.Sprintf("dnszone-test-ns-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDnsRecordSetDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccDnsRecordSet_ns(zoneName, 300),
Check: resource.ComposeTestCheckFunc(
testAccCheckDnsRecordSetExists(
"google_dns_record_set.foobar", zoneName),
),
},
},
})
}
func testAccCheckDnsRecordSetDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
for _, rs := range s.RootModule().Resources {
// Deletion of the managed_zone implies everything is gone
if rs.Type == "google_dns_managed_zone" {
_, err := config.clientDns.ManagedZones.Get(
config.Project, rs.Primary.ID).Do()
if err == nil {
return fmt.Errorf("DNS ManagedZone still exists")
}
}
}
return nil
}
func testAccCheckDnsRecordSetExists(resourceType, resourceName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[resourceType]
if !ok {
return fmt.Errorf("Not found: %s", resourceName)
}
dnsName := rs.Primary.Attributes["name"]
dnsType := rs.Primary.Attributes["type"]
if rs.Primary.ID == "" {
return fmt.Errorf("No ID is set")
}
config := testAccProvider.Meta().(*Config)
resp, err := config.clientDns.ResourceRecordSets.List(
config.Project, resourceName).Name(dnsName).Type(dnsType).Do()
if err != nil {
return fmt.Errorf("Error confirming DNS RecordSet existence: %#v", err)
}
switch len(resp.Rrsets) {
case 0:
// The resource doesn't exist anymore
return fmt.Errorf("DNS RecordSet not found")
case 1:
return nil
default:
return fmt.Errorf("Only expected 1 record set, got %d", len(resp.Rrsets))
}
}
}
func testAccDnsRecordSet_basic(zoneName string, addr2 string, ttl int) string {
return fmt.Sprintf(`
resource "google_dns_managed_zone" "parent-zone" {
name = "%s"
dns_name = "hashicorptest.com."
description = "Test Description"
}
resource "google_dns_record_set" "foobar" {
managed_zone = "${google_dns_managed_zone.parent-zone.name}"
name = "test-record.hashicorptest.com."
type = "A"
rrdatas = ["127.0.0.1", "%s"]
ttl = %d
}
`, zoneName, addr2, ttl)
}
func testAccDnsRecordSet_ns(name string, ttl int) string {
return fmt.Sprintf(`
resource "google_dns_managed_zone" "parent-zone" {
name = "%s"
dns_name = "hashicorptest.com."
description = "Test Description"
}
resource "google_dns_record_set" "foobar" {
managed_zone = "${google_dns_managed_zone.parent-zone.name}"
name = "hashicorptest.com."
type = "NS"
rrdatas = ["ns.hashicorp.services.", "ns2.hashicorp.services."]
ttl = %d
}
`, name, ttl)
}
func testAccDnsRecordSet_bigChange(zoneName string, ttl int) string {
return fmt.Sprintf(`
resource "google_dns_managed_zone" "parent-zone" {
name = "%s"
dns_name = "hashicorptest.com."
description = "Test Description"
}
resource "google_dns_record_set" "foobar" {
managed_zone = "${google_dns_managed_zone.parent-zone.name}"
name = "test-record.hashicorptest.com."
type = "CNAME"
rrdatas = ["www.terraform.io."]
ttl = %d
}
`, zoneName, ttl)
}<|fim▁end|>
| |
<|file_name|>testing.py<|end_file_name|><|fim▁begin|><|fim▁hole|>skipiftravis = pytest.mark.skipif(
os.environ.get('TRAVIS') == 'true', reason='skip on Travis-CI')<|fim▁end|>
|
import os
import pytest
|
<|file_name|>checked_next_power_of_two.rs<|end_file_name|><|fim▁begin|>#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
// macro_rules! uint_impl {
// ($ActualT:ty, $BITS:expr,
// $ctpop:path,
// $ctlz:path,
// $cttz:path,
// $bswap:path,
// $add_with_overflow:path,
// $sub_with_overflow:path,
// $mul_with_overflow:path) => {
// /// Returns the smallest value that can be represented by this integer type.
// #[stable(feature = "rust1", since = "1.0.0")]
// pub fn min_value() -> Self { 0 }
//
// /// Returns the largest value that can be represented by this integer type.
// #[stable(feature = "rust1", since = "1.0.0")]
// pub fn max_value() -> Self { !0 }
//
// /// Converts a string slice in a given base to an integer.
// ///
// /// Leading and trailing whitespace represent an error.
// ///
// /// # Arguments
// ///
// /// * src - A string slice
// /// * radix - The base to use. Must lie in the range [2 .. 36]
// ///
// /// # Return value
// ///
// /// `Err(ParseIntError)` if the string did not represent a valid number.
// /// Otherwise, `Ok(n)` where `n` is the integer represented by `src`.
// #[stable(feature = "rust1", since = "1.0.0")]
// #[allow(deprecated)]
// pub fn from_str_radix(src: &str, radix: u32) -> Result<Self, ParseIntError> {
// from_str_radix(src, radix)
// }
//
// /// Returns the number of ones in the binary representation of `self`.
// ///
// /// # Examples
// ///
// /// ```rust
// /// let n = 0b01001100u8;
// ///
// /// assert_eq!(n.count_ones(), 3);
// /// ```
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn count_ones(self) -> u32 {
// unsafe { $ctpop(self as $ActualT) as u32 }
// }
//
// /// Returns the number of zeros in the binary representation of `self`.
// ///
// /// # Examples
// ///
// /// ```rust
// /// let n = 0b01001100u8;
// ///
// /// assert_eq!(n.count_zeros(), 5);
// /// ```
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn count_zeros(self) -> u32 {
// (!self).count_ones()
// }
//
// /// Returns the number of leading zeros in the binary representation
// /// of `self`.
// ///
// /// # Examples
// ///
// /// ```rust
// /// let n = 0b0101000u16;
// ///
// /// assert_eq!(n.leading_zeros(), 10);
// /// ```
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn leading_zeros(self) -> u32 {
// unsafe { $ctlz(self as $ActualT) as u32 }
// }
//
// /// Returns the number of trailing zeros in the binary representation
// /// of `self`.
// ///
// /// # Examples
// ///
// /// ```rust
// /// let n = 0b0101000u16;
// ///
// /// assert_eq!(n.trailing_zeros(), 3);
// /// ```
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn trailing_zeros(self) -> u32 {
// // As of LLVM 3.6 the codegen for the zero-safe cttz8 intrinsic
// // emits two conditional moves on x86_64. By promoting the value to
// // u16 and setting bit 8, we get better code without any conditional
// // operations.
// // FIXME: There's a LLVM patch (http://reviews.llvm.org/D9284)
// // pending, remove this workaround once LLVM generates better code
// // for cttz8.
// unsafe {
// if $BITS == 8 {
// intrinsics::cttz16(self as u16 | 0x100) as u32
// } else {
// $cttz(self as $ActualT) as u32
// }
// }
// }
//
// /// Shifts the bits to the left by a specified amount, `n`,
// /// wrapping the truncated bits to the end of the resulting integer.
// ///
// /// # Examples
// ///
// /// ```rust
// /// let n = 0x0123456789ABCDEFu64;
// /// let m = 0x3456789ABCDEF012u64;
// ///
// /// assert_eq!(n.rotate_left(12), m);
// /// ```
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn rotate_left(self, n: u32) -> Self {
// // Protect against undefined behaviour for over-long bit shifts
// let n = n % $BITS;
// (self << n) | (self >> (($BITS - n) % $BITS))
// }
//
// /// Shifts the bits to the right by a specified amount, `n`,
// /// wrapping the truncated bits to the beginning of the resulting
// /// integer.
// ///
// /// # Examples
// ///
// /// ```rust
// /// let n = 0x0123456789ABCDEFu64;
// /// let m = 0xDEF0123456789ABCu64;
// ///
// /// assert_eq!(n.rotate_right(12), m);
// /// ```
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn rotate_right(self, n: u32) -> Self {
// // Protect against undefined behaviour for over-long bit shifts
// let n = n % $BITS;
// (self >> n) | (self << (($BITS - n) % $BITS))
// }
//
// /// Reverses the byte order of the integer.
// ///
// /// # Examples
// ///
// /// ```rust
// /// let n = 0x0123456789ABCDEFu64;
// /// let m = 0xEFCDAB8967452301u64;
// ///
// /// assert_eq!(n.swap_bytes(), m);
// /// ```
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn swap_bytes(self) -> Self {
// unsafe { $bswap(self as $ActualT) as Self }
// }
//
// /// Converts an integer from big endian to the target's endianness.
// ///
// /// On big endian this is a no-op. On little endian the bytes are
// /// swapped.
// ///
// /// # Examples
// ///
// /// ```rust
// /// let n = 0x0123456789ABCDEFu64;
// ///
// /// if cfg!(target_endian = "big") {
// /// assert_eq!(u64::from_be(n), n)
// /// } else {
// /// assert_eq!(u64::from_be(n), n.swap_bytes())
// /// }
// /// ```
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn from_be(x: Self) -> Self {
// if cfg!(target_endian = "big") { x } else { x.swap_bytes() }
// }
//
// /// Converts an integer from little endian to the target's endianness.
// ///
// /// On little endian this is a no-op. On big endian the bytes are
// /// swapped.
// ///
// /// # Examples
// ///
// /// ```rust
// /// let n = 0x0123456789ABCDEFu64;
// ///
// /// if cfg!(target_endian = "little") {
// /// assert_eq!(u64::from_le(n), n)
// /// } else {
// /// assert_eq!(u64::from_le(n), n.swap_bytes())
// /// }
// /// ```
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn from_le(x: Self) -> Self {
// if cfg!(target_endian = "little") { x } else { x.swap_bytes() }
// }
//
// /// Converts `self` to big endian from the target's endianness.
// ///
// /// On big endian this is a no-op. On little endian the bytes are
// /// swapped.
// ///
// /// # Examples
// ///
// /// ```rust
// /// let n = 0x0123456789ABCDEFu64;
// ///
// /// if cfg!(target_endian = "big") {
// /// assert_eq!(n.to_be(), n)
// /// } else {
// /// assert_eq!(n.to_be(), n.swap_bytes())
// /// }
// /// ```
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn to_be(self) -> Self { // or not to be?
// if cfg!(target_endian = "big") { self } else { self.swap_bytes() }
// }
//
// /// Converts `self` to little endian from the target's endianness.
// ///
// /// On little endian this is a no-op. On big endian the bytes are
// /// swapped.
// ///
// /// # Examples
// ///
// /// ```rust
// /// let n = 0x0123456789ABCDEFu64;
// ///
// /// if cfg!(target_endian = "little") {
// /// assert_eq!(n.to_le(), n)
// /// } else {
// /// assert_eq!(n.to_le(), n.swap_bytes())
// /// }
// /// ```
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn to_le(self) -> Self {
// if cfg!(target_endian = "little") { self } else { self.swap_bytes() }
// }
//
// /// Checked integer addition. Computes `self + other`, returning `None`
// /// if overflow occurred.
// ///
// /// # Examples
// ///
// /// ```rust
// /// assert_eq!(5u16.checked_add(65530), Some(65535));
// /// assert_eq!(6u16.checked_add(65530), None);
// /// ```
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn checked_add(self, other: Self) -> Option<Self> {
// checked_op!($ActualT, $add_with_overflow, self, other)
// }
//
// /// Checked integer subtraction. Computes `self - other`, returning
// /// `None` if underflow occurred.
// ///
// /// # Examples
// ///
// /// ```rust
// /// assert_eq!((-127i8).checked_sub(1), Some(-128));
// /// assert_eq!((-128i8).checked_sub(1), None);
// /// ```
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn checked_sub(self, other: Self) -> Option<Self> {
// checked_op!($ActualT, $sub_with_overflow, self, other)
// }
//
// /// Checked integer multiplication. Computes `self * other`, returning
// /// `None` if underflow or overflow occurred.
// ///
// /// # Examples
// ///
// /// ```rust
// /// assert_eq!(5u8.checked_mul(51), Some(255));
// /// assert_eq!(5u8.checked_mul(52), None);
// /// ```
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn checked_mul(self, other: Self) -> Option<Self> {
// checked_op!($ActualT, $mul_with_overflow, self, other)
// }
//
// /// Checked integer division. Computes `self / other`, returning `None`
// /// if `other == 0` or the operation results in underflow or overflow.
// ///
// /// # Examples
// ///
// /// ```rust
// /// assert_eq!((-127i8).checked_div(-1), Some(127));
// /// assert_eq!((-128i8).checked_div(-1), None);
// /// assert_eq!((1i8).checked_div(0), None);
// /// ```
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn checked_div(self, v: Self) -> Option<Self> {
// match v {
// 0 => None,
// v => Some(self / v),
// }
// }
//
// /// Saturating integer addition. Computes `self + other`, saturating at
// /// the numeric bounds instead of overflowing.
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn saturating_add(self, other: Self) -> Self {
// match self.checked_add(other) {
// Some(x) => x,
// None if other >= Self::zero() => Self::max_value(),
// None => Self::min_value(),
// }
// }
//
// /// Saturating integer subtraction. Computes `self - other`, saturating
// /// at the numeric bounds instead of overflowing.
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn saturating_sub(self, other: Self) -> Self {
// match self.checked_sub(other) {
// Some(x) => x,
// None if other >= Self::zero() => Self::min_value(),
// None => Self::max_value(),
// }
// }
//<|fim▁hole|> // #[inline]
// pub fn wrapping_add(self, rhs: Self) -> Self {
// unsafe {
// intrinsics::overflowing_add(self, rhs)
// }
// }
//
// /// Wrapping (modular) subtraction. Computes `self - other`,
// /// wrapping around at the boundary of the type.
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn wrapping_sub(self, rhs: Self) -> Self {
// unsafe {
// intrinsics::overflowing_sub(self, rhs)
// }
// }
//
// /// Wrapping (modular) multiplication. Computes `self *
// /// other`, wrapping around at the boundary of the type.
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn wrapping_mul(self, rhs: Self) -> Self {
// unsafe {
// intrinsics::overflowing_mul(self, rhs)
// }
// }
//
// /// Wrapping (modular) division. Computes `floor(self / other)`,
// /// wrapping around at the boundary of the type.
// ///
// /// The only case where such wrapping can occur is when one
// /// divides `MIN / -1` on a signed type (where `MIN` is the
// /// negative minimal value for the type); this is equivalent
// /// to `-MIN`, a positive value that is too large to represent
// /// in the type. In such a case, this function returns `MIN`
//     // /// itself.
// #[unstable(feature = "core", since = "1.0.0")]
// #[inline(always)]
// pub fn wrapping_div(self, rhs: Self) -> Self {
// self.overflowing_div(rhs).0
// }
//
// /// Wrapping (modular) remainder. Computes `self % other`,
// /// wrapping around at the boundary of the type.
// ///
// /// Such wrap-around never actually occurs mathematically;
// /// implementation artifacts make `x % y` illegal for `MIN /
//     // /// -1` on a signed type (where `MIN` is the negative
// /// minimal value). In such a case, this function returns `0`.
// #[unstable(feature = "core", since = "1.0.0")]
// #[inline(always)]
// pub fn wrapping_rem(self, rhs: Self) -> Self {
// self.overflowing_rem(rhs).0
// }
//
// /// Wrapping (modular) negation. Computes `-self`,
// /// wrapping around at the boundary of the type.
// ///
// /// The only case where such wrapping can occur is when one
// /// negates `MIN` on a signed type (where `MIN` is the
// /// negative minimal value for the type); this is a positive
// /// value that is too large to represent in the type. In such
// /// a case, this function returns `MIN` itself.
// #[unstable(feature = "core", since = "1.0.0")]
// #[inline(always)]
// pub fn wrapping_neg(self) -> Self {
// self.overflowing_neg().0
// }
//
// /// Panic-free bitwise shift-left; yields `self << mask(rhs)`,
// /// where `mask` removes any high-order bits of `rhs` that
// /// would cause the shift to exceed the bitwidth of the type.
// #[unstable(feature = "core", since = "1.0.0")]
// #[inline(always)]
// pub fn wrapping_shl(self, rhs: u32) -> Self {
// self.overflowing_shl(rhs).0
// }
//
//     // /// Panic-free bitwise shift-right; yields `self >> mask(rhs)`,
// /// where `mask` removes any high-order bits of `rhs` that
// /// would cause the shift to exceed the bitwidth of the type.
// #[unstable(feature = "core", since = "1.0.0")]
// #[inline(always)]
// pub fn wrapping_shr(self, rhs: u32) -> Self {
// self.overflowing_shr(rhs).0
// }
//
// /// Raises self to the power of `exp`, using exponentiation by squaring.
// ///
// /// # Examples
// ///
// /// ```rust
// /// assert_eq!(2i32.pow(4), 16);
// /// ```
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn pow(self, mut exp: u32) -> Self {
// let mut base = self;
// let mut acc = Self::one();
//
// let mut prev_base = self;
// let mut base_oflo = false;
// while exp > 0 {
// if (exp & 1) == 1 {
// if base_oflo {
// // ensure overflow occurs in the same manner it
// // would have otherwise (i.e. signal any exception
// // it would have otherwise).
// acc = acc * (prev_base * prev_base);
// } else {
// acc = acc * base;
// }
// }
// prev_base = base;
// let (new_base, new_base_oflo) = base.overflowing_mul(base);
// base = new_base;
// base_oflo = new_base_oflo;
// exp /= 2;
// }
// acc
// }
//
// /// Returns `true` iff `self == 2^k` for some `k`.
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn is_power_of_two(self) -> bool {
// (self.wrapping_sub(Self::one())) & self == Self::zero() &&
// !(self == Self::zero())
// }
//
// /// Returns the smallest power of two greater than or equal to `self`.
// /// Unspecified behavior on overflow.
// #[stable(feature = "rust1", since = "1.0.0")]
// #[inline]
// pub fn next_power_of_two(self) -> Self {
// let bits = size_of::<Self>() * 8;
// let one: Self = Self::one();
// one << ((bits - self.wrapping_sub(one).leading_zeros() as usize) % bits)
// }
//
// /// Returns the smallest power of two greater than or equal to `n`. If
// /// the next power of two is greater than the type's maximum value,
// /// `None` is returned, otherwise the power of two is wrapped in `Some`.
// #[stable(feature = "rust1", since = "1.0.0")]
// pub fn checked_next_power_of_two(self) -> Option<Self> {
// let npot = self.next_power_of_two();
// if npot >= self {
// Some(npot)
// } else {
// None
// }
// }
// }
// }
// impl u8 {
// uint_impl! { u8, 8,
// intrinsics::ctpop8,
// intrinsics::ctlz8,
// intrinsics::cttz8,
// bswap8,
// intrinsics::u8_add_with_overflow,
// intrinsics::u8_sub_with_overflow,
// intrinsics::u8_mul_with_overflow }
// }
macro_rules! checked_next_power_of_two_test {
($value:expr, $result:expr) => ({
let x: u8 = $value;
let result: Option<u8> = x.checked_next_power_of_two();
assert_eq!(result, $result);
})
}
#[test]
fn checked_next_power_of_two_test1() {
checked_next_power_of_two_test!( 0x00, Some::<u8>(0x01) );
checked_next_power_of_two_test!( 0x01, Some::<u8>(0x01) );
checked_next_power_of_two_test!( 0x03, Some::<u8>(0x04) );
checked_next_power_of_two_test!( 0x05, Some::<u8>(0x08) );
checked_next_power_of_two_test!( 0x09, Some::<u8>(0x10) );
checked_next_power_of_two_test!( 0x11, Some::<u8>(0x20) );
checked_next_power_of_two_test!( 0x21, Some::<u8>(0x40) );
checked_next_power_of_two_test!( 0x41, Some::<u8>(0x80) );
checked_next_power_of_two_test!( 0x81, None::<u8> );
}
}<|fim▁end|>
|
// /// Wrapping (modular) addition. Computes `self + other`,
// /// wrapping around at the boundary of the type.
// #[stable(feature = "rust1", since = "1.0.0")]
|
<|file_name|>Grow.js<|end_file_name|><|fim▁begin|>import _extends from "@babel/runtime/helpers/esm/extends";
import _objectWithoutPropertiesLoose from "@babel/runtime/helpers/esm/objectWithoutPropertiesLoose";
import * as React from 'react';
import PropTypes from 'prop-types';
import { Transition } from 'react-transition-group';
import useTheme from '../styles/useTheme';
import { reflow, getTransitionProps } from '../transitions/utils';
import useForkRef from '../utils/useForkRef';
function getScale(value) {
return `scale(${value}, ${value ** 2})`;
}
const styles = {
entering: {
opacity: 1,
transform: getScale(1)
},
entered: {
opacity: 1,
transform: 'none'
}
};
/**
* The Grow transition is used by the [Tooltip](/components/tooltips/) and
* [Popover](/components/popover/) components.
* It uses [react-transition-group](https://github.com/reactjs/react-transition-group) internally.
*/
const Grow = React.forwardRef(function Grow(props, ref) {
const {
children,
in: inProp,
onEnter,
onExit,
style,
timeout = 'auto'
} = props,
other = _objectWithoutPropertiesLoose(props, ["children", "in", "onEnter", "onExit", "style", "timeout"]);
const timer = React.useRef();
const autoTimeout = React.useRef();
const handleRef = useForkRef(children.ref, ref);
const theme = useTheme();
const handleEnter = (node, isAppearing) => {
    reflow(node); // So the animation always starts from the start.
const {
duration: transitionDuration,
delay
} = getTransitionProps({
style,
timeout
}, {
mode: 'enter'
});
let duration;
if (timeout === 'auto') {
duration = theme.transitions.getAutoHeightDuration(node.clientHeight);
autoTimeout.current = duration;
} else {
duration = transitionDuration;
}
node.style.transition = [theme.transitions.create('opacity', {
duration,
delay
}), theme.transitions.create('transform', {
duration: duration * 0.666,
delay
})].join(',');
if (onEnter) {
onEnter(node, isAppearing);
}
};
const handleExit = node => {
const {
duration: transitionDuration,
delay
} = getTransitionProps({
style,
timeout
}, {
mode: 'exit'
});
let duration;
if (timeout === 'auto') {
duration = theme.transitions.getAutoHeightDuration(node.clientHeight);
autoTimeout.current = duration;<|fim▁hole|> } else {
duration = transitionDuration;
}
node.style.transition = [theme.transitions.create('opacity', {
duration,
delay
}), theme.transitions.create('transform', {
duration: duration * 0.666,
delay: delay || duration * 0.333
})].join(',');
node.style.opacity = '0';
node.style.transform = getScale(0.75);
if (onExit) {
onExit(node);
}
};
const addEndListener = (_, next) => {
if (timeout === 'auto') {
timer.current = setTimeout(next, autoTimeout.current || 0);
}
};
React.useEffect(() => {
return () => {
clearTimeout(timer.current);
};
}, []);
return /*#__PURE__*/React.createElement(Transition, _extends({
appear: true,
in: inProp,
onEnter: handleEnter,
onExit: handleExit,
addEndListener: addEndListener,
timeout: timeout === 'auto' ? null : timeout
}, other), (state, childProps) => {
return React.cloneElement(children, _extends({
style: _extends({
opacity: 0,
transform: getScale(0.75),
visibility: state === 'exited' && !inProp ? 'hidden' : undefined
}, styles[state], {}, style, {}, children.props.style),
ref: handleRef
}, childProps));
});
});
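// Illustrative usage (not part of the original file; <Paper> stands in for
// any single child element):
//   <Grow in={open} timeout="auto">
//     <Paper>Popover content</Paper>
//   </Grow>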
process.env.NODE_ENV !== "production" ? Grow.propTypes = {
// ----------------------------- Warning --------------------------------
// | These PropTypes are generated from the TypeScript type definitions |
// | To update them edit the d.ts file and run "yarn proptypes" |
// ----------------------------------------------------------------------
/**
* A single child content element.
*/
children: PropTypes.element,
/**
* If `true`, show the component; triggers the enter or exit animation.
*/
in: PropTypes.bool,
/**
* @ignore
*/
onEnter: PropTypes.func,
/**
* @ignore
*/
onExit: PropTypes.func,
/**
* @ignore
*/
style: PropTypes.object,
/**
* The duration for the transition, in milliseconds.
* You may specify a single timeout for all transitions, or individually with an object.
*
* Set to 'auto' to automatically calculate transition time based on height.
*/
timeout: PropTypes.oneOfType([PropTypes.oneOf(['auto']), PropTypes.number, PropTypes.shape({
appear: PropTypes.number,
enter: PropTypes.number,
exit: PropTypes.number
})])
} : void 0;
Grow.muiSupportAuto = true;
export default Grow;<|fim▁end|>
| |
<|file_name|>coders.go<|end_file_name|><|fim▁begin|>//
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package pbeam
import (
"bytes"
"encoding/gob"
"reflect"
"github.com/apache/beam/sdks/v2/go/pkg/beam"
)
// Coders for serializing DP Aggregation Accumulators.
func init() {
beam.RegisterCoder(reflect.TypeOf(countAccum{}), encodeCountAccum, decodeCountAccum)
beam.RegisterCoder(reflect.TypeOf(boundedSumAccumInt64{}), encodeBoundedSumAccumInt64, decodeBoundedSumAccumInt64)
beam.RegisterCoder(reflect.TypeOf(boundedSumAccumFloat64{}), encodeBoundedSumAccumFloat64, decodeBoundedSumAccumFloat64)
beam.RegisterCoder(reflect.TypeOf(boundedMeanAccum{}), encodeBoundedMeanAccum, decodeBoundedMeanAccum)
beam.RegisterCoder(reflect.TypeOf(boundedQuantilesAccum{}), encodeBoundedQuantilesAccum, decodeBoundedQuantilesAccum)
beam.RegisterCoder(reflect.TypeOf(expandValuesAccum{}), encodeExpandValuesAccum, decodeExpandValuesAccum)
beam.RegisterCoder(reflect.TypeOf(expandFloat64ValuesAccum{}), encodeExpandFloat64ValuesAccum, decodeExpandFloat64ValuesAccum)
beam.RegisterCoder(reflect.TypeOf(partitionSelectionAccum{}), encodePartitionSelectionAccum, decodePartitionSelectionAccum)
}
func encodeCountAccum(ca countAccum) ([]byte, error) {
return encode(ca)
}
func decodeCountAccum(data []byte) (countAccum, error) {
var ret countAccum
err := decode(&ret, data)
return ret, err
}
func encodeBoundedSumAccumInt64(v boundedSumAccumInt64) ([]byte, error) {
return encode(v)
}
func decodeBoundedSumAccumInt64(data []byte) (boundedSumAccumInt64, error) {
var ret boundedSumAccumInt64
err := decode(&ret, data)
return ret, err
}
func encodeBoundedSumAccumFloat64(v boundedSumAccumFloat64) ([]byte, error) {
return encode(v)
}
func decodeBoundedSumAccumFloat64(data []byte) (boundedSumAccumFloat64, error) {
var ret boundedSumAccumFloat64
err := decode(&ret, data)
return ret, err
}
func encodeBoundedMeanAccum(v boundedMeanAccum) ([]byte, error) {
return encode(v)
}
func decodeBoundedMeanAccum(data []byte) (boundedMeanAccum, error) {
var ret boundedMeanAccum
err := decode(&ret, data)
return ret, err
}
func encodeBoundedQuantilesAccum(v boundedQuantilesAccum) ([]byte, error) {
return encode(v)
}
func decodeBoundedQuantilesAccum(data []byte) (boundedQuantilesAccum, error) {
var ret boundedQuantilesAccum
err := decode(&ret, data)
return ret, err
}
func encodeExpandValuesAccum(v expandValuesAccum) ([]byte, error) {
return encode(v)
}
func decodeExpandValuesAccum(data []byte) (expandValuesAccum, error) {
var ret expandValuesAccum
err := decode(&ret, data)
return ret, err
}
<|fim▁hole|>
func decodeExpandFloat64ValuesAccum(data []byte) (expandFloat64ValuesAccum, error) {
var ret expandFloat64ValuesAccum
err := decode(&ret, data)
return ret, err
}
func encodePartitionSelectionAccum(v partitionSelectionAccum) ([]byte, error) {
return encode(v)
}
func decodePartitionSelectionAccum(data []byte) (partitionSelectionAccum, error) {
var ret partitionSelectionAccum
err := decode(&ret, data)
return ret, err
}
func encode(v interface{}) ([]byte, error) {
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
err := enc.Encode(v)
return buf.Bytes(), err
}
func decode(v interface{}, data []byte) error {
return gob.NewDecoder(bytes.NewReader(data)).Decode(v)
}<|fim▁end|>
|
func encodeExpandFloat64ValuesAccum(v expandFloat64ValuesAccum) ([]byte, error) {
return encode(v)
}
|
<|file_name|>relationships_test.go<|end_file_name|><|fim▁begin|>package bdb
import (
"reflect"
"testing"
)
func TestToOneRelationships(t *testing.T) {
t.Parallel()
tables := []Table{
{
Name: "pilots",
Columns: []Column{{Name: "id", Unique: true}, {Name: "name", Unique: true}}},
{
Name: "airports",
Columns: []Column{{Name: "id", Unique: true}, {Name: "size", Unique: true}},
},
{
Name: "jets",
Columns: []Column{{Name: "id", Unique: true}, {Name: "pilot_id", Unique: true}, {Name: "airport_id", Unique: true}},
FKeys: []ForeignKey{
{Name: "jets_pilot_id_fk", Column: "pilot_id", ForeignTable: "pilots", ForeignColumn: "id", Unique: true},
{Name: "jets_airport_id_fk", Column: "airport_id", ForeignTable: "airports", ForeignColumn: "id", Unique: true},
},
},
{
Name: "licenses",
Columns: []Column{{Name: "id", Unique: true}, {Name: "pilot_id", Unique: true}},
FKeys: []ForeignKey{
{Name: "licenses_pilot_id_fk", Column: "pilot_id", ForeignTable: "pilots", ForeignColumn: "id", Unique: true},
},
},
{
Name: "hangars",
Columns: []Column{{Name: "id", Unique: true}, {Name: "name", Unique: true}},
},
{
Name: "languages",
Columns: []Column{{Name: "id", Unique: true}, {Name: "language", Unique: true}},
},
{
Name: "pilot_languages",
IsJoinTable: true,
Columns: []Column{{Name: "pilot_id", Unique: true}, {Name: "language_id", Unique: true}},
FKeys: []ForeignKey{
{Name: "pilot_id_fk", Column: "pilot_id", ForeignTable: "pilots", ForeignColumn: "id", Unique: true},
{Name: "language_id_fk", Column: "language_id", ForeignTable: "languages", ForeignColumn: "id", Unique: true},
},
},
}
relationships := ToOneRelationships("pilots", tables)
expected := []ToOneRelationship{
{
Table: "pilots",
Column: "id",
Nullable: false,<|fim▁hole|>
ForeignTable: "jets",
ForeignColumn: "pilot_id",
ForeignColumnNullable: false,
ForeignColumnUnique: true,
},
{
Table: "pilots",
Column: "id",
Nullable: false,
Unique: false,
ForeignTable: "licenses",
ForeignColumn: "pilot_id",
ForeignColumnNullable: false,
ForeignColumnUnique: true,
},
}
if len(relationships) != 2 {
t.Error("wrong # of relationships", len(relationships))
}
for i, v := range relationships {
if !reflect.DeepEqual(v, expected[i]) {
t.Errorf("[%d] Mismatch between relationships:\n\nwant:%#v\n\ngot:%#v\n\n", i, expected[i], v)
}
}
}
func TestToManyRelationships(t *testing.T) {
t.Parallel()
tables := []Table{
{
Name: "pilots",
Columns: []Column{{Name: "id"}, {Name: "name"}},
},
{
Name: "airports",
Columns: []Column{{Name: "id"}, {Name: "size"}},
},
{
Name: "jets",
Columns: []Column{{Name: "id"}, {Name: "pilot_id"}, {Name: "airport_id"}},
FKeys: []ForeignKey{
{Name: "jets_pilot_id_fk", Column: "pilot_id", ForeignTable: "pilots", ForeignColumn: "id"},
{Name: "jets_airport_id_fk", Column: "airport_id", ForeignTable: "airports", ForeignColumn: "id"},
},
},
{
Name: "licenses",
Columns: []Column{{Name: "id"}, {Name: "pilot_id"}},
FKeys: []ForeignKey{
{Name: "licenses_pilot_id_fk", Column: "pilot_id", ForeignTable: "pilots", ForeignColumn: "id"},
},
},
{
Name: "hangars",
Columns: []Column{{Name: "id"}, {Name: "name"}},
},
{
Name: "languages",
Columns: []Column{{Name: "id"}, {Name: "language"}},
},
{
Name: "pilot_languages",
IsJoinTable: true,
Columns: []Column{{Name: "pilot_id"}, {Name: "language_id"}},
FKeys: []ForeignKey{
{Name: "pilot_id_fk", Column: "pilot_id", ForeignTable: "pilots", ForeignColumn: "id"},
{Name: "language_id_fk", Column: "language_id", ForeignTable: "languages", ForeignColumn: "id"},
},
},
}
relationships := ToManyRelationships("pilots", tables)
expected := []ToManyRelationship{
{
Table: "pilots",
Column: "id",
Nullable: false,
Unique: false,
ForeignTable: "jets",
ForeignColumn: "pilot_id",
ForeignColumnNullable: false,
ForeignColumnUnique: false,
ToJoinTable: false,
},
{
Table: "pilots",
Column: "id",
Nullable: false,
Unique: false,
ForeignTable: "licenses",
ForeignColumn: "pilot_id",
ForeignColumnNullable: false,
ForeignColumnUnique: false,
ToJoinTable: false,
},
{
Table: "pilots",
Column: "id",
Nullable: false,
Unique: false,
ForeignTable: "languages",
ForeignColumn: "id",
ForeignColumnNullable: false,
ForeignColumnUnique: false,
ToJoinTable: true,
JoinTable: "pilot_languages",
JoinLocalColumn: "pilot_id",
JoinLocalColumnNullable: false,
JoinLocalColumnUnique: false,
JoinForeignColumn: "language_id",
JoinForeignColumnNullable: false,
JoinForeignColumnUnique: false,
},
}
if len(relationships) != 3 {
t.Error("wrong # of relationships:", len(relationships))
}
for i, v := range relationships {
if !reflect.DeepEqual(v, expected[i]) {
t.Errorf("[%d] Mismatch between relationships:\n\nwant:%#v\n\ngot:%#v\n\n", i, expected[i], v)
}
}
}
func TestToManyRelationshipsNull(t *testing.T) {
t.Parallel()
tables := []Table{
{
Name: "pilots",
Columns: []Column{{Name: "id", Nullable: true}, {Name: "name", Nullable: true}}},
{
Name: "airports",
Columns: []Column{{Name: "id", Nullable: true}, {Name: "size", Nullable: true}},
},
{
Name: "jets",
Columns: []Column{{Name: "id", Nullable: true}, {Name: "pilot_id", Nullable: true}, {Name: "airport_id", Nullable: true}},
FKeys: []ForeignKey{
{Name: "jets_pilot_id_fk", Column: "pilot_id", ForeignTable: "pilots", ForeignColumn: "id", Nullable: true, ForeignColumnNullable: true},
{Name: "jets_airport_id_fk", Column: "airport_id", ForeignTable: "airports", ForeignColumn: "id", Nullable: true, ForeignColumnNullable: true},
},
},
{
Name: "licenses",
Columns: []Column{{Name: "id", Nullable: true}, {Name: "pilot_id", Nullable: true}},
FKeys: []ForeignKey{
{Name: "licenses_pilot_id_fk", Column: "pilot_id", ForeignTable: "pilots", ForeignColumn: "id", Nullable: true, ForeignColumnNullable: true},
},
},
{
Name: "hangars",
Columns: []Column{{Name: "id", Nullable: true}, {Name: "name", Nullable: true}},
},
{
Name: "languages",
Columns: []Column{{Name: "id", Nullable: true}, {Name: "language", Nullable: true}},
},
{
Name: "pilot_languages",
IsJoinTable: true,
Columns: []Column{{Name: "pilot_id", Nullable: true}, {Name: "language_id", Nullable: true}},
FKeys: []ForeignKey{
{Name: "pilot_id_fk", Column: "pilot_id", ForeignTable: "pilots", ForeignColumn: "id", Nullable: true, ForeignColumnNullable: true},
{Name: "language_id_fk", Column: "language_id", ForeignTable: "languages", ForeignColumn: "id", Nullable: true, ForeignColumnNullable: true},
},
},
}
relationships := ToManyRelationships("pilots", tables)
if len(relationships) != 3 {
t.Error("wrong # of relationships:", len(relationships))
}
expected := []ToManyRelationship{
{
Table: "pilots",
Column: "id",
Nullable: true,
Unique: false,
ForeignTable: "jets",
ForeignColumn: "pilot_id",
ForeignColumnNullable: true,
ForeignColumnUnique: false,
ToJoinTable: false,
},
{
Table: "pilots",
Column: "id",
Nullable: true,
Unique: false,
ForeignTable: "licenses",
ForeignColumn: "pilot_id",
ForeignColumnNullable: true,
ForeignColumnUnique: false,
ToJoinTable: false,
},
{
Table: "pilots",
Column: "id",
Nullable: true,
Unique: false,
ForeignTable: "languages",
ForeignColumn: "id",
ForeignColumnNullable: true,
ForeignColumnUnique: false,
ToJoinTable: true,
JoinTable: "pilot_languages",
JoinLocalColumn: "pilot_id",
JoinLocalColumnNullable: true,
JoinLocalColumnUnique: false,
JoinForeignColumn: "language_id",
JoinForeignColumnNullable: true,
JoinForeignColumnUnique: false,
},
}
for i, v := range relationships {
if !reflect.DeepEqual(v, expected[i]) {
t.Errorf("[%d] Mismatch between relationships:\n\nwant:%#v\n\ngot:%#v\n\n", i, expected[i], v)
}
}
}<|fim▁end|>
|
Unique: false,
|
<|file_name|>CustomUserDetailsService.java<|end_file_name|><|fim▁begin|><|fim▁hole|>import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.core.userdetails.UserDetailsService;
import org.springframework.security.core.userdetails.UsernameNotFoundException;
import org.springframework.stereotype.Service;
import java.util.Optional;
@Service
public class CustomUserDetailsService implements UserDetailsService {
private final UserRepository userRepository;
public CustomUserDetailsService(UserRepository userRepository) {
this.userRepository = userRepository;
}
@Override
public UserDetails loadUserByUsername(String username) throws UsernameNotFoundException {
return userRepository.findByEmail(username)
.map(u -> new UserPrincipal(u))
.orElseThrow(() -> new UsernameNotFoundException("No user found for " + username));
}
}<|fim▁end|>
|
package com.lyubenblagoev.postfixrest.security;
import com.lyubenblagoev.postfixrest.entity.User;
import com.lyubenblagoev.postfixrest.repository.UserRepository;
|
<|file_name|>url_fetcher_core.cc<|end_file_name|><|fim▁begin|>// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/url_request/url_fetcher_core.h"
#include <stdint.h>
#include "base/bind.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/profiler/scoped_tracker.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/thread_task_runner_handle.h"
#include "base/tracked_objects.h"
#include "net/base/elements_upload_data_stream.h"
#include "net/base/io_buffer.h"
#include "net/base/load_flags.h"
#include "net/base/net_errors.h"
#include "net/base/request_priority.h"
#include "net/base/upload_bytes_element_reader.h"
#include "net/base/upload_data_stream.h"
#include "net/base/upload_file_element_reader.h"
#include "net/http/http_response_headers.h"
#include "net/url_request/redirect_info.h"
#include "net/url_request/url_fetcher_delegate.h"
#include "net/url_request/url_fetcher_response_writer.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_context_getter.h"
#include "net/url_request/url_request_throttler_manager.h"
namespace {
const int kBufferSize = 4096;
const int kUploadProgressTimerInterval = 100;
bool g_ignore_certificate_requests = false;
void EmptyCompletionCallback(int result) {}
} // namespace
namespace net {
// URLFetcherCore::Registry ---------------------------------------------------
URLFetcherCore::Registry::Registry() {}
URLFetcherCore::Registry::~Registry() {}
void URLFetcherCore::Registry::AddURLFetcherCore(URLFetcherCore* core) {
DCHECK(!ContainsKey(fetchers_, core));
fetchers_.insert(core);
}
void URLFetcherCore::Registry::RemoveURLFetcherCore(URLFetcherCore* core) {
DCHECK(ContainsKey(fetchers_, core));
fetchers_.erase(core);
}
void URLFetcherCore::Registry::CancelAll() {
while (!fetchers_.empty())
(*fetchers_.begin())->CancelURLRequest(ERR_ABORTED);
}
// URLFetcherCore -------------------------------------------------------------
// static
base::LazyInstance<URLFetcherCore::Registry>
URLFetcherCore::g_registry = LAZY_INSTANCE_INITIALIZER;
URLFetcherCore::URLFetcherCore(URLFetcher* fetcher,
const GURL& original_url,
URLFetcher::RequestType request_type,
URLFetcherDelegate* d)
: fetcher_(fetcher),
original_url_(original_url),
request_type_(request_type),
delegate_(d),
delegate_task_runner_(base::ThreadTaskRunnerHandle::Get()),
load_flags_(LOAD_NORMAL),
response_code_(URLFetcher::RESPONSE_CODE_INVALID),
buffer_(new IOBuffer(kBufferSize)),
url_request_data_key_(NULL),
was_fetched_via_proxy_(false),
upload_content_set_(false),
upload_range_offset_(0),
upload_range_length_(0),
referrer_policy_(
URLRequest::CLEAR_REFERRER_ON_TRANSITION_FROM_SECURE_TO_INSECURE),
is_chunked_upload_(false),
was_cancelled_(false),
stop_on_redirect_(false),
stopped_on_redirect_(false),
automatically_retry_on_5xx_(true),
num_retries_on_5xx_(0),
max_retries_on_5xx_(0),
num_retries_on_network_changes_(0),
max_retries_on_network_changes_(0),
current_upload_bytes_(-1),
current_response_bytes_(0),
total_response_bytes_(-1) {
CHECK(original_url_.is_valid());
}
void URLFetcherCore::Start() {
DCHECK(delegate_task_runner_.get());
DCHECK(request_context_getter_.get()) << "We need an URLRequestContext!";
if (network_task_runner_.get()) {
DCHECK_EQ(network_task_runner_,
request_context_getter_->GetNetworkTaskRunner());
} else {
network_task_runner_ = request_context_getter_->GetNetworkTaskRunner();
}
DCHECK(network_task_runner_.get()) << "We need an IO task runner";
network_task_runner_->PostTask(
FROM_HERE, base::Bind(&URLFetcherCore::StartOnIOThread, this));
}
void URLFetcherCore::Stop() {
if (delegate_task_runner_.get()) // May be NULL in tests.
DCHECK(delegate_task_runner_->BelongsToCurrentThread());
delegate_ = NULL;
fetcher_ = NULL;
if (!network_task_runner_.get())
return;
if (network_task_runner_->RunsTasksOnCurrentThread()) {
CancelURLRequest(ERR_ABORTED);
} else {
network_task_runner_->PostTask(
FROM_HERE,
base::Bind(&URLFetcherCore::CancelURLRequest, this, ERR_ABORTED));
}
}
void URLFetcherCore::SetUploadData(const std::string& upload_content_type,
const std::string& upload_content) {
AssertHasNoUploadData();
DCHECK(!is_chunked_upload_);
DCHECK(upload_content_type_.empty());
// Empty |upload_content_type| is allowed iff the |upload_content| is empty.
DCHECK(upload_content.empty() || !upload_content_type.empty());
upload_content_type_ = upload_content_type;
upload_content_ = upload_content;
upload_content_set_ = true;
}
void URLFetcherCore::SetUploadFilePath(
const std::string& upload_content_type,
const base::FilePath& file_path,
uint64 range_offset,
uint64 range_length,
scoped_refptr<base::TaskRunner> file_task_runner) {
AssertHasNoUploadData();
DCHECK(!is_chunked_upload_);
DCHECK_EQ(upload_range_offset_, 0ULL);
DCHECK_EQ(upload_range_length_, 0ULL);
DCHECK(upload_content_type_.empty());
DCHECK(!upload_content_type.empty());
upload_content_type_ = upload_content_type;
upload_file_path_ = file_path;
upload_range_offset_ = range_offset;
upload_range_length_ = range_length;
upload_file_task_runner_ = file_task_runner;
upload_content_set_ = true;
}
void URLFetcherCore::SetUploadStreamFactory(
const std::string& upload_content_type,
const URLFetcher::CreateUploadStreamCallback& factory) {
AssertHasNoUploadData();
DCHECK(!is_chunked_upload_);
DCHECK(upload_content_type_.empty());
upload_content_type_ = upload_content_type;
upload_stream_factory_ = factory;
upload_content_set_ = true;
}
void URLFetcherCore::SetChunkedUpload(const std::string& content_type) {
if (!is_chunked_upload_) {
AssertHasNoUploadData();
DCHECK(upload_content_type_.empty());
}
// Empty |content_type| is not allowed here, because it is impossible
// to ensure non-empty upload content as it is not yet supplied.
DCHECK(!content_type.empty());
upload_content_type_ = content_type;
upload_content_.clear();
is_chunked_upload_ = true;
}
void URLFetcherCore::AppendChunkToUpload(const std::string& content,
bool is_last_chunk) {
DCHECK(delegate_task_runner_.get());
DCHECK(network_task_runner_.get());
network_task_runner_->PostTask(
FROM_HERE,
base::Bind(&URLFetcherCore::CompleteAddingUploadDataChunk, this, content,
is_last_chunk));
}
void URLFetcherCore::SetLoadFlags(int load_flags) {
load_flags_ = load_flags;
}
int URLFetcherCore::GetLoadFlags() const {
return load_flags_;
}
void URLFetcherCore::SetReferrer(const std::string& referrer) {
referrer_ = referrer;
}
void URLFetcherCore::SetReferrerPolicy(
URLRequest::ReferrerPolicy referrer_policy) {
referrer_policy_ = referrer_policy;
}
void URLFetcherCore::SetExtraRequestHeaders(
const std::string& extra_request_headers) {
extra_request_headers_.Clear();
extra_request_headers_.AddHeadersFromString(extra_request_headers);
}
void URLFetcherCore::AddExtraRequestHeader(const std::string& header_line) {
extra_request_headers_.AddHeaderFromString(header_line);
}
void URLFetcherCore::SetRequestContext(
URLRequestContextGetter* request_context_getter) {
DCHECK(!request_context_getter_.get());
DCHECK(request_context_getter);
request_context_getter_ = request_context_getter;
}
void URLFetcherCore::SetFirstPartyForCookies(
const GURL& first_party_for_cookies) {
DCHECK(first_party_for_cookies_.is_empty());
first_party_for_cookies_ = first_party_for_cookies;
}
void URLFetcherCore::SetURLRequestUserData(
const void* key,
const URLFetcher::CreateDataCallback& create_data_callback) {
DCHECK(key);
DCHECK(!create_data_callback.is_null());
url_request_data_key_ = key;
url_request_create_data_callback_ = create_data_callback;
}
void URLFetcherCore::SetStopOnRedirect(bool stop_on_redirect) {
stop_on_redirect_ = stop_on_redirect;
}
void URLFetcherCore::SetAutomaticallyRetryOn5xx(bool retry) {
automatically_retry_on_5xx_ = retry;
}
void URLFetcherCore::SetMaxRetriesOn5xx(int max_retries) {
max_retries_on_5xx_ = max_retries;
}
int URLFetcherCore::GetMaxRetriesOn5xx() const {
return max_retries_on_5xx_;
}
base::TimeDelta URLFetcherCore::GetBackoffDelay() const {
return backoff_delay_;
}
void URLFetcherCore::SetAutomaticallyRetryOnNetworkChanges(int max_retries) {
max_retries_on_network_changes_ = max_retries;
}
void URLFetcherCore::SaveResponseToFileAtPath(
const base::FilePath& file_path,
scoped_refptr<base::SequencedTaskRunner> file_task_runner) {
DCHECK(delegate_task_runner_->BelongsToCurrentThread());
SaveResponseWithWriter(scoped_ptr<URLFetcherResponseWriter>(
new URLFetcherFileWriter(file_task_runner, file_path)));
}
void URLFetcherCore::SaveResponseToTemporaryFile(
scoped_refptr<base::SequencedTaskRunner> file_task_runner) {
DCHECK(delegate_task_runner_->BelongsToCurrentThread());
SaveResponseWithWriter(scoped_ptr<URLFetcherResponseWriter>(
new URLFetcherFileWriter(file_task_runner, base::FilePath())));
}
void URLFetcherCore::SaveResponseWithWriter(
scoped_ptr<URLFetcherResponseWriter> response_writer) {
DCHECK(delegate_task_runner_->BelongsToCurrentThread());
response_writer_ = response_writer.Pass();
}
HttpResponseHeaders* URLFetcherCore::GetResponseHeaders() const {
return response_headers_.get();
}
// TODO(panayiotis): socket_address_ is written in the IO thread,
// if this is accessed in the UI thread, this could result in a race.
// Same for response_headers_ above and was_fetched_via_proxy_ below.
HostPortPair URLFetcherCore::GetSocketAddress() const {
return socket_address_;
}
bool URLFetcherCore::WasFetchedViaProxy() const {
return was_fetched_via_proxy_;
}
const GURL& URLFetcherCore::GetOriginalURL() const {
return original_url_;
}
const GURL& URLFetcherCore::GetURL() const {
return url_;
}
const URLRequestStatus& URLFetcherCore::GetStatus() const {
return status_;
}
int URLFetcherCore::GetResponseCode() const {
return response_code_;
}
const ResponseCookies& URLFetcherCore::GetCookies() const {
return cookies_;
}
void URLFetcherCore::ReceivedContentWasMalformed() {
DCHECK(delegate_task_runner_->BelongsToCurrentThread());
if (network_task_runner_.get()) {
network_task_runner_->PostTask(
FROM_HERE, base::Bind(&URLFetcherCore::NotifyMalformedContent, this));
}
}
bool URLFetcherCore::GetResponseAsString(
std::string* out_response_string) const {
URLFetcherStringWriter* string_writer =
response_writer_ ? response_writer_->AsStringWriter() : NULL;
if (!string_writer)
return false;
*out_response_string = string_writer->data();
UMA_HISTOGRAM_MEMORY_KB("UrlFetcher.StringResponseSize",
(string_writer->data().length() / 1024));
return true;
}
bool URLFetcherCore::GetResponseAsFilePath(bool take_ownership,
base::FilePath* out_response_path) {
DCHECK(delegate_task_runner_->BelongsToCurrentThread());
URLFetcherFileWriter* file_writer =
response_writer_ ? response_writer_->AsFileWriter() : NULL;
if (!file_writer)
return false;
*out_response_path = file_writer->file_path();
if (take_ownership) {
// Intentionally calling a file_writer_ method directly without posting
// the task to network_task_runner_.
//
// This is for correctly handling the case when file_writer_->DisownFile()
// is soon followed by URLFetcherCore::Stop(). We have to make sure that
// DisownFile takes effect before Stop deletes file_writer_.
//
// This direct call should be thread-safe, since DisownFile itself does no
// file operation. It just flips the state to be referred in destruction.
file_writer->DisownFile();
}
return true;
}
void URLFetcherCore::OnReceivedRedirect(URLRequest* request,
const RedirectInfo& redirect_info,
bool* defer_redirect) {
DCHECK_EQ(request, request_.get());
DCHECK(network_task_runner_->BelongsToCurrentThread());
if (stop_on_redirect_) {
stopped_on_redirect_ = true;
url_ = redirect_info.new_url;
response_code_ = request_->GetResponseCode();
was_fetched_via_proxy_ = request_->was_fetched_via_proxy();
request->Cancel();
OnReadCompleted(request, 0);
}
}
void URLFetcherCore::OnResponseStarted(URLRequest* request) {
DCHECK_EQ(request, request_.get());
DCHECK(network_task_runner_->BelongsToCurrentThread());
if (request_->status().is_success()) {
response_code_ = request_->GetResponseCode();
response_headers_ = request_->response_headers();
socket_address_ = request_->GetSocketAddress();
was_fetched_via_proxy_ = request_->was_fetched_via_proxy();
total_response_bytes_ = request_->GetExpectedContentSize();
}
ReadResponse();
}
void URLFetcherCore::OnCertificateRequested(
URLRequest* request,
SSLCertRequestInfo* cert_request_info) {
DCHECK_EQ(request, request_.get());
DCHECK(network_task_runner_->BelongsToCurrentThread());
if (g_ignore_certificate_requests) {
request->ContinueWithCertificate(NULL);
} else {
request->Cancel();
}
}
void URLFetcherCore::OnReadCompleted(URLRequest* request,
int bytes_read) {
DCHECK(request == request_);
DCHECK(network_task_runner_->BelongsToCurrentThread());
if (!stopped_on_redirect_)
url_ = request->url();
URLRequestThrottlerManager* throttler_manager =
request->context()->throttler_manager();
if (throttler_manager)
url_throttler_entry_ = throttler_manager->RegisterRequestUrl(url_);
do {
if (!request_->status().is_success() || bytes_read <= 0)
break;
current_response_bytes_ += bytes_read;
InformDelegateDownloadProgress();
const int result =
WriteBuffer(new DrainableIOBuffer(buffer_.get(), bytes_read));
if (result < 0) {
// Write failed or waiting for write completion.
return;
}
} while (request_->Read(buffer_.get(), kBufferSize, &bytes_read));
const URLRequestStatus status = request_->status();
if (status.is_success())
request_->GetResponseCookies(&cookies_);
// See comments re: HEAD requests in ReadResponse().
if (!status.is_io_pending() || request_type_ == URLFetcher::HEAD) {
status_ = status;
ReleaseRequest();
// No more data to write.
const int result = response_writer_->Finish(
base::Bind(&URLFetcherCore::DidFinishWriting, this));
if (result != ERR_IO_PENDING)
DidFinishWriting(result);
}
}
void URLFetcherCore::CancelAll() {
g_registry.Get().CancelAll();
}
int URLFetcherCore::GetNumFetcherCores() {
return g_registry.Get().size();
}
void URLFetcherCore::SetIgnoreCertificateRequests(bool ignored) {
g_ignore_certificate_requests = ignored;
}
URLFetcherCore::~URLFetcherCore() {
// |request_| should be NULL. If not, it's unsafe to delete it here since we
// may not be on the IO thread.
DCHECK(!request_.get());
}
void URLFetcherCore::StartOnIOThread() {
DCHECK(network_task_runner_->BelongsToCurrentThread());
if (!response_writer_)
response_writer_.reset(new URLFetcherStringWriter);
const int result = response_writer_->Initialize(
base::Bind(&URLFetcherCore::DidInitializeWriter, this));
if (result != ERR_IO_PENDING)
DidInitializeWriter(result);
}
void URLFetcherCore::StartURLRequest() {
DCHECK(network_task_runner_->BelongsToCurrentThread());
if (was_cancelled_) {
// Since StartURLRequest() is posted as a *delayed* task, it may
// run after the URLFetcher was already stopped.
return;
}
DCHECK(request_context_getter_.get());
DCHECK(!request_.get());
g_registry.Get().AddURLFetcherCore(this);
current_response_bytes_ = 0;
request_ = request_context_getter_->GetURLRequestContext()->CreateRequest(
original_url_, DEFAULT_PRIORITY, this);
request_->set_stack_trace(stack_trace_);
int flags = request_->load_flags() | load_flags_;
if (is_chunked_upload_)
request_->EnableChunkedUpload();
request_->SetLoadFlags(flags);
request_->SetReferrer(referrer_);
request_->set_referrer_policy(referrer_policy_);
request_->set_first_party_for_cookies(first_party_for_cookies_.is_empty() ?
original_url_ : first_party_for_cookies_);
if (url_request_data_key_ && !url_request_create_data_callback_.is_null()) {
request_->SetUserData(url_request_data_key_,
url_request_create_data_callback_.Run());
}
switch (request_type_) {
case URLFetcher::GET:
break;
case URLFetcher::POST:
case URLFetcher::PUT:
case URLFetcher::PATCH: {
// Upload content must be set.
DCHECK(is_chunked_upload_ || upload_content_set_);
request_->set_method(
request_type_ == URLFetcher::POST ? "POST" :
request_type_ == URLFetcher::PUT ? "PUT" : "PATCH");
if (!upload_content_type_.empty()) {
extra_request_headers_.SetHeader(HttpRequestHeaders::kContentType,
upload_content_type_);
}
if (!upload_content_.empty()) {
scoped_ptr<UploadElementReader> reader(new UploadBytesElementReader(
upload_content_.data(), upload_content_.size()));
request_->set_upload(
ElementsUploadDataStream::CreateWithReader(reader.Pass(), 0));
} else if (!upload_file_path_.empty()) {
scoped_ptr<UploadElementReader> reader(
new UploadFileElementReader(upload_file_task_runner_.get(),
upload_file_path_,
upload_range_offset_,
upload_range_length_,
base::Time()));
request_->set_upload(
ElementsUploadDataStream::CreateWithReader(reader.Pass(), 0));
} else if (!upload_stream_factory_.is_null()) {
scoped_ptr<UploadDataStream> stream = upload_stream_factory_.Run();
DCHECK(stream);
request_->set_upload(stream.Pass());
}
current_upload_bytes_ = -1;
// TODO(kinaba): http://crbug.com/118103. Implement upload callback in the
// layer and avoid using timer here.
upload_progress_checker_timer_.reset(
new base::RepeatingTimer<URLFetcherCore>());
upload_progress_checker_timer_->Start(
FROM_HERE,
base::TimeDelta::FromMilliseconds(kUploadProgressTimerInterval),
this,
&URLFetcherCore::InformDelegateUploadProgress);
break;
}
case URLFetcher::HEAD:
request_->set_method("HEAD");
break;
case URLFetcher::DELETE_REQUEST:
request_->set_method("DELETE");
break;
default:
NOTREACHED();
}
if (!extra_request_headers_.IsEmpty())
request_->SetExtraRequestHeaders(extra_request_headers_);
request_->Start();
}
void URLFetcherCore::DidInitializeWriter(int result) {
if (result != OK) {
CancelURLRequest(result);
delegate_task_runner_->PostTask(
FROM_HERE,
base::Bind(&URLFetcherCore::InformDelegateFetchIsComplete, this));
return;
}
StartURLRequestWhenAppropriate();
}
void URLFetcherCore::StartURLRequestWhenAppropriate() {
DCHECK(network_task_runner_->BelongsToCurrentThread());
if (was_cancelled_)
return;
DCHECK(request_context_getter_.get());
int64 delay = 0;
if (!original_url_throttler_entry_.get()) {
URLRequestThrottlerManager* manager =
request_context_getter_->GetURLRequestContext()->throttler_manager();
if (manager) {
original_url_throttler_entry_ =
manager->RegisterRequestUrl(original_url_);
}
}
if (original_url_throttler_entry_.get()) {
delay = original_url_throttler_entry_->ReserveSendingTimeForNextRequest(
GetBackoffReleaseTime());
}
if (delay == 0) {
StartURLRequest();
} else {
base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, base::Bind(&URLFetcherCore::StartURLRequest, this),
base::TimeDelta::FromMilliseconds(delay));
}
}
void URLFetcherCore::CancelURLRequest(int error) {
DCHECK(network_task_runner_->BelongsToCurrentThread());
if (request_.get()) {
request_->CancelWithError(error);
ReleaseRequest();
}
// Set the error manually.
// Normally, calling URLRequest::CancelWithError() results in calling
// OnReadCompleted() with bytes_read = -1 via an asynchronous task posted by
// URLRequestJob::NotifyDone(). But, because the request was released
// immediately after being canceled, the request could not call
// OnReadCompleted() which overwrites |status_| with the error status.
status_.set_status(URLRequestStatus::CANCELED);
status_.set_error(error);
// Release the reference to the request context. There could be multiple
// references to URLFetcher::Core at this point so it may take a while to
// delete the object, but we cannot delay the destruction of the request
// context.
request_context_getter_ = NULL;
first_party_for_cookies_ = GURL();
url_request_data_key_ = NULL;
url_request_create_data_callback_.Reset();
was_cancelled_ = true;
}
void URLFetcherCore::OnCompletedURLRequest(
base::TimeDelta backoff_delay) {
DCHECK(delegate_task_runner_->BelongsToCurrentThread());
// Save the status and backoff_delay so that delegates can read it.
if (delegate_) {
backoff_delay_ = backoff_delay;
InformDelegateFetchIsComplete();
}
}
void URLFetcherCore::InformDelegateFetchIsComplete() {
DCHECK(delegate_task_runner_->BelongsToCurrentThread());
if (delegate_)
delegate_->OnURLFetchComplete(fetcher_);
}
void URLFetcherCore::NotifyMalformedContent() {
DCHECK(network_task_runner_->BelongsToCurrentThread());
if (url_throttler_entry_.get()) {
int status_code = response_code_;
if (status_code == URLFetcher::RESPONSE_CODE_INVALID) {
// The status code will generally be known by the time clients
// call the |ReceivedContentWasMalformed()| function (which ends up
// calling the current function) but if it's not, we need to assume
// the response was successful so that the total failure count
// used to calculate exponential back-off goes up.
status_code = 200;
}
url_throttler_entry_->ReceivedContentWasMalformed(status_code);
}
}
void URLFetcherCore::DidFinishWriting(int result) {
if (result != OK) {
CancelURLRequest(result);
delegate_task_runner_->PostTask(
FROM_HERE,
base::Bind(&URLFetcherCore::InformDelegateFetchIsComplete, this));
return;
}
// If the file was successfully closed, then the URL request is complete.
RetryOrCompleteUrlFetch();
}
void URLFetcherCore::RetryOrCompleteUrlFetch() {
DCHECK(network_task_runner_->BelongsToCurrentThread());
base::TimeDelta backoff_delay;
// Checks the response from server.
if (response_code_ >= 500 ||
status_.error() == ERR_TEMPORARILY_THROTTLED) {
// When encountering a server error, we will send the request again
// after backoff time.
++num_retries_on_5xx_;
// Note that backoff_delay may be 0 because (a) the
// URLRequestThrottlerManager and related code does not
// necessarily back off on the first error, (b) it only backs off
// on some of the 5xx status codes, (c) not all URLRequestContexts
// have a throttler manager.
base::TimeTicks backoff_release_time = GetBackoffReleaseTime();
backoff_delay = backoff_release_time - base::TimeTicks::Now();
if (backoff_delay < base::TimeDelta())
backoff_delay = base::TimeDelta();
if (automatically_retry_on_5xx_ &&
num_retries_on_5xx_ <= max_retries_on_5xx_) {
StartOnIOThread();
return;
}
} else {
backoff_delay = base::TimeDelta();
}
// Retry if the request failed due to network changes.
if (status_.error() == ERR_NETWORK_CHANGED &&
num_retries_on_network_changes_ < max_retries_on_network_changes_) {
++num_retries_on_network_changes_;
// Retry soon, after flushing all the current tasks which may include
// further network change observers.
network_task_runner_->PostTask(
FROM_HERE, base::Bind(&URLFetcherCore::StartOnIOThread, this));
return;
}
request_context_getter_ = NULL;
first_party_for_cookies_ = GURL();
url_request_data_key_ = NULL;
url_request_create_data_callback_.Reset();
bool posted = delegate_task_runner_->PostTask(
FROM_HERE,
base::Bind(&URLFetcherCore::OnCompletedURLRequest, this, backoff_delay));
// If the delegate message loop does not exist any more, then the delegate
// should be gone too.
DCHECK(posted || !delegate_);
}
void URLFetcherCore::ReleaseRequest() {
upload_progress_checker_timer_.reset();
request_.reset();
g_registry.Get().RemoveURLFetcherCore(this);
}
base::TimeTicks URLFetcherCore::GetBackoffReleaseTime() {
DCHECK(network_task_runner_->BelongsToCurrentThread());
if (!original_url_throttler_entry_.get())
return base::TimeTicks();
base::TimeTicks original_url_backoff =
original_url_throttler_entry_->GetExponentialBackoffReleaseTime();
base::TimeTicks destination_url_backoff;
if (url_throttler_entry_.get() &&
original_url_throttler_entry_.get() != url_throttler_entry_.get()) {
destination_url_backoff =
url_throttler_entry_->GetExponentialBackoffReleaseTime();
}
return original_url_backoff > destination_url_backoff ?
original_url_backoff : destination_url_backoff;
}
void URLFetcherCore::CompleteAddingUploadDataChunk(
const std::string& content, bool is_last_chunk) {<|fim▁hole|> if (was_cancelled_) {
// Since CompleteAddingUploadDataChunk() is posted as a *delayed* task, it
// may run after the URLFetcher was already stopped.
return;
}
DCHECK(is_chunked_upload_);
DCHECK(request_.get());
DCHECK(!content.empty());
request_->AppendChunkToUpload(content.data(),
static_cast<int>(content.length()),
is_last_chunk);
}
int URLFetcherCore::WriteBuffer(scoped_refptr<DrainableIOBuffer> data) {
while (data->BytesRemaining() > 0) {
const int result = response_writer_->Write(
data.get(),
data->BytesRemaining(),
base::Bind(&URLFetcherCore::DidWriteBuffer, this, data));
if (result < 0) {
if (result != ERR_IO_PENDING)
DidWriteBuffer(data, result);
return result;
}
data->DidConsume(result);
}
return OK;
}
void URLFetcherCore::DidWriteBuffer(scoped_refptr<DrainableIOBuffer> data,
int result) {
if (result < 0) { // Handle errors.
CancelURLRequest(result);
response_writer_->Finish(base::Bind(&EmptyCompletionCallback));
delegate_task_runner_->PostTask(
FROM_HERE,
base::Bind(&URLFetcherCore::InformDelegateFetchIsComplete, this));
return;
}
// Continue writing.
data->DidConsume(result);
if (WriteBuffer(data) < 0)
return;
// Finished writing buffer_. Read some more, unless the request has been
// cancelled and deleted.
DCHECK_EQ(0, data->BytesRemaining());
if (request_.get())
ReadResponse();
}
void URLFetcherCore::ReadResponse() {
// Some servers may treat HEAD requests as GET requests. To free up the
// network connection as soon as possible, signal that the request has
// completed immediately, without trying to read any data back (all we care
// about is the response code and headers, which we already have).
int bytes_read = 0;
if (request_->status().is_success() &&
(request_type_ != URLFetcher::HEAD)) {
if (!request_->Read(buffer_.get(), kBufferSize, &bytes_read))
bytes_read = -1; // Match OnReadCompleted() interface contract.
}
OnReadCompleted(request_.get(), bytes_read);
}
void URLFetcherCore::InformDelegateUploadProgress() {
DCHECK(network_task_runner_->BelongsToCurrentThread());
if (request_.get()) {
int64 current = request_->GetUploadProgress().position();
if (current_upload_bytes_ != current) {
current_upload_bytes_ = current;
int64 total = -1;
if (!is_chunked_upload_) {
total = static_cast<int64>(request_->GetUploadProgress().size());
// Total may be zero if the UploadDataStream::Init has not been called
// yet. Don't send the upload progress until the size is initialized.
if (!total)
return;
}
delegate_task_runner_->PostTask(
FROM_HERE,
base::Bind(
&URLFetcherCore::InformDelegateUploadProgressInDelegateThread,
this, current, total));
}
}
}
void URLFetcherCore::InformDelegateUploadProgressInDelegateThread(
int64 current, int64 total) {
DCHECK(delegate_task_runner_->BelongsToCurrentThread());
if (delegate_)
delegate_->OnURLFetchUploadProgress(fetcher_, current, total);
}
void URLFetcherCore::InformDelegateDownloadProgress() {
DCHECK(network_task_runner_->BelongsToCurrentThread());
// TODO(pkasting): Remove ScopedTracker below once crbug.com/455952 is fixed.
tracked_objects::ScopedTracker tracking_profile2(
FROM_HERE_WITH_EXPLICIT_FUNCTION(
"455952 delegate_task_runner_->PostTask()"));
delegate_task_runner_->PostTask(
FROM_HERE,
base::Bind(
&URLFetcherCore::InformDelegateDownloadProgressInDelegateThread,
this, current_response_bytes_, total_response_bytes_));
}
void URLFetcherCore::InformDelegateDownloadProgressInDelegateThread(
int64 current, int64 total) {
DCHECK(delegate_task_runner_->BelongsToCurrentThread());
if (delegate_)
delegate_->OnURLFetchDownloadProgress(fetcher_, current, total);
}
void URLFetcherCore::AssertHasNoUploadData() const {
DCHECK(!upload_content_set_);
DCHECK(upload_content_.empty());
DCHECK(upload_file_path_.empty());
DCHECK(upload_stream_factory_.is_null());
}
} // namespace net<|fim▁end|>
| |
<|file_name|>SharedKeyDB.py<|end_file_name|><|fim▁begin|>"""Class for storing shared keys."""
from utils.cryptomath import *
from utils.compat import *
from mathtls import *
from Session import Session
from BaseDB import BaseDB
class SharedKeyDB(BaseDB):<|fim▁hole|> A SharedKeyDB can be passed to a server handshake function to
authenticate a client based on one of the shared keys.
This class is thread-safe.
"""
def __init__(self, filename=None):
"""Create a new SharedKeyDB.
@type filename: str
@param filename: Filename for an on-disk database, or None for
an in-memory database. If the filename already exists, follow
this with a call to open(). To create a new on-disk database,
follow this with a call to create().
"""
BaseDB.__init__(self, filename, "shared key")
def _getItem(self, username, valueStr):
session = Session()
session._createSharedKey(username, valueStr)
return session
def __setitem__(self, username, sharedKey):
"""Add a shared key to the database.
@type username: str
@param username: The username to associate the shared key with.
Must be less than or equal to 16 characters in length, and must
not already be in the database.
@type sharedKey: str
@param sharedKey: The shared key to add. Must be less than 48
characters in length.
"""
BaseDB.__setitem__(self, username, sharedKey)
def _setItem(self, username, value):
if len(username)>16:
raise ValueError("username too long")
if len(value)>=48:
raise ValueError("shared key too long")
return value
def _checkItem(self, value, username, param):
newSession = self._getItem(username, param)
return value.masterSecret == newSession.masterSecret<|fim▁end|>
|
"""This class represent an in-memory or on-disk database of shared
keys.
|