file_name | prefix | suffix | middle
---|---|---|---|
base.py | from .env_spec import EnvSpec
import collections
from cached_property import cached_property
class Env(object):
|
_Step = collections.namedtuple("Step", ["observation", "reward", "done", "info"])
def Step(observation, reward, done, **kwargs):
"""
Convenience method for creating a namedtuple with the results of the
environment.step() method.
Put extra diagnostic info in the kwargs.
"""
return _Step(observation, reward, done, kwargs)
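# Illustrative use inside a subclass's step() implementation (names here are hypothetical):
#     return Step(next_observation, reward, done, timestep=t)
# Any extra keyword arguments end up in the returned `info` dictionary.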
| def step(self, action):
"""
Run one timestep of the environment's dynamics. When end of episode
is reached, reset() should be called to reset the environment's internal state.
Input
-----
action : an action provided by the agent
Outputs
-------
(observation, reward, done, info)
observation : agent's observation of the current environment
reward [Float] : amount of reward due to the previous action
done : a boolean, indicating whether the episode has ended
info : a dictionary containing other diagnostic information from the previous action
"""
raise NotImplementedError
def reset(self):
"""
Resets the state of the environment, returning an initial observation.
Outputs
-------
observation : the initial observation of the space. (Initial reward is assumed to be 0.)
"""
raise NotImplementedError
@property
def action_space(self):
"""
Returns a Space object
:rtype: rllab.spaces.base.Space
"""
raise NotImplementedError
@property
def observation_space(self):
"""
Returns a Space object
:rtype: rllab.spaces.base.Space
"""
raise NotImplementedError
# Helpers that derive from Spaces
@property
def action_dim(self):
return self.action_space.flat_dim
def render(self):
pass
def log_diagnostics(self, paths):
"""
Log extra information per iteration based on the collected paths
"""
pass
@cached_property
def spec(self):
return EnvSpec(
observation_space=self.observation_space,
action_space=self.action_space,
)
@property
def horizon(self):
"""
Horizon of the environment, if it has one
"""
raise NotImplementedError
def terminate(self):
"""
Clean-up operation.
"""
pass
def get_param_values(self):
return None
def set_param_values(self, params):
pass |
util.rs | /*
* $Id$
*
* Copyright (c) 2021, Purushottam A. Kulkarni.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
//!
//! # Generic Utilities
//!
//! This module provides generic utilities for convenience purposes.
//!
//! # Layout
//!
//! A number of public APIs are defined here.
//!
///
/// ## Pad the IV up to `ivsize` bytes
///
/// This is a utility function used to pad the IV provided to symmetric key
/// and AEAD ciphers such that unwanted errors do not occur.
///
/// This function takes:
/// * `ivsize` - A known good size for the IV (`usize`).
/// * `iv` - A `Vec<u8>` containing the IV to be padded.
///
/// This function returns an IV padded with `0x00`s up to `ivsize`.
///
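/// ### Example (an illustrative sketch, not from the original documentation)
///
/// `pad_iv(16, vec![0x01, 0x02, 0x03])` returns a 16-byte vector whose first three
/// bytes are `0x01, 0x02, 0x03` and whose remaining bytes are `0x00`. The caller is
/// expected to pass an `iv` no longer than `ivsize`.
///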
pub fn pad_iv(ivsize: usize, iv: Vec<u8>) -> Vec<u8> {
let mut newiv = vec![0u8; ivsize];
newiv[..iv.len()].clone_from_slice(&iv);
newiv
}
///
/// ## Obtain the version of `libkcapi`
///
/// The function returns a version number that is monotonically increasing for newer
/// versions. The version numbers are multiples of 100. For example, version 1.3.1
/// is converted to 1030100 -- the last two digits are reserved for future use.
///
/// ## Examples
///
/// ```no_run
/// use kcapi::util::lib_version;
///
/// assert_eq!(lib_version(), 1050000);
/// ```
pub fn | () -> u32 {
let version: u32;
unsafe { version = kcapi_sys::kcapi_version() }
version
}
| lib_version |
TagLink.tsx | import Link from "next/link";
import { TagContent } from "../lib/tags";
type Props = {
tag: TagContent;
}; | return (
<Link href={"/posts/tags/[[...slug]]" + location.search} as={`/posts/tags/${tag.slug}${location.search}`}>
<a>{"#" + tag.name}</a>
</Link>
);
} | export default function Tag({ tag }: Props) { |
controllers.py | from flask import Blueprint, jsonify
main_bp = Blueprint('main', __name__)
@main_bp.route('/')
def api_home():
| return jsonify({"message": "Welcome to the Student Hub API"}) |
|
remove_test.go | // +build !integration
package actions
import (
"fmt"
"testing"
"github.com/docker-flow/docker-flow-proxy/proxy"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
)
type RemoveTestSuite struct {
suite.Suite
remove Remove
ServiceName string
ConfigsPath string
TemplatesPath string
InstanceName string
}
func TestRemoveUnitTestSuite(t *testing.T) |
func (s *RemoveTestSuite) SetupTest() {
s.ServiceName = "myService"
s.TemplatesPath = "/path/to/templates"
s.ConfigsPath = "/path/to/configs"
s.InstanceName = "my-proxy-instance"
osRemove = func(name string) error {
return nil
}
s.remove = Remove{
ServiceName: s.ServiceName,
ConfigsPath: s.ConfigsPath,
TemplatesPath: s.TemplatesPath,
InstanceName: s.InstanceName,
}
}
// Execute
func (s RemoveTestSuite) Test_Execute_RemovesConfigurationFile() {
var actual []string
expected := []string{
fmt.Sprintf("%s/%s-fe.cfg", s.TemplatesPath, s.ServiceName),
fmt.Sprintf("%s/%s-be.cfg", s.TemplatesPath, s.ServiceName),
}
osRemove = func(name string) error {
actual = append(actual, name)
return nil
}
s.remove.Execute([]string{})
s.Equal(expected, actual)
}
func (s RemoveTestSuite) Test_Execute_RemovesConfigurationFileUsingAclName_WhenPresent() {
s.remove.AclName = "my-acl"
var actual []string
expected := []string{
fmt.Sprintf("%s/%s-fe.cfg", s.TemplatesPath, s.remove.AclName),
fmt.Sprintf("%s/%s-be.cfg", s.TemplatesPath, s.remove.AclName),
}
osRemove = func(name string) error {
actual = append(actual, name)
return nil
}
s.remove.Execute([]string{})
s.Equal(expected, actual)
}
func (s RemoveTestSuite) Test_Execute_Invokes_HaProxyCreateConfigFromTemplates() {
proxyOrig := proxy.Instance
defer func() { proxy.Instance = proxyOrig }()
mockObj := getProxyMock("")
proxy.Instance = mockObj
s.remove.Execute([]string{})
mockObj.AssertCalled(s.T(), "CreateConfigFromTemplates")
}
func (s RemoveTestSuite) Test_Execute_ReturnsError_WhenHaProxyCreateConfigFromTemplatesFails() {
proxyOrig := proxy.Instance
defer func() { proxy.Instance = proxyOrig }()
mockObj := getProxyMock("CreateConfigFromTemplates")
mockObj.On("CreateConfigFromTemplates", mock.Anything, mock.Anything).Return(fmt.Errorf("This is an error"))
proxy.Instance = mockObj
err := s.remove.Execute([]string{})
s.Error(err)
}
func (s RemoveTestSuite) Test_Execute_Invokes_HaProxyReload() {
proxyOrig := proxy.Instance
defer func() { proxy.Instance = proxyOrig }()
mockObj := getProxyMock("")
proxy.Instance = mockObj
s.remove.Execute([]string{})
mockObj.AssertCalled(s.T(), "Reload")
}
func (s RemoveTestSuite) Test_Execute_If_RemoveService_Does_Not_Invokes_HaProxyReload() {
proxyOrig := proxy.Instance
defer func() { proxy.Instance = proxyOrig }()
mockObj := getProxyMock("RemoveService")
mockObj.On("RemoveService", mock.Anything).Return(false)
proxy.Instance = mockObj
s.remove.Execute([]string{})
mockObj.AssertNotCalled(s.T(), "Reload")
}
func (s RemoveTestSuite) Test_Execute_ReturnsError_WhenHaProxyReloadFails() {
proxyOrig := proxy.Instance
defer func() { proxy.Instance = proxyOrig }()
mockObj := getProxyMock("CreateConfigFromTemplates")
mockObj.On("CreateConfigFromTemplates", mock.Anything, mock.Anything).Return(fmt.Errorf("This is an error"))
proxy.Instance = mockObj
err := s.remove.Execute([]string{})
s.Error(err)
}
func (s RemoveTestSuite) Test_Execute_RemovesService() {
mockObj := getProxyMock("")
proxyOrig := proxy.Instance
defer func() { proxy.Instance = proxyOrig }()
proxy.Instance = mockObj
s.remove.ServiceName = "my-soon-to-be-removed-service"
s.remove.Execute([]string{})
mockObj.AssertCalled(s.T(), "RemoveService", s.remove.ServiceName)
}
| {
logPrintf = func(format string, v ...interface{}) {}
proxyOrig := proxy.Instance
defer func() { proxy.Instance = proxyOrig }()
proxy.Instance = getProxyMock("")
suite.Run(t, new(RemoveTestSuite))
} |
error.rs | //! Server related errors
use std::{fmt, io, result, sync};
use failure::{Backtrace, Context, Fail};
use futures::sync::mpsc;
use crate::ps::agent::cache;
use crate::ps::agent::types::ServiceId;
pub type Result<T> = result::Result<T, Error>;
#[derive(Debug)]
pub struct Error {
ctx: Context<ErrorKind>,
}
impl Error {
pub fn kind(&self) -> &ErrorKind {
self.ctx.get_context()
}
pub fn invalid_message_type<S: Into<String>>(message_type: S) -> Error {
ErrorKind::InvalidMessageType {
message_type: message_type.into(),
}
.into()
}
pub fn port_already_in_use(port: u16, service_id: ServiceId) -> Error {
ErrorKind::PortAlreadyInUseError { port, service_id }.into()
}
pub fn startup<S: Into<String>>(message: S) -> Error {
ErrorKind::StartupError {
message: message.into(),
}
.into()
}
pub fn io_error<S: Into<String>>(message: S) -> Error {
ErrorKind::IoError {
error: message.into(),
}
.into()
}
}
impl Fail for Error {
fn cause(&self) -> Option<&dyn Fail> {
self.ctx.cause()
}
fn backtrace(&self) -> Option<&Backtrace> {
self.ctx.backtrace()
}
}
impl Clone for Error {
fn clone(&self) -> Self {
self.kind().clone().into()
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.ctx.fmt(f)
}
}
#[derive(Clone, Debug, Eq, PartialEq, Fail)]
pub enum ErrorKind {
#[fail(display = "Server operation cancelled")]
Cancelled,
#[fail(display = "Invalid port: {}:{}", hostname, port)]
InvalidPort { hostname: String, port: u16 },
#[fail(display = "Port already in use: {}", port)]
PortAlreadyInUseError { port: u16, service_id: ServiceId },
#[fail(display = "Invalid message type: {}", message_type)]
InvalidMessageType { message_type: String },
#[fail(display = "Startup error: {}", message)]
StartupError { message: String },
#[fail(display = "Server shutdown unexpectedly")]
ShutdownError,
#[fail(display = "Protobuf error: {}", error)]
ProtobufError { error: String },
#[fail(display = "Tungstenite websocket error: {}", error)]
TungsteniteError { error: String },
#[fail(display = "std::sync::PoisonError: {}", error)]
SyncPoisonError { error: String },
#[fail(display = "MPSC send error: {}", error)]
MpscSendError { error: String },
#[fail(display = "Empty timeseries message segment")]
EmptyMessage,
#[fail(display = "JSON error: {}", error)]
JsonError { error: String },
#[fail(display = "URL parse error: {}", error)]
UrlParseError { error: String },
#[fail(display = "I/O error: {}", error)]
IoError { error: String },
#[fail(display = "Cache error: {}", kind)]
CacheError { kind: cache::ErrorKind },
}
impl From<ErrorKind> for Error {
fn from(kind: ErrorKind) -> Error |
}
impl From<Context<ErrorKind>> for Error {
fn from(ctx: Context<ErrorKind>) -> Error {
Error { ctx }
}
}
/// map from cache errors
impl From<cache::ErrorKind> for Error {
fn from(kind: cache::ErrorKind) -> Error {
Error::from(Context::new(ErrorKind::CacheError { kind }))
}
}
impl From<cache::Error> for Error {
fn from(error: cache::Error) -> Error {
error.kind().clone().into()
}
}
/// map from io errors
impl From<io::Error> for Error {
fn from(error: io::Error) -> Error {
Error::from(Context::new(ErrorKind::IoError {
error: error.to_string(),
}))
}
}
/// map from url parse errors
impl From<url::ParseError> for Error {
fn from(error: url::ParseError) -> Error {
Error::from(Context::new(ErrorKind::UrlParseError {
error: error.to_string(),
}))
}
}
/// map from serde_json errors
impl From<serde_json::Error> for Error {
fn from(error: serde_json::Error) -> Error {
Error::from(Context::new(ErrorKind::JsonError {
error: error.to_string(),
}))
}
}
/// map from protobuf errors
impl From<protobuf::ProtobufError> for Error {
fn from(error: protobuf::ProtobufError) -> Error {
Error::from(Context::new(ErrorKind::ProtobufError {
error: error.to_string(),
}))
}
}
/// map from tungstenite errors
impl From<tungstenite::Error> for Error {
fn from(error: tungstenite::Error) -> Error {
Error::from(Context::new(ErrorKind::TungsteniteError {
error: error.to_string(),
}))
}
}
/// map from mpsc errors
impl<T> From<mpsc::SendError<T>> for Error {
fn from(error: mpsc::SendError<T>) -> Error {
Error::from(Context::new(ErrorKind::MpscSendError {
error: error.to_string(),
}))
}
}
/// map from sync PoisonError
impl<T> From<sync::PoisonError<T>> for Error {
fn from(error: sync::PoisonError<T>) -> Error {
Error::from(Context::new(ErrorKind::SyncPoisonError {
error: error.to_string(),
}))
}
}
| {
Error::from(Context::new(kind))
} |
fft_ops_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fft operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.platform import test
VALID_FFT_RANKS = (1, 2, 3)
class BaseFFTOpsTest(test.TestCase):
def _compare(self, x, rank, fft_length=None, use_placeholder=False,
rtol=1e-4, atol=1e-4):
self._compareForward(x, rank, fft_length, use_placeholder, rtol, atol)
self._compareBackward(x, rank, fft_length, use_placeholder, rtol, atol)
def _compareForward(self, x, rank, fft_length=None, use_placeholder=False,
rtol=1e-4, atol=1e-4):
x_np = self._npFFT(x, rank, fft_length)
if use_placeholder:
x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
x_tf = self._tfFFT(x_ph, rank, fft_length, feed_dict={x_ph: x})
else:
x_tf = self._tfFFT(x, rank, fft_length)
self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol)
def _compareBackward(self, x, rank, fft_length=None, use_placeholder=False,
rtol=1e-4, atol=1e-4):
x_np = self._npIFFT(x, rank, fft_length)
if use_placeholder:
x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype))
x_tf = self._tfIFFT(x_ph, rank, fft_length, feed_dict={x_ph: x})
else:
x_tf = self._tfIFFT(x, rank, fft_length)
self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol)
def _checkMemoryFail(self, x, rank):
config = config_pb2.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 1e-2
with self.cached_session(config=config, force_gpu=True):
self._tfFFT(x, rank, fft_length=None)
def _checkGradComplex(self, func, x, y, result_is_complex=True,
rtol=1e-2, atol=1e-2):
| iny = ops.convert_to_tensor(y)
# func is a forward or inverse, real or complex, batched or unbatched FFT
# function with a complex input.
z = func(math_ops.complex(inx, iny))
# loss = sum(|z|^2)
loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
((x_jacob_t, x_jacob_n),
(y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
[inx, iny], [list(x.shape), list(y.shape)],
loss, [1],
x_init_value=[x, y],
delta=1e-2)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)
self.assertAllClose(y_jacob_t, y_jacob_n, rtol=rtol, atol=atol)
def _checkGradReal(self, func, x, rtol=1e-2, atol=1e-2):
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
# func is a forward RFFT function (batched or unbatched).
z = func(inx)
# loss = sum(|z|^2)
loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z)))
x_jacob_t, x_jacob_n = test.compute_gradient(
inx, list(x.shape), loss, [1], x_init_value=x, delta=1e-2)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol)
class FFTOpsTest(BaseFFTOpsTest):
def _tfFFT(self, x, rank, fft_length=None, feed_dict=None):
# fft_length unused for complex FFTs.
with self.cached_session(use_gpu=True) as sess:
return sess.run(self._tfFFTForRank(rank)(x), feed_dict=feed_dict)
def _tfIFFT(self, x, rank, fft_length=None, feed_dict=None):
# fft_length unused for complex FFTs.
with self.cached_session(use_gpu=True) as sess:
return sess.run(self._tfIFFTForRank(rank)(x), feed_dict=feed_dict)
def _npFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.fft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.fft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.fft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _npIFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.ifft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.ifft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.ifft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _tfFFTForRank(self, rank):
if rank == 1:
return fft_ops.fft
elif rank == 2:
return fft_ops.fft2d
elif rank == 3:
return fft_ops.fft3d
else:
raise ValueError("invalid rank")
def _tfIFFTForRank(self, rank):
if rank == 1:
return fft_ops.ifft
elif rank == 2:
return fft_ops.ifft2d
elif rank == 3:
return fft_ops.ifft3d
else:
raise ValueError("invalid rank")
@test_util.run_deprecated_v1
def testEmpty(self):
with spectral_ops_test_util.fft_kernel_label_map():
for np_type in (np.complex64, np.complex128):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
x = np.zeros((0,) * dims).astype(np_type)
self.assertEqual(x.shape, self._tfFFT(x, rank).shape)
self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)
@test_util.run_deprecated_v1
def testBasic(self):
with spectral_ops_test_util.fft_kernel_label_map():
for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-8)):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._compare(
np.mod(np.arange(np.power(4, dims)), 10).reshape(
(4,) * dims).astype(np_type), rank, rtol=tol, atol=tol)
def testLargeBatch(self):
if test.is_gpu_available(cuda_only=True):
rank = 1
for dims in xrange(rank, rank + 3):
for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-5)):
self._compare(
np.mod(np.arange(np.power(128, dims)), 10).reshape(
(128,) * dims).astype(np_type), rank, rtol=tol, atol=tol)
# TODO(yangzihao): Disable before we can figure out a way to
# properly test memory fail for large batch fft.
# def testLargeBatchMemoryFail(self):
# if test.is_gpu_available(cuda_only=True):
# rank = 1
# for dims in xrange(rank, rank + 3):
# self._checkMemoryFail(
# np.mod(np.arange(np.power(128, dims)), 64).reshape(
# (128,) * dims).astype(np.complex64), rank)
@test_util.run_deprecated_v1
def testBasicPlaceholder(self):
with spectral_ops_test_util.fft_kernel_label_map():
for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 1e-8)):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._compare(
np.mod(np.arange(np.power(4, dims)), 10).reshape(
(4,) * dims).astype(np_type),
rank, use_placeholder=True, rtol=tol, atol=tol)
@test_util.run_deprecated_v1
def testRandom(self):
with spectral_ops_test_util.fft_kernel_label_map():
for np_type, tol in ((np.complex64, 1e-4), (np.complex128, 5e-6)):
def gen(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
im = np.random.uniform(size=n)
return (re + im * 1j).reshape(shape)
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._compare(gen((4,) * dims).astype(np_type), rank,
rtol=tol, atol=tol)
@test_util.run_deprecated_v1
def testRandom1D(self):
with spectral_ops_test_util.fft_kernel_label_map():
for np_type in (np.complex64, np.complex128):
has_gpu = test.is_gpu_available(cuda_only=True)
tol = {(np.complex64, True): 1e-4,
(np.complex64, False): 1e-2,
(np.complex128, True): 1e-4,
(np.complex128, False): 1e-2}[(np_type, has_gpu)]
def gen(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
im = np.random.uniform(size=n)
return (re + im * 1j).reshape(shape)
# Check a variety of power-of-2 FFT sizes.
for dim in (128, 256, 512, 1024):
self._compare(gen((dim,)).astype(np_type), 1, rtol=tol, atol=tol)
# Check a variety of non-power-of-2 FFT sizes.
for dim in (127, 255, 511, 1023):
self._compare(gen((dim,)).astype(np_type), 1, rtol=tol, atol=tol)
@test_util.run_deprecated_v1
def testError(self):
for rank in VALID_FFT_RANKS:
for dims in xrange(0, rank):
x = np.zeros((1,) * dims).astype(np.complex64)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape must be .*rank {}.*".format(rank)):
self._tfFFT(x, rank)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape must be .*rank {}.*".format(rank)):
self._tfIFFT(x, rank)
@test_util.run_deprecated_v1
def testGrad_Simple(self):
with spectral_ops_test_util.fft_kernel_label_map():
for np_type, tol in ((np.float32, 1e-4), (np.float64, 1e-10)):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 2):
re = np.ones(shape=(4,) * dims, dtype=np_type) / 10.0
im = np.zeros(shape=(4,) * dims, dtype=np_type)
self._checkGradComplex(self._tfFFTForRank(rank), re, im,
rtol=tol, atol=tol)
self._checkGradComplex(self._tfIFFTForRank(rank), re, im,
rtol=tol, atol=tol)
@test_util.run_deprecated_v1
def testGrad_Random(self):
with spectral_ops_test_util.fft_kernel_label_map():
for np_type, tol in ((np.float32, 1e-2), (np.float64, 1e-10)):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 2):
re = np.random.rand(*((3,) * dims)).astype(np_type) * 2 - 1
im = np.random.rand(*((3,) * dims)).astype(np_type) * 2 - 1
self._checkGradComplex(self._tfFFTForRank(rank), re, im,
rtol=tol, atol=tol)
self._checkGradComplex(self._tfIFFTForRank(rank), re, im,
rtol=tol, atol=tol)
class RFFTOpsTest(BaseFFTOpsTest):
def _compareBackward(self, x, rank, fft_length=None, use_placeholder=False):
super(RFFTOpsTest, self)._compareBackward(x, rank, fft_length,
use_placeholder)
def _tfFFT(self, x, rank, fft_length=None, feed_dict=None):
with self.cached_session(use_gpu=True) as sess:
return sess.run(
self._tfFFTForRank(rank)(x, fft_length), feed_dict=feed_dict)
def _tfIFFT(self, x, rank, fft_length=None, feed_dict=None):
with self.cached_session(use_gpu=True) as sess:
return sess.run(
self._tfIFFTForRank(rank)(x, fft_length), feed_dict=feed_dict)
def _npFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.rfft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.rfft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.rfft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _npIFFT(self, x, rank, fft_length=None):
if rank == 1:
return np.fft.irfft2(x, s=fft_length, axes=(-1,))
elif rank == 2:
return np.fft.irfft2(x, s=fft_length, axes=(-2, -1))
elif rank == 3:
return np.fft.irfft2(x, s=fft_length, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _tfFFTForRank(self, rank):
if rank == 1:
return fft_ops.rfft
elif rank == 2:
return fft_ops.rfft2d
elif rank == 3:
return fft_ops.rfft3d
else:
raise ValueError("invalid rank")
def _tfIFFTForRank(self, rank):
if rank == 1:
return fft_ops.irfft
elif rank == 2:
return fft_ops.irfft2d
elif rank == 3:
return fft_ops.irfft3d
else:
raise ValueError("invalid rank")
@test_util.run_deprecated_v1
def testEmpty(self):
with spectral_ops_test_util.fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
x = np.zeros((0,) * dims).astype(np.float32)
self.assertEqual(x.shape, self._tfFFT(x, rank).shape)
x = np.zeros((0,) * dims).astype(np.complex64)
self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)
@test_util.run_deprecated_v1
def testBasic(self):
with spectral_ops_test_util.fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
for size in (5, 6):
inner_dim = size // 2 + 1
r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
(size,) * dims)
self._compareForward(r2c.astype(np.float32), rank, (size,) * rank)
c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
10).reshape((size,) * (dims - 1) + (inner_dim,))
self._compareBackward(
c2r.astype(np.complex64), rank, (size,) * rank)
def testLargeBatch(self):
if test.is_gpu_available(cuda_only=True):
rank = 1
for dims in xrange(rank, rank + 3):
for size in (64, 128):
inner_dim = size // 2 + 1
r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
(size,) * dims)
self._compareForward(r2c.astype(np.float32), rank, (size,) * rank)
c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
10).reshape((size,) * (dims - 1) + (inner_dim,))
self._compareBackward(c2r.astype(np.complex64), rank, (size,) * rank)
@test_util.run_deprecated_v1
def testBasicPlaceholder(self):
with spectral_ops_test_util.fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
for size in (5, 6):
inner_dim = size // 2 + 1
r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
(size,) * dims)
self._compareForward(
r2c.astype(np.float32),
rank, (size,) * rank,
use_placeholder=True)
c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
10).reshape((size,) * (dims - 1) + (inner_dim,))
self._compareBackward(
c2r.astype(np.complex64),
rank, (size,) * rank,
use_placeholder=True)
@test_util.run_deprecated_v1
def testFftLength(self):
if test.is_gpu_available(cuda_only=True):
with spectral_ops_test_util.fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
for size in (5, 6):
inner_dim = size // 2 + 1
r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape(
(size,) * dims)
c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim),
10).reshape((size,) * (dims - 1) + (inner_dim,))
# Test truncation (FFT size < dimensions).
fft_length = (size - 2,) * rank
self._compareForward(r2c.astype(np.float32), rank, fft_length)
self._compareBackward(c2r.astype(np.complex64), rank, fft_length)
# Confirm it works with unknown shapes as well.
self._compareForward(
r2c.astype(np.float32),
rank,
fft_length,
use_placeholder=True)
self._compareBackward(
c2r.astype(np.complex64),
rank,
fft_length,
use_placeholder=True)
# Test padding (FFT size > dimensions).
fft_length = (size + 2,) * rank
self._compareForward(r2c.astype(np.float32), rank, fft_length)
self._compareBackward(c2r.astype(np.complex64), rank, fft_length)
# Confirm it works with unknown shapes as well.
self._compareForward(
r2c.astype(np.float32),
rank,
fft_length,
use_placeholder=True)
self._compareBackward(
c2r.astype(np.complex64),
rank,
fft_length,
use_placeholder=True)
@test_util.run_deprecated_v1
def testRandom(self):
with spectral_ops_test_util.fft_kernel_label_map():
def gen_real(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
ret = re.reshape(shape)
return ret
def gen_complex(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
im = np.random.uniform(size=n)
ret = (re + im * 1j).reshape(shape)
return ret
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
for size in (5, 6):
inner_dim = size // 2 + 1
self._compareForward(gen_real((size,) * dims), rank, (size,) * rank)
complex_dims = (size,) * (dims - 1) + (inner_dim,)
self._compareBackward(
gen_complex(complex_dims), rank, (size,) * rank)
@test_util.run_deprecated_v1
def testError(self):
with spectral_ops_test_util.fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
for dims in xrange(0, rank):
x = np.zeros((1,) * dims).astype(np.complex64)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape .* must have rank at least {}".format(rank)):
self._tfFFT(x, rank)
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape .* must have rank at least {}".format(rank)):
self._tfIFFT(x, rank)
for dims in xrange(rank, rank + 2):
x = np.zeros((1,) * rank)
# Test non-rank-1 fft_length produces an error.
fft_length = np.zeros((1, 1)).astype(np.int32)
with self.assertRaisesWithPredicateMatch(ValueError,
"Shape .* must have rank 1"):
self._tfFFT(x, rank, fft_length)
with self.assertRaisesWithPredicateMatch(ValueError,
"Shape .* must have rank 1"):
self._tfIFFT(x, rank, fft_length)
# Test wrong fft_length length.
fft_length = np.zeros((rank + 1,)).astype(np.int32)
with self.assertRaisesWithPredicateMatch(
ValueError, "Dimension must be .*but is {}.*".format(rank + 1)):
self._tfFFT(x, rank, fft_length)
with self.assertRaisesWithPredicateMatch(
ValueError, "Dimension must be .*but is {}.*".format(rank + 1)):
self._tfIFFT(x, rank, fft_length)
# Test that calling the kernel directly without padding to fft_length
# produces an error.
rffts_for_rank = {
1: [gen_spectral_ops.rfft, gen_spectral_ops.irfft],
2: [gen_spectral_ops.rfft2d, gen_spectral_ops.irfft2d],
3: [gen_spectral_ops.rfft3d, gen_spectral_ops.irfft3d]
}
rfft_fn, irfft_fn = rffts_for_rank[rank]
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"Input dimension .* must have length of at least 6 but got: 5"):
x = np.zeros((5,) * rank).astype(np.float32)
fft_length = [6] * rank
with self.cached_session():
self.evaluate(rfft_fn(x, fft_length))
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"Input dimension .* must have length of at least .* but got: 3"):
x = np.zeros((3,) * rank).astype(np.complex64)
fft_length = [6] * rank
with self.cached_session():
self.evaluate(irfft_fn(x, fft_length))
@test_util.run_deprecated_v1
def testGrad_Simple(self):
with spectral_ops_test_util.fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
# rfft3d/irfft3d do not have gradients yet.
if rank == 3:
continue
for dims in xrange(rank, rank + 2):
for size in (5, 6):
re = np.ones(shape=(size,) * dims, dtype=np.float32)
im = -np.ones(shape=(size,) * dims, dtype=np.float32)
self._checkGradReal(self._tfFFTForRank(rank), re)
self._checkGradComplex(
self._tfIFFTForRank(rank), re, im, result_is_complex=False)
@test_util.run_deprecated_v1
def testGrad_Random(self):
with spectral_ops_test_util.fft_kernel_label_map():
for rank in VALID_FFT_RANKS:
# rfft3d/irfft3d do not have gradients yet.
if rank == 3:
continue
for dims in xrange(rank, rank + 2):
for size in (5, 6):
re = np.random.rand(*((size,) * dims)).astype(np.float32) * 2 - 1
im = np.random.rand(*((size,) * dims)).astype(np.float32) * 2 - 1
self._checkGradReal(self._tfFFTForRank(rank), re)
self._checkGradComplex(
self._tfIFFTForRank(rank), re, im, result_is_complex=False)
class FFTShiftTest(test.TestCase):
@test_util.run_deprecated_v1
def testDefinition(self):
with self.session():
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
self.assertAllEqual(fft_ops.fftshift(x).eval(), y)
self.assertAllEqual(fft_ops.ifftshift(y).eval(), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
self.assertAllEqual(fft_ops.fftshift(x).eval(), y)
self.assertAllEqual(fft_ops.ifftshift(y).eval(), x)
@test_util.run_deprecated_v1
def testAxesKeyword(self):
with self.session():
freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
self.assertAllEqual(fft_ops.fftshift(freqs, axes=(0, 1)).eval(), shifted)
self.assertAllEqual(
fft_ops.fftshift(freqs, axes=0).eval(),
fft_ops.fftshift(freqs, axes=(0,)).eval())
self.assertAllEqual(fft_ops.ifftshift(shifted, axes=(0, 1)).eval(), freqs)
self.assertAllEqual(
fft_ops.ifftshift(shifted, axes=0).eval(),
fft_ops.ifftshift(shifted, axes=(0,)).eval())
self.assertAllEqual(fft_ops.fftshift(freqs).eval(), shifted)
self.assertAllEqual(fft_ops.ifftshift(shifted).eval(), freqs)
@test_util.run_deprecated_v1
def testNumpyCompatibility(self):
with self.session():
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
self.assertAllEqual(fft_ops.fftshift(x).eval(), np.fft.fftshift(x))
self.assertAllEqual(fft_ops.ifftshift(y).eval(), np.fft.ifftshift(y))
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
self.assertAllEqual(fft_ops.fftshift(x).eval(), np.fft.fftshift(x))
self.assertAllEqual(fft_ops.ifftshift(y).eval(), np.fft.ifftshift(y))
freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
self.assertAllEqual(
fft_ops.fftshift(freqs, axes=(0, 1)).eval(),
np.fft.fftshift(freqs, axes=(0, 1)))
self.assertAllEqual(
fft_ops.ifftshift(shifted, axes=(0, 1)).eval(),
np.fft.ifftshift(shifted, axes=(0, 1)))
if __name__ == "__main__":
test.main() | with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
|
test_annotations.py | # static analysis: ignore
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from .test_name_check_visitor import TestNameCheckVisitorBase
from .test_node_visitor import skip_before
from .error_code import ErrorCode
class TestAnnotations(TestNameCheckVisitorBase):
@skip_before((3, 5))
def test_union(self):
self.assert_passes(
"""
import re
from typing import Union, Optional, List, Set, Dict, Match, Pattern
_Pattern = type(re.compile("a"))
_Match = type(re.match("a", "a"))
def capybara() -> Union[int, str]:
return 0
def kerodon() -> Optional[int]:
return None
def complex() -> Union[List[str], Set[int], Dict[float, List[str]], int]:
return []
def check() -> None:
assert_is_value(capybara(), MultiValuedValue([TypedValue(int), TypedValue(str)]))
assert_is_value(kerodon(), MultiValuedValue([TypedValue(int), KnownValue(None)]))
assert_is_value(
complex(),
MultiValuedValue(
[
GenericValue(list, [TypedValue(str)]),
GenericValue(set, [TypedValue(int)]),
GenericValue(
dict, [TypedValue(float), GenericValue(list, [TypedValue(str)])]
),
TypedValue(int),
]
),
)
def rgx(m: Match[str], p: Pattern[bytes]) -> None:
assert_is_value(p, GenericValue(_Pattern, [TypedValue(bytes)]))
assert_is_value(m, GenericValue(_Match, [TypedValue(str)]))
"""
)
@skip_before((3, 5))
def test_generic(self):
self.assert_passes(
""" | def capybara(x: List[int], y: List, z: SupportsInt) -> None:
assert_is_value(x, GenericValue(list, [TypedValue(int)]))
assert_is_value(y, TypedValue(list))
assert_is_value(z, TypedValue(SupportsInt))
"""
)
@skip_before((3, 5))
def test_self_type(self):
self.assert_passes(
"""
class Capybara:
def f(self: int) -> None:
assert_is_value(self, TypedValue(int))
def g(self) -> None:
assert_is_value(self, TypedValue(Capybara))
"""
)
@skip_before((3, 5))
def test_newtype(self):
self.assert_passes(
"""
from typing import NewType, Tuple
X = NewType("X", int)
Y = NewType("Y", Tuple[str, ...])
def capybara(x: X, y: Y) -> None:
assert_is_value(x, NewTypeValue(X))
print(y) # just asserting that this doesn't cause errors
"""
)
@skip_before((3, 5))
def test_literal(self):
self.assert_passes(
"""
from typing_extensions import Literal
def capybara(x: Literal[True], y: Literal[True, False]) -> None:
assert_is_value(x, KnownValue(True))
assert_is_value(y, MultiValuedValue([KnownValue(True), KnownValue(False)]))
"""
)
@skip_before((3, 5))
def test_contextmanager(self):
self.assert_passes(
"""
from contextlib import contextmanager
from typing import Iterator
@contextmanager
def capybara() -> Iterator[int]:
yield 3
def kerodon():
# Ideally should be ContextManager[int], but at least
# it should not be Iterator[int], which is what pyanalyze
# used to infer.
assert_is_value(capybara(), UNRESOLVED_VALUE)
"""
)
@skip_before((3, 0))
def test_none_annotations(self):
self.assert_passes(
"""
def mara() -> None:
pass
class Capybara:
def __init__(self) -> None:
pass
def check() -> None:
# Make sure we don't infer None if __init__ is annotated
# as returning None.
assert_is_value(Capybara(), TypedValue(Capybara))
assert_is_value(mara(), KnownValue(None))
"""
)
@skip_before((3, 0))
def test_annotations(self):
self.assert_passes(
"""
def caviidae() -> None:
x = int
# tests that annotations in a nested function are not evaluated in a context where they don't exist
def capybara(a: x, *b: x, c: x, d: x=3, **kwargs: x):
pass
assert_is_value(capybara, KnownValue(capybara))
"""
)
self.assert_passes(
"""
class Caviidae:
class Capybara:
pass
def eat(self, x: Capybara):
assert_is_value(self, TypedValue(Caviidae))
@staticmethod
def static(x: "Caviidae"):
assert_is_value(x, TypedValue(Caviidae))
"""
)
self.assert_fails(
ErrorCode.incompatible_argument,
"""
def capybara(x: int) -> None:
pass
def kerodon():
capybara("not an int")
""",
)
@skip_before((3, 0))
def test_incompatible_return_value(self):
self.assert_fails(
ErrorCode.incompatible_return_value,
"""
def capybara() -> int:
return "not an int"
""",
)
self.assert_fails(
ErrorCode.incompatible_return_value,
"""
def capybara(x: bool) -> int:
if not x:
return
return 42
""",
)
self.assert_passes(
"""
from typing import Generator
def capybara(x: bool) -> Generator[int, None, None]:
if not x:
return
yield 42
"""
)
self.assert_fails(
ErrorCode.incompatible_return_value,
"""
def f() -> int:
pass
""",
)
self.assert_passes(
"""
from abc import abstractmethod
class X:
@abstractmethod
def f(self) -> int:
pass
""",
)
self.assert_fails(
ErrorCode.incompatible_return_value,
"""
def f() -> None:
assert_is_value(g(), UNRESOLVED_VALUE)
return g()
def g():
pass
""",
)
@skip_before((3, 0))
def test_incompatible_default(self):
self.assert_fails(
ErrorCode.incompatible_default,
"""
def capybara(x: int = None) -> None:
pass
""",
)
@skip_before((3, 0))
def test_property(self):
self.assert_passes(
"""
class Capybara:
def __init__(self, x):
self.x = x
@property
def f(self) -> int:
return self.x
def get_g(self) -> int:
return self.x * 2
g = property(get_g)
def user(c: Capybara) -> None:
assert_is_value(c.f, TypedValue(int))
assert_is_value(c.get_g(), TypedValue(int))
assert_is_value(c.g, TypedValue(int))
"""
)
@skip_before((3, 0))
def test_annotations_override_return(self):
self.assert_passes(
"""
from typing import Any
def f() -> Any:
return 0
def g():
return 0
def capybara():
assert_is_value(f(), UNRESOLVED_VALUE)
assert_is_value(g(), KnownValue(0))
"""
)
@skip_before((3, 0))
def test_cached_classmethod(self):
# just test that this doesn't crash
self.assert_passes(
"""
from functools import lru_cache
class Capybara:
@classmethod
@lru_cache()
def f(cls) -> int:
return 3
"""
)
@skip_before((3, 6))
def test_annassign(self):
self.assert_passes(
"""
def capybara(y):
x: int = y
assert_is_value(y, UNRESOLVED_VALUE)
assert_is_value(x, TypedValue(int))
"""
)
self.assert_fails(
ErrorCode.incompatible_assignment,
"""
def capybara(y: str):
x: int = y
""",
)
@skip_before((3, 5))
def test_tuples(self):
self.assert_passes(
"""
from typing import Tuple, Union
def capybara(x: Tuple[int, ...], y: Tuple[int], z: Tuple[str, int], omega: Union[Tuple[str, int], None]) -> None:
assert_is_value(x, GenericValue(tuple, [TypedValue(int)]))
assert_is_value(y, SequenceIncompleteValue(tuple, [TypedValue(int)]))
assert_is_value(z, SequenceIncompleteValue(tuple, [TypedValue(str), TypedValue(int)]))
assert_is_value(omega, MultiValuedValue([
SequenceIncompleteValue(tuple, [TypedValue(str), TypedValue(int)]),
KnownValue(None),
]))
"""
)
@skip_before((3, 0))
def test_invalid_annotation(self):
self.assert_fails(
ErrorCode.invalid_annotation,
"""
def f(x: 1):
pass
""",
)
@skip_before((3, 0))
def test_forward_ref(self):
self.assert_fails(
ErrorCode.undefined_name,
"""
def f(x: "NoSuchType"):
pass
""",
)
self.assert_passes(
"""
import typing
from typing import Optional
def capybara(x: "X", y: "Optional[X]", z: "typing.Optional[X]"):
assert_is_value(x, TypedValue(X))
assert_is_value(y, MultiValuedValue([KnownValue(None), TypedValue(X)]))
assert_is_value(z, MultiValuedValue([KnownValue(None), TypedValue(X)]))
class X:
pass
"""
)
self.assert_passes(
"""
from typing import List
def capybara(x: "List[int]") -> "List[str]":
assert_is_value(x, GenericValue(list, [TypedValue(int)]))
assert_is_value(capybara(x), GenericValue(list, [TypedValue(str)]))
return []
"""
)
self.assert_fails(
ErrorCode.incompatible_return_value,
"""
def f() -> "int":
return ""
""",
)
@skip_before((3, 0))
def test_pattern(self):
self.assert_passes(
"""
from typing import Pattern
import re
_Pattern = type(re.compile(""))
def capybara(x: Pattern[str]):
assert_is_value(x, GenericValue(_Pattern, [TypedValue(str)]))
"""
)
@skip_before((3, 6))
def test_final(self):
self.assert_passes(
"""
from typing_extensions import Final
x: Final = 3
def capybara():
y: Final = 4
assert_is_value(x, KnownValue(3))
assert_is_value(y, KnownValue(4))
"""
)
@skip_before((3, 6))
def test_type(self):
self.assert_passes(
"""
from typing import Type
def capybara(x: Type[str], y: "Type[int]"):
assert_is_value(x, SubclassValue(str))
assert_is_value(y, SubclassValue(int))
"""
) | from typing import List, SupportsInt
|
bag_of_edits_change_encoder.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from itertools import chain
import numpy as np
import torch
from torch import nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from tqdm import tqdm
import sys
from diff_representation.change_entry import ChangeExample
from diff_representation.model import nn_utils
from diff_representation.model.embedder import EmbeddingTable
class BagOfEditsChangeEncoder(nn.Module):
"""project a CodeChange instance into distributed vectors"""
def __init__(self, token_embedder, vocab, **kwargs):
super(BagOfEditsChangeEncoder, self).__init__()
self.token_embedder = token_embedder
self.token_embedding_size = self.token_embedder.weight.size(1)
self.vocab = vocab
self.change_vector_size = self.token_embedding_size * 2
@property
def | (self):
return self.token_embedder.device
def forward(self, code_changes, *args, **kwargs):
"""
given the token encodings of the previous and updated code,
and the diff information (alignment between the tokens of the
previous and updated code), generate the diff representation
"""
added_tokens = []
added_token_batch_ids = []
deled_tokens = []
deled_token_batch_ids = []
for e_id, example in enumerate(code_changes):
for entry in example.change_seq:
tag, token = entry
if tag == 'ADD':
token_id = self.vocab[token]
added_tokens.append(token_id)
added_token_batch_ids.append(e_id)
elif tag == 'DEL':
token_id = self.vocab[token]
deled_tokens.append(token_id)
deled_token_batch_ids.append(e_id)
elif tag == 'REPLACE':
added_token_id = self.vocab[token[1]]
deled_token_id = self.vocab[token[0]]
added_tokens.append(added_token_id)
deled_tokens.append(deled_token_id)
added_token_batch_ids.append(e_id)
deled_token_batch_ids.append(e_id)
changed_token_ids = added_tokens + deled_tokens
changed_token_ids = torch.tensor(changed_token_ids, dtype=torch.long, device=self.device)
# (token_num, embed_size)
changed_token_embeds = self.token_embedder.weight[changed_token_ids]
added_token_embeds = changed_token_embeds[:len(added_tokens)]
deled_token_embeds = changed_token_embeds[len(added_tokens):]
added_change_embeds = torch.zeros(len(code_changes), self.token_embedding_size, dtype=torch.float,
device=self.device)
if added_token_batch_ids:
added_change_embeds = added_change_embeds.scatter_add_(0,
torch.tensor(added_token_batch_ids, device=self.device).unsqueeze(-1).expand_as(added_token_embeds),
added_token_embeds)
deled_change_embeds = torch.zeros(len(code_changes), self.token_embedding_size, dtype=torch.float,
device=self.device)
if deled_token_batch_ids:
deled_change_embeds = deled_change_embeds.scatter_add_(0,
torch.tensor(deled_token_batch_ids, device=self.device).unsqueeze(-1).expand_as(deled_token_embeds),
deled_token_embeds)
change_vectors = torch.cat([added_change_embeds, deled_change_embeds], dim=-1)
return change_vectors
def encode_code_change(self, prev_code_tokens, updated_code_tokens, code_encoder):
example = ChangeExample(prev_code_tokens, updated_code_tokens, context=None)
change_vec = self.forward([example]).data.cpu().numpy()[0]
return change_vec
def encode_code_changes(self, examples, code_encoder, batch_size=32):
"""encode each change in the list `code_changes`,
return a 2D numpy array of shape (len(code_changes), code_change_embed_dim)"""
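# Shape sketch (hypothetical sizes): with 1000 examples and 128-dimensional token
# embeddings, the returned array has shape (1000, 256), since each change vector
# concatenates the added- and deleted-token embedding sums.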
change_vecs = []
for batch_examples in tqdm(nn_utils.batch_iter(examples, batch_size), file=sys.stdout, total=len(examples)):
batch_change_vecs = self.forward(batch_examples).data.cpu().numpy()
change_vecs.append(batch_change_vecs)
change_vecs = np.concatenate(change_vecs, axis=0)
return change_vecs
| device |
_xpad.py | import _plotly_utils.basevalidators
class XpadValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="xpad", parent_name="scattercarpet.marker.colorbar", **kwargs
):
super(XpadValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs
) | ||
prerequests.ts | import {
CypressIncomingRequest,
BrowserPreRequest,
} from '@packages/proxy'
import Debug from 'debug'
import _ from 'lodash'
const debug = Debug('cypress:proxy:http:util:prerequests')
const debugVerbose = Debug('cypress-verbose:proxy:http:util:prerequests')
const metrics: any = {
browserPreRequestsReceived: 0,
proxyRequestsReceived: 0,
immediatelyMatchedRequests: 0,
eventuallyReceivedPreRequest: [],
neverReceivedPreRequest: [],
}
process.once('exit', () => {
debug('metrics: %o', metrics)
})
function | <T> (a: Array<T>, predicate: (v: T) => boolean): T | void {
for (const i in a) {
const v = a[i]
if (predicate(v)) {
a.splice(i as unknown as number, 1)
return v
}
}
}
function matches (preRequest: BrowserPreRequest, req: Pick<CypressIncomingRequest, 'proxiedUrl' | 'method'>) {
return preRequest.method === req.method && preRequest.url === req.proxiedUrl
}
export type GetPreRequestCb = (browserPreRequest?: BrowserPreRequest) => void
export class PreRequests {
pendingBrowserPreRequests: Array<BrowserPreRequest> = []
requestsPendingPreRequestCbs: Array<{
cb: (browserPreRequest: BrowserPreRequest) => void
method: string
proxiedUrl: string
}> = []
get (req: CypressIncomingRequest, ctxDebug, cb: GetPreRequestCb) {
metrics.proxyRequestsReceived++
const pendingBrowserPreRequest = removeOne(this.pendingBrowserPreRequests, (browserPreRequest) => {
return matches(browserPreRequest, req)
})
if (pendingBrowserPreRequest) {
metrics.immediatelyMatchedRequests++
ctxDebug('matches pending pre-request %o', pendingBrowserPreRequest)
return cb(pendingBrowserPreRequest)
}
const timeout = setTimeout(() => {
metrics.neverReceivedPreRequest.push({ url: req.proxiedUrl })
ctxDebug('500ms passed without a pre-request, continuing request with an empty pre-request field!')
remove()
cb()
}, 500)
const startedMs = Date.now()
const remove = _.once(() => removeOne(this.requestsPendingPreRequestCbs, (v) => v === requestPendingPreRequestCb))
const requestPendingPreRequestCb = {
cb: (browserPreRequest) => {
const afterMs = Date.now() - startedMs
metrics.eventuallyReceivedPreRequest.push({ url: browserPreRequest.url, afterMs })
ctxDebug('received pre-request after %dms %o', afterMs, browserPreRequest)
clearTimeout(timeout)
remove()
cb(browserPreRequest)
},
proxiedUrl: req.proxiedUrl,
method: req.method,
}
this.requestsPendingPreRequestCbs.push(requestPendingPreRequestCb)
}
addPending (browserPreRequest: BrowserPreRequest) {
if (this.pendingBrowserPreRequests.indexOf(browserPreRequest) !== -1) {
return
}
metrics.browserPreRequestsReceived++
const requestPendingPreRequestCb = removeOne(this.requestsPendingPreRequestCbs, (req) => {
return matches(browserPreRequest, req)
})
if (requestPendingPreRequestCb) {
debugVerbose('immediately matched pre-request %o', browserPreRequest)
return requestPendingPreRequestCb.cb(browserPreRequest)
}
debugVerbose('queuing pre-request to be matched later %o %o', browserPreRequest, this.pendingBrowserPreRequests)
this.pendingBrowserPreRequests.push(browserPreRequest)
}
}
| removeOne |
rootResolver.js | /**
* rootResolver.js
* Copyright (c) 2019-present, Aakash Goel
* MIT Licensed
*
* This is the main file where all the resolvers will be imported and combined into one resolver i.e. rootResolver which will then export
*
* Create new resolver inside 'src' folder, import it here and add it to the rootResolver object.
*
* For ex:
* 1. create new folder inside 'src' i.e. 'featureXyz'
* 2. create new file i.e. 'resolvers.js' inside 'src > featureXyz'. Create resolvers related to this feature only and export it.
* 3. create new file i.e. 'index.js' inside 'src > featureXyz'. Import and Export `featureXyzResolvers` in this index file.
* 4. Import here in 'rootResolver.js' i.e. const { featureXyzResolvers } = require('./src/featureXyz')
* 5. Add it to the `rootResolver` i.e. const rootResolver = [<otherResolvers>, featureXyzResolvers]
*
*/
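/*
 * Illustration only (hypothetical names): following the steps above, a new
 * src/featureXyz/resolvers.js might export
 *   const featureXyzResolvers = { Query: { featureXyz: () => 'xyz' } };
 *   module.exports = { featureXyzResolvers };
 * and rootResolver below would then include `featureXyzResolvers`.
 */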
'use strict';
/**
* Module resolvers.
*/
const { messageResolvers } = require('./src/message');
// *** this is a test resolver. Do not create any resolver here. Delete below resolver
const r1 = {
Query: {
field1: () => {},
},
};
// *** this is a test resolver. Do not create any resolver here. Delete below resolver
const r2 = {
Query: {
field2: () => {},
},
};
/**
* Add or Remove resolvers.
* @public
*/
const rootResolver = [
messageResolvers,
r1, // *** test resolver delete this
r2, // *** test resolver delete this
];
// console.log('>>>rootResolver ', rootResolver);
/**
* Module exports.
* @public | */
module.exports = rootResolver; | |
syscall.rs | // MEG-OS System Calls
use core::ffi::c_void;
use megosabi::svc::Function;
#[allow(dead_code)]
#[link(wasm_import_module = "megos-canary")]
extern "C" {
fn svc0(_: Function) -> usize;
fn svc1(_: Function, _: usize) -> usize;
fn svc2(_: Function, _: usize, _: usize) -> usize;
fn svc3(_: Function, _: usize, _: usize, _: usize) -> usize;
fn svc4(_: Function, _: usize, _: usize, _: usize, _: usize) -> usize;
fn svc5(_: Function, _: usize, _: usize, _: usize, _: usize, _: usize) -> usize;
fn svc6(_: Function, _: usize, _: usize, _: usize, _: usize, _: usize, _: usize) -> usize;
}
#[inline]
pub fn os_exit() -> ! {
unsafe {
svc0(Function::Exit);
loop {
asm!("unreachable");
}
}
}
/// Display a string.
#[inline]
pub fn os_print(s: &str) {
unsafe { svc2(Function::PrintString, s.as_ptr() as usize, s.len()) };
}
/// Get the value of the monotonic timer in microseconds.
#[inline]
pub fn os_monotonic() -> u32 {
unsafe { svc0(Function::Monotonic) as u32 }
}
#[inline] | F: FnOnce() -> (),
{
let time0 = unsafe { svc0(Function::Monotonic) };
f();
let time1 = unsafe { svc0(Function::Monotonic) };
time1 - time0
}
#[inline]
pub fn os_time_of_day() -> u32 {
unsafe { svc1(Function::Time, 0) as u32 }
}
/// Blocks a thread for the specified microseconds.
#[inline]
pub fn os_usleep(us: u32) {
unsafe { svc1(Function::Usleep, us as usize) };
}
/// Get the system version information.
#[inline]
pub fn os_version() -> u32 {
unsafe { svc1(Function::GetSystemInfo, 0) as u32 }
}
/// Create a new window.
#[inline]
#[rustfmt::skip]
pub fn os_new_window1(title: &str, width: usize, height: usize) -> usize {
unsafe { svc4(Function::NewWindow, title.as_ptr() as usize, title.len(), width, height) }
}
/// Create a new window.
#[inline]
#[rustfmt::skip]
pub fn os_new_window2(title: &str, width: usize, height: usize, bg_color: usize, flag: usize) -> usize {
unsafe { svc6( Function::NewWindow, title.as_ptr() as usize, title.len(), width, height, bg_color, flag) }
}
/// Close a window.
#[inline]
pub fn os_close_window(window: usize) {
unsafe { svc1(Function::CloseWindow, window) };
}
/// Create a drawing context
#[inline]
pub fn os_begin_draw(window: usize) -> usize {
unsafe { svc1(Function::BeginDraw, window) }
}
/// Discard the drawing context and reflect it to the screen
#[inline]
pub fn os_end_draw(ctx: usize) {
unsafe { svc1(Function::EndDraw, ctx) };
}
/// Draw a string in a window.
#[inline]
pub fn os_win_draw_string(ctx: usize, x: usize, y: usize, s: &str, color: usize) {
let ptr = s.as_ptr() as usize;
unsafe { svc6(Function::DrawString, ctx, x, y, ptr, s.len(), color) };
}
#[inline]
#[rustfmt::skip]
pub fn os_draw_shape(ctx: usize, x: usize, y: usize, width: usize, height: usize, params: &OsDrawShape) {
unsafe { svc6(Function::DrawShape, ctx, x, y, width, height, params as *const _ as usize) };
}
#[allow(dead_code)]
#[derive(Clone, Copy)]
pub struct OsDrawShape {
pub radius: u32,
pub bg_color: u32,
pub border_color: u32,
}
/// Fill a rectangle in a window.
#[inline]
#[rustfmt::skip]
pub fn os_win_fill_rect(ctx: usize, x: usize, y: usize, width: usize, height: usize, color: usize) {
unsafe { svc6(Function::FillRect, ctx, x, y, width, height, color) };
}
#[inline]
pub fn os_win_draw_line(ctx: usize, x1: usize, y1: usize, x2: usize, y2: usize, color: usize) {
unsafe { svc6(Function::DrawLine, ctx, x1, y1, x2, y2, color) };
}
/// Wait for key event
#[inline]
pub fn os_wait_char(window: usize) -> u32 {
unsafe { svc1(Function::WaitChar, window) as u32 }
}
/// Read a key event
#[inline]
pub fn os_read_char(window: usize) -> u32 {
unsafe { svc1(Function::ReadChar, window) as u32 }
}
/// Draw a bitmap in a window
#[inline]
pub fn os_blt8(ctx: usize, x: usize, y: usize, bitmap: usize) {
unsafe { svc4(Function::Blt8, ctx, x, y, bitmap) };
}
#[inline]
pub fn os_blt32(ctx: usize, x: usize, y: usize, bitmap: usize) {
unsafe { svc4(Function::Blt32, ctx, x, y, bitmap) };
}
/// Draw a bitmap in a window
#[inline]
pub fn os_blt1(ctx: usize, x: usize, y: usize, bitmap: usize, color: u32, mode: usize) {
unsafe { svc6(Function::Blt1, ctx, x, y, bitmap, color as usize, mode) };
}
/// TEST
#[inline]
#[rustfmt::skip]
pub fn os_blend_rect(bitmap: usize, x: usize, y: usize, width: usize, height: usize, color: u32) {
unsafe { svc6(Function::BlendRect, bitmap, x, y, width, height, color as usize) };
}
/// Returns a simple pseudo-random number
///
/// # Safety
///
/// Since this system call returns a simple pseudo-random number,
/// it should not be used in situations where random number safety is required.
#[inline]
pub fn os_rand() -> u32 {
unsafe { svc0(Function::Rand) as u32 }
}
/// Set the seed of the random number.
#[inline]
pub fn os_srand(srand: u32) -> u32 {
unsafe { svc1(Function::Srand, srand as usize) as u32 }
}
/// Allocates memory blocks with a simple allocator
#[inline]
pub fn os_alloc(size: usize, align: usize) -> usize {
unsafe { svc2(Function::Alloc, size, align) }
}
/// Frees an allocated memory block
#[inline]
pub fn os_dealloc(ptr: usize, size: usize, align: usize) {
unsafe { svc3(Function::Dealloc, ptr, size, align) };
}
#[inline]
pub unsafe fn game_v1_init(window: usize, screen: *const c_void) -> usize {
svc2(Function::GameV1Init, window, screen as usize)
}
#[inline]
#[rustfmt::skip]
pub unsafe fn game_v1_init_long(window: usize, screen: *const c_void, scale: usize, fps: usize) -> usize {
svc4(Function::GameV1Init, window, screen as usize, scale, fps)
}
#[inline]
pub fn game_v1_sync(handle: usize) -> usize {
unsafe { svc1(Function::GameV1Sync, handle) }
}
#[inline]
pub fn game_v1_rect(handle: usize, x: usize, y: usize, width: usize, height: usize) {
unsafe { svc5(Function::GameV1Rect, handle, x, y, width, height) };
}
#[inline]
pub fn game_v1_move_sprite(handle: usize, index: usize, x: usize, y: usize) {
unsafe { svc4(Function::GameV1MoveSprite, handle, index, x, y) };
}
#[inline]
pub fn game_v1_button(handle: usize) -> u32 {
unsafe { svc1(Function::GameV1Button, handle) as u32 }
}
#[inline]
#[rustfmt::skip]
pub fn game_v1_load_font(handle: usize, start_index: usize, start_char: usize, end_char: usize) {
    unsafe { svc4(Function::GameV1LoadFont, handle, start_index, start_char, end_char) };
} | pub fn os_bench<F>(f: F) -> usize
where |
__init__.py |
"""Campdown
Usage:
campdown <url>
[--output=PATH]
[--sleep=NUMBER]
[--quiet]
[--short]
[--no-art]
[--no-id3]
[--no-missing]
campdown (-h | --help)
campdown (-v | --version)
Options:
-h, --help Show this screen.
-v, --version Show version.
-o=PATH, --output=PATH Output folder to work in.
-t=NUMBER, --sleep=NUMBER Connection timeout duration.
-q, --quiet Should output messages be hidden.
-s, --short Should the output filenames be kept short.
--no-art Sets if artwork downloading should be ignored.
--no-id3 Sets if ID3 tagging should be ignored.
--no-missing Sets if album downloads abort on missing tracks.
Description:
Command line Bandcamp downloader. Takes in Bandcamp page URLs and fetches
tracks, albums as well as their metadata and covers while retaining clean
and concise formatting of output information.
Requirements:
Python 3.4+, requests, mutagen, docopt
"""
import sys
import os
from docopt import docopt
from .helpers import *
from .track import Track
from .album import Album
from .discography import Discography
import requests
def cli():
# Acts as the CLI for the project and main entry point for the command.
args = docopt(__doc__, version="campdown 1.49")
try:
output_dir = args["--output"]
    except KeyError:
output_dir = ""
downloader = Downloader(
args["<url>"],
out=output_dir,
verbose=(not args["--quiet"]),
short=(args["--short"]),
sleep=(int(args["--sleep"]) if args["--sleep"] else 30),
art_enabled=(not args["--no-art"]),
id3_enabled=(not args["--no-id3"]),
abort_missing=(args["--no-missing"])
)
try:
downloader.run()
except (KeyboardInterrupt):
if not args["--quiet"]:
print("\nInterrupt caught. Exiting program...")
sys.exit(2)
class | :
"""
Main class of Campdown. This class handles all other Campdown functions and
    executes them depending on the information it is given during initialization.
Args:
url (str): Bandcamp URL to analyse and download from.
out (str): relative or absolute path to write to.
verbose (bool): sets if status messages and general information
should be printed. Errors are still printed regardless of this.
silent (bool): sets if error messages should be hidden.
        short (bool): omits artist and album fields from downloaded track filenames.
        sleep (number): duration to wait between failed requests.
art_enabled (bool): if True the Bandcamp page's artwork will be
downloaded and saved alongside each of the found tracks.
id3_enabled (bool): if True tracks downloaded will receive new ID3 tags.
"""
def __init__(self, url, out=None, verbose=False, silent=False, short=False, sleep=30, id3_enabled=True, art_enabled=True, abort_missing=False):
self.url = url
self.output = out
self.verbose = verbose
self.silent = silent
self.short = short
self.sleep = sleep
self.id3_enabled = id3_enabled
self.art_enabled = art_enabled
self.abort_missing = abort_missing
# Variables used during retrieving of information.
self.request = None
self.content = None
# Get the script path in case no output path is specified.
# self.work_path = os.path.join(
# os.path.dirname(os.path.abspath(__file__)), "")
self.work_path = os.path.join(os.getcwd(), "")
if self.output:
# Make sure that the output folder has the right path syntax
if not os.path.isabs(self.output):
if not os.path.exists(os.path.join(self.work_path, self.output)):
os.makedirs(os.path.join(self.work_path, self.output))
self.output = os.path.join(self.work_path, self.output)
else:
# If no path is specified use the absolute path of the main file.
self.output = self.work_path
def run(self):
"""
Begins downloading the content from the prepared settings.
"""
if not valid_url(self.url):
if not self.silent:
print("The supplied URL is not a valid URL.")
return False
# Get the content from the supplied Bandcamp URL.
self.request = safe_get(self.url)
self.content = self.request.content.decode("utf-8")
if self.request.status_code != 200:
if not self.silent:
print("An error occurred while trying to access your supplied URL. Status code: {}".format(
self.request.status_code))
return
# Get the type of the page supplied to the downloader.
pagetype = page_type(self.content)
if pagetype == "track":
if self.verbose:
print("\nDetected Bandcamp track.")
track = Track(
self.url,
self.output,
request=self.request,
verbose=self.verbose,
silent=self.silent,
short=self.short,
sleep=self.sleep,
art_enabled=self.art_enabled,
id3_enabled=self.id3_enabled
)
if track.prepare(): # Prepare the track by filling out content.
track.download() # Begin the download process.
if self.verbose:
print("\nFinished track download. Downloader complete.")
else:
if self.verbose:
print(
"\nThe track you are trying to download is not publicly available. Consider purchasing it if you want it.")
elif pagetype == "album":
if self.verbose:
print("\nDetected Bandcamp album.")
album = Album(
self.url,
self.output,
request=self.request,
verbose=self.verbose,
silent=self.silent,
short=self.short,
sleep=self.sleep,
art_enabled=self.art_enabled,
id3_enabled=self.id3_enabled,
abort_missing=self.abort_missing
)
if album.prepare(): # Prepare the album with information from the supplied URL.
            if album.fetch():  # Start the download process if the fetches succeeded.
                album.download()
if self.verbose:
print("\nFinished album download. Downloader complete.")
elif pagetype == "discography":
if self.verbose:
print("\nDetected Bandcamp discography page.")
page = Discography(
self.url,
self.output,
request=self.request,
verbose=self.verbose,
silent=self.silent,
short=self.short,
sleep=self.sleep,
art_enabled=self.art_enabled,
id3_enabled=self.id3_enabled,
abort_missing=self.abort_missing
)
page.prepare() # Make discography gather all information it requires.
page.fetch() # Begin telling prepared items to fetch their own information.
page.download() # Start the download process.
if self.verbose:
print("\nFinished discography download. Downloader complete.")
else:
if not self.silent:
print("Invalid page type. Exiting.")
| Downloader |
descuentos.js | function calcularPrecioConDescuento(precio, descuento) {
const porcentajePrecioConDescuento = descuento * 0.01;
const precioConDescuento = precio - precio * porcentajePrecioConDescuento;
return precioConDescuento;
}
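// Quick sanity check of the formula above (illustrative values only):
// a 25% discount turns a price of 200 into 200 - 200 * 0.25 = 150.
//
//   calcularPrecioConDescuento(200, 25); // -> 150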
const cupon = ["Todo2020", "ConToda", "Prime"];
function | () {
const inputPrice = document.getElementById("InputPrice");
const priceValue = inputPrice.value;
const inputDiscount = document.getElementById("InputDiscount");
let discountValue = inputDiscount.value;
const inputCupon = document.getElementById("Cupon");
const cuponValue = inputCupon.value;
switch (cuponValue) {
case "Todo2020":
discountValue = discountValue * 2;
break;
default:
console.log("Hola");
}
const precioCondescuento = calcularPrecioConDescuento(
priceValue,
discountValue
);
const resultP = document.getElementById("ResultP");
resultP.innerText = `${precioCondescuento}`;
const ahorrado = document.getElementById("Ahorrado");
ahorrado.innerText = ` ${priceValue - precioCondescuento}`;
}
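// Illustrative walk-through of the coupon branch above (values are made up):
// with price 100, discount 10 and coupon "Todo2020", the discount is doubled
// to 20, so the displayed result is 100 - 100 * 0.20 = 80 and the savings are 20.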
| PriceDiscount |
daemon.go | package main
import (
"errors"
_ "expvar"
"fmt"
"net"
"net/http"
_ "net/http/pprof"
"os"
"runtime"
"sort"
"sync"
"github.com/crustio/go-ipfs-encryptor/crust"
multierror "github.com/hashicorp/go-multierror"
version "github.com/ipfs/go-ipfs"
config "github.com/ipfs/go-ipfs-config"
cserial "github.com/ipfs/go-ipfs-config/serialize"
utilmain "github.com/ipfs/go-ipfs/cmd/ipfs/util"
oldcmds "github.com/ipfs/go-ipfs/commands"
"github.com/ipfs/go-ipfs/core"
commands "github.com/ipfs/go-ipfs/core/commands"
corehttp "github.com/ipfs/go-ipfs/core/corehttp"
corerepo "github.com/ipfs/go-ipfs/core/corerepo"
libp2p "github.com/ipfs/go-ipfs/core/node/libp2p"
nodeMount "github.com/ipfs/go-ipfs/fuse/node"
fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo"
migrate "github.com/ipfs/go-ipfs/repo/fsrepo/migrations"
sockets "github.com/libp2p/go-socket-activation"
cmds "github.com/ipfs/go-ipfs-cmds"
mprome "github.com/ipfs/go-metrics-prometheus"
options "github.com/ipfs/interface-go-ipfs-core/options"
goprocess "github.com/jbenet/goprocess"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
prometheus "github.com/prometheus/client_golang/prometheus"
promauto "github.com/prometheus/client_golang/prometheus/promauto"
)
const (
adjustFDLimitKwd = "manage-fdlimit"
enableGCKwd = "enable-gc"
initOptionKwd = "init"
initConfigOptionKwd = "init-config"
initProfileOptionKwd = "init-profile"
ipfsMountKwd = "mount-ipfs"
ipnsMountKwd = "mount-ipns"
migrateKwd = "migrate"
mountKwd = "mount"
offlineKwd = "offline" // global option
routingOptionKwd = "routing"
routingOptionSupernodeKwd = "supernode"
routingOptionDHTClientKwd = "dhtclient"
routingOptionDHTKwd = "dht"
routingOptionDHTServerKwd = "dhtserver"
routingOptionNoneKwd = "none"
routingOptionDefaultKwd = "default"
unencryptTransportKwd = "disable-transport-encryption"
unrestrictedApiAccessKwd = "unrestricted-api"
writableKwd = "writable"
enablePubSubKwd = "enable-pubsub-experiment"
enableIPNSPubSubKwd = "enable-namesys-pubsub"
enableMultiplexKwd = "enable-mplex-experiment"
// apiAddrKwd = "address-api"
// swarmAddrKwd = "address-swarm"
)
var daemonCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Run a network-connected IPFS node.",
ShortDescription: `
'ipfs daemon' runs a persistent ipfs daemon that can serve commands
over the network. Most applications that use IPFS will do so by
communicating with a daemon over the HTTP API. While the daemon is
running, calls to 'ipfs' commands will be sent over the network to
the daemon.
`,
LongDescription: `
The daemon will start listening on ports on the network, which are
documented in (and can be modified through) 'ipfs config Addresses'.
For example, to change the 'Gateway' port:
ipfs config Addresses.Gateway /ip4/127.0.0.1/tcp/8082
The API address can be changed the same way:
ipfs config Addresses.API /ip4/127.0.0.1/tcp/5002
Make sure to restart the daemon after changing addresses.
By default, the gateway is only accessible locally. To expose it to
other computers in the network, use 0.0.0.0 as the ip address:
ipfs config Addresses.Gateway /ip4/0.0.0.0/tcp/8080
Be careful if you expose the API. It is a security risk, as anyone could
control your node remotely. If you need to control the node remotely,
make sure to protect the port as you would other services or databases
(firewall, authenticated proxy, etc).
HTTP Headers
ipfs supports passing arbitrary headers to the API and Gateway. You can
do this by setting headers on the API.HTTPHeaders and Gateway.HTTPHeaders
keys:
ipfs config --json API.HTTPHeaders.X-Special-Header "[\"so special :)\"]"
ipfs config --json Gateway.HTTPHeaders.X-Special-Header "[\"so special :)\"]"
Note that the value of the keys is an _array_ of strings. This is because
headers can have more than one value, and it is convenient to pass through
to other libraries.
CORS Headers (for API)
You can setup CORS headers the same way:
ipfs config --json API.HTTPHeaders.Access-Control-Allow-Origin "[\"example.com\"]"
ipfs config --json API.HTTPHeaders.Access-Control-Allow-Methods "[\"PUT\", \"GET\", \"POST\"]"
ipfs config --json API.HTTPHeaders.Access-Control-Allow-Credentials "[\"true\"]"
Shutdown
To shut down the daemon, send a SIGINT signal to it (e.g. by pressing 'Ctrl-C')
or send a SIGTERM signal to it (e.g. with 'kill'). It may take a while for the
daemon to shutdown gracefully, but it can be killed forcibly by sending a
second signal.
IPFS_PATH environment variable
ipfs uses a repository in the local file system. By default, the repo is
located at ~/.ipfs. To change the repo location, set the $IPFS_PATH
environment variable:
export IPFS_PATH=/path/to/ipfsrepo
Routing
IPFS by default will use a DHT for content routing. There is a highly
experimental alternative that operates the DHT in a 'client only' mode that
can be enabled by running the daemon as:
ipfs daemon --routing=dhtclient
This will later be transitioned into a config option once it gets out of the
'experimental' stage.
DEPRECATION NOTICE
Previously, ipfs used an environment variable as seen below:
export API_ORIGIN="http://localhost:8888/"
This is deprecated. It is still honored in this version, but will be removed
in a future version, along with this notice. Please move to setting the HTTP
Headers.
`,
},
Options: []cmds.Option{
cmds.BoolOption(initOptionKwd, "Initialize ipfs with default settings if not already initialized"),
cmds.StringOption(initConfigOptionKwd, "Path to existing configuration file to be loaded during --init"),
cmds.StringOption(initProfileOptionKwd, "Configuration profiles to apply for --init. See ipfs init --help for more"),
cmds.StringOption(routingOptionKwd, "Overrides the routing option").WithDefault(routingOptionDefaultKwd),
cmds.BoolOption(mountKwd, "Mounts IPFS to the filesystem"),
cmds.BoolOption(writableKwd, "Enable writing objects (with POST, PUT and DELETE)"),
cmds.StringOption(ipfsMountKwd, "Path to the mountpoint for IPFS (if using --mount). Defaults to config setting."),
cmds.StringOption(ipnsMountKwd, "Path to the mountpoint for IPNS (if using --mount). Defaults to config setting."),
cmds.BoolOption(unrestrictedApiAccessKwd, "Allow API access to unlisted hashes"),
cmds.BoolOption(unencryptTransportKwd, "Disable transport encryption (for debugging protocols)"),
cmds.BoolOption(enableGCKwd, "Enable automatic periodic repo garbage collection"),
cmds.BoolOption(adjustFDLimitKwd, "Check and raise file descriptor limits if needed").WithDefault(true),
cmds.BoolOption(migrateKwd, "If true, assume yes at the migrate prompt. If false, assume no."),
cmds.BoolOption(enablePubSubKwd, "Instantiate the ipfs daemon with the experimental pubsub feature enabled."),
cmds.BoolOption(enableIPNSPubSubKwd, "Enable IPNS record distribution through pubsub; enables pubsub."),
cmds.BoolOption(enableMultiplexKwd, "DEPRECATED"),
// TODO: add way to override addresses. tricky part: updating the config if also --init.
// cmds.StringOption(apiAddrKwd, "Address for the daemon rpc API (overrides config)"),
// cmds.StringOption(swarmAddrKwd, "Address for the swarm socket (overrides config)"),
},
Subcommands: map[string]*cmds.Command{},
NoRemote: true,
Extra: commands.CreateCmdExtras(commands.SetDoesNotUseConfigAsInput(true)),
Run: daemonFunc,
}
// defaultMux tells mux to serve path using the default muxer. This is
// mostly useful to hook up things that register in the default muxer,
// and don't provide a convenient http.Handler entry point, such as
// expvar and http/pprof.
func defaultMux(path string) corehttp.ServeOption {
return func(node *core.IpfsNode, _ net.Listener, mux *http.ServeMux) (*http.ServeMux, error) {
mux.Handle(path, http.DefaultServeMux)
return mux, nil
}
}
func | (req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) (_err error) {
// Inject metrics before we do anything
err := mprome.Inject()
if err != nil {
log.Errorf("Injecting prometheus handler for metrics failed with message: %s\n", err.Error())
}
// let the user know we're going.
fmt.Printf("Initializing daemon...\n")
defer func() {
if _err != nil {
// Print an extra line before any errors. This could go
// in the commands lib but doesn't really make sense for
// all commands.
fmt.Println()
}
}()
// print the ipfs version
printVersion()
managefd, _ := req.Options[adjustFDLimitKwd].(bool)
if managefd {
if _, _, err := utilmain.ManageFdLimit(); err != nil {
log.Errorf("setting file descriptor limit: %s", err)
}
}
cctx := env.(*oldcmds.Context)
// check transport encryption flag.
unencrypted, _ := req.Options[unencryptTransportKwd].(bool)
if unencrypted {
log.Warnf(`Running with --%s: All connections are UNENCRYPTED.
You will not be able to connect to regular encrypted networks.`, unencryptTransportKwd)
}
	// first, check whether the user has provided the initialization flag. we may be
// running in an uninitialized state.
initialize, _ := req.Options[initOptionKwd].(bool)
if initialize && !fsrepo.IsInitialized(cctx.ConfigRoot) {
cfgLocation, _ := req.Options[initConfigOptionKwd].(string)
profiles, _ := req.Options[initProfileOptionKwd].(string)
var conf *config.Config
if cfgLocation != "" {
if conf, err = cserial.Load(cfgLocation); err != nil {
return err
}
}
if conf == nil {
identity, err := config.CreateIdentity(os.Stdout, []options.KeyGenerateOption{
options.Key.Type(algorithmDefault),
})
if err != nil {
return err
}
conf, err = config.InitWithIdentity(identity)
if err != nil {
return err
}
}
if err = doInit(os.Stdout, cctx.ConfigRoot, false, profiles, conf); err != nil {
return err
}
}
// acquire the repo lock _before_ constructing a node. we need to make
// sure we are permitted to access the resources (datastore, etc.)
repo, err := fsrepo.Open(cctx.ConfigRoot)
switch err {
default:
return err
case fsrepo.ErrNeedMigration:
domigrate, found := req.Options[migrateKwd].(bool)
fmt.Println("Found outdated fs-repo, migrations need to be run.")
if !found {
domigrate = YesNoPrompt("Run migrations now? [y/N]")
}
if !domigrate {
fmt.Println("Not running migrations of fs-repo now.")
fmt.Println("Please get fs-repo-migrations from https://dist.ipfs.io")
return fmt.Errorf("fs-repo requires migration")
}
err = migrate.RunMigration(fsrepo.RepoVersion)
if err != nil {
fmt.Println("The migrations of fs-repo failed:")
fmt.Printf(" %s\n", err)
fmt.Println("If you think this is a bug, please file an issue and include this whole log output.")
fmt.Println(" https://github.com/ipfs/fs-repo-migrations")
return err
}
repo, err = fsrepo.Open(cctx.ConfigRoot)
if err != nil {
return err
}
case nil:
break
}
// The node will also close the repo but there are many places we could
// fail before we get to that. It can't hurt to close it twice.
defer repo.Close()
offline, _ := req.Options[offlineKwd].(bool)
ipnsps, _ := req.Options[enableIPNSPubSubKwd].(bool)
pubsub, _ := req.Options[enablePubSubKwd].(bool)
if _, hasMplex := req.Options[enableMultiplexKwd]; hasMplex {
log.Errorf("The mplex multiplexer has been enabled by default and the experimental %s flag has been removed.")
log.Errorf("To disable this multiplexer, please configure `Swarm.Transports.Multiplexers'.")
}
// Start assembling node config
ncfg := &core.BuildCfg{
Repo: repo,
		Permanent: true, // It is a temporary way to signify that the node is permanent
Online: !offline,
DisableEncryptedConnections: unencrypted,
ExtraOpts: map[string]bool{
"pubsub": pubsub,
"ipnsps": ipnsps,
},
//TODO(Kubuxu): refactor Online vs Offline by adding Permanent vs Ephemeral
}
routingOption, _ := req.Options[routingOptionKwd].(string)
if routingOption == routingOptionDefaultKwd {
cfg, err := repo.Config()
if err != nil {
return err
}
routingOption = cfg.Routing.Type
if routingOption == "" {
routingOption = routingOptionDHTKwd
}
}
switch routingOption {
case routingOptionSupernodeKwd:
return errors.New("supernode routing was never fully implemented and has been removed")
case routingOptionDHTClientKwd:
ncfg.Routing = libp2p.DHTClientOption
case routingOptionDHTKwd:
ncfg.Routing = libp2p.DHTOption
case routingOptionDHTServerKwd:
ncfg.Routing = libp2p.DHTServerOption
case routingOptionNoneKwd:
ncfg.Routing = libp2p.NilRouterOption
default:
return fmt.Errorf("unrecognized routing option: %s", routingOption)
}
node, err := core.NewNode(req.Context, ncfg)
if err != nil {
log.Error("error from node construction: ", err)
return err
}
node.IsDaemon = true
if node.PNetFingerprint != nil {
fmt.Println("Swarm is limited to private network of peers with the swarm key")
fmt.Printf("Swarm key fingerprint: %x\n", node.PNetFingerprint)
}
printSwarmAddrs(node)
defer func() {
// We wait for the node to close first, as the node has children
// that it will wait for before closing, such as the API server.
node.Close()
select {
case <-req.Context.Done():
log.Info("Gracefully shut down daemon")
default:
}
}()
cctx.ConstructNode = func() (*core.IpfsNode, error) {
return node, nil
}
// Start "core" plugins. We want to do this *before* starting the HTTP
// API as the user may be relying on these plugins.
err = cctx.Plugins.Start(node)
if err != nil {
return err
}
node.Process.AddChild(goprocess.WithTeardown(cctx.Plugins.Close))
// construct api endpoint - every time
apiErrc, err := serveHTTPApi(req, cctx)
if err != nil {
return err
}
// construct fuse mountpoints - if the user provided the --mount flag
mount, _ := req.Options[mountKwd].(bool)
if mount && offline {
return cmds.Errorf(cmds.ErrClient, "mount is not currently supported in offline mode")
}
if mount {
if err := mountFuse(req, cctx); err != nil {
return err
}
}
// repo blockstore GC - if --enable-gc flag is present
gcErrc, err := maybeRunGC(req, node)
if err != nil {
return err
}
// construct http gateway
gwErrc, err := serveHTTPGateway(req, cctx)
if err != nil {
return err
}
// Add ipfs version info to prometheus metrics
var ipfsInfoMetric = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "ipfs_info",
Help: "IPFS version information.",
}, []string{"version", "commit"})
// Setting to 1 lets us multiply it with other stats to add the version labels
ipfsInfoMetric.With(prometheus.Labels{
"version": version.CurrentVersionNumber,
"commit": version.CurrentCommit,
}).Set(1)
// initialize metrics collector
prometheus.MustRegister(&corehttp.IpfsNodeCollector{Node: node})
// start MFS pinning thread
startPinMFS(daemonConfigPollInterval, cctx, &ipfsPinMFSNode{node})
// Set crust
cfg, err := repo.Config()
if err != nil {
return err
}
if cc, ok := cfg.Datastore.Spec["crust"]; ok {
if len(cc.(string)) != 0 {
crust.Worker.SetUrl(cc.(string))
fmt.Printf("Crust sworker url: %s\n", cc.(string))
}
}
// The daemon is *finally* ready.
fmt.Printf("Daemon is ready\n")
notifyReady()
// Give the user some immediate feedback when they hit C-c
go func() {
<-req.Context.Done()
notifyStopping()
fmt.Println("Received interrupt signal, shutting down...")
fmt.Println("(Hit ctrl-c again to force-shutdown the daemon.)")
}()
// collect long-running errors and block for shutdown
// TODO(cryptix): our fuse currently doesn't follow this pattern for graceful shutdown
var errs error
for err := range merge(apiErrc, gwErrc, gcErrc) {
if err != nil {
errs = multierror.Append(errs, err)
}
}
return errs
}
// serveHTTPApi collects options, creates listener, prints status message and starts serving requests
func serveHTTPApi(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error) {
cfg, err := cctx.GetConfig()
if err != nil {
return nil, fmt.Errorf("serveHTTPApi: GetConfig() failed: %s", err)
}
listeners, err := sockets.TakeListeners("io.ipfs.api")
if err != nil {
return nil, fmt.Errorf("serveHTTPApi: socket activation failed: %s", err)
}
apiAddrs := make([]string, 0, 2)
apiAddr, _ := req.Options[commands.ApiOption].(string)
if apiAddr == "" {
apiAddrs = cfg.Addresses.API
} else {
apiAddrs = append(apiAddrs, apiAddr)
}
listenerAddrs := make(map[string]bool, len(listeners))
for _, listener := range listeners {
listenerAddrs[string(listener.Multiaddr().Bytes())] = true
}
for _, addr := range apiAddrs {
apiMaddr, err := ma.NewMultiaddr(addr)
if err != nil {
return nil, fmt.Errorf("serveHTTPApi: invalid API address: %q (err: %s)", addr, err)
}
if listenerAddrs[string(apiMaddr.Bytes())] {
continue
}
apiLis, err := manet.Listen(apiMaddr)
if err != nil {
return nil, fmt.Errorf("serveHTTPApi: manet.Listen(%s) failed: %s", apiMaddr, err)
}
listenerAddrs[string(apiMaddr.Bytes())] = true
listeners = append(listeners, apiLis)
}
for _, listener := range listeners {
		// we might have listened to /tcp/0 - let's see what we are listening on
fmt.Printf("API server listening on %s\n", listener.Multiaddr())
// Browsers require TCP.
switch listener.Addr().Network() {
case "tcp", "tcp4", "tcp6":
fmt.Printf("WebUI: http://%s/webui\n", listener.Addr())
}
}
// by default, we don't let you load arbitrary ipfs objects through the api,
// because this would open up the api to scripting vulnerabilities.
// only the webui objects are allowed.
// if you know what you're doing, go ahead and pass --unrestricted-api.
unrestricted, _ := req.Options[unrestrictedApiAccessKwd].(bool)
gatewayOpt := corehttp.GatewayOption(false, corehttp.WebUIPaths...)
if unrestricted {
gatewayOpt = corehttp.GatewayOption(true, "/ipfs", "/ipns")
}
var opts = []corehttp.ServeOption{
corehttp.MetricsCollectionOption("api"),
corehttp.MetricsOpenCensusCollectionOption(),
corehttp.CheckVersionOption(),
corehttp.CommandsOption(*cctx),
corehttp.WebUIOption,
gatewayOpt,
corehttp.VersionOption(),
defaultMux("/debug/vars"),
defaultMux("/debug/pprof/"),
corehttp.MutexFractionOption("/debug/pprof-mutex/"),
corehttp.MetricsScrapingOption("/debug/metrics/prometheus"),
corehttp.LogOption(),
}
if len(cfg.Gateway.RootRedirect) > 0 {
opts = append(opts, corehttp.RedirectOption("", cfg.Gateway.RootRedirect))
}
node, err := cctx.ConstructNode()
if err != nil {
return nil, fmt.Errorf("serveHTTPApi: ConstructNode() failed: %s", err)
}
if err := node.Repo.SetAPIAddr(listeners[0].Multiaddr()); err != nil {
return nil, fmt.Errorf("serveHTTPApi: SetAPIAddr() failed: %s", err)
}
errc := make(chan error)
var wg sync.WaitGroup
for _, apiLis := range listeners {
wg.Add(1)
go func(lis manet.Listener) {
defer wg.Done()
errc <- corehttp.Serve(node, manet.NetListener(lis), opts...)
}(apiLis)
}
go func() {
wg.Wait()
close(errc)
}()
return errc, nil
}
// printSwarmAddrs prints the addresses of the host
func printSwarmAddrs(node *core.IpfsNode) {
if !node.IsOnline {
fmt.Println("Swarm not listening, running in offline mode.")
return
}
var lisAddrs []string
ifaceAddrs, err := node.PeerHost.Network().InterfaceListenAddresses()
if err != nil {
log.Errorf("failed to read listening addresses: %s", err)
}
for _, addr := range ifaceAddrs {
lisAddrs = append(lisAddrs, addr.String())
}
sort.Strings(lisAddrs)
for _, addr := range lisAddrs {
fmt.Printf("Swarm listening on %s\n", addr)
}
var addrs []string
for _, addr := range node.PeerHost.Addrs() {
addrs = append(addrs, addr.String())
}
sort.Strings(addrs)
for _, addr := range addrs {
fmt.Printf("Swarm announcing %s\n", addr)
}
}
// serveHTTPGateway collects options, creates listener, prints status message and starts serving requests
func serveHTTPGateway(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error) {
cfg, err := cctx.GetConfig()
if err != nil {
return nil, fmt.Errorf("serveHTTPGateway: GetConfig() failed: %s", err)
}
writable, writableOptionFound := req.Options[writableKwd].(bool)
if !writableOptionFound {
writable = cfg.Gateway.Writable
}
listeners, err := sockets.TakeListeners("io.ipfs.gateway")
if err != nil {
return nil, fmt.Errorf("serveHTTPGateway: socket activation failed: %s", err)
}
listenerAddrs := make(map[string]bool, len(listeners))
for _, listener := range listeners {
listenerAddrs[string(listener.Multiaddr().Bytes())] = true
}
gatewayAddrs := cfg.Addresses.Gateway
for _, addr := range gatewayAddrs {
gatewayMaddr, err := ma.NewMultiaddr(addr)
if err != nil {
return nil, fmt.Errorf("serveHTTPGateway: invalid gateway address: %q (err: %s)", addr, err)
}
if listenerAddrs[string(gatewayMaddr.Bytes())] {
continue
}
gwLis, err := manet.Listen(gatewayMaddr)
if err != nil {
return nil, fmt.Errorf("serveHTTPGateway: manet.Listen(%s) failed: %s", gatewayMaddr, err)
}
listenerAddrs[string(gatewayMaddr.Bytes())] = true
listeners = append(listeners, gwLis)
}
	// we might have listened to /tcp/0 - let's see what we are listening on
gwType := "readonly"
if writable {
gwType = "writable"
}
for _, listener := range listeners {
fmt.Printf("Gateway (%s) server listening on %s\n", gwType, listener.Multiaddr())
}
cmdctx := *cctx
cmdctx.Gateway = true
var opts = []corehttp.ServeOption{
corehttp.MetricsCollectionOption("gateway"),
corehttp.HostnameOption(),
corehttp.GatewayOption(writable, "/ipfs", "/ipns"),
corehttp.VersionOption(),
corehttp.CheckVersionOption(),
corehttp.CommandsROOption(cmdctx),
}
if cfg.Experimental.P2pHttpProxy {
opts = append(opts, corehttp.P2PProxyOption())
}
if len(cfg.Gateway.RootRedirect) > 0 {
opts = append(opts, corehttp.RedirectOption("", cfg.Gateway.RootRedirect))
}
node, err := cctx.ConstructNode()
if err != nil {
return nil, fmt.Errorf("serveHTTPGateway: ConstructNode() failed: %s", err)
}
errc := make(chan error)
var wg sync.WaitGroup
for _, lis := range listeners {
wg.Add(1)
go func(lis manet.Listener) {
defer wg.Done()
errc <- corehttp.Serve(node, manet.NetListener(lis), opts...)
}(lis)
}
go func() {
wg.Wait()
close(errc)
}()
return errc, nil
}
// mountFuse collects options and opens the fuse mountpoint
func mountFuse(req *cmds.Request, cctx *oldcmds.Context) error {
cfg, err := cctx.GetConfig()
if err != nil {
return fmt.Errorf("mountFuse: GetConfig() failed: %s", err)
}
fsdir, found := req.Options[ipfsMountKwd].(string)
if !found {
fsdir = cfg.Mounts.IPFS
}
nsdir, found := req.Options[ipnsMountKwd].(string)
if !found {
nsdir = cfg.Mounts.IPNS
}
node, err := cctx.ConstructNode()
if err != nil {
return fmt.Errorf("mountFuse: ConstructNode() failed: %s", err)
}
err = nodeMount.Mount(node, fsdir, nsdir)
if err != nil {
return err
}
fmt.Printf("IPFS mounted at: %s\n", fsdir)
fmt.Printf("IPNS mounted at: %s\n", nsdir)
return nil
}
func maybeRunGC(req *cmds.Request, node *core.IpfsNode) (<-chan error, error) {
enableGC, _ := req.Options[enableGCKwd].(bool)
if !enableGC {
return nil, nil
}
errc := make(chan error)
go func() {
errc <- corerepo.PeriodicGC(req.Context, node)
close(errc)
}()
return errc, nil
}
// merge does fan-in of multiple read-only error channels
// taken from http://blog.golang.org/pipelines
func merge(cs ...<-chan error) <-chan error {
var wg sync.WaitGroup
out := make(chan error)
// Start an output goroutine for each input channel in cs. output
// copies values from c to out until c is closed, then calls wg.Done.
output := func(c <-chan error) {
for n := range c {
out <- n
}
wg.Done()
}
for _, c := range cs {
if c != nil {
wg.Add(1)
go output(c)
}
}
// Start a goroutine to close out once all the output goroutines are
// done. This must start after the wg.Add call.
go func() {
wg.Wait()
close(out)
}()
return out
}
func YesNoPrompt(prompt string) bool {
var s string
for i := 0; i < 3; i++ {
fmt.Printf("%s ", prompt)
fmt.Scanf("%s", &s)
switch s {
case "y", "Y":
return true
case "n", "N":
return false
case "":
return false
}
fmt.Println("Please press either 'y' or 'n'")
}
return false
}
func printVersion() {
v := version.CurrentVersionNumber
if version.CurrentCommit != "" {
v += "-" + version.CurrentCommit
}
fmt.Printf("go-ipfs version: %s\n", v)
fmt.Printf("Repo version: %d\n", fsrepo.RepoVersion)
fmt.Printf("System version: %s\n", runtime.GOARCH+"/"+runtime.GOOS)
fmt.Printf("Golang version: %s\n", runtime.Version())
}
| daemonFunc |
cpplint.py | #!/usr/bin/python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It does also not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Linted extensions are .cc, .cpp, and .h. Other file types will be ignored.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
      Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo'
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
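# For example, after the two loops above:
#   _CHECK_REPLACEMENT['CHECK']['==']        == 'CHECK_EQ'
#   _CHECK_REPLACEMENT['EXPECT_FALSE']['=='] == 'EXPECT_NE'  # inverted sense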
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
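# For example, in the line "if (foo and bar) {" the pattern matches "and"
# (preceded by a space and followed by a space), and _ALT_TOKEN_REPLACEMENT
# suggests "&&" as the replacement.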
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
_regexp_compile_cache = {}
# Finds occurrences of NOLINT or NOLINT(...).
_RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?')
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
# FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
matched = _RE_SUPPRESSION.search(raw_line)
if matched:
category = matched.group(1)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(linenum)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(linenum)
else:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
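# For example, a source line ending in "// NOLINT(build/include)" records the
# line number under the 'build/include' category, while a bare "// NOLINT"
# suppresses every category on that line.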
def ResetNolintSuppressions():
"Resets the set of NOLINT suppressions to empty."
_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment.
"""
return (linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
"""Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
"""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
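# Illustrative uses of the cached helpers above (the inputs are made-up examples):
#   Match(r'\s*#include', '#include <vector>')        # anchored at the start
#   Search(r'\bTODO\b', 'int x = 0;  // TODO: fix')   # anywhere in the string
#   ReplaceAll(r'\s+', ' ', 'a    b')                 # -> 'a b'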
class _IncludeState(dict):
"""Tracks line numbers for includes, and the order in which includes appear.
As a dict, an _IncludeState object serves as a mapping between include
filename and line number on which that file was included.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
dict.__init__(self)
self.ResetSection()
def ResetSection(self):
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
def SetLastHeader(self, header_path):
self._last_header = header_path
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
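  # For example (illustrative input), CanonicalizeAlphabeticalOrder('Foo-Bar-inl.h')
  # returns 'foo_bar.h'.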
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
header_path: Canonicalized header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
# If previous section is different from current section, _last_header will
# be reset to empty string, so it's always less than current header.
#
# If previous line was a blank line, assume that the headers are
# intentionally sorted the way they are.
if (self._last_header > header_path and
not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
return False
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in self.errors_by_category.iteritems():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo:
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir) and
not os.path.exists(os.path.join(root_dir, ".git")) and
not os.path.exists(os.path.join(root_dir, ".hg")) and
not os.path.exists(os.path.join(root_dir, ".svn"))):
root_dir = os.path.dirname(root_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
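# Illustrative filter behavior for _ShouldPrintError, assuming the error is
# not NOLINT-suppressed and its confidence meets the verbosity level:
#   with _Filters() == ['-whitespace', '+whitespace/braces'],
#   'whitespace/braces' is printed (re-enabled by the '+' filter) while
#   'whitespace/indent' stays filtered out by '-whitespace'.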
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
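# e.g. this is used (in _CollapseStrings below) to strip escape sequences such
# as \n, \", \123 and \x41, so that escaped quotes do not confuse the
# quote-collapsing patterns that follow.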
# Matches strings. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to be careful about removing spaces so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on the left side, but only if there's a
# non-word character on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r"""(\s*/\*.*\*/\s*$|
/\*.*\*/\s+|
\s+/\*.*\*/(?=\W)|
/\*.*\*/)""", re.VERBOSE)
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
    line: a partial line of code, containing columns 0..n of the full line.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
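# Illustrative examples of IsCppString on simple fragments:
#   IsCppString('int main() {')     -> False
#   IsCppString('printf("hello')    -> True   (string literal left open)
#   IsCppString('printf("hello")')  -> False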
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = ''
else:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if matched:
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
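# Illustrative examples of CleanseComments:
#   CleanseComments('int x = 0;  // counter')  -> 'int x = 0;'
#   CleanseComments('f(/* unused */ 0);')      -> 'f(0);'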
class CleansedLines(object):
"""Holds 3 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
3) raw_lines member contains all the lines without processing.
All these three members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if not _RE_PATTERN_INCLUDE.match(elided):
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
return elided
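# Illustrative examples of CleansedLines._CollapseStrings:
#   'printf("%s: %d", s, i);'  ->  'printf("", s, i);'
#   "if (c == 'a') return;"    ->  "if (c == '') return;"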
def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
"""Find the position just after the matching endchar.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
depth: nesting level at startpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
On finding matching endchar: (index just after matching endchar, 0)
Otherwise: (-1, new depth at end of this line)
"""
for i in xrange(startpos, len(line)):
if line[i] == startchar:
depth += 1
elif line[i] == endchar:
depth -= 1
if depth == 0:
return (i + 1, 0)
return (-1, depth)
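# Illustrative examples of FindEndOfExpressionInLine:
#   FindEndOfExpressionInLine('foo(a, (b + c));', 3, 0, '(', ')') -> (15, 0)
#   FindEndOfExpressionInLine('foo(a,', 3, 0, '(', ')')           -> (-1, 1)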
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
startchar = line[pos]
if startchar not in '({[<':
return (line, clean_lines.NumLines(), -1)
if startchar == '(': endchar = ')'
if startchar == '[': endchar = ']'
if startchar == '{': endchar = '}'
if startchar == '<': endchar = '>'
# Check first line
(end_pos, num_open) = FindEndOfExpressionInLine(
line, pos, 0, startchar, endchar)
if end_pos > -1:
return (line, linenum, end_pos)
# Continue scanning forward
while linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, num_open) = FindEndOfExpressionInLine(
line, 0, num_open, startchar, endchar)
if end_pos > -1:
return (line, linenum, end_pos)
# Did not find endchar before end of file, give up
return (line, clean_lines.NumLines(), -1)
def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar):
"""Find position at the matching startchar.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
depth: nesting level at endpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
On finding matching startchar: (index at matching startchar, 0)
Otherwise: (-1, new depth at beginning of this line)
"""
for i in xrange(endpos, -1, -1):
if line[i] == endchar:
depth += 1
elif line[i] == startchar:
depth -= 1
if depth == 0:
return (i, 0)
return (-1, depth)
def ReverseCloseExpression(clean_lines, linenum, pos):
"""If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *at* the opening brace, or
(line, 0, -1) if we never find the matching opening brace. Note
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
endchar = line[pos]
if endchar not in ')}]>':
return (line, 0, -1)
if endchar == ')': startchar = '('
if endchar == ']': startchar = '['
if endchar == '}': startchar = '{'
if endchar == '>': startchar = '<'
# Check last line
(start_pos, num_open) = FindStartOfExpressionInLine(
line, pos, 0, startchar, endchar)
if start_pos > -1:
return (line, linenum, start_pos)
# Continue scanning backward
while linenum > 0:
linenum -= 1
line = clean_lines.elided[linenum]
(start_pos, num_open) = FindStartOfExpressionInLine(
line, len(line) - 1, num_open, startchar, endchar)
if start_pos > -1:
return (line, linenum, start_pos)
# Did not find startchar before beginning of file, give up
return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
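# Illustrative example (the exact result depends on where RepositoryName()
# finds the checkout root): a header at chrome/browser/browser.h maps to the
# guard CHROME_BROWSER_BROWSER_H_.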
def CheckForHeaderGuard(filename, lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = None
ifndef_linenum = 0
define = None
endif = None
endif_linenum = 0
for linenum, line in enumerate(lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
if not define:
error(filename, 0, 'build/header_guard', 5,
'No #define header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
if define != ifndef:
error(filename, 0, 'build/header_guard', 5,
'#ifndef and #define don\'t match, suggested CPP variable is: %s' %
cppvar)
return
if endif != ('#endif // %s' % cppvar):
error_level = 0
if endif != ('#endif // %s' % (cppvar + '_')):
error_level = 5
ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
error)
error(filename, endif_linenum, 'build/header_guard', error_level,
'#endif line should be "#endif // %s"' % cppvar)
def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
threading_list = (
('asctime(', 'asctime_r('),
('ctime(', 'ctime_r('),
('getgrgid(', 'getgrgid_r('),
('getgrnam(', 'getgrnam_r('),
('getlogin(', 'getlogin_r('),
('getpwnam(', 'getpwnam_r('),
('getpwuid(', 'getpwuid_r('),
('gmtime(', 'gmtime_r('),
('localtime(', 'localtime_r('),
('rand(', 'rand_r('),
('strtok(', 'strtok_r('),
('ttyname(', 'ttyname_r('),
)
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
  Much code was originally written without consideration of
  multi-threading. Also, engineers rely on their old experience;
  they learned POSIX before the threading extensions were added. These
tests guide the engineers to use thread-safe functions (when using
posix directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_function, multithread_safe_function in threading_list:
ix = line.find(single_thread_function)
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_function +
'...) instead of ' + single_thread_function +
'...) for improved thread safety.')
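# Illustrative example: 'tm* t = localtime(&now);' is flagged with a suggestion
# to use localtime_r(...), while member calls such as 'foo.ctime(x)' and calls
# that already use the _r variants are left alone.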
def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
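# e.g. this matches '  *count++;' but not '(*count)++;' or '++*count;'.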
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
  For example, the following function:
void increment_counter(int* count) {
*count++;
}
  is invalid, because it effectively does count++, moving the pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, seen_open_brace):
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, False)
self.name = name
self.starting_linenum = linenum
self.is_derived = False
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
initial_indent = Match(r'^( *)\S', clean_lines.raw_lines[linenum])
if initial_indent:
self.class_indent = len(initial_indent.group(1))
else:
self.class_indent = 0
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# Check that closing brace is aligned with beginning of the class.
# Only do this if the closing brace is indented by only whitespaces.
# This means we will not check single-line class definitions.
indent = Match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, False)
self.name = name or ''
self.starting_linenum = linenum
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
    # Check how many lines are enclosed in this namespace. Don't issue
# warning for missing namespace comments if there aren't enough
# lines. However, do apply checks if there is already an end of
# namespace comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
# Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminates namespaces inside
# preprocessor macros can be cpplint clean.
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
# Besides these, we don't accept anything else, otherwise we might
    # get false negatives when an existing comment is a substring of the
# expected namespace.
if self.name:
# Named namespace
if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class _NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Update pp_stack first
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
# Match start of namespace. The "\b\s*" below catches namespace
# declarations even if it weren't followed by a whitespace, this
# is so that we don't confuse our namespace checker. The
# missing spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
#
# Templates with class arguments may confuse the parser, for example:
# template <class T
# class Comparator = less<T>,
# class Vector = vector<T> >
# class HeapQueue {
#
# Because this parser has no nesting state about templates, by the
# time it saw "class Comparator", it may think that it's a new class.
# Nested templates have a similar problem:
# template <
# typename ExportedType,
# typename TupleType,
# template <typename, typename> class ImplTemplate>
#
# To avoid these cases, we ignore classes that are followed by '=' or '>'
class_decl_match = Match(
r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)'
r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
self.stack.append(_ClassInfo(
class_decl_match.group(4), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(5)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
#indent = access_match.group(1)
#if (len(indent) != classinfo.class_indent + 1 and
# Match(r'^\s*$', indent)):
# if classinfo.is_struct:
# parent = 'struct ' + classinfo.name
# else:
# parent = 'class ' + classinfo.name
# slots = ''
# if access_match.group(3):
# slots = access_match.group(3)
# error(filename, linenum, 'whitespace/indent', 3,
# '%s%s: should be indented +1 space inside %s' % (
# access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
        # If namespace or class hasn't seen an opening brace yet, mark
# namespace/class head as complete. Push a new block onto the
# stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
else:
self.stack.append(_BlockInfo(True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
        # Also pop the stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
% re.escape(base_classname),
line)
if (args and
args.group(1) != 'void' and
not Match(r'(const\s+)?%s(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), args.group(1).strip())):
error(filename, linenum, 'runtime/explicit', 5,
'Single-argument constructors should be marked explicit.')
def CheckSpacingForFunctionCall(filename, line, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
line: The text of the line to check.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions because they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'#\s*define|typedef', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
    line: A single line of text.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
raw = clean_lines.raw_lines
raw_line = raw[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
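# e.g. '// TODO(user): fix this' captures groups (' ', '(user)', ' '), while
# '// TODO: fix this' leaves the username group unset and is reported below.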
def CheckComment(comment, filename, linenum, error):
"""Checks for common mistakes in TODO comments.
Args:
comment: The text of the comment from the line in question.
filename: The name of the current file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
"""Checks for improper use of DISALLOW* macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
r'DISALLOW_EVIL_CONSTRUCTORS|'
r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
if not matched:
return
if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
if nesting_state.stack[-1].access != 'private':
error(filename, linenum, 'readability/constructors', 3,
'%s must be in the private: section' % matched.group(1))
else:
# Found DISALLOW* macro outside a class declaration, or perhaps it
# was used inside a function when it should have been part of the
# class declaration. We could issue a warning here, but it
# probably resulted in a compiler error already.
pass
def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
"""Find the corresponding > to close a template.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Current line number.
init_suffix: Remainder of the current line after the initial <.
Returns:
True if a matching bracket exists.
"""
line = init_suffix
nesting_stack = ['<']
while True:
# Find the next operator that can tell us whether < is used as an
# opening bracket or as a less-than operator. We only want to
# warn on the latter case.
#
# We could also check all other operators and terminate the search
# early, e.g. if we got something like this "a<b+c", the "<" is
# most likely a less-than operator, but then we will get false
# positives for default arguments and other template expressions.
match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line)
if match:
# Found an operator, update nesting stack
operator = match.group(1)
line = match.group(2)
if nesting_stack[-1] == '<':
# Expecting closing angle bracket
if operator in ('<', '(', '['):
nesting_stack.append(operator)
elif operator == '>':
nesting_stack.pop()
if not nesting_stack:
# Found matching angle bracket
return True
elif operator == ',':
# Got a comma after a bracket, this is most likely a template
# argument. We have not seen a closing angle bracket yet, but
# it's probably a few lines later if we look for it, so just
# return early here.
return True
else:
# Got some other operator.
return False
else:
# Expecting closing parenthesis or closing bracket
if operator in ('<', '(', '['):
nesting_stack.append(operator)
elif operator in (')', ']'):
# We don't bother checking for matching () or []. If we got
# something like (] or [), it would have been a syntax error.
nesting_stack.pop()
else:
# Scan the next line
linenum += 1
if linenum >= len(clean_lines.elided):
break
line = clean_lines.elided[linenum]
# Exhausted all remaining lines and still no matching angle bracket.
# Most likely the input was incomplete, otherwise we should have
# seen a semicolon and returned early.
return True
def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
"""Find the corresponding < that started a template.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Current line number.
init_prefix: Part of the current line before the initial >.
Returns:
True if a matching bracket exists.
"""
line = init_prefix
nesting_stack = ['>']
while True:
# Find the previous operator
match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line)
if match:
# Found an operator, update nesting stack
operator = match.group(2)
line = match.group(1)
if nesting_stack[-1] == '>':
# Expecting opening angle bracket
if operator in ('>', ')', ']'):
nesting_stack.append(operator)
elif operator == '<':
nesting_stack.pop()
if not nesting_stack:
# Found matching angle bracket
return True
elif operator == ',':
# Got a comma before a bracket, this is most likely a
# template argument. The opening angle bracket is probably
# there if we look for it, so just return early here.
return True
else:
# Got some other operator.
return False
else:
# Expecting opening parenthesis or opening bracket
if operator in ('>', ')', ']'):
nesting_stack.append(operator)
elif operator in ('(', '['):
nesting_stack.pop()
else:
# Scan the previous line
linenum -= 1
if linenum < 0:
break
line = clean_lines.elided[linenum]
# Exhausted all earlier lines and still no matching angle bracket.
return False
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
  # blank lines at the end of a function (i.e., right before a line like '}').
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
if IsBlankLine(line) and not nesting_state.InNamespaceBody():
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in an 80 column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into an 80 column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: If the line is indented 4 spaces; and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, we complain if there's a comment too near the text
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if (line.count('"', 0, commentpos) -
line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
# Allow one space for new scopes, two spaces otherwise:
if (not Match(r'^\s*{ //', line) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# There should always be a space between the // and the comment
commentend = commentpos + 2
if commentend < len(line) and not line[commentend] == ' ':
# but some lines are exceptions -- e.g. if they're big
# comment delimiters like:
# //----------------------------------------------------------
# or are an empty C++ style Doxygen comment, like:
# ///
# or C++ style Doxygen comments placed after the variable:
# ///< Header comment
# //!< Header comment
# or they begin with multiple slashes followed by a space:
# //////// Header comment
match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
Search(r'^/$', line[commentend:]) or
Search(r'^!< ', line[commentend:]) or
Search(r'^/< ', line[commentend:]) or
Search(r'^/+ ', line[commentend:]))
if not match:
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
CheckComment(line[commentpos:], filename, linenum, error)
line = clean_lines.elided[linenum] # get rid of comments and strings
# Don't try to do spacing checks for operator methods
line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
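# For example, "x=5;" and "a.b=c;" are flagged here, while "x = 5;" and
# assignments inside an if/while line are not.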
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
# Also ignore using ns::operator<<;
match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
if (match and
not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
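# For example, 'std::cout<<"hi"' is flagged here, while "1<<20" and
# "using ns::operator<<;" are not.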
elif not Match(r'#.*include', line):
# Avoid false positives on ->
reduced_line = line.replace('->', '')
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
# technically we should flag it if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
if (match and
not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
if (match and
not FindPreviousMatchingAngleBracket(clean_lines, linenum,
match.group(1))):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
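# For example, "x = a>>b;" is flagged here, while "Foo<Bar<int>> baz" is
# not, since ">>" there is followed by a space.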
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
# A pet peeve of mine: no spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
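# For example, "} else if(foo) {" is flagged here; "} else if (foo) {" is not.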
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
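# For example, "if ( foo) {" is reported as mismatching, while
# "while (  foo  ) {" is reported for having more than one space inside ().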
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that lines contain missing whitespaces, second pass on raw
# lines to confirm that those missing whitespaces are not due to
# elided comments.
if Search(r',[^,\s]', line) and Search(r',[^,\s]', raw[linenum]):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
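# For example, "Foo(a,b)" is flagged here; "Foo(a, b)" is not.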
# You should always have a space after a semicolon
# except for few corner cases
# TODO(unknown): clarify whether 'if (1) { return 1;}' requires one more
# space after ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
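# For example, "for (int i = 0;i < n; ++i)" is flagged here, while
# "return 1;}" is not, since '}' is in the excluded set above.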
# Next we will look for issues with function calls.
CheckSpacingForFunctionCall(filename, line, linenum, error)
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces. And since you should never have braces at the beginning of a line,
# this is an easy test.
match = Match(r'^(.*[^ ({]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<]".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'new char * []'.
if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
# In range-based for, we want spaces before and after the colon, but
# not around "::" tokens that might appear.
if (Search('for *\(.*[^:]:[^: ]', line) or
Search('for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash as can be
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
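# For example, an opening brace on its own line right after "if (foo)" is
# flagged, while one following a line that ends in ',', ';', ':', '(', '{'
# or '}' is not.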
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\s*', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
if endline[endpos:].find('{') == -1: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
else: # common case: else not followed by a multi-line if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, the body of an else should never be on the same line as the else
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
# These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the whitelist wrong means some extra
# semicolons, while the downside for getting the blacklist wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on compound
# literals.
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
Search(r'\s+=\s*$', line_prefix)):
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespaces are allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
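# For example, "while (GetNext(&item));" is flagged as an empty loop body
# and "if (ptr != NULL);" as an empty conditional body.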
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
check_macro = None
start_pos = -1
for macro in _CHECK_MACROS:
i = lines[linenum].find(macro)
if i >= 0:
check_macro = macro
# Find opening parenthesis. Do a regular expression match here
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
matched = Match(r'^(.*\b' + check_macro + r'\s*)\(', lines[linenum])
if not matched:
continue
start_pos = len(matched.group(1))
break
if not check_macro or start_pos < 0:
# Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in xrange(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
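# For example, CHECK(x == 42) yields a suggestion to use CHECK_EQ, while
# CHECK((a < 4) == b) and CHECK(a < b && b < c) are left alone.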
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, 1, '(', ')')
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
# Check that rhs does not contain logical operators. We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator))
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
# it provides a way to work around this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line)
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings.
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# There are certain situations we allow one space, notably for section labels
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
# developer's fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
extended_length = int((_line_length * 1.25))
if line_width > extended_length:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than %i characters' %
extended_length)
elif line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
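# For example, with the default line length of 80 the extended threshold is
# int(80 * 1.25) == 100, so a 90-character line gets the level-2 warning and
# a 105-character line gets the level-4 warning.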
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_cpp_h = include in _CPP_HEADERS
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
# lives in . , then it's likely to be owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
error(filename, linenum, 'build/include', 4,
'Include the directory when naming .h files')
# We shouldn't include a file more than once. Actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
if include in include_state:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, include_state[include]))
else:
include_state[include] = linenum
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
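# For example, in foo.cc the expected order is:
#   #include "foo/foo.h"
#   #include <stdio.h>
#   #include <string>
#   #include "bar/bar.h"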
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
# Look for any of the stream classes that are part of standard C++.
match = _RE_PATTERN_INCLUDE.match(line)
# if match:
# include = match.group(2)
# if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
# # Many unit tests use cout, so we exempt them.
# if not _IsTestFilename(filename):
# error(filename, linenum, 'readability/streams', 3,
# 'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
(, [, or {, and the matching close-punctuation symbol. This properly handles
nested occurrences of the punctuation, so for text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match string having an open punctuation symbol at the end.
Args:
text: The lines to extract text from. Its comments and strings must be elided.
It can be a single line or span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (and use inferior regexp matching today).
# Maps each opening punctuation character to its matching closing character.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
'start_pattern must end with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
'start_pattern must end with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
r'(?:\w|'
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
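# For example, a parameter like "string& s" matches _RE_PATTERN_REF_PARAM but
# not _RE_PATTERN_CONST_REF_PARAM, so CheckForNonConstReference flags it;
# "const string& s" matches both patterns and is accepted.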
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line):
include_state.ResetSection()
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# TODO(unknown): figure out if they're using default arguments in fn proto.
# Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
if match:
matched_new = match.group(1)
matched_type = match.group(2)
matched_funcptr = match.group(3)
# gMock methods are defined using some variant of MOCK_METHODx(name, type)
# where type may be float(), int(string), etc. Without context they are
# virtually indistinguishable from int(x) casts. Likewise, gMock's
# MockCallback takes a template parameter of the form return_type(arg_type),
# which looks much like the cast we're trying to detect.
#
# std::function<> wrapper has a similar problem.
#
# Return types for function pointers also look like casts if they
# don't have an extra space.
if (matched_new is None and # If new operator, then this isn't a cast
not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
Search(r'\bMockCallback<.*>', line) or
Search(r'\bstd::function<.*>', line)) and
not (matched_funcptr and
Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr))):
# Try a bit harder to catch gmock lines: the only place where
# something looks like an old-style cast is where we declare the
# return type of the mocked method, and the only time when we
# are missing context is if MOCK_METHOD was split across
# multiple lines. The missing MOCK_METHOD is usually one or two
# lines back, so scan back one or two lines.
#
# It's not possible for gmock macros to appear in the first 2
# lines, since the class head + section name takes up 2 lines.
if (linenum < 2 or
not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]))):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
match = Search(
r'(?:&\(([^)]+)\)[\w(])|'
r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line)
if match and match.group(1) != '*':
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
# Create an extended_line, which is the concatenation of the current and
# next lines, for more effective checking of code that may span more than one
# line.
if linenum + 1 < clean_lines.NumLines():
extended_line = line + clean_lines.elided[linenum + 1]
else:
extended_line = line
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
match = Match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Make sure it's not a function.
# Function template specialization looks like: "string foo<Type>(...".
# Class template definitions look like: "string Foo<Type>::Method(...".
#
# Also ignore things that look like operators. These are matched separately
# because operator names cross non-word boundaries. If we change the pattern
# above, we would decrease the accuracy of matching identifiers.
if (match and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(match.group(1), match.group(2)))
if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
if file_extension == 'h':
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
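# For example, "long long counter;" is flagged here with a suggestion to use
# the int64 typedef, while "unsigned short port" and "long double x" are not.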
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\b', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\b', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(sugawarayu): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
# DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
# in the class declaration.
match = Match(
(r'\s*'
r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
r'\(.*\);$'),
line)
if match and linenum + 1 < clean_lines.NumLines():
next_line = clean_lines.elided[linenum + 1]
# We allow some, but not all, declarations of variables to be present
# in the statement that defines the class. The [\w\*,\s]* fragment of
# the regular expression below allows users to declare instances of
# the class or pointers to instances, but not less common types such
# as function pointers or arrays. It's a tradeoff between allowing
# reasonable code and avoiding trying to parse more C++ using regexps.
if not Search(r'^\s*}[\w\*,\s]*;', next_line):
error(filename, linenum, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckForNonConstReference(filename, clean_lines, linenum,
nesting_state, error):
"""Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Do nothing if there is no '&' on current line.
line = clean_lines.elided[linenum]
if '&' not in line:
return
# Long type names may be broken across multiple lines, usually in one
# of these forms:
# LongType
# ::LongTypeContinued &identifier
# LongType::
# LongTypeContinued &identifier
# LongType<
# ...>::LongTypeContinued &identifier
#
# If we detected a type split across two lines, join the previous
# line to current line so that we can match const references
# accordingly.
#
# Note that this only scans back one line, since scanning back
# arbitrary number of lines would be expensive. If you have a type
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
else:
# Check for templated parameter that is split across multiple lines
endpos = line.rfind('>')
if endpos > -1:
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, endpos)
if startpos > -1 and startline < linenum:
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
for i in xrange(startline, linenum + 1):
line += clean_lines.elided[i].strip()
# Check for non-const references in function parameters. A single '&' may
# be found in the following places:
# inside expression: binary & for bitwise AND
# inside expression: unary & for taking the address of something
# inside declarators: reference parameter
# We will exclude the first two cases by checking that we are not inside a
# function body, including one that was just introduced by a trailing '{'.
# TODO(unknown): Doesn't account for preprocessor directives.
# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
check_params = False
if not nesting_state.stack:
check_params = True # top level
elif (isinstance(nesting_state.stack[-1], _ClassInfo) or
isinstance(nesting_state.stack[-1], _NamespaceInfo)):
check_params = True # within class or namespace
elif Match(r'.*{\s*$', line):
if (len(nesting_state.stack) == 1 or
isinstance(nesting_state.stack[-2], _ClassInfo) or
isinstance(nesting_state.stack[-2], _NamespaceInfo)):
check_params = True # just opened global/class/namespace block
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". Do not check
# those function parameters.
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
if Search(whitelisted_functions, line):
check_params = False
elif not Search(r'\S+\([^)]*$', line):
# Don't see a whitelisted function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in xrange(2):
if (linenum > i and
Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
check_params = False
break
if check_params:
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer: ' +
ReplaceAll(' *<', '<', parameter))
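# Illustrative only (these snippets are not from cpplint's own tests): the
# kind of C++ parameter declarations this check is meant to distinguish.
#
#   void Frobnicate(string &s);        // flagged: runtime/references
#   void Frobnicate(const string &s);  // fine: const reference
#   void Frobnicate(string *s);        // fine: pointer instead of reference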
def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
linenum: The number of the line to check.
line: The line of code to check.
raw_line: The raw line of code to check, with comments.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
match = Search(pattern, line)
if not match:
return False
# Exclude lines with sizeof, since sizeof looks like a cast.
sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
if sizeof_match:
return False
# operator++(int) and operator--(int)
if (line[0:match.start(1) - 1].endswith(' operator++') or
line[0:match.start(1) - 1].endswith(' operator--')):
return False
# A single unnamed argument for a function tends to look like old
# style cast. If we see those, don't issue warnings for deprecated
# casts, instead issue warnings for unnamed arguments where
# appropriate.
#
# These are things that we want warnings for, since the style guide
  # explicitly requires all parameters to be named:
# Function(int);
# Function(int) {
# ConstMember(int) const;
# ConstMember(int) const {
# ExceptionMember(int) throw (...);
# ExceptionMember(int) throw (...) {
# PureVirtual(int) = 0;
#
# These are functions of some sort, where the compiler would be fine
# if they had named parameters, but people often omit those
# identifiers to reduce clutter:
# (FunctionPointer)(int);
# (FunctionPointer)(int) = value;
# Function((function_pointer_arg)(int))
# <TemplateArgument(int)>;
# <(FunctionPointerTemplateArgument)(int)>;
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|=|>|\{|\))', remainder):
# Looks like an unnamed parameter.
# Don't warn on any kind of template arguments.
if Match(r'^\s*>', remainder):
return False
# Don't warn on assignments to function pointers, but keep warnings for
# unnamed parameters to pure virtual functions. Note that this pattern
# will also pass on assignments of "0" to function pointers, but the
# preferred values for those would be "nullptr" or "NULL".
matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
if matched_zero and matched_zero.group(1) != '0':
return False
# Don't warn on function pointer declarations. For this we need
# to check what came before the "(type)" string.
if Match(r'.*\)\s*$', line[0:match.start(0)]):
return False
# Don't warn if the parameter is named with block comments, e.g.:
# Function(int /*unused_param*/);
if '/*' in raw_line:
return False
# Passed all filters, issue warning here.
error(filename, linenum, 'readability/function', 3,
'All parameters should be named in a function')
return True
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
  The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the .cc file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
if not filename_cc.endswith('.cc'):
return (False, '')
filename_cc = filename_cc[:-len('.cc')]
if filename_cc.endswith('_unittest'):
filename_cc = filename_cc[:-len('_unittest')]
elif filename_cc.endswith('_test'):
filename_cc = filename_cc[:-len('_test')]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
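# A rough sketch of the expected results (illustration only; the paths below
# are made up and not taken from any real project):
#
#   FilesBelongToSameModule('a/b/foo.cc', 'a/b/foo.h')
#       -> (True, '')
#   FilesBelongToSameModule('/abs/path/to/base/sysinfo.cc', 'base/sysinfo.h')
#       -> (True, '/abs/path/to/')
#   FilesBelongToSameModule('a/b/foo.cc', 'a/b/bar.h')
#       -> (False, '')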
def UpdateIncludeState(filename, include_state, io=codecs):
"""Fill up the include_state with new includes found from the file.
Args:
filename: the name of the header to read.
include_state: an _IncludeState instance in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
    True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
# The value formatting is cute, but not really used right now.
# What matters here is that the key is in include_state.
include_state.setdefault(include, '%s:%d' % (filename, linenum))
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include the <functional> header.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in xrange(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (linenum, template)
# The following function is just a speed up, no semantics are changed.
if not '<' in line: # Reduces the cpu time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
if pattern.search(line):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's copy the include_state so it is only messed up within this function.
include_state = include_state.copy()
  # Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_state is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = include_state.keys()
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_state, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cc') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_state:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
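# Illustration only (not from cpplint's test suite): what the explicit
# make_pair check flags versus what it accepts.
#
#   m.insert(std::make_pair<int, string>(1, "a"));  // flagged: explicit args
#   m.insert(std::make_pair(1, "a"));               // fine: deduced args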
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM:
return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=[]):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = _NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
if file_extension == 'h':
CheckForHeaderGuard(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
def ProcessFile(filename, vlevel, extra_check_functions=[]):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below. If it is not expected to be present (i.e. os.linesep !=
# '\r\n' as in Windows), a warning is issued below if this file
# is processed.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
carriage_return_found = False
# Remove trailing '\r'.
for linenum in range(len(lines)):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
carriage_return_found = True
except IOError:
sys.stderr.write(
"Skipping input '%s': Can't open for reading\n" % filename)
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
valid_extensions = ['cc', 'h', 'cpp', 'cu', 'cuh']
if filename != '-' and file_extension not in valid_extensions:
sys.stderr.write('Ignoring %s; not a valid file name '
'(.cc, .h, .cpp, .cu, .cuh)\n' % filename)
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
if carriage_return_found and os.linesep != '\r\n':
# Use 0 for linenum since outputting only one error for potentially
# several lines.
Error(filename, 0, 'whitespace/newline', 1,
          'One or more unexpected \\r (^M) found; '
'better to use only a \\n')
sys.stderr.write('Done processing %s\n' % filename)
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
    args: The command line arguments.
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'linelength='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be digits.')
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
def main():
filenames = ParseArguments(sys.argv[1:])
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main() | |
tracks.go | package mixxxdb
import (
"errors"
"github.com/upper/db/v4"
)
// Compile-time check on conformance to interface.
var _ = db.Record(&Track{})
var _ = db.Store(&tracksDB{})
var _ = TracksDB(&tracksDB{})
// Track represents a music file that can be played by Mixxx.
type Track struct {
	// The track's ID in the Mixxx DB. It can be found in the `library` table. Note: do not confuse it with the ID in the `track_locations` table.
ID int `db:"id,omitempty"`
// ID of the current TrackLocation of this music track.
Location int `db:"location"`
}
func (_ *Track) Store(sess db.Session) db.Store {
return NewTracksDB(sess)
}
//-------------------------------------------------------------------------------------------------------------
func NewTracksDB(sess db.Session) TracksDB |
type tracksDB struct {
db.Collection
}
func (tracks *tracksDB) FindByLocationID(id int) (*Track, error) {
var track Track
err := tracks.Find(db.Cond{"location": id}).One(&track)
if errors.Is(err, db.ErrNoMoreRows) {
return nil, nil
}
return &track, err
}
| {
return &tracksDB{sess.Collection("library")}
} |
syscalls.go | package system
//NOTES:
//* syscall constants in the "syscall" package are nice, but some syscalls there are missing
//* future versions will include more than just the syscall name
//* 32bit (x86/i386) and 64bit (x86_64) syscall numbers are different
const (
SyscallX86MinNum = 0
SyscallX86UnknownNum = -1
SyscallX86UnknownName = "unknown_syscall"
)
type NumberResolverFunc func(uint32) string
type NameResolverFunc func(string) (uint32, bool)
func CallNumberResolver(arch ArchName) NumberResolverFunc {
switch arch {
case ArchName386:
return callNameX86Family32
case ArchNameAmd64:
return callNameX86Family64
case ArchNameArm32:
return callNameArmFamily32
default:
return nil
}
}
func | (arch ArchName) NameResolverFunc {
switch arch {
case ArchName386:
return callNumberX86Family32
case ArchNameAmd64:
return callNumberX86Family64
case ArchNameArm32:
return callNumberArmFamily32
default:
return nil
}
}
| CallNameResolver |
0003_auto_20200402_0105.py | # Generated by Django 2.2.11 on 2020-04-01 17:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djpmp', '0002_auto_20200401_1607'),
]
operations = [
migrations.AlterModelOptions(
name='hrcalendar',
options={'verbose_name': '资源日历', 'verbose_name_plural': '资源日历'},
),
migrations.AlterModelOptions(
name='project',
options={'verbose_name': '项目', 'verbose_name_plural': '项目'},
),
migrations.AlterModelOptions(
name='staff',
options={'verbose_name': '成员', 'verbose_name_plural': '成员'},
),
migrations.AlterModelOptions(
name='wbs',
options={'ordering': ('tree_id', 'lft'), 'verbose_name': 'WBS', 'verbose_name_plural': 'WBS'}, | ),
migrations.AddField(
model_name='hrcalendar',
name='tasks_memo',
field=models.CharField(blank=True, max_length=512, null=True, verbose_name='任务说明'),
),
migrations.AlterUniqueTogether(
name='hrcalendar',
unique_together={('work_date', 'staff')},
),
] | |
SpinnerView.ts | import { bridge } from '../decorator/bridge'
import { native } from '../decorator/native'
import { View } from './View'
import './SpinnerView.ds'
@bridge('dezel.view.SpinnerView')
/**
* Displays a spinning indicator.
* @class SpinnerView
* @super View
* @since 0.1.0
*/
export class | extends View {
//--------------------------------------------------------------------------
// Properties
//--------------------------------------------------------------------------
/**
* The spinner view's active status.
* @property active
* @since 0.1.0
*/
@native public active!: boolean
/**
* The spinner view's color.
* @property color
* @since 0.1.0
*/
@native public color!: string
} | SpinnerView |
networkallocator.go | package networkallocator
import (
"fmt"
"net"
"github.com/docker/docker/pkg/plugins"
"github.com/docker/libnetwork/datastore"
"github.com/docker/libnetwork/driverapi"
"github.com/docker/libnetwork/drvregistry"
"github.com/docker/libnetwork/ipamapi"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
const (
// DefaultDriver defines the name of the driver to be used by
// default if a network without any driver name specified is
// created.
DefaultDriver = "overlay"
)
// NetworkAllocator acts as the controller for all network related operations
// like managing network and IPAM drivers and also creating and
// deleting networks and the associated resources.
type NetworkAllocator struct {
// The driver register which manages all internal and external
// IPAM and network drivers.
drvRegistry *drvregistry.DrvRegistry
// The port allocator instance for allocating node ports
portAllocator *portAllocator
// Local network state used by NetworkAllocator to do network management.
networks map[string]*network
// Allocator state to indicate if allocation has been
// successfully completed for this service.
services map[string]struct{}
// Allocator state to indicate if allocation has been
// successfully completed for this task.
tasks map[string]struct{}
// Allocator state to indicate if allocation has been
// successfully completed for this node.
nodes map[string]struct{}
}
// Local in-memory state related to a network that needs to be tracked by NetworkAllocator
type network struct {
// A local cache of the store object.
nw *api.Network
// pools is used to save the internal poolIDs needed when
// releasing the pool.
pools map[string]string
// endpoints is a map of endpoint IP to the poolID from which it
// was allocated.
endpoints map[string]string
}
type initializer struct {
fn drvregistry.InitFunc
ntype string
}
// New returns a new NetworkAllocator handle
func | () (*NetworkAllocator, error) {
na := &NetworkAllocator{
networks: make(map[string]*network),
services: make(map[string]struct{}),
tasks: make(map[string]struct{}),
nodes: make(map[string]struct{}),
}
// There are no driver configurations and notification
// functions as of now.
reg, err := drvregistry.New(nil, nil, nil, nil, nil)
if err != nil {
return nil, err
}
if err := initializeDrivers(reg); err != nil {
return nil, err
}
if err = initIPAMDrivers(reg); err != nil {
return nil, err
}
pa, err := newPortAllocator()
if err != nil {
return nil, err
}
na.portAllocator = pa
na.drvRegistry = reg
return na, nil
}
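// Sketch of a typical call order (illustrative only; error handling is
// elided and the *api.Network value is assumed to come from the store):
//
//	na, err := New()
//	if err != nil { /* handle error */ }
//	err = na.Allocate(nw)    // nw is an *api.Network
//	defer na.Deallocate(nw)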
// Allocate allocates all the necessary resources both general
// and driver-specific which may be specified in the NetworkSpec
func (na *NetworkAllocator) Allocate(n *api.Network) error {
if _, ok := na.networks[n.ID]; ok {
return fmt.Errorf("network %s already allocated", n.ID)
}
pools, err := na.allocatePools(n)
if err != nil {
return errors.Wrapf(err, "failed allocating pools and gateway IP for network %s", n.ID)
}
if err := na.allocateDriverState(n); err != nil {
na.freePools(n, pools)
return errors.Wrapf(err, "failed while allocating driver state for network %s", n.ID)
}
na.networks[n.ID] = &network{
nw: n,
pools: pools,
endpoints: make(map[string]string),
}
return nil
}
func (na *NetworkAllocator) getNetwork(id string) *network {
return na.networks[id]
}
// Deallocate frees all the general and driver specific resources
// which were assigned to the passed network.
func (na *NetworkAllocator) Deallocate(n *api.Network) error {
localNet := na.getNetwork(n.ID)
if localNet == nil {
return fmt.Errorf("could not get networker state for network %s", n.ID)
}
if err := na.freeDriverState(n); err != nil {
return errors.Wrapf(err, "failed to free driver state for network %s", n.ID)
}
delete(na.networks, n.ID)
return na.freePools(n, localNet.pools)
}
// ServiceAllocate allocates all the network resources such as virtual
// IP and ports needed by the service.
func (na *NetworkAllocator) ServiceAllocate(s *api.Service) (err error) {
if err = na.portAllocator.serviceAllocatePorts(s); err != nil {
return
}
defer func() {
if err != nil {
na.ServiceDeallocate(s)
}
}()
if s.Endpoint == nil {
s.Endpoint = &api.Endpoint{}
}
s.Endpoint.Spec = s.Spec.Endpoint.Copy()
// If ResolutionMode is DNSRR do not try allocating VIPs, but
// free any VIP from previous state.
if s.Spec.Endpoint != nil && s.Spec.Endpoint.Mode == api.ResolutionModeDNSRoundRobin {
if s.Endpoint != nil {
for _, vip := range s.Endpoint.VirtualIPs {
if err := na.deallocateVIP(vip); err != nil {
// don't bail here, deallocate as many as possible.
log.L.WithError(err).
WithField("vip.network", vip.NetworkID).
WithField("vip.addr", vip.Addr).Error("error deallocating vip")
}
}
s.Endpoint.VirtualIPs = nil
}
delete(na.services, s.ID)
return
}
// First allocate VIPs for all the pre-populated endpoint attachments
for _, eAttach := range s.Endpoint.VirtualIPs {
if err = na.allocateVIP(eAttach); err != nil {
return
}
}
// Always prefer NetworkAttachmentConfig in the TaskSpec
specNetworks := s.Spec.Task.Networks
if len(specNetworks) == 0 && s != nil && len(s.Spec.Networks) != 0 {
specNetworks = s.Spec.Networks
}
outer:
for _, nAttach := range specNetworks {
for _, vip := range s.Endpoint.VirtualIPs {
if vip.NetworkID == nAttach.Target {
continue outer
}
}
vip := &api.Endpoint_VirtualIP{NetworkID: nAttach.Target}
if err = na.allocateVIP(vip); err != nil {
return
}
s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs, vip)
}
na.services[s.ID] = struct{}{}
return
}
// ServiceDeallocate de-allocates all the network resources such as
// virtual IP and ports associated with the service.
func (na *NetworkAllocator) ServiceDeallocate(s *api.Service) error {
if s.Endpoint == nil {
return nil
}
for _, vip := range s.Endpoint.VirtualIPs {
if err := na.deallocateVIP(vip); err != nil {
// don't bail here, deallocate as many as possible.
log.L.WithError(err).
WithField("vip.network", vip.NetworkID).
WithField("vip.addr", vip.Addr).Error("error deallocating vip")
}
}
na.portAllocator.serviceDeallocatePorts(s)
delete(na.services, s.ID)
return nil
}
// IsAllocated returns if the passed network has been allocated or not.
func (na *NetworkAllocator) IsAllocated(n *api.Network) bool {
_, ok := na.networks[n.ID]
return ok
}
// IsTaskAllocated returns if the passed task has its network resources allocated or not.
func (na *NetworkAllocator) IsTaskAllocated(t *api.Task) bool {
// If the task is not found in the allocated set, then it is
// not allocated.
if _, ok := na.tasks[t.ID]; !ok {
return false
}
// If Networks is empty there is no way this Task is allocated.
if len(t.Networks) == 0 {
return false
}
// To determine whether the task has its resources allocated,
// we just need to look at one network(in case of
// multi-network attachment). This is because we make sure we
// allocate for every network or we allocate for none.
// If the network is not allocated, the task cannot be allocated.
localNet, ok := na.networks[t.Networks[0].Network.ID]
if !ok {
return false
}
// Addresses empty. Task is not allocated.
if len(t.Networks[0].Addresses) == 0 {
return false
}
// The allocated IP address not found in local endpoint state. Not allocated.
if _, ok := localNet.endpoints[t.Networks[0].Addresses[0]]; !ok {
return false
}
return true
}
// IsServiceAllocated returns if the passed service has its network resources allocated or not.
func (na *NetworkAllocator) IsServiceAllocated(s *api.Service) bool {
// If endpoint mode is VIP and allocator does not have the
// service in VIP allocated set then it is not allocated.
if (len(s.Spec.Task.Networks) != 0 || len(s.Spec.Networks) != 0) &&
(s.Spec.Endpoint == nil ||
s.Spec.Endpoint.Mode == api.ResolutionModeVirtualIP) {
if _, ok := na.services[s.ID]; !ok {
return false
}
}
// If the endpoint mode is DNSRR and allocator has the service
// in VIP allocated set then we return not allocated to make
// sure the allocator triggers networkallocator to free up the
// resources if any.
if s.Spec.Endpoint != nil && s.Spec.Endpoint.Mode == api.ResolutionModeDNSRoundRobin {
if _, ok := na.services[s.ID]; ok {
return false
}
}
if (s.Spec.Endpoint != nil && len(s.Spec.Endpoint.Ports) != 0) ||
(s.Endpoint != nil && len(s.Endpoint.Ports) != 0) {
return na.portAllocator.isPortsAllocated(s)
}
return true
}
// IsNodeAllocated returns if the passed node has its network resources allocated or not.
func (na *NetworkAllocator) IsNodeAllocated(node *api.Node) bool {
// If the node is not found in the allocated set, then it is
// not allocated.
if _, ok := na.nodes[node.ID]; !ok {
return false
}
// If no attachment, not allocated.
if node.Attachment == nil {
return false
}
// If the network is not allocated, the node cannot be allocated.
localNet, ok := na.networks[node.Attachment.Network.ID]
if !ok {
return false
}
// Addresses empty, not allocated.
if len(node.Attachment.Addresses) == 0 {
return false
}
// The allocated IP address not found in local endpoint state. Not allocated.
if _, ok := localNet.endpoints[node.Attachment.Addresses[0]]; !ok {
return false
}
return true
}
// AllocateNode allocates the IP addresses for the network to which
// the node is attached.
func (na *NetworkAllocator) AllocateNode(node *api.Node) error {
if err := na.allocateNetworkIPs(node.Attachment); err != nil {
return err
}
na.nodes[node.ID] = struct{}{}
return nil
}
// DeallocateNode deallocates the IP addresses for the network to
// which the node is attached.
func (na *NetworkAllocator) DeallocateNode(node *api.Node) error {
delete(na.nodes, node.ID)
return na.releaseEndpoints([]*api.NetworkAttachment{node.Attachment})
}
// AllocateTask allocates all the endpoint resources for all the
// networks that a task is attached to.
func (na *NetworkAllocator) AllocateTask(t *api.Task) error {
for i, nAttach := range t.Networks {
if err := na.allocateNetworkIPs(nAttach); err != nil {
if err := na.releaseEndpoints(t.Networks[:i]); err != nil {
log.G(context.TODO()).WithError(err).Errorf("Failed to release IP addresses while rolling back allocation for task %s network %s", t.ID, nAttach.Network.ID)
}
return errors.Wrapf(err, "failed to allocate network IP for task %s network %s", t.ID, nAttach.Network.ID)
}
}
na.tasks[t.ID] = struct{}{}
return nil
}
// DeallocateTask releases all the endpoint resources for all the
// networks that a task is attached to.
func (na *NetworkAllocator) DeallocateTask(t *api.Task) error {
delete(na.tasks, t.ID)
return na.releaseEndpoints(t.Networks)
}
func (na *NetworkAllocator) releaseEndpoints(networks []*api.NetworkAttachment) error {
for _, nAttach := range networks {
ipam, _, err := na.resolveIPAM(nAttach.Network)
if err != nil {
return errors.Wrapf(err, "failed to resolve IPAM while allocating")
}
localNet := na.getNetwork(nAttach.Network.ID)
if localNet == nil {
return fmt.Errorf("could not find network allocater state for network %s", nAttach.Network.ID)
}
// Do not fail and bail out if we fail to release IP
// address here. Keep going and try releasing as many
// addresses as possible.
for _, addr := range nAttach.Addresses {
// Retrieve the poolID and immediately nuke
// out the mapping.
poolID := localNet.endpoints[addr]
delete(localNet.endpoints, addr)
ip, _, err := net.ParseCIDR(addr)
if err != nil {
log.G(context.TODO()).Errorf("Could not parse IP address %s while releasing", addr)
continue
}
if err := ipam.ReleaseAddress(poolID, ip); err != nil {
log.G(context.TODO()).WithError(err).Errorf("IPAM failure while releasing IP address %s", addr)
}
}
// Clear out the address list when we are done with
// this network.
nAttach.Addresses = nil
}
return nil
}
// allocate virtual IP for a single endpoint attachment of the service.
func (na *NetworkAllocator) allocateVIP(vip *api.Endpoint_VirtualIP) error {
localNet := na.getNetwork(vip.NetworkID)
if localNet == nil {
return fmt.Errorf("networkallocator: could not find local network state")
}
// If this IP is already allocated in memory we don't need to
// do anything.
if _, ok := localNet.endpoints[vip.Addr]; ok {
return nil
}
ipam, _, err := na.resolveIPAM(localNet.nw)
if err != nil {
return errors.Wrap(err, "failed to resolve IPAM while allocating")
}
var addr net.IP
if vip.Addr != "" {
var err error
addr, _, err = net.ParseCIDR(vip.Addr)
if err != nil {
return err
}
}
for _, poolID := range localNet.pools {
ip, _, err := ipam.RequestAddress(poolID, addr, nil)
if err != nil && err != ipamapi.ErrNoAvailableIPs && err != ipamapi.ErrIPOutOfRange {
return errors.Wrap(err, "could not allocate VIP from IPAM")
}
// If we got an address then we are done.
if err == nil {
ipStr := ip.String()
localNet.endpoints[ipStr] = poolID
vip.Addr = ipStr
return nil
}
}
return errors.New("could not find an available IP while allocating VIP")
}
func (na *NetworkAllocator) deallocateVIP(vip *api.Endpoint_VirtualIP) error {
localNet := na.getNetwork(vip.NetworkID)
if localNet == nil {
return errors.New("networkallocator: could not find local network state")
}
ipam, _, err := na.resolveIPAM(localNet.nw)
if err != nil {
return errors.Wrap(err, "failed to resolve IPAM while allocating")
}
// Retrieve the poolID and immediately nuke
// out the mapping.
poolID := localNet.endpoints[vip.Addr]
delete(localNet.endpoints, vip.Addr)
ip, _, err := net.ParseCIDR(vip.Addr)
if err != nil {
log.G(context.TODO()).Errorf("Could not parse VIP address %s while releasing", vip.Addr)
return err
}
if err := ipam.ReleaseAddress(poolID, ip); err != nil {
log.G(context.TODO()).WithError(err).Errorf("IPAM failure while releasing VIP address %s", vip.Addr)
return err
}
return nil
}
// allocate the IP addresses for a single network attachment of the task.
func (na *NetworkAllocator) allocateNetworkIPs(nAttach *api.NetworkAttachment) error {
var ip *net.IPNet
ipam, _, err := na.resolveIPAM(nAttach.Network)
if err != nil {
return errors.Wrap(err, "failed to resolve IPAM while allocating")
}
localNet := na.getNetwork(nAttach.Network.ID)
if localNet == nil {
return fmt.Errorf("could not find network allocator state for network %s", nAttach.Network.ID)
}
addresses := nAttach.Addresses
if len(addresses) == 0 {
addresses = []string{""}
}
for i, rawAddr := range addresses {
var addr net.IP
if rawAddr != "" {
var err error
addr, _, err = net.ParseCIDR(rawAddr)
if err != nil {
addr = net.ParseIP(rawAddr)
if addr == nil {
return errors.Wrapf(err, "could not parse address string %s", rawAddr)
}
}
}
for _, poolID := range localNet.pools {
var err error
ip, _, err = ipam.RequestAddress(poolID, addr, nil)
if err != nil && err != ipamapi.ErrNoAvailableIPs && err != ipamapi.ErrIPOutOfRange {
return errors.Wrap(err, "could not allocate IP from IPAM")
}
// If we got an address then we are done.
if err == nil {
ipStr := ip.String()
localNet.endpoints[ipStr] = poolID
addresses[i] = ipStr
nAttach.Addresses = addresses
return nil
}
}
}
return errors.New("could not find an available IP")
}
func (na *NetworkAllocator) freeDriverState(n *api.Network) error {
d, _, err := na.resolveDriver(n)
if err != nil {
return err
}
return d.NetworkFree(n.ID)
}
func (na *NetworkAllocator) allocateDriverState(n *api.Network) error {
d, dName, err := na.resolveDriver(n)
if err != nil {
return err
}
var options map[string]string
if n.Spec.DriverConfig != nil {
options = n.Spec.DriverConfig.Options
}
// Construct IPAM data for driver consumption.
ipv4Data := make([]driverapi.IPAMData, 0, len(n.IPAM.Configs))
for _, ic := range n.IPAM.Configs {
if ic.Family == api.IPAMConfig_IPV6 {
continue
}
_, subnet, err := net.ParseCIDR(ic.Subnet)
if err != nil {
return errors.Wrapf(err, "error parsing subnet %s while allocating driver state", ic.Subnet)
}
gwIP := net.ParseIP(ic.Gateway)
gwNet := &net.IPNet{
IP: gwIP,
Mask: subnet.Mask,
}
data := driverapi.IPAMData{
Pool: subnet,
Gateway: gwNet,
}
ipv4Data = append(ipv4Data, data)
}
ds, err := d.NetworkAllocate(n.ID, options, ipv4Data, nil)
if err != nil {
return err
}
// Update network object with the obtained driver state.
n.DriverState = &api.Driver{
Name: dName,
Options: ds,
}
return nil
}
// Resolve network driver
func (na *NetworkAllocator) resolveDriver(n *api.Network) (driverapi.Driver, string, error) {
dName := DefaultDriver
if n.Spec.DriverConfig != nil && n.Spec.DriverConfig.Name != "" {
dName = n.Spec.DriverConfig.Name
}
d, drvcap := na.drvRegistry.Driver(dName)
if d == nil {
var err error
err = na.loadDriver(dName)
if err != nil {
return nil, "", err
}
d, drvcap = na.drvRegistry.Driver(dName)
if d == nil {
return nil, "", fmt.Errorf("could not resolve network driver %s", dName)
}
}
if drvcap.DataScope != datastore.GlobalScope {
return nil, "", fmt.Errorf("swarm can allocate network resources only for global scoped networks. network driver (%s) is scoped %s", dName, drvcap.DataScope)
}
return d, dName, nil
}
func (na *NetworkAllocator) loadDriver(name string) error {
_, err := plugins.Get(name, driverapi.NetworkPluginEndpointType)
return err
}
// Resolve the IPAM driver
func (na *NetworkAllocator) resolveIPAM(n *api.Network) (ipamapi.Ipam, string, error) {
dName := ipamapi.DefaultIPAM
if n.Spec.IPAM != nil && n.Spec.IPAM.Driver != nil && n.Spec.IPAM.Driver.Name != "" {
dName = n.Spec.IPAM.Driver.Name
}
ipam, _ := na.drvRegistry.IPAM(dName)
if ipam == nil {
return nil, "", fmt.Errorf("could not resolve IPAM driver %s", dName)
}
return ipam, dName, nil
}
func (na *NetworkAllocator) freePools(n *api.Network, pools map[string]string) error {
ipam, _, err := na.resolveIPAM(n)
if err != nil {
return errors.Wrapf(err, "failed to resolve IPAM while freeing pools for network %s", n.ID)
}
releasePools(ipam, n.IPAM.Configs, pools)
return nil
}
func releasePools(ipam ipamapi.Ipam, icList []*api.IPAMConfig, pools map[string]string) {
for _, ic := range icList {
if err := ipam.ReleaseAddress(pools[ic.Subnet], net.ParseIP(ic.Gateway)); err != nil {
log.G(context.TODO()).WithError(err).Errorf("Failed to release address %s", ic.Subnet)
}
}
for k, p := range pools {
if err := ipam.ReleasePool(p); err != nil {
log.G(context.TODO()).WithError(err).Errorf("Failed to release pool %s", k)
}
}
}
func (na *NetworkAllocator) allocatePools(n *api.Network) (map[string]string, error) {
ipam, dName, err := na.resolveIPAM(n)
if err != nil {
return nil, err
}
// We don't support user defined address spaces yet so just
	// retrieve default address space names for the driver.
_, asName, err := na.drvRegistry.IPAMDefaultAddressSpaces(dName)
if err != nil {
return nil, err
}
pools := make(map[string]string)
if n.Spec.IPAM == nil {
n.Spec.IPAM = &api.IPAMOptions{}
}
ipamConfigs := make([]*api.IPAMConfig, len(n.Spec.IPAM.Configs))
copy(ipamConfigs, n.Spec.IPAM.Configs)
// If there is non-nil IPAM state always prefer those subnet
// configs over Spec configs.
if n.IPAM != nil {
ipamConfigs = n.IPAM.Configs
}
// Append an empty slot for subnet allocation if there are no
// IPAM configs from either spec or state.
if len(ipamConfigs) == 0 {
ipamConfigs = append(ipamConfigs, &api.IPAMConfig{Family: api.IPAMConfig_IPV4})
}
// Update the runtime IPAM configurations with initial state
n.IPAM = &api.IPAMOptions{
Driver: &api.Driver{Name: dName},
Configs: ipamConfigs,
}
for i, ic := range ipamConfigs {
poolID, poolIP, _, err := ipam.RequestPool(asName, ic.Subnet, ic.Range, nil, false)
if err != nil {
// Rollback by releasing all the resources allocated so far.
releasePools(ipam, ipamConfigs[:i], pools)
return nil, err
}
pools[poolIP.String()] = poolID
gwIP, _, err := ipam.RequestAddress(poolID, net.ParseIP(ic.Gateway), nil)
if err != nil {
// Rollback by releasing all the resources allocated so far.
releasePools(ipam, ipamConfigs[:i], pools)
return nil, err
}
if ic.Subnet == "" {
ic.Subnet = poolIP.String()
}
if ic.Gateway == "" {
ic.Gateway = gwIP.IP.String()
}
}
return pools, nil
}
func initializeDrivers(reg *drvregistry.DrvRegistry) error {
for _, i := range getInitializers() {
if err := reg.AddDriver(i.ntype, i.fn, nil); err != nil {
return err
}
}
return nil
}
| New |
http_server.py | # Run with Python 3
# Copyright 2019 VMware, Inc.
# SPDX-License-Identifier: BSD-2-Clause
"""\
HTTP server that can be used as a back end to Captive Web View applications.
The server is based around a Python3 Simple HTTP Server extended to pick files
from one of a number of directories.
The server will change directory to the common parent of all directories
specified.
"""
#
# Standard library imports, in alphabetic order.
#
# Module for command line switches.
# Tutorial: https://docs.python.org/3/howto/argparse.html
# Reference: https://docs.python.org/3/library/argparse.html
import argparse
#
# Module for HTTP server
# https://docs.python.org/3/library/http.server.html
from http.server import HTTPServer, SimpleHTTPRequestHandler
#
# JSON module.
# https://docs.python.org/3/library/json.html
import json
#
# Module for changing the current directory.
# https://docs.python.org/3/library/os.html#os.chdir
from os import chdir
#
# File path module.
# https://docs.python.org/3/library/os.path.html
import os.path
#
# Module for OO path handling.
# https://docs.python.org/3/library/pathlib.html
from pathlib import Path
#
# Module for recursive copy.
# https://docs.python.org/3/library/shutil.html
import shutil
#
# Module to create an HTTP server that spawns a thread for each request.
# https://docs.python.org/3/library/socketserver.html#module-socketserver
# The ThreadingMixIn is needed because of an apparent defect in Python, see:
# https://github.com/Microsoft/WSL/issues/1906
# https://bugs.python.org/issue31639
# The defect is fixed in Python 3.7.
# TOTH: https://github.com/sjjhsjjh/blender-driver/blob/master/blender_driver/application/http.py#L45
from socketserver import ThreadingMixIn
#
# Module for manipulation of the import path.
# https://docs.python.org/3/library/sys.html#sys.path
import sys
#
# Module for text dedentation.
# Only used for --help description.
# https://docs.python.org/3/library/textwrap.html
import textwrap
def project_path(*segments):
return Path(__file__).resolve().parents[1].joinpath(*segments)
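# Rough illustration (assumes this file sits one directory below the project
# root, e.g. <root>/harness/http_server.py; the actual layout is not
# guaranteed by this snippet):
#
#   project_path('forAndroid', 'Captivity')
#       -> Path('<root>/forAndroid/Captivity')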
class Server(ThreadingMixIn, HTTPServer):
@property
def directories(self):
return self._directories
@directories.setter
def directories(self, directories):
self._directories = tuple(directories)
@property
def relativePaths(self):
return self._relativePaths
def path_for_file(self, filename):
filename = os.path.basename(filename)
if filename == "":
filename = "index.html"
for index, directory in enumerate(self.directories):
if directory.joinpath(filename).is_file():
return self.relativePaths[index].joinpath(filename)
raise ValueError('File "{}" not found.'.format(filename))
def handle_command(self, commandObject, httpHandler):
raise NotImplementedError(
"Server method `handle_command` must be set by Main subclass.")
@property
def start_message(self):
"""Message suitable for logging when the server is started."""
def directory_lines(width=80, indent=2):
# This array accumulates diagnostic logs. It is yield'd after
# everything, unless the final yield is commented out.
transcript = ["\n"]
for directory in self.directories:
first = True
lineLen = 0
for index, leg in enumerate(directory.parts):
if leg == os.path.sep and index == 0:
continue
append = ''.join(("" if index == 0 else os.path.sep, leg))
appendLen = len(append)
while True:
lineStart = False
transcript.extend('{:2d} {:2d} "{}"\n'.format(
lineLen, appendLen, append))
if lineLen == 0:
line = "{:<{indent}}".format(
">" if first else "", indent=indent)
lineLen += len(line)
yield "\n"
yield line
lineStart = True
if lineLen + appendLen > width:
if lineStart:
yield append
first = False
lineLen = 0
if lineStart:
break
else:
lineLen += appendLen
yield append
break
# Uncomment the following line to get diagnostic logs.
# yield "".join(transcript)
#
# Get the actual port number and server address. The port number could
# be different, if zero was specified.
address = self.server_address
return 'Starting HTTP server at http://{}:{} for:{}\ncd {}'.format(
'localhost' if address[0] == '127.0.0.1' else address[0]
, int(address[1])
, "".join(tuple(directory_lines()))
, os.path.commonpath(self.directories))
def serve_forever(self):
chdir(os.path.commonpath(self.directories))
fromDir = Path.cwd()
self._relativePaths = tuple(
directory.relative_to(fromDir) for directory in self.directories)
return super().serve_forever()
class Handler(SimpleHTTPRequestHandler):
def do_GET(self):
responsePath = None
# Check for resources that are allowed to be requested from root. Chrome
# seems to request everything other than the favicon with a path though.
try:
parted = self.path.rpartition("/")
if parted[0] == "" and (parted[1] == "/" or parted[1] == ""):
self.log_message("%s", 'Root resource "{}".'.format(self.path))
responsePath = self.server.path_for_file(self.path)
except ValueError as error:
self.send_error(404, str(error))
return
# Check for other resources in allowed directories.
directoryIndex = None
if responsePath is None:
effectivePath = (
self.path[1:] if self.path.startswith("/") else self.path)
for index, prefix in enumerate(self.server.relativePaths):
if effectivePath.startswith(str(prefix)):
directoryIndex = index
break
if directoryIndex is None:
self.send_error(403)
return
# By now, it's determined that the path in the request is one that
# is allowed by the server. It might have been requested from a
# resource in one directory but be in another. The path_for_file()
# method takes care of that.
try:
responsePath = self.server.path_for_file(self.path)
except ValueError as error:
self.send_error(404, str(error))
return
self.log_message("%s", 'Response path "{}" "{}" {}.'.format(
self.path, responsePath, directoryIndex))
if responsePath is not None:
self.path = str(responsePath)
super().do_GET()
def _send_object(self, responseObject):
responseBytes = json.dumps(responseObject).encode()
self.log_message("%s", 'Response object {} {}.'.format(
responseObject, responseBytes))
self.send_response(200)
self.end_headers()
self.wfile.write(responseBytes)
def do_POST(self):
# TOTH: https://github.com/sjjhsjjh/blender-driver/blob/master/blender_driver/application/http.py#L263
contentLengthHeader = self.headers.get('Content-Length')
contentLength = (
0 if contentLengthHeader is None else int(contentLengthHeader))
contentJSON = (
self.rfile.read(contentLength).decode('utf-8') if contentLength > 0
else None)
content = None if contentJSON is None else json.loads(contentJSON)
self.log_message("%s", "POST object {}.".format(
json.dumps(content, indent=2)))
if content is None:
self.send_error(400)
else:
try:
response = self.server.handle_command(content, self)
if response is not None:
self._send_object(response)
except:
self.send_error(501)
raise
# self.path is ignored.
class Main:
def __init__(self, argv):
argumentParser = argparse.ArgumentParser(
# formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(__doc__))
argumentParser.add_argument(
'-p', '--port', type=int, default=8001, help=
'Port number. Default: 8001.')
argumentParser.add_argument(
dest='directories', metavar='directory', type=str, nargs='+', help=
        'Directory from which to serve web content.')
self.arguments = argumentParser.parse_args(argv[1:])
self.server = Server(('localhost', self.arguments.port), Handler)
self.server.handle_command = self.handle_command
def __call__(self):
self.server.directories = (
*(
Path(directory).resolve()
for directory in self.arguments.directories
), project_path(
'forAndroid', 'captivewebview', 'src', 'main', 'assets',
'library')
)
for directory in self.server.directories:
if not directory.is_dir():
raise ValueError(f'Not a directory "{directory}".')
print(self.server.start_message)
self.server.serve_forever()
def handle_command(self, commandObject, httpHandler):
raise NotImplementedError(
"Method `handle_command` must be implemented by Main subclass.")
class CaptivityMain(Main):
def __init__(self, argv):
argv = (*argv, str(project_path(
'forAndroid', 'Captivity', 'src', 'main', 'assets', 'UserInterface'
)))
return super().__init__(argv)
# Override. |
# Following code would send a redirect to the client. Unfortunately,
# that causes the client to redirect the POST, instead of it loading
# another page instead.
#
# if "load" in commandObject:
# responseBytes = json.dumps({}).encode()
# httpHandler.log_message("%s", 'Redirect {}.'.format(
# responseBytes))
# httpHandler.send_response(303, json.dumps(commandObject))
# httpHandler.send_header('Location', commandObject["load"])
# httpHandler.end_headers()
# httpHandler.wfile.write(responseBytes)
# return None
# TOTH for ** syntax: https://stackoverflow.com/a/26853961
return {
**commandObject,
"confirm": " ".join((self.__class__.__name__,
httpHandler.server_version,
httpHandler.sys_version))
}
if __name__ == '__main__':
sys.exit(CaptivityMain(sys.argv)()) | def handle_command(self, commandObject, httpHandler): |
client_test.go | package bbc
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"reflect"
"runtime"
"testing"
"github.com/kougazhang/bce-sdk-go/model"
"github.com/kougazhang/bce-sdk-go/util/log"
)
var (
BBC_CLIENT *Client
BBC_TestBbcId string
BBC_TestImageId string
BBC_TestFlavorId string
BBC_TestRaidId string
BBC_TestZoneName string
BBC_TestSubnetId string
BBC_TestName string
BBC_TestAdminPass string
BBC_TestDeploySetId string
BBC_TestClientToken string
BBC_TestSecurityGroupId string
BBC_TestTaskId string
BBC_TestErrResult string
BBC_TestRuleId string
)
// For security reasons, the AK/SK should not be hard-coded here.
type Conf struct {
AK string
SK string
Endpoint string
}
func init() {
_, f, _, _ := runtime.Caller(0)
for i := 0; i < 6; i++ {
f = filepath.Dir(f)
}
conf := filepath.Join(f, "config.json")
fmt.Println(conf)
fp, err := os.Open(conf)
if err != nil {
fmt.Println("config json file of ak/sk not given: ", conf)
log.Fatal("config json file of ak/sk not given:", conf)
os.Exit(1)
}
decoder := json.NewDecoder(fp)
confObj := &Conf{}
decoder.Decode(confObj)
BBC_TestFlavorId = "flavor-id"
BBC_TestImageId = "image-id"
BBC_TestRaidId = "raid-id"
BBC_TestZoneName = "zone-name"
BBC_TestSubnetId = "subnet-id"
BBC_TestName = "sdkTest"
BBC_TestAdminPass = "123@adminPass"
BBC_TestDeploySetId = "deployset-id"
BBC_TestBbcId = "bbc_id"
BBC_TestSecurityGroupId = "bbc-security-group-id"
BBC_TestTaskId = "task-id"
BBC_TestErrResult = "err-result"
BBC_TestRuleId = "rule-id"
BBC_CLIENT, _ = NewClient(confObj.AK, confObj.SK, confObj.Endpoint)
log.SetLogLevel(log.WARN)
//log.SetLogLevel(log.DEBUG)
}
// ExpectEqual is a helper function for testing each case
func ExpectEqual(alert func(format string, args ...interface{}),
expected interface{}, actual interface{}) bool {
expectedValue, actualValue := reflect.ValueOf(expected), reflect.ValueOf(actual)
equal := false
switch {
case expected == nil && actual == nil:
return true
case expected != nil && actual == nil:
equal = expectedValue.IsNil()
case expected == nil && actual != nil:
equal = actualValue.IsNil()
default:
if actualType := reflect.TypeOf(actual); actualType != nil {
if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
equal = reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
}
}
}
if !equal {
_, file, line, _ := runtime.Caller(1)
alert("%s:%d: missmatch, expect %v but %v", file, line, expected, actual)
return false
}
return true
}
func TestCreateInstance(t *testing.T) {
InternalIps := []string{"ip"}
createInstanceArgs := &CreateInstanceArgs{
FlavorId: BBC_TestFlavorId,
ImageId: BBC_TestImageId,
RaidId: BBC_TestRaidId,
RootDiskSizeInGb: 40,
PurchaseCount: 1,
AdminPass: "AdminPass",
ZoneName: BBC_TestZoneName,
SubnetId: BBC_TestSubnetId,
SecurityGroupId: BBC_TestSecurityGroupId,
ClientToken: BBC_TestClientToken,
Billing: Billing{
PaymentTiming: PaymentTimingPostPaid,
},
DeploySetId: BBC_TestDeploySetId,
Name: BBC_TestName,
EnableNuma: false,
InternalIps: InternalIps,
Tags: []model.TagModel{
{
TagKey: "tag1",
TagValue: "var1",
},
},
}
res, err := BBC_CLIENT.CreateInstance(createInstanceArgs)
fmt.Println(res)
ExpectEqual(t.Errorf, err, nil)
}
func TestListInstances(t *testing.T) {
listArgs := &ListInstancesArgs{
MaxKeys: 500,
}
res, err := BBC_CLIENT.ListInstances(listArgs)
fmt.Println(res)
ExpectEqual(t.Errorf, err, nil)
}
func TestGetInstanceDetail(t *testing.T) {
res, err := BBC_CLIENT.GetInstanceDetail("i-4PvLVv37")
fmt.Println(res.Status)
ExpectEqual(t.Errorf, err, nil)
}
func TestGetInstanceDetailWithDeploySetAndFailed(t *testing.T) {
res, err := BBC_CLIENT.GetInstanceDetailWithDeploySetAndFailed(BBC_TestBbcId, false, true)
fmt.Println(res)
ExpectEqual(t.Errorf, err, nil)
}
func TestStopInstance(t *testing.T) {
err := BBC_CLIENT.StopInstance(BBC_TestBbcId, false)
ExpectEqual(t.Errorf, err, nil)
}
func TestStartInstance(t *testing.T) {
err := BBC_CLIENT.StartInstance(BBC_TestBbcId)
ExpectEqual(t.Errorf, err, nil)
}
func TestRebootInstance(t *testing.T) {
err := BBC_CLIENT.RebootInstance(BBC_TestBbcId, true)
ExpectEqual(t.Errorf, err, nil)
}
func TestModifyInstanceName(t *testing.T) {
modifyInstanceNameArgs := &ModifyInstanceNameArgs{
Name: "new_bbc_name",
}
err := BBC_CLIENT.ModifyInstanceName(BBC_TestBbcId, modifyInstanceNameArgs)
ExpectEqual(t.Errorf, err, nil)
}
func TestModifyInstanceDesc(t *testing.T) {
modifyInstanceDescArgs := &ModifyInstanceDescArgs{
Description: "new_bbc_description_02",
ClientToken: "be31b98c-5e42-4838-9230-9be700de5a20",
}
err := BBC_CLIENT.ModifyInstanceDesc(BBC_TestBbcId, modifyInstanceDescArgs)
ExpectEqual(t.Errorf, err, nil)
}
func TestRebuildInstance(t *testing.T) {
rebuildArgs := &RebuildInstanceArgs{
ImageId: BBC_TestImageId,
AdminPass: BBC_TestAdminPass,
IsPreserveData: true,
RaidId: BBC_TestRaidId,
SysRootSize: 20,
}
err := BBC_CLIENT.RebuildInstance(BBC_TestBbcId, true, rebuildArgs)
ExpectEqual(t.Errorf, err, nil)
}
func TestBatchRebuildInstances(t *testing.T) {
rebuildArgs := &RebuildBatchInstanceArgs{
ImageId: "ImageId",
AdminPass: "123qaz!@#",
InstanceIds: []string{"BBC_TestBbcId"},
IsPreserveData: true,
RaidId: BBC_TestRaidId,
SysRootSize: 20,
}
result, err := BBC_CLIENT.BatchRebuildInstances(rebuildArgs)
fmt.Println(result)
ExpectEqual(t.Errorf, err, nil)
}
func TestReleaseInstance(t *testing.T) {
err := BBC_CLIENT.DeleteInstance(BBC_TestBbcId)
ExpectEqual(t.Errorf, err, nil)
}
func TestModifyInstancePassword(t *testing.T) {
modifyInstancePasswordArgs := &ModifyInstancePasswordArgs{
AdminPass: BBC_TestAdminPass,
}
err := BBC_CLIENT.ModifyInstancePassword(BBC_TestBbcId, modifyInstancePasswordArgs)
ExpectEqual(t.Errorf, err, nil)
}
func TestGetVpcSubnet(t *testing.T) {
getVpcSubnetArgs := &GetVpcSubnetArgs{
BbcIds: []string{BBC_TestBbcId},
}
result, err := BBC_CLIENT.GetVpcSubnet(getVpcSubnetArgs)
fmt.Println(result)
ExpectEqual(t.Errorf, err, nil)
}
func TestBatchAddIp(t *testing.T) {
privateIps := []string{"192.168.200.25"}
batchAddIpArgs := &BatchAddIpArgs{
InstanceId: BBC_TestBbcId,
PrivateIps: privateIps,
ClientToken: "be31b98c-5e41-4838-9230-9be700de5a20",
}
result, err := BBC_CLIENT.BatchAddIP(batchAddIpArgs)
fmt.Println(result)
ExpectEqual(t.Errorf, err, nil)
}
func TestBatchAddIpCrossSubnet(t *testing.T) {
batchAddIpCrossSubnetArgs := &BatchAddIpCrossSubnetArgs{
InstanceId: BBC_TestBbcId,
SingleEniAndSubentIps: []SingleEniAndSubentIp{
{
EniId: "eni-cc31j8i1nq5f",
IpAndSubnets: []IpAndSubnet{
{
PrivateIp: "192.168.0.6",
SubnetId: "sbn-af5iegk24se1",
},
},
},
},
ClientToken: "be31b98c-5e41-4838-9230-9be700de5a20",
}
result, err := BBC_CLIENT.BatchAddIPCrossSubnet(batchAddIpCrossSubnetArgs)
fmt.Println(result)
ExpectEqual(t.Errorf, err, nil)
}
func TestBatchDelIp(t *testing.T) {
privateIps := []string{"192.168.1.25"}
batchDelIpArgs := &BatchDelIpArgs{
InstanceId: BBC_TestBbcId,
PrivateIps: privateIps,
ClientToken: "be31b98c-5e41-4e38-9230-9be700de5120",
}
err := BBC_CLIENT.BatchDelIP(batchDelIpArgs)
ExpectEqual(t.Errorf, err, nil)
}
func TestBindTags(t *testing.T) {
bindTagsArgs := &BindTagsArgs{
ChangeTags: []model.TagModel{
{
TagKey: "BBCTestKey",
TagValue: "BBCTestValue",
},
},
}
err := BBC_CLIENT.BindTags(BBC_TestBbcId, bindTagsArgs)
ExpectEqual(t.Errorf, err, nil)
}
func TestUnbindTags(t *testing.T) {
unbindTagsArgs := &UnbindTagsArgs{
ChangeTags: []model.TagModel{
{
TagKey: "BCC",
TagValue: "aaa",
},
},
}
err := BBC_CLIENT.UnbindTags(BBC_TestBbcId, unbindTagsArgs)
ExpectEqual(t.Errorf, err, nil)
}
func TestListFlavors(t *testing.T) {
res, err := BBC_CLIENT.ListFlavors()
fmt.Println(res)
ExpectEqual(t.Errorf, err, nil)
}
func TestGetFlavorDetail(t *testing.T) {
testFlavorId := BBC_TestFlavorId
rep, err := BBC_CLIENT.GetFlavorDetail(testFlavorId)
fmt.Println(rep)
ExpectEqual(t.Errorf, err, nil)
}
func TestGetFlavorRaid(t *testing.T) {
testFlavorId := "BBC-G4-01S"
rep, err := BBC_CLIENT.GetFlavorRaid(testFlavorId)
fmt.Println(rep)
ExpectEqual(t.Errorf, err, nil)
}
func TestCreateImageFromInstanceId(t *testing.T) {
testInstanceId := BBC_TestBbcId
testImageName := "testCreateImage"
queryArgs := &CreateImageArgs{
ImageName: testImageName,
InstanceId: testInstanceId,
}
rep, err := BBC_CLIENT.CreateImageFromInstanceId(queryArgs)
fmt.Println(rep)
ExpectEqual(t.Errorf, err, nil)
}
func TestListImage(t *testing.T) {
queryArgs := &ListImageArgs{}
rep, err := BBC_CLIENT.ListImage(queryArgs)
fmt.Println(rep)
ExpectEqual(t.Errorf, err, nil)
}
func TestGetImageDetail(t *testing.T) {
testImageId := ""
rep, err := BBC_CLIENT.GetImageDetail(testImageId)
fmt.Println(rep)
ExpectEqual(t.Errorf, err, nil)
}
func TestDeleteImage(t *testing.T) {
testImageId := BBC_TestImageId
err := BBC_CLIENT.DeleteImage(testImageId)
ExpectEqual(t.Errorf, err, nil)
}
func TestGetOperationLog(t *testing.T) {
queryArgs := &GetOperationLogArgs{
StartTime: "2021-03-28T15:00:27Z",
EndTime: "2021-03-30T15:00:27Z",
}
rep, err := BBC_CLIENT.GetOperationLog(queryArgs)
fmt.Println(rep)
ExpectEqual(t.Errorf, err, nil)
}
func TestCreateDeploySet(t *testing.T) {
testDeploySetName := "testName"
testDeployDesc := "testDesc"
testConcurrency := 1
testStrategy := "tor_ha"
queryArgs := &CreateDeploySetArgs{
Strategy: testStrategy,
Concurrency: testConcurrency,
Name: testDeploySetName,
Desc: testDeployDesc,
}
rep, err := BBC_CLIENT.CreateDeploySet(queryArgs)
fmt.Println(rep)
ExpectEqual(t.Errorf, err, nil)
}
func TestListDeploySets(t *testing.T) {
rep, err := BBC_CLIENT.ListDeploySets()
fmt.Println(rep)
ExpectEqual(t.Errorf, err, nil)
}
func TestListDeploySetsPage(t *testing.T) {
queryArgs := &ListDeploySetsArgs{
Strategy: "TOR_HA", // RACK_HA or TOR_HA
MaxKeys: 100,
Marker: "your-marker",
}
rep, err := BBC_CLIENT.ListDeploySetsPage(queryArgs)
fmt.Println(rep)
ExpectEqual(t.Errorf, err, nil)
}
func TestGetDeploySet(t *testing.T) {
testDeploySetID := BBC_TestDeploySetId
rep, err := BBC_CLIENT.GetDeploySet(testDeploySetID)
fmt.Println(rep)
ExpectEqual(t.Errorf, err, nil)
}
func TestDeleteDeploySet(t *testing.T) {
testDeleteDeploySetId := BBC_TestDeploySetId
err := BBC_CLIENT.DeleteDeploySet(testDeleteDeploySetId)
fmt.Println(err)
ExpectEqual(t.Errorf, err, nil)
}
func TestBindSecurityGroups(t *testing.T) |
func TestUnBindSecurityGroups(t *testing.T) {
args := &UnBindSecurityGroupsArgs{
InstanceId: "",
SecurityGroupId: "",
}
err := BBC_CLIENT.UnBindSecurityGroups(args)
ExpectEqual(t.Errorf, err, nil)
}
func TestGetFlavorZone(t *testing.T) {
flavorId := "BBC-G3-01"
queryArgs := &ListFlavorZonesArgs{
FlavorId: flavorId,
}
if res, err := BBC_CLIENT.ListFlavorZones(queryArgs); err != nil {
fmt.Println("Get flavor zoneName failed: ", err)
} else {
fmt.Println("Get flavor zoneName success, result: ", res)
}
}
func TestListZoneFlavors(t *testing.T) {
zoneName := "cn-bj-b"
queryArgs := &ListZoneFlavorsArgs{
ZoneName: zoneName,
}
if res, err := BBC_CLIENT.ListZoneFlavors(queryArgs); err != nil {
fmt.Println("Get the specific zone flavor failed: ", err)
} else {
fmt.Println("Get the specific zone flavor success, result: ", res)
}
}
func TestGetCommonImage(t *testing.T) {
flavorIds := []string{"BBC-S3-02"}
queryArgs := &GetFlavorImageArgs{
FlavorIds: flavorIds,
}
if res, err := BBC_CLIENT.GetCommonImage(queryArgs); err != nil {
fmt.Println("Get specific flavor common image failed: ", err)
} else {
fmt.Println("Get specific flavor common image success, result: ", res)
}
}
func TestGetCustomImage(t *testing.T) {
flavorIds := []string{"flavorId"}
queryArgs := &GetFlavorImageArgs{
FlavorIds: flavorIds,
}
if res, err := BBC_CLIENT.GetCustomImage(queryArgs); err != nil {
fmt.Println("Get specific flavor common image failed: ", err)
} else {
fmt.Println("Get specific flavor common image success, result: ", res)
}
}
func TestShareImage(t *testing.T) {
args := &SharedUser{
AccountId: "id",
}
err := BBC_CLIENT.ShareImage(BBC_TestImageId, args)
ExpectEqual(t.Errorf, err, nil)
}
func TestUnShareImage(t *testing.T) {
args := &SharedUser{
AccountId: "id",
}
err := BBC_CLIENT.UnShareImage(BBC_TestImageId, args)
ExpectEqual(t.Errorf, err, nil)
}
func TestGetInstanceEni(t *testing.T) {
instanceId := "instanceId"
if res, err := BBC_CLIENT.GetInstanceEni(instanceId); err != nil {
fmt.Println("Get specific instance eni failed: ", err)
} else {
fmt.Println("Get specific instance eni success, result: ", res)
}
}
func TestGetInstanceStock(t *testing.T) {
args := &CreateInstanceStockArgs{
FlavorId: "BBC-G4-PDDAS",
ZoneName: "cn-su-a",
}
if res, err := BBC_CLIENT.GetInstanceCreateStock(args); err != nil {
fmt.Println("Get specific instance eni failed: ", err)
} else {
fmt.Println("Get specific instance eni success, result: ", res)
}
}
func TestListRepairTasks(t *testing.T) {
listArgs := &ListRepairTaskArgs{
MaxKeys: 100,
}
res, err := BBC_CLIENT.ListRepairTasks(listArgs)
fmt.Println(res)
ExpectEqual(t.Errorf, err, nil)
}
func TestListClosedRepairTasks(t *testing.T) {
listArgs := &ListClosedRepairTaskArgs{
MaxKeys: 100,
}
res, err := BBC_CLIENT.ListClosedRepairTasks(listArgs)
fmt.Println(res)
ExpectEqual(t.Errorf, err, nil)
}
func TestGetTaskDetail(t *testing.T) {
res, err := BBC_CLIENT.GetRepairTaskDetail(BBC_TestTaskId)
fmt.Println(res)
ExpectEqual(t.Errorf, err, nil)
}
func TestAuthorizeTask(t *testing.T) {
taskIdArgs := &TaskIdArgs{
TaskId: BBC_TestTaskId,
}
err := BBC_CLIENT.AuthorizeRepairTask(taskIdArgs)
ExpectEqual(t.Errorf, err, nil)
}
func TestUnAuthorizeTask(t *testing.T) {
taskIdArgs := &TaskIdArgs{
TaskId: BBC_TestTaskId,
}
err := BBC_CLIENT.UnAuthorizeRepairTask(taskIdArgs)
ExpectEqual(t.Errorf, err, nil)
}
func TestConfirmTask(t *testing.T) {
taskIdArgs := &TaskIdArgs{
TaskId: BBC_TestTaskId,
}
err := BBC_CLIENT.ConfirmRepairTask(taskIdArgs)
ExpectEqual(t.Errorf, err, nil)
}
func TestDisConfirmTask(t *testing.T) {
disconfirmTaskArgs := &DisconfirmTaskArgs{
TaskId: BBC_TestTaskId,
NewErrResult: BBC_TestErrResult,
}
err := BBC_CLIENT.DisConfirmRepairTask(disconfirmTaskArgs)
ExpectEqual(t.Errorf, err, nil)
}
func TestGetRepairRecord(t *testing.T) {
taskIdArgs := &TaskIdArgs{
TaskId: BBC_TestTaskId,
}
res, err := BBC_CLIENT.GetRepairTaskRecord(taskIdArgs)
fmt.Println(res)
ExpectEqual(t.Errorf, err, nil)
}
func TestListRule(t *testing.T) {
args := &ListRuleArgs{
Marker: "your-marker",
MaxKeys: 100,
RuleName: "your-choose-rule-name",
RuleId: "your-choose-rule-id",
}
res, err := BBC_CLIENT.ListRule(args)
ExpectEqual(t.Errorf, err, nil)
fmt.Println(res)
}
func TestGetRuleDetail(t *testing.T) {
ruleId := BBC_TestRuleId
res, err := BBC_CLIENT.GetRuleDetail(ruleId)
ExpectEqual(t.Errorf, err, nil)
fmt.Println(res)
}
func TestCreateRule(t *testing.T) {
args := &CreateRuleArgs{
RuleName: "goSdkRule",
Limit: 2,
Enabled: 1,
TagStr: "msinstancekey:msinstancevalue",
Extra: "extra",
}
res, err := BBC_CLIENT.CreateRule(args)
ExpectEqual(t.Errorf, err, nil)
fmt.Println(res)
}
func TestDeleteRule(t *testing.T) {
args := &DeleteRuleArgs{
RuleId: BBC_TestRuleId,
}
err := BBC_CLIENT.DeleteRule(args)
ExpectEqual(t.Errorf, err, nil)
}
func TestDisableRule(t *testing.T) {
args := &DisableRuleArgs{
RuleId: BBC_TestRuleId,
}
err := BBC_CLIENT.DisableRule(args)
ExpectEqual(t.Errorf, err, nil)
}
func TestEnableRule(t *testing.T) {
args := &EnableRuleArgs{
RuleId: BBC_TestRuleId,
}
err := BBC_CLIENT.EnableRule(args)
ExpectEqual(t.Errorf, err, nil)
}
func TestBatchCreateAutoRenewRules(t *testing.T) {
bccAutoRenewArgs := &BbcCreateAutoRenewArgs{
InstanceId: BBC_TestBbcId,
RenewTimeUnit: "month",
RenewTime: 1,
}
err := BBC_CLIENT.BatchCreateAutoRenewRules(bccAutoRenewArgs)
ExpectEqual(t.Errorf, err, nil)
}
func TestBatchDeleteAutoRenewRules(t *testing.T) {
bccAutoRenewArgs := &BbcDeleteAutoRenewArgs{
InstanceId: BBC_TestBbcId,
}
err := BBC_CLIENT.BatchDeleteAutoRenewRules(bccAutoRenewArgs)
ExpectEqual(t.Errorf, err, nil)
}
func TestDeleteInstanceIngorePayment(t *testing.T) {
args := &DeleteInstanceIngorePaymentArgs{
InstanceId: "InstanceId",
RelatedReleaseFlag: true,
}
if res, err := BBC_CLIENT.DeleteInstanceIngorePayment(args); err != nil {
fmt.Println("delete instance failed: ", err)
} else {
fmt.Println("delelte instance success, result: ", res)
}
}
func TestListCDSVolume(t *testing.T) {
queryArgs := &ListCDSVolumeArgs{
MaxKeys: 100,
InstanceId: "InstanceId",
Marker: "VolumeId",
ZoneName: "zoneName",
}
if res, err := BBC_CLIENT.ListCDSVolume(queryArgs); err != nil {
fmt.Println("list volume failed: ", err)
} else {
fmt.Println("list volume success, result: ", res)
}
}
func TestDeleteInstanceV2(t *testing.T) {
instanceIds := []string{"instanceId"}
queryArgs := &DeleteInstanceArgs{
BbcRecycleFlag: true,
InstanceIds: instanceIds,
}
if err := BBC_CLIENT.DeleteInstances(queryArgs); err != nil {
fmt.Println("delete instance failed: ", err)
} else {
fmt.Println("delete instance success")
}
}
func TestListRecycledInstances(t *testing.T) {
queryArgs := &ListRecycledInstancesArgs{
Marker: "your marker",
PaymentTiming: "your paymentTiming",
RecycleBegin: "RecycleBegin", // recycled begin time ,eg: 2020-11-23T17:18:24Z
RecycleEnd: "RecycleEnd",
MaxKeys: 10,
InstanceId: "InstanceId",
Name: "InstanceName",
}
if res, err := BBC_CLIENT.ListRecycledInstances(queryArgs); err != nil {
fmt.Println("list recycled bbc failed: ", err)
} else {
fmt.Println("list recycled bbc success, result: ", res)
}
}
func TestInstanceChangeSubnet(t *testing.T) {
args := &InstanceChangeSubnetArgs{
InstanceId: "i-DFlNGqLf",
SubnetId: "sbn-z1y9tcedqnh6",
InternalIp: "10.10.10.1",
Reboot: true,
}
err := BBC_CLIENT.InstanceChangeSubnet(args)
ExpectEqual(t.Errorf, err, nil)
}
func TestInstanceChangeVpc(t *testing.T) {
args := &InstanceChangeVpcArgs{
InstanceId: "i-xxxxx",
SubnetId: "sbn-zyyyyyyy",
Reboot: true,
}
err := BBC_CLIENT.InstanceChangeVpc(args)
ExpectEqual(t.Errorf, err, nil)
}
func TestRecoveryInstances(t *testing.T) {
instanceIds := []string{"instanceId"}
queryArgs := &RecoveryInstancesArgs{
InstanceIds: instanceIds,
}
if err := BBC_CLIENT.RecoveryInstances(queryArgs); err != nil {
fmt.Println("recovery instance failed: ", err)
} else {
fmt.Println("recovery instance success")
}
}
func TestGetInstanceVnc(t *testing.T) {
res, err := BBC_CLIENT.GetInstanceVNC(BBC_TestBbcId)
ExpectEqual(t.Errorf, err, nil)
fmt.Println("get instance vnc success: ", res.VNCUrl)
}
func TestGetBbcStockWithDeploySet(t *testing.T) {
queryArgs := &GetBbcStockArgs{
Flavor: "BBC-S3-02",
DeploySetIds: []string{"dset-0RHZYUfF"},
}
if res, err := BBC_CLIENT.GetBbcStockWithDeploySet(queryArgs); err != nil {
fmt.Println("get bbc stock failed: ", err)
} else {
data, e := json.Marshal(res)
if e != nil {
fmt.Println("json marshal failed!")
return
}
fmt.Printf("get bbc stock, result : %s", data)
}
} | {
instanceIds := []string{""}
sg := []string{""}
args := &BindSecurityGroupsArgs{
InstanceIds: instanceIds,
SecurityGroupIds: sg,
}
err := BBC_CLIENT.BindSecurityGroups(args)
ExpectEqual(t.Errorf, err, nil)
} |
trade_api.py | import requests
import urllib
import time
import hashlib
import hmac
import itertools
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from .api import Base
from .errors import ApiError, ArgumentError
def check_values(value, arg, arg_value):
if type(value) == type:
if type(arg_value) != value:
raise ArgumentError(u"Type of argument {} is invalid. It should be {}".format(arg, value))
elif arg_value not in value:
raise ArgumentError(u"Value of argument {} is invalid. It should be one of {}".format(arg, value))
def check_args(kwargs, required_parameters, optional_parameters={}):
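# Verify that every required parameter is present, then validate each supplied
# argument against its expected type or set of allowed values.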
args = kwargs.keys()
required_args = required_parameters.keys()
optional_args = optional_parameters.keys()
missing_args = list(set(required_args) - set(args))
if len(missing_args) > 0:
raise ArgumentError(u"Parameter {} is required".format(missing_args))
for arg_name, arg_value in kwargs.items():
if arg_name in optional_args:
optional_value = optional_parameters[arg_name]
check_values(optional_value, arg_name, arg_value)
elif arg_name in required_args:
required_value = required_parameters[arg_name]
check_values(required_value, arg_name, arg_value)
class TradeApi(Base):
def __init__(self, identifier=None, secret=None):
self.id = identifier
self.secret = secret
self.path = "/tapi/v3/"
self.available_pairs = ["BRLBTC", "BRLLTC", "BRLBCH", "BRLXRP", "BRLETH", "BRLUSDC", "BRLMBPRK01", "BRLMBPRK02", "BRLMBPRK03", "BRLMBPRK04", "BRLMBCONS01"]
Base.__init__(self)
def list_system_messages(self, level="INFO"):
"""https://www.mercadobitcoin.com.br/trade-api/#list_system_messages"""
payload = { "level": level }
check_args(payload, { "level": ["INFO", "WARNING", "ERROR"] })
return self.__check_response(self.__post_tapi("list_system_messages", payload))
def get_account_info(self):
"""https://www.mercadobitcoin.com.br/trade-api/#get_account_info"""
return self.__check_response(self.__post_tapi("get_account_info"))
def get_order(self, **kwargs):
"""https://www.mercadobitcoin.com.br/trade-api/#get_order"""
check_args(kwargs, { "coin_pair": self.available_pairs, "order_id": int })
return self.__check_response(self.__post_tapi("get_order", kwargs))
def list_orders(self, **kwargs):
"""https://www.mercadobitcoin.com.br/trade-api/#list_orders"""
check_args(kwargs, { "coin_pair": self.available_pairs }, { "order_type": [1, 2], "status_list": str, "has_fills": [True, False], "from_id": int, "to_id": int, "from_timestamp": str, "to_timestamp": str })
return self.__check_response(self.__post_tapi("list_orders", kwargs ))
def list_orderbook(self, **kwargs):
"""https://www.mercadobitcoin.com.br/trade-api/#list_orderbook"""
check_args(kwargs, { "coin_pair": self.available_pairs }, { "full": [True, False] })
return self.__check_response(self.__post_tapi("list_orderbook", kwargs ))
def place_buy_order(self, **kwargs):
"""https://www.mercadobitcoin.com.br/trade-api/#place_buy_order"""
check_args(kwargs, { "coin_pair": self.available_pairs, "quantity": str, "limit_price": str })
return self.__check_response(self.__post_tapi("place_buy_order", kwargs ))
def place_sell_order(self, **kwargs):
|
def cancel_order(self, **kwargs):
"""https://www.mercadobitcoin.com.br/trade-api/#cancel_order"""
check_args(kwargs, { "coin_pair": self.available_pairs, "order_id": int })
return self.__check_response(self.__post_tapi("cancel_order", kwargs ))
def get_withdrawal(self, **kwargs):
"""https://www.mercadobitcoin.com.br/trade-api/#get_withdrawal"""
check_args(kwargs, { "coin": self.available_pairs, "withdrawal_id": int })
return self.__check_response(self.__post_tapi("get_withdrawal", kwargs ))
def withdraw_coin_brl(self, **kwargs):
"""https://www.mercadobitcoin.com.br/trade-api/#withdraw_coin"""
check_args(kwargs, { "coin": ["BRL"], "quantity": str, "account_ref": str }, { "description": str })
return self.__check_response(self.__post_tapi("withdraw_coin", kwargs ))
def withdraw_coin(self, **kwargs):
"""https://www.mercadobitcoin.com.br/trade-api/#withdraw_coin"""
check_args(kwargs, { "coin": ["BTC", "LTC", "BCH", "ETH"], "quantity": str, "address": str, "tx_fee": str }, { "description": str })
return self.__check_response(self.__post_tapi("withdraw_coin", kwargs ))
def withdraw_coin_xrp(self, **kwargs):
"""https://www.mercadobitcoin.com.br/trade-api/#withdraw_coin"""
check_args(kwargs, { "coin": ["XRP"], "quantity": str, "address": str, "tx_fee": str, "destination_tag": int }, { "description": str })
return self.__check_response(self.__post_tapi("withdraw_coin", kwargs ))
def __check_response(self, response):
if response["status_code"] == 100:
return response["response_data"]
else:
raise ApiError(response["error_message"], response["status_code"])
def __post_tapi(self, method, params={}):
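# Build the TAPI payload: the method name plus a microsecond-timestamp nonce,
# authenticated via the TAPI-ID/TAPI-MAC headers and POSTed form-encoded.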
payload = { "tapi_method": method, "tapi_nonce": str(int(time.time()*1000000))}
payload.update(params)
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"TAPI-ID": self.id,
"TAPI-MAC": self.__signature(payload)
}
response = requests.post("https://{}{}".format(self.host, self.path), headers=headers, data=payload)
return response.json()
def __signature(self, payload):
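# HMAC-SHA512 over "<path>?<urlencoded payload>" keyed with the API secret;
# in Python 3, hmac.new requires a bytes key, so the secret should be provided as bytes.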
signature = hmac.new(self.secret, digestmod=hashlib.sha512)
params = self.path + '?' + urlencode(payload)
signature.update(params.encode('utf-8'))
return signature.hexdigest()
| """https://www.mercadobitcoin.com.br/trade-api/#place_sell_order"""
check_args(kwargs, { "coin_pair": self.available_pairs, "quantity": str, "limit_price": str })
return self.__check_response(self.__post_tapi("place_sell_order", kwargs )) |
day13.rs | extern crate utils;
use std::env;
use std::io::{self, BufReader};
use std::io::prelude::*;
use std::fs::File;
use utils::*;
#[derive(Debug)]
struct | {
earliest_ts: u64,
bus_ids: Vec<Option<u64>>
}
fn part1(input: &Input) -> u64 {
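// Among the listed buses, find the one with the shortest wait after
// earliest_ts; the answer is that wait multiplied by the bus id.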
let (least_wait, bus_id) = input.bus_ids.iter()
.flatten()
.fold((std::u64::MAX, 0), |(least_wait_time, least_wait_bus_id), &bus_id| {
let time_left = bus_id - (input.earliest_ts % bus_id);
if time_left < least_wait_time {
(time_left, bus_id)
} else {
(least_wait_time, least_wait_bus_id)
}
});
least_wait * bus_id
}
fn part2(input: &Input) -> u64 {
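// Sieve over the buses in order, keeping (start, step) describing timestamps
// that satisfy every bus seen so far; for each new bus, two consecutive hits
// give the combined period (a CRT-style search).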
let mut start = 0;
let mut step = input.bus_ids[0].unwrap();
for i in 1..input.bus_ids.len() {
if let Some(bus_id) = input.bus_ids[i] {
let bus_id = bus_id;
let mut found = None;
for t in (start..).step_by(step as usize) {
if (t + i as u64) % bus_id == 0 {
if let Some(found) = found {
step = t - found;
start = found + step;
break;
} else {
if i == input.bus_ids.len() - 1 {
return t;
}
found = Some(t)
}
}
}
}
}
0
}
fn main() {
measure(|| {
let input = input().expect("Input failed");
println!("Part1: {}", part1(&input));
println!("Part2: {}", part2(&input));
});
}
fn read_input<R: Read>(reader: BufReader<R>) -> io::Result<Input> {
let mut lines = reader.lines();
Ok(Input {
earliest_ts: lines.next().unwrap()?.parse::<u64>().unwrap(),
bus_ids: lines.next().unwrap()?.split(',').map(|i| i.parse::<u64>().ok()).collect::<Vec<_>>()
})
}
fn input() -> io::Result<Input> {
let f = File::open(env::args().skip(1).next().expect("No input file given"))?;
read_input(BufReader::new(f))
}
#[cfg(test)]
mod tests {
use super::*;
const INPUT: &'static str =
"939
7,13,x,x,59,x,31,19";
fn as_input(s: &str) -> Input {
read_input(BufReader::new(s.split('\n').map(|s| s.trim()).collect::<Vec<_>>().join("\n").as_bytes())).unwrap()
}
#[test]
fn test_part1() {
assert_eq!(part1(&as_input(INPUT)), 295);
}
#[test]
fn test_part2() {
assert_eq!(part2(&as_input(INPUT)), 1068781);
assert_eq!(part2(&as_input("0\n17,x,13,19")), 3417);
assert_eq!(part2(&as_input("0\n67,7,59,61")), 754018);
assert_eq!(part2(&as_input("0\n67,x,7,59,61")), 779210);
assert_eq!(part2(&as_input("0\n67,7,x,59,61")), 1261476);
assert_eq!(part2(&as_input("0\n1789,37,47,1889")), 1202161486);
}
}
| Input |
model.ts | import { TableListData } from './data.d';
import { AnyAction, Reducer } from 'redux';
import { EffectsCommandMap } from 'dva';
import { addSynonyms, querySynonyms, removeSynonyms, updateSynonyms } from './service';
export interface StateType {
data: TableListData;
}
export type Effect = (
action: AnyAction,
effects: EffectsCommandMap & { select: <T>(func: (state: StateType) => T) => T },
) => void;
export interface ModelType {
namespace: string;
state: StateType;
effects: {
fetch: Effect;
add: Effect;
remove: Effect;
update: Effect;
};
reducers: {
save: Reducer<StateType>;
};
}
const Model: ModelType = {
namespace: 'robotSynonyms',
state: {
data: {
list: [],
pagination: {},
},
},
effects: {
*fetch({ payload }, { call, put }) {
const response = yield call(querySynonyms, payload);
yield put({
type: 'save',
payload: response,
});
},
*add({ payload, callback }, { call, put }) {
const response = yield call(addSynonyms, payload);
yield put({
type: 'save',
payload: response,
});
if (callback) callback();
},
*remove({ payload, callback }, { call, put }) {
const response = yield call(removeSynonyms, payload);
yield put({
type: 'save',
payload: response,
});
if (callback) callback();
},
*update({ payload, callback }, { call, put }) { | yield put({
type: 'save',
payload: response,
});
if (callback) callback();
},
},
reducers: {
save(state, action) {
return {
...state,
data: action.payload,
};
},
},
};
export default Model; | const response = yield call(updateSynonyms, payload); |
global_transaction_event.go | package event
import (
"github.com/opentrx/seata-golang/v2/pkg/apis"
)
const (
RoleTC = "tc"
RoleTM = "client"
RoleRM = "rm"
)
type GlobalTransactionEvent struct {
id int64
role string
name string
beginTime int64
endTime int64
status apis.GlobalSession_GlobalStatus
}
func | (id int64, role string, name string, beginTime int64, endTime int64, status apis.GlobalSession_GlobalStatus) GlobalTransactionEvent {
return GlobalTransactionEvent{
id,
role,
name,
beginTime,
endTime,
status,
}
}
func (event GlobalTransactionEvent) GetID() int64 { return event.id }
func (event GlobalTransactionEvent) GetRole() string { return event.role }
func (event GlobalTransactionEvent) GetName() string { return event.name }
func (event GlobalTransactionEvent) GetBeginTime() int64 { return event.beginTime }
func (event GlobalTransactionEvent) GetEndTime() int64 { return event.endTime }
func (event GlobalTransactionEvent) GetStatus() apis.GlobalSession_GlobalStatus { return event.status }
| NewGlobalTransactionEvent |
options.js | import path from 'path';
import cosmiconfig from 'cosmiconfig';
export function | (configDirectory) {
const searchPath = configDirectory || './';
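// cosmiconfig searches from searchPath for a graphql-schema-linter config
// (e.g. a package.json key or rc file); caching is disabled so each call
// re-reads the configuration.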
const cosmic = cosmiconfig('graphql-schema-linter', {
cache: false,
}).searchSync(searchPath);
if (cosmic) {
let schemaPaths = [];
let customRulePaths = [];
// If schemaPaths come from cosmic, we resolve the given paths relative to the searchPath.
if (cosmic.config.schemaPaths) {
schemaPaths = cosmic.config.schemaPaths.map(schemaPath =>
path.resolve(searchPath, schemaPath)
);
}
// If customRulePaths come from cosmic, we resolve the given paths relative to the searchPath.
if (cosmic.config.customRulePaths) {
customRulePaths = cosmic.config.customRulePaths.map(schemaPath =>
path.resolve(searchPath, schemaPath)
);
}
return {
rules: cosmic.config.rules,
customRulePaths: customRulePaths || [],
schemaPaths: schemaPaths,
};
} else {
return {};
}
}
| loadOptionsFromConfigDir |
issue-22258.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::ops::Add;
fn f<T: Add>(a: T, b: T) -> <T as Add>::Output |
fn main() {
println!("a + b is {}", f::<f32>(100f32, 200f32));
}
| {
a + b
} |
step_stop_instances_test.go | // Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daisy
import (
"context"
"fmt"
"testing"
)
func | (t *testing.T) {
w := testWorkflow()
s, _ := w.NewStep("s")
s.StopInstances = &StopInstances{
Instances: []string{"i", "zones/z/instances/i"},
}
if err := (s.StopInstances).populate(context.Background(), s); err != nil {
t.Error("err should be nil")
}
want := &StopInstances{
Instances: []string{"i", fmt.Sprintf("projects/%s/zones/z/instances/i", w.Project)},
}
if diffRes := diff(s.StopInstances, want, 0); diffRes != "" {
t.Errorf("StopInstances not populated as expected: (-got,+want)\n%s", diffRes)
}
}
func TestStopInstancesValidate(t *testing.T) {
ctx := context.Background()
// Set up.
w := testWorkflow()
s, _ := w.NewStep("s")
iCreator, _ := w.NewStep("iCreator")
iCreator.CreateInstances = &CreateInstances{Instances: []*Instance{&Instance{}}}
w.AddDependency(s, iCreator)
if err := w.instances.regCreate("instance1", &Resource{link: fmt.Sprintf("projects/%s/zones/%s/disks/d", testProject, testZone)}, false, iCreator); err != nil {
t.Fatal(err)
}
if err := (&StopInstances{Instances: []string{"instance1"}}).validate(ctx, s); err != nil {
t.Errorf("validation should not have failed: %v", err)
}
if err := (&StopInstances{Instances: []string{"dne"}}).validate(ctx, s); err == nil {
t.Error("StopInstances should have returned an error when stopping an instance that DNE")
}
}
func TestStopInstancesRun(t *testing.T) {
ctx := context.Background()
w := testWorkflow()
s, _ := w.NewStep("s")
ins := []*Resource{{RealName: "in0", link: "link"}, {RealName: "in1", link: "link"}}
w.instances.m = map[string]*Resource{"in0": ins[0], "in1": ins[1]}
si := &StopInstances{
Instances: []string{"in0"},
}
if err := si.run(ctx, s); err != nil {
t.Fatalf("error running StopInstances.run(): %v", err)
}
stoppedChecks := []struct {
r *Resource
shouldBeStopped bool
}{
{ins[0], true},
{ins[1], false},
}
for _, c := range stoppedChecks {
if c.shouldBeStopped {
if !c.r.stoppedByWf {
t.Errorf("resource %q should have been stopped", c.r.RealName)
}
} else if c.r.stoppedByWf {
t.Errorf("resource %q should not have been stopped", c.r.RealName)
}
}
}
| TestStopInstancesPopulate |
SetSize.js | import { CONFIG_DEFAULTS } from "../const"; | if (resolution === 0) {
resolution = window.devicePixelRatio;
}
ConfigStore.set(CONFIG_DEFAULTS.SIZE, { width, height, resolution });
} | import { ConfigStore } from "../ConfigStore";
export function SetSize(width = 800, height = 600, resolution = 1) { |
CreateComplimentController.ts |
import { Request, Response } from "express";
import { CreateComplimentService } from "../services/CreateComplimentService";
class | {
async handle(request: Request, response: Response) {
const { tag_id, user_receiver, message } = request.body;
const { user_id } = request;
const createComplimentService = new CreateComplimentService();
const compliment = await createComplimentService.execute({
tag_id,
user_sender: user_id ,
user_receiver,
message,
});
return response.json(compliment);
}
}
export { CreateComplimentController }; | CreateComplimentController |
create_video_compress_task.go | package imm
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// CreateVideoCompressTask invokes the imm.CreateVideoCompressTask API synchronously
// api document: https://help.aliyun.com/api/imm/createvideocompresstask.html
func (client *Client) CreateVideoCompressTask(request *CreateVideoCompressTaskRequest) (response *CreateVideoCompressTaskResponse, err error) {
response = CreateCreateVideoCompressTaskResponse()
err = client.DoAction(request, response)
return
}
// CreateVideoCompressTaskWithChan invokes the imm.CreateVideoCompressTask API asynchronously
// api document: https://help.aliyun.com/api/imm/createvideocompresstask.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) CreateVideoCompressTaskWithChan(request *CreateVideoCompressTaskRequest) (<-chan *CreateVideoCompressTaskResponse, <-chan error) {
responseChan := make(chan *CreateVideoCompressTaskResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.CreateVideoCompressTask(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// CreateVideoCompressTaskWithCallback invokes the imm.CreateVideoCompressTask API asynchronously
// api document: https://help.aliyun.com/api/imm/createvideocompresstask.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) CreateVideoCompressTaskWithCallback(request *CreateVideoCompressTaskRequest, callback func(response *CreateVideoCompressTaskResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *CreateVideoCompressTaskResponse
var err error
defer close(result)
response, err = client.CreateVideoCompressTask(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// CreateVideoCompressTaskRequest is the request struct for api CreateVideoCompressTask
type CreateVideoCompressTaskRequest struct {
*requests.RpcRequest
Project string `position:"Query" name:"Project"`
NotifyEndpoint string `position:"Query" name:"NotifyEndpoint"`
TargetContainer string `position:"Query" name:"TargetContainer"`
CustomMessage string `position:"Query" name:"CustomMessage"`
NotifyTopicName string `position:"Query" name:"NotifyTopicName"`
TargetList string `position:"Query" name:"TargetList"`
VideoUri string `position:"Query" name:"VideoUri"`
}
// CreateVideoCompressTaskResponse is the response struct for api CreateVideoCompressTask
type CreateVideoCompressTaskResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
TaskId string `json:"TaskId" xml:"TaskId"`
TaskType string `json:"TaskType" xml:"TaskType"`
}
// CreateCreateVideoCompressTaskRequest creates a request to invoke CreateVideoCompressTask API
func | () (request *CreateVideoCompressTaskRequest) {
request = &CreateVideoCompressTaskRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("imm", "2017-09-06", "CreateVideoCompressTask", "imm", "openAPI")
return
}
// CreateCreateVideoCompressTaskResponse creates a response to parse from CreateVideoCompressTask response
func CreateCreateVideoCompressTaskResponse() (response *CreateVideoCompressTaskResponse) {
response = &CreateVideoCompressTaskResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
| CreateCreateVideoCompressTaskRequest |
save-attributions.test.tsx | // SPDX-FileCopyrightText: Facebook, Inc. and its affiliates
// SPDX-FileCopyrightText: TNG Technology Consulting GmbH <https://www.tngtech.com>
//
// SPDX-License-Identifier: Apache-2.0
import { App } from '../../../Components/App/App';
import {
clickOnButton,
EMPTY_PARSED_FILE_CONTENT,
expectButton,
expectButtonIsNotShown,
expectElementsInAutoCompleteAndSelectFirst,
expectValuesInProgressbarTooltip,
mockElectronIpcRendererOn,
TEST_TIMEOUT,
} from '../../../test-helpers/general-test-helpers';
import { screen } from '@testing-library/react';
import { IpcChannel } from '../../../../shared/ipc-channels';
import { renderComponentWithStore } from '../../../test-helpers/render-component-with-store';
import {
ParsedFileContent,
SaveFileArgs,
} from '../../../../shared/shared-types';
import { ButtonText, DiscreteConfidence } from '../../../enums/enums';
import { IpcRenderer } from 'electron';
import React from 'react';
import {
clickAddNewAttributionButton,
expectValueInManualPackagePanel,
} from '../../../test-helpers/package-panel-helpers';
import {
clickOnButtonInHamburgerMenu,
expectButtonInHamburgerMenu,
expectButtonInHamburgerMenuIsNotShown,
expectValueInConfidenceField,
expectValueInTextBox,
expectValueNotInConfidenceField,
expectValueNotInTextBox,
insertValueIntoTextBox,
selectConfidenceInDropdown,
} from '../../../test-helpers/attribution-column-test-helpers';
import { clickOnElementInResourceBrowser } from '../../../test-helpers/resource-browser-test-helpers';
let originalIpcRenderer: IpcRenderer;
jest.setTimeout(TEST_TIMEOUT);
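// Stub the FileLoaded IPC channel so the App receives the given parsed file
// content as if it had been loaded from disk.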
function mockElectronBackend(
mockFileLoadedChannelReturn: ParsedFileContent
): void {
window.ipcRenderer.on
// @ts-ignore
.mockImplementation(
mockElectronIpcRendererOn(
IpcChannel.FileLoaded,
mockFileLoadedChannelReturn
)
);
}
describe('The App in Audit View', () => {
beforeAll(() => {
originalIpcRenderer = global.window.ipcRenderer;
global.window.ipcRenderer = {
on: jest.fn(),
removeListener: jest.fn(),
invoke: jest.fn(),
} as unknown as IpcRenderer;
});
beforeEach(() => jest.clearAllMocks());
afterAll(() => {
// Important to restore the original value.
global.window.ipcRenderer = originalIpcRenderer;
});
test('saves new attributions to file in AuditView', () => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(global as any).document.createRange = (): unknown => ({
setStart: (): void => {},
setEnd: (): void => {},
commonAncestorContainer: {
nodeName: 'BODY',
ownerDocument: document,
},
});
const testPackageName = 'React';
const testLicenseNames = ['MIT', 'MIT License'];
const mockChannelReturn: ParsedFileContent = {
...EMPTY_PARSED_FILE_CONTENT,
resources: { 'something.js': 1 },
manualAttributions: {
attributions: {
uuid_1: {
packageName: 'InitialPackageName',
packageVersion: '16.5.0',
licenseText: 'Custom license text',
},
},
resourcesToAttributions: {
'/something.js': ['uuid_1'],
},
},
frequentLicenses: {
nameOrder: testLicenseNames,
texts: { MIT: 'MIT License Text', 'MIT License': 'MIT License Text' },
},
};
mockElectronBackend(mockChannelReturn);
renderComponentWithStore(<App />);
clickOnElementInResourceBrowser(screen, 'something.js');
expectValueInTextBox(screen, 'Name', 'InitialPackageName');
expectButton(screen, ButtonText.Save, true);
expectButtonInHamburgerMenu(screen, ButtonText.Undo, true);
| expectButtonInHamburgerMenu(screen, ButtonText.Undo, false);
clickOnButtonInHamburgerMenu(screen, ButtonText.Undo);
expectValueNotInTextBox(screen, 'Name', testPackageName);
expectButton(screen, ButtonText.Save, true);
expectButtonInHamburgerMenu(screen, ButtonText.Undo, true);
insertValueIntoTextBox(screen, 'Name', testPackageName);
expectValueInTextBox(screen, 'Name', testPackageName);
selectConfidenceInDropdown(screen, `Low (${DiscreteConfidence.Low})`);
expect(screen.queryAllByText(`Low (${DiscreteConfidence.Low})`).length).toBeGreaterThan(0);
expectButton(screen, ButtonText.Save, false);
expectButtonInHamburgerMenu(screen, ButtonText.Undo, false);
expectElementsInAutoCompleteAndSelectFirst(screen, testLicenseNames);
clickOnButton(screen, ButtonText.Save);
const expectedSaveFileArgs: SaveFileArgs = {
manualAttributions: {
uuid_1: {
licenseName: 'MIT',
attributionConfidence: DiscreteConfidence.Low,
licenseText: 'Custom license text',
packageName: 'React',
packageVersion: '16.5.0',
},
},
resourcesToAttributions: {
'/something.js': ['uuid_1'],
},
resolvedExternalAttributions: new Set<string>(),
};
// @ts-ignore
expect(window.ipcRenderer.invoke.mock.calls).toEqual([
[IpcChannel['SaveFile'], expectedSaveFileArgs],
]);
expectButton(screen, ButtonText.Save, true);
expectButtonInHamburgerMenu(screen, ButtonText.Undo, true);
});
test('save and save for all buttons are shown and work', () => {
const mockChannelReturn: ParsedFileContent = {
...EMPTY_PARSED_FILE_CONTENT,
resources: { 'firstResource.js': 1, 'secondResource.js': 1 },
manualAttributions: {
attributions: {
uuid_1: {
packageName: 'React',
packageVersion: '16.5.0',
licenseText: 'Permission is hereby granted',
},
uuid_2: {
packageName: 'Vue',
packageVersion: '1.2.0',
licenseText: 'Permission is not granted',
},
},
resourcesToAttributions: {
'/firstResource.js': ['uuid_1'],
'/secondResource.js': ['uuid_1'],
},
},
};
mockElectronBackend(mockChannelReturn);
renderComponentWithStore(<App />);
clickOnElementInResourceBrowser(screen, 'firstResource.js');
expectValueInTextBox(screen, 'Name', 'React');
expectButton(screen, ButtonText.Save, true);
expectButtonInHamburgerMenu(screen, ButtonText.Undo, true);
expectButton(screen, ButtonText.SaveGlobally, true);
insertValueIntoTextBox(screen, 'Name', 'Typescript');
expectValueInTextBox(screen, 'Name', 'Typescript');
expectButton(screen, ButtonText.Save, false);
expectButtonInHamburgerMenu(screen, ButtonText.Undo, false);
expectButton(screen, ButtonText.SaveGlobally, false);
clickOnButton(screen, ButtonText.SaveGlobally);
clickOnElementInResourceBrowser(screen, 'secondResource.js');
expectValueInTextBox(screen, 'Name', 'Typescript');
expectButton(screen, ButtonText.Save, true);
expectButtonInHamburgerMenu(screen, ButtonText.Undo, true);
expectButton(screen, ButtonText.SaveGlobally, true);
insertValueIntoTextBox(screen, 'Name', 'Vue');
expectValueInTextBox(screen, 'Name', 'Vue');
expectButton(screen, ButtonText.Save, false);
expectButtonInHamburgerMenu(screen, ButtonText.Undo, false);
expectButton(screen, ButtonText.SaveGlobally, false);
clickOnButton(screen, ButtonText.Save);
clickOnElementInResourceBrowser(screen, 'firstResource.js');
expectValueInTextBox(screen, 'Name', 'Typescript');
clickAddNewAttributionButton(screen);
insertValueIntoTextBox(screen, 'Name', 'Angular');
expectValueInTextBox(screen, 'Name', 'Angular');
expectButton(screen, ButtonText.Save, false);
expectButtonInHamburgerMenu(screen, ButtonText.Undo, false);
clickOnButton(screen, ButtonText.Save);
expectValueInManualPackagePanel(screen, 'Angular');
expectValueInManualPackagePanel(screen, 'Typescript, 16.5.0');
});
test('confirm buttons are shown and work', () => {
const mockChannelReturn: ParsedFileContent = {
...EMPTY_PARSED_FILE_CONTENT,
resources: {
'firstResource.js': 1,
'secondResource.js': 1,
'thirdResource.js': 1,
'fourthResource.js': 1,
},
manualAttributions: {
attributions: {
uuid_1: {
packageName: 'React',
packageVersion: '16.5.0',
licenseText: 'Permission is hereby granted',
comment: 'Attribution of multiple resources',
attributionConfidence: 10,
preSelected: true,
},
uuid_2: {
packageName: 'Vue',
packageVersion: '1.2.0',
licenseText: 'Permission is not granted',
comment: 'Attribution of one resources',
attributionConfidence: 90,
preSelected: true,
},
},
resourcesToAttributions: {
'/firstResource.js': ['uuid_1'],
'/secondResource.js': ['uuid_1'],
'/thirdResource.js': ['uuid_1'],
'/fourthResource.js': ['uuid_2'],
},
},
};
mockElectronBackend(mockChannelReturn);
renderComponentWithStore(<App />);
clickOnElementInResourceBrowser(screen, 'firstResource.js');
expectValueInTextBox(screen, 'Name', 'React');
expectValueInConfidenceField(screen, '10');
expectValuesInProgressbarTooltip(screen, 4, 0, 4, 0);
expectButton(screen, ButtonText.Confirm);
expectButton(screen, ButtonText.ConfirmGlobally);
clickOnButton(screen, ButtonText.Confirm);
expectValueNotInConfidenceField(screen, '10');
expectValueInConfidenceField(screen, `High (${DiscreteConfidence.High})`);
expectValuesInProgressbarTooltip(screen, 4, 1, 3, 0);
expectButtonInHamburgerMenuIsNotShown(screen, ButtonText.Confirm);
expectButtonIsNotShown(screen, ButtonText.ConfirmGlobally);
clickOnElementInResourceBrowser(screen, 'secondResource.js');
expectValueInTextBox(screen, 'Name', 'React');
expectValueInConfidenceField(screen, '10');
expectValueNotInConfidenceField(
screen,
`High (${DiscreteConfidence.High})`
);
expectButton(screen, ButtonText.Confirm);
expectButton(screen, ButtonText.ConfirmGlobally);
clickOnButton(screen, ButtonText.ConfirmGlobally);
expectValueNotInConfidenceField(screen, '10');
expectValueInConfidenceField(screen, `High (${DiscreteConfidence.High})`);
expectValuesInProgressbarTooltip(screen, 4, 3, 1, 0);
expectButtonInHamburgerMenuIsNotShown(screen, ButtonText.Confirm);
expectButtonIsNotShown(screen, ButtonText.ConfirmGlobally);
clickOnElementInResourceBrowser(screen, 'thirdResource.js');
expectValueNotInConfidenceField(screen, '10');
expectValueInConfidenceField(screen, `High (${DiscreteConfidence.High})`);
expectButtonIsNotShown(screen, ButtonText.Confirm);
expectButtonIsNotShown(screen, ButtonText.ConfirmGlobally);
clickOnElementInResourceBrowser(screen, 'fourthResource.js');
expectValueInConfidenceField(screen, '90');
expectValueNotInConfidenceField(
screen,
`High (${DiscreteConfidence.High})`
);
expectValueInTextBox(screen, 'Name', 'Vue');
expectButton(screen, ButtonText.Confirm);
expectButtonIsNotShown(screen, ButtonText.ConfirmGlobally);
clickOnButton(screen, ButtonText.Confirm);
expectValueNotInConfidenceField(screen, '90');
expectValueInConfidenceField(screen, `High (${DiscreteConfidence.High})`);
expectValuesInProgressbarTooltip(screen, 4, 4, 0, 0);
expectButtonIsNotShown(screen, ButtonText.Confirm);
});
}); | insertValueIntoTextBox(screen, 'Name', testPackageName);
expectValueInTextBox(screen, 'Name', testPackageName);
expectButton(screen, ButtonText.Save, false); |
main_scene.rs | use crate::extensions::NodeExt as _;
use crate::hud;
use crate::mob;
use crate::player;
use gdnative::api::{Area2D, PathFollow2D, Position2D, RigidBody2D};
use gdnative::prelude::*;
use rand::*;
use std::f64::consts::PI;
#[derive(NativeClass)]
#[inherit(Node)]
#[user_data(user_data::LocalCellData<Main>)]
pub struct Main {
#[property]
mob: Ref<PackedScene>,
score: i64,
}
#[methods]
impl Main {
fn new(_owner: &Node) -> Self {
Main {
mob: PackedScene::new().into_shared(),
score: 0,
}
}
#[export]
fn game_over(&self, owner: &Node) {
let score_timer = unsafe { owner.get_typed_node::<Timer, _>("score_timer") };
let mob_timer = unsafe { owner.get_typed_node::<Timer, _>("mob_timer") };
score_timer.stop();
mob_timer.stop();
let hud_node = unsafe { owner.get_typed_node::<CanvasLayer, _>("hud") };
hud_node
.cast_instance::<hud::HUD>()
.and_then(|hud| hud.map(|x, o| x.show_game_over(&*o)).ok())
.unwrap_or_else(|| godot_print!("Unable to get hud"));
}
#[export]
fn new_game(&mut self, owner: &Node) {
let start_position = unsafe { owner.get_typed_node::<Position2D, _>("start_position") };
let player = unsafe { owner.get_typed_node::<Area2D, _>("player") };
let start_timer = unsafe { owner.get_typed_node::<Timer, _>("start_timer") };
self.score = 0;
player
.cast_instance::<player::Player>()
.and_then(|player| {
player
.map(|x, o| x.start(&*o, start_position.position()))
.ok()
})
.unwrap_or_else(|| godot_print!("Unable to get player"));
start_timer.start(0.0);
let hud_node = unsafe { owner.get_typed_node::<CanvasLayer, _>("hud") };
hud_node
.cast_instance::<hud::HUD>()
.and_then(|hud| {
hud.map(|x, o| {
x.update_score(&*o, self.score);
x.show_message(&*o, "Get Ready".into());
})
.ok()
})
.unwrap_or_else(|| godot_print!("Unable to get hud"));
}
#[export]
fn on_start_timer_timeout(&self, owner: &Node) {
let mob_timer = unsafe { owner.get_typed_node::<Timer, _>("mob_timer") };
let score_timer = unsafe { owner.get_typed_node::<Timer, _>("score_timer") };
mob_timer.start(0.0);
score_timer.start(0.0);
}
#[export]
fn on_score_timer_timeout(&mut self, owner: &Node) {
self.score += 1;
let hud_node = unsafe { owner.get_typed_node::<CanvasLayer, _>("hud") };
hud_node
.cast_instance::<hud::HUD>()
.and_then(|hud| hud.map(|x, o| x.update_score(&*o, self.score)).ok())
.unwrap_or_else(|| godot_print!("Unable to get hud"));
}
#[export]
fn on_mob_timer_timeout(&self, owner: &Node) {
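// Spawn a mob at a random offset along the spawn path, point it roughly
// perpendicular to the path (±45°), and give it a random speed within the
// mob's configured min/max range.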
let mob_spawn_location =
unsafe { owner.get_typed_node::<PathFollow2D, _>("mob_path/mob_spawn_locations") };
let mob_scene: Ref<RigidBody2D, _> = instance_scene(&self.mob);
let mut rng = rand::thread_rng();
let offset = rng.gen_range(std::u32::MIN, std::u32::MAX);
mob_spawn_location.set_offset(offset.into());
let mut direction = mob_spawn_location.rotation() + PI / 2.0;
mob_scene.set_position(mob_spawn_location.position());
direction += rng.gen_range(-PI / 4.0, PI / 4.0);
mob_scene.set_rotation(direction);
let d = direction as f32;
let mob_scene = unsafe { mob_scene.into_shared().assume_safe() };
owner.add_child(mob_scene, false);
let mob = mob_scene.cast_instance::<mob::Mob>().unwrap();
mob.map(|x, mob_owner| {
mob_owner
.set_linear_velocity(Vector2::new(rng.gen_range(x.min_speed, x.max_speed), 0.0));
mob_owner
.set_linear_velocity(mob_owner.linear_velocity().rotated(Angle { radians: d }));
let hud_node = unsafe { owner.get_typed_node::<CanvasLayer, _>("hud") };
let hud = hud_node.cast_instance::<hud::HUD>().unwrap();
hud.map(|_, o| {
o.connect(
"start_game",
mob_owner,
"on_start_game",
VariantArray::new_shared(),
0,
)
.unwrap();
})
.unwrap();
})
.unwrap();
}
} | where
Root: gdnative::GodotObject<RefKind = ManuallyManaged> + SubClass<Node>,
{
let scene = unsafe { scene.assume_safe() };
let instance = scene
.instance(PackedScene::GEN_EDIT_STATE_DISABLED)
.expect("should be able to instance scene");
let instance = unsafe { instance.assume_unique() };
instance
.try_cast::<Root>()
.expect("root node type should be correct")
} |
/// Root here is needs to be the same type (or a parent type) of the node that you put in the child
/// scene as the root. For instance Spatial is used for this example.
fn instance_scene<Root>(scene: &Ref<PackedScene, Shared>) -> Ref<Root, Unique> |
comp_high_b.py | #!/usr/bin/env python
"""
@author: pritesh-mehta
"""
import numpy as np
from scipy.optimize import curve_fit
from pathlib import Path
from argparse import ArgumentParser
from dwi_utilities.monoexponential_decay import log_func, func
import dwi_utilities.nifti_utilities as nutil
def comp_high_b_case(case_dir, target_bval, save_case=False, output_dir=None, extension='.nii.gz'):
"""Generate high b-value DWI using low b-value DWI (case)
"""
eps = 1e-8
data_stack = []
bval_list = []
filepaths = nutil.path_generator(case_dir)
for path in filepaths:
name, nii, data = nutil.load(path)
data_stack.append(data)
bval_list.append(name.replace('.nii.gz','').replace('b',''))
# order data stack in order of ascending b-value
bval_list, data_stack = \
zip(*sorted(zip(bval_list, data_stack)))
# generate high b-value
bval_list = np.array(bval_list)
data = np.array(data_stack)
shape = np.shape(data[0])
highb_data = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
for k in range(shape[2]):
y = []
for array in data:
y.append(array[i][j][k])
x = bval_list
y = np.array(y) + eps
z = np.log(y)
popt, pcov = curve_fit(log_func, x, z)
if popt[1] < 0:
highb_data[i][j][k] = 0
else:
highb_data[i][j][k] = func(target_bval, np.exp(popt[0]), popt[1])
| nutil.save(save_path, nii, highb_data)
return highb_data
def comp_high_b_dir(cases_dir, target_bval, output_dir, extension='.nii.gz'):
"""Generate high b-value DWI using low b-value DWI (directory)
"""
for case_dir in Path(cases_dir).iterdir():
print("Processing:", case_dir)
comp_high_b_case(case_dir, target_bval, save_case=True, output_dir=output_dir, extension=extension)
return None
def process():
parser = ArgumentParser()
parser.add_argument('--input_dir', required=True, type=str)
parser.add_argument('--target_bval', required=True, type=int)
parser.add_argument('--output_dir', required=True, type=str)
parser.add_argument('--case', required=False, action="store_true")
parser.add_argument('--extension', required=False, type=str, default='.nii.gz')
args = parser.parse_args()
if args.case:
comp_high_b_case(args.input_dir, args.target_bval, save_case=True, output_dir=args.output_dir,
extension=args.extension)
else:
comp_high_b_dir(args.input_dir, args.target_bval, args.output_dir,
extension=args.extension)
if __name__ == "__main__":
process() | if save_case:
case_name = Path(case_dir).parts[-1]
save_path = Path(output_dir) / (case_name + extension) |
logical_switch_manager.go | package logicalswitchmanager
import (
"fmt"
"net"
"reflect"
"sync"
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
ipam "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/ipallocator"
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/ipallocator/allocator"
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util"
"k8s.io/klog/v2"
)
// logicalSwitchInfo contains information corresponding to the node. It holds the
// subnet allocations (v4 and v6) as well as the IPAM allocator instances for each
// subnet managed for this node
type logicalSwitchInfo struct {
hostSubnets []*net.IPNet
ipams []ipam.Interface
noHostSubnet bool
uuid string
}
type ipamFactoryFunc func(*net.IPNet) (ipam.Interface, error)
// LogicalSwitchManager provides switch info management APIs including IPAM for the host subnets
type LogicalSwitchManager struct {
cache map[string]logicalSwitchInfo
// A RW mutex for LogicalSwitchManager which holds logicalSwitch information
sync.RWMutex
ipamFunc ipamFactoryFunc
}
// NewIPAMAllocator provides an ipam interface which can be used for IPAM
// allocations for a given cidr using a contiguous allocation strategy.
// It also pre-allocates certain special subnet IPs such as the .1, .2, and .3
// addresses as reserved.
func NewIPAMAllocator(cidr *net.IPNet) (ipam.Interface, error) {
subnetRange, err := ipam.NewAllocatorCIDRRange(cidr, func(max int, rangeSpec string) (allocator.Interface, error) {
return allocator.NewRoundRobinAllocationMap(max, rangeSpec), nil
})
if err != nil {
return nil, err
}
if err := reserveIPs(cidr, subnetRange); err != nil {
klog.Errorf("Failed reserving IPs for subnet %s, err: %v", cidr, err)
return nil, err
}
return subnetRange, nil
}
// Helper function to reserve certain subnet IPs as special
// These are the .1, .2 and .3 addresses in particular
func reserveIPs(subnet *net.IPNet, ipam ipam.Interface) error {
gwIfAddr := util.GetNodeGatewayIfAddr(subnet)
err := ipam.Allocate(gwIfAddr.IP)
if err != nil {
klog.Errorf("Unable to allocate subnet's gateway IP: %s", gwIfAddr.IP)
return err
}
mgmtIfAddr := util.GetNodeManagementIfAddr(subnet)
err = ipam.Allocate(mgmtIfAddr.IP)
if err != nil {
klog.Errorf("Unable to allocate subnet's management IP: %s", mgmtIfAddr.IP)
return err
}
if config.HybridOverlay.Enabled {
hybridOverlayIfAddr := util.GetNodeHybridOverlayIfAddr(subnet)
err = ipam.Allocate(hybridOverlayIfAddr.IP)
if err != nil {
klog.Errorf("Unable to allocate subnet's hybrid overlay interface IP: %s", hybridOverlayIfAddr.IP)
return err
}
}
return nil
}
// Initializes a new logical switch manager
func NewLogicalSwitchManager() *LogicalSwitchManager {
return &LogicalSwitchManager{
cache: make(map[string]logicalSwitchInfo),
RWMutex: sync.RWMutex{},
ipamFunc: NewIPAMAllocator,
}
}
// AddNode adds/updates a node to the logical switch manager for subnet
// and IPAM management.
func (manager *LogicalSwitchManager) AddNode(nodeName, uuid string, hostSubnets []*net.IPNet) error {
manager.Lock()
defer manager.Unlock()
if lsi, ok := manager.cache[nodeName]; ok && !reflect.DeepEqual(lsi.hostSubnets, hostSubnets) {
klog.Warningf("Node %q logical switch already in cache with subnet %s; replacing with %s", nodeName,
util.JoinIPNets(lsi.hostSubnets, ","), util.JoinIPNets(hostSubnets, ","))
}
var ipams []ipam.Interface
for _, subnet := range hostSubnets {
ipam, err := manager.ipamFunc(subnet)
if err != nil {
klog.Errorf("IPAM for subnet %s was not initialized for node %q", subnet, nodeName)
return err
}
ipams = append(ipams, ipam)
}
manager.cache[nodeName] = logicalSwitchInfo{
hostSubnets: hostSubnets,
ipams: ipams,
noHostSubnet: len(hostSubnets) == 0,
uuid: uuid,
}
return nil
}
// AddNoHostSubnetNode adds/updates a node without any host subnets
// to the logical switch manager
func (manager *LogicalSwitchManager) AddNoHostSubnetNode(nodeName string) error {
	// setting the hostSubnets slice argument to nil in the cache means an object
	// exists for the switch but it was not assigned a hostSubnet by ovn-kubernetes;
	// this will be true for nodes that are marked as no-host-subnet nodes.
return manager.AddNode(nodeName, "", nil)
}
// Remove a switch/node from the logical switch manager
func (manager *LogicalSwitchManager) DeleteNode(nodeName string) {
manager.Lock()
defer manager.Unlock()
delete(manager.cache, nodeName)
}
// Given a switch name, checks if the switch is a noHostSubnet switch
func (manager *LogicalSwitchManager) IsNonHostSubnetSwitch(nodeName string) bool {
manager.RLock()
defer manager.RUnlock()
lsi, ok := manager.cache[nodeName]
return ok && lsi.noHostSubnet
}
// Given a switch name, get all its host-subnets and the switch UUID
func (manager *LogicalSwitchManager) GetSwitchSubnetsAndUUID(nodeName string) ([]*net.IPNet, string) {
manager.RLock()
defer manager.RUnlock()
lsi, ok := manager.cache[nodeName]
// make a deep-copy of the underlying slice and return so that there is no
// resource contention
if ok && len(lsi.hostSubnets) > 0 {
subnets := make([]*net.IPNet, len(lsi.hostSubnets))
for i, hsn := range lsi.hostSubnets {
subnet := *hsn
subnets[i] = &subnet
}
return subnets, lsi.uuid
}
return nil, ""
}
// AllocateIPs will block off IPs in the ipnets slice as already allocated
// for a given switch
func (manager *LogicalSwitchManager) AllocateIPs(nodeName string, ipnets []*net.IPNet) error {
manager.RLock()
defer manager.RUnlock()
lsi, ok := manager.cache[nodeName]
if len(ipnets) == 0 || !ok || len(lsi.ipams) == 0 {
return fmt.Errorf("unable to allocate ips %v for node: %s",
ipnets, nodeName)
}
var err error
allocated := make(map[int]*net.IPNet)
defer func() {
if err != nil {
// iterate over range of already allocated indices and release
// ips allocated before the error occurred.
for relIdx, relIPNet := range allocated {
if relErr := lsi.ipams[relIdx].Release(relIPNet.IP); relErr != nil {
klog.Errorf("Error while releasing IP: %s, err: %v", relIPNet.IP, relErr)
} else {
klog.Warningf("Reserved IP: %s were released", relIPNet.IP.String())
}
}
}
}()
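	// For each requested IP, find the IPAM instance whose CIDR contains it and reserve it there.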
for _, ipnet := range ipnets {
for idx, ipam := range lsi.ipams {
cidr := ipam.CIDR()
if cidr.Contains(ipnet.IP) {
if _, ok = allocated[idx]; ok {
err = fmt.Errorf("Error: attempt to reserve multiple IPs in the same IPAM instance")
return err
}
if err = ipam.Allocate(ipnet.IP); err != nil {
return err
}
allocated[idx] = ipnet
break
}
}
}
return nil
}
// AllocateNextIPs allocates IP addresses from each of the host subnets
// for a given switch
func (manager *LogicalSwitchManager) AllocateNextIPs(nodeName string) ([]*net.IPNet, error) {
manager.RLock()
defer manager.RUnlock()
var ipnets []*net.IPNet
var ip net.IP
var err error
lsi, ok := manager.cache[nodeName]
if !ok {
return nil, fmt.Errorf("node %s not found in the logical switch manager cache", nodeName)
}
if len(lsi.ipams) == 0 {
return nil, fmt.Errorf("failed to allocate IPs for node %s because there is no IPAM instance", nodeName)
}
if len(lsi.ipams) != len(lsi.hostSubnets) {
return nil, fmt.Errorf("failed to allocate IPs for node %s because host subnet instances: %d"+
" don't match ipam instances: %d", nodeName, len(lsi.hostSubnets), len(lsi.ipams))
}
defer func() {
if err != nil {
// iterate over range of already allocated indices and release
// ips allocated before the error occurred.
for relIdx, relIPNet := range ipnets {
if relErr := lsi.ipams[relIdx].Release(relIPNet.IP); relErr != nil {
klog.Errorf("Error while releasing IP: %s, err: %v", relIPNet.IP, relErr)
}
}
klog.Warningf("Allocated IPs: %s were released", util.JoinIPNetIPs(ipnets, " "))
}
}()
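	// Allocate the next free IP from each per-subnet IPAM instance, pairing it with that subnet's mask.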
for idx, ipam := range lsi.ipams {
ip, err = ipam.AllocateNext()
if err != nil {
return nil, err
}
ipnet := &net.IPNet{
IP: ip,
Mask: lsi.hostSubnets[idx].Mask,
}
ipnets = append(ipnets, ipnet)
}
return ipnets, nil
}
// Mark the IPs in the ipnets slice as available for allocation
// by releasing them from the IPAM pool of allocated IPs.
func (manager *LogicalSwitchManager) ReleaseIPs(nodeName string, ipnets []*net.IPNet) error {
manager.RLock()
defer manager.RUnlock()
if ipnets == nil || nodeName == "" {
klog.V(5).Infof("Node name is empty or ip slice to release is nil")
return nil
}
lsi, ok := manager.cache[nodeName]
if !ok {
return fmt.Errorf("node %s not found in the logical switch manager cache",
nodeName)
}
if len(lsi.ipams) == 0 {
return fmt.Errorf("failed to release IPs for node %s because there is no IPAM instance", nodeName)
}
for _, ipnet := range ipnets {
for _, ipam := range lsi.ipams {
cidr := ipam.CIDR()
if cidr.Contains(ipnet.IP) {
if err := ipam.Release(ipnet.IP); err != nil {
return err
}
break
}
}
}
return nil
}
// IP allocator manager for join switch's IPv4 and IPv6 subnets.
type JoinSwitchIPManager struct {
lsm *LogicalSwitchManager
lrpIPCache map[string][]*net.IPNet
lrpIPCacheLock sync.Mutex
}
// NewJoinIPAMAllocator provides an ipam interface which can be used for join switch IPAM
// allocations for the specified cidr using a contiguous allocation strategy.
func NewJoinIPAMAllocator(cidr *net.IPNet) (ipam.Interface, error) {
subnetRange, err := ipam.NewAllocatorCIDRRange(cidr, func(max int, rangeSpec string) (allocator.Interface, error) {
return allocator.NewContiguousAllocationMap(max, rangeSpec), nil
})
if err != nil {
return nil, err
}
return subnetRange, nil
}
// Initializes a new join switch logical switch manager.
// This IP manager is guaranteed to always have both IPv4 and IPv6 subnets regardless of dual-stack configuration
func NewJoinLogicalSwitchIPManager(existingNodeNames []string) (*JoinSwitchIPManager, error) {
j := JoinSwitchIPManager{
lsm: &LogicalSwitchManager{
cache: make(map[string]logicalSwitchInfo),
ipamFunc: NewJoinIPAMAllocator,
},
lrpIPCache: make(map[string][]*net.IPNet),
}
var joinSubnets []*net.IPNet
joinSubnetsConfig := []string{}
if config.IPv4Mode {
joinSubnetsConfig = append(joinSubnetsConfig, config.Gateway.V4JoinSubnet)
}
if config.IPv6Mode {
joinSubnetsConfig = append(joinSubnetsConfig, config.Gateway.V6JoinSubnet)
}
for _, joinSubnetString := range joinSubnetsConfig {
_, joinSubnet, err := net.ParseCIDR(joinSubnetString)
if err != nil {
return nil, fmt.Errorf("error parsing join subnet string %s: %v", joinSubnetString, err)
}
joinSubnets = append(joinSubnets, joinSubnet)
}
err := j.lsm.AddNode(types.OVNJoinSwitch, "", joinSubnets)
if err != nil {
return nil, err
}
for _, nodeName := range existingNodeNames {
gwLRPIPs := j.getJoinLRPAddresses(nodeName)
if len(gwLRPIPs) > 0 {
klog.Infof("Initializing and reserving the join switch IP for node: %s to: %v", nodeName, gwLRPIPs)
if err := j.reserveJoinLRPIPs(nodeName, gwLRPIPs); err != nil {
return nil, fmt.Errorf("error initiliazing and reserving the join switch IP for node: %s, err: %v", nodeName, err)
}
}
}
return &j, nil
}
func (jsIPManager *JoinSwitchIPManager) getJoinLRPCacheIPs(nodeName string) ([]*net.IPNet, bool) {
gwLRPIPs, ok := jsIPManager.lrpIPCache[nodeName]
return gwLRPIPs, ok
}
func sameIPs(a, b []*net.IPNet) bool {
if len(a) != len(b) {
return false
}
for _, aip := range a {
found := false
for _, bip := range b {
if aip.String() == bip.String() {
found = true
break
}
}
if !found {
return false
}
}
return true
}
func (jsIPManager *JoinSwitchIPManager) setJoinLRPCacheIPs(nodeName string, gwLRPIPs []*net.IPNet) error {
if oldIPs, ok := jsIPManager.lrpIPCache[nodeName]; ok && !sameIPs(oldIPs, gwLRPIPs) {
return fmt.Errorf("join switch IPs %v already cached", oldIPs)
}
jsIPManager.lrpIPCache[nodeName] = gwLRPIPs
return nil
}
func (jsIPManager *JoinSwitchIPManager) delJoinLRPCacheIPs(nodeName string) {
delete(jsIPManager.lrpIPCache, nodeName)
}
// reserveJoinLRPIPs reserves the given LRP IPs in the join switch allocator and, on success, stores them in the cache.
func (jsIPManager *JoinSwitchIPManager) reserveJoinLRPIPs(nodeName string, gwLRPIPs []*net.IPNet) (err error) {
// reserve the given IP in the allocator
if err = jsIPManager.lsm.AllocateIPs(types.OVNJoinSwitch, gwLRPIPs); err == nil {
defer func() {
if err != nil {
if relErr := jsIPManager.lsm.ReleaseIPs(types.OVNJoinSwitch, gwLRPIPs); relErr != nil {
klog.Errorf("Failed to release logical router port IPs %v just reserved for node %s: %q",
util.JoinIPNetIPs(gwLRPIPs, " "), nodeName, relErr)
}
}
}()
if err = jsIPManager.setJoinLRPCacheIPs(nodeName, gwLRPIPs); err != nil {
klog.Errorf("Failed to add node %s reserved IPs %v to the join switch IP cache: %s", nodeName, gwLRPIPs, err.Error())
}
}
return err
}
// EnsureJoinLRPIPs allocates the LRP IPs for the node if they are not yet allocated, then stores them in the cache.
func (jsIPManager *JoinSwitchIPManager) EnsureJoinLRPIPs(nodeName string) (gwLRPIPs []*net.IPNet, err error) {
jsIPManager.lrpIPCacheLock.Lock()
defer jsIPManager.lrpIPCacheLock.Unlock()
// first check the IP cache, return if an entry already exists
gwLRPIPs, ok := jsIPManager.getJoinLRPCacheIPs(nodeName)
if ok {
return gwLRPIPs, nil
}
// second check the running DB
gwLRPIPs = jsIPManager.getJoinLRPAddresses(nodeName)
if len(gwLRPIPs) > 0 {
// Saving the hit in the cache
err = jsIPManager.reserveJoinLRPIPs(nodeName, gwLRPIPs)
if err != nil {
klog.Errorf("Failed to add reserve IPs to the join switch IP cache: %s", err.Error())
return nil, err
}
return gwLRPIPs, nil
}
gwLRPIPs, err = jsIPManager.lsm.AllocateNextIPs(types.OVNJoinSwitch)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
if relErr := jsIPManager.lsm.ReleaseIPs(types.OVNJoinSwitch, gwLRPIPs); relErr != nil {
klog.Errorf("Failed to release logical router port IPs %v for node %s: %q",
util.JoinIPNetIPs(gwLRPIPs, " "), nodeName, relErr)
}
}
}()
if err = jsIPManager.setJoinLRPCacheIPs(nodeName, gwLRPIPs); err != nil {
klog.Errorf("Failed to add node %s reserved IPs %v to the join switch IP cache: %s", nodeName, gwLRPIPs, err.Error())
return nil, err
}
return gwLRPIPs, nil
}
// getJoinLRPAddresses checks whether the IPs of the gateway logical router port are within the join switch IP range, and returns them if so.
func (jsIPManager *JoinSwitchIPManager) getJoinLRPAddresses(nodeName string) []*net.IPNet {
// try to get the IPs from the logical router port
gwLRPIPs := []*net.IPNet{}
gwLrpName := types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + nodeName
joinSubnets, _ := jsIPManager.lsm.GetSwitchSubnetsAndUUID(types.OVNJoinSwitch)
ifAddrs, err := util.GetLRPAddrs(gwLrpName)
if err == nil {
for _, ifAddr := range ifAddrs {
for _, subnet := range joinSubnets {
if subnet.Contains(ifAddr.IP) {
gwLRPIPs = append(gwLRPIPs, &net.IPNet{IP: ifAddr.IP, Mask: subnet.Mask})
break
}
}
}
}
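	// Expect exactly one LRP address per join subnet; anything else means the stored addresses are unusable.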
if len(gwLRPIPs) != len(joinSubnets) {
var errStr string
if len(gwLRPIPs) == 0 | else {
errStr = fmt.Sprintf("Invalid IPs %s (possibly not in the range of subnet %s)",
util.JoinIPNetIPs(gwLRPIPs, " "), util.JoinIPNetIPs(joinSubnets, " "))
}
klog.Warningf("%s for logical router port %s", errStr, gwLrpName)
return []*net.IPNet{}
}
return gwLRPIPs
}
func (jsIPManager *JoinSwitchIPManager) ReleaseJoinLRPIPs(nodeName string) (err error) {
jsIPManager.lrpIPCacheLock.Lock()
defer jsIPManager.lrpIPCacheLock.Unlock()
gwLRPIPs, ok := jsIPManager.getJoinLRPCacheIPs(nodeName)
if ok {
err = jsIPManager.lsm.ReleaseIPs(types.OVNJoinSwitch, gwLRPIPs)
jsIPManager.delJoinLRPCacheIPs(nodeName)
}
return err
}
| {
errStr = fmt.Sprintf("Failed to get IPs for logical router port %s", gwLrpName)
} |
raw_int.go | // Copyright 2018-2019 Workiva Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); | // http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package eva
import (
"fmt"
"github.com/Workiva/eva-client-go/edn"
)
type rawIntImpl int64
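// RawInt wraps an int64 so it can be used where an edn.Serializable value is expected.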
func RawInt(item int64) edn.Serializable {
return rawIntImpl(item)
}
// Int returns the underlying int64 value.
func (item rawIntImpl) Int() int64 {
return int64(item)
}
// String returns the value formatted as a decimal string.
func (item rawIntImpl) String() string {
return fmt.Sprintf("%d", item.Int())
}
// Serialize will convert this structure to an edn string.
func (item rawIntImpl) Serialize(serialize edn.Serializer) (string, error) {
return item.String(), nil
} | // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// |
Animal.js | import Dynamic from './Dynamic.js';
export default class Animal extends Dynamic {
constructor ( name ) {
console.log('in Animal');
// end of constructor | super( name );
}
} // end of class | |
encoder.rs | use rolling_hash::RollingHash;
use std::cmp;
use std::io;
use std::io::{BufReader, Read, Seek, Write};
use std::mem;
/// CPU/memory-efficient hash map from a hash value to multiple window indexes.
/// Window hashes must be inserted backward (from the end of the data toward the start).
pub struct WindowHashMap {
window_size: usize,
current_window_index: usize,
/// hashed_value => window_index
table: Vec<usize>,
/// For each window, the 1-based index of the next window that hashes to the same table slot (0 ends the chain).
next_window_indexes: Vec<usize>,
}
struct Matches<'a> {
hash_map: &'a WindowHashMap,
window_index: usize,
}
impl<'a> Iterator for Matches<'a> {
type Item = u64;
fn next(&mut self) -> Option<u64> {
if self.window_index > 0 {
let real_window_index = self.window_index - 1;
self.window_index = self.hash_map.next_window_indexes[real_window_index];
Some(real_window_index as u64 * self.hash_map.window_size as u64)
} else {
None
}
}
}
impl WindowHashMap {
fn new(file_size: u64, window_size: usize, hash_size: usize) -> WindowHashMap {
let mut table = Vec::with_capacity(hash_size);
table.resize(hash_size, 0);
let indexes_size = (file_size / (window_size as u64)) as usize;
let mut next_window_indexes = Vec::with_capacity(indexes_size);
next_window_indexes.resize(indexes_size, 0);
WindowHashMap {
window_size,
current_window_index: indexes_size,
table,
next_window_indexes,
}
}
fn prepend_window(&mut self, hash_value: u32) {
assert!(self.current_window_index > 0);
let table_index = (hash_value as usize) % self.table.len();
let found_window_index = &mut self.table[table_index];
if *found_window_index > 0 {
            // The chain link for this window lives at its 0-based slot.
            self.next_window_indexes[self.current_window_index - 1] = *found_window_index;
}
*found_window_index = self.current_window_index;
self.current_window_index -= 1;
}
fn find_matches<'a>(&'a self, hash_value: u32) -> Matches<'a> |
}
pub struct VCDiffEncoder<OLD: Read + Seek, NEW: Read + Seek> {
diff_window_size: usize,
old: OLD,
old_hash_map: WindowHashMap,
new: NEW,
new_hash_map: WindowHashMap,
}
fn hash_map<F: Read + Seek>(
file: &mut F,
rolling_hash: &RollingHash,
) -> Result<WindowHashMap, io::Error> {
let file_size = file.seek(io::SeekFrom::End(0))?;
let diff_window_size = rolling_hash.window_size();
let diff_window_size_u64 = diff_window_size as u64;
let mut hash_map = WindowHashMap::new(file_size, diff_window_size, 1 << 28 /* 268 MB */);
let mut position = file_size - (file_size % diff_window_size_u64);
let mut buffer = [0u8; 32768];
let buffer_len = buffer.len() - buffer.len() % diff_window_size; // force alignment
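    // Walk the file backwards in window-aligned chunks so windows are prepended from the end toward the start, as WindowHashMap requires.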
while position > 0 {
let read_size = cmp::min(position, buffer_len as u64);
let mut read_size_usize = read_size as usize;
file.seek(io::SeekFrom::Start(position - read_size))?;
file.read(&mut buffer[0..read_size_usize])?;
position -= read_size;
while read_size_usize > 0 {
read_size_usize -= diff_window_size;
let h = rolling_hash.hash(&buffer[read_size_usize..diff_window_size]);
hash_map.prepend_window(h);
}
}
file.seek(io::SeekFrom::Start(0))?;
Ok(hash_map)
}
impl<OLD: Read + Seek, NEW: Read + Seek> VCDiffEncoder<OLD, NEW> {
pub fn new(
mut old: OLD,
mut new: NEW,
diff_window_size: usize,
) -> Result<VCDiffEncoder<OLD, NEW>, io::Error> {
assert!(diff_window_size >= 4);
let rolling_hash = RollingHash::new(diff_window_size);
let old_hash_map = hash_map(&mut old, &rolling_hash)?;
let new_hash_map = hash_map(&mut new, &rolling_hash)?;
Ok(VCDiffEncoder {
diff_window_size,
old,
old_hash_map,
new,
new_hash_map,
})
}
}
| {
let table_index = (hash_value as usize) % self.table.len();
let found_window_index = self.table[table_index];
Matches {
hash_map: self,
window_index: found_window_index,
}
} |
log_test.go | package log
import (
"context"
"testing"
"github.com/bilibili/kratos/pkg/net/metadata"
"github.com/stretchr/testify/assert"
)
func initStdout() {
conf := &Config{
Stdout: true,
}
Init(conf)
}
func initFile() {
conf := &Config{
Dir: "/tmp",
// VLevel: 2,
Module: map[string]int32{"log_test": 1},
}
Init(conf)
}
type TestLog struct {
A string
B int
C string
D string
}
func testLog(t *testing.T) {
t.Run("Fatal", func(t *testing.T) {
Fatal("hello %s", "world")
Fatalv(context.Background(), KV("key", 2222222), KV("test2", "test"))
Fatalc(context.Background(), "keys: %s %s...", "key1", "key2")
})
t.Run("Error", func(t *testing.T) {
Error("hello %s", "world")
Errorv(context.Background(), KV("key", 2222222), KV("test2", "test"))
Errorc(context.Background(), "keys: %s %s...", "key1", "key2")
})
t.Run("Warn", func(t *testing.T) {
Warn("hello %s", "world")
Warnv(context.Background(), KV("key", 2222222), KV("test2", "test"))
Warnc(context.Background(), "keys: %s %s...", "key1", "key2")
})
t.Run("Info", func(t *testing.T) {
Info("hello %s", "world")
Infov(context.Background(), KV("key", 2222222), KV("test2", "test"))
Infoc(context.Background(), "keys: %s %s...", "key1", "key2")
})
t.Run("Debug", func(t *testing.T) {
Debug("hello %s", "world")
Debugv(context.Background(), KV("key", 2222222), KV("test2", "test"))
Debugc(context.Background(), "keys: %s %s...", "key1", "key2")
})
}
func TestFile(t *testing.T) {
initFile()
testLog(t)
assert.Equal(t, nil, Close())
}
func TestStdout(t *testing.T) {
initStdout()
testLog(t)
assert.Equal(t, nil, Close())
}
func TestLogW(t *testing.T) {
D := logw([]interface{}{"i", "like", "a", "dog"})
if len(D) != 2 || D[0].Key != "i" || D[0].Value != "like" || D[1].Key != "a" || D[1].Value != "dog" {
t.Fatalf("logw out put should be ' {i like} {a dog}'")
}
D = logw([]interface{}{"i", "like", "dog"})
if len(D) != 1 || D[0].Key != "i" || D[0].Value != "like" {
t.Fatalf("logw out put should be ' {i like}'")
}
}
func TestLogWithMirror(t *testing.T) {
Info("test log")
mdcontext := metadata.NewContext(context.Background(), metadata.MD{metadata.Mirror: "true"})
Infov(mdcontext, KV("key1", "val1"), KV("key2", ""), KV("log", "log content"), KV("msg", "msg content"))
mdcontext = metadata.NewContext(context.Background(), metadata.MD{metadata.Mirror: "***"})
Infov(mdcontext, KV("key1", "val1"), KV("key2", ""), KV("log", "log content"), KV("msg", "msg content"))
Infov(context.Background(), KV("key1", "val1"), KV("key2", ""), KV("log", "log content"), KV("msg", "msg content"))
}
func TestOverwriteSouce(t *testing.T) {
ctx := context.Background()
t.Run("test source kv string", func(t *testing.T) {
Infov(ctx, KVString("source", "test"))
})
t.Run("test source kv string", func(t *testing.T) {
Infov(ctx, KV("source", "test"))
})
}
func BenchmarkLog(b *testing.B) | {
ctx := context.Background()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
Infov(ctx, KVString("test", "hello"), KV("int", 34), KV("hhh", "hhhh"))
}
})
} |
|
metrics.go | // Copyright 2018 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
restapi "github.com/cilium/cilium/api/v1/server/restapi/metrics"
"github.com/cilium/cilium/pkg/api"
"github.com/cilium/cilium/pkg/metrics"
"github.com/cilium/cilium/pkg/option"
"github.com/go-openapi/runtime/middleware"
)
type getMetrics struct {
daemon *Daemon
}
// NewGetMetricsHandler returns the metrics handler
func NewGetMetricsHandler(d *Daemon) restapi.GetMetricsHandler {
return &getMetrics{daemon: d}
}
func (h *getMetrics) Handle(params restapi.GetMetricsParams) middleware.Responder {
metrics, err := metrics.DumpMetrics()
if err != nil {
return api.Error(
restapi.GetMetricsInternalServerErrorCode,
fmt.Errorf("Cannot gather metrics from daemon"))
}
return restapi.NewGetMetricsOK().WithPayload(metrics)
}
func initMetrics() <-chan error {
var errs <-chan error
if option.Config.PrometheusServeAddr != "" {
log.Infof("Serving prometheus metrics on %s", option.Config.PrometheusServeAddr)
errs = metrics.Enable(option.Config.PrometheusServeAddr)
} | return errs
} | |
index.js | import React from "react";
import Button from "./../Button";
import { getImg } from "../../../utils/Helper";
import Styles from './Signup.scss';
import { Link } from 'react-router-dom';
class | extends React.Component {
constructor(props) {
super(props);
this.state = {};
}
handleClick = () => {
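        // mark the user as logged out in localStorage and ask the parent to hide this panel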
localStorage.setItem('login', false);
this.props.handleHide();
}
render() {
return (
<div className="signup" >
<div>
<div className="container userinfo">
<img src={getImg('home/users/lima.png')} alt="LIMA" />
<div className="user-info">
<p className="username">Jonathan Lima</p>
<p className="usermail">[email protected]</p>
</div>
</div>
<div className="container">
<img className="small" src={getImg('home/icons/watchclock.png')} alt="" />
<p>Central do Cliente</p>
</div>
<div className="container text-center">
<img className="big" src={getImg('home/icons/key.png')} alt="" />
<div>
<Link to='/mypage/keys'>Minhas Chaves</Link>
</div>
</div>
<div className="semi-container">
<div className="semi-half">
<img className="small" src={getImg('home/icons/note.png')} alt="" />
<Link to='/mypage/request'>Pedidos</Link>
</div>
<div className="semi-half">
<img className="small" src={getImg('home/icons/db.png')} alt="" />
<Link to='/mypage/data'>Dados</Link>
</div>
</div>
<div className="container">
<img className="small" src={getImg('home/icons/heart.png')} alt="" />
<Link to='/mypage/wishlist'>Lista de Desejos</Link>
</div>
<button onClick={() => this.handleClick()} style={{ backgroundColor: "#F15A24" }} >
Sair
</button>
</div>
</div>
)
}
}
export default Signin | Signin |
18-extending_bound_32.py | from typing import Tuple, FrozenSet
from collections.abc import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
|
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
msat_term, msat_term]:
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
bool_type = msat_get_bool_type(menv)
real_type = msat_get_rational_type(menv)
i = msat_declare_function(menv, "i", real_type)
i = msat_make_constant(menv, i)
r = msat_declare_function(menv, "r", real_type)
r = msat_make_constant(menv, r)
l = msat_declare_function(menv, "l", real_type)
l = msat_make_constant(menv, l)
inc_i = msat_declare_function(menv, "inc_i", bool_type)
inc_i = msat_make_constant(menv, inc_i)
x_i = msat_declare_function(menv, name_next("i"), real_type)
x_i = msat_make_constant(menv, x_i)
x_r = msat_declare_function(menv, name_next("r"), real_type)
x_r = msat_make_constant(menv, x_r)
x_l = msat_declare_function(menv, name_next("l"), real_type)
x_l = msat_make_constant(menv, x_l)
x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
x_inc_i = msat_make_constant(menv, x_inc_i)
curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
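    # map each current-state symbol to its primed (next-state) counterpart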
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
r_gt_0 = msat_make_gt(menv, r, zero)
r_lt_l = msat_make_lt(menv, r, l)
i_geq_0 = msat_make_geq(menv, i, zero)
init = msat_make_and(menv, r_gt_0, r_lt_l)
init = msat_make_and(menv, init,
msat_make_and(menv, i_geq_0,
msat_make_not(menv, inc_i)))
init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
# r' = r
trans = msat_make_equal(menv, x_r, r)
# i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
i_lt_l = msat_make_lt(menv, i, l)
x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
msat_make_equal(menv, x_i,
msat_make_plus(menv, i, one)))
x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
msat_make_equal(menv, x_i, i))
x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
x_l_eq_l = msat_make_equal(menv, x_l, l)
x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
x_l_eq_l)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_lt_l,
x_i_eq_i_p_1_or_i_and_x_l_eq_l))
# i >= l -> i' = 0 & l' = l + 1 & !inc_i'
i_geq_l = msat_make_geq(menv, i, l)
x_i_eq_0 = msat_make_equal(menv, x_i, zero)
x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
msat_make_and(menv, x_i_eq_0,
x_l_eq_l_p_1),
msat_make_not(menv, x_inc_i))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_geq_l,
x_i_eq_0_and_x_l_eq_l_p_1))
# (G F inc_i) -> ! G F r > i
G_F_x_i_gt_i = enc.make_G(enc.make_F(inc_i))
r_gt_i = msat_make_gt(menv, r, i)
n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
ltl = msat_make_impl(menv, G_F_x_i_gt_i, n_G_F_r_gt_i)
return TermMap(curr2next), init, trans, ltl
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
i = mgr.Symbol("i", types.REAL)
r = mgr.Symbol("r", types.REAL)
l = mgr.Symbol("l", types.REAL)
inc_i = mgr.Symbol("inc_i", types.BOOL)
symbs = frozenset([i, r, l, inc_i])
x_i = symb_to_next(mgr, i)
x_r = symb_to_next(mgr, r)
x_l = symb_to_next(mgr, l)
x_inc_i = symb_to_next(mgr, inc_i)
res = []
n0 = mgr.Real(0)
n1 = mgr.Real(1)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.GE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Plus(i, n1)))
h_i = Hint("h_i0", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
loc = Location(env, mgr.GE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r0", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
loc = Location(env, mgr.GE(l, n0))
loc.set_progress(0, mgr.Equals(x_l, mgr.Plus(l, n1)))
h_l = Hint("h_l0", env, frozenset([l]), symbs)
h_l.set_locs([loc])
res.append(h_l)
loc = Location(env, inc_i)
loc.set_progress(0, x_inc_i)
h_inc = Hint("h_inc0", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.LE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Minus(i, n1)))
h_i = Hint("h_i1", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
loc = Location(env, mgr.LE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Minus(r, n1)))
h_r = Hint("h_r1", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
loc = Location(env, mgr.LE(l, n0))
loc.set_progress(0, mgr.Equals(x_l, mgr.Minus(l, n1)))
h_l = Hint("h_l1", env, frozenset([l]), symbs)
h_l.set_locs([loc])
res.append(h_l)
loc = Location(env, mgr.Not(inc_i))
loc.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc1", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r2", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc2", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc0 = Location(env, mgr.GE(i, n0), mgr.GE(l, n0),
stutterT=mgr.Equals(x_i, mgr.Plus(i, l)))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i3", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
loc0 = Location(env, mgr.GE(r, n0), mgr.GE(i, n0),
stutterT=mgr.Equals(x_r, mgr.Plus(r, i)))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r3", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc0 = Location(env, mgr.GE(l, n0), mgr.GE(r, n0),
stutterT=mgr.Equals(x_l, mgr.Plus(l, r)))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l3", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc3", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(2, mgr.Equals(x_i, i))
loc2 = Location(env, mgr.GE(i, n0))
loc2.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i4", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1, loc2])
res.append(h_i)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(2, mgr.Equals(x_r, mgr.Plus(r, n1)))
loc2 = Location(env, mgr.GE(r, n0))
loc2.set_progress(0, mgr.Equals(x_r, r))
h_r = Hint("h_r4", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1, loc2])
res.append(h_r)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(2, mgr.Equals(x_l, l))
loc2 = Location(env, mgr.GE(l, n0))
loc2.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l4", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1, loc2])
res.append(h_l)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(2, mgr.Not(x_inc_i))
loc2 = Location(env, mgr.Not(inc_i))
loc2.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc4", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1, loc2])
res.append(h_inc)
return frozenset(res)
| geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq) |
key.rs | use std::convert::TryFrom;
use crate::error::HederaError;
use crate::key_list::KeyList;
use crate::proto::{services, ToProto};
use crate::ContractId;
use crate::PrivateKey;
use crate::PublicKey;
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Key {
Ed25519(PublicKey),
KeyList(KeyList),
ThresholdKey(KeyList),
ContractId(ContractId),
DelegatableContractId(ContractId),
}
impl ToProto<services::Key> for Key {
fn to_proto(&self) -> Result<services::Key, HederaError> {
let key = match &*self {
Key::Ed25519(key) => key.to_proto()?,
Key::KeyList(key_list) => key_list.to_proto_key_list_key()?,
Key::ThresholdKey(key_list) => key_list.to_proto_threshold_key()?,
Key::ContractId(id) => services::key::Key::ContractId(id.to_proto()?),
Key::DelegatableContractId(id) => services::key::Key::DelegatableContractId(id.to_proto()?), | }
}
impl TryFrom<services::Key> for Key {
type Error = HederaError;
fn try_from(services: services::Key) -> Result<Key, Self::Error> {
match services.key {
Some(pb_key) => {
let key = match pb_key {
services::key::Key::Ed25519(bytes) => Key::Ed25519(PublicKey::from_hex_bytes(bytes)?),
services::key::Key::ThresholdKey(key) => Key::ThresholdKey(KeyList::try_from(key)?),
services::key::Key::KeyList(key) => Key::KeyList(KeyList::try_from(key)?),
services::key::Key::ContractId(id) => Key::ContractId(ContractId::from(id)),
services::key::Key::DelegatableContractId(id) => {
Key::DelegatableContractId(ContractId::from(id))
}
_ => return Err(HederaError::UnsupportedKeyType),
};
Ok(key)
}
None => Err(HederaError::NoInnerKey),
}
}
}
impl From<PrivateKey> for Key {
fn from(pk: PrivateKey) -> Key {
Key::Ed25519(pk.public())
}
}
impl From<PublicKey> for Key {
fn from(pk: PublicKey) -> Key {
Key::Ed25519(pk)
}
} | };
Ok(services::Key { key: Some(key) }) |
api.rs | use std::collections::HashMap;
use std::cell::RefCell;
use std::default::Default;
use std::collections::BTreeMap;
use serde_json as json;
use std::io;
use std::fs;
use std::mem;
use std::thread::sleep;
use crate::client;
// ##############
// UTILITIES ###
// ############
/// Identifies an OAuth2 authorization scope.
/// A scope is needed when requesting an
/// [authorization token](https://developers.google.com/youtube/v3/guides/authentication).
#[derive(PartialEq, Eq, Hash)]
pub enum Scope {
/// View and manage your data across Google Cloud Platform services
CloudPlatform,
/// View and manage your Google Compute Engine resources
Compute,
}
impl AsRef<str> for Scope {
fn as_ref(&self) -> &str {
match *self {
Scope::CloudPlatform => "https://www.googleapis.com/auth/cloud-platform",
Scope::Compute => "https://www.googleapis.com/auth/compute",
}
}
}
impl Default for Scope {
fn default() -> Scope {
Scope::Compute
}
}
// ########
// HUB ###
// ######
/// Central instance to access all CloudOSLogin related resource activities
///
/// # Examples
///
/// Instantiate a new hub
///
/// ```test_harness,no_run
/// extern crate hyper;
/// extern crate hyper_rustls;
/// extern crate yup_oauth2 as oauth2;
/// extern crate google_oslogin1 as oslogin1;
/// use oslogin1::api::SshPublicKey;
/// use oslogin1::{Result, Error};
/// # async fn dox() {
/// use std::default::Default;
/// use oauth2;
/// use oslogin1::CloudOSLogin;
///
/// // Get an ApplicationSecret instance by some means. It contains the `client_id` and
/// // `client_secret`, among other things.
/// let secret: oauth2::ApplicationSecret = Default::default();
/// // Instantiate the authenticator. It will choose a suitable authentication flow for you,
/// // unless you replace `None` with the desired Flow.
/// // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about
/// // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and
/// // retrieve them from storage.
/// let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// secret,
/// yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// ).build().await.unwrap();
/// let mut hub = CloudOSLogin::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = SshPublicKey::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.users().ssh_public_keys_patch(req, "name")
/// .update_mask("At")
/// .doit().await;
///
/// match result {
/// Err(e) => match e {
/// // The Error enum provides details about what exactly happened.
/// // You can also just use its `Debug`, `Display` or `Error` traits
/// Error::HttpError(_)
/// |Error::Io(_)
/// |Error::MissingAPIKey
/// |Error::MissingToken(_)
/// |Error::Cancelled
/// |Error::UploadSizeLimitExceeded(_, _)
/// |Error::Failure(_)
/// |Error::BadRequest(_)
/// |Error::FieldClash(_)
/// |Error::JsonDecodeError(_, _) => println!("{}", e),
/// },
/// Ok(res) => println!("Success: {:?}", res),
/// }
/// # }
/// ```
#[derive(Clone)]
pub struct CloudOSLogin<> {
client: hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>,
auth: oauth2::authenticator::Authenticator<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>>,
_user_agent: String,
_base_url: String,
_root_url: String,
}
impl<'a, > client::Hub for CloudOSLogin<> {}
impl<'a, > CloudOSLogin<> {
pub fn new(client: hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>, authenticator: oauth2::authenticator::Authenticator<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>>) -> CloudOSLogin<> {
CloudOSLogin {
client,
auth: authenticator,
_user_agent: "google-api-rust-client/2.0.8".to_string(),
_base_url: "https://oslogin.googleapis.com/".to_string(),
_root_url: "https://oslogin.googleapis.com/".to_string(),
}
}
pub fn users(&'a self) -> UserMethods<'a> {
UserMethods { hub: &self }
}
/// Set the user-agent header field to use in all requests to the server.
/// It defaults to `google-api-rust-client/2.0.8`.
///
/// Returns the previously set user-agent.
pub fn user_agent(&mut self, agent_name: String) -> String {
mem::replace(&mut self._user_agent, agent_name)
}
/// Set the base url to use in all requests to the server.
/// It defaults to `https://oslogin.googleapis.com/`.
///
/// Returns the previously set base url.
pub fn base_url(&mut self, new_base_url: String) -> String {
mem::replace(&mut self._base_url, new_base_url)
}
/// Set the root url to use in all requests to the server.
/// It defaults to `https://oslogin.googleapis.com/`.
///
/// Returns the previously set root url.
pub fn root_url(&mut self, new_root_url: String) -> String {
mem::replace(&mut self._root_url, new_root_url)
}
}
// ############
// SCHEMAS ###
// ##########
/// A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [projects delete users](UserProjectDeleteCall) (response)
/// * [ssh public keys delete users](UserSshPublicKeyDeleteCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Empty { _never_set: Option<bool> }
impl client::ResponseResult for Empty {}
/// A response message for importing an SSH public key.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [import ssh public key users](UserImportSshPublicKeyCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ImportSshPublicKeyResponse {
/// Detailed information about import results.
pub details: Option<String>,
/// The login profile information for the user.
#[serde(rename="loginProfile")]
pub login_profile: Option<LoginProfile>,
}
impl client::ResponseResult for ImportSshPublicKeyResponse {}
/// The user profile information used for logging in to a virtual machine on Google Compute Engine.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [get login profile users](UserGetLoginProfileCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct LoginProfile {
/// Required. A unique user ID.
pub name: Option<String>,
/// The list of POSIX accounts associated with the user.
#[serde(rename="posixAccounts")]
pub posix_accounts: Option<Vec<PosixAccount>>,
/// A map from SSH public key fingerprint to the associated key object.
#[serde(rename="sshPublicKeys")]
pub ssh_public_keys: Option<HashMap<String, SshPublicKey>>,
}
impl client::ResponseResult for LoginProfile {}
/// The POSIX account information associated with a Google account.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct PosixAccount {
/// Output only. A POSIX account identifier.
#[serde(rename="accountId")]
pub account_id: Option<String>,
/// The GECOS (user information) entry for this account.
pub gecos: Option<String>,
/// The default group ID.
pub gid: Option<String>,
/// The path to the home directory for this account.
#[serde(rename="homeDirectory")]
pub home_directory: Option<String>,
/// Output only. The canonical resource name.
pub name: Option<String>,
/// The operating system type where this account applies.
#[serde(rename="operatingSystemType")]
pub operating_system_type: Option<String>,
/// Only one POSIX account can be marked as primary.
pub primary: Option<bool>,
/// The path to the logic shell for this account.
pub shell: Option<String>,
/// System identifier for which account the username or uid applies to. By default, the empty value is used.
#[serde(rename="systemId")]
pub system_id: Option<String>,
/// The user ID.
pub uid: Option<String>,
/// The username of the POSIX account.
pub username: Option<String>,
}
impl client::Part for PosixAccount {}
/// The SSH public key information associated with a Google account.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [ssh public keys get users](UserSshPublicKeyGetCall) (response)
/// * [ssh public keys patch users](UserSshPublicKeyPatchCall) (request|response)
/// * [import ssh public key users](UserImportSshPublicKeyCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct SshPublicKey {
/// An expiration time in microseconds since epoch.
#[serde(rename="expirationTimeUsec")]
pub expiration_time_usec: Option<String>,
/// Output only. The SHA-256 fingerprint of the SSH public key.
pub fingerprint: Option<String>,
/// Public key text in SSH format, defined by RFC4253 section 6.6.
pub key: Option<String>,
/// Output only. The canonical resource name.
pub name: Option<String>,
}
impl client::RequestValue for SshPublicKey {}
impl client::ResponseResult for SshPublicKey {}
// ###################
// MethodBuilders ###
// #################
/// A builder providing access to all methods supported on *user* resources.
/// It is not used directly, but through the `CloudOSLogin` hub.
///
/// # Example
///
/// Instantiate a resource builder
///
/// ```test_harness,no_run
/// extern crate hyper;
/// extern crate hyper_rustls;
/// extern crate yup_oauth2 as oauth2;
/// extern crate google_oslogin1 as oslogin1;
///
/// # async fn dox() {
/// use std::default::Default;
/// use oauth2;
/// use oslogin1::CloudOSLogin;
///
/// let secret: oauth2::ApplicationSecret = Default::default();
/// let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// secret,
/// yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// ).build().await.unwrap();
/// let mut hub = CloudOSLogin::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // Usually you wouldn't bind this to a variable, but keep calling *CallBuilders*
/// // like `get_login_profile(...)`, `import_ssh_public_key(...)`, `projects_delete(...)`, `ssh_public_keys_delete(...)`, `ssh_public_keys_get(...)` and `ssh_public_keys_patch(...)`
/// // to build up your call.
/// let rb = hub.users();
/// # }
/// ```
pub struct UserMethods<'a>
where {
hub: &'a CloudOSLogin<>,
}
impl<'a> client::MethodsBuilder for UserMethods<'a> {}
impl<'a> UserMethods<'a> {
/// Create a builder to help you perform the following task:
///
/// Deletes a POSIX account.
///
/// # Arguments
///
/// * `name` - Required. A reference to the POSIX account to update. POSIX accounts are identified by the project ID they are associated with. A reference to the POSIX account is in format `users/{user}/projects/{project}`.
pub fn projects_delete(&self, name: &str) -> UserProjectDeleteCall<'a> {
UserProjectDeleteCall {
hub: self.hub,
_name: name.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Deletes an SSH public key.
///
/// # Arguments
///
/// * `name` - Required. The fingerprint of the public key to update. Public keys are identified by their SHA-256 fingerprint. The fingerprint of the public key is in format `users/{user}/sshPublicKeys/{fingerprint}`.
pub fn ssh_public_keys_delete(&self, name: &str) -> UserSshPublicKeyDeleteCall<'a> {
UserSshPublicKeyDeleteCall {
hub: self.hub,
_name: name.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Retrieves an SSH public key.
///
/// # Arguments
///
/// * `name` - Required. The fingerprint of the public key to retrieve. Public keys are identified by their SHA-256 fingerprint. The fingerprint of the public key is in format `users/{user}/sshPublicKeys/{fingerprint}`.
pub fn ssh_public_keys_get(&self, name: &str) -> UserSshPublicKeyGetCall<'a> {
UserSshPublicKeyGetCall {
hub: self.hub,
_name: name.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Updates an SSH public key and returns the profile information. This method supports patch semantics.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `name` - Required. The fingerprint of the public key to update. Public keys are identified by their SHA-256 fingerprint. The fingerprint of the public key is in format `users/{user}/sshPublicKeys/{fingerprint}`.
pub fn ssh_public_keys_patch(&self, request: SshPublicKey, name: &str) -> UserSshPublicKeyPatchCall<'a> {
UserSshPublicKeyPatchCall {
hub: self.hub,
_request: request,
_name: name.to_string(),
_update_mask: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Retrieves the profile information used for logging in to a virtual machine on Google Compute Engine.
///
/// # Arguments
///
/// * `name` - Required. The unique ID for the user in format `users/{user}`.
pub fn get_login_profile(&self, name: &str) -> UserGetLoginProfileCall<'a> {
UserGetLoginProfileCall {
hub: self.hub,
_name: name.to_string(),
_system_id: Default::default(),
_project_id: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Adds an SSH public key and returns the profile information. Default POSIX account information is set when no username and UID exist as part of the login profile.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `parent` - Required. The unique ID for the user in format `users/{user}`.
pub fn import_ssh_public_key(&self, request: SshPublicKey, parent: &str) -> UserImportSshPublicKeyCall<'a> {
UserImportSshPublicKeyCall {
hub: self.hub,
_request: request,
_parent: parent.to_string(),
_project_id: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
}
// ###################
// CallBuilders ###
// #################
/// Deletes a POSIX account.
///
/// A builder for the *projects.delete* method supported by a *user* resource.
/// It is not used directly, but through a `UserMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_oslogin1 as oslogin1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use oslogin1::CloudOSLogin;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudOSLogin::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.users().projects_delete("name")
/// .doit().await;
/// # }
/// ```
pub struct UserProjectDeleteCall<'a>
where {
hub: &'a CloudOSLogin<>,
_name: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for UserProjectDeleteCall<'a> {}
impl<'a> UserProjectDeleteCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Empty)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "oslogin.users.projects.delete",
http_method: hyper::Method::DELETE });
let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
params.push(("name", self._name.to_string()));
for &field in ["alt", "name"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/{+name}";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{+name}", "name")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["name"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::DELETE).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
let server_error = json::from_str::<client::ServerError>(&res_body_string)
.or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
.ok();
if let client::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<client::ErrorResponse>(&res_body_string){
Err(_) => Err(client::Error::Failure(res)),
Ok(serr) => Err(client::Error::BadRequest(serr))
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Required. A reference to the POSIX account to update. POSIX accounts are identified by the project ID they are associated with. A reference to the POSIX account is in format `users/{user}/projects/{project}`.
///
/// Sets the *name* path property to the given value.
///
/// Even though the property as already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> UserProjectDeleteCall<'a> {
self._name = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> UserProjectDeleteCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> UserProjectDeleteCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient; a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> UserProjectDeleteCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Deletes an SSH public key.
///
/// A builder for the *sshPublicKeys.delete* method supported by a *user* resource.
/// It is not used directly, but through a `UserMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_oslogin1 as oslogin1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use oslogin1::CloudOSLogin;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudOSLogin::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.users().ssh_public_keys_delete("name")
/// .doit().await;
/// # }
/// ```
pub struct UserSshPublicKeyDeleteCall<'a>
where {
hub: &'a CloudOSLogin<>,
_name: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for UserSshPublicKeyDeleteCall<'a> {}
impl<'a> UserSshPublicKeyDeleteCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Empty)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "oslogin.users.sshPublicKeys.delete",
http_method: hyper::Method::DELETE });
let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
params.push(("name", self._name.to_string()));
for &field in ["alt", "name"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/{+name}";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{+name}", "name")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["name"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::DELETE).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
let server_error = json::from_str::<client::ServerError>(&res_body_string)
.or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
.ok();
if let client::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<client::ErrorResponse>(&res_body_string){
Err(_) => Err(client::Error::Failure(res)),
Ok(serr) => Err(client::Error::BadRequest(serr))
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Required. The fingerprint of the public key to update. Public keys are identified by their SHA-256 fingerprint. The fingerprint of the public key is in format `users/{user}/sshPublicKeys/{fingerprint}`.
///
/// Sets the *name* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> UserSshPublicKeyDeleteCall<'a> {
self._name = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> UserSshPublicKeyDeleteCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> UserSshPublicKeyDeleteCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient; a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> UserSshPublicKeyDeleteCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
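// Every call builder in this file follows the same pattern; a minimal usage
// sketch (the resource name below is a placeholder, the methods are the ones
// defined on `UserSshPublicKeyDeleteCall` above):
//
//     let result = hub.users().ssh_public_keys_delete("users/me/sshPublicKeys/fingerprint")
//         .param("fields", "name")
//         .add_scope(Scope::CloudPlatform)
//         .doit().await;
//
// `param()` adds extra query parameters (they are checked against the known
// parameter names inside `doit()`), while `add_scope()` extends the set of
// OAuth scopes used when a token is requested.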
/// Retrieves an SSH public key.
///
/// A builder for the *sshPublicKeys.get* method supported by a *user* resource.
/// It is not used directly, but through a `UserMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_oslogin1 as oslogin1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use oslogin1::CloudOSLogin;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudOSLogin::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.users().ssh_public_keys_get("name")
/// .doit().await;
/// # }
/// ```
pub struct UserSshPublicKeyGetCall<'a>
where {
hub: &'a CloudOSLogin<>,
_name: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for UserSshPublicKeyGetCall<'a> {}
impl<'a> UserSshPublicKeyGetCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, SshPublicKey)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "oslogin.users.sshPublicKeys.get",
http_method: hyper::Method::GET });
let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
params.push(("name", self._name.to_string()));
for &field in ["alt", "name"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/{+name}";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{+name}", "name")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["name"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
let server_error = json::from_str::<client::ServerError>(&res_body_string)
.or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
.ok();
if let client::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<client::ErrorResponse>(&res_body_string){
Err(_) => Err(client::Error::Failure(res)),
Ok(serr) => Err(client::Error::BadRequest(serr))
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Required. The fingerprint of the public key to retrieve. Public keys are identified by their SHA-256 fingerprint. The fingerprint of the public key is in format `users/{user}/sshPublicKeys/{fingerprint}`.
///
/// Sets the *name* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> UserSshPublicKeyGetCall<'a> {
self._name = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> UserSshPublicKeyGetCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> UserSshPublicKeyGetCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient; a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> UserSshPublicKeyGetCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Updates an SSH public key and returns the profile information. This method supports patch semantics.
///
/// A builder for the *sshPublicKeys.patch* method supported by a *user* resource.
/// It is not used directly, but through a `UserMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_oslogin1 as oslogin1;
/// use oslogin1::api::SshPublicKey;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use oslogin1::CloudOSLogin;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudOSLogin::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = SshPublicKey::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.users().ssh_public_keys_patch(req, "name")
/// .update_mask("amet.")
/// .doit().await;
/// # }
/// ```
pub struct UserSshPublicKeyPatchCall<'a>
where {
hub: &'a CloudOSLogin<>,
_request: SshPublicKey,
_name: String,
_update_mask: Option<String>,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for UserSshPublicKeyPatchCall<'a> {}
impl<'a> UserSshPublicKeyPatchCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, SshPublicKey)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "oslogin.users.sshPublicKeys.patch",
http_method: hyper::Method::PATCH });
let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
params.push(("name", self._name.to_string()));
if let Some(value) = self._update_mask {
params.push(("updateMask", value.to_string()));
}
for &field in ["alt", "name", "updateMask"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/{+name}";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{+name}", "name")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["name"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::PATCH).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
let server_error = json::from_str::<client::ServerError>(&res_body_string)
.or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
.ok();
if let client::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<client::ErrorResponse>(&res_body_string){
Err(_) => Err(client::Error::Failure(res)),
Ok(serr) => Err(client::Error::BadRequest(serr))
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
///
/// Sets the *request* property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: SshPublicKey) -> UserSshPublicKeyPatchCall<'a> {
self._request = new_value;
self
}
/// Required. The fingerprint of the public key to update. Public keys are identified by their SHA-256 fingerprint. The fingerprint of the public key is in format `users/{user}/sshPublicKeys/{fingerprint}`.
///
/// Sets the *name* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> UserSshPublicKeyPatchCall<'a> {
self._name = new_value.to_string();
self
}
/// Mask to control which fields get updated. Updates all if not present.
///
/// Sets the *update mask* query property to the given value.
pub fn update_mask(mut self, new_value: &str) -> UserSshPublicKeyPatchCall<'a> {
self._update_mask = Some(new_value.to_string());
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> UserSshPublicKeyPatchCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> UserSshPublicKeyPatchCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient; a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> UserSshPublicKeyPatchCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Retrieves the profile information used for logging in to a virtual machine on Google Compute Engine.
///
/// A builder for the *getLoginProfile* method supported by a *user* resource.
/// It is not used directly, but through a `UserMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_oslogin1 as oslogin1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use oslogin1::CloudOSLogin;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudOSLogin::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.users().get_login_profile("name")
/// .system_id("ipsum")
/// .project_id("gubergren")
/// .doit().await;
/// # }
/// ```
pub struct UserGetLoginProfileCall<'a> | _system_id: Option<String>,
_project_id: Option<String>,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for UserGetLoginProfileCall<'a> {}
impl<'a> UserGetLoginProfileCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, LoginProfile)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "oslogin.users.getLoginProfile",
http_method: hyper::Method::GET });
let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
params.push(("name", self._name.to_string()));
if let Some(value) = self._system_id {
params.push(("systemId", value.to_string()));
}
if let Some(value) = self._project_id {
params.push(("projectId", value.to_string()));
}
for &field in ["alt", "name", "systemId", "projectId"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/{+name}/loginProfile";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{+name}", "name")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["name"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
let server_error = json::from_str::<client::ServerError>(&res_body_string)
.or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
.ok();
if let client::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<client::ErrorResponse>(&res_body_string){
Err(_) => Err(client::Error::Failure(res)),
Ok(serr) => Err(client::Error::BadRequest(serr))
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Required. The unique ID for the user in format `users/{user}`.
///
/// Sets the *name* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> UserGetLoginProfileCall<'a> {
self._name = new_value.to_string();
self
}
/// A system ID for filtering the results of the request.
///
/// Sets the *system id* query property to the given value.
pub fn system_id(mut self, new_value: &str) -> UserGetLoginProfileCall<'a> {
self._system_id = Some(new_value.to_string());
self
}
/// The project ID of the Google Cloud Platform project.
///
/// Sets the *project id* query property to the given value.
pub fn project_id(mut self, new_value: &str) -> UserGetLoginProfileCall<'a> {
self._project_id = Some(new_value.to_string());
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> UserGetLoginProfileCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> UserGetLoginProfileCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient; a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> UserGetLoginProfileCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Adds an SSH public key and returns the profile information. Default POSIX account information is set when no username and UID exist as part of the login profile.
///
/// A builder for the *importSshPublicKey* method supported by a *user* resource.
/// It is not used directly, but through a `UserMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_oslogin1 as oslogin1;
/// use oslogin1::api::SshPublicKey;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use oslogin1::CloudOSLogin;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudOSLogin::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = SshPublicKey::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.users().import_ssh_public_key(req, "parent")
/// .project_id("gubergren")
/// .doit().await;
/// # }
/// ```
pub struct UserImportSshPublicKeyCall<'a>
where {
hub: &'a CloudOSLogin<>,
_request: SshPublicKey,
_parent: String,
_project_id: Option<String>,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for UserImportSshPublicKeyCall<'a> {}
impl<'a> UserImportSshPublicKeyCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ImportSshPublicKeyResponse)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "oslogin.users.importSshPublicKey",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
params.push(("parent", self._parent.to_string()));
if let Some(value) = self._project_id {
params.push(("projectId", value.to_string()));
}
for &field in ["alt", "parent", "projectId"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/{+parent}:importSshPublicKey";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
if find_this.as_bytes()[1] == '+' as u8 {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["parent"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
let server_error = json::from_str::<client::ServerError>(&res_body_string)
.or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
.ok();
if let client::Retry::After(d) = dlg.http_failure(&res,
json_server_error,
server_error) {
sleep(d);
continue;
}
dlg.finished(false);
return match json::from_str::<client::ErrorResponse>(&res_body_string){
Err(_) => Err(client::Error::Failure(res)),
Ok(serr) => Err(client::Error::BadRequest(serr))
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
///
/// Sets the *request* property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: SshPublicKey) -> UserImportSshPublicKeyCall<'a> {
self._request = new_value;
self
}
/// Required. The unique ID for the user in format `users/{user}`.
///
/// Sets the *parent* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn parent(mut self, new_value: &str) -> UserImportSshPublicKeyCall<'a> {
self._parent = new_value.to_string();
self
}
/// The project ID of the Google Cloud Platform project.
///
/// Sets the *project id* query property to the given value.
pub fn project_id(mut self, new_value: &str) -> UserImportSshPublicKeyCall<'a> {
self._project_id = Some(new_value.to_string());
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> UserImportSshPublicKeyCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> UserImportSshPublicKeyCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient; a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> UserImportSshPublicKeyCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
} | where {
hub: &'a CloudOSLogin<>,
_name: String, |
connection.py | import json
from typing import Any, Dict, Iterable, List, Tuple, Union
import graphene
from django.db.models import Model as DjangoModel, Q, QuerySet
from graphene.relay.connection import Connection
from graphene_django.types import DjangoObjectType
from graphql.error import GraphQLError
from graphql_relay.connection.connectiontypes import Edge, PageInfo
from graphql_relay.utils import base64, unbase64
from ..core.enums import OrderDirection
ConnectionArguments = Dict[str, Any]
def to_global_cursor(values):
if not isinstance(values, Iterable):
values = [values]
values = [value if value is None else str(value) for value in values]
return base64(json.dumps(values))
def from_global_cursor(cursor) -> List[str]:
values = unbase64(cursor)
return json.loads(values)
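# Cursor round-trip sketch (illustrative values only): cursors are opaque
# base64-encoded JSON lists of stringified sort-key values, so
#     cursor = to_global_cursor(["2021-01-01", 42])   # -> opaque base64 string
#     from_global_cursor(cursor) == ["2021-01-01", "42"]
# Non-string values come back as strings; None survives the round trip as None.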
def get_field_value(instance: DjangoModel, field_name: str):
"""Get field value for given field in filter format 'field__foreign_key_field'."""
field_path = field_name.split("__")
attr = instance
for elem in field_path:
attr = getattr(attr, elem)
if callable(attr):
return "%s" % attr()
return attr
def _prepare_filter_expression(
field_name: str,
index: int,
cursor: List[str],
sorting_fields: List[str],
sorting_direction: str,
) -> Tuple[Q, Dict[str, Union[str, bool]]]:
field_expression: Dict[str, Union[str, bool]] = {}
extra_expression = Q()
for cursor_id, cursor_value in enumerate(cursor[:index]):
field_expression[sorting_fields[cursor_id]] = cursor_value
if sorting_direction == "gt":
extra_expression |= Q(**{f"{field_name}__{sorting_direction}": cursor[index]})
extra_expression |= Q(**{f"{field_name}__isnull": True})
elif cursor[index] is not None:
field_expression[f"{field_name}__{sorting_direction}"] = cursor[index]
else:
field_expression[f"{field_name}__isnull"] = False
return extra_expression, field_expression
def _prepare_filter(
cursor: List[str], sorting_fields: List[str], sorting_direction: str
) -> Q:
"""Create filter arguments based on sorting fields.
:param cursor: list of values that are passed from page_info, used for filtering.
:param sorting_fields: list of fields that were used for sorting.
    :param sorting_direction: keyword direction ('lt', 'gt').
    :return: Q() in the following format
        (OR: ('first_field__gt', 'first_value_from_cursor'),
            (AND: ('second_field__gt', 'second_value_from_cursor'),
            ('first_field', 'first_value_from_cursor')),
            (AND: ('third_field__gt', 'third_value_from_cursor'),
            ('second_field', 'second_value_from_cursor'),
            ('first_field', 'first_value_from_cursor'))
)
"""
filter_kwargs = Q()
for index, field_name in enumerate(sorting_fields):
if cursor[index] is None and sorting_direction == "gt":
continue
extra_expression, field_expression = _prepare_filter_expression(
field_name, index, cursor, sorting_fields, sorting_direction
)
filter_kwargs |= Q(extra_expression, **field_expression)
return filter_kwargs
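# Illustrative expansion (hypothetical cursor and fields): for cursor ["10", "abc"]
# sorted by ["price", "slug"] with direction "gt", _prepare_filter builds roughly
#     Q(Q(price__gt="10") | Q(price__isnull=True))
#     | Q(Q(slug__gt="abc") | Q(slug__isnull=True), price="10")
# i.e. rows strictly after the cursor row, with NULL values treated as coming
# after the cursor.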
def | (args):
first = args.get("first")
last = args.get("last")
if first and not (isinstance(first, int) and first > 0):
raise GraphQLError("Argument `first` must be a non-negative integer.")
if last and not (isinstance(last, int) and last > 0):
raise GraphQLError("Argument `last` must be a non-negative integer.")
if first and last:
raise GraphQLError("Argument `last` cannot be combined with `first`.")
if first and args.get("before"):
raise GraphQLError("Argument `first` cannot be combined with `before`.")
if last and args.get("after"):
raise GraphQLError("Argument `last` cannot be combined with `after`.")
def _get_sorting_fields(sort_by, qs):
sorting_fields = sort_by.get("field")
sorting_attribute = sort_by.get("attribute_id")
if sorting_fields and not isinstance(sorting_fields, list):
return [sorting_fields]
elif not sorting_fields and sorting_attribute is not None:
return qs.model.sort_by_attribute_fields()
elif not sorting_fields:
raise ValueError("Error while preparing cursor values.")
return sorting_fields
def _get_sorting_direction(sort_by, last=None):
direction = sort_by.get("direction", "")
sorting_desc = direction == OrderDirection.DESC
if last:
sorting_desc = not sorting_desc
return "lt" if sorting_desc else "gt"
def _get_page_info(matching_records, cursor, first, last):
requested_count = first or last
page_info = {
"has_previous_page": False,
"has_next_page": False,
"start_cursor": None,
"end_cursor": None,
}
records_left = False
if requested_count is not None:
records_left = len(matching_records) > requested_count
has_pages_before = True if cursor else False
if first:
page_info["has_next_page"] = records_left
page_info["has_previous_page"] = has_pages_before
elif last:
page_info["has_next_page"] = has_pages_before
page_info["has_previous_page"] = records_left
return page_info
def _get_edges_for_connection(edge_type, qs, args, sorting_fields):
before = args.get("before")
after = args.get("after")
first = args.get("first")
last = args.get("last")
cursor = after or before
requested_count = first or last
    # If we receive neither `first` nor `last`, we shouldn't build `edges` and `page_info`
if not first and not last:
return [], {}
if last:
start_slice, end_slice = 1, None
else:
start_slice, end_slice = 0, requested_count
matching_records = list(qs)
if last:
matching_records = list(reversed(matching_records))
if len(matching_records) <= requested_count:
start_slice = 0
page_info = _get_page_info(matching_records, cursor, first, last)
matching_records = matching_records[start_slice:end_slice]
edges = [
edge_type(
node=record,
cursor=to_global_cursor(
[get_field_value(record, field) for field in sorting_fields]
),
)
for record in matching_records
]
if edges:
page_info["start_cursor"] = edges[0].cursor
page_info["end_cursor"] = edges[-1].cursor
return edges, page_info
def connection_from_queryset_slice(
qs: QuerySet,
args: ConnectionArguments = None,
connection_type: Any = Connection,
edge_type: Any = Edge,
pageinfo_type: Any = PageInfo,
) -> Connection:
"""Create a connection object from a QuerySet."""
args = args or {}
before = args.get("before")
after = args.get("after")
first = args.get("first")
last = args.get("last")
_validate_connection_args(args)
requested_count = first or last
end_margin = requested_count + 1 if requested_count else None
cursor = after or before
cursor = from_global_cursor(cursor) if cursor else None
sort_by = args.get("sort_by", {})
sorting_fields = _get_sorting_fields(sort_by, qs)
sorting_direction = _get_sorting_direction(sort_by, last)
if cursor and len(cursor) != len(sorting_fields):
raise GraphQLError("Received cursor is invalid.")
filter_kwargs = (
_prepare_filter(cursor, sorting_fields, sorting_direction) if cursor else Q()
)
qs = qs.filter(filter_kwargs)
qs = qs[:end_margin]
edges, page_info = _get_edges_for_connection(edge_type, qs, args, sorting_fields)
return connection_type(edges=edges, page_info=pageinfo_type(**page_info),)
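# Usage sketch (the model, connection class and cursor below are hypothetical;
# the argument shape follows the Relay-style pagination handled above):
#     connection_from_queryset_slice(
#         Product.objects.order_by("slug", "pk"),
#         args={"first": 20, "after": cursor,
#               "sort_by": {"field": ["slug", "pk"], "direction": ""}},
#         connection_type=ProductCountableConnection,
#     )
# The queryset must already be ordered consistently with `sort_by`, since this
# helper only filters past the cursor and slices; it does not re-order.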
class NonNullConnection(Connection):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(cls, node=None, name=None, **options):
super().__init_subclass_with_meta__(node=node, name=name, **options)
# Override the original EdgeBase type to make the `node` field required.
class EdgeBase:
node = graphene.Field(
cls._meta.node,
description="The item at the end of the edge.",
required=True,
)
cursor = graphene.String(
required=True, description="A cursor for use in pagination."
)
# Create the edge type using the new EdgeBase.
edge_name = cls.Edge._meta.name
edge_bases = (EdgeBase, graphene.ObjectType)
edge = type(edge_name, edge_bases, {})
cls.Edge = edge
# Override the `edges` field to make it non-null list
# of non-null edges.
cls._meta.fields["edges"] = graphene.Field(
graphene.NonNull(graphene.List(graphene.NonNull(cls.Edge)))
)
class CountableConnection(NonNullConnection):
class Meta:
abstract = True
total_count = graphene.Int(description="A total count of items in the collection.")
@staticmethod
def resolve_total_count(root, *_args, **_kwargs):
if isinstance(root.iterable, list):
return len(root.iterable)
return root.iterable.count()
class CountableDjangoObjectType(DjangoObjectType):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(cls, *args, **kwargs):
# Force it to use the countable connection
countable_conn = CountableConnection.create_type(
"{}CountableConnection".format(cls.__name__), node=cls
)
super().__init_subclass_with_meta__(*args, connection=countable_conn, **kwargs)
| _validate_connection_args |
blacklist.go | package optimus
import (
"context"
"github.com/ethereum/go-ethereum/common"
"github.com/sonm-io/core/proto"
"go.uber.org/zap"
)
// Blacklist is a thing that can be asked to determine whether a specific ETH
// address is in the "owner" blacklist and vice versa.
// This is used to be sure that the order created via Optimus can be bought
// by the user this order was created for.
type blacklist struct {
owner common.Address
blacklist map[common.Address]struct{}
dwh sonm.DWHClient
log *zap.SugaredLogger
}
func newBlacklist(owner common.Address, dwh sonm.DWHClient, log *zap.SugaredLogger) *blacklist {
return &blacklist{
owner: owner,
blacklist: map[common.Address]struct{}{}, |
// IsAllowed checks whether the given "addr" is allowed for this blacklist.
// This method returns false both if the "owner" is in the "addr" blacklist
// and vice versa.
// The blacklist needs to be updated before calling this method.
func (m *blacklist) IsAllowed(addr common.Address) bool {
_, ok := m.blacklist[addr]
return !ok
}
func (m *blacklist) Update(ctx context.Context) error {
m.log.Debug("updating blacklist")
blacklist, err := m.dwh.GetBlacklistsContainingUser(ctx, &sonm.BlacklistRequest{
UserID: sonm.NewEthAddress(m.owner),
})
if err != nil {
return err
}
m.blacklist = map[common.Address]struct{}{}
for _, addr := range blacklist.Blacklists {
m.blacklist[addr.Unwrap()] = struct{}{}
}
m.log.Infow("blacklist has been updated", zap.Any("blacklist", m.blacklist))
return nil
}
type multiBlacklist struct {
blacklists []*blacklist
}
func newMultiBlacklist(blacklists ...*blacklist) *multiBlacklist {
return &multiBlacklist{
blacklists: blacklists,
}
}
func (m *multiBlacklist) IsAllowed(addr common.Address) bool {
for _, blacklist := range m.blacklists {
if !blacklist.IsAllowed(addr) {
return false
}
}
return true
}
func (m *multiBlacklist) Update(ctx context.Context) error {
for _, blacklist := range m.blacklists {
if err := blacklist.Update(ctx); err != nil {
return err
}
}
return nil
}
type emptyBlacklist struct{}
func newEmptyBlacklist() *emptyBlacklist {
return &emptyBlacklist{}
}
func (emptyBlacklist) Update(ctx context.Context) error {
return nil
}
func (emptyBlacklist) IsAllowed(addr common.Address) bool {
return true
} | dwh: dwh,
log: log.With(zap.String("addr", owner.Hex())),
}
} |
test_executors.py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import testscenarios
from testtools import testcase
import futurist
from futurist.tests import base
# Module-level functions need to be used since the process pool
# executor cannot access instance or lambda-level functions (since those
# are not pickleable).
def returns_one():
return 1
def | ():
raise RuntimeError("no worky")
def delayed(wait_secs):
time.sleep(wait_secs)
class TestExecutors(testscenarios.TestWithScenarios, base.TestCase):
scenarios = [
('sync', {'executor_cls': futurist.SynchronousExecutor,
'restartable': True, 'executor_kwargs': {}}),
('green_sync', {'executor_cls': futurist.SynchronousExecutor,
'restartable': True,
'executor_kwargs': {'green': True}}),
('green', {'executor_cls': futurist.GreenThreadPoolExecutor,
'restartable': False, 'executor_kwargs': {}}),
('thread', {'executor_cls': futurist.ThreadPoolExecutor,
'restartable': False, 'executor_kwargs': {}}),
('process', {'executor_cls': futurist.ProcessPoolExecutor,
'restartable': False, 'executor_kwargs': {}}),
]
def setUp(self):
super(TestExecutors, self).setUp()
self.executor = self.executor_cls(**self.executor_kwargs)
def tearDown(self):
super(TestExecutors, self).tearDown()
self.executor.shutdown()
self.executor = None
def test_run_one(self):
fut = self.executor.submit(returns_one)
self.assertEqual(1, fut.result())
self.assertTrue(fut.done())
def test_blows_up(self):
fut = self.executor.submit(blows_up)
self.assertRaises(RuntimeError, fut.result)
self.assertIsInstance(fut.exception(), RuntimeError)
def test_gather_stats(self):
self.executor.submit(blows_up)
self.executor.submit(delayed, 0.2)
self.executor.submit(returns_one)
self.executor.shutdown()
self.assertEqual(3, self.executor.statistics.executed)
self.assertEqual(1, self.executor.statistics.failures)
self.assertGreaterEqual(self.executor.statistics.runtime,
# It appears that the thread run loop
# may call this before 0.2 seconds (or 0.2
# will not be represented as a float correctly)
# is really up so accommodate for that
# happening...
0.199)
def test_post_shutdown_raises(self):
executor = self.executor_cls(**self.executor_kwargs)
executor.shutdown()
self.assertRaises(RuntimeError, executor.submit, returns_one)
def test_restartable(self):
if not self.restartable:
raise testcase.TestSkipped("not restartable")
else:
executor = self.executor_cls(**self.executor_kwargs)
fut = executor.submit(returns_one)
self.assertEqual(1, fut.result())
executor.shutdown()
self.assertEqual(1, executor.statistics.executed)
self.assertRaises(RuntimeError, executor.submit, returns_one)
executor.restart()
self.assertEqual(0, executor.statistics.executed)
fut = executor.submit(returns_one)
self.assertEqual(1, fut.result())
self.assertEqual(1, executor.statistics.executed)
executor.shutdown()
def test_alive(self):
with self.executor_cls(**self.executor_kwargs) as executor:
self.assertTrue(executor.alive)
self.assertFalse(executor.alive)
def test_done_callback(self):
happy_completed = []
unhappy_completed = []
def on_done(fut):
if fut.exception():
unhappy_completed.append(fut)
else:
happy_completed.append(fut)
for i in range(0, 10):
if i % 2 == 0:
fut = self.executor.submit(returns_one)
else:
fut = self.executor.submit(blows_up)
fut.add_done_callback(on_done)
self.executor.shutdown()
self.assertEqual(10, len(happy_completed) + len(unhappy_completed))
self.assertEqual(5, len(unhappy_completed))
self.assertEqual(5, len(happy_completed))
| blows_up |
from_into.rs | // The From trait is used for value-to-value conversions.
// If From is implemented correctly for a type, the Into trait should work conversely.
// You can read more about it at https://doc.rust-lang.org/std/convert/trait.From.html
#[derive(Debug)]
struct | {
name: String,
age: usize,
}
// We implement the Default trait to use it as a fallback
// when the provided string is not convertible into a Person object
impl Default for Person {
fn default() -> Person {
Person {
name: String::from("John"),
age: 30,
}
}
}
// Your task is to complete this implementation
// in order for the line `let p = Person::from("Mark,20")` to compile
// Please note that you'll need to parse the age component into a `usize`
// with something like `"4".parse::<usize>()`. The outcome of this needs to
// be handled appropriately.
//
// Steps:
// 1. If the length of the provided string is 0, then return the default of Person
// 2. Split the given string on the commas present in it
// 3. Extract the first element from the split operation and use it as the name
// 4. If the name is empty, then return the default of Person
// 5. Extract the other element from the split operation and parse it into a `usize` as the age
// If while parsing the age, something goes wrong, then return the default of Person
// Otherwise, then return an instantiated Person object with the results
impl From<&str> for Person {
fn from(s: &str) -> Person {
let mut person: Person = Default::default();
if s.len() != 0 {
let mut components = s.split(',');
if let Some(name) = components.next() {
if let Some(age_str) = components.next() {
if let Ok(age) = age_str.parse::<usize>() {
if name.len() != 0 && components.next() == None {
person.name = name.to_string();
person.age = age;
}
}
}
}
}
person
}
}
fn main() {
// Use the `from` function
let p1 = Person::from("Mark,20");
// Since From is implemented for Person, we should be able to use Into
let p2: Person = "Gerald,70".into();
println!("{:?}", p1);
println!("{:?}", p2);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_default() {
// Test that the default person is 30 year old John
let dp = Person::default();
assert_eq!(dp.name, "John");
assert_eq!(dp.age, 30);
}
#[test]
fn test_bad_convert() {
// Test that John is returned when bad string is provided
let p = Person::from("");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_good_convert() {
// Test that "Mark,20" works
let p = Person::from("Mark,20");
assert_eq!(p.name, "Mark");
assert_eq!(p.age, 20);
}
#[test]
fn test_bad_age() {
// Test that "Mark,twenty" will return the default person due to an error in parsing age
let p = Person::from("Mark,twenty");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_missing_comma_and_age() {
let p: Person = Person::from("Mark");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_missing_age() {
let p: Person = Person::from("Mark,");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_missing_name() {
let p: Person = Person::from(",1");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_missing_name_and_age() {
let p: Person = Person::from(",");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_missing_name_and_invalid_age() {
let p: Person = Person::from(",one");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_trailing_comma() {
let p: Person = Person::from("Mike,32,");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
#[test]
fn test_trailing_comma_and_some_string() {
let p: Person = Person::from("Mike,32,man");
assert_eq!(p.name, "John");
assert_eq!(p.age, 30);
}
}
| Person |
requests_test.go | package testing
import (
"testing"
"github.com/Huawei/gophercloud/openstack/common/extensions"
"github.com/Huawei/gophercloud/pagination"
th "github.com/Huawei/gophercloud/testhelper"
"github.com/Huawei/gophercloud/testhelper/client"
)
func TestList(t *testing.T) {
th.SetupHTTP()
defer th.TeardownHTTP()
HandleListExtensionsSuccessfully(t)
count := 0
extensions.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) {
count++
actual, err := extensions.ExtractExtensions(page)
th.AssertNoErr(t, err)
th.AssertDeepEquals(t, ExpectedExtensions, actual)
return true, nil
})
th.CheckEquals(t, 1, count)
}
func TestGet(t *testing.T) | {
th.SetupHTTP()
defer th.TeardownHTTP()
HandleGetExtensionSuccessfully(t)
actual, err := extensions.Get(client.ServiceClient(), "agent").Extract()
th.AssertNoErr(t, err)
th.CheckDeepEquals(t, SingleExtension, actual)
} |
|
apivipconnectivitycheckcmd.go | package host
import (
"context"
"encoding/json"
"fmt"
"github.com/sirupsen/logrus"
"github.com/jinzhu/gorm"
"github.com/openshift/assisted-service/internal/common"
"github.com/openshift/assisted-service/models"
)
type apivipConnectivityCheckCmd struct {
baseCmd
db *gorm.DB
connectivityCheckImage string
verifyAPIVipCidr bool
}
func NewAPIVIPConnectivityCheckCmd(log logrus.FieldLogger, db *gorm.DB, connectivityCheckImage string, verifyAPIVipCidr bool) *apivipConnectivityCheckCmd |
func (c *apivipConnectivityCheckCmd) GetStep(ctx context.Context, host *models.Host) (*models.Step, error) {
var cluster common.Cluster
if err := c.db.First(&cluster, "id = ?", host.ClusterID).Error; err != nil {
c.log.WithError(err).Errorf("failed to fetch cluster %s", host.ClusterID)
return nil, err
}
apiURL := fmt.Sprintf("http://%s:22624/config/worker", *cluster.APIVipDNSName)
request := models.APIVipConnectivityRequest{
URL: &apiURL,
VerifyCidr: c.verifyAPIVipCidr,
}
requestBytes, err := json.Marshal(request)
if err != nil {
c.log.WithError(err).Errorf("failed to marshal APIVipConnectivityRequest")
return nil, err
}
step := &models.Step{
StepType: models.StepTypeAPIVipConnectivityCheck,
Command: "podman",
Args: []string{
"run", "--privileged", "--net=host", "--rm", "--quiet",
"-v", "/var/log:/var/log",
"-v", "/run/systemd/journal/socket:/run/systemd/journal/socket",
c.connectivityCheckImage,
"apivip_check",
string(requestBytes),
},
}
return step, nil
}
| {
return &apivipConnectivityCheckCmd{
baseCmd: baseCmd{log: log},
db: db,
connectivityCheckImage: connectivityCheckImage,
verifyAPIVipCidr: verifyAPIVipCidr,
}
} |
permutation.go | package permutation
type Interface interface {
// Len is the number of elements in the collection.
Len() int
// Swap swaps the elements with indexes i and j.
Swap(i, j int)
}
type Permutator struct {
first bool // true while the elements are still in their initial (first) state
v Interface
b []int
}
func New(v Interface) *Permutator {
n := v.Len()
if n > 0 {
n--
}
return &Permutator{
first: true,
v: v,
b: make([]int, n),
}
}
func (p *Permutator) Next() bool {
if p.first {
p.first = false
return true
}
if n, ok := calcFlipSize(p.b); ok {
flip(p.v, n) // It is the main flip.
return true
}
// It is the last flip. It helps to return the elements to their initial state.
flip(p.v, p.v.Len())
p.first = true
return false // End of permutations.
}
func | (b []int) (int, bool) {
for i := range b {
b[i]++
if k := i + 2; b[i] < k {
return k, true
}
b[i] = 0
}
return 0, false
}
// flip is a function which flips the first n elements in the slice (v)
func flip(v Interface, n int) {
i, j := 0, n-1
for i < j {
v.Swap(i, j)
i, j = i+1, j-1
}
}
| calcFlipSize |
common_utils.py | r"""Importing this file must **not** initialize CUDA context. test_distributed
relies on this assumption to properly run. This means that when this is imported
no CUDA calls shall be made, including torch.cuda.device_count(), etc.
torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported.
"""
import sys
import os
import platform
import re
import gc
import types
from functools import partial
import inspect
import io
import argparse
import unittest
import warnings
import random
import contextlib
import socket
import subprocess
import time
from collections import OrderedDict
from contextlib import contextmanager
from functools import wraps
from itertools import product
from copy import deepcopy
from numbers import Number
import tempfile
import json
from urllib.request import urlopen
import __main__
import errno
from typing import cast, Any, Iterable, Optional
from torch.testing._internal import expecttest
from torch.testing import _compare_tensors_internal, _compare_scalars_internal, _compare_return_type
import torch
import torch.cuda
from torch._utils_internal import get_writable_path
from torch._six import string_classes
import torch.backends.cudnn
import torch.backends.mkl
from enum import Enum
from torch.autograd import gradcheck
from torch.autograd.gradcheck import gradgradcheck
torch.backends.disable_global_flags()
IS_SANDCASTLE = os.getenv('SANDCASTLE') == '1' or os.getenv('TW_JOB_USER') == 'sandcastle'
class ProfilingMode(Enum):
LEGACY = 1
SIMPLE = 2
PROFILING = 3
def cppProfilingFlagsToProfilingMode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
if old_prof_exec_state:
if old_prof_mode_state:
return ProfilingMode.PROFILING
else:
return ProfilingMode.SIMPLE
else:
return ProfilingMode.LEGACY
@contextmanager
def enable_profiling_mode_for_profiling_tests():
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
try:
yield
finally:
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
@contextmanager
def enable_profiling_mode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
try:
yield
finally:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
@contextmanager
def num_profiled_runs(num_runs):
old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs)
try:
yield
finally:
torch._C._jit_set_num_profiled_runs(old_num_runs)
func_call = torch._C.ScriptFunction.__call__
meth_call = torch._C.ScriptMethod.__call__
def prof_callable(callable, *args, **kwargs):
if 'profile_and_replay' in kwargs:
del kwargs['profile_and_replay']
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
with enable_profiling_mode_for_profiling_tests():
callable(*args, **kwargs)
return callable(*args, **kwargs)
return callable(*args, **kwargs)
def prof_func_call(*args, **kwargs):
return prof_callable(func_call, *args, **kwargs)
def prof_meth_call(*args, **kwargs):
return prof_callable(meth_call, *args, **kwargs)
torch._C.ScriptFunction.__call__ = prof_func_call
torch._C.ScriptMethod.__call__ = prof_meth_call
def _get_test_report_path():
# allow users to override the test file location. We need this
# because the distributed tests run the same test file multiple
# times with different configurations.
override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE')
test_source = override if override is not None else 'python-unittest'
return os.path.join('test-reports', test_source)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--subprocess', action='store_true',
help='whether to run each test in a subprocess')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--accept', action='store_true')
parser.add_argument('--ge_config', type=str)
parser.add_argument('--repeat', type=int, default=1)
parser.add_argument('--test_bailouts', action='store_true')
parser.add_argument('--save-xml', nargs='?', type=str,
const=_get_test_report_path(),
default=_get_test_report_path() if bool(os.environ.get('IN_CIRCLECI')) else None)
parser.add_argument('--discover-tests', action='store_true')
parser.add_argument('--log-suffix', type=str, default="")
parser.add_argument('--run-parallel', type=int, default=1)
args, remaining = parser.parse_known_args()
if args.ge_config == 'legacy':
GRAPH_EXECUTOR = ProfilingMode.LEGACY
elif args.ge_config == 'profiling':
GRAPH_EXECUTOR = ProfilingMode.PROFILING
elif args.ge_config == 'simple':
GRAPH_EXECUTOR = ProfilingMode.SIMPLE
else:
# infer flags based on the default settings
GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode()
LOG_SUFFIX = args.log_suffix
RUN_PARALLEL = args.run_parallel
TEST_BAILOUTS = args.test_bailouts
TEST_DISCOVER = args.discover_tests
TEST_IN_SUBPROCESS = args.subprocess
TEST_SAVE_XML = args.save_xml
REPEAT_COUNT = args.repeat
SEED = args.seed
if not expecttest.ACCEPT:
expecttest.ACCEPT = args.accept
UNITTEST_ARGS = [sys.argv[0]] + remaining
torch.manual_seed(SEED)
def wait_for_process(p):
try:
return p.wait()
except KeyboardInterrupt:
# Give `p` a chance to handle KeyboardInterrupt. Without this,
# `pytest` can't print errors it collected so far upon KeyboardInterrupt.
exit_status = p.wait(timeout=5)
if exit_status is not None:
return exit_status
else:
p.kill()
raise
except: # noqa E722, copied from python core library
p.kill()
raise
finally:
# Always call p.wait() to ensure exit
p.wait()
def shell(command, cwd=None, env=None):
sys.stdout.flush()
sys.stderr.flush()
# The following cool snippet is copied from the Py3 core library subprocess.call,
# with only the following changes:
# 1. `except KeyboardInterrupt` block added for SIGINT handling.
# 2. In Py2, subprocess.Popen doesn't return a context manager, so we do
# `p.wait()` in a `finally` block for the code to be portable.
#
# https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323
assert not isinstance(command, torch._six.string_classes), "Command to shell should be a list or tuple of tokens"
p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env)
return wait_for_process(p)
# Used to run the same test with different tensor types
def repeat_test_for_types(dtypes):
def repeat_helper(f):
@wraps(f)
def call_helper(self, *args):
for dtype in dtypes:
with TestCase.subTest(self, dtype=dtype):
f(self, *args, dtype=dtype)
return call_helper
return repeat_helper
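# Hypothetical usage sketch (not part of the original file): run one test body
# once per dtype; the decorated test must accept `dtype` as a keyword argument:
#
#   class TestAdd(TestCase):
#       @repeat_test_for_types([torch.float, torch.double])
#       def test_add(self, dtype=torch.float):
#           t = torch.ones(2, dtype=dtype)
#           self.assertEqual(t + t, torch.full((2,), 2.0, dtype=dtype))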
# Environment variable `IS_PYTORCH_CI` is set in `.jenkins/common.sh`.
IS_PYTORCH_CI = bool(os.environ.get('IS_PYTORCH_CI'))
def discover_test_cases_recursively(suite_or_case):
if isinstance(suite_or_case, unittest.TestCase):
return [suite_or_case]
rc = []
for element in suite_or_case:
rc.extend(discover_test_cases_recursively(element))
return rc
def get_test_names(test_cases):
return ['.'.join(case.id().split('.')[-2:]) for case in test_cases]
def chunk_list(lst, nchunks):
return [lst[i::nchunks] for i in range(nchunks)]
def run_tests(argv=UNITTEST_ARGS):
if TEST_DISCOVER:
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
for name in get_test_names(test_cases):
print(name)
elif TEST_IN_SUBPROCESS:
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
failed_tests = []
for case in test_cases:
test_case_full_name = case.id().split('.', 1)[1]
exitcode = shell([sys.executable] + argv + [test_case_full_name])
if exitcode != 0:
failed_tests.append(test_case_full_name)
assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format(
len(failed_tests), '\n\t'.join(failed_tests))
elif RUN_PARALLEL > 1:
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL)
processes = []
for i in range(RUN_PARALLEL):
command = [sys.executable] + argv + ['--log-suffix=-shard-{}'.format(i + 1)] + test_batches[i]
processes.append(subprocess.Popen(command, universal_newlines=True))
failed = False
for p in processes:
failed |= wait_for_process(p) != 0
assert not failed, "Some test shards have failed"
elif TEST_SAVE_XML is not None:
# import here so that non-CI doesn't need xmlrunner installed
import xmlrunner
test_report_path = TEST_SAVE_XML + LOG_SUFFIX
os.makedirs(test_report_path, exist_ok=True)
verbose = '--verbose' in argv or '-v' in argv
if verbose:
print('Test results will be stored in {}'.format(test_report_path))
unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner(output=test_report_path, verbosity=2 if verbose else 1))
elif REPEAT_COUNT > 1:
for _ in range(REPEAT_COUNT):
if not unittest.main(exit=False, argv=argv).result.wasSuccessful():
sys.exit(-1)
else:
unittest.main(argv=argv)
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_PPC = platform.machine() == "ppc64le"
if IS_WINDOWS:
@contextmanager
def TemporaryFileName():
# Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
# opens the file, and it cannot be opened multiple times in Windows. To support Windows,
# close the file after creation and try to remove it manually
f = tempfile.NamedTemporaryFile(delete=False)
try:
f.close()
yield f.name
finally:
os.unlink(f.name)
else:
@contextmanager # noqa: T484
def TemporaryFileName():
with tempfile.NamedTemporaryFile() as f:
yield f.name
def _check_module_exists(name):
r"""Returns if a top-level module with :attr:`name` exists *without*
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
import importlib
import importlib.util
spec = importlib.util.find_spec(name)
return spec is not None
TEST_NUMPY = _check_module_exists('numpy')
TEST_SCIPY = _check_module_exists('scipy')
TEST_MKL = torch.backends.mkl.is_available()
TEST_NUMBA = _check_module_exists('numba')
TEST_DILL = _check_module_exists('dill')
TEST_LIBROSA = _check_module_exists('librosa')
# Python 2.7 doesn't have spawn
NO_MULTIPROCESSING_SPAWN = os.environ.get('NO_MULTIPROCESSING_SPAWN', '0') == '1'
TEST_WITH_ASAN = os.getenv('PYTORCH_TEST_WITH_ASAN', '0') == '1'
TEST_WITH_TSAN = os.getenv('PYTORCH_TEST_WITH_TSAN', '0') == '1'
TEST_WITH_UBSAN = os.getenv('PYTORCH_TEST_WITH_UBSAN', '0') == '1'
TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'
# Enables tests that are slow to run (disabled by default)
TEST_WITH_SLOW = os.getenv('PYTORCH_TEST_WITH_SLOW', '0') == '1'
# Disables non-slow tests (these tests enabled by default)
# This is usually used in conjunction with TEST_WITH_SLOW to
# run *only* slow tests. (I could have done an enum, but
# it felt a little awkward.)
TEST_SKIP_FAST = os.getenv('PYTORCH_TEST_SKIP_FAST', '0') == '1'
if TEST_NUMPY:
import numpy as np
# Dict of NumPy dtype -> torch dtype (when the correspondence exists)
numpy_to_torch_dtype_dict = {
np.bool : torch.bool,
np.uint8 : torch.uint8,
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
np.float16 : torch.float16,
np.float32 : torch.float32,
np.float64 : torch.float64,
np.complex64 : torch.complex64,
np.complex128 : torch.complex128
}
# Dict of torch dtype -> NumPy dtype
torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
ALL_TENSORTYPES = [torch.float,
torch.double,
torch.half]
# bfloat16 bringup is currently only available on ROCm
# ALL_TENSORTYPES2 will eventually be unified with ALL_TENSORTYPES
# when bfloat16 bringup is complete on all platforms
if TEST_WITH_ROCM:
ALL_TENSORTYPES2 = [torch.float,
torch.double,
torch.half,
torch.bfloat16]
else:
ALL_TENSORTYPES2 = ALL_TENSORTYPES
def skipIfRocm(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_WITH_ROCM:
raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
else:
fn(*args, **kwargs)
return wrapper
def skipIfCompiledWithoutNumpy(fn):
# Even if the numpy module is present, if `USE_NUMPY=0` is used during the
# build, numpy tests will fail
numpy_support = TEST_NUMPY
if numpy_support:
try:
# The numpy module is present, verify that PyTorch is compiled with
# numpy support
torch.from_numpy(np.array([2, 2]))
except RuntimeError:
numpy_support = False
@wraps(fn)
def wrapper(*args, **kwargs):
if not numpy_support:
raise unittest.SkipTest("PyTorch was compiled without numpy support")
else:
fn(*args, **kwargs)
return wrapper
def _test_function(fn, device):
def run_test_function(self):
return fn(self, device)
return run_test_function
def skipIfNoLapack(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not torch._C.has_lapack:
raise unittest.SkipTest('PyTorch compiled without Lapack')
else:
fn(*args, **kwargs)
return wrapper
def skipIfNotRegistered(op_name, message):
"""Wraps the decorator to hide the import of the `core`.
Args:
op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`.
message: message to fail with.
Usage:
@skipIfNotRegistered('MyOp', 'MyOp is not linked!')
This will check if 'MyOp' is in the caffe2.python.core
"""
try:
from caffe2.python import core
skipper = unittest.skipIf(op_name not in core._REGISTERED_OPERATORS,
message)
except ImportError:
skipper = unittest.skip("Cannot import `caffe2.python.core`")
return skipper
def skipIfNoSciPy(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_SCIPY:
raise unittest.SkipTest("test requires SciPy, but SciPy not found")
else:
fn(*args, **kwargs)
return wrapper
def slowTest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
else:
fn(*args, **kwargs)
wrapper.__dict__['slow_test'] = True
return wrapper
def skipCUDAMemoryLeakCheckIf(condition):
def dec(fn):
if getattr(fn, '_do_cuda_memory_leak_check', True): # if current True
fn._do_cuda_memory_leak_check = not condition
return fn
return dec
def skipCUDANonDefaultStreamIf(condition):
def dec(fn):
if getattr(fn, '_do_cuda_non_default_stream', True): # if current True
fn._do_cuda_non_default_stream = not condition
return fn
return dec
def suppress_warnings(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fn(*args, **kwargs)
return wrapper
def get_cpu_type(type_name):
module, name = type_name.rsplit('.', 1)
assert module == 'torch.cuda'
return getattr(torch, name)
def get_gpu_type(type_name):
if isinstance(type_name, type):
type_name = '{}.{}'.format(type_name.__module__, type_name.__name__)
module, name = type_name.rsplit('.', 1)
assert module == 'torch'
return getattr(torch.cuda, name)
def to_gpu(obj, type_map=None):
if type_map is None:
type_map = {}
if isinstance(obj, torch.Tensor):
assert obj.is_leaf
t = type_map.get(obj.type(), get_gpu_type(obj.type()))
with torch.no_grad():
res = obj.clone().type(t)
res.requires_grad = obj.requires_grad
return res
elif torch.is_storage(obj):
return obj.new().resize_(obj.size()).copy_(obj)
elif isinstance(obj, list):
return [to_gpu(o, type_map) for o in obj]
elif isinstance(obj, tuple):
return tuple(to_gpu(o, type_map) for o in obj)
else:
return deepcopy(obj)
def get_function_arglist(func):
return inspect.getfullargspec(func).args
def set_rng_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
if TEST_NUMPY:
np.random.seed(seed)
@contextlib.contextmanager
def freeze_rng_state():
rng_state = torch.get_rng_state()
if torch.cuda.is_available():
cuda_rng_state = torch.cuda.get_rng_state()
yield
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
torch.set_rng_state(rng_state)
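# Hypothetical usage sketch: because the RNG state is restored on exit, a draw
# made inside the block and the first draw made after it see the same state:
#
#   with freeze_rng_state():
#       a = torch.randn(3)
#   b = torch.randn(3)
#   assert torch.equal(a, b)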
@contextlib.contextmanager
def set_default_dtype(dtype):
saved_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
yield
torch.set_default_dtype(saved_dtype)
def iter_indices(tensor):
if tensor.dim() == 0:
return range(0)
if tensor.dim() == 1:
return range(tensor.size(0))
return product(*(range(s) for s in tensor.size()))
def is_iterable(obj):
try:
iter(obj)
return True
except TypeError:
return False
class CudaNonDefaultStream():
def __enter__(self):
# Before starting CUDA test save currently active streams on all
# CUDA devices and set new non default streams to all CUDA devices
# to ensure CUDA tests do not use default stream by mistake.
beforeDevice = torch.cuda.current_device()
self.beforeStreams = []
for d in range(torch.cuda.device_count()):
self.beforeStreams.append(torch.cuda.current_stream(d))
deviceStream = torch.cuda.Stream(device=d)
torch._C._cuda_setStream(deviceStream._cdata)
torch._C._cuda_setDevice(beforeDevice)
def __exit__(self, exec_type, exec_value, traceback):
# After completing CUDA test load previously active streams on all
# CUDA devices.
beforeDevice = torch.cuda.current_device()
for d in range(torch.cuda.device_count()):
torch._C._cuda_setStream(self.beforeStreams[d]._cdata)
torch._C._cuda_setDevice(beforeDevice)
class CudaMemoryLeakCheck():
def __init__(self, testcase, name=None):
self.name = testcase.id() if name is None else name
self.testcase = testcase
# initialize context & RNG to prevent false positive detections
# when the test is the first to initialize those
from torch.testing._internal.common_cuda import initialize_cuda_context_rng
initialize_cuda_context_rng()
@staticmethod
def get_cuda_memory_usage():
# we don't need CUDA synchronize because the statistics are not tracked at
# actual freeing, but when marking the block as free.
num_devices = torch.cuda.device_count()
gc.collect()
return tuple(torch.cuda.memory_allocated(i) for i in range(num_devices))
def __enter__(self):
self.befores = self.get_cuda_memory_usage()
def __exit__(self, exec_type, exec_value, traceback):
# Don't check for leaks if an exception was thrown
if exec_type is not None:
return
afters = self.get_cuda_memory_usage()
for i, (before, after) in enumerate(zip(self.befores, afters)):
self.testcase.assertEqual(
before, after, msg='{} leaked {} bytes CUDA memory on device {}'.format(
self.name, after - before, i))
# "min_satisfying_examples" setting has been deprecated in hypothesis
# 3.56.0 and removed in hypothesis 4.x
try:
import hypothesis
def settings(*args, **kwargs):
if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0):
kwargs.pop('min_satisfying_examples')
return hypothesis.settings(*args, **kwargs)
hypothesis.settings.register_profile(
"pytorch_ci",
settings(
derandomize=True,
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=100,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"dev",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=10,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"debug",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=1000,
verbosity=hypothesis.Verbosity.verbose))
hypothesis.settings.load_profile(
"pytorch_ci" if IS_PYTORCH_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE',
'dev')
)
except ImportError:
print('Failed to import hypothesis in common_utils, tests are not derandomized')
disabled_test_from_issues = None
def check_disabled(test_name):
global disabled_test_from_issues
if disabled_test_from_issues is None:
disabled_test_from_issues = {}
def read_and_process():
url = 'https://raw.githubusercontent.com/zdevito/pytorch_disabled_tests/master/result.json'
contents = urlopen(url, timeout=1).read().decode('utf-8')
the_response = json.loads(contents)
for item in the_response['items']:
title = item['title']
key = 'DISABLED '
if title.startswith(key):
test_name = title[len(key):].strip()
disabled_test_from_issues[test_name] = item['html_url']
if not IS_SANDCASTLE and os.getenv("PYTORCH_RUN_DISABLED_TESTS", "0") != "1":
try:
read_and_process()
except Exception:
print("Couldn't download test skip set, leaving all tests enabled...")
if test_name in disabled_test_from_issues:
raise unittest.SkipTest(
"Test is disabled because an issue exists disabling it: {}".format(disabled_test_from_issues[test_name]) +
" To enable set the environment variable PYTORCH_RUN_DISABLED_TESTS=1")
# Acquires the comparison dtype, required since isclose
# requires both inputs have the same dtype, and isclose is not supported
# for some device x dtype combinations.
# NOTE: Remaps bfloat16 to float32 since neither the CPU nor CUDA device types
# support needed bfloat16 comparison methods.
# NOTE: Remaps float16 to float32 on CPU since the CPU device type doesn't
# support needed float16 comparison methods.
# TODO: Update this once bfloat16 and float16 are better supported.
def get_comparison_dtype(a, b):
# TODO: update this when promote_types supports bfloat16 and/or
# isclose supports bfloat16.
a_dtype = torch.float32 if a.dtype is torch.bfloat16 else a.dtype
b_dtype = torch.float32 if b.dtype is torch.bfloat16 else b.dtype
compare_dtype = torch.promote_types(a_dtype, b_dtype)
# non-CUDA (CPU, for example) float16 -> float32
# TODO: update this when isclose is implemented for CPU float16
if (compare_dtype is torch.float16 and
(a.device != b.device or a.device.type != 'cuda' or
b.device.type != 'cuda')):
compare_dtype = torch.float32
return compare_dtype
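# Hypothetical example of the remapping above: two CPU float16 tensors are
# compared in float32, since CPU float16 lacks the needed isclose support:
#
#   a = torch.ones(2, dtype=torch.float16)  # CPU tensor
#   b = torch.ones(2, dtype=torch.float16)  # CPU tensor
#   assert get_comparison_dtype(a, b) is torch.float32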
class TestCase(expecttest.TestCase):
# NOTE: "precision" lets classes and generated tests set minimum
# atol values when comparing tensors. Used by @precisionOverride, for
# example.
# TODO: provide a better mechanism for generated tests to set rtol/atol.
_precision: float = 0
@property
def precision(self) -> float:
return self._precision
@precision.setter
def precision(self, prec: float) -> None:
self._precision = prec
_do_cuda_memory_leak_check = False
_do_cuda_non_default_stream = False
def __init__(self, method_name='runTest'):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is not None:
# Wraps the tested method if we should do CUDA memory check.
self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True)
# FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044
if self._do_cuda_memory_leak_check and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)
# Wraps the tested method if we should enforce non default CUDA stream.
self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True)
if self._do_cuda_non_default_stream and not IS_WINDOWS and not TEST_WITH_ROCM:
self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)
def assertLeaksNoCudaTensors(self, name=None):
name = self.id() if name is None else name
return CudaMemoryLeakCheck(self, name)
def enforceNonDefaultStream(self):
return CudaNonDefaultStream()
def wrap_with_cuda_policy(self, method_name, policy):
test_method = getattr(self, method_name)
# the import below may initialize CUDA context, so we do it only if
# self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream
# is True.
from torch.testing._internal.common_cuda import TEST_CUDA
fullname = self.id().lower() # class_name.method_name
if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname):
setattr(self, method_name, self.wrap_method_with_cuda_policy(test_method, policy))
def wrap_method_with_cuda_policy(self, method, policy):
# Assumes that `method` is the tested function in `self`.
# NOTE: Python Exceptions (e.g., unittest.SkipTest) keep objects in scope
# alive, so this cannot be done in setUp and tearDown because
# tearDown is run unconditionally no matter whether the test
# passes or not. For the same reason, we can't wrap the `method`
# call in try-finally and always do the check.
@wraps(method)
def wrapper(self, *args, **kwargs):
with policy():
method(*args, **kwargs)
return types.MethodType(wrapper, self)
def wrap_with_cuda_memory_check(self, method):
return self.wrap_method_with_cuda_policy(method, self.assertLeaksNoCudaTensors)
def setUp(self):
if TEST_SKIP_FAST:
if not getattr(self, self._testMethodName).__dict__.get('slow_test', False):
raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST")
check_disabled(str(self))
set_rng_seed(SEED)
def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device='cpu'):
# Assert we are not given an impossible combination, where the sparse dims have
# empty numel but nnz > 0, which would force the indices to contain values.
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
v_size = [nnz] + list(size[sparse_dim:])
v = torch.randn(*v_size, device=device)
i = torch.rand(sparse_dim, nnz, device=device)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if is_uncoalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size))
if not is_uncoalesced:
x = x.coalesce()
else:
# FIXME: `x` is a sparse view of `v`. Currently rebase_history for
# sparse views is not implemented, so this workaround is
# needed for inplace operations done on `x`, e.g., copy_().
# Remove after implementing something equivalent to CopySlice
# for sparse views.
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards
x = x.detach().clone()
return x, x._indices().clone(), x._values().clone()
def safeToDense(self, t):
r = self.safeCoalesce(t)
return r.to_dense()
def safeCoalesce(self, t):
tc = t.coalesce()
self.assertEqual(tc.to_dense(), t.to_dense())
self.assertTrue(tc.is_coalesced())
# Our code below doesn't work when nnz is 0, because
# then it's a 0D tensor, not a 2D tensor.
if t._nnz() == 0:
self.assertEqual(t._indices(), tc._indices())
self.assertEqual(t._values(), tc._values())
return tc
value_map = {}
for idx, val in zip(t._indices().t(), t._values()):
idx_tup = tuple(idx.tolist())
if idx_tup in value_map:
value_map[idx_tup] += val
else:
value_map[idx_tup] = val.clone() if isinstance(val, torch.Tensor) else val
new_indices = sorted(list(value_map.keys()))
new_values = [value_map[idx] for idx in new_indices]
if t._values().ndimension() < 2:
new_values = t._values().new(new_values)
else:
new_values = torch.stack(new_values)
new_indices = t._indices().new(new_indices).t()
tg = t.new(new_indices, new_values, t.size())
self.assertEqual(tc._indices(), tg._indices())
self.assertEqual(tc._values(), tg._values())
if t.is_coalesced():
self.assertEqual(tc._indices(), t._indices())
self.assertEqual(tc._values(), t._values())
return tg
# Compares the given Torch and NumPy functions on the given tensor-like object.
# NOTE: both torch_fn and np_fn should be functions that take a single
# tensor (array). If the torch and/or NumPy function require additional
# arguments then wrap the function in a lambda or pass a partial function.
# TODO: support bfloat16 comparisons
# TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol)
def compare_with_numpy(self, torch_fn, np_fn, tensor_like, device=None, dtype=None):
assert TEST_NUMPY
assert dtype is not torch.bfloat16
if isinstance(tensor_like, torch.Tensor):
assert device is None
assert dtype is None
a = tensor_like.detach().cpu().numpy()
t = tensor_like
else:
a = np.array(tensor_like, dtype=torch_to_numpy_dtype_dict[dtype])
t = torch.tensor(tensor_like, device=device, dtype=dtype)
np_result = np_fn(a)
torch_result = torch_fn(t).cpu()
# Converts arrays to tensors
if isinstance(np_result, np.ndarray):
try:
np_result = torch.from_numpy(np_result)
except Exception:
# NOTE: copying an array before conversion is necessary when,
# for example, the array has negative strides.
np_result = torch.from_numpy(np_result.copy())
self.assertEqual(np_result, torch_result)
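# Hypothetical usage sketch (inside a test method of this class): compare
# torch.exp against np.exp on a plain Python list for a given device/dtype:
#
#   self.compare_with_numpy(torch.exp, np.exp, [0.0, 1.0, 2.0],
#                           device='cpu', dtype=torch.float32)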
# Some analysis of tolerance by logging tests from test_torch.py can be found
# in https://github.com/pytorch/pytorch/pull/32538.
# dtype name : (rtol, atol)
dtype_precisions = {
torch.float16 : (0.001, 1e-5),
torch.bfloat16 : (0.016, 1e-5),
torch.float32 : (1.3e-6, 1e-5),
torch.float64 : (1e-7, 1e-7),
torch.complex32 : (0.001, 1e-5),
torch.complex64 : (1.3e-6, 1e-5),
torch.complex128 : (1e-7, 1e-7),
}
# Returns the "default" rtol and atol for comparing scalars or
# tensors of the given dtypes.
def _getDefaultRtolAndAtol(self, dtype0, dtype1):
rtol = max(self.dtype_precisions.get(dtype0, (0, 0))[0],
self.dtype_precisions.get(dtype1, (0, 0))[0])
atol = max(self.dtype_precisions.get(dtype0, (0, 0))[1],
self.dtype_precisions.get(dtype1, (0, 0))[1])
return rtol, atol
# Checks if two dense tensors are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# If exact_dtype is true both tensors must have the same dtype.
# If exact_device is true both tensors must be on the same device.
# See the "Test Framework Tensor 'Equality'" note for more details.
# NOTE: tensors on different devices are moved to the CPU to be compared when
# exact_device is False.
# NOTE: this function checks the tensors' devices, sizes, and dtypes
# and acquires the appropriate device, dtype, rtol and atol to compare
# them with. It then calls _compare_tensors_internal.
def _compareTensors(self, a, b, *, rtol: Optional[float] = None, atol=None, equal_nan=True,
exact_dtype=True, exact_device=False) -> _compare_return_type:
assert (atol is None) == (rtol is None)
if not isinstance(a, torch.Tensor):
return (False, "argument a, {0}, to _compareTensors is not a tensor!".format(a))
if not isinstance(b, torch.Tensor):
return (False, "argument b, {0}, to _compareTensors is not a tensor!".format(b))
# Validates tensors are on the same device
if exact_device and a.device != b.device:
return (False, ("Attempted to compare equality of tensors on "
"different devices! Got devices {0} and "
"{1}.".format(a.device, b.device)))
# Compares tensors of different devices on the CPU
if a.device != b.device:
a = a.cpu()
b = b.cpu()
# Checks size matches
if a.size() != b.size():
return (False, ("Attempted to compare equality of tensors with "
"different sizes. Got sizes {0} and {1}.").format(a.size(), b.size()))
# Checks dtype (if exact_dtype)
if exact_dtype and a.dtype is not b.dtype:
return (False, ("Attempted to compare equality of tensors with "
"different dtypes. Got dtypes {0} and {1}.").format(a.dtype, b.dtype))
# Acquires rtol and atol
if rtol is None:
rtol, atol = self._getDefaultRtolAndAtol(a.dtype, b.dtype)
atol = max(atol, self.precision)
# Converts to comparison dtype
dtype = get_comparison_dtype(a, b)
a = a.to(dtype)
b = b.to(dtype)
return _compare_tensors_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
# Checks if two scalars are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# NOTE: this function just acquires rtol and atol
# before calling _compare_scalars_internal.
def _compareScalars(self, a, b, *,
rtol: Optional[float] = None, atol: Optional[float] = None, equal_nan=True) -> _compare_return_type:
# Acquires rtol and atol
assert (atol is None) == (rtol is None)
if rtol is None:
if isinstance(a, complex) or isinstance(b, complex):
rtol, atol = self._getDefaultRtolAndAtol(torch.complex64, torch.complex64)
elif isinstance(a, float) or isinstance(b, float):
rtol, atol = self._getDefaultRtolAndAtol(torch.float32, torch.float32)
else:
rtol, atol = 0, 0
atol = max(atol, self.precision)
return _compare_scalars_internal(a, b, rtol=cast(float, rtol), atol=cast(float, atol), equal_nan=equal_nan)
def assertEqualIgnoreType(self, *args, **kwargs) -> None:
# If you are seeing this function used, that means the test is written incorrectly
# and deserves detailed investigation
return self.assertEqual(*args, exact_dtype=False, **kwargs)
# Compares x and y
# TODO: default exact_device to True
def assertEqual(self, x, y, msg: Optional[str] = None, *,
atol: Optional[float] = None, rtol: Optional[float] = None,
equal_nan=True, exact_dtype=True, exact_device=False) -> None:
assert (atol is None) == (rtol is None), "If one of atol or rtol is specified the other must be, too"
# Tensor x Number and Number x Tensor comparisons
if isinstance(x, torch.Tensor) and isinstance(y, Number):
self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(y, torch.Tensor) and isinstance(x, Number):
self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
# Tensor x np.bool
elif isinstance(x, torch.Tensor) and isinstance(y, np.bool_):
self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(y, torch.Tensor) and isinstance(x, np.bool_):
self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
# Tensor x Tensor
elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
super().assertEqual(x.is_sparse, y.is_sparse, msg=msg)
super().assertEqual(x.is_quantized, y.is_quantized, msg=msg)
if x.is_sparse:
x = self.safeCoalesce(x)
y = self.safeCoalesce(y)
indices_result, debug_msg = self._compareTensors(x._indices(), y._indices(),
rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not indices_result and msg is None:
assert debug_msg is not None
msg = "Sparse tensor indices failed to compare as equal! " + debug_msg
self.assertTrue(indices_result, msg=msg)
values_result, debug_msg = self._compareTensors(x._values(), y._values(),
rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not values_result and msg is None:
assert debug_msg is not None
msg = "Sparse tensor values failed to compare as equal! " + debug_msg
self.assertTrue(values_result, msg=msg)
elif x.is_quantized and y.is_quantized:
self.assertEqual(x.qscheme(), y.qscheme(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
if x.qscheme() == torch.per_tensor_affine:
self.assertEqual(x.q_scale(), y.q_scale(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
self.assertEqual(x.q_zero_point(), y.q_zero_point(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif x.qscheme() == torch.per_channel_affine:
self.assertEqual(x.q_per_channel_scales(), y.q_per_channel_scales(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
self.assertEqual(x.q_per_channel_zero_points(), y.q_per_channel_zero_points(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
self.assertEqual(x.q_per_channel_axis(), y.q_per_channel_axis(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
result, debug_msg = self._compareTensors(x.int_repr().to(torch.int32),
y.int_repr().to(torch.int32),
atol=atol, rtol=rtol,
exact_dtype=exact_dtype,
exact_device=exact_device)
if not result and msg is None:
assert debug_msg is not None
msg = "Quantized representations failed to compare as equal! " + debug_msg
self.assertTrue(result, msg=msg)
else:
result, debug_msg = self._compareTensors(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not result and msg is None:
assert debug_msg is not None
msg = "Tensors failed to compare as equal! " + debug_msg
self.assertTrue(result, msg=msg)
elif isinstance(x, string_classes) and isinstance(y, string_classes):
super().assertEqual(x, y, msg=msg)
elif type(x) == set and type(y) == set:
super().assertEqual(x, y, msg=msg)
elif isinstance(x, dict) and isinstance(y, dict):
if isinstance(x, OrderedDict) and isinstance(y, OrderedDict):
self.assertEqual(x.items(), y.items(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
else:
self.assertEqual(set(x.keys()), set(y.keys()), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
key_list = list(x.keys())
self.assertEqual([x[k] for k in key_list],
[y[k] for k in key_list],
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(x, type) and isinstance(y, type):
# See TestTorch.test_assert_equal_generic_meta
super().assertEqual(x, y, msg=msg)
elif is_iterable(x) and is_iterable(y):
super().assertEqual(len(x), len(y), msg=msg)
for x_, y_ in zip(x, y):
self.assertEqual(x_, y_, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(x, bool) and isinstance(y, bool):
self.assertTrue(x == y, msg=msg)
# Scalar x Scalar
elif isinstance(x, Number) and isinstance(y, Number):
result, debug_msg = self._compareScalars(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan)
if not result and msg is None:
assert debug_msg is not None
msg = "Scalars failed to compare as equal! " + debug_msg
self.assertTrue(result, msg=msg)
else:
super().assertEqual(x, y, msg=msg)
def assertAlmostEqual(self, x, y, *, places=None, msg=None, delta=None):
prec = delta
if places:
prec = 10**(-places)
rtol = None if prec is None else 0
self.assertEqual(x, y, msg=msg, atol=prec, rtol=rtol)
def assertNotEqual(self, x, y, msg: Optional[str] = None, *,
atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None:
with self.assertRaises(AssertionError, msg=msg):
self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs)
def assertEqualTypeString(self, x, y) -> None:
# This API is used to simulate the deprecated x.type() == y.type()
self.assertEqual(x.device, y.device)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.is_sparse, y.is_sparse)
def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None:
for elem in iterable:
if id(obj) == id(elem):
return
raise AssertionError("object not found in iterable")
# TODO: Support context manager interface
# NB: The kwargs forwarding to callable robs the 'subname' parameter.
# If you need it, manually apply your callable in a lambda instead.
def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):
subname = None
if 'subname' in kwargs:
subname = kwargs['subname']
del kwargs['subname']
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertExpected(str(e), subname)
return
# Don't put this in the try block; otherwise the except clause could catch the AssertionError raised by fail()
self.fail(msg="Did not raise when expected to")
def assertNotWarn(self, callable, msg=''):
r"""
Test if :attr:`callable` does not raise a warning.
"""
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
callable()
self.assertTrue(len(ws) == 0, msg)
@contextmanager
def maybeWarnsRegex(self, category, regex=''):
"""Context manager for code that *may* warn, e.g. ``TORCH_WARN_ONCE``.
This filters expected warnings from the test log and fails the test if
any unexpected warnings are caught.
"""
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
# Ignore expected warnings
warnings.filterwarnings("ignore", message=regex, category=category)
try:
yield
finally:
if len(ws) != 0:
msg = 'Caught unexpected warnings:\n'
for w in ws:
msg += warnings.formatwarning(
w.message, w.category, w.filename, w.lineno, w.line)
msg += '\n'
self.fail(msg)
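# Hypothetical usage sketch (inside a test method): tolerate an optional
# TORCH_WARN_ONCE deprecation warning while still failing on any other warning:
#
#   with self.maybeWarnsRegex(UserWarning, "is deprecated"):
#       possibly_warning_call()  # assumed helper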
def assertExpected(self, s, subname=None):
r"""
Test that a string matches the recorded contents of a file
derived from the name of this test and subname. This file
is placed in the 'expect' directory in the same directory
as the test script. You can automatically update the recorded test
output using --accept.
If you call this multiple times in a single function, you must
give a unique subname each time.
"""
if not isinstance(s, str):
raise TypeError("assertExpected is strings only")
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
# NB: we take __file__ from the module that defined the test
# class, so we place the expect directory where the test script
# lives, NOT where test/common_utils.py lives. This doesn't matter in
# PyTorch where all test scripts are in the same directory as
# test/common_utils.py, but it matters in onnx-pytorch
module_id = self.__class__.__module__
munged_id = remove_prefix(self.id(), module_id + ".")
test_file = os.path.realpath(sys.modules[module_id].__file__)
expected_file = os.path.join(os.path.dirname(test_file),
"expect",
munged_id)
subname_output = ""
if subname:
expected_file += "-" + subname
subname_output = " ({})".format(subname)
expected_file += ".expect"
expected = None
def accept_output(update_type):
print("Accepting {} for {}{}:\n\n{}".format(update_type, munged_id, subname_output, s))
with open(expected_file, 'w') as f:
f.write(s)
try:
with open(expected_file) as f:
expected = f.read()
except IOError as e:
if e.errno != errno.ENOENT:
raise
elif expecttest.ACCEPT:
return accept_output("output")
else:
raise RuntimeError(
("I got this output for {}{}:\n\n{}\n\n"
"No expect file exists; to accept the current output, run:\n"
"python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id))
# a hack for JIT tests
if IS_WINDOWS:
expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected)
s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s)
# Adjust for producer_version
expected = expected.replace(
'producer_version: "XXX"',
'producer_version: "{}"'.format(torch.onnx.producer_version)
)
if expecttest.ACCEPT:
if expected != s:
return accept_output("updated output")
else:
if hasattr(self, "assertMultiLineEqual"):
# Python 2.7 only
# NB: Python considers lhs "old" and rhs "new".
self.assertMultiLineEqual(expected, s)
else:
self.assertEqual(s, expected)
def assertExpectedStripMangled(self, s, subname=None):
s = re.sub(r'__torch__[^ ]+', '', s)
self.assertExpected(s, subname)
# returns captured stderr
@staticmethod
def runWithPytorchAPIUsageStderr(code):
import subprocess
env = os.environ.copy()
env["PYTORCH_API_USAGE_STDERR"] = "1"
pipes = subprocess.Popen(
[sys.executable, '-c', code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
return pipes.communicate()[1].decode('ascii')
if sys.version_info < (3, 2):
# assertRegexpMatches renamed to assertRegex in 3.2
assertRegex = unittest.TestCase.assertRegexpMatches
# assertRaisesRegexp renamed to assertRaisesRegex in 3.2
assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
if sys.version_info < (3, 5):
# assertNotRegexpMatches renamed to assertNotRegex in 3.5
assertNotRegex = unittest.TestCase.assertNotRegexpMatches
def download_file(url, binary=True):
from urllib.parse import urlsplit
from urllib import request, error
filename = os.path.basename(urlsplit(url)[2])
data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
path = os.path.join(data_dir, filename)
if os.path.exists(path):
return path
try:
data = request.urlopen(url, timeout=15).read()
with open(path, 'wb' if binary else 'w') as f:
f.write(data)
return path
except error.URLError:
msg = "could not download test file '{}'".format(url)
warnings.warn(msg, RuntimeWarning)
raise unittest.SkipTest(msg)
def find_free_port():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('localhost', 0))
sockname = sock.getsockname()
sock.close()
return sockname[1]
# Errors that we can get in c10d initialization for which we should retry tests for.
ADDRESS_IN_USE = "Address already in use"
CONNECT_TIMEOUT = "connect() timed out."
def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE,)):
    """Reruns a test if it raises a RuntimeError whose message exactly
    matches one of the strings in connect_errors."""
# This if block is executed when using this function as a decorator with arguments.
if func is None:
return partial(retry_on_connect_failures, connect_errors=connect_errors)
@wraps(func)
def wrapper(*args, **kwargs):
tries_remaining = 10
while True:
try:
return func(*args, **kwargs)
except RuntimeError as error:
if str(error) in connect_errors:
tries_remaining -= 1
if tries_remaining == 0:
raise
time.sleep(random.random())
continue
raise
return wrapper
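# Illustrative sketch (not part of the original utilities): applying the
# decorator above. `create_store` is a hypothetical callable that raises
# RuntimeError("Address already in use") on a port collision, which the
# decorator turns into a bounded rerun; any other error propagates at once.
@retry_on_connect_failures
def _example_create_store_on_free_port(create_store):
    return create_store(find_free_port())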
# Decorator to retry upon certain Exceptions.
def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False):
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
print(msg)
time.sleep(mdelay)
mtries -= 1
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
                if skip_after_retries:
                    raise unittest.SkipTest(f"Skipping after {tries} consecutive failures: {str(e)}") from e
                raise
return f_retry # true decorator
return deco_retry
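# Illustrative sketch (not part of the original utilities): the retry decorator
# above applied to a flaky operation. download_file is the helper defined
# earlier in this file; OSError here is just a stand-in for whatever exception
# the caller considers transient.
@retry(OSError, tries=3, delay=1)
def _example_flaky_download(url):
    return download_file(url)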
# Methods for matrix generation
# Used in test_autograd.py and test_torch.py
def prod_single_zero(dim_size):
result = torch.randn(dim_size, dim_size)
result[0, 1] = 0
return result
def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'):
assert rank <= l
A = torch.randn(l, l, dtype=dtype, device=device)
u, s, v = A.svd()
for i in range(l):
if i >= rank:
s[i] = 0
elif s[i] == 0:
s[i] = 1
return u.mm(torch.diag(s)).mm(v.transpose(0, 1))
def random_symmetric_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.transpose(-2, -1)).div_(2)
return A
def random_symmetric_psd_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
return torch.matmul(A, A.transpose(-2, -1))
def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return torch.matmul(A, A.transpose(-2, -1)) \
+ torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5
def make_nonzero_det(A, sign=None, min_singular_value=0.1):
u, s, v = A.svd()
s.clamp_(min=min_singular_value)
A = torch.matmul(u, torch.matmul(torch.diag_embed(s), v.transpose(-2, -1)))
det = A.det()
if sign is not None:
if A.dim() == 2:
|
else:
cond = ((det < 0) ^ (sign < 0)).nonzero()
if cond.size(0) > 0:
for i in range(cond.size(0)):
A[list(cond[i])][0, :].neg_()
return A
def random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_dims,
**kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
silent = kwargs.get("silent", False)
if silent and not torch._C.has_lapack:
return torch.ones(matrix_size, matrix_size, dtype=dtype, device=device)
A = torch.randn(batch_dims + (matrix_size, matrix_size), dtype=dtype, device=device)
u, _, v = A.svd()
s = torch.arange(1., matrix_size + 1, dtype=dtype, device=device).mul_(1.0 / (matrix_size + 1)).diag()
return u.matmul(s.expand(batch_dims + (matrix_size, matrix_size)).matmul(v.transpose(-2, -1)))
def random_matrix(rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices.
Parameters:
dtype - the data type
device - the device kind
singular - when True, the output will be singular
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
silent = kwargs.get("silent", False)
singular = kwargs.get("singular", False)
if silent and not torch._C.has_lapack:
return torch.ones(rows, columns, dtype=dtype, device=device)
A = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device)
u, _, v = A.svd(some=False)
s = torch.zeros(rows, columns, dtype=dtype, device=device)
k = min(rows, columns)
for i in range(k):
s[i, i] = float(i + 1) / (k + 1)
if singular:
# make matrix singular
s[k - 1, k - 1] = 0
if k > 2:
# increase the order of singularity so that the pivoting
# in LU factorization will be non-trivial
s[0, 0] = 0
return u.matmul(s.expand(batch_dims + (rows, columns)).matmul(v.transpose(-2, -1)))
def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices with
given rank.
"""
B = random_matrix(rows, rank, *batch_dims, **kwargs)
C = random_matrix(rank, columns, *batch_dims, **kwargs)
return B.matmul(C)
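# Illustrative sketch (not from the original file): the product B.matmul(C)
# above has inner dimension `rank`, so its rank is at most `rank`; with the
# well-conditioned factors that random_matrix produces it is exactly `rank`,
# which torch.matrix_rank can confirm for small sizes.
def _example_check_lowrank_rank(rows=6, columns=5, rank=2):
    M = random_lowrank_matrix(rank, rows, columns)
    return int(torch.matrix_rank(M)) == rank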
def random_sparse_matrix(rows, columns, density=0.01, **kwargs):
"""Return rectangular random sparse matrix within given density.
The density of the result approaches to given density as the size
of the matrix is increased and a relatively small value of density
is specified but higher than min(rows, columns)/(rows * columns)
for non-singular matrices.
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
singular = kwargs.get("singular", False)
k = min(rows, columns)
nonzero_elements = max(min(rows, columns), int(rows * columns * density))
row_indices = [i % rows for i in range(nonzero_elements)]
column_indices = [i % columns for i in range(nonzero_elements)]
random.shuffle(column_indices)
indices = [row_indices, column_indices]
values = torch.randn(nonzero_elements, dtype=dtype, device=device)
# ensure that the diagonal dominates
values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp()
A = torch.sparse_coo_tensor(indices, values, (rows, columns), device=device)
return A.coalesce()
def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):
"""Return random sparse positive-definite matrix with given density.
The eigenvalues of the matrix are defined as::
arange(1, matrix_size+1)/matrix_size
Algorithm:
A = diag(arange(1, matrix_size+1)/matrix_size)
while <A density is smaller than required>:
<choose random i, j in range(matrix_size), theta in [0, 2*pi]>
R = <rotation matrix (i,j,theta)>
A = R^T A R
"""
import math
torch = kwargs.get('torch', globals()['torch'])
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
    data = {(i, i): float(i + 1) / matrix_size
            for i in range(matrix_size)}
def multiply(data, N, i, j, cs, sn, left=True):
for k in range(N):
if left:
ik, jk = (k, i), (k, j)
else:
ik, jk = (i, k), (j, k)
aik, ajk = data.get(ik, 0), data.get(jk, 0)
aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk
if aik:
data[ik] = aik
else:
data.pop(ik, None)
if ajk:
data[jk] = ajk
else:
data.pop(jk, None)
target_nnz = density * matrix_size * matrix_size
while len(data) < target_nnz:
i = random.randint(0, matrix_size - 1)
j = random.randint(0, matrix_size - 1)
if i != j:
theta = random.uniform(0, 2 * math.pi)
cs = math.cos(theta)
sn = math.sin(theta)
multiply(data, matrix_size, i, j, cs, sn, left=True)
multiply(data, matrix_size, i, j, cs, sn, left=False)
icoords, jcoords, values = [], [], []
for (i, j), v in sorted(data.items()):
icoords.append(i)
jcoords.append(j)
values.append(v)
indices = [icoords, jcoords]
return torch.sparse_coo_tensor(indices, values, (matrix_size, matrix_size), dtype=dtype, device=device)
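# Illustrative sketch (not from the original file): the Jacobi-style rotations
# applied above are orthogonal similarity transforms, so the eigenvalues stay
# exactly arange(1, matrix_size + 1) / matrix_size as promised in the
# docstring. torch.symeig matches the era of this file; newer releases would
# use torch.linalg.eigvalsh instead.
def _example_check_sparse_pd_eigenvalues(matrix_size=5):
    A = random_sparse_pd_matrix(matrix_size, density=0.5).to_dense()
    eigvals, _ = torch.symeig(A)  # ascending eigenvalues, eigenvectors skipped
    expected = torch.arange(1., matrix_size + 1, dtype=A.dtype, device=A.device) / matrix_size
    return torch.allclose(eigvals, expected, atol=1e-6)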
def do_test_dtypes(self, dtypes, layout, device):
for dtype in dtypes:
if dtype != torch.float16:
out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device)
self.assertIs(dtype, out.dtype)
self.assertIs(layout, out.layout)
self.assertEqual(device, out.device)
def do_test_empty_full(self, dtypes, layout, device):
shape = torch.Size([2, 3])
def check_value(tensor, dtype, layout, device, value, requires_grad):
self.assertEqual(shape, tensor.shape)
self.assertIs(dtype, tensor.dtype)
self.assertIs(layout, tensor.layout)
self.assertEqual(tensor.requires_grad, requires_grad)
if tensor.is_cuda and device is not None:
self.assertEqual(device, tensor.device)
if value is not None:
fill = tensor.new(shape).fill_(value)
self.assertEqual(tensor, fill)
def get_int64_dtype(dtype):
module = '.'.join(str(dtype).split('.')[1:-1])
if not module:
return torch.int64
return operator.attrgetter(module)(torch).int64
default_dtype = torch.get_default_dtype()
check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False)
check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False)
for dtype in dtypes:
for rg in {dtype.is_floating_point, False}:
int64_dtype = get_int64_dtype(dtype)
v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg)
check_value(v, dtype, layout, device, None, rg)
out = v.new()
check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, None, rg)
check_value(v.new_empty(shape), dtype, layout, device, None, False)
check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
check_value(torch.empty_like(v), dtype, layout, device, None, False)
check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
if dtype is not torch.float16 and layout != torch.sparse_coo:
fv = 3
v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg)
check_value(v, dtype, layout, device, fv, rg)
check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False)
out = v.new()
check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, fv + 2, rg)
check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 3, False)
check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False)
check_value(torch.full_like(v, fv + 5,
dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 5, False)
THESE_TAKE_WAY_TOO_LONG = {
'test_Conv3d_groups',
'test_conv_double_backward',
'test_conv_double_backward_groups',
'test_Conv3d_dilated',
'test_Conv3d_stride_padding',
'test_Conv3d_dilated_strided',
'test_Conv3d',
'test_Conv2d_dilated',
'test_ConvTranspose3d_dilated',
'test_ConvTranspose2d_dilated',
'test_snli',
'test_Conv2d',
'test_Conv2d_padding',
'test_ConvTranspose2d_no_bias',
'test_ConvTranspose2d',
'test_ConvTranspose3d',
'test_Conv2d_no_bias',
'test_matmul_4d_4d',
'test_multinomial_invalid_probs',
}
running_script_path = None
def set_running_script_path():
global running_script_path
try:
running_file = os.path.abspath(os.path.realpath(sys.argv[0]))
if running_file.endswith('.py'): # skip if the running file is not a script
running_script_path = running_file
except Exception:
pass
def check_test_defined_in_running_script(test_case):
if running_script_path is None:
return
test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__)))
assert test_case_class_file == running_script_path, "Class of loaded TestCase \"{}\" " \
"is not defined in the running script \"{}\", but in \"{}\". Did you " \
"accidentally import a unittest.TestCase from another file?".format(
test_case.id(), running_script_path, test_case_class_file)
def load_tests(loader, tests, pattern):
set_running_script_path()
test_suite = unittest.TestSuite()
for test_group in tests:
for test in test_group:
check_test_defined_in_running_script(test)
test_suite.addTest(test)
return test_suite
class BytesIOContext(io.BytesIO):
def __enter__(self):
return self
def __exit__(self, *args):
pass
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs):
# call assert function rather than returning a bool since it's nicer
# if we get whether this failed on the gradcheck or the gradgradcheck.
test_case.assertTrue(gradcheck(apply_fn, inputs))
test_case.assertTrue(gradgradcheck(apply_fn, inputs))
# Using @precisionOverride specific to your test is the recommended way
# of doing this. These are just some values that worked for test_nn.
dtype2prec_DONTUSE = {torch.float: 1e-5,
torch.double: 1e-5,
torch.half: 1e-2,
torch.bfloat16: 1e-1}
| det = det.item()
if (det < 0) ^ (sign < 0):
A[0, :].neg_() |
setup.py | from distutils.core import setup
try:
with open("README.md","r") as fh:
long_description = fh.read()
except Exception:
long_description = 'Taxation7% by UncleMedia'
setup(
name = 'phasiakon', # How you named your package folder (MyLib)
packages = ['phasiakon'], # Chose the same as "name"
version = '0.1', # Start with a small number and increase it with every change you make
  license='MIT', # Choose a license from here: https://help.github.com/articles/licensing-a-repository
description = 'Taxation7% by UncleMedia', # Give a short description about your library
long_description=long_description,
long_description_content_type = "text/markdown",
author = 'UncleMedia', # Type in your name
author_email = '[email protected]', # Type in your E-Mail
url = 'https://github.com/unclemedia0/phasiakon', # Provide either the link to your github or to your website
download_url = 'https://github.com/unclemedia0/phasiakon/archive/v_01.tar.gz', # I explain this later on
keywords = ['phasiakon', 'Hmong', 'UncleMedia'], # Keywords that define your package best
classifiers=[
    'Development Status :: 3 - Alpha', # Choose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
    'Intended Audience :: Developers', # Define that your audience is developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
    'Programming Language :: Python :: 3', # Specify which Python versions you want to support
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
| 'Programming Language :: Python :: 3.9',
],
) | 'Programming Language :: Python :: 3.8',
|
plot_signals_weighted_depth.py |
import logging
from matplotlib.cm import get_cmap
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from wildwood.datasets import get_signal, make_regression
from wildwood.forest import ForestRegressor
from wildwood._binning import Binner
pd.set_option("display.max_columns", 20)
pd.set_option("display.precision", 2)
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
colormap = get_cmap("tab20")
n_samples_train = 5000
n_samples_test = 1000
random_state = 42
noise = 0.03
aggregation = True
n_estimators = 100
step = 1 / noise ** 2
signal = "heavisine"
X_train, y_train = make_regression(
n_samples=n_samples_train, signal=signal, noise=noise, random_state=random_state
)
X_test = np.linspace(0, 1, num=n_samples_test)
#
# reg = ForestRegressor(
# random_state=random_state,
# aggregation=aggregation,
# max_features=1,
# n_estimators=n_estimators,
# step=step,
# )
#
# reg.fit(X_train.reshape(n_samples_train, 1), y_train)
# y_pred = reg.predict(X_test.reshape(n_samples_test, 1))
#
# df = reg.get_nodes(0)
# print(df)
# exit(0)
signals = ["heavisine", "bumps", "blocks", "doppler"]
def plot_weighted_depth(signal):
|
for signal in signals:
plot_weighted_depth(signal)
plt.show()
| X_train, y_train = make_regression(
n_samples=n_samples_train, signal=signal, noise=noise, random_state=random_state
)
X_train = X_train.reshape(-1, 1)
X_test = np.linspace(0, 1, num=n_samples_test).reshape(-1, 1)
binner = Binner().fit(X_train)
X_test_binned = binner.transform(X_test)
reg = ForestRegressor(
random_state=random_state,
aggregation=aggregation,
n_estimators=n_estimators,
step=step,
)
reg.fit(X_train, y_train)
y_pred = reg.predict(X_test)
weighted_depths = reg._weighted_depth(X_test.reshape(n_samples_test, 1))
# print("weighted_depths.shape:", weighted_depths.shape)
# avg_weighted_depth = weighted_depths.mean(axis=0)
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, sharex=True, figsize=(6, 5))
plot_samples = ax1.plot(
X_train, y_train, color=colormap.colors[1], lw=2, label="Samples"
)[0]
plot_signal = ax1.plot(
X_test_binned / 255,
get_signal(X_test_binned / 255, signal),
lw=2,
color=colormap.colors[0],
label="Signal",
)[0]
plot_prediction = ax2.plot(
X_test.ravel(), y_pred, lw=2, color=colormap.colors[2], label="Prediction"
)[0]
# ax3.plot(
# X_test,
# weighted_depths[:, 1:],
# lw=1,
# color=colormap.colors[5],
# alpha=0.2,
# label="Weighted depths",
# )
plot_weighted_depths = ax3.plot(
X_test, weighted_depths.T, lw=1, color=colormap.colors[5], alpha=0.2
)[0]
plot_mean_weighted_depths = ax3.plot(
X_test,
weighted_depths.mean(axis=0),
lw=2,
color=colormap.colors[4],
label="Mean weighted depth",
)[0]
filename = "weighted_depths_%s.pdf" % signal
fig.subplots_adjust(hspace=0.1)
fig.legend(
(
plot_signal,
plot_samples,
plot_mean_weighted_depths,
plot_weighted_depths,
plot_prediction,
),
(
"Signal",
"Samples",
"Average weighted depths",
"Weighted depths",
"Prediction",
),
fontsize=12,
loc="upper center",
bbox_to_anchor=(0.5, 1.0),
ncol=3,
)
# plt.savefig(filename)
logging.info("Saved the decision functions in '%s'" % filename) |
_i18n.py | # Copyright 2015 Cloudbase Solutions Srl | # not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html .
"""
import oslo_i18n
_translators = oslo_i18n.TranslatorFactory(domain='oslo.windows')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may |
mod.rs | //! Basic graph module without explicit support for deletion.
//!
//! # Panics
//!
//! All methods will panic if given an out-of-bounds element index.
pub mod connectivity;
pub mod flow;
mod util;
/// Represents a union of disjoint sets. Each set's elements are arranged in a
/// tree, whose root is the set's representative.
pub struct DisjointSets {
parent: Vec<usize>,
}
impl DisjointSets {
/// Initializes disjoint sets containing one element each.
pub fn new(size: usize) -> Self {
Self {
parent: (0..size).collect(),
}
}
/// Finds the set's representative. Do path compression along the way to make
/// future queries faster.
pub fn find(&mut self, u: usize) -> usize {
let pu = self.parent[u];
if pu != u {
self.parent[u] = self.find(pu);
}
self.parent[u]
}
/// Merges the sets containing u and v into a single set containing their
/// union. Returns true if u and v were previously in different sets.
pub fn merge(&mut self, u: usize, v: usize) -> bool {
let (pu, pv) = (self.find(u), self.find(v));
self.parent[pu] = pv;
pu != pv
}
}
/// A compact graph representation. Edges are numbered in order of insertion.
/// Each adjacency list consists of all edges pointing out from a given vertex.
pub struct Graph {
/// Maps a vertex id to the first edge in its adjacency list.
first: Vec<Option<usize>>,
/// Maps an edge id to the next edge in the same adjacency list.
next: Vec<Option<usize>>,
/// Maps an edge id to the vertex that it points to.
endp: Vec<usize>,
}
impl Graph {
/// Initializes a graph with vmax vertices and no edges. To reduce
/// unnecessary allocations, emax_hint should be close to the number of
/// edges that will be inserted.
pub fn new(vmax: usize, emax_hint: usize) -> Self {
Self {
first: vec![None; vmax],
next: Vec::with_capacity(emax_hint),
endp: Vec::with_capacity(emax_hint),
}
}
/// Returns the number of vertices.
pub fn num_v(&self) -> usize {
self.first.len()
}
/// Returns the number of edges, double-counting undirected edges.
pub fn num_e(&self) -> usize {
self.endp.len()
}
/// Adds a directed edge from u to v.
pub fn add_edge(&mut self, u: usize, v: usize) {
self.next.push(self.first[u]);
self.first[u] = Some(self.num_e());
self.endp.push(v);
}
/// An undirected edge is two directed edges. If edges are added only via
    /// this function, the reverse of any edge e can be found at e^1.
pub fn add_undirected_edge(&mut self, u: usize, v: usize) {
self.add_edge(u, v);
self.add_edge(v, u);
}
/// If we think of each even-numbered vertex as a variable, and its
/// odd-numbered successor as its negation, then we can build the
/// implication graph corresponding to any 2-CNF formula.
/// Note that u||v == !u -> v == !v -> u.
pub fn add_two_sat_clause(&mut self, u: usize, v: usize) {
self.add_edge(u ^ 1, v);
self.add_edge(v ^ 1, u);
}
/// Gets vertex u's adjacency list.
pub fn adj_list(&self, u: usize) -> AdjListIterator {
AdjListIterator {
graph: self,
next_e: self.first[u],
}
}
}
/// An iterator for convenient adjacency list traversal.
pub struct AdjListIterator<'a> {
graph: &'a Graph,
next_e: Option<usize>,
}
impl<'a> Iterator for AdjListIterator<'a> {
type Item = (usize, usize);
/// Produces an outgoing edge and vertex.
fn next(&mut self) -> Option<Self::Item> |
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_adj_list() {
let mut graph = Graph::new(4, 4);
graph.add_edge(0, 1);
graph.add_edge(1, 2);
graph.add_edge(1, 3);
graph.add_edge(3, 0);
let adj: Vec<(usize, usize)> = graph.adj_list(1).collect();
assert_eq!(adj, vec![(2, 3), (1, 2)]);
}
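    // Illustrative addition (not from the original module): a minimal check of
    // the DisjointSets union-find defined above, using only the new/find/merge
    // API from this file.
    #[test]
    fn test_disjoint_sets() {
        let mut ds = DisjointSets::new(3);
        assert!(ds.merge(0, 1));
        assert!(!ds.merge(1, 0));
        assert_eq!(ds.find(0), ds.find(1));
        assert_ne!(ds.find(0), ds.find(2));
    }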
}
| {
self.next_e.map(|e| {
let v = self.graph.endp[e];
self.next_e = self.graph.next[e];
(e, v)
})
} |
go1_14_syscall_windows_arm.go | // Code generated by 'github.com/containous/yaegi/extract syscall'. DO NOT EDIT.
// +build go1.14,!go1.15
package syscall
import (
"go/constant"
"go/token"
"reflect"
"syscall"
)
func init() |
// _syscall_Conn is an interface wrapper for Conn type
type _syscall_Conn struct {
WSyscallConn func() (syscall.RawConn, error)
}
func (W _syscall_Conn) SyscallConn() (syscall.RawConn, error) { return W.WSyscallConn() }
// _syscall_RawConn is an interface wrapper for RawConn type
type _syscall_RawConn struct {
WControl func(f func(fd uintptr)) error
WRead func(f func(fd uintptr) (done bool)) error
WWrite func(f func(fd uintptr) (done bool)) error
}
func (W _syscall_RawConn) Control(f func(fd uintptr)) error { return W.WControl(f) }
func (W _syscall_RawConn) Read(f func(fd uintptr) (done bool)) error { return W.WRead(f) }
func (W _syscall_RawConn) Write(f func(fd uintptr) (done bool)) error { return W.WWrite(f) }
// _syscall_Sockaddr is an interface wrapper for Sockaddr type
type _syscall_Sockaddr struct {
}
| {
Symbols["syscall"] = map[string]reflect.Value{
// function, constant and variable definitions
"AF_INET": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"AF_INET6": reflect.ValueOf(constant.MakeFromLiteral("23", token.INT, 0)),
"AF_NETBIOS": reflect.ValueOf(constant.MakeFromLiteral("17", token.INT, 0)),
"AF_UNIX": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"AF_UNSPEC": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"AI_CANONNAME": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"AI_NUMERICHOST": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"AI_PASSIVE": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"APPLICATION_ERROR": reflect.ValueOf(constant.MakeFromLiteral("536870912", token.INT, 0)),
"AUTHTYPE_CLIENT": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"AUTHTYPE_SERVER": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"Accept": reflect.ValueOf(syscall.Accept),
"AcceptEx": reflect.ValueOf(syscall.AcceptEx),
"BASE_PROTOCOL": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"Bind": reflect.ValueOf(syscall.Bind),
"BytePtrFromString": reflect.ValueOf(syscall.BytePtrFromString),
"ByteSliceFromString": reflect.ValueOf(syscall.ByteSliceFromString),
"CERT_CHAIN_POLICY_AUTHENTICODE": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"CERT_CHAIN_POLICY_AUTHENTICODE_TS": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"CERT_CHAIN_POLICY_BASE": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"CERT_CHAIN_POLICY_BASIC_CONSTRAINTS": reflect.ValueOf(constant.MakeFromLiteral("5", token.INT, 0)),
"CERT_CHAIN_POLICY_EV": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"CERT_CHAIN_POLICY_MICROSOFT_ROOT": reflect.ValueOf(constant.MakeFromLiteral("7", token.INT, 0)),
"CERT_CHAIN_POLICY_NT_AUTH": reflect.ValueOf(constant.MakeFromLiteral("6", token.INT, 0)),
"CERT_CHAIN_POLICY_SSL": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"CERT_E_CN_NO_MATCH": reflect.ValueOf(constant.MakeFromLiteral("2148204815", token.INT, 0)),
"CERT_E_EXPIRED": reflect.ValueOf(constant.MakeFromLiteral("2148204801", token.INT, 0)),
"CERT_E_PURPOSE": reflect.ValueOf(constant.MakeFromLiteral("2148204806", token.INT, 0)),
"CERT_E_ROLE": reflect.ValueOf(constant.MakeFromLiteral("2148204803", token.INT, 0)),
"CERT_E_UNTRUSTEDROOT": reflect.ValueOf(constant.MakeFromLiteral("2148204809", token.INT, 0)),
"CERT_STORE_ADD_ALWAYS": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"CERT_STORE_PROV_MEMORY": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT": reflect.ValueOf(constant.MakeFromLiteral("32768", token.INT, 0)),
"CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT": reflect.ValueOf(constant.MakeFromLiteral("8192", token.INT, 0)),
"CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT": reflect.ValueOf(constant.MakeFromLiteral("16384", token.INT, 0)),
"CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT": reflect.ValueOf(constant.MakeFromLiteral("134217728", token.INT, 0)),
"CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT": reflect.ValueOf(constant.MakeFromLiteral("4096", token.INT, 0)),
"CERT_TRUST_INVALID_BASIC_CONSTRAINTS": reflect.ValueOf(constant.MakeFromLiteral("1024", token.INT, 0)),
"CERT_TRUST_INVALID_EXTENSION": reflect.ValueOf(constant.MakeFromLiteral("256", token.INT, 0)),
"CERT_TRUST_INVALID_NAME_CONSTRAINTS": reflect.ValueOf(constant.MakeFromLiteral("2048", token.INT, 0)),
"CERT_TRUST_INVALID_POLICY_CONSTRAINTS": reflect.ValueOf(constant.MakeFromLiteral("512", token.INT, 0)),
"CERT_TRUST_IS_CYCLIC": reflect.ValueOf(constant.MakeFromLiteral("128", token.INT, 0)),
"CERT_TRUST_IS_EXPLICIT_DISTRUST": reflect.ValueOf(constant.MakeFromLiteral("67108864", token.INT, 0)),
"CERT_TRUST_IS_NOT_SIGNATURE_VALID": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"CERT_TRUST_IS_NOT_TIME_VALID": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"CERT_TRUST_IS_NOT_VALID_FOR_USAGE": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"CERT_TRUST_IS_OFFLINE_REVOCATION": reflect.ValueOf(constant.MakeFromLiteral("16777216", token.INT, 0)),
"CERT_TRUST_IS_REVOKED": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"CERT_TRUST_IS_UNTRUSTED_ROOT": reflect.ValueOf(constant.MakeFromLiteral("32", token.INT, 0)),
"CERT_TRUST_NO_ERROR": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY": reflect.ValueOf(constant.MakeFromLiteral("33554432", token.INT, 0)),
"CERT_TRUST_REVOCATION_STATUS_UNKNOWN": reflect.ValueOf(constant.MakeFromLiteral("64", token.INT, 0)),
"CREATE_ALWAYS": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"CREATE_NEW": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"CREATE_NEW_PROCESS_GROUP": reflect.ValueOf(constant.MakeFromLiteral("512", token.INT, 0)),
"CREATE_UNICODE_ENVIRONMENT": reflect.ValueOf(constant.MakeFromLiteral("1024", token.INT, 0)),
"CRYPT_DEFAULT_CONTAINER_OPTIONAL": reflect.ValueOf(constant.MakeFromLiteral("128", token.INT, 0)),
"CRYPT_DELETEKEYSET": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"CRYPT_MACHINE_KEYSET": reflect.ValueOf(constant.MakeFromLiteral("32", token.INT, 0)),
"CRYPT_NEWKEYSET": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"CRYPT_SILENT": reflect.ValueOf(constant.MakeFromLiteral("64", token.INT, 0)),
"CRYPT_VERIFYCONTEXT": reflect.ValueOf(constant.MakeFromLiteral("4026531840", token.INT, 0)),
"CTRL_BREAK_EVENT": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"CTRL_CLOSE_EVENT": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"CTRL_C_EVENT": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"CTRL_LOGOFF_EVENT": reflect.ValueOf(constant.MakeFromLiteral("5", token.INT, 0)),
"CTRL_SHUTDOWN_EVENT": reflect.ValueOf(constant.MakeFromLiteral("6", token.INT, 0)),
"CancelIo": reflect.ValueOf(syscall.CancelIo),
"CancelIoEx": reflect.ValueOf(syscall.CancelIoEx),
"CertAddCertificateContextToStore": reflect.ValueOf(syscall.CertAddCertificateContextToStore),
"CertCloseStore": reflect.ValueOf(syscall.CertCloseStore),
"CertCreateCertificateContext": reflect.ValueOf(syscall.CertCreateCertificateContext),
"CertEnumCertificatesInStore": reflect.ValueOf(syscall.CertEnumCertificatesInStore),
"CertFreeCertificateChain": reflect.ValueOf(syscall.CertFreeCertificateChain),
"CertFreeCertificateContext": reflect.ValueOf(syscall.CertFreeCertificateContext),
"CertGetCertificateChain": reflect.ValueOf(syscall.CertGetCertificateChain),
"CertOpenStore": reflect.ValueOf(syscall.CertOpenStore),
"CertOpenSystemStore": reflect.ValueOf(syscall.CertOpenSystemStore),
"CertVerifyCertificateChainPolicy": reflect.ValueOf(syscall.CertVerifyCertificateChainPolicy),
"Chdir": reflect.ValueOf(syscall.Chdir),
"Chmod": reflect.ValueOf(syscall.Chmod),
"Chown": reflect.ValueOf(syscall.Chown),
"Clearenv": reflect.ValueOf(syscall.Clearenv),
"Close": reflect.ValueOf(syscall.Close),
"CloseHandle": reflect.ValueOf(syscall.CloseHandle),
"CloseOnExec": reflect.ValueOf(syscall.CloseOnExec),
"Closesocket": reflect.ValueOf(syscall.Closesocket),
"CommandLineToArgv": reflect.ValueOf(syscall.CommandLineToArgv),
"ComputerName": reflect.ValueOf(syscall.ComputerName),
"Connect": reflect.ValueOf(syscall.Connect),
"ConnectEx": reflect.ValueOf(syscall.ConnectEx),
"ConvertSidToStringSid": reflect.ValueOf(syscall.ConvertSidToStringSid),
"ConvertStringSidToSid": reflect.ValueOf(syscall.ConvertStringSidToSid),
"CopySid": reflect.ValueOf(syscall.CopySid),
"CreateDirectory": reflect.ValueOf(syscall.CreateDirectory),
"CreateFile": reflect.ValueOf(syscall.CreateFile),
"CreateFileMapping": reflect.ValueOf(syscall.CreateFileMapping),
"CreateHardLink": reflect.ValueOf(syscall.CreateHardLink),
"CreateIoCompletionPort": reflect.ValueOf(syscall.CreateIoCompletionPort),
"CreatePipe": reflect.ValueOf(syscall.CreatePipe),
"CreateProcess": reflect.ValueOf(syscall.CreateProcess),
"CreateProcessAsUser": reflect.ValueOf(syscall.CreateProcessAsUser),
"CreateSymbolicLink": reflect.ValueOf(syscall.CreateSymbolicLink),
"CreateToolhelp32Snapshot": reflect.ValueOf(syscall.CreateToolhelp32Snapshot),
"CryptAcquireContext": reflect.ValueOf(syscall.CryptAcquireContext),
"CryptGenRandom": reflect.ValueOf(syscall.CryptGenRandom),
"CryptReleaseContext": reflect.ValueOf(syscall.CryptReleaseContext),
"DNS_INFO_NO_RECORDS": reflect.ValueOf(constant.MakeFromLiteral("9501", token.INT, 0)),
"DNS_TYPE_A": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"DNS_TYPE_A6": reflect.ValueOf(constant.MakeFromLiteral("38", token.INT, 0)),
"DNS_TYPE_AAAA": reflect.ValueOf(constant.MakeFromLiteral("28", token.INT, 0)),
"DNS_TYPE_ADDRS": reflect.ValueOf(constant.MakeFromLiteral("248", token.INT, 0)),
"DNS_TYPE_AFSDB": reflect.ValueOf(constant.MakeFromLiteral("18", token.INT, 0)),
"DNS_TYPE_ALL": reflect.ValueOf(constant.MakeFromLiteral("255", token.INT, 0)),
"DNS_TYPE_ANY": reflect.ValueOf(constant.MakeFromLiteral("255", token.INT, 0)),
"DNS_TYPE_ATMA": reflect.ValueOf(constant.MakeFromLiteral("34", token.INT, 0)),
"DNS_TYPE_AXFR": reflect.ValueOf(constant.MakeFromLiteral("252", token.INT, 0)),
"DNS_TYPE_CERT": reflect.ValueOf(constant.MakeFromLiteral("37", token.INT, 0)),
"DNS_TYPE_CNAME": reflect.ValueOf(constant.MakeFromLiteral("5", token.INT, 0)),
"DNS_TYPE_DHCID": reflect.ValueOf(constant.MakeFromLiteral("49", token.INT, 0)),
"DNS_TYPE_DNAME": reflect.ValueOf(constant.MakeFromLiteral("39", token.INT, 0)),
"DNS_TYPE_DNSKEY": reflect.ValueOf(constant.MakeFromLiteral("48", token.INT, 0)),
"DNS_TYPE_DS": reflect.ValueOf(constant.MakeFromLiteral("43", token.INT, 0)),
"DNS_TYPE_EID": reflect.ValueOf(constant.MakeFromLiteral("31", token.INT, 0)),
"DNS_TYPE_GID": reflect.ValueOf(constant.MakeFromLiteral("102", token.INT, 0)),
"DNS_TYPE_GPOS": reflect.ValueOf(constant.MakeFromLiteral("27", token.INT, 0)),
"DNS_TYPE_HINFO": reflect.ValueOf(constant.MakeFromLiteral("13", token.INT, 0)),
"DNS_TYPE_ISDN": reflect.ValueOf(constant.MakeFromLiteral("20", token.INT, 0)),
"DNS_TYPE_IXFR": reflect.ValueOf(constant.MakeFromLiteral("251", token.INT, 0)),
"DNS_TYPE_KEY": reflect.ValueOf(constant.MakeFromLiteral("25", token.INT, 0)),
"DNS_TYPE_KX": reflect.ValueOf(constant.MakeFromLiteral("36", token.INT, 0)),
"DNS_TYPE_LOC": reflect.ValueOf(constant.MakeFromLiteral("29", token.INT, 0)),
"DNS_TYPE_MAILA": reflect.ValueOf(constant.MakeFromLiteral("254", token.INT, 0)),
"DNS_TYPE_MAILB": reflect.ValueOf(constant.MakeFromLiteral("253", token.INT, 0)),
"DNS_TYPE_MB": reflect.ValueOf(constant.MakeFromLiteral("7", token.INT, 0)),
"DNS_TYPE_MD": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"DNS_TYPE_MF": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"DNS_TYPE_MG": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"DNS_TYPE_MINFO": reflect.ValueOf(constant.MakeFromLiteral("14", token.INT, 0)),
"DNS_TYPE_MR": reflect.ValueOf(constant.MakeFromLiteral("9", token.INT, 0)),
"DNS_TYPE_MX": reflect.ValueOf(constant.MakeFromLiteral("15", token.INT, 0)),
"DNS_TYPE_NAPTR": reflect.ValueOf(constant.MakeFromLiteral("35", token.INT, 0)),
"DNS_TYPE_NBSTAT": reflect.ValueOf(constant.MakeFromLiteral("65281", token.INT, 0)),
"DNS_TYPE_NIMLOC": reflect.ValueOf(constant.MakeFromLiteral("32", token.INT, 0)),
"DNS_TYPE_NS": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"DNS_TYPE_NSAP": reflect.ValueOf(constant.MakeFromLiteral("22", token.INT, 0)),
"DNS_TYPE_NSAPPTR": reflect.ValueOf(constant.MakeFromLiteral("23", token.INT, 0)),
"DNS_TYPE_NSEC": reflect.ValueOf(constant.MakeFromLiteral("47", token.INT, 0)),
"DNS_TYPE_NULL": reflect.ValueOf(constant.MakeFromLiteral("10", token.INT, 0)),
"DNS_TYPE_NXT": reflect.ValueOf(constant.MakeFromLiteral("30", token.INT, 0)),
"DNS_TYPE_OPT": reflect.ValueOf(constant.MakeFromLiteral("41", token.INT, 0)),
"DNS_TYPE_PTR": reflect.ValueOf(constant.MakeFromLiteral("12", token.INT, 0)),
"DNS_TYPE_PX": reflect.ValueOf(constant.MakeFromLiteral("26", token.INT, 0)),
"DNS_TYPE_RP": reflect.ValueOf(constant.MakeFromLiteral("17", token.INT, 0)),
"DNS_TYPE_RRSIG": reflect.ValueOf(constant.MakeFromLiteral("46", token.INT, 0)),
"DNS_TYPE_RT": reflect.ValueOf(constant.MakeFromLiteral("21", token.INT, 0)),
"DNS_TYPE_SIG": reflect.ValueOf(constant.MakeFromLiteral("24", token.INT, 0)),
"DNS_TYPE_SINK": reflect.ValueOf(constant.MakeFromLiteral("40", token.INT, 0)),
"DNS_TYPE_SOA": reflect.ValueOf(constant.MakeFromLiteral("6", token.INT, 0)),
"DNS_TYPE_SRV": reflect.ValueOf(constant.MakeFromLiteral("33", token.INT, 0)),
"DNS_TYPE_TEXT": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"DNS_TYPE_TKEY": reflect.ValueOf(constant.MakeFromLiteral("249", token.INT, 0)),
"DNS_TYPE_TSIG": reflect.ValueOf(constant.MakeFromLiteral("250", token.INT, 0)),
"DNS_TYPE_UID": reflect.ValueOf(constant.MakeFromLiteral("101", token.INT, 0)),
"DNS_TYPE_UINFO": reflect.ValueOf(constant.MakeFromLiteral("100", token.INT, 0)),
"DNS_TYPE_UNSPEC": reflect.ValueOf(constant.MakeFromLiteral("103", token.INT, 0)),
"DNS_TYPE_WINS": reflect.ValueOf(constant.MakeFromLiteral("65281", token.INT, 0)),
"DNS_TYPE_WINSR": reflect.ValueOf(constant.MakeFromLiteral("65282", token.INT, 0)),
"DNS_TYPE_WKS": reflect.ValueOf(constant.MakeFromLiteral("11", token.INT, 0)),
"DNS_TYPE_X25": reflect.ValueOf(constant.MakeFromLiteral("19", token.INT, 0)),
"DUPLICATE_CLOSE_SOURCE": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"DUPLICATE_SAME_ACCESS": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"DeleteFile": reflect.ValueOf(syscall.DeleteFile),
"DeviceIoControl": reflect.ValueOf(syscall.DeviceIoControl),
"DnsNameCompare": reflect.ValueOf(syscall.DnsNameCompare),
"DnsQuery": reflect.ValueOf(syscall.DnsQuery),
"DnsRecordListFree": reflect.ValueOf(syscall.DnsRecordListFree),
"DnsSectionAdditional": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"DnsSectionAnswer": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"DnsSectionAuthority": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"DnsSectionQuestion": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"DuplicateHandle": reflect.ValueOf(syscall.DuplicateHandle),
"E2BIG": reflect.ValueOf(syscall.E2BIG),
"EACCES": reflect.ValueOf(syscall.EACCES),
"EADDRINUSE": reflect.ValueOf(syscall.EADDRINUSE),
"EADDRNOTAVAIL": reflect.ValueOf(syscall.EADDRNOTAVAIL),
"EADV": reflect.ValueOf(syscall.EADV),
"EAFNOSUPPORT": reflect.ValueOf(syscall.EAFNOSUPPORT),
"EAGAIN": reflect.ValueOf(syscall.EAGAIN),
"EALREADY": reflect.ValueOf(syscall.EALREADY),
"EBADE": reflect.ValueOf(syscall.EBADE),
"EBADF": reflect.ValueOf(syscall.EBADF),
"EBADFD": reflect.ValueOf(syscall.EBADFD),
"EBADMSG": reflect.ValueOf(syscall.EBADMSG),
"EBADR": reflect.ValueOf(syscall.EBADR),
"EBADRQC": reflect.ValueOf(syscall.EBADRQC),
"EBADSLT": reflect.ValueOf(syscall.EBADSLT),
"EBFONT": reflect.ValueOf(syscall.EBFONT),
"EBUSY": reflect.ValueOf(syscall.EBUSY),
"ECANCELED": reflect.ValueOf(syscall.ECANCELED),
"ECHILD": reflect.ValueOf(syscall.ECHILD),
"ECHRNG": reflect.ValueOf(syscall.ECHRNG),
"ECOMM": reflect.ValueOf(syscall.ECOMM),
"ECONNABORTED": reflect.ValueOf(syscall.ECONNABORTED),
"ECONNREFUSED": reflect.ValueOf(syscall.ECONNREFUSED),
"ECONNRESET": reflect.ValueOf(syscall.ECONNRESET),
"EDEADLK": reflect.ValueOf(syscall.EDEADLK),
"EDEADLOCK": reflect.ValueOf(syscall.EDEADLOCK),
"EDESTADDRREQ": reflect.ValueOf(syscall.EDESTADDRREQ),
"EDOM": reflect.ValueOf(syscall.EDOM),
"EDOTDOT": reflect.ValueOf(syscall.EDOTDOT),
"EDQUOT": reflect.ValueOf(syscall.EDQUOT),
"EEXIST": reflect.ValueOf(syscall.EEXIST),
"EFAULT": reflect.ValueOf(syscall.EFAULT),
"EFBIG": reflect.ValueOf(syscall.EFBIG),
"EHOSTDOWN": reflect.ValueOf(syscall.EHOSTDOWN),
"EHOSTUNREACH": reflect.ValueOf(syscall.EHOSTUNREACH),
"EIDRM": reflect.ValueOf(syscall.EIDRM),
"EILSEQ": reflect.ValueOf(syscall.EILSEQ),
"EINPROGRESS": reflect.ValueOf(syscall.EINPROGRESS),
"EINTR": reflect.ValueOf(syscall.EINTR),
"EINVAL": reflect.ValueOf(syscall.EINVAL),
"EIO": reflect.ValueOf(syscall.EIO),
"EISCONN": reflect.ValueOf(syscall.EISCONN),
"EISDIR": reflect.ValueOf(syscall.EISDIR),
"EISNAM": reflect.ValueOf(syscall.EISNAM),
"EKEYEXPIRED": reflect.ValueOf(syscall.EKEYEXPIRED),
"EKEYREJECTED": reflect.ValueOf(syscall.EKEYREJECTED),
"EKEYREVOKED": reflect.ValueOf(syscall.EKEYREVOKED),
"EL2HLT": reflect.ValueOf(syscall.EL2HLT),
"EL2NSYNC": reflect.ValueOf(syscall.EL2NSYNC),
"EL3HLT": reflect.ValueOf(syscall.EL3HLT),
"EL3RST": reflect.ValueOf(syscall.EL3RST),
"ELIBACC": reflect.ValueOf(syscall.ELIBACC),
"ELIBBAD": reflect.ValueOf(syscall.ELIBBAD),
"ELIBEXEC": reflect.ValueOf(syscall.ELIBEXEC),
"ELIBMAX": reflect.ValueOf(syscall.ELIBMAX),
"ELIBSCN": reflect.ValueOf(syscall.ELIBSCN),
"ELNRNG": reflect.ValueOf(syscall.ELNRNG),
"ELOOP": reflect.ValueOf(syscall.ELOOP),
"EMEDIUMTYPE": reflect.ValueOf(syscall.EMEDIUMTYPE),
"EMFILE": reflect.ValueOf(syscall.EMFILE),
"EMLINK": reflect.ValueOf(syscall.EMLINK),
"EMSGSIZE": reflect.ValueOf(syscall.EMSGSIZE),
"EMULTIHOP": reflect.ValueOf(syscall.EMULTIHOP),
"ENAMETOOLONG": reflect.ValueOf(syscall.ENAMETOOLONG),
"ENAVAIL": reflect.ValueOf(syscall.ENAVAIL),
"ENETDOWN": reflect.ValueOf(syscall.ENETDOWN),
"ENETRESET": reflect.ValueOf(syscall.ENETRESET),
"ENETUNREACH": reflect.ValueOf(syscall.ENETUNREACH),
"ENFILE": reflect.ValueOf(syscall.ENFILE),
"ENOANO": reflect.ValueOf(syscall.ENOANO),
"ENOBUFS": reflect.ValueOf(syscall.ENOBUFS),
"ENOCSI": reflect.ValueOf(syscall.ENOCSI),
"ENODATA": reflect.ValueOf(syscall.ENODATA),
"ENODEV": reflect.ValueOf(syscall.ENODEV),
"ENOENT": reflect.ValueOf(syscall.ENOENT),
"ENOEXEC": reflect.ValueOf(syscall.ENOEXEC),
"ENOKEY": reflect.ValueOf(syscall.ENOKEY),
"ENOLCK": reflect.ValueOf(syscall.ENOLCK),
"ENOLINK": reflect.ValueOf(syscall.ENOLINK),
"ENOMEDIUM": reflect.ValueOf(syscall.ENOMEDIUM),
"ENOMEM": reflect.ValueOf(syscall.ENOMEM),
"ENOMSG": reflect.ValueOf(syscall.ENOMSG),
"ENONET": reflect.ValueOf(syscall.ENONET),
"ENOPKG": reflect.ValueOf(syscall.ENOPKG),
"ENOPROTOOPT": reflect.ValueOf(syscall.ENOPROTOOPT),
"ENOSPC": reflect.ValueOf(syscall.ENOSPC),
"ENOSR": reflect.ValueOf(syscall.ENOSR),
"ENOSTR": reflect.ValueOf(syscall.ENOSTR),
"ENOSYS": reflect.ValueOf(syscall.ENOSYS),
"ENOTBLK": reflect.ValueOf(syscall.ENOTBLK),
"ENOTCONN": reflect.ValueOf(syscall.ENOTCONN),
"ENOTDIR": reflect.ValueOf(syscall.ENOTDIR),
"ENOTEMPTY": reflect.ValueOf(syscall.ENOTEMPTY),
"ENOTNAM": reflect.ValueOf(syscall.ENOTNAM),
"ENOTRECOVERABLE": reflect.ValueOf(syscall.ENOTRECOVERABLE),
"ENOTSOCK": reflect.ValueOf(syscall.ENOTSOCK),
"ENOTSUP": reflect.ValueOf(syscall.ENOTSUP),
"ENOTTY": reflect.ValueOf(syscall.ENOTTY),
"ENOTUNIQ": reflect.ValueOf(syscall.ENOTUNIQ),
"ENXIO": reflect.ValueOf(syscall.ENXIO),
"EOPNOTSUPP": reflect.ValueOf(syscall.EOPNOTSUPP),
"EOVERFLOW": reflect.ValueOf(syscall.EOVERFLOW),
"EOWNERDEAD": reflect.ValueOf(syscall.EOWNERDEAD),
"EPERM": reflect.ValueOf(syscall.EPERM),
"EPFNOSUPPORT": reflect.ValueOf(syscall.EPFNOSUPPORT),
"EPIPE": reflect.ValueOf(syscall.EPIPE),
"EPROTO": reflect.ValueOf(syscall.EPROTO),
"EPROTONOSUPPORT": reflect.ValueOf(syscall.EPROTONOSUPPORT),
"EPROTOTYPE": reflect.ValueOf(syscall.EPROTOTYPE),
"ERANGE": reflect.ValueOf(syscall.ERANGE),
"EREMCHG": reflect.ValueOf(syscall.EREMCHG),
"EREMOTE": reflect.ValueOf(syscall.EREMOTE),
"EREMOTEIO": reflect.ValueOf(syscall.EREMOTEIO),
"ERESTART": reflect.ValueOf(syscall.ERESTART),
"EROFS": reflect.ValueOf(syscall.EROFS),
"ERROR_ACCESS_DENIED": reflect.ValueOf(syscall.ERROR_ACCESS_DENIED),
"ERROR_ALREADY_EXISTS": reflect.ValueOf(syscall.ERROR_ALREADY_EXISTS),
"ERROR_BROKEN_PIPE": reflect.ValueOf(syscall.ERROR_BROKEN_PIPE),
"ERROR_BUFFER_OVERFLOW": reflect.ValueOf(syscall.ERROR_BUFFER_OVERFLOW),
"ERROR_DIR_NOT_EMPTY": reflect.ValueOf(syscall.ERROR_DIR_NOT_EMPTY),
"ERROR_ENVVAR_NOT_FOUND": reflect.ValueOf(syscall.ERROR_ENVVAR_NOT_FOUND),
"ERROR_FILE_EXISTS": reflect.ValueOf(syscall.ERROR_FILE_EXISTS),
"ERROR_FILE_NOT_FOUND": reflect.ValueOf(syscall.ERROR_FILE_NOT_FOUND),
"ERROR_HANDLE_EOF": reflect.ValueOf(syscall.ERROR_HANDLE_EOF),
"ERROR_INSUFFICIENT_BUFFER": reflect.ValueOf(syscall.ERROR_INSUFFICIENT_BUFFER),
"ERROR_IO_PENDING": reflect.ValueOf(syscall.ERROR_IO_PENDING),
"ERROR_MOD_NOT_FOUND": reflect.ValueOf(syscall.ERROR_MOD_NOT_FOUND),
"ERROR_MORE_DATA": reflect.ValueOf(syscall.ERROR_MORE_DATA),
"ERROR_NETNAME_DELETED": reflect.ValueOf(syscall.ERROR_NETNAME_DELETED),
"ERROR_NOT_FOUND": reflect.ValueOf(syscall.ERROR_NOT_FOUND),
"ERROR_NO_MORE_FILES": reflect.ValueOf(syscall.ERROR_NO_MORE_FILES),
"ERROR_OPERATION_ABORTED": reflect.ValueOf(syscall.ERROR_OPERATION_ABORTED),
"ERROR_PATH_NOT_FOUND": reflect.ValueOf(syscall.ERROR_PATH_NOT_FOUND),
"ERROR_PRIVILEGE_NOT_HELD": reflect.ValueOf(syscall.ERROR_PRIVILEGE_NOT_HELD),
"ERROR_PROC_NOT_FOUND": reflect.ValueOf(syscall.ERROR_PROC_NOT_FOUND),
"ESHUTDOWN": reflect.ValueOf(syscall.ESHUTDOWN),
"ESOCKTNOSUPPORT": reflect.ValueOf(syscall.ESOCKTNOSUPPORT),
"ESPIPE": reflect.ValueOf(syscall.ESPIPE),
"ESRCH": reflect.ValueOf(syscall.ESRCH),
"ESRMNT": reflect.ValueOf(syscall.ESRMNT),
"ESTALE": reflect.ValueOf(syscall.ESTALE),
"ESTRPIPE": reflect.ValueOf(syscall.ESTRPIPE),
"ETIME": reflect.ValueOf(syscall.ETIME),
"ETIMEDOUT": reflect.ValueOf(syscall.ETIMEDOUT),
"ETOOMANYREFS": reflect.ValueOf(syscall.ETOOMANYREFS),
"ETXTBSY": reflect.ValueOf(syscall.ETXTBSY),
"EUCLEAN": reflect.ValueOf(syscall.EUCLEAN),
"EUNATCH": reflect.ValueOf(syscall.EUNATCH),
"EUSERS": reflect.ValueOf(syscall.EUSERS),
"EWINDOWS": reflect.ValueOf(syscall.EWINDOWS),
"EWOULDBLOCK": reflect.ValueOf(syscall.EWOULDBLOCK),
"EXDEV": reflect.ValueOf(syscall.EXDEV),
"EXFULL": reflect.ValueOf(syscall.EXFULL),
"Environ": reflect.ValueOf(syscall.Environ),
"EscapeArg": reflect.ValueOf(syscall.EscapeArg),
"Exec": reflect.ValueOf(syscall.Exec),
"Exit": reflect.ValueOf(syscall.Exit),
"ExitProcess": reflect.ValueOf(syscall.ExitProcess),
"FILE_ACTION_ADDED": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"FILE_ACTION_MODIFIED": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"FILE_ACTION_REMOVED": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"FILE_ACTION_RENAMED_NEW_NAME": reflect.ValueOf(constant.MakeFromLiteral("5", token.INT, 0)),
"FILE_ACTION_RENAMED_OLD_NAME": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"FILE_APPEND_DATA": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"FILE_ATTRIBUTE_ARCHIVE": reflect.ValueOf(constant.MakeFromLiteral("32", token.INT, 0)),
"FILE_ATTRIBUTE_DIRECTORY": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"FILE_ATTRIBUTE_HIDDEN": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"FILE_ATTRIBUTE_NORMAL": reflect.ValueOf(constant.MakeFromLiteral("128", token.INT, 0)),
"FILE_ATTRIBUTE_READONLY": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"FILE_ATTRIBUTE_REPARSE_POINT": reflect.ValueOf(constant.MakeFromLiteral("1024", token.INT, 0)),
"FILE_ATTRIBUTE_SYSTEM": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"FILE_BEGIN": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"FILE_CURRENT": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"FILE_END": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"FILE_FLAG_BACKUP_SEMANTICS": reflect.ValueOf(constant.MakeFromLiteral("33554432", token.INT, 0)),
"FILE_FLAG_OPEN_REPARSE_POINT": reflect.ValueOf(constant.MakeFromLiteral("2097152", token.INT, 0)),
"FILE_FLAG_OVERLAPPED": reflect.ValueOf(constant.MakeFromLiteral("1073741824", token.INT, 0)),
"FILE_LIST_DIRECTORY": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"FILE_MAP_COPY": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"FILE_MAP_EXECUTE": reflect.ValueOf(constant.MakeFromLiteral("32", token.INT, 0)),
"FILE_MAP_READ": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"FILE_MAP_WRITE": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"FILE_NOTIFY_CHANGE_ATTRIBUTES": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"FILE_NOTIFY_CHANGE_CREATION": reflect.ValueOf(constant.MakeFromLiteral("64", token.INT, 0)),
"FILE_NOTIFY_CHANGE_DIR_NAME": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"FILE_NOTIFY_CHANGE_FILE_NAME": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"FILE_NOTIFY_CHANGE_LAST_ACCESS": reflect.ValueOf(constant.MakeFromLiteral("32", token.INT, 0)),
"FILE_NOTIFY_CHANGE_LAST_WRITE": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"FILE_NOTIFY_CHANGE_SIZE": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"FILE_SHARE_DELETE": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"FILE_SHARE_READ": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"FILE_SHARE_WRITE": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"FILE_SKIP_COMPLETION_PORT_ON_SUCCESS": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"FILE_SKIP_SET_EVENT_ON_HANDLE": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"FILE_TYPE_CHAR": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"FILE_TYPE_DISK": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"FILE_TYPE_PIPE": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"FILE_TYPE_REMOTE": reflect.ValueOf(constant.MakeFromLiteral("32768", token.INT, 0)),
"FILE_TYPE_UNKNOWN": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"FILE_WRITE_ATTRIBUTES": reflect.ValueOf(constant.MakeFromLiteral("256", token.INT, 0)),
"FORMAT_MESSAGE_ALLOCATE_BUFFER": reflect.ValueOf(constant.MakeFromLiteral("256", token.INT, 0)),
"FORMAT_MESSAGE_ARGUMENT_ARRAY": reflect.ValueOf(constant.MakeFromLiteral("8192", token.INT, 0)),
"FORMAT_MESSAGE_FROM_HMODULE": reflect.ValueOf(constant.MakeFromLiteral("2048", token.INT, 0)),
"FORMAT_MESSAGE_FROM_STRING": reflect.ValueOf(constant.MakeFromLiteral("1024", token.INT, 0)),
"FORMAT_MESSAGE_FROM_SYSTEM": reflect.ValueOf(constant.MakeFromLiteral("4096", token.INT, 0)),
"FORMAT_MESSAGE_IGNORE_INSERTS": reflect.ValueOf(constant.MakeFromLiteral("512", token.INT, 0)),
"FORMAT_MESSAGE_MAX_WIDTH_MASK": reflect.ValueOf(constant.MakeFromLiteral("255", token.INT, 0)),
"FSCTL_GET_REPARSE_POINT": reflect.ValueOf(constant.MakeFromLiteral("589992", token.INT, 0)),
"Fchdir": reflect.ValueOf(syscall.Fchdir),
"Fchmod": reflect.ValueOf(syscall.Fchmod),
"Fchown": reflect.ValueOf(syscall.Fchown),
"FindClose": reflect.ValueOf(syscall.FindClose),
"FindFirstFile": reflect.ValueOf(syscall.FindFirstFile),
"FindNextFile": reflect.ValueOf(syscall.FindNextFile),
"FlushFileBuffers": reflect.ValueOf(syscall.FlushFileBuffers),
"FlushViewOfFile": reflect.ValueOf(syscall.FlushViewOfFile),
"ForkLock": reflect.ValueOf(&syscall.ForkLock).Elem(),
"FormatMessage": reflect.ValueOf(syscall.FormatMessage),
"FreeAddrInfoW": reflect.ValueOf(syscall.FreeAddrInfoW),
"FreeEnvironmentStrings": reflect.ValueOf(syscall.FreeEnvironmentStrings),
"FreeLibrary": reflect.ValueOf(syscall.FreeLibrary),
"Fsync": reflect.ValueOf(syscall.Fsync),
"Ftruncate": reflect.ValueOf(syscall.Ftruncate),
"FullPath": reflect.ValueOf(syscall.FullPath),
"GENERIC_ALL": reflect.ValueOf(constant.MakeFromLiteral("268435456", token.INT, 0)),
"GENERIC_EXECUTE": reflect.ValueOf(constant.MakeFromLiteral("536870912", token.INT, 0)),
"GENERIC_READ": reflect.ValueOf(constant.MakeFromLiteral("2147483648", token.INT, 0)),
"GENERIC_WRITE": reflect.ValueOf(constant.MakeFromLiteral("1073741824", token.INT, 0)),
"GetAcceptExSockaddrs": reflect.ValueOf(syscall.GetAcceptExSockaddrs),
"GetAdaptersInfo": reflect.ValueOf(syscall.GetAdaptersInfo),
"GetAddrInfoW": reflect.ValueOf(syscall.GetAddrInfoW),
"GetCommandLine": reflect.ValueOf(syscall.GetCommandLine),
"GetComputerName": reflect.ValueOf(syscall.GetComputerName),
"GetConsoleMode": reflect.ValueOf(syscall.GetConsoleMode),
"GetCurrentDirectory": reflect.ValueOf(syscall.GetCurrentDirectory),
"GetCurrentProcess": reflect.ValueOf(syscall.GetCurrentProcess),
"GetEnvironmentStrings": reflect.ValueOf(syscall.GetEnvironmentStrings),
"GetEnvironmentVariable": reflect.ValueOf(syscall.GetEnvironmentVariable),
"GetExitCodeProcess": reflect.ValueOf(syscall.GetExitCodeProcess),
"GetFileAttributes": reflect.ValueOf(syscall.GetFileAttributes),
"GetFileAttributesEx": reflect.ValueOf(syscall.GetFileAttributesEx),
"GetFileExInfoStandard": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"GetFileExMaxInfoLevel": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"GetFileInformationByHandle": reflect.ValueOf(syscall.GetFileInformationByHandle),
"GetFileType": reflect.ValueOf(syscall.GetFileType),
"GetFullPathName": reflect.ValueOf(syscall.GetFullPathName),
"GetHostByName": reflect.ValueOf(syscall.GetHostByName),
"GetIfEntry": reflect.ValueOf(syscall.GetIfEntry),
"GetLastError": reflect.ValueOf(syscall.GetLastError),
"GetLengthSid": reflect.ValueOf(syscall.GetLengthSid),
"GetLongPathName": reflect.ValueOf(syscall.GetLongPathName),
"GetProcAddress": reflect.ValueOf(syscall.GetProcAddress),
"GetProcessTimes": reflect.ValueOf(syscall.GetProcessTimes),
"GetProtoByName": reflect.ValueOf(syscall.GetProtoByName),
"GetQueuedCompletionStatus": reflect.ValueOf(syscall.GetQueuedCompletionStatus),
"GetServByName": reflect.ValueOf(syscall.GetServByName),
"GetShortPathName": reflect.ValueOf(syscall.GetShortPathName),
"GetStartupInfo": reflect.ValueOf(syscall.GetStartupInfo),
"GetStdHandle": reflect.ValueOf(syscall.GetStdHandle),
"GetSystemTimeAsFileTime": reflect.ValueOf(syscall.GetSystemTimeAsFileTime),
"GetTempPath": reflect.ValueOf(syscall.GetTempPath),
"GetTimeZoneInformation": reflect.ValueOf(syscall.GetTimeZoneInformation),
"GetTokenInformation": reflect.ValueOf(syscall.GetTokenInformation),
"GetUserNameEx": reflect.ValueOf(syscall.GetUserNameEx),
"GetUserProfileDirectory": reflect.ValueOf(syscall.GetUserProfileDirectory),
"GetVersion": reflect.ValueOf(syscall.GetVersion),
"Getegid": reflect.ValueOf(syscall.Getegid),
"Getenv": reflect.ValueOf(syscall.Getenv),
"Geteuid": reflect.ValueOf(syscall.Geteuid),
"Getgid": reflect.ValueOf(syscall.Getgid),
"Getgroups": reflect.ValueOf(syscall.Getgroups),
"Getpagesize": reflect.ValueOf(syscall.Getpagesize),
"Getpeername": reflect.ValueOf(syscall.Getpeername),
"Getpid": reflect.ValueOf(syscall.Getpid),
"Getppid": reflect.ValueOf(syscall.Getppid),
"Getsockname": reflect.ValueOf(syscall.Getsockname),
"Getsockopt": reflect.ValueOf(syscall.Getsockopt),
"GetsockoptInt": reflect.ValueOf(syscall.GetsockoptInt),
"Gettimeofday": reflect.ValueOf(syscall.Gettimeofday),
"Getuid": reflect.ValueOf(syscall.Getuid),
"Getwd": reflect.ValueOf(syscall.Getwd),
"HANDLE_FLAG_INHERIT": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"HKEY_CLASSES_ROOT": reflect.ValueOf(constant.MakeFromLiteral("2147483648", token.INT, 0)),
"HKEY_CURRENT_CONFIG": reflect.ValueOf(constant.MakeFromLiteral("2147483653", token.INT, 0)),
"HKEY_CURRENT_USER": reflect.ValueOf(constant.MakeFromLiteral("2147483649", token.INT, 0)),
"HKEY_DYN_DATA": reflect.ValueOf(constant.MakeFromLiteral("2147483654", token.INT, 0)),
"HKEY_LOCAL_MACHINE": reflect.ValueOf(constant.MakeFromLiteral("2147483650", token.INT, 0)),
"HKEY_PERFORMANCE_DATA": reflect.ValueOf(constant.MakeFromLiteral("2147483652", token.INT, 0)),
"HKEY_USERS": reflect.ValueOf(constant.MakeFromLiteral("2147483651", token.INT, 0)),
"IFF_BROADCAST": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"IFF_LOOPBACK": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"IFF_MULTICAST": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"IFF_POINTTOPOINT": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"IFF_UP": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"IGNORE": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"INFINITE": reflect.ValueOf(constant.MakeFromLiteral("4294967295", token.INT, 0)),
"INVALID_FILE_ATTRIBUTES": reflect.ValueOf(constant.MakeFromLiteral("4294967295", token.INT, 0)),
"IOC_IN": reflect.ValueOf(constant.MakeFromLiteral("2147483648", token.INT, 0)),
"IOC_INOUT": reflect.ValueOf(constant.MakeFromLiteral("3221225472", token.INT, 0)),
"IOC_OUT": reflect.ValueOf(constant.MakeFromLiteral("1073741824", token.INT, 0)),
"IOC_VENDOR": reflect.ValueOf(constant.MakeFromLiteral("402653184", token.INT, 0)),
"IOC_WS2": reflect.ValueOf(constant.MakeFromLiteral("134217728", token.INT, 0)),
"IO_REPARSE_TAG_SYMLINK": reflect.ValueOf(constant.MakeFromLiteral("2684354572", token.INT, 0)),
"IPPROTO_IP": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"IPPROTO_IPV6": reflect.ValueOf(constant.MakeFromLiteral("41", token.INT, 0)),
"IPPROTO_TCP": reflect.ValueOf(constant.MakeFromLiteral("6", token.INT, 0)),
"IPPROTO_UDP": reflect.ValueOf(constant.MakeFromLiteral("17", token.INT, 0)),
"IPV6_JOIN_GROUP": reflect.ValueOf(constant.MakeFromLiteral("12", token.INT, 0)),
"IPV6_LEAVE_GROUP": reflect.ValueOf(constant.MakeFromLiteral("13", token.INT, 0)),
"IPV6_MULTICAST_HOPS": reflect.ValueOf(constant.MakeFromLiteral("10", token.INT, 0)),
"IPV6_MULTICAST_IF": reflect.ValueOf(constant.MakeFromLiteral("9", token.INT, 0)),
"IPV6_MULTICAST_LOOP": reflect.ValueOf(constant.MakeFromLiteral("11", token.INT, 0)),
"IPV6_UNICAST_HOPS": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"IPV6_V6ONLY": reflect.ValueOf(constant.MakeFromLiteral("27", token.INT, 0)),
"IP_ADD_MEMBERSHIP": reflect.ValueOf(constant.MakeFromLiteral("12", token.INT, 0)),
"IP_DROP_MEMBERSHIP": reflect.ValueOf(constant.MakeFromLiteral("13", token.INT, 0)),
"IP_MULTICAST_IF": reflect.ValueOf(constant.MakeFromLiteral("9", token.INT, 0)),
"IP_MULTICAST_LOOP": reflect.ValueOf(constant.MakeFromLiteral("11", token.INT, 0)),
"IP_MULTICAST_TTL": reflect.ValueOf(constant.MakeFromLiteral("10", token.INT, 0)),
"IP_TOS": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"IP_TTL": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"ImplementsGetwd": reflect.ValueOf(syscall.ImplementsGetwd),
"InvalidHandle": reflect.ValueOf(syscall.InvalidHandle),
"KEY_ALL_ACCESS": reflect.ValueOf(constant.MakeFromLiteral("983103", token.INT, 0)),
"KEY_CREATE_LINK": reflect.ValueOf(constant.MakeFromLiteral("32", token.INT, 0)),
"KEY_CREATE_SUB_KEY": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"KEY_ENUMERATE_SUB_KEYS": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"KEY_EXECUTE": reflect.ValueOf(constant.MakeFromLiteral("131097", token.INT, 0)),
"KEY_NOTIFY": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"KEY_QUERY_VALUE": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"KEY_READ": reflect.ValueOf(constant.MakeFromLiteral("131097", token.INT, 0)),
"KEY_SET_VALUE": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"KEY_WOW64_32KEY": reflect.ValueOf(constant.MakeFromLiteral("512", token.INT, 0)),
"KEY_WOW64_64KEY": reflect.ValueOf(constant.MakeFromLiteral("256", token.INT, 0)),
"KEY_WRITE": reflect.ValueOf(constant.MakeFromLiteral("131078", token.INT, 0)),
"LANG_ENGLISH": reflect.ValueOf(constant.MakeFromLiteral("9", token.INT, 0)),
"LAYERED_PROTOCOL": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"Lchown": reflect.ValueOf(syscall.Lchown),
"Link": reflect.ValueOf(syscall.Link),
"Listen": reflect.ValueOf(syscall.Listen),
"LoadCancelIoEx": reflect.ValueOf(syscall.LoadCancelIoEx),
"LoadConnectEx": reflect.ValueOf(syscall.LoadConnectEx),
"LoadCreateSymbolicLink": reflect.ValueOf(syscall.LoadCreateSymbolicLink),
"LoadDLL": reflect.ValueOf(syscall.LoadDLL),
"LoadGetAddrInfo": reflect.ValueOf(syscall.LoadGetAddrInfo),
"LoadLibrary": reflect.ValueOf(syscall.LoadLibrary),
"LoadSetFileCompletionNotificationModes": reflect.ValueOf(syscall.LoadSetFileCompletionNotificationModes),
"LocalFree": reflect.ValueOf(syscall.LocalFree),
"LookupAccountName": reflect.ValueOf(syscall.LookupAccountName),
"LookupAccountSid": reflect.ValueOf(syscall.LookupAccountSid),
"LookupSID": reflect.ValueOf(syscall.LookupSID),
"MAXIMUM_REPARSE_DATA_BUFFER_SIZE": reflect.ValueOf(constant.MakeFromLiteral("16384", token.INT, 0)),
"MAXLEN_IFDESCR": reflect.ValueOf(constant.MakeFromLiteral("256", token.INT, 0)),
"MAXLEN_PHYSADDR": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"MAX_ADAPTER_ADDRESS_LENGTH": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"MAX_ADAPTER_DESCRIPTION_LENGTH": reflect.ValueOf(constant.MakeFromLiteral("128", token.INT, 0)),
"MAX_ADAPTER_NAME_LENGTH": reflect.ValueOf(constant.MakeFromLiteral("256", token.INT, 0)),
"MAX_COMPUTERNAME_LENGTH": reflect.ValueOf(constant.MakeFromLiteral("15", token.INT, 0)),
"MAX_INTERFACE_NAME_LEN": reflect.ValueOf(constant.MakeFromLiteral("256", token.INT, 0)),
"MAX_LONG_PATH": reflect.ValueOf(constant.MakeFromLiteral("32768", token.INT, 0)),
"MAX_PATH": reflect.ValueOf(constant.MakeFromLiteral("260", token.INT, 0)),
"MAX_PROTOCOL_CHAIN": reflect.ValueOf(constant.MakeFromLiteral("7", token.INT, 0)),
"MapViewOfFile": reflect.ValueOf(syscall.MapViewOfFile),
"MaxTokenInfoClass": reflect.ValueOf(constant.MakeFromLiteral("29", token.INT, 0)),
"Mkdir": reflect.ValueOf(syscall.Mkdir),
"MoveFile": reflect.ValueOf(syscall.MoveFile),
"MustLoadDLL": reflect.ValueOf(syscall.MustLoadDLL),
"NameCanonical": reflect.ValueOf(constant.MakeFromLiteral("7", token.INT, 0)),
"NameCanonicalEx": reflect.ValueOf(constant.MakeFromLiteral("9", token.INT, 0)),
"NameDisplay": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"NameDnsDomain": reflect.ValueOf(constant.MakeFromLiteral("12", token.INT, 0)),
"NameFullyQualifiedDN": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"NameSamCompatible": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"NameServicePrincipal": reflect.ValueOf(constant.MakeFromLiteral("10", token.INT, 0)),
"NameUniqueId": reflect.ValueOf(constant.MakeFromLiteral("6", token.INT, 0)),
"NameUnknown": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"NameUserPrincipal": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"NetApiBufferFree": reflect.ValueOf(syscall.NetApiBufferFree),
"NetGetJoinInformation": reflect.ValueOf(syscall.NetGetJoinInformation),
"NetSetupDomainName": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"NetSetupUnjoined": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"NetSetupUnknownStatus": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"NetSetupWorkgroupName": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"NetUserGetInfo": reflect.ValueOf(syscall.NetUserGetInfo),
"NewCallback": reflect.ValueOf(syscall.NewCallback),
"NewCallbackCDecl": reflect.ValueOf(syscall.NewCallbackCDecl),
"NewLazyDLL": reflect.ValueOf(syscall.NewLazyDLL),
"NsecToFiletime": reflect.ValueOf(syscall.NsecToFiletime),
"NsecToTimespec": reflect.ValueOf(syscall.NsecToTimespec),
"NsecToTimeval": reflect.ValueOf(syscall.NsecToTimeval),
"Ntohs": reflect.ValueOf(syscall.Ntohs),
"OID_PKIX_KP_SERVER_AUTH": reflect.ValueOf(&syscall.OID_PKIX_KP_SERVER_AUTH).Elem(),
"OID_SERVER_GATED_CRYPTO": reflect.ValueOf(&syscall.OID_SERVER_GATED_CRYPTO).Elem(),
"OID_SGC_NETSCAPE": reflect.ValueOf(&syscall.OID_SGC_NETSCAPE).Elem(),
"OPEN_ALWAYS": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"OPEN_EXISTING": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"O_APPEND": reflect.ValueOf(constant.MakeFromLiteral("1024", token.INT, 0)),
"O_ASYNC": reflect.ValueOf(constant.MakeFromLiteral("8192", token.INT, 0)),
"O_CLOEXEC": reflect.ValueOf(constant.MakeFromLiteral("524288", token.INT, 0)),
"O_CREAT": reflect.ValueOf(constant.MakeFromLiteral("64", token.INT, 0)),
"O_EXCL": reflect.ValueOf(constant.MakeFromLiteral("128", token.INT, 0)),
"O_NOCTTY": reflect.ValueOf(constant.MakeFromLiteral("256", token.INT, 0)),
"O_NONBLOCK": reflect.ValueOf(constant.MakeFromLiteral("2048", token.INT, 0)),
"O_RDONLY": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"O_RDWR": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"O_SYNC": reflect.ValueOf(constant.MakeFromLiteral("4096", token.INT, 0)),
"O_TRUNC": reflect.ValueOf(constant.MakeFromLiteral("512", token.INT, 0)),
"O_WRONLY": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"Open": reflect.ValueOf(syscall.Open),
"OpenCurrentProcessToken": reflect.ValueOf(syscall.OpenCurrentProcessToken),
"OpenProcess": reflect.ValueOf(syscall.OpenProcess),
"OpenProcessToken": reflect.ValueOf(syscall.OpenProcessToken),
"PAGE_EXECUTE_READ": reflect.ValueOf(constant.MakeFromLiteral("32", token.INT, 0)),
"PAGE_EXECUTE_READWRITE": reflect.ValueOf(constant.MakeFromLiteral("64", token.INT, 0)),
"PAGE_EXECUTE_WRITECOPY": reflect.ValueOf(constant.MakeFromLiteral("128", token.INT, 0)),
"PAGE_READONLY": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"PAGE_READWRITE": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"PAGE_WRITECOPY": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"PFL_HIDDEN": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"PFL_MATCHES_PROTOCOL_ZERO": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"PFL_MULTIPLE_PROTO_ENTRIES": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"PFL_NETWORKDIRECT_PROVIDER": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"PFL_RECOMMENDED_PROTO_ENTRY": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"PKCS_7_ASN_ENCODING": reflect.ValueOf(constant.MakeFromLiteral("65536", token.INT, 0)),
"PROCESS_QUERY_INFORMATION": reflect.ValueOf(constant.MakeFromLiteral("1024", token.INT, 0)),
"PROCESS_TERMINATE": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"PROV_DH_SCHANNEL": reflect.ValueOf(constant.MakeFromLiteral("18", token.INT, 0)),
"PROV_DSS": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"PROV_DSS_DH": reflect.ValueOf(constant.MakeFromLiteral("13", token.INT, 0)),
"PROV_EC_ECDSA_FULL": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"PROV_EC_ECDSA_SIG": reflect.ValueOf(constant.MakeFromLiteral("14", token.INT, 0)),
"PROV_EC_ECNRA_FULL": reflect.ValueOf(constant.MakeFromLiteral("17", token.INT, 0)),
"PROV_EC_ECNRA_SIG": reflect.ValueOf(constant.MakeFromLiteral("15", token.INT, 0)),
"PROV_FORTEZZA": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"PROV_INTEL_SEC": reflect.ValueOf(constant.MakeFromLiteral("22", token.INT, 0)),
"PROV_MS_EXCHANGE": reflect.ValueOf(constant.MakeFromLiteral("5", token.INT, 0)),
"PROV_REPLACE_OWF": reflect.ValueOf(constant.MakeFromLiteral("23", token.INT, 0)),
"PROV_RNG": reflect.ValueOf(constant.MakeFromLiteral("21", token.INT, 0)),
"PROV_RSA_AES": reflect.ValueOf(constant.MakeFromLiteral("24", token.INT, 0)),
"PROV_RSA_FULL": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"PROV_RSA_SCHANNEL": reflect.ValueOf(constant.MakeFromLiteral("12", token.INT, 0)),
"PROV_RSA_SIG": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"PROV_SPYRUS_LYNKS": reflect.ValueOf(constant.MakeFromLiteral("20", token.INT, 0)),
"PROV_SSL": reflect.ValueOf(constant.MakeFromLiteral("6", token.INT, 0)),
"Pipe": reflect.ValueOf(syscall.Pipe),
"PostQueuedCompletionStatus": reflect.ValueOf(syscall.PostQueuedCompletionStatus),
"Process32First": reflect.ValueOf(syscall.Process32First),
"Process32Next": reflect.ValueOf(syscall.Process32Next),
"REG_BINARY": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"REG_DWORD": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"REG_DWORD_BIG_ENDIAN": reflect.ValueOf(constant.MakeFromLiteral("5", token.INT, 0)),
"REG_DWORD_LITTLE_ENDIAN": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"REG_EXPAND_SZ": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"REG_FULL_RESOURCE_DESCRIPTOR": reflect.ValueOf(constant.MakeFromLiteral("9", token.INT, 0)),
"REG_LINK": reflect.ValueOf(constant.MakeFromLiteral("6", token.INT, 0)),
"REG_MULTI_SZ": reflect.ValueOf(constant.MakeFromLiteral("7", token.INT, 0)),
"REG_NONE": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"REG_QWORD": reflect.ValueOf(constant.MakeFromLiteral("11", token.INT, 0)),
"REG_QWORD_LITTLE_ENDIAN": reflect.ValueOf(constant.MakeFromLiteral("11", token.INT, 0)),
"REG_RESOURCE_LIST": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"REG_RESOURCE_REQUIREMENTS_LIST": reflect.ValueOf(constant.MakeFromLiteral("10", token.INT, 0)),
"REG_SZ": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"Read": reflect.ValueOf(syscall.Read),
"ReadConsole": reflect.ValueOf(syscall.ReadConsole),
"ReadDirectoryChanges": reflect.ValueOf(syscall.ReadDirectoryChanges),
"ReadFile": reflect.ValueOf(syscall.ReadFile),
"Readlink": reflect.ValueOf(syscall.Readlink),
"Recvfrom": reflect.ValueOf(syscall.Recvfrom),
"RegCloseKey": reflect.ValueOf(syscall.RegCloseKey),
"RegEnumKeyEx": reflect.ValueOf(syscall.RegEnumKeyEx),
"RegOpenKeyEx": reflect.ValueOf(syscall.RegOpenKeyEx),
"RegQueryInfoKey": reflect.ValueOf(syscall.RegQueryInfoKey),
"RegQueryValueEx": reflect.ValueOf(syscall.RegQueryValueEx),
"RemoveDirectory": reflect.ValueOf(syscall.RemoveDirectory),
"Rename": reflect.ValueOf(syscall.Rename),
"Rmdir": reflect.ValueOf(syscall.Rmdir),
"SHUT_RD": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"SHUT_RDWR": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"SHUT_WR": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"SIGABRT": reflect.ValueOf(syscall.SIGABRT),
"SIGALRM": reflect.ValueOf(syscall.SIGALRM),
"SIGBUS": reflect.ValueOf(syscall.SIGBUS),
"SIGFPE": reflect.ValueOf(syscall.SIGFPE),
"SIGHUP": reflect.ValueOf(syscall.SIGHUP),
"SIGILL": reflect.ValueOf(syscall.SIGILL),
"SIGINT": reflect.ValueOf(syscall.SIGINT),
"SIGKILL": reflect.ValueOf(syscall.SIGKILL),
"SIGPIPE": reflect.ValueOf(syscall.SIGPIPE),
"SIGQUIT": reflect.ValueOf(syscall.SIGQUIT),
"SIGSEGV": reflect.ValueOf(syscall.SIGSEGV),
"SIGTERM": reflect.ValueOf(syscall.SIGTERM),
"SIGTRAP": reflect.ValueOf(syscall.SIGTRAP),
"SIO_GET_EXTENSION_FUNCTION_POINTER": reflect.ValueOf(constant.MakeFromLiteral("3355443206", token.INT, 0)),
"SIO_GET_INTERFACE_LIST": reflect.ValueOf(constant.MakeFromLiteral("1074033791", token.INT, 0)),
"SIO_KEEPALIVE_VALS": reflect.ValueOf(constant.MakeFromLiteral("2550136836", token.INT, 0)),
"SIO_UDP_CONNRESET": reflect.ValueOf(constant.MakeFromLiteral("2550136844", token.INT, 0)),
"SOCK_DGRAM": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"SOCK_RAW": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"SOCK_SEQPACKET": reflect.ValueOf(constant.MakeFromLiteral("5", token.INT, 0)),
"SOCK_STREAM": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"SOL_SOCKET": reflect.ValueOf(constant.MakeFromLiteral("65535", token.INT, 0)),
"SOMAXCONN": reflect.ValueOf(constant.MakeFromLiteral("2147483647", token.INT, 0)),
"SO_BROADCAST": reflect.ValueOf(constant.MakeFromLiteral("32", token.INT, 0)),
"SO_DONTROUTE": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"SO_KEEPALIVE": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"SO_LINGER": reflect.ValueOf(constant.MakeFromLiteral("128", token.INT, 0)),
"SO_RCVBUF": reflect.ValueOf(constant.MakeFromLiteral("4098", token.INT, 0)),
"SO_REUSEADDR": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"SO_SNDBUF": reflect.ValueOf(constant.MakeFromLiteral("4097", token.INT, 0)),
"SO_UPDATE_ACCEPT_CONTEXT": reflect.ValueOf(constant.MakeFromLiteral("28683", token.INT, 0)),
"SO_UPDATE_CONNECT_CONTEXT": reflect.ValueOf(constant.MakeFromLiteral("28688", token.INT, 0)),
"STANDARD_RIGHTS_ALL": reflect.ValueOf(constant.MakeFromLiteral("2031616", token.INT, 0)),
"STANDARD_RIGHTS_EXECUTE": reflect.ValueOf(constant.MakeFromLiteral("131072", token.INT, 0)),
"STANDARD_RIGHTS_READ": reflect.ValueOf(constant.MakeFromLiteral("131072", token.INT, 0)),
"STANDARD_RIGHTS_REQUIRED": reflect.ValueOf(constant.MakeFromLiteral("983040", token.INT, 0)),
"STANDARD_RIGHTS_WRITE": reflect.ValueOf(constant.MakeFromLiteral("131072", token.INT, 0)),
"STARTF_USESHOWWINDOW": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"STARTF_USESTDHANDLES": reflect.ValueOf(constant.MakeFromLiteral("256", token.INT, 0)),
"STD_ERROR_HANDLE": reflect.ValueOf(constant.MakeFromLiteral("-12", token.INT, 0)),
"STD_INPUT_HANDLE": reflect.ValueOf(constant.MakeFromLiteral("-10", token.INT, 0)),
"STD_OUTPUT_HANDLE": reflect.ValueOf(constant.MakeFromLiteral("-11", token.INT, 0)),
"SUBLANG_ENGLISH_US": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"SW_FORCEMINIMIZE": reflect.ValueOf(constant.MakeFromLiteral("11", token.INT, 0)),
"SW_HIDE": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"SW_MAXIMIZE": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"SW_MINIMIZE": reflect.ValueOf(constant.MakeFromLiteral("6", token.INT, 0)),
"SW_NORMAL": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"SW_RESTORE": reflect.ValueOf(constant.MakeFromLiteral("9", token.INT, 0)),
"SW_SHOW": reflect.ValueOf(constant.MakeFromLiteral("5", token.INT, 0)),
"SW_SHOWDEFAULT": reflect.ValueOf(constant.MakeFromLiteral("10", token.INT, 0)),
"SW_SHOWMAXIMIZED": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"SW_SHOWMINIMIZED": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"SW_SHOWMINNOACTIVE": reflect.ValueOf(constant.MakeFromLiteral("7", token.INT, 0)),
"SW_SHOWNA": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"SW_SHOWNOACTIVATE": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"SW_SHOWNORMAL": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"SYMBOLIC_LINK_FLAG_DIRECTORY": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"SYNCHRONIZE": reflect.ValueOf(constant.MakeFromLiteral("1048576", token.INT, 0)),
"S_IFBLK": reflect.ValueOf(constant.MakeFromLiteral("24576", token.INT, 0)),
"S_IFCHR": reflect.ValueOf(constant.MakeFromLiteral("8192", token.INT, 0)),
"S_IFDIR": reflect.ValueOf(constant.MakeFromLiteral("16384", token.INT, 0)),
"S_IFIFO": reflect.ValueOf(constant.MakeFromLiteral("4096", token.INT, 0)),
"S_IFLNK": reflect.ValueOf(constant.MakeFromLiteral("40960", token.INT, 0)),
"S_IFMT": reflect.ValueOf(constant.MakeFromLiteral("126976", token.INT, 0)),
"S_IFREG": reflect.ValueOf(constant.MakeFromLiteral("32768", token.INT, 0)),
"S_IFSOCK": reflect.ValueOf(constant.MakeFromLiteral("49152", token.INT, 0)),
"S_IRUSR": reflect.ValueOf(constant.MakeFromLiteral("256", token.INT, 0)),
"S_ISGID": reflect.ValueOf(constant.MakeFromLiteral("1024", token.INT, 0)),
"S_ISUID": reflect.ValueOf(constant.MakeFromLiteral("2048", token.INT, 0)),
"S_ISVTX": reflect.ValueOf(constant.MakeFromLiteral("512", token.INT, 0)),
"S_IWRITE": reflect.ValueOf(constant.MakeFromLiteral("128", token.INT, 0)),
"S_IWUSR": reflect.ValueOf(constant.MakeFromLiteral("128", token.INT, 0)),
"S_IXUSR": reflect.ValueOf(constant.MakeFromLiteral("64", token.INT, 0)),
"Seek": reflect.ValueOf(syscall.Seek),
"Sendto": reflect.ValueOf(syscall.Sendto),
"SetCurrentDirectory": reflect.ValueOf(syscall.SetCurrentDirectory),
"SetEndOfFile": reflect.ValueOf(syscall.SetEndOfFile),
"SetEnvironmentVariable": reflect.ValueOf(syscall.SetEnvironmentVariable),
"SetFileAttributes": reflect.ValueOf(syscall.SetFileAttributes),
"SetFileCompletionNotificationModes": reflect.ValueOf(syscall.SetFileCompletionNotificationModes),
"SetFilePointer": reflect.ValueOf(syscall.SetFilePointer),
"SetFileTime": reflect.ValueOf(syscall.SetFileTime),
"SetHandleInformation": reflect.ValueOf(syscall.SetHandleInformation),
"SetNonblock": reflect.ValueOf(syscall.SetNonblock),
"Setenv": reflect.ValueOf(syscall.Setenv),
"Setsockopt": reflect.ValueOf(syscall.Setsockopt),
"SetsockoptIPMreq": reflect.ValueOf(syscall.SetsockoptIPMreq),
"SetsockoptIPv6Mreq": reflect.ValueOf(syscall.SetsockoptIPv6Mreq),
"SetsockoptInet4Addr": reflect.ValueOf(syscall.SetsockoptInet4Addr),
"SetsockoptInt": reflect.ValueOf(syscall.SetsockoptInt),
"SetsockoptLinger": reflect.ValueOf(syscall.SetsockoptLinger),
"SetsockoptTimeval": reflect.ValueOf(syscall.SetsockoptTimeval),
"Shutdown": reflect.ValueOf(syscall.Shutdown),
"SidTypeAlias": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"SidTypeComputer": reflect.ValueOf(constant.MakeFromLiteral("9", token.INT, 0)),
"SidTypeDeletedAccount": reflect.ValueOf(constant.MakeFromLiteral("6", token.INT, 0)),
"SidTypeDomain": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"SidTypeGroup": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"SidTypeInvalid": reflect.ValueOf(constant.MakeFromLiteral("7", token.INT, 0)),
"SidTypeLabel": reflect.ValueOf(constant.MakeFromLiteral("10", token.INT, 0)),
"SidTypeUnknown": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"SidTypeUser": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"SidTypeWellKnownGroup": reflect.ValueOf(constant.MakeFromLiteral("5", token.INT, 0)),
"Socket": reflect.ValueOf(syscall.Socket),
"SocketDisableIPv6": reflect.ValueOf(&syscall.SocketDisableIPv6).Elem(),
"StartProcess": reflect.ValueOf(syscall.StartProcess),
"Stderr": reflect.ValueOf(&syscall.Stderr).Elem(),
"Stdin": reflect.ValueOf(&syscall.Stdin).Elem(),
"Stdout": reflect.ValueOf(&syscall.Stdout).Elem(),
"StringBytePtr": reflect.ValueOf(syscall.StringBytePtr),
"StringByteSlice": reflect.ValueOf(syscall.StringByteSlice),
"StringToSid": reflect.ValueOf(syscall.StringToSid),
"StringToUTF16": reflect.ValueOf(syscall.StringToUTF16),
"StringToUTF16Ptr": reflect.ValueOf(syscall.StringToUTF16Ptr),
"Symlink": reflect.ValueOf(syscall.Symlink),
"Syscall": reflect.ValueOf(syscall.Syscall),
"Syscall12": reflect.ValueOf(syscall.Syscall12),
"Syscall15": reflect.ValueOf(syscall.Syscall15),
"Syscall18": reflect.ValueOf(syscall.Syscall18),
"Syscall6": reflect.ValueOf(syscall.Syscall6),
"Syscall9": reflect.ValueOf(syscall.Syscall9),
"TCP_NODELAY": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"TF_DISCONNECT": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"TF_REUSE_SOCKET": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"TF_USE_DEFAULT_WORKER": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"TF_USE_KERNEL_APC": reflect.ValueOf(constant.MakeFromLiteral("32", token.INT, 0)),
"TF_USE_SYSTEM_THREAD": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"TF_WRITE_BEHIND": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"TH32CS_INHERIT": reflect.ValueOf(constant.MakeFromLiteral("2147483648", token.INT, 0)),
"TH32CS_SNAPALL": reflect.ValueOf(constant.MakeFromLiteral("15", token.INT, 0)),
"TH32CS_SNAPHEAPLIST": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"TH32CS_SNAPMODULE": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"TH32CS_SNAPMODULE32": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"TH32CS_SNAPPROCESS": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"TH32CS_SNAPTHREAD": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"TIME_ZONE_ID_DAYLIGHT": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"TIME_ZONE_ID_STANDARD": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"TIME_ZONE_ID_UNKNOWN": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"TOKEN_ADJUST_DEFAULT": reflect.ValueOf(constant.MakeFromLiteral("128", token.INT, 0)),
"TOKEN_ADJUST_GROUPS": reflect.ValueOf(constant.MakeFromLiteral("64", token.INT, 0)),
"TOKEN_ADJUST_PRIVILEGES": reflect.ValueOf(constant.MakeFromLiteral("32", token.INT, 0)),
"TOKEN_ADJUST_SESSIONID": reflect.ValueOf(constant.MakeFromLiteral("256", token.INT, 0)),
"TOKEN_ALL_ACCESS": reflect.ValueOf(constant.MakeFromLiteral("983551", token.INT, 0)),
"TOKEN_ASSIGN_PRIMARY": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"TOKEN_DUPLICATE": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"TOKEN_EXECUTE": reflect.ValueOf(constant.MakeFromLiteral("131072", token.INT, 0)),
"TOKEN_IMPERSONATE": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"TOKEN_QUERY": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"TOKEN_QUERY_SOURCE": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"TOKEN_READ": reflect.ValueOf(constant.MakeFromLiteral("131080", token.INT, 0)),
"TOKEN_WRITE": reflect.ValueOf(constant.MakeFromLiteral("131296", token.INT, 0)),
"TRUNCATE_EXISTING": reflect.ValueOf(constant.MakeFromLiteral("5", token.INT, 0)),
"TerminateProcess": reflect.ValueOf(syscall.TerminateProcess),
"TimespecToNsec": reflect.ValueOf(syscall.TimespecToNsec),
"TokenAccessInformation": reflect.ValueOf(constant.MakeFromLiteral("22", token.INT, 0)),
"TokenAuditPolicy": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"TokenDefaultDacl": reflect.ValueOf(constant.MakeFromLiteral("6", token.INT, 0)),
"TokenElevation": reflect.ValueOf(constant.MakeFromLiteral("20", token.INT, 0)),
"TokenElevationType": reflect.ValueOf(constant.MakeFromLiteral("18", token.INT, 0)),
"TokenGroups": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"TokenGroupsAndPrivileges": reflect.ValueOf(constant.MakeFromLiteral("13", token.INT, 0)),
"TokenHasRestrictions": reflect.ValueOf(constant.MakeFromLiteral("21", token.INT, 0)),
"TokenImpersonationLevel": reflect.ValueOf(constant.MakeFromLiteral("9", token.INT, 0)),
"TokenIntegrityLevel": reflect.ValueOf(constant.MakeFromLiteral("25", token.INT, 0)),
"TokenLinkedToken": reflect.ValueOf(constant.MakeFromLiteral("19", token.INT, 0)),
"TokenLogonSid": reflect.ValueOf(constant.MakeFromLiteral("28", token.INT, 0)),
"TokenMandatoryPolicy": reflect.ValueOf(constant.MakeFromLiteral("27", token.INT, 0)),
"TokenOrigin": reflect.ValueOf(constant.MakeFromLiteral("17", token.INT, 0)),
"TokenOwner": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"TokenPrimaryGroup": reflect.ValueOf(constant.MakeFromLiteral("5", token.INT, 0)),
"TokenPrivileges": reflect.ValueOf(constant.MakeFromLiteral("3", token.INT, 0)),
"TokenRestrictedSids": reflect.ValueOf(constant.MakeFromLiteral("11", token.INT, 0)),
"TokenSandBoxInert": reflect.ValueOf(constant.MakeFromLiteral("15", token.INT, 0)),
"TokenSessionId": reflect.ValueOf(constant.MakeFromLiteral("12", token.INT, 0)),
"TokenSessionReference": reflect.ValueOf(constant.MakeFromLiteral("14", token.INT, 0)),
"TokenSource": reflect.ValueOf(constant.MakeFromLiteral("7", token.INT, 0)),
"TokenStatistics": reflect.ValueOf(constant.MakeFromLiteral("10", token.INT, 0)),
"TokenType": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"TokenUIAccess": reflect.ValueOf(constant.MakeFromLiteral("26", token.INT, 0)),
"TokenUser": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"TokenVirtualizationAllowed": reflect.ValueOf(constant.MakeFromLiteral("23", token.INT, 0)),
"TokenVirtualizationEnabled": reflect.ValueOf(constant.MakeFromLiteral("24", token.INT, 0)),
"TranslateAccountName": reflect.ValueOf(syscall.TranslateAccountName),
"TranslateName": reflect.ValueOf(syscall.TranslateName),
"TransmitFile": reflect.ValueOf(syscall.TransmitFile),
"UNIX_PATH_MAX": reflect.ValueOf(constant.MakeFromLiteral("108", token.INT, 0)),
"USAGE_MATCH_TYPE_AND": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"USAGE_MATCH_TYPE_OR": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"UTF16FromString": reflect.ValueOf(syscall.UTF16FromString),
"UTF16PtrFromString": reflect.ValueOf(syscall.UTF16PtrFromString),
"UTF16ToString": reflect.ValueOf(syscall.UTF16ToString),
"Unlink": reflect.ValueOf(syscall.Unlink),
"UnmapViewOfFile": reflect.ValueOf(syscall.UnmapViewOfFile),
"Unsetenv": reflect.ValueOf(syscall.Unsetenv),
"Utimes": reflect.ValueOf(syscall.Utimes),
"UtimesNano": reflect.ValueOf(syscall.UtimesNano),
"VirtualLock": reflect.ValueOf(syscall.VirtualLock),
"VirtualUnlock": reflect.ValueOf(syscall.VirtualUnlock),
"WAIT_ABANDONED": reflect.ValueOf(constant.MakeFromLiteral("128", token.INT, 0)),
"WAIT_FAILED": reflect.ValueOf(constant.MakeFromLiteral("4294967295", token.INT, 0)),
"WAIT_OBJECT_0": reflect.ValueOf(constant.MakeFromLiteral("0", token.INT, 0)),
"WAIT_TIMEOUT": reflect.ValueOf(constant.MakeFromLiteral("258", token.INT, 0)),
"WSACleanup": reflect.ValueOf(syscall.WSACleanup),
"WSADESCRIPTION_LEN": reflect.ValueOf(constant.MakeFromLiteral("256", token.INT, 0)),
"WSAEACCES": reflect.ValueOf(syscall.WSAEACCES),
"WSAECONNABORTED": reflect.ValueOf(syscall.WSAECONNABORTED),
"WSAECONNRESET": reflect.ValueOf(syscall.WSAECONNRESET),
"WSAEnumProtocols": reflect.ValueOf(syscall.WSAEnumProtocols),
"WSAID_CONNECTEX": reflect.ValueOf(&syscall.WSAID_CONNECTEX).Elem(),
"WSAIoctl": reflect.ValueOf(syscall.WSAIoctl),
"WSAPROTOCOL_LEN": reflect.ValueOf(constant.MakeFromLiteral("255", token.INT, 0)),
"WSARecv": reflect.ValueOf(syscall.WSARecv),
"WSARecvFrom": reflect.ValueOf(syscall.WSARecvFrom),
"WSASYS_STATUS_LEN": reflect.ValueOf(constant.MakeFromLiteral("128", token.INT, 0)),
"WSASend": reflect.ValueOf(syscall.WSASend),
"WSASendTo": reflect.ValueOf(syscall.WSASendTo),
"WSASendto": reflect.ValueOf(syscall.WSASendto),
"WSAStartup": reflect.ValueOf(syscall.WSAStartup),
"WaitForSingleObject": reflect.ValueOf(syscall.WaitForSingleObject),
"Write": reflect.ValueOf(syscall.Write),
"WriteConsole": reflect.ValueOf(syscall.WriteConsole),
"WriteFile": reflect.ValueOf(syscall.WriteFile),
"X509_ASN_ENCODING": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"XP1_CONNECTIONLESS": reflect.ValueOf(constant.MakeFromLiteral("1", token.INT, 0)),
"XP1_CONNECT_DATA": reflect.ValueOf(constant.MakeFromLiteral("128", token.INT, 0)),
"XP1_DISCONNECT_DATA": reflect.ValueOf(constant.MakeFromLiteral("256", token.INT, 0)),
"XP1_EXPEDITED_DATA": reflect.ValueOf(constant.MakeFromLiteral("64", token.INT, 0)),
"XP1_GRACEFUL_CLOSE": reflect.ValueOf(constant.MakeFromLiteral("32", token.INT, 0)),
"XP1_GUARANTEED_DELIVERY": reflect.ValueOf(constant.MakeFromLiteral("2", token.INT, 0)),
"XP1_GUARANTEED_ORDER": reflect.ValueOf(constant.MakeFromLiteral("4", token.INT, 0)),
"XP1_IFS_HANDLES": reflect.ValueOf(constant.MakeFromLiteral("131072", token.INT, 0)),
"XP1_MESSAGE_ORIENTED": reflect.ValueOf(constant.MakeFromLiteral("8", token.INT, 0)),
"XP1_MULTIPOINT_CONTROL_PLANE": reflect.ValueOf(constant.MakeFromLiteral("2048", token.INT, 0)),
"XP1_MULTIPOINT_DATA_PLANE": reflect.ValueOf(constant.MakeFromLiteral("4096", token.INT, 0)),
"XP1_PARTIAL_MESSAGE": reflect.ValueOf(constant.MakeFromLiteral("262144", token.INT, 0)),
"XP1_PSEUDO_STREAM": reflect.ValueOf(constant.MakeFromLiteral("16", token.INT, 0)),
"XP1_QOS_SUPPORTED": reflect.ValueOf(constant.MakeFromLiteral("8192", token.INT, 0)),
"XP1_SAN_SUPPORT_SDP": reflect.ValueOf(constant.MakeFromLiteral("524288", token.INT, 0)),
"XP1_SUPPORT_BROADCAST": reflect.ValueOf(constant.MakeFromLiteral("512", token.INT, 0)),
"XP1_SUPPORT_MULTIPOINT": reflect.ValueOf(constant.MakeFromLiteral("1024", token.INT, 0)),
"XP1_UNI_RECV": reflect.ValueOf(constant.MakeFromLiteral("65536", token.INT, 0)),
"XP1_UNI_SEND": reflect.ValueOf(constant.MakeFromLiteral("32768", token.INT, 0)),
// type definitions
"AddrinfoW": reflect.ValueOf((*syscall.AddrinfoW)(nil)),
"ByHandleFileInformation": reflect.ValueOf((*syscall.ByHandleFileInformation)(nil)),
"CertChainContext": reflect.ValueOf((*syscall.CertChainContext)(nil)),
"CertChainElement": reflect.ValueOf((*syscall.CertChainElement)(nil)),
"CertChainPara": reflect.ValueOf((*syscall.CertChainPara)(nil)),
"CertChainPolicyPara": reflect.ValueOf((*syscall.CertChainPolicyPara)(nil)),
"CertChainPolicyStatus": reflect.ValueOf((*syscall.CertChainPolicyStatus)(nil)),
"CertContext": reflect.ValueOf((*syscall.CertContext)(nil)),
"CertEnhKeyUsage": reflect.ValueOf((*syscall.CertEnhKeyUsage)(nil)),
"CertInfo": reflect.ValueOf((*syscall.CertInfo)(nil)),
"CertRevocationCrlInfo": reflect.ValueOf((*syscall.CertRevocationCrlInfo)(nil)),
"CertRevocationInfo": reflect.ValueOf((*syscall.CertRevocationInfo)(nil)),
"CertSimpleChain": reflect.ValueOf((*syscall.CertSimpleChain)(nil)),
"CertTrustListInfo": reflect.ValueOf((*syscall.CertTrustListInfo)(nil)),
"CertTrustStatus": reflect.ValueOf((*syscall.CertTrustStatus)(nil)),
"CertUsageMatch": reflect.ValueOf((*syscall.CertUsageMatch)(nil)),
"Conn": reflect.ValueOf((*syscall.Conn)(nil)),
"DLL": reflect.ValueOf((*syscall.DLL)(nil)),
"DLLError": reflect.ValueOf((*syscall.DLLError)(nil)),
"DNSMXData": reflect.ValueOf((*syscall.DNSMXData)(nil)),
"DNSPTRData": reflect.ValueOf((*syscall.DNSPTRData)(nil)),
"DNSRecord": reflect.ValueOf((*syscall.DNSRecord)(nil)),
"DNSSRVData": reflect.ValueOf((*syscall.DNSSRVData)(nil)),
"DNSTXTData": reflect.ValueOf((*syscall.DNSTXTData)(nil)),
"Errno": reflect.ValueOf((*syscall.Errno)(nil)),
"FileNotifyInformation": reflect.ValueOf((*syscall.FileNotifyInformation)(nil)),
"Filetime": reflect.ValueOf((*syscall.Filetime)(nil)),
"GUID": reflect.ValueOf((*syscall.GUID)(nil)),
"Handle": reflect.ValueOf((*syscall.Handle)(nil)),
"Hostent": reflect.ValueOf((*syscall.Hostent)(nil)),
"IPMreq": reflect.ValueOf((*syscall.IPMreq)(nil)),
"IPv6Mreq": reflect.ValueOf((*syscall.IPv6Mreq)(nil)),
"InterfaceInfo": reflect.ValueOf((*syscall.InterfaceInfo)(nil)),
"IpAdapterInfo": reflect.ValueOf((*syscall.IpAdapterInfo)(nil)),
"IpAddrString": reflect.ValueOf((*syscall.IpAddrString)(nil)),
"IpAddressString": reflect.ValueOf((*syscall.IpAddressString)(nil)),
"IpMaskString": reflect.ValueOf((*syscall.IpMaskString)(nil)),
"LazyDLL": reflect.ValueOf((*syscall.LazyDLL)(nil)),
"LazyProc": reflect.ValueOf((*syscall.LazyProc)(nil)),
"Linger": reflect.ValueOf((*syscall.Linger)(nil)),
"MibIfRow": reflect.ValueOf((*syscall.MibIfRow)(nil)),
"Overlapped": reflect.ValueOf((*syscall.Overlapped)(nil)),
"Pointer": reflect.ValueOf((*syscall.Pointer)(nil)),
"Proc": reflect.ValueOf((*syscall.Proc)(nil)),
"ProcAttr": reflect.ValueOf((*syscall.ProcAttr)(nil)),
"ProcessEntry32": reflect.ValueOf((*syscall.ProcessEntry32)(nil)),
"ProcessInformation": reflect.ValueOf((*syscall.ProcessInformation)(nil)),
"Protoent": reflect.ValueOf((*syscall.Protoent)(nil)),
"RawConn": reflect.ValueOf((*syscall.RawConn)(nil)),
"RawSockaddr": reflect.ValueOf((*syscall.RawSockaddr)(nil)),
"RawSockaddrAny": reflect.ValueOf((*syscall.RawSockaddrAny)(nil)),
"RawSockaddrInet4": reflect.ValueOf((*syscall.RawSockaddrInet4)(nil)),
"RawSockaddrInet6": reflect.ValueOf((*syscall.RawSockaddrInet6)(nil)),
"RawSockaddrUnix": reflect.ValueOf((*syscall.RawSockaddrUnix)(nil)),
"Rusage": reflect.ValueOf((*syscall.Rusage)(nil)),
"SID": reflect.ValueOf((*syscall.SID)(nil)),
"SIDAndAttributes": reflect.ValueOf((*syscall.SIDAndAttributes)(nil)),
"SSLExtraCertChainPolicyPara": reflect.ValueOf((*syscall.SSLExtraCertChainPolicyPara)(nil)),
"SecurityAttributes": reflect.ValueOf((*syscall.SecurityAttributes)(nil)),
"Servent": reflect.ValueOf((*syscall.Servent)(nil)),
"Signal": reflect.ValueOf((*syscall.Signal)(nil)),
"Sockaddr": reflect.ValueOf((*syscall.Sockaddr)(nil)),
"SockaddrGen": reflect.ValueOf((*syscall.SockaddrGen)(nil)),
"SockaddrInet4": reflect.ValueOf((*syscall.SockaddrInet4)(nil)),
"SockaddrInet6": reflect.ValueOf((*syscall.SockaddrInet6)(nil)),
"SockaddrUnix": reflect.ValueOf((*syscall.SockaddrUnix)(nil)),
"StartupInfo": reflect.ValueOf((*syscall.StartupInfo)(nil)),
"SysProcAttr": reflect.ValueOf((*syscall.SysProcAttr)(nil)),
"Systemtime": reflect.ValueOf((*syscall.Systemtime)(nil)),
"TCPKeepalive": reflect.ValueOf((*syscall.TCPKeepalive)(nil)),
"Timespec": reflect.ValueOf((*syscall.Timespec)(nil)),
"Timeval": reflect.ValueOf((*syscall.Timeval)(nil)),
"Timezoneinformation": reflect.ValueOf((*syscall.Timezoneinformation)(nil)),
"Token": reflect.ValueOf((*syscall.Token)(nil)),
"Tokenprimarygroup": reflect.ValueOf((*syscall.Tokenprimarygroup)(nil)),
"Tokenuser": reflect.ValueOf((*syscall.Tokenuser)(nil)),
"TransmitFileBuffers": reflect.ValueOf((*syscall.TransmitFileBuffers)(nil)),
"UserInfo10": reflect.ValueOf((*syscall.UserInfo10)(nil)),
"WSABuf": reflect.ValueOf((*syscall.WSABuf)(nil)),
"WSAData": reflect.ValueOf((*syscall.WSAData)(nil)),
"WSAProtocolChain": reflect.ValueOf((*syscall.WSAProtocolChain)(nil)),
"WSAProtocolInfo": reflect.ValueOf((*syscall.WSAProtocolInfo)(nil)),
"WaitStatus": reflect.ValueOf((*syscall.WaitStatus)(nil)),
"Win32FileAttributeData": reflect.ValueOf((*syscall.Win32FileAttributeData)(nil)),
"Win32finddata": reflect.ValueOf((*syscall.Win32finddata)(nil)),
// interface wrapper definitions
"_Conn": reflect.ValueOf((*_syscall_Conn)(nil)),
"_RawConn": reflect.ValueOf((*_syscall_RawConn)(nil)),
"_Sockaddr": reflect.ValueOf((*_syscall_Sockaddr)(nil)),
}
} |
app.component.ts | import { Component, OnInit } from '@angular/core';
import { HttpClient } from '@angular/common/http';
import { FormGroup, FormBuilder } from '@angular/forms';
@Component({
selector: 'app-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.less']
})
export class | implements OnInit {
title = 'client';
postForm: FormGroup;
formattedOutput: any = {testing: "Haven't executed a request."};
ngOnInit(): void{
this.postForm = this.fb.group({
method: ['POST'],
url: ['localhost:4200/'],
params: [],
body: [],
headers: []
})
}
constructor(
private fb: FormBuilder,
private httpClient: HttpClient
) { }
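  // Build the request from the form values and show the response (or the error) as the formatted output.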
submitForm(): void{
let method = this.postForm.value.method
let url = this.postForm.value.url
let options = {
body: this.postForm.value.body,
params: this.postForm.value.params,
headers: this.postForm.value.headers
}
this.httpClient.request(
method,
url,
options
)
.subscribe(
res => {
this.formattedOutput = res
},
err => {
this.formattedOutput = err
}
);
}
}
| AppComponent |
utils.py | import time
import subprocess
from collections import namedtuple,defaultdict
import logging
import json
import os
import yaml
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import threading
import gym |
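# Ornstein-Uhlenbeck action noise for exploration in continuous action spaces; sigma is annealed from max_sigma toward min_sigma over decay_period calls to get_action.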
class OUNoise(object):
def __init__(self, action_space, mu=0.0, theta=0.15, max_sigma=0.3, min_sigma=0.3, decay_period=100000):
self.mu = mu
self.theta = theta
self.sigma = max_sigma
self.max_sigma = max_sigma
self.min_sigma = min_sigma
self.decay_period = decay_period
self.action_dim = action_space.shape[0]
self.low = action_space.low
self.high = action_space.high
self.reset()
def reset(self):
self.state = np.ones(self.action_dim) * self.mu
def evolve_state(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.action_dim)
self.state = x + dx
return self.state
def get_action(self, action, t=0):
ou_state = self.evolve_state()
self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, t / self.decay_period)
return np.clip(action + ou_state, self.low, self.high)
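# Helpers to launch TensorBoard on the log directory from a background thread, so training is not blocked.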
def loadTensorBoard(outdir):
t = threading.Thread(target=launchTensorBoard, args=([outdir]))
t.start()
def launchTensorBoard(tensorBoardPath):
print('tensorboard --logdir=' + tensorBoardPath)
ret=os.system('tensorboard --logdir=' + tensorBoardPath)
if ret!=0:
syspath = os.path.dirname(sys.executable)
print(os.path.dirname(sys.executable))
ret = os.system(syspath+"/"+'tensorboard --logdir=' + tensorBoardPath)
return
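# Alternative Ornstein-Uhlenbeck noise implementation; sample() returns a torch.FloatTensor instead of a numpy array.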
class Orn_Uhlen:
def __init__(self, n_actions, mu=0, theta=0.15, sigma=0.2):
self.n_actions = n_actions
self.X = np.ones(n_actions) * mu
self.mu = mu
self.sigma = sigma
self.theta = theta
def reset(self):
self.X = np.ones(self.n_actions) * self.mu
def sample(self):
dX = self.theta * (self.mu - self.X)
dX += self.sigma * np.random.randn(self.n_actions)
self.X += dX
return torch.FloatTensor(self.X)
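# Feature extractors map raw observations to the flat feature vectors fed to the networks below.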
class FeatureExtractor(object):
def __init__(self):
super().__init__()
def getFeatures(self,obs):
pass
class NothingToDo(FeatureExtractor):
def __init__(self,env):
super().__init__()
ob=env.reset()
self.outSize=len(ob)
def getFeatures(self,obs):
return obs
###### For Gridworld #############
class MapFromDumpExtractor(FeatureExtractor):
def __init__(self,env):
super().__init__()
outSize = env.start_grid_map.reshape(1, -1).shape[1]
self.outSize=outSize
def getFeatures(self, obs):
#prs(obs)
return obs.reshape(1,-1)
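# Same idea, but encodes the map as three binary planes (cell values 2, 4 and 6) before flattening.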
class MapFromDumpExtractor2(FeatureExtractor):
def __init__(self,env):
super().__init__()
outSize=env.start_grid_map.reshape(1, -1).shape[1]
self.outSize=outSize*3
def getFeatures(self, obs):
state=np.zeros((3,np.shape(obs)[0],np.shape(obs)[1]))
state[0]=np.where(obs == 2,1,state[0])
state[1] = np.where(obs == 4, 1, state[1])
state[2] = np.where(obs == 6, 1, state[2])
return state.reshape(1,-1)
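# Features built from the minimal squared distances between the agent cell (value 2) and cells of values 3 to 6, combined through an outer product.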
class DistsFromStates(FeatureExtractor):
def __init__(self,env):
super().__init__()
self.outSize=16
def getFeatures(self, obs):
#prs(obs)
#x=np.loads(obs)
x=obs
#print(x)
astate = list(map(
lambda x: x[0] if len(x) > 0 else None,
np.where(x == 2)
))
astate=np.array(astate)
a3=np.where(x == 3)
d3=np.array([0])
if len(a3[0])>0:
astate3 = np.concatenate(a3).reshape(2,-1).T
d3=np.power(astate-astate3,2).sum(1).min().reshape(1)
#d3 = np.array(d3).reshape(1)
a4 = np.where(x == 4)
d4 = np.array([0])
if len(a4[0]) > 0:
astate4 = np.concatenate(a4).reshape(2,-1).T
d4 = np.power(astate - astate4, 2).sum(1).min().reshape(1)
#d4 = np.array(d4)
a5 = np.where(x == 5)
d5 = np.array([0])
#prs(a5)
if len(a5[0]) > 0:
astate5 = np.concatenate(a5).reshape(2,-1).T
d5 = np.power(astate - astate5, 2).sum(1).min().reshape(1)
#d5 = np.array(d5)
a6 = np.where(x == 6)
d6 = np.array([0])
if len(a6[0]) > 0:
astate6 = np.concatenate(a6).reshape(2,-1).T
d6 = np.power(astate - astate6, 2).sum(1).min().reshape(1)
#d6=np.array(d6)
#prs("::",d3,d4,d5,d6)
ret=np.concatenate((d3,d4,d5,d6)).reshape(1,-1)
ret=np.dot(ret.T,ret)
return ret.reshape(1,-1)
#######################################################################################
# class Qfunction(nn.Module):
# def __init__(self):
# super(Qfunction,self).__init__()
#
# def setcuda(self, device):
#
# #FeatureExtractor.floatTensor = torch.cuda.FloatTensor(1, device=device)
# #FeatureExtractor.longTensor = torch.cuda.LongTensor(1, device=device)
# self.cuda(device=device)
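# Configurable network: optional Conv2d front-end (assumes square inputs) followed by a fully connected MLP with optional batch norm.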
class convMDP(nn.Module):
def __init__(self, inSize, outSize, layers=[], convs=None, finalActivation=None, batchNorm=False,init_batchNorm=False,activation=torch.tanh):
super(convMDP, self).__init__()
#print(inSize,outSize)
self.inSize=inSize
self.outSize=outSize
self.batchNorm=batchNorm
self.init_batchNorm = init_batchNorm
self.activation=activation
self.convs=None
if convs is not None:
self.convs = nn.ModuleList([])
for x in convs:
self.convs.append(nn.Conv2d(x[0], x[1], x[2], stride=x[3]))
inSize = np.sqrt(inSize / x[0])
inSize=((inSize-x[2])/x[3])+1
inSize=inSize*inSize*x[1]
#print(inSize)
self.layers = nn.ModuleList([])
self.bn = nn.ModuleList([])
i=0
if batchNorm or init_batchNorm:
self.bn.append(nn.BatchNorm1d(num_features=inSize))
for x in layers:
self.layers.append(nn.Linear(inSize, x))
if batchNorm:
self.bn.append(nn.BatchNorm1d(num_features=x))
#nn.init.xavier_uniform_(self.layers[i].weight)
nn.init.normal_(self.layers[i].weight.data, 0.0, 0.02)
nn.init.normal_(self.layers[i].bias.data,0.0,0.02)
i+=1
inSize = x
self.layers.append(nn.Linear(inSize, outSize))
#nn.init.uniform_(self.layers[-1].weight)
nn.init.normal_(self.layers[-1].weight.data, 0.0, 0.02)
nn.init.normal_(self.layers[-1].bias.data, 0.0, 0.02)
self.finalActivation=finalActivation
def setcuda(self, device):
self.cuda(device=device)
def forward(self, x):
#print("d", x.size(),self.inSize)
x=x.view(-1,self.inSize)
if self.convs is not None:
n=x.size()[0]
i=0
for c in self.convs:
if i==0:
w=np.sqrt(x.size()[1])
x=x.view(n,c.in_channels,w,w)
x=c(x)
x=self.activation(x)
i+=1
x=x.view(n,-1)
#print(x.size())
if self.batchNorm or self.init_batchNorm:
x=self.bn[0](x)
x = self.layers[0](x)
for i in range(1, len(self.layers)):
x = self.activation(x)
#if self.drop is not None:
# x = nn.drop(x)
if self.batchNorm:
x = self.bn[i](x)
x = self.layers[i](x)
if self.finalActivation is not None:
x=self.finalActivation(x)
#print("f",x.size())
return x
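# Plain multi-layer perceptron with tanh activations between hidden layers.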
class NN(nn.Module):
def __init__(self, inSize, outSize, layers=[]):
super(NN, self).__init__()
self.layers = nn.ModuleList([])
for x in layers:
self.layers.append(nn.Linear(inSize, x))
inSize = x
self.layers.append(nn.Linear(inSize, outSize))
def setcuda(self, device):
self.cuda(device=device)
def forward(self, x):
x = self.layers[0](x)
for i in range(1, len(self.layers)):
x = torch.tanh(x)
x = self.layers[i](x)
return x
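# Critic for actor-critic / DDPG-style agents: maps a (state, action) pair to a scalar value.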
class Critic(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(Critic, self).__init__()
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, output_size)
def forward(self, state, action):
"""
Params state and actions are torch tensors
"""
x = torch.cat([state, action], 1)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
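# Actor network: maps a state to an action squashed into [-1, 1] by the final tanh.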
class Actor(nn.Module):
def __init__(self, input_size, hidden_size, output_size, learning_rate = 3e-4):
super(Actor, self).__init__()
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, output_size)
def forward(self, state):
"""
Param state is a torch tensor
"""
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
x = torch.tanh(self.linear3(x))
return x
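# Per-epoch metric accumulator: write() pushes the averaged values to the writer (e.g. a TensorBoard SummaryWriter) and optionally logs them.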
class LogMe(dict):
def __init__(self,writer,term=True):
self.writer = writer
self.dic = defaultdict(list)
self.term = term
def write(self,i):
if len(self.dic)==0: return
s=f"Epoch {i} : "
for k,v in self.dic.items():
self.writer.add_scalar(k,sum(v)*1./len(v),i)
s+=f"{k}:{sum(v)*1./len(v)} -- "
self.dic.clear()
if self.term: logging.info(s)
def update(self,l):
for k,v in l:
self.add(k,v)
def direct_write(self,k,v,i):
self.writer.add_scalar(k,v,i)
def add(self,k,v):
self.dic[k].append(v)
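# Archive the project's .py and .yaml sources into <path>/arch.tar so each run keeps a copy of the code that produced it.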
def save_src(path):
current_dir = os.getcwd()
package_dir = current_dir.split('RL', 1)[0]
#path = os.path.abspath(path)
os.chdir(package_dir)
#print(package_dir)
src_files = subprocess.Popen(('find', 'RL', '-name', '*.py', '-o', '-name', '*.yaml'),
stdout=subprocess.PIPE)
#print(package_dir,path)
#path=os.path.abspath(path)
#print(str(src_files))
subprocess.check_output(('tar', '-zcf', path+"/arch.tar", '-T', '-'), stdin=src_files.stdout, stderr=subprocess.STDOUT)
src_files.wait()
os.chdir(current_dir)
def prs(*args):
st = ""
for s in args:
st += str(s)
print(st)
class DotDict(dict):
"""dot.notation access to dictionary attributes (Thomas Robert)"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def load_yaml(path):
with open(path, 'r') as stream:
opt = yaml.load(stream,Loader=yaml.Loader)
return DotDict(opt)
def write_yaml(file,dotdict):
d=dict(dotdict)
with open(file, 'w', encoding='utf8') as outfile:
yaml.dump(d, outfile, default_flow_style=False, allow_unicode=True)
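# Epsilon-greedy exploration with hyperbolic decay: epsilon_t = max(epsilon / (1 + eta * episode), epsilon_min).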
class EpsilonGreedyDecay:
def __init__(self, epsilon, eta, epsilon_min):
self.eta = eta
self.epsilon=epsilon
self.epsilon_min=epsilon_min
def act(self, episode, q_values):
decay = self.epsilon / (1 + (self.eta * episode))
if decay<self.epsilon_min:
decay=self.epsilon_min
if np.random.random() > decay:
_,action = torch.max(q_values,0) # we take the action that maximize the q_value
return action.item()
return np.random.randint(len(q_values)) | from collections import deque
import random
import torch.autograd
from torch.autograd import Variable |
ImageMapEditor.js | import React, { Component } from 'react';
import { ResizeSensor } from 'css-element-queries';
import { Badge, Button, Spin, Popconfirm, Menu, Modal } from 'antd';
import debounce from 'lodash/debounce';
import i18n from 'i18next';
import storage from 'store/storages/localStorage';
import Wireframe from '../wireframe/Wireframe';
import Canvas from '../canvas/Canvas';
import ImageMapFooterToolbar from './ImageMapFooterToolbar';
import ImageMapItems from './ImageMapItems';
import ImageMapTitle from './ImageMapTitle';
import ImageMapHeaderToolbar from './ImageMapHeaderToolbar';
import ImageMapPreview from './ImageMapPreview';
import ImageMapConfigurations from './ImageMapConfigurations';
import SandBox from '../sandbox/SandBox';
import '../../libs/fontawesome-5.2.0/css/all.css';
import '../../styles/index.less';
import Container from '../common/Container';
import CommonButton from '../common/CommonButton';
const propertiesToInclude = [
'id',
'name',
'lock',
'file',
'src',
'link',
'tooltip',
'animation',
'layout',
'workareaWidth',
'workareaHeight',
'videoLoadType',
'autoplay',
'shadow',
'muted',
'loop',
'code',
'icon',
'userProperty',
'trigger',
'configuration',
'superType',
];
const defaultOptions = {
fill: 'rgba(0, 0, 0, 1)',
stroke: 'rgba(255, 255, 255, 0)',
resource: {},
link: {
enabled: false,
type: 'resource',
state: 'new',
dashboard: {},
},
tooltip: {
enabled: true,
type: 'resource',
template: '<div>{{message.name}}</div>',
},
animation: {
type: 'none',
loop: true,
autoplay: true,
delay: 100,
duration: 1000,
},
userProperty: {},
trigger: {
enabled: false,
type: 'alarm',
script: 'return message.value > 0;',
effect: 'style',
},
};
class | extends Component {
state = {
selectedItem: null,
zoomRatio: 1,
canvasRect: {
width: 300,
height: 150,
},
preview: false,
loading: false,
progress: 0,
animations: [],
styles: [],
dataSources: [],
editing: false,
descriptors: {},
}
componentDidMount() {
this.showLoading(true);
import('./Descriptors.json').then((descriptors) => {
this.setState({
descriptors,
}, () => {
this.showLoading(false);
});
});
this.resizeSensor = new ResizeSensor(this.container, () => {
const { canvasRect: currentCanvasRect } = this.state;
const canvasRect = Object.assign({}, currentCanvasRect, {
width: this.container.clientWidth,
height: this.container.clientHeight,
});
this.setState({
canvasRect,
});
});
this.setState({
canvasRect: {
width: this.container.clientWidth,
height: this.container.clientHeight,
},
selectedItem: null,
});
}
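    // Callbacks handed to the Canvas component: selection, add/remove, modification, zoom, tooltips, links and context menus.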
canvasHandlers = {
onAdd: (target) => {
if (!this.state.editing) {
this.changeEditing(true);
}
if (target.type === 'activeSelection') {
this.canvasHandlers.onSelect(null);
return;
}
this.canvasRef.handlers.select(target);
},
onSelect: (target) => {
if (target
&& target.id
&& target.id !== 'workarea'
&& target.type !== 'activeSelection') {
if (this.state.selectedItem && target.id === this.state.selectedItem.id) {
return;
}
this.canvasRef.handlers.getObjects().forEach((obj) => {
if (obj) {
this.canvasRef.animationHandlers.initAnimation(obj, true);
}
});
this.setState({
selectedItem: target,
});
return;
}
this.canvasRef.handlers.getObjects().forEach((obj) => {
if (obj) {
this.canvasRef.animationHandlers.initAnimation(obj, true);
}
});
this.setState({
selectedItem: null,
});
},
onRemove: (target) => {
if (!this.state.editing) {
this.changeEditing(true);
}
this.canvasHandlers.onSelect(null);
},
onModified: debounce((target) => {
if (!this.state.editing) {
this.changeEditing(true);
}
if (target
&& target.id
&& target.id !== 'workarea'
&& target.type !== 'activeSelection') {
this.setState({
selectedItem: target,
});
return;
}
this.setState({
selectedItem: null,
});
}, 300),
onZoom: (zoom) => {
this.setState({
zoomRatio: zoom,
});
},
onChange: (selectedItem, changedValues, allValues) => {
if (!this.state.editing) {
this.changeEditing(true);
}
const changedKey = Object.keys(changedValues)[0];
const changedValue = changedValues[changedKey];
if (allValues.workarea) {
this.canvasHandlers.onChangeWokarea(changedKey, changedValue, allValues.workarea);
return;
}
if (changedKey === 'width' || changedKey === 'height') {
this.canvasRef.handlers.scaleToResize(allValues.width, allValues.height);
return;
}
if (changedKey === 'lock') {
this.canvasRef.handlers.setObject({
lockMovementX: changedValue,
lockMovementY: changedValue,
hasControls: !changedValue,
hoverCursor: changedValue ? 'pointer' : 'move',
editable: !changedValue,
lock: changedValue,
});
return;
}
if (changedKey === 'file' || changedKey === 'src' || changedKey === 'code') {
if (selectedItem.type === 'image') {
this.canvasRef.handlers.setImageById(selectedItem.id, changedValue);
} else if (this.canvasRef.handlers.isElementType(selectedItem.type)) {
this.canvasRef.elementHandlers.setById(selectedItem.id, changedValue);
}
return;
}
if (changedKey === 'link') {
const link = Object.assign({}, defaultOptions.link, allValues.link);
this.canvasRef.handlers.set(changedKey, link);
return;
}
if (changedKey === 'tooltip') {
const tooltip = Object.assign({}, defaultOptions.tooltip, allValues.tooltip);
this.canvasRef.handlers.set(changedKey, tooltip);
return;
}
if (changedKey === 'animation') {
const animation = Object.assign({}, defaultOptions.animation, allValues.animation);
this.canvasRef.handlers.set(changedKey, animation);
return;
}
if (changedKey === 'icon') {
const { unicode, styles } = changedValue[Object.keys(changedValue)[0]];
const uni = parseInt(unicode, 16);
if (styles[0] === 'brands') {
this.canvasRef.handlers.set('fontFamily', 'Font Awesome 5 Brands');
} else if (styles[0] === 'regular') {
this.canvasRef.handlers.set('fontFamily', 'Font Awesome 5 Regular');
} else {
this.canvasRef.handlers.set('fontFamily', 'Font Awesome 5 Free');
}
this.canvasRef.handlers.set('text', String.fromCodePoint(uni));
this.canvasRef.handlers.set('icon', changedValue);
return;
}
if (changedKey === 'shadow') {
if (allValues.shadow.enabled) {
this.canvasRef.handlers.setShadow(changedKey, allValues.shadow);
} else {
this.canvasRef.handlers.setShadow(changedKey, null);
}
return;
}
if (changedKey === 'fontWeight') {
this.canvasRef.handlers.set(changedKey, changedValue ? 'bold' : 'normal');
return;
}
if (changedKey === 'fontStyle') {
this.canvasRef.handlers.set(changedKey, changedValue ? 'italic' : 'normal');
return;
}
if (changedKey === 'textAlign') {
this.canvasRef.handlers.set(changedKey, Object.keys(changedValue)[0]);
return;
}
if (changedKey === 'trigger') {
const trigger = Object.assign({}, defaultOptions.trigger, allValues.trigger);
this.canvasRef.handlers.set(changedKey, trigger);
return;
}
this.canvasRef.handlers.set(changedKey, changedValue);
},
onChangeWokarea: (changedKey, changedValue, allValues) => {
if (changedKey === 'layout') {
this.canvasRef.workareaHandlers.setLayout(changedValue);
return;
}
if (changedKey === 'file' || changedKey === 'src') {
this.canvasRef.workareaHandlers.setImage(changedValue);
return;
}
if (changedKey === 'width' || changedKey === 'height') {
this.canvasRef.handlers.originScaleToResize(this.canvasRef.workarea, allValues.width, allValues.height);
this.canvasRef.canvas.centerObject(this.canvasRef.workarea);
return;
}
this.canvasRef.workarea.set(changedKey, changedValue);
this.canvasRef.canvas.requestRenderAll();
},
onTooltip: (ref, target) => {
const value = (Math.random() * 10) + 1;
const { animations, styles } = this.state;
// const { code } = target.trigger;
// const compile = SandBox.compile(code);
// const result = compile(value, animations, styles, target.userProperty);
// console.log(result);
return (
<div>
<div>
<div>
<Button>
{target.id}
</Button>
</div>
<Badge count={value} />
</div>
</div>
);
},
onLink: (canvas, target) => {
const { link } = target;
if (link.state === 'current') {
document.location.href = link.url;
return;
}
window.open(link.url);
},
onContext: (ref, event, target) => {
if ((target && target.id === 'workarea') || !target) {
const { layerX: left, layerY: top } = event;
return (
<Menu>
<Menu.SubMenu key="add" style={{ width: 120 }} title={'Add'}>
{
this.transformList().map((item) => {
const option = Object.assign({}, item.option, { left, top });
const newItem = Object.assign({}, item, { option });
return (
<Menu.Item style={{ padding: 0 }} key={item.name}>
{this.itemsRef.renderItem(newItem, false)}
</Menu.Item>
);
})
}
</Menu.SubMenu>
</Menu>
);
}
if (target.type === 'activeSelection') {
return (
<Menu>
<Menu.Item onClick={() => { this.canvasRef.handlers.toGroup(); }}>
{'Group'}
</Menu.Item>
<Menu.Item onClick={() => { this.canvasRef.handlers.duplicate(); }}>
{'Clone'}
</Menu.Item>
<Menu.Item onClick={() => { this.canvasRef.handlers.remove(); }}>
{'Delete'}
</Menu.Item>
</Menu>
);
}
if (target.type === 'group') {
return (
<Menu>
<Menu.Item onClick={() => { this.canvasRef.handlers.toActiveSelection(); }}>
{'Ungroup'}
</Menu.Item>
<Menu.Item onClick={() => { this.canvasRef.handlers.duplicate(); }}>
{'Clone'}
</Menu.Item>
<Menu.Item onClick={() => { this.canvasRef.handlers.remove(); }}>
{'Delete'}
</Menu.Item>
</Menu>
);
}
return (
<Menu>
<Menu.Item onClick={() => { this.canvasRef.handlers.duplicateById(target.id); }}>
{'Clone'}
</Menu.Item>
<Menu.Item onClick={() => { this.canvasRef.handlers.removeById(target.id); }}>
{'Delete'}
</Menu.Item>
</Menu>
);
},
}
handlers = {
onChangePreview: (checked) => {
this.setState({
preview: typeof checked === 'object' ? false : checked,
}, () => {
if (this.state.preview) {
const data = this.canvasRef.handlers.exportJSON().objects.filter((obj) => {
if (!obj.id) {
return false;
}
return true;
});
this.preview.canvasRef.handlers.importJSON(data);
return;
}
this.preview.canvasRef.handlers.clear(true);
});
},
onProgress: (progress) => {
this.setState({
progress,
});
},
onImport: (files) => {
if (files) {
this.showLoading(true);
setTimeout(() => {
const reader = new FileReader();
reader.onprogress = (e) => {
if (e.lengthComputable) {
const progress = parseInt(((e.loaded / e.total) * 100), 10);
this.handlers.onProgress(progress);
}
};
reader.onload = (e) => {
const { objects, animations, styles, dataSources } = JSON.parse(e.target.result);
this.setState({
animations,
styles,
dataSources,
});
if (objects) {
this.canvasRef.handlers.clear(true);
const data = objects.filter((obj) => {
if (!obj.id) {
return false;
}
return true;
});
this.canvasRef.handlers.importJSON(JSON.stringify(data));
}
};
reader.onloadend = () => {
this.showLoading(false);
};
reader.onerror = () => {
this.showLoading(false);
};
reader.readAsText(files[0]);
}, 500);
}
},
onUpload: () => {
const inputEl = document.createElement('input');
inputEl.accept = '.json';
inputEl.type = 'file';
inputEl.hidden = true;
inputEl.onchange = (e) => {
this.handlers.onImport(e.target.files);
};
document.body.appendChild(inputEl); // required for firefox
inputEl.click();
inputEl.remove();
},
onDownload: () => {
this.showLoading(true);
const objects = this.canvasRef.handlers.exportJSON().objects.filter((obj) => {
if (!obj.id) {
return false;
}
return true;
});
const { animations, styles, dataSources } = this.state;
const exportDatas = {
objects,
animations,
styles,
dataSources,
};
const anchorEl = document.createElement('a');
anchorEl.href = `data:text/json;charset=utf-8,${encodeURIComponent(JSON.stringify(exportDatas, null, '\t'))}`;
anchorEl.download = `${this.canvasRef.workarea.name || 'sample'}.json`;
document.body.appendChild(anchorEl); // required for firefox
anchorEl.click();
anchorEl.remove();
this.showLoading(false);
},
onChangeAnimations: (animations) => {
if (!this.state.editing) {
this.changeEditing(true);
}
this.setState({
animations,
});
},
onChangeStyles: (styles) => {
if (!this.state.editing) {
this.changeEditing(true);
}
this.setState({
styles,
});
},
onChangeDataSources: (dataSources) => {
if (!this.state.editing) {
this.changeEditing(true);
}
this.setState({
dataSources,
});
},
}
transformList = () => {
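    // Flatten the descriptor map ({ type: [items] }) into a single array so the
    // context menu's "Add" submenu can list every available item.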
return Object.values(this.state.descriptors).reduce((prev, curr) => prev.concat(curr), []);
}
showLoading = (loading) => {
this.setState({
loading,
});
}
changeEditing = (editing) => {
this.setState({
editing,
});
}
render() {
const {
preview,
selectedItem,
canvasRect,
zoomRatio,
loading,
progress,
animations,
styles,
dataSources,
editing,
descriptors,
} = this.state;
const {
onAdd,
onRemove,
onSelect,
onModified,
onChange,
onZoom,
onTooltip,
onLink,
onContext,
} = this.canvasHandlers;
const {
onChangePreview,
onDownload,
onUpload,
onChangeAnimations,
onChangeStyles,
onChangeDataSources,
} = this.handlers;
const action = (
<React.Fragment>
<CommonButton
className="rde-action-btn"
shape="circle"
icon="file-download"
disabled={!editing}
tooltipTitle={i18n.t('action.save')}
onClick={onDownload}
tooltipPlacement="bottomRight"
/>
{
editing ? (
<Popconfirm
title={i18n.t('rule-chains.rule-chains-editing-confirm')}
okText={i18n.t('action.ok')}
cancelText={i18n.t('action.cancel')}
onConfirm={onUpload}
placement="bottomRight"
>
<CommonButton
className="rde-action-btn"
shape="circle"
icon="file-upload"
tooltipTitle={i18n.t('action.exit')}
tooltipPlacement="bottomRight"
/>
</Popconfirm>
) : (
<CommonButton
className="rde-action-btn"
shape="circle"
icon="file-upload"
tooltipTitle={i18n.t('action.back')}
tooltipPlacement="bottomRight"
onClick={onUpload}
/>
)
}
</React.Fragment>
);
const titleContent = (
<React.Fragment>
<span>{'Image Map Editor'}</span>
</React.Fragment>
);
const title = (
<ImageMapTitle
title={titleContent}
action={action}
/>
);
const content = (
<div className="rde-editor">
<ImageMapItems ref={(c) => { this.itemsRef = c; }} canvasRef={this.canvasRef} descriptors={descriptors} />
<div className="rde-editor-canvas-container">
<div className="rde-editor-header-toolbar">
<ImageMapHeaderToolbar canvasRef={this.canvasRef} selectedItem={selectedItem} onSelect={onSelect} />
</div>
<div
ref={(c) => { this.container = c; }}
className="rde-editor-canvas"
>
<Canvas
ref={(c) => { this.canvasRef = c; }}
canvasOption={{
width: canvasRect.width,
height: canvasRect.height,
backgroundColor: '#f3f3f3',
selection: true,
}}
minZoom={30}
defaultOptions={defaultOptions}
propertiesToInclude={propertiesToInclude}
onModified={onModified}
onAdd={onAdd}
onRemove={onRemove}
onSelect={onSelect}
onZoom={onZoom}
onTooltip={onTooltip}
onLink={onLink}
onContext={onContext}
/>
</div>
<div className="rde-editor-footer-toolbar">
<ImageMapFooterToolbar canvasRef={this.canvasRef} preview={preview} onChangePreview={onChangePreview} zoomRatio={zoomRatio} />
</div>
</div>
<ImageMapConfigurations
canvasRef={this.canvasRef}
onChange={onChange}
selectedItem={selectedItem}
onChangeAnimations={onChangeAnimations}
onChangeStyles={onChangeStyles}
onChangeDataSources={onChangeDataSources}
animations={animations}
styles={styles}
dataSources={dataSources}
/>
<ImageMapPreview ref={(c) => { this.preview = c; }} preview={preview} onChangePreview={onChangePreview} onTooltip={onTooltip} onLink={onLink} />
</div>
);
return (
<Container
title={title}
content={content}
loading={loading}
className=""
/>
);
}
}
export default ImageMapEditor;
| ImageMapEditor |
RecipeWaterCtrl.js | (function() {
var module = angular.module('brew-o-module.controller');
module.controller('RecipeWaterCtrl', function($scope, BrewCalc, WaterReport) {
        $scope.updateReport = function(){
            WaterReport.query(function(reports) {
                $scope.reports = reports;
                window.localStorage.setItem( "waterReport", JSON.stringify(reports));
            });
        };
        var reports = JSON.parse( window.localStorage.getItem( "waterReport" ));
        if(!reports) {
            $scope.updateReport();
        }else{
            $scope.reports = reports;
        }
$scope.ions = [
{txt: 'Ca',sup:'+2',key:'ca',balance: 'Ca_balance',showLevel:true,wr:'calcium',type:'cations'},
{txt: 'Mg',sup:'+2',key:'mg',balance: 'Mg_balance',showLevel:true,wr:'magnesium',type:'cations'},
{txt: 'SO',sup:'-2',sub:'4',key:'so4',balance: 'SO4_balance',showLevel:true,wr:'sulfate',type:'anions'},
{txt: 'Na',sup:'+',key:'na',balance: 'Na_balance',showLevel:true,wr:'sodium',type:'cations'},
{txt: 'Cl',sup:'-',key:'cl',balance: 'Cl_balance',showLevel:true,wr:'chloride',type:'anions'},
{txt: 'HCO',sup:'-',sub:'3',key:'hco3',balance: 'SO4Cl_balance',wr:'bicarbonate',type:'anions'},
{txt: 'Alkalinity',key:'alc'}
];
$scope.output = {
diluted: new Array(6),
diff: new Array(6),
salts: new Array(6),
result: new Array(6),
adjusted: new Array(6)
};
$scope.updateSource = function() {
var report = $scope.getReport($scope.recipe.water.selectedSource);
if ( report ) {
for ( var i=0;i<$scope.ions.length-1;i++) {
var ion = $scope.ions[i];
$scope.recipe.water.source[ion.key] = report[ion.type][ion.wr];
}
$scope.onChange();
}
};
$scope.sourceEqual = function() {
var report = $scope.getReport($scope.recipe.water.selectedSource);
if ( report ) {
var ret = true;
for ( var i=0;i<$scope.ions.length-1;i++) {
var ion = $scope.ions[i];
ret = ret && ($scope.recipe.water.source[ion.key] === report[ion.type][ion.wr]);
}
return ret;
} else {
return false;
}
};
$scope.updateTarget = function() {
var report = $scope.getReport($scope.recipe.water.selectedTarget);
if ( report ) {
for ( var i=0;i<$scope.ions.length-1;i++) {
var ion = $scope.ions[i];
$scope.recipe.water.target[ion.key] = report[ion.type][ion.wr];
}
$scope.onChange();
}
};
$scope.targetEqual = function() {
var report = $scope.getReport($scope.recipe.water.selectedTarget);
if ( report ) {
var ret = true;
for ( var i=0;i<$scope.ions.length-1;i++) {
var ion = $scope.ions[i];
ret = ret && ($scope.recipe.water.target[ion.key] === report[ion.type][ion.wr]);
}
return ret;
} else {
return false;
}
};
$scope.getReport = function(id) {
if ( $scope.reports ) {
var ret = null;
angular.forEach($scope.reports, function(report) {
if (report._id === id) {
ret = report;
}
});
return ret;
}
};
$scope.getLiters = function() {
var total = BrewCalc.calculateBoilSize($scope.recipe.BATCH_SIZE,
$scope.recipe.TrubChillerLosses,
$scope.recipe.BOIL_TIME,
$scope.recipe.PercentEvap,
$scope.recipe.TopUpWater)
+$scope.recipe.SpargeDeadSpace
+$scope.recipe.GrainAbsorbtion*$scope.recipe.totalAmountMash;
$scope.recipe.water.liters = Math.round(total);
$scope.onChange();
};
$scope.suggest = function() {
var input = {
dilution: $scope.recipe.water.dilution,
mashvolume: $scope.recipe.water.liters,
source: convertArray($scope.recipe.water.source),
target: convertArray($scope.recipe.water.target),
CaCO3: $scope.recipe.water.CaCO3,
NaHCO3: $scope.recipe.water.NaHCO3,
CaSO4: $scope.recipe.water.CaSO4,
CaCl2: $scope.recipe.water.CaCl2,
MgSO4: $scope.recipe.water.MgSO4,
NaCl: $scope.recipe.water.NaCl
};
var suggest = BrewCalc.suggestWaterCalculation(input, {
diluted: new Array(6),
diff: new Array(6),
salts: new Array(6),
result: new Array(6),
adjusted: new Array(6)
});
$scope.recipe.water.CaCO3 = suggest.CaCO3;
$scope.recipe.water.NaHCO3 = suggest.NaHCO3;
$scope.recipe.water.CaSO4 = suggest.CaSO4;
$scope.recipe.water.CaCl2 = suggest.CaCl2;
$scope.recipe.water.MgSO4 = suggest.MgSO4;
$scope.recipe.water.NaCl = suggest.NaCl;
$scope.onChange();
};
$scope.onChange = function() {
var input = {
dilution: $scope.recipe.water.dilution,
mashvolume: $scope.recipe.water.liters,
source: convertArray($scope.recipe.water.source),
target: convertArray($scope.recipe.water.target),
CaCO3: $scope.recipe.water.CaCO3,
NaHCO3: $scope.recipe.water.NaHCO3,
CaSO4: $scope.recipe.water.CaSO4,
CaCl2: $scope.recipe.water.CaCl2,
MgSO4: $scope.recipe.water.MgSO4,
NaCl: $scope.recipe.water.NaCl
};
BrewCalc.waterCalculation(input, $scope.output);
$scope.recipe.water.source.alc = input.source[6];
};
function | (ions) {
var ret = [];
angular.forEach($scope.ions, function(ion) {
ret.push(ions[ion.key]);
})
return ret;
};
$scope.onChange();
})
.filter('result', function() {
return function(value) {
if ( value > 0 ) {
return '+ ' + value;
} if ( value === 0 ) {
return '0';
} else {
return '- '+(-value);
}
}
});
})();
| convertArray |
test_template.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from exam import fixture
from sentry.interfaces.template import Template
from sentry.models import Event
from sentry.testutils import TestCase
class TemplateTest(TestCase):
@fixture
def interface(self):
return Template.to_python(dict(
filename='foo.html',
context_line='hello world',
lineno=1,
))
def test_serialize(self):
result = self.interface.to_json()
self.assertEquals(result['filename'], 'foo.html')
self.assertEquals(result['context_line'], 'hello world')
self.assertEquals(result['lineno'], 1)
def test_get_hash(self):
result = self.interface.get_hash()
self.assertEquals(result, ['foo.html', 'hello world'])
@mock.patch('sentry.interfaces.template.get_context')
@mock.patch('sentry.interfaces.template.Template.get_traceback')
def test_to_string_returns_traceback(self, get_traceback, get_context):
get_traceback.return_value = 'traceback'
event = mock.Mock(spec=Event)
result = self.interface.to_string(event)
get_traceback.assert_called_once_with(event, get_context.return_value)
self.assertEquals(result, 'Stacktrace (most recent call last):\n\ntraceback')
def | (self):
result = type(self.interface).to_python(self.interface.to_json())
assert result.to_json() == self.interface.to_json()
| test_serialize_unserialize_behavior |
rate_limit.rs | use crate::{
models::RateLimit, routes::DiscordRouteInfo, RateLimitBucket, RateLimiter,
};
use anyhow::anyhow;
use chrono::Utc;
use derive_more::{Display, Error, From};
use futures::future::BoxFuture;
use reqwest::{Response, ResponseBuilderExt};
use std::{
collections::HashMap,
sync::Arc,
task::{Context, Poll},
time::Duration,
};
use tokio::sync::Mutex;
use tower::{Layer, Service};
use tracing::warn;
use wfbp_http::{middleware::RestRequestBuilder, RequestError};
#[derive(Clone, Debug, Default)]
pub struct RateLimitLayer {
rate_limiters: Arc<Mutex<HashMap<RateLimitBucket, RateLimiter>>>,
}
impl<Next> Layer<Next> for RateLimitLayer {
type Service = RateLimitService<Next>;
fn layer(&self, next: Next) -> Self::Service {
RateLimitService {
rate_limiters: self.rate_limiters.clone(),
next,
}
}
}
#[derive(Clone, Debug)]
pub struct RateLimitService<Next> {
rate_limiters: Arc<Mutex<HashMap<RateLimitBucket, RateLimiter>>>,
next: Next,
}
impl<Next> Service<RestRequestBuilder> for RateLimitService<Next>
where
Next: Service<RestRequestBuilder, Response = Response>
+ Send
+ Sync
+ 'static,
Next::Error: From<RateLimitError>,
Next::Future: Send + 'static,
{
type Response = Next::Response;
type Error = Next::Error;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(
&mut self,
cx: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
self.next.poll_ready(cx)
}
fn call(&mut self, req: RestRequestBuilder) -> Self::Future {
// Get route info
let info: &DiscordRouteInfo = match req.get() {
Some(info) => info,
None => {
return Box::pin(async move {
Err(RateLimitError::MissingRouteInfo)?
})
}
};
let bucket = info.bucket.clone();
let next_fut = self.next.call(req);
let rate_limiters = self.rate_limiters.clone();
Box::pin(async move {
// Get rate limiter for bucket
let mut limiter_guard = rate_limiters.lock().await;
let limiter =
limiter_guard.entry(bucket.clone()).or_insert(RateLimiter {
bucket: bucket.clone(),
limit: 1,
remaining: 1,
reset: Utc::now(),
});
// Wait until rate limit is refreshed if needed
limiter.wait().await;
// Execute request
let mut response = next_fut.await?;
| // Check for global rate limit
let global_limit_hit = response
.headers()
.get(RateLimiter::RATELIMIT_GLOBAL)
.and_then(|v| v.to_str().ok())
.filter(|v| v.to_ascii_lowercase() == "true")
.is_some();
if global_limit_hit {
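                // Reading the body consumes the response, so capture status/headers/URL
                // first, sleep for the advertised retry window, then rebuild the response
                // from the saved parts before handing it back to the caller.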
let status = response.status();
let headers = response.headers().clone();
let url = response.url().clone();
let body = response
.bytes()
.await
.map_err(RateLimitError::ReadBodyError)?;
// Parse body
let limit: RateLimit = serde_json::from_slice(&body)
.map_err(RateLimitError::GlobalRateLimitParseError)?;
warn!(?limit, "hit global rate limit");
tokio::time::sleep(Duration::from_secs_f32(limit.retry_after))
.await;
// Reconstruct response
let mut builder = http::Response::builder();
*builder.headers_mut().unwrap() = headers;
response = builder
.status(status)
.url(url)
.body(body)
.map_err(RateLimitError::ReconstructResponseError)?
.into();
}
Ok(response)
})
}
}
#[derive(Debug, Display, Error, From)]
#[non_exhaustive]
pub enum RateLimitError {
#[display(fmt = "missing route info for request")]
MissingRouteInfo,
#[display(fmt = "{_0}")]
GlobalRateLimitParseError(serde_json::Error),
#[display(fmt = "error reading response body")]
ReadBodyError(reqwest::Error),
#[display(fmt = "error reconstructing response")]
ReconstructResponseError(http::Error),
}
impl From<RateLimitError> for RequestError {
fn from(err: RateLimitError) -> Self {
match err {
RateLimitError::ReadBodyError(err) => {
RequestError::ReqwestError(err)
}
RateLimitError::MissingRouteInfo => anyhow!("{err}").into(),
RateLimitError::GlobalRateLimitParseError(_) => {
anyhow!("{err}").into()
}
RateLimitError::ReconstructResponseError(_) => {
anyhow!("{err}").into()
}
}
}
} | // Process response
limiter.update(&response);
|
Select.tsx | import React from 'react'
import { ArrowDown } from '../InlineIcons'
import InputStyled from './InputStyled'
type ISelectProps = {
register?: any
options: any[]
name: string
label?: string | required?: boolean
error?: string
onChange?: (event: React.FormEvent<HTMLSelectElement>) => any
}
const Select = ({
register,
options,
name,
label,
placeholder,
required = false,
...rest
}: ISelectProps) => {
return (
<InputStyled
label={label}
name={name}
error={rest.error}
inputElement={
<>
<select name={name} ref={register} required={required} {...rest}>
<option value="" hidden={!!required}>
{placeholder}
</option>
{options.map(value => {
let n, v
if (typeof value === 'object') {
v = value.value
n = value.name
} else {
v = value
n = value
}
return (
<option value={v} key={v}>
{n}
</option>
)
})}
</select>
<span className="input-icon select-arrow">
<ArrowDown />
</span>
</>
}
/>
)
}
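// Illustrative usage sketch (the prop values below are assumptions, not part of this module):
// <Select name="country" label="Country" placeholder="Pick a country"
//         options={['Canada', { name: 'Mexico', value: 'mx' }]} required />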
export default Select | placeholder?: string |
parallel_ldjson_export_child.js | module.exports = function(o, callback) {
var MongoClient = require('../../').MongoClient,
f = require('util').format,
fs = require('fs');
// Connect to db
MongoClient.connect('mongodb://localhost:27017/benchmark?maxPoolSize=10', function(e, client) {
var indexes = [o.s, o.e];
// Collection
var collection = client.collection('corpus');
// Calculate the skip and limit
var skip = indexes[0] * 5000;
var end = indexes[1] * 5000;
var limit = (indexes[1] - indexes[0]) * 5000;
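    // Each child exports its own slice of the corpus: documents whose _i falls between
    // skip and end, streamed out in 5000-document line-delimited JSON chunks.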
var docs = [];
var index = indexes[0];
var left = indexes[1] - indexes[0];
var totalDocs = 0;
// console.dir({$gte: {_i: skip}, $lte: {_i: end}})
// Perform the query
collection.find({_i : {$gte: skip, $lte: end}}).each(function(err, doc) {
if(doc == null) return callback();
docs.push(doc);
totalDocs++;
// Do we have 5000 docs
if(docs.length === 5000) {
var docsString = docs.map(function(x) {
return JSON.stringify(x);
}).join('\n');
docs = [];
// Write the file
fs.writeFile(f('%s/../../files%s.tmp', __dirname, index++), docsString, function(e, r) {
left = left - 1;
| if(left == 0) {
callback();
}
});
}
});
});
} | |
acpi_table.rs | // Copyright (c) 2020 Huawei Technologies Co.,Ltd. All rights reserved.
//
// StratoVirt is licensed under Mulan PSL v2.
// You can use this software according to the terms and conditions of the Mulan
// PSL v2.
// You may obtain a copy of Mulan PSL v2 at:
// http://license.coscl.org.cn/MulanPSL2
// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
// See the Mulan PSL v2 for more details.
use util::byte_code::ByteCode;
use super::aml_compiler::AmlBuilder;
/// Offset of checksum field in ACPI table.
pub const TABLE_CHECKSUM_OFFSET: u32 = 9;
#[repr(C, packed)]
#[derive(Default, Copy, Clone)]
pub struct AcpiGenericAddress {
space_id: u8,
bit_width: u8,
bit_offset: u8,
access_size: u8,
address: u64,
}
impl AcpiGenericAddress {
pub fn new_io_address<T: Into<u64>>(addr: T) -> AcpiGenericAddress {
AcpiGenericAddress {
space_id: 1,
bit_width: 8 * std::mem::size_of::<T>() as u8,
bit_offset: 0,
access_size: std::mem::size_of::<T>() as u8,
address: addr.into(),
}
}
}
impl ByteCode for AcpiGenericAddress {}
impl AmlBuilder for AcpiGenericAddress {
fn aml_bytes(&self) -> Vec<u8> {
self.as_bytes().to_vec()
}
}
/// The common ACPI table header.
#[repr(C, packed)]
#[derive(Default, Copy, Clone)]
pub struct AcpiTableHeader {
/// Signature of this table.
pub signature: [u8; 4],
/// The total length of this table, including this header.
pub length: u32,
/// The revision of this table.
pub revision: u8,
/// The checksum of this table, including this header.
pub checksum: u8,
/// OEM ID.
pub oem_id: [u8; 6],
/// OEM table ID.
pub oem_table_id: [u8; 8],
/// OEM revision of this table.
pub oem_revision: u32,
/// Vendor ID for the ASL Compiler, default zero.
pub asl_compiler_id: [u8; 4],
/// Revision number of the ASL Compiler, default zero.
pub asl_compiler_revision: u32,
}
impl ByteCode for AcpiTableHeader {}
impl AmlBuilder for AcpiTableHeader {
fn aml_bytes(&self) -> Vec<u8> {
self.as_bytes().to_vec()
}
}
/// ACPI table.
pub struct AcpiTable {
entries: Vec<u8>,
}
impl AcpiTable {
    /// The constructor of an ACPI table.
///
/// # Arguments
///
/// `signature` - The signature of this table.
/// `revision` - The revision of this table.
/// `oem_id` - OEM ID.
/// `oem_table_id` - OEM table ID.
/// `oem_revision` - OEM revision.
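    ///
    /// # Example
    ///
    /// A minimal sketch (the signature and OEM values below are illustrative
    /// assumptions, not values used elsewhere in this crate):
    ///
    /// ```ignore
    /// let table = AcpiTable::new(*b"TEST", 1, *b"OEMID ", *b"OEMTABLE", 1);
    /// assert_eq!(table.table_len(), std::mem::size_of::<AcpiTableHeader>());
    /// ```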
pub fn new(
signature: [u8; 4],
revision: u8,
oem_id: [u8; 6],
oem_table_id: [u8; 8],
oem_revision: u32,
) -> AcpiTable {
AcpiTable {
entries: AcpiTableHeader {
signature,
length: 0,
revision,
checksum: 0,
oem_id,
oem_table_id,
oem_revision,
asl_compiler_id: [0_u8; 4],
asl_compiler_revision: 0_u32,
}
.aml_bytes(),
}
}
/// Get the length of this table.
pub fn table_len(&self) -> usize {
self.entries.len()
}
    /// Extend this table to the given length; truncation is not supported.
pub fn set_table_len(&mut self, new_size: usize) {
if new_size < self.entries.len() |
self.entries
.extend(vec![0_u8; new_size - self.entries.len()].as_slice());
self.entries[4..=7].copy_from_slice((new_size as u32).as_bytes());
}
/// Set the value of one field in table.
///
/// # Arguments
///
/// `byte_index` - The location of field in this table.
/// `new_value` - The new value that will be set in the field.
pub fn set_field<T: ByteCode>(&mut self, byte_index: usize, new_value: T) {
let value_len = std::mem::size_of::<T>();
        if byte_index >= self.entries.len() || byte_index + value_len > self.entries.len() {
panic!("Set field in table failed: overflow occurs.");
}
self.entries[byte_index..(byte_index + value_len)].copy_from_slice(new_value.as_bytes());
}
/// Append byte stream to the end of table.
pub fn append_child(&mut self, bytes: &[u8]) {
self.entries.extend(bytes);
let table_len = self.entries.len() as u32;
self.entries[4..=7].copy_from_slice(table_len.as_bytes());
}
}
impl AmlBuilder for AcpiTable {
fn aml_bytes(&self) -> Vec<u8> {
self.entries.clone()
}
}
/// ACPI RSDP structure.
#[repr(C, packed)]
#[derive(Default, Copy, Clone)]
pub struct AcpiRsdp {
/// The signature of RSDP, which is "RSD PTR ".
signature: [u8; 8],
/// The checksum of the first 20 bytes of RSDP.
checksum: u8,
/// OEM ID.
oem_id: [u8; 6],
/// The revision of this structure, only revision 2 is supported.
revision: u8,
/// 32-bit address of RSDT table.
rsdt_tlb_addr: u32,
/// The length of this table.
length: u32,
/// 64-bit address of XSDT table.
xsdt_tlb_addr: u64,
/// Extended checksum of this RSDP structure.
extended_checksum: u8,
/// Reserved field.
reserved: [u8; 3],
}
impl AcpiRsdp {
pub fn new(oem_id: [u8; 6]) -> AcpiRsdp {
AcpiRsdp {
signature: *b"RSD PTR ",
checksum: 0,
oem_id,
revision: 2,
rsdt_tlb_addr: 0_u32,
length: std::mem::size_of::<AcpiRsdp>() as u32,
xsdt_tlb_addr: 0_u64,
extended_checksum: 0,
reserved: [0_u8; 3],
}
}
}
impl ByteCode for AcpiRsdp {}
impl AmlBuilder for AcpiRsdp {
fn aml_bytes(&self) -> Vec<u8> {
self.as_bytes().to_vec()
}
}
/// This module describes ACPI MADT's sub-tables on x86_64 platform.
#[cfg(target_arch = "x86_64")]
pub mod madt_subtable {
use super::*;
pub const IOAPIC_BASE_ADDR: u32 = 0xfec0_0000;
pub const LAPIC_BASE_ADDR: u32 = 0xfee0_0000;
/// MADT processor Local APIC structure.
#[repr(C, packed)]
#[derive(Default, Copy, Clone)]
pub struct AcpiLocalApic {
/// Type ID.
pub type_id: u8,
/// The length of this structure.
pub length: u8,
/// ACPI processor UID.
pub processor_uid: u8,
/// The processor's Local APIC ID.
pub apic_id: u8,
/// Local APIC flags.
pub flags: u32,
}
impl ByteCode for AcpiLocalApic {}
impl AmlBuilder for AcpiLocalApic {
fn aml_bytes(&self) -> Vec<u8> {
Vec::from(self.as_bytes())
}
}
/// IO APIC structure.
#[repr(C, packed)]
#[derive(Default, Copy, Clone)]
pub struct AcpiIoApic {
/// Type ID.
pub type_id: u8,
/// The length of this structure.
pub length: u8,
/// This IO APIC's ID.
pub io_apic_id: u8,
/// Reserved field.
pub reserved: u8,
/// The 32-bit address of this IO APIC.
pub io_apic_addr: u32,
/// The GSI number where this I/O APIC’s interrupt inputs start.
pub gsi_base: u32,
}
impl ByteCode for AcpiIoApic {}
impl AmlBuilder for AcpiIoApic {
fn aml_bytes(&self) -> Vec<u8> {
Vec::from(self.as_bytes())
}
}
}
/// This module describes ACPI MADT's sub-tables on aarch64 platform.
#[cfg(target_arch = "aarch64")]
pub mod madt_subtable {
use super::*;
pub const ARCH_GIC_MAINT_IRQ: u32 = 9;
/// GIC CPU Interface structure.
#[repr(C, packed)]
#[derive(Default, Copy, Clone)]
pub struct AcpiGicCpu {
/// Type ID.
pub type_id: u8,
/// The length of this structure.
pub length: u8,
/// Reserved field.
reserved_1: u16,
/// CPU interface number.
pub cpu_interface_num: u32,
/// ACPI processor UID.
pub processor_uid: u32,
/// Flags.
pub flags: u32,
/// The version of Arm processor parking protocol.
pub parking_version: u32,
/// The GSIV used for performance monitoring interrupts.
pub perf_interrupt: u32,
/// The 64-bit address of the processor’s parking protocol mailbox.
pub parked_addr: u64,
/// CPU can access this CPU interface via this 64-bit address.
pub base_addr: u64,
/// Address of the GIC virtual CPU interface registers.
pub gicv_addr: u64,
/// Address of the GIC virtual interface control block registers.
pub gich_addr: u64,
/// GSIV for Virtual GIC maintenance interrupt.
pub vgic_interrupt: u32,
/// If GIC's version is above 3, this field is 64-bit address of redistributor.
pub gicr_addr: u64,
/// MPIDR.
pub mpidr: u64,
/// Reserved field.
reserved_2: u32,
}
impl ByteCode for AcpiGicCpu {}
impl AmlBuilder for AcpiGicCpu {
fn aml_bytes(&self) -> Vec<u8> {
Vec::from(self.as_bytes())
}
}
/// GIC distributor structure.
#[repr(C, packed)]
#[derive(Default, Copy, Clone)]
pub struct AcpiGicDistributor {
/// Type ID.
pub type_id: u8,
/// The length of this structure.
pub length: u8,
/// Reserved field.
reserved_1: u16,
/// This distributor's hardware ID.
pub gic_id: u32,
/// The 64-bit address of this distributor.
pub base_addr: u64,
/// System vector base, must be zero.
pub sys_vector_base: u32,
/// GIC version.
pub gic_version: u8,
/// Reserved field.
reserved_2: [u8; 3],
}
impl ByteCode for AcpiGicDistributor {}
impl AmlBuilder for AcpiGicDistributor {
fn aml_bytes(&self) -> Vec<u8> {
Vec::from(self.as_bytes())
}
}
/// GIC Redistributor structure.
#[repr(C, packed)]
#[derive(Default, Copy, Clone)]
pub struct AcpiGicRedistributor {
/// Type ID.
pub type_id: u8,
/// The length of this structure.
pub length: u8,
/// Reserved field.
reserved_1: u16,
/// The 64-bit address of this redistributor.
pub base_addr: u64,
/// Length of the GIC redistributor discovery page range.
pub range_length: u32,
}
impl ByteCode for AcpiGicRedistributor {}
impl AmlBuilder for AcpiGicRedistributor {
fn aml_bytes(&self) -> Vec<u8> {
Vec::from(self.as_bytes())
}
}
/// GIC Interrupt Translation Service (ITS) Structure.
#[repr(C, packed)]
#[derive(Default, Copy, Clone)]
pub struct AcpiGicIts {
/// Type ID.
pub type_id: u8,
/// The length of this structure.
pub length: u8,
/// Reserved field.
reserved_1: u16,
/// ITS ID, must be unique.
pub its_id: u32,
/// The 64-bit address of this ITS.
pub base_addr: u64,
/// Reserved field.
reserved_2: u32,
}
impl ByteCode for AcpiGicIts {}
impl AmlBuilder for AcpiGicIts {
fn aml_bytes(&self) -> Vec<u8> {
Vec::from(self.as_bytes())
}
}
}
| {
panic!("New size is smaller than old-size, truncation is not supported.");
} |
propfind_invite.py | ##
# Copyright (c) 2012-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from caldavclientlibrary.protocol.http.data.string import ResponseDataString
from caldavclientlibrary.protocol.webdav.definitions import statuscodes, \
headers
from caldavclientlibrary.protocol.webdav.propfind import PropFind
from contrib.performance.sqlusage.requests.httpTests import HTTPTestBase
from caldavclientlibrary.protocol.caldav.definitions import csxml
class | (HTTPTestBase):
"""
A propfind operation
"""
def __init__(self, label, sessions, logFilePath, logFilePrefix, depth=1):
super(PropfindInviteTest, self).__init__(label, sessions, logFilePath, logFilePrefix)
self.depth = headers.Depth1 if depth == 1 else headers.Depth0
def doRequest(self):
"""
Execute the actual HTTP request.
"""
props = (
csxml.invite,
)
# Create WebDAV propfind
request = PropFind(self.sessions[0], self.sessions[0].calendarHref, self.depth, props)
result = ResponseDataString()
request.setOutput(result)
# Process it
self.sessions[0].runSession(request)
        # If it's a 207 we want to parse the XML
if request.getStatusCode() == statuscodes.MultiStatus:
pass
else:
raise RuntimeError("Propfind request failed: %s" % (request.getStatusCode(),))
| PropfindInviteTest |
test_data_checks_and_actions_integration.py | import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal, assert_series_equal
from evalml.automl import get_default_primary_search_objective
from evalml.data_checks import DefaultDataChecks, OutliersDataCheck
from evalml.data_checks.invalid_target_data_check import InvalidTargetDataCheck
from evalml.data_checks.null_data_check import NullDataCheck
from evalml.pipelines import BinaryClassificationPipeline
from evalml.pipelines.components import (
DropColumns,
DropRowsTransformer,
TargetImputer,
)
from evalml.pipelines.components.transformers.imputers.per_column_imputer import (
PerColumnImputer,
)
from evalml.pipelines.multiclass_classification_pipeline import (
MulticlassClassificationPipeline,
)
from evalml.pipelines.regression_pipeline import RegressionPipeline
from evalml.pipelines.utils import make_pipeline_from_data_check_output
def test_data_checks_with_healthy_data(X_y_binary):
# Checks do not return any error.
X, y = X_y_binary
data_check = DefaultDataChecks(
"binary", get_default_primary_search_objective("binary")
)
data_checks_output = data_check.validate(X, y)
assert make_pipeline_from_data_check_output(
"binary", data_checks_output
) == BinaryClassificationPipeline(component_graph={}, parameters={}, random_seed=0)
def test_data_checks_suggests_drop_and_impute_cols():
X = pd.DataFrame(
{
"null_with_categorical": ["a", None, "b", "c", "c"],
"lots_of_null": [None, 7, None, 3, 5],
"all_null": [None, None, None, None, None],
"no_null": [1, 2, 3, 4, 5],
}
)
X.ww.init(logical_types={"null_with_categorical": "categorical"})
y = pd.Series([1, 0, 0, 1, 1])
data_check = NullDataCheck()
data_checks_output = data_check.validate(X, y)
action_pipeline = make_pipeline_from_data_check_output("binary", data_checks_output)
assert action_pipeline == BinaryClassificationPipeline(
component_graph={
"Per Column Imputer": [PerColumnImputer, "X", "y"],
"Drop Columns Transformer": [
DropColumns,
"Per Column Imputer.x",
"y",
],
},
parameters={
"Per Column Imputer": {
"impute_strategies": {
"null_with_categorical": {"impute_strategy": "most_frequent"},
"lots_of_null": {"impute_strategy": "mean"},
},
"default_impute_strategy": "most_frequent",
},
"Drop Columns Transformer": {"columns": ["all_null"]},
},
random_seed=0,
)
X_expected = pd.DataFrame(
{
"null_with_categorical": ["a", "c", "b", "c", "c"],
"lots_of_null": [5, 7, 5, 3, 5],
"no_null": [1, 2, 3, 4, 5],
}
)
X_expected.ww.init(
logical_types={"lots_of_null": "double", "null_with_categorical": "categorical"}
)
action_pipeline.fit(X, y)
X_t = action_pipeline.transform(X, y)
assert_frame_equal(X_expected, X_t)
@pytest.mark.parametrize("problem_type", ["binary", "multiclass", "regression"])
def test_data_checks_impute_cols(problem_type):
X = pd.DataFrame()
if problem_type == "binary":
y = ww.init_series(pd.Series([0, 1, 1, None, None]))
objective = "Log Loss Binary"
expected_pipeline_class = BinaryClassificationPipeline
y_expected = ww.init_series(pd.Series([0, 1, 1, 1, 1]), logical_type="double")
elif problem_type == "multiclass":
y = ww.init_series(pd.Series([0, 1, 2, 2, None]))
objective = "Log Loss Multiclass"
expected_pipeline_class = MulticlassClassificationPipeline
y_expected = ww.init_series(pd.Series([0, 1, 2, 2, 2]), logical_type="double")
else:
y = ww.init_series(pd.Series([0, 0.1, 0.2, None, None]))
objective = "R2"
expected_pipeline_class = RegressionPipeline
y_expected = ww.init_series(
pd.Series([0, 0.1, 0.2, 0.1, 0.1]), logical_type="double"
)
data_check = InvalidTargetDataCheck(problem_type, objective)
data_checks_output = data_check.validate(None, y)
action_pipeline = make_pipeline_from_data_check_output(
problem_type, data_checks_output
)
expected_parameters = (
{"Target Imputer": {"impute_strategy": "mean", "fill_value": None}}
if problem_type == "regression"
else {
"Target Imputer": {"impute_strategy": "most_frequent", "fill_value": None}
}
)
assert action_pipeline == expected_pipeline_class(
component_graph={"Target Imputer": [TargetImputer, "X", "y"]},
parameters=expected_parameters,
random_seed=0,
)
action_pipeline.fit(X, y)
_, y_t = action_pipeline.transform(X, y)
assert_series_equal(y_expected, y_t)
def test_data_checks_suggests_drop_rows():
a = np.arange(10) * 0.01
data = np.tile(a, (100, 10))
X = pd.DataFrame(data=data)
X.iloc[0, 3] = 1000
X.iloc[3, 25] = 1000
X.iloc[5, 55] = 10000
X.iloc[10, 72] = -1000
X.iloc[:, 90] = "string_values"
y = pd.Series(np.tile([0, 1], 50))
| action_pipeline = make_pipeline_from_data_check_output("binary", data_checks_output)
assert action_pipeline == BinaryClassificationPipeline(
component_graph={"Drop Rows Transformer": [DropRowsTransformer, "X", "y"]},
parameters={"Drop Rows Transformer": {"indices_to_drop": [0, 3, 5, 10]}},
random_seed=0,
)
X_expected = X.drop([0, 3, 5, 10])
X_expected.ww.init()
y_expected = y.drop([0, 3, 5, 10])
action_pipeline.fit(X, y)
X_t, y_t = action_pipeline.transform(X, y)
assert_frame_equal(X_expected, X_t)
assert_series_equal(y_expected, y_t) | outliers_check = OutliersDataCheck()
data_checks_output = outliers_check.validate(X)
|
main.py | import tkinter as tk
import time
import threading
global autoXP, manualXP, roundTitle, button
from google.cloud import vision
import re
import pyautogui
global autoXPIsOn
autoXPIsOn = False
def getRoundsToPlay():
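    # Rounds that can still be played = floor(current energy / energy cost per round);
    # both values are read off the screen and OCR'd via the Vision API.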
    pyautogui.screenshot('energyCount.png', region=(x + 286, y + 430, 45, 32)) # Get a screenshot of the current energy count using coordinates relative to the game boundaries.
    pyautogui.screenshot('energyCost.png', region=(x + 494, y + 380, 36, 24)) # Get a screenshot of the energy cost using coordinates relative to the game boundaries.
energyCount = detect_text("energyCount.png")
energyCost = detect_text("energyCost.png")
return int(energyCount / energyCost)
def | (path):
"""Detects text in the file."""
import io
client = vision.ImageAnnotatorClient()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision.types.Image(content=content)
response = client.text_detection(image=image)
texts = response.text_annotations
if response.error.message:
raise Exception(
'{}\nFor more info on error messages, check: '
'https://cloud.google.com/apis/design/errors'.format(
response.error.message))
return int(re.search(r'\d+', texts[0].description).group())
def playRounds():
time.sleep(0.5)
pyautogui.click(x + 121, y + 189) # click on unicorn way
time.sleep(0.5)
pyautogui.click(x + 500, y + 430) # click play button
time.sleep(0.5)
skipButton = pyautogui.pixel(int(x + 215), int(y + 459))
while skipButton[0] != 158 and skipButton[1] != 20 and skipButton[2] != 20: # wait for the pixel color to be red to indicate that the skip button is on screen
time.sleep(0.1)
skipButton = pyautogui.pixel(int(x + 215), int(y + 459))
pyautogui.click(x + 215, y + 459) # click on the skip button
time.sleep(0.5)
pyautogui.click(x + 398, y + 254) # click confirm skip
time.sleep(0.5)
pyautogui.click(x + 278, y + 254) # click to place pet
pyautogui.click()
time.sleep(0.5)
pyautogui.click(x + 241, y + 214) # click on space to place first tower
time.sleep(0.5)
pyautogui.click(x + 322, y + 169) # click to buy the first avalon tower
time.sleep(0.5)
pyautogui.click(x + 241, y + 214) # click on space to select first avalon tower
time.sleep(0.5)
pyautogui.click(x + 236, y + 162) # click to upgrade the first avalon tower
time.sleep(0.5)
pyautogui.click(x + 269, y + 310) # click on space to place second tower
time.sleep(0.5)
pyautogui.click(x + 351, y + 260) # click to buy the second avalon tower
time.sleep(0.5)
pyautogui.click(x + 269, y + 310) # click on space to select second avalon tower
time.sleep(0.5)
pyautogui.click(x + 270, y + 258) # click to upgrade the second avalon tower
time.sleep(0.5)
pyautogui.click(x + 602, y + 439) # click on the GO button
time.sleep(0.5)
pyautogui.click(x + 567, y + 19) # click fast forward
skipButton = pyautogui.pixel(int(x + 586), int(y + 459))
while skipButton[0] != 105 and skipButton[1] != 202 and skipButton[2] != 10: # wait for the pixel color to be green to indicate that the next button is on screen
time.sleep(0.1)
skipButton = pyautogui.pixel(int(x + 586), int(y + 459))
pyautogui.click(x + 586, y + 459) # click next button
time.sleep(0.7)
skipButton = pyautogui.pixel(int(x + 179), int(y + 270))
while skipButton[0] != 13 and skipButton[1] != 116 and skipButton[2] != 183: # wait for the pixel color to be blue to indicate that the feed pet button is on screen
time.sleep(0.1)
skipButton = pyautogui.pixel(int(x + 179), int(y + 270))
time.sleep(0.5)
pyautogui.click(x + 179, y + 270) # click feed pet button
skipButton = pyautogui.pixel(int(x + 317), int(y + 415))
while skipButton[0] != 142 and skipButton[1] != 29 and skipButton[2] != 229: # wait for the pixel color to be purple to indicate that pet snacks are on screen
time.sleep(0.1)
skipButton = pyautogui.pixel(int(x + 317), int(y + 415))
pyautogui.click(x + 112, y + 226) # click on the first pet snack (highest tier)
time.sleep(0.5)
pyautogui.click(x + 317, y + 415) # click on the select button
skipButton = pyautogui.pixel(int(x + 483), int(y + 421))
while skipButton[0] != 103 and skipButton[1] != 204 and skipButton[2] != 10: # wait for the pixel color to be green to indicate the play button is on screen
time.sleep(0.1)
skipButton = pyautogui.pixel(int(x + 483), int(y + 421))
pyautogui.click(x + 483, y + 421) # click on the play button
pyautogui.moveTo(x,y)
time.sleep(1)
def startThread(amountOfRuns):
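    # Run the automation on a daemon thread so the tkinter main loop stays responsive
    # while pyautogui drives the game window.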
if autoXPIsOn == False:
roundsToPlay = amountOfRuns.get()
if roundsToPlay != "" and roundsToPlay != None and roundsToPlay != " ":
button.config(state=tk.DISABLED)
t = threading.Thread(target=lambda: startGame(amountOfRuns))
t.daemon = True
t.start()
else:
button.config(state=tk.DISABLED)
t = threading.Thread(target=lambda: startGame(amountOfRuns))
t.daemon = True
t.start()
def startGame(amountOfRuns):
global x, y
time.sleep(1)
chromeLocation = pyautogui.locateCenterOnScreen('../../Desktop/GrubXPImages/chromeUnfocused.jpg',
confidence=0.94)
if chromeLocation != None:
pyautogui.moveTo(chromeLocation)
pyautogui.click()
time.sleep(1)
findGrubOnScreen = pyautogui.locateOnScreen('../../Desktop/GrubXPImages/grubLevelSelect.jpg',
confidence=0.9)
if findGrubOnScreen == None:
return
x = findGrubOnScreen[0]
y = findGrubOnScreen[1]
if autoXPIsOn == True:
roundsToPlay = getRoundsToPlay()
else:
roundsToPlay = int(amountOfRuns.get())
if roundsToPlay > 0:
for i in range(roundsToPlay):
roundTitle.config(text="Round: " + str(i + 1) + " / " + str(roundsToPlay))
playRounds()
button.config(state=tk.NORMAL)
def switchToAutomatic(entryToChange):
global autoXPIsOn
entryToChange.config(state=tk.DISABLED)
autoXPIsOn = True
def switchToManual(entryToChange):
global autoXPIsOn
entryToChange.config(state=tk.NORMAL)
autoXPIsOn = False
r = tk.Tk()
r.geometry("500x500")
r.config(background='#34b518')
r.title('Grub Guardian Bot')
mainTitle = tk.Label(r, text="Grub Guardian XP Tool", font='Helvetica 18 bold', fg='#0059b3', bg="#34b518")
roundTitle = tk.Label(r, text="Round: 0 / 0", font='Helvetica 14 bold', fg='#fc9d03', bg="#34b518")
autoXP = tk.Radiobutton(r, text="Automatic Mode", value=1, command=lambda: switchToAutomatic(runAmount), bg="#34b518", font='Helvetica 12')
manualXP = tk.Radiobutton(r, text="Manual Mode", value=2, command=lambda: switchToManual(runAmount), bg="#34b518", font='Helvetica 12')
roundTitle.place(x=190, y=80)
mainTitle.place(x=110,y=50)
autoXP.place(x=120, y=150)
manualXP.place(x=270, y=150)
runAmount = tk.Entry(r, width=20)
runAmount.place(x=300, y=227)
runLabel = tk.Label(r, text="# of runs:", font='Helvetica 10', bg="#34b518")
runLabel.place(x=240, y=225)
button = tk.Button(r, text='Start', width=25, command=lambda: startThread(runAmount))
button.place(x=165, y=300)
r.mainloop()
| detect_text |
main.go | package main
import (
"flag"
"fmt"
"os"
"path/filepath"
)
// GitVersion is set by the Makefile and contains the version string.
var GitVersion = ""
var stats struct {
total int
errorsNotRegular int
errorsOpening int
errorsWritingXattr int
errorsOther int
inprogress int
corrupt int
timechange int
outdated int
ok int
}
var args struct {
remove bool
recursive bool
q bool
qq bool
}
// walkFn is used when `cshatag` is called with the `--recursive` option. It is the function called
// for each file or directory visited whilst traversing the file tree.
func | (path string, info os.FileInfo, err error) error {
if err != nil {
fmt.Fprintf(os.Stderr, "Error accessing %q: %v\n", path, err)
stats.errorsOpening++
} else if info.Mode().IsRegular() {
checkFile(path)
} else if !info.IsDir() {
if !args.qq {
fmt.Printf("<nonregular> %s\n", path)
}
}
return nil
}
// processArg is called for each command-line argument given. For regular files it will call
// `checkFile`. Directories will be processed recursively provided the `--recursive` flag is set.
// Symbolic links are not followed.
func processArg(fn string) {
fi, err := os.Lstat(fn) // Using Lstat to be consistent with filepath.Walk for symbolic links.
if err != nil {
fmt.Fprintln(os.Stderr, err)
stats.errorsOpening++
} else if fi.Mode().IsRegular() {
checkFile(fn)
} else if fi.IsDir() {
if args.recursive {
filepath.Walk(fn, walkFn)
} else {
fmt.Fprintf(os.Stderr, "Error: %q is a directory, did you mean to use the '-recursive' option?\n", fn)
stats.errorsNotRegular++
}
} else {
fmt.Fprintf(os.Stderr, "Error: %q is not a regular file.\n", fn)
stats.errorsNotRegular++
}
}
func main() {
const myname = "cshatag"
if GitVersion == "" {
GitVersion = "(version unknown)"
}
flag.BoolVar(&args.remove, "remove", false, "Remove any previously stored extended attributes.")
flag.BoolVar(&args.q, "q", false, "quiet: don't print <ok> files")
flag.BoolVar(&args.qq, "qq", false, "quiet²: Only print <corrupt> files and errors")
flag.BoolVar(&args.recursive, "recursive", false, "Recursively descend into subdirectories. "+
"Symbolic links are not followed.")
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "%s %s\n", myname, GitVersion)
fmt.Fprintf(os.Stderr, "Usage: %s [OPTION] FILE [FILE ...]\n", myname)
fmt.Fprintf(os.Stderr, "Options:\n")
flag.PrintDefaults()
os.Exit(1)
}
flag.Parse()
if flag.NArg() == 0 {
flag.Usage()
}
if args.qq {
// quiet2 implies quiet
args.q = true
}
for _, fn := range flag.Args() {
processArg(fn)
}
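	// Exit status mapping: 0 = every file ok/outdated/timechange, 2 = only open errors,
	// 3 = only non-regular-file errors, 4 = only xattr write errors, 5 = at least one
	// corrupt file, 6 = mixed or other errors.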
if stats.corrupt > 0 {
os.Exit(5)
}
totalErrors := stats.errorsOpening + stats.errorsNotRegular + stats.errorsWritingXattr +
stats.errorsOther
if totalErrors > 0 {
if stats.errorsOpening == totalErrors {
os.Exit(2)
} else if stats.errorsNotRegular == totalErrors {
os.Exit(3)
} else if stats.errorsWritingXattr == totalErrors {
os.Exit(4)
}
os.Exit(6)
}
if (stats.ok + stats.outdated + stats.timechange) == stats.total {
os.Exit(0)
}
os.Exit(6)
}
| walkFn |
test_TC09.py | from test_src.Tests.test07_scroll_list.conftest import PyFix
from test_src.Pages.HomePage import HomePage
from test_src.Pages.LoginPage import LoginPage |
class TestScrollList(PyFix):
"""this used to check the title of the loaded url, and check the Sign In button"""
def test_homepage(self):
try:
self.HomePage = HomePage(self.driver)
assert self.HomePage.get_home_page_url() == TestData.BASE_URL
assert self.HomePage.get_home_page_title() == TestData.HOME_PAGE_TITLE
assert self.HomePage.is_sign_in_btn_displayed() is True
self.HomePage.click_sign_in_btn()
time.sleep(1)
except AssertionError as err:
self.pytest.fail(print(TestData.assert_error_msg, err))
"""this used to check the elements of the Login Page"""
"""Login an existing user"""
def test_check_login_form(self):
try:
self.LoginPage = LoginPage(self.driver)
assert self.LoginPage.is_inputs_displayed() is True
assert self.LoginPage.is_inputs_placeholder() is True
assert self.LoginPage.is_password_type() is True
except AssertionError as err:
self.pytest.fail(print(TestData.assert_error_msg, err))
"""this used to fill Login Page and sign in to app"""
def test_login_exist_user(self):
try:
self.LoginPage = LoginPage(self.driver)
self.LoginPage.fill_login_existed_email()
assert self.LoginPage.is_sign_in_btn_displayed() is True
self.LoginPage.click_sign_in_btn()
time.sleep(3)
except AssertionError as err:
self.pytest.fail(print(TestData.assert_error_msg, err))
"""this used to check scroll in the list"""
def test_check_scroll_in_the_list(self):
try:
self.MainPage = MainPage(self.driver)
assert self.MainPage.is_username_displayed() == TestData.reg_test_valid[0]
assert self.MainPage.count_post_fields() == 11
self.MainPage.scroll_to_bottom_of_the_main_page()
assert self.MainPage.is_next_btn_displayed() is True
assert self.MainPage.is_next_btn_selected() is False
self.MainPage.click_next_btn_topic_list()
time.sleep(3)
# assert self.MainPage.is_next_btn_selected() is True
assert self.MainPage.count_post_fields() == 1
assert self.MainPage.is_log_out_btn_displayed() is True
self.MainPage.click_log_out_btn()
time.sleep(1)
except AssertionError as err:
self.pytest.fail(print(TestData.assert_error_msg, err))
"""this used to check successful navigate to home page"""
def test_homepage_is_displayed(self):
try:
self.HomePage = HomePage(self.driver)
assert self.HomePage.get_home_page_url() == TestData.BASE_URL
assert self.HomePage.get_home_page_title() == TestData.HOME_PAGE_TITLE
assert self.HomePage.is_sign_in_btn_displayed() is True
except AssertionError as err:
self.pytest.fail(print(TestData.assert_error_msg, err)) | from test_src.Pages.MainPage import MainPage
from test_src.Data.test_data import TestData
import time
|
index.js | import { h, Component } from 'preact' // eslint-disable-line no-unused-vars
import { connect } from 'preact-redux'
import style from './style.css'
class | extends Component {
isHighlight (env) {
const serviceDescription = this.props.selectedServiceDescriptions[this.props.selectedServiceDescriptionIndex]
if (serviceDescription) {
return serviceDescription.envVars[env.name]
}
return false
}
getEnvs () {
const envs = []
const envMap = new Map()
for (const serviceDescription of this.props.selectedServiceDescriptions) {
const { envVars } = serviceDescription
for (const key of Object.keys(envVars)) {
if (!envMap.has(key)) {
envMap.set(key, envVars[key])
}
}
}
for (const [key, value] of envMap) {
envs.push({
name: key,
...value
})
}
return envs
}
render (props, state) {
const envs = this.getEnvs()
if (envs.length === 0) {
return null
}
return (
<div className="full-width">
<h2 className="ui header">ENV Vars</h2>
<table className="ui fixed table">
<thead>
<tr>
<th>Name</th>
<th>Description</th>
</tr>
</thead>
<tbody>
{envs.map((env, i) => (
<tr className={this.isHighlight(env) ? style.active : style.inactive} key={i}>
<td>{env.name}</td>
<td>{env.description}</td>
</tr>
))}
</tbody>
</table>
</div>
)
}
}
const mapStateToProps = state => ({
selectedServiceDescriptions: state.serviceDescription.selectedServiceDescriptions,
selectedServiceDescriptionIndex: state.serviceDescription.selectedServiceDescriptionIndex
})
export default connect(
mapStateToProps,
undefined
)(ENVTable)
| ENVTable |
banking_stage.rs | //! The `banking_stage` processes Transaction messages. It is intended to be used
//! to construct a software pipeline. The stage uses all available CPU cores and
//! can do its processing in parallel with signature verification on the GPU.
use crate::packet_hasher::PacketHasher;
use crossbeam_channel::{Receiver as CrossbeamReceiver, RecvTimeoutError};
use itertools::Itertools;
use lru::LruCache;
use retain_mut::RetainMut;
use analog_entry::entry::hash_transactions;
use analog_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo};
use analog_ledger::blockstore_processor::TransactionStatusSender;
use analog_measure::measure::Measure;
use analog_metrics::{inc_new_counter_debug, inc_new_counter_info};
use analog_perf::{
cuda_runtime::PinnedVec,
data_budget::DataBudget,
packet::{limited_deserialize, Packet, Packets, PACKETS_PER_BATCH},
perf_libs,
};
use analog_poh::poh_recorder::{BankStart, PohRecorder, PohRecorderError, TransactionRecorder};
use analog_runtime::{
accounts_db::ErrorCounters,
bank::{
Bank, ExecuteTimings, TransactionBalancesSet, TransactionCheckResult,
TransactionExecutionResult,
},
bank_utils,
cost_model::CostModel,
cost_tracker::CostTracker,
transaction_batch::TransactionBatch,
vote_sender_types::ReplayVoteSender,
};
use analog_sdk::{
clock::{
Slot, DEFAULT_TICKS_PER_SLOT, MAX_PROCESSING_AGE, MAX_TRANSACTION_FORWARDING_DELAY,
MAX_TRANSACTION_FORWARDING_DELAY_GPU,
},
feature_set,
message::Message,
pubkey::Pubkey,
short_vec::decode_shortu16_len,
signature::Signature,
timing::{duration_as_ms, timestamp, AtomicInterval},
transaction::{self, SanitizedTransaction, TransactionError, VersionedTransaction},
};
use analog_streamer::sendmmsg::{batch_send, SendPktsError};
use analog_transaction_status::token_balances::{
collect_token_balances, TransactionTokenBalancesSet,
};
use std::{
cmp,
collections::{HashMap, VecDeque},
env,
mem::size_of,
net::{SocketAddr, UdpSocket},
ops::DerefMut,
sync::atomic::{AtomicU64, AtomicUsize, Ordering},
sync::{Arc, Mutex, RwLock, RwLockReadGuard},
thread::{self, Builder, JoinHandle},
time::Duration,
time::Instant,
};
/// (packets, valid_indexes, forwarded)
/// A set of packets, the indexes of the valid packets, and whether this batch has been forwarded.
type PacketsAndOffsets = (Packets, Vec<usize>, bool);
pub type UnprocessedPackets = VecDeque<PacketsAndOffsets>;
/// Transaction forwarding
pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 2;
pub const HOLD_TRANSACTIONS_SLOT_OFFSET: u64 = 20;
// Fixed thread size seems to be fastest on GCP setup
pub const NUM_THREADS: u32 = 4;
const TOTAL_BUFFERED_PACKETS: usize = 500_000;
const MAX_NUM_TRANSACTIONS_PER_BATCH: usize = 128;
const DEFAULT_LRU_SIZE: usize = 200_000;
const NUM_VOTE_PROCESSING_THREADS: u32 = 2;
const MIN_THREADS_BANKING: u32 = 1;
#[derive(Debug, Default)]
pub struct BankingStageStats {
last_report: AtomicInterval,
id: u32,
process_packets_count: AtomicUsize,
new_tx_count: AtomicUsize,
dropped_packet_batches_count: AtomicUsize,
dropped_packets_count: AtomicUsize,
dropped_duplicated_packets_count: AtomicUsize,
newly_buffered_packets_count: AtomicUsize,
current_buffered_packets_count: AtomicUsize,
current_buffered_packet_batches_count: AtomicUsize,
rebuffered_packets_count: AtomicUsize,
consumed_buffered_packets_count: AtomicUsize,
cost_tracker_check_count: AtomicUsize,
cost_forced_retry_transactions_count: AtomicUsize,
// Timing
consume_buffered_packets_elapsed: AtomicU64,
process_packets_elapsed: AtomicU64,
handle_retryable_packets_elapsed: AtomicU64,
filter_pending_packets_elapsed: AtomicU64,
packet_duplicate_check_elapsed: AtomicU64,
packet_conversion_elapsed: AtomicU64,
unprocessed_packet_conversion_elapsed: AtomicU64,
transaction_processing_elapsed: AtomicU64,
cost_tracker_update_elapsed: AtomicU64,
cost_tracker_clone_elapsed: AtomicU64,
cost_tracker_check_elapsed: AtomicU64,
}
impl BankingStageStats {
pub fn new(id: u32) -> Self {
BankingStageStats {
id,
..BankingStageStats::default()
}
}
fn report(&self, report_interval_ms: u64) {
if self.last_report.should_update(report_interval_ms) {
datapoint_info!(
"banking_stage-loop-stats",
("id", self.id as i64, i64),
(
"process_packets_count",
self.process_packets_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"new_tx_count",
self.new_tx_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"dropped_packet_batches_count",
self.dropped_packet_batches_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"dropped_packets_count",
self.dropped_packets_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"dropped_duplicated_packets_count",
self.dropped_duplicated_packets_count
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"newly_buffered_packets_count",
self.newly_buffered_packets_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"current_buffered_packet_batches_count",
self.current_buffered_packet_batches_count
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"current_buffered_packets_count",
self.current_buffered_packets_count
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"rebuffered_packets_count",
self.rebuffered_packets_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"consumed_buffered_packets_count",
self.consumed_buffered_packets_count
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"cost_tracker_check_count",
self.cost_tracker_check_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"cost_forced_retry_transactions_count",
self.cost_forced_retry_transactions_count
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"consume_buffered_packets_elapsed",
self.consume_buffered_packets_elapsed
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"process_packets_elapsed",
self.process_packets_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"handle_retryable_packets_elapsed",
self.handle_retryable_packets_elapsed
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"filter_pending_packets_elapsed",
self.filter_pending_packets_elapsed
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"packet_duplicate_check_elapsed",
self.packet_duplicate_check_elapsed
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"packet_conversion_elapsed",
self.packet_conversion_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"unprocessed_packet_conversion_elapsed",
self.unprocessed_packet_conversion_elapsed
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"transaction_processing_elapsed",
self.transaction_processing_elapsed
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"cost_tracker_update_elapsed",
self.cost_tracker_update_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"cost_tracker_clone_elapsed",
self.cost_tracker_clone_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"cost_tracker_check_elapsed",
self.cost_tracker_check_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
);
}
}
}
/// Stores the stage's thread handle and output receiver.
pub struct BankingStage {
bank_thread_hdls: Vec<JoinHandle<()>>,
}
#[derive(Debug, Clone)]
pub enum BufferedPacketsDecision {
Consume(u128),
Forward,
ForwardAndHold,
Hold,
}
#[derive(Debug, Clone)]
pub enum ForwardOption {
NotForward,
ForwardTpuVote,
ForwardTransaction,
}
impl BankingStage {
/// Create the stage using `bank`. Exit when `verified_receiver` is dropped.
#[allow(clippy::new_ret_no_self)]
pub fn new(
cluster_info: &Arc<ClusterInfo>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
tpu_verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: ReplayVoteSender,
cost_model: Arc<RwLock<CostModel>>,
) -> Self {
Self::new_num_threads(
cluster_info,
poh_recorder,
verified_receiver,
tpu_verified_vote_receiver,
verified_vote_receiver,
Self::num_threads(),
transaction_status_sender,
gossip_vote_sender,
cost_model,
)
}
fn new_num_threads(
cluster_info: &Arc<ClusterInfo>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
verified_receiver: CrossbeamReceiver<Vec<Packets>>,
tpu_verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
verified_vote_receiver: CrossbeamReceiver<Vec<Packets>>,
num_threads: u32,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: ReplayVoteSender,
cost_model: Arc<RwLock<CostModel>>,
) -> Self {
let batch_limit = TOTAL_BUFFERED_PACKETS / ((num_threads - 1) as usize * PACKETS_PER_BATCH);
// Single thread to generate entries from many banks.
// This thread talks to poh_service and broadcasts the entries once they have been recorded.
// Once an entry has been recorded, its blockhash is registered with the bank.
let duplicates = Arc::new(Mutex::new((
LruCache::new(DEFAULT_LRU_SIZE),
PacketHasher::default(),
)));
let data_budget = Arc::new(DataBudget::default());
// Many banks that process transactions in parallel.
assert!(num_threads >= NUM_VOTE_PROCESSING_THREADS + MIN_THREADS_BANKING);
let bank_thread_hdls: Vec<JoinHandle<()>> = (0..num_threads)
.map(|i| {
let (verified_receiver, forward_option) = match i {
0 => {
// Disable forwarding of vote transactions
// from gossip. Note - votes can also arrive from tpu
(verified_vote_receiver.clone(), ForwardOption::NotForward)
}
1 => (
tpu_verified_vote_receiver.clone(),
ForwardOption::ForwardTpuVote,
),
_ => (verified_receiver.clone(), ForwardOption::ForwardTransaction),
};
let poh_recorder = poh_recorder.clone();
let cluster_info = cluster_info.clone();
let mut recv_start = Instant::now();
let transaction_status_sender = transaction_status_sender.clone();
let gossip_vote_sender = gossip_vote_sender.clone();
let duplicates = duplicates.clone();
let data_budget = data_budget.clone();
let cost_model = cost_model.clone();
Builder::new()
.name("analog-banking-stage-tx".to_string())
.spawn(move || {
Self::process_loop(
&verified_receiver,
&poh_recorder,
&cluster_info,
&mut recv_start,
forward_option,
i,
batch_limit,
transaction_status_sender,
gossip_vote_sender,
&duplicates,
&data_budget,
cost_model,
);
})
.unwrap()
})
.collect();
Self { bank_thread_hdls }
}
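// Collect the packets that are still unprocessed and have not been forwarded yet,
// flattened down to references to the individual packets.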
fn filter_valid_packets_for_forwarding<'a>(
all_packets: impl Iterator<Item = &'a PacketsAndOffsets>,
) -> Vec<&'a Packet> {
all_packets
.filter(|(_p, _indexes, forwarded)| !forwarded)
.flat_map(|(p, valid_indexes, _forwarded)| {
valid_indexes.iter().map(move |x| &p.packets[*x])
})
.collect()
}
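// Forward the still-valid buffered packets to the leader's TPU-forwards address,
// throttled by `data_budget` so forwarding stays within a fixed bytes-per-second cap.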
fn forward_buffered_packets(
socket: &std::net::UdpSocket,
tpu_forwards: &std::net::SocketAddr,
unprocessed_packets: &UnprocessedPackets,
data_budget: &DataBudget,
) -> std::io::Result<()> {
let packets = Self::filter_valid_packets_for_forwarding(unprocessed_packets.iter());
inc_new_counter_info!("banking_stage-forwarded_packets", packets.len());
const INTERVAL_MS: u64 = 100;
const MAX_BYTES_PER_SECOND: usize = 10_000 * 1200;
const MAX_BYTES_PER_INTERVAL: usize = MAX_BYTES_PER_SECOND * INTERVAL_MS as usize / 1000;
const MAX_BYTES_BUDGET: usize = MAX_BYTES_PER_INTERVAL * 5;
data_budget.update(INTERVAL_MS, |bytes| {
std::cmp::min(bytes + MAX_BYTES_PER_INTERVAL, MAX_BYTES_BUDGET)
});
let mut packet_vec = Vec::with_capacity(packets.len());
for p in packets {
if data_budget.take(p.meta.size) {
packet_vec.push((&p.data[..p.meta.size], tpu_forwards));
}
}
if let Err(SendPktsError::IoError(ioerr, _num_failed)) = batch_send(socket, &packet_vec) {
return Err(ioerr);
}
Ok(())
}
// Returns whether the given `Packets` has any more remaining unprocessed
// transactions
fn update_buffered_packets_with_new_unprocessed(
original_unprocessed_indexes: &mut Vec<usize>,
new_unprocessed_indexes: Vec<usize>,
) -> bool {
let has_more_unprocessed_transactions =
Self::packet_has_more_unprocessed_transactions(&new_unprocessed_indexes);
if has_more_unprocessed_transactions {
*original_unprocessed_indexes = new_unprocessed_indexes
};
has_more_unprocessed_transactions
}
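/// Retries the buffered packets against the current working bank, re-buffering any
/// that remain unprocessed. Once the slot's ingestion window closes, the remaining
/// packets are only filtered for validity instead of being processed.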
#[allow(clippy::too_many_arguments)]
pub fn consume_buffered_packets(
my_pubkey: &Pubkey,
max_tx_ingestion_ns: u128,
poh_recorder: &Arc<Mutex<PohRecorder>>,
buffered_packets: &mut UnprocessedPackets,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
test_fn: Option<impl Fn()>,
banking_stage_stats: &BankingStageStats,
recorder: &TransactionRecorder,
cost_model: &Arc<RwLock<CostModel>>,
) {
let mut rebuffered_packets_len = 0;
let mut new_tx_count = 0;
let buffered_len = buffered_packets.len();
let mut proc_start = Measure::start("consume_buffered_process");
let mut reached_end_of_slot = None;
buffered_packets.retain_mut(|(msgs, ref mut original_unprocessed_indexes, _forwarded)| {
if let Some((next_leader, bank)) = &reached_end_of_slot {
// We've hit the end of this slot, no need to perform more processing,
// just filter the remaining packets for the invalid (e.g. too old) ones
let new_unprocessed_indexes = Self::filter_unprocessed_packets(
bank,
msgs,
original_unprocessed_indexes,
my_pubkey,
*next_leader,
banking_stage_stats,
cost_model,
);
Self::update_buffered_packets_with_new_unprocessed(
original_unprocessed_indexes,
new_unprocessed_indexes,
)
} else {
let bank_start = poh_recorder.lock().unwrap().bank_start();
if let Some(BankStart {
working_bank,
bank_creation_time,
}) = bank_start
{
let (processed, verified_txs_len, new_unprocessed_indexes) =
Self::process_packets_transactions(
&working_bank,
&bank_creation_time,
recorder,
msgs,
original_unprocessed_indexes.to_owned(),
transaction_status_sender.clone(),
gossip_vote_sender,
banking_stage_stats,
cost_model,
);
if processed < verified_txs_len
|| !Bank::should_bank_still_be_processing_txs(
&bank_creation_time,
max_tx_ingestion_ns,
)
{
reached_end_of_slot = Some((
poh_recorder.lock().unwrap().next_slot_leader(),
working_bank,
));
}
new_tx_count += processed;
// Out of the buffered packets just retried, collect any still unprocessed
// transactions in this batch for forwarding
rebuffered_packets_len += new_unprocessed_indexes.len();
let has_more_unprocessed_transactions =
Self::update_buffered_packets_with_new_unprocessed(
original_unprocessed_indexes,
new_unprocessed_indexes,
);
if let Some(test_fn) = &test_fn {
test_fn();
}
has_more_unprocessed_transactions
} else {
rebuffered_packets_len += original_unprocessed_indexes.len();
// `original_unprocessed_indexes` must have remaining packets to process
// if not yet processed.
assert!(Self::packet_has_more_unprocessed_transactions(
original_unprocessed_indexes
));
true
}
}
});
proc_start.stop();
debug!(
"@{:?} done processing buffered batches: {} time: {:?}ms tx count: {} tx/s: {}",
timestamp(),
buffered_len,
proc_start.as_ms(),
new_tx_count,
(new_tx_count as f32) / (proc_start.as_s())
);
banking_stage_stats
.consume_buffered_packets_elapsed
.fetch_add(proc_start.as_us(), Ordering::Relaxed);
banking_stage_stats
.rebuffered_packets_count
.fetch_add(rebuffered_packets_len, Ordering::Relaxed);
banking_stage_stats
.consumed_buffered_packets_count
.fetch_add(new_tx_count, Ordering::Relaxed);
}
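// Decide what to do with buffered packets based on whether this node currently has a
// working bank, is about to become leader, or should forward to the known leader.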
fn consume_or_forward_packets(
my_pubkey: &Pubkey,
leader_pubkey: Option<Pubkey>,
bank_still_processing_txs: Option<&Arc<Bank>>,
would_be_leader: bool,
would_be_leader_shortly: bool,
) -> BufferedPacketsDecision {
leader_pubkey.map_or(
// If leader is not known, return the buffered packets as is
BufferedPacketsDecision::Hold,
// else process the packets
|x| {
if let Some(bank) = bank_still_processing_txs {
// If the bank is available, this node is the leader
BufferedPacketsDecision::Consume(bank.ns_per_slot)
} else if would_be_leader_shortly {
// If the node will be the leader soon, hold the packets for now
BufferedPacketsDecision::Hold
} else if would_be_leader {
// Node will be leader within ~20 slots, hold the transactions in
// case it is the only node which produces an accepted slot.
BufferedPacketsDecision::ForwardAndHold
} else if x != *my_pubkey {
// If the current node is not the leader, forward the buffered packets
BufferedPacketsDecision::Forward
} else {
// This node is the upcoming leader but has no bank yet. Hold the packets for now
BufferedPacketsDecision::Hold
}
},
)
}
#[allow(clippy::too_many_arguments)]
fn process_buffered_packets(
my_pubkey: &Pubkey,
socket: &std::net::UdpSocket,
poh_recorder: &Arc<Mutex<PohRecorder>>,
cluster_info: &ClusterInfo,
buffered_packets: &mut UnprocessedPackets,
forward_option: &ForwardOption,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
banking_stage_stats: &BankingStageStats,
recorder: &TransactionRecorder,
data_budget: &DataBudget,
cost_model: &Arc<RwLock<CostModel>>,
) -> BufferedPacketsDecision {
let bank_start;
let (
leader_at_slot_offset,
bank_still_processing_txs,
would_be_leader,
would_be_leader_shortly,
) = {
let poh = poh_recorder.lock().unwrap();
bank_start = poh.bank_start();
(
poh.leader_after_n_slots(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET),
PohRecorder::get_working_bank_if_not_expired(&bank_start.as_ref()),
poh.would_be_leader(HOLD_TRANSACTIONS_SLOT_OFFSET * DEFAULT_TICKS_PER_SLOT),
poh.would_be_leader(
(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET - 1) * DEFAULT_TICKS_PER_SLOT,
),
)
};
let decision = Self::consume_or_forward_packets(
my_pubkey,
leader_at_slot_offset,
bank_still_processing_txs,
would_be_leader,
would_be_leader_shortly,
);
match decision {
BufferedPacketsDecision::Consume(max_tx_ingestion_ns) => {
Self::consume_buffered_packets(
my_pubkey,
max_tx_ingestion_ns,
poh_recorder,
buffered_packets,
transaction_status_sender,
gossip_vote_sender,
None::<Box<dyn Fn()>>,
banking_stage_stats,
recorder,
cost_model,
);
}
BufferedPacketsDecision::Forward => {
Self::handle_forwarding(
forward_option,
cluster_info,
buffered_packets,
poh_recorder,
socket,
false,
data_budget,
);
}
BufferedPacketsDecision::ForwardAndHold => {
Self::handle_forwarding(
forward_option,
cluster_info,
buffered_packets,
poh_recorder,
socket,
true,
data_budget,
);
}
_ => (),
}
decision
}
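// Resolve the forwarding address for the configured `ForwardOption` and send the
// buffered packets there. The buffer is cleared unless `hold` is set, in which case
// the remaining packets are kept and marked as forwarded.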
fn handle_forwarding(
forward_option: &ForwardOption,
cluster_info: &ClusterInfo,
buffered_packets: &mut UnprocessedPackets,
poh_recorder: &Arc<Mutex<PohRecorder>>,
socket: &UdpSocket,
hold: bool,
data_budget: &DataBudget,
) {
let addr = match forward_option {
ForwardOption::NotForward => {
if !hold {
buffered_packets.clear();
}
return;
}
ForwardOption::ForwardTransaction => {
next_leader_tpu_forwards(cluster_info, poh_recorder)
}
ForwardOption::ForwardTpuVote => next_leader_tpu_vote(cluster_info, poh_recorder),
};
let addr = match addr {
Some(addr) => addr,
None => return,
};
let _ = Self::forward_buffered_packets(socket, &addr, buffered_packets, data_budget);
if hold {
buffered_packets.retain(|(_, index, _)| !index.is_empty());
for (_, _, forwarded) in buffered_packets.iter_mut() {
*forwarded = true;
}
} else {
buffered_packets.clear();
}
}
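// Main loop for a banking thread: drain any buffered packets first, then receive new
// packet batches from `verified_receiver`, buffering whatever cannot be processed yet.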
#[allow(clippy::too_many_arguments)]
fn process_loop(
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
cluster_info: &ClusterInfo,
recv_start: &mut Instant,
forward_option: ForwardOption,
id: u32,
batch_limit: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: ReplayVoteSender,
duplicates: &Arc<Mutex<(LruCache<u64, ()>, PacketHasher)>>,
data_budget: &DataBudget,
cost_model: Arc<RwLock<CostModel>>,
) {
let recorder = poh_recorder.lock().unwrap().recorder();
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let mut buffered_packets = VecDeque::with_capacity(batch_limit);
let banking_stage_stats = BankingStageStats::new(id);
loop {
let my_pubkey = cluster_info.id();
while !buffered_packets.is_empty() {
let decision = Self::process_buffered_packets(
&my_pubkey,
&socket,
poh_recorder,
cluster_info,
&mut buffered_packets,
&forward_option,
transaction_status_sender.clone(),
&gossip_vote_sender,
&banking_stage_stats,
&recorder,
data_budget,
&cost_model,
);
if matches!(decision, BufferedPacketsDecision::Hold)
|| matches!(decision, BufferedPacketsDecision::ForwardAndHold)
{
// If we are waiting on a new bank,
// check the receiver for more transactions/for exiting
break;
}
}
let recv_timeout = if !buffered_packets.is_empty() {
// If packets are buffered, let's wait for less time on recv from the channel.
// This helps detect the next leader faster and lets the buffered
// packets be processed more quickly
Duration::from_millis(10)
} else {
// Default wait time
Duration::from_millis(100)
};
match Self::process_packets(
&my_pubkey,
verified_receiver,
poh_recorder,
recv_start,
recv_timeout,
id,
batch_limit,
transaction_status_sender.clone(),
&gossip_vote_sender,
&mut buffered_packets,
&banking_stage_stats,
duplicates,
&recorder,
&cost_model,
) {
Ok(()) | Err(RecvTimeoutError::Timeout) => (),
Err(RecvTimeoutError::Disconnected) => break,
}
banking_stage_stats.report(1000);
}
}
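/// Number of banking threads: the SOLANA_BANKING_THREADS environment variable
/// (defaulting to NUM_THREADS), but never below the minimum required for vote and
/// transaction processing.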
pub fn num_threads() -> u32 {
cmp::max(
env::var("SOLANA_BANKING_THREADS")
.map(|x| x.parse().unwrap_or(NUM_THREADS))
.unwrap_or(NUM_THREADS),
NUM_VOTE_PROCESSING_THREADS + MIN_THREADS_BANKING,
)
}
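// Record the committable transactions with the PoH recorder. On MaxHeightReached the
// committable transactions are returned as retryable so they can be re-buffered.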
#[allow(clippy::match_wild_err_arm)]
fn record_transactions(
bank_slot: Slot,
txs: &[SanitizedTransaction],
results: &[TransactionExecutionResult],
recorder: &TransactionRecorder,
) -> (Result<usize, PohRecorderError>, Vec<usize>) {
let mut processed_generation = Measure::start("record::process_generation");
let (processed_transactions, processed_transactions_indexes): (Vec<_>, Vec<_>) = results
.iter()
.zip(txs)
.enumerate()
.filter_map(|(i, ((r, _n), tx))| {
if Bank::can_commit(r) {
Some((tx.to_versioned_transaction(), i))
} else {
None
}
})
.unzip();
processed_generation.stop();
let num_to_commit = processed_transactions.len();
debug!("num_to_commit: {} ", num_to_commit);
// unlock all the accounts with errors which are filtered by the above `filter_map`
if !processed_transactions.is_empty() {
inc_new_counter_info!("banking_stage-record_count", 1);
inc_new_counter_info!("banking_stage-record_transactions", num_to_commit);
let mut hash_time = Measure::start("record::hash");
let hash = hash_transactions(&processed_transactions[..]);
hash_time.stop();
let mut poh_record = Measure::start("record::poh_record");
// record and unlock will unlock all the successful transactions
let res = recorder.record(bank_slot, hash, processed_transactions);
match res {
Ok(()) => (),
Err(PohRecorderError::MaxHeightReached) => {
inc_new_counter_info!("banking_stage-max_height_reached", 1);
inc_new_counter_info!(
"banking_stage-max_height_reached_num_to_commit",
num_to_commit
);
// If record errors, add all the committable transactions (the ones
// we just attempted to record) as retryable
return (
Err(PohRecorderError::MaxHeightReached),
processed_transactions_indexes,
);
}
Err(e) => panic!("Poh recorder returned unexpected error: {:?}", e),
}
poh_record.stop();
}
(Ok(num_to_commit), vec![])
}
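// Execute the already-locked transaction batch against the bank, record the
// committable transactions with PoH, commit them, and send balance/status updates if
// a transaction status sender is attached.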
fn process_and_record_transactions_locked(
bank: &Arc<Bank>,
poh: &TransactionRecorder,
batch: &TransactionBatch,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> (Result<usize, PohRecorderError>, Vec<usize>) {
let mut load_execute_time = Measure::start("load_execute_time");
// Use a shorter maximum age when adding transactions into the pipeline. This will reduce
// the likelihood of any single thread getting starved and processing old ids.
// TODO: Banking stage threads should be prioritized to complete faster than this queue
// expires.
let pre_balances = if transaction_status_sender.is_some() {
bank.collect_balances(batch)
} else {
vec![]
};
let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new();
let pre_token_balances = if transaction_status_sender.is_some() {
collect_token_balances(bank, batch, &mut mint_decimals)
} else {
vec![]
};
let mut execute_timings = ExecuteTimings::default();
let (
mut loaded_accounts,
results,
inner_instructions,
transaction_logs,
mut retryable_txs,
tx_count,
signature_count,
) = bank.load_and_execute_transactions(
batch,
MAX_PROCESSING_AGE,
transaction_status_sender.is_some(),
transaction_status_sender.is_some(),
&mut execute_timings,
);
load_execute_time.stop();
let freeze_lock = bank.freeze_lock();
let mut record_time = Measure::start("record_time");
let (num_to_commit, retryable_record_txs) =
Self::record_transactions(bank.slot(), batch.sanitized_transactions(), &results, poh);
inc_new_counter_info!(
"banking_stage-record_transactions_num_to_commit",
*num_to_commit.as_ref().unwrap_or(&0)
);
inc_new_counter_info!(
"banking_stage-record_transactions_retryable_record_txs",
retryable_record_txs.len()
);
retryable_txs.extend(retryable_record_txs);
if num_to_commit.is_err() {
return (num_to_commit, retryable_txs);
}
record_time.stop();
let mut commit_time = Measure::start("commit_time");
let sanitized_txs = batch.sanitized_transactions();
let num_to_commit = num_to_commit.unwrap();
if num_to_commit != 0 {
let tx_results = bank.commit_transactions(
sanitized_txs,
&mut loaded_accounts,
&results,
tx_count,
signature_count,
&mut execute_timings,
);
bank_utils::find_and_send_votes(sanitized_txs, &tx_results, Some(gossip_vote_sender));
if let Some(transaction_status_sender) = transaction_status_sender {
let txs = batch.sanitized_transactions().to_vec();
let post_balances = bank.collect_balances(batch);
let post_token_balances = collect_token_balances(bank, batch, &mut mint_decimals);
transaction_status_sender.send_transaction_status_batch(
bank.clone(),
txs,
tx_results.execution_results,
TransactionBalancesSet::new(pre_balances, post_balances),
TransactionTokenBalancesSet::new(pre_token_balances, post_token_balances),
inner_instructions,
transaction_logs,
tx_results.rent_debits,
);
}
}
commit_time.stop();
drop(freeze_lock);
debug!(
"bank: {} process_and_record_locked: {}us record: {}us commit: {}us txs_len: {}",
bank.slot(),
load_execute_time.as_us(),
record_time.as_us(),
commit_time.as_us(),
sanitized_txs.len(),
);
debug!(
"process_and_record_transactions_locked: {:?}",
execute_timings
);
(Ok(num_to_commit), retryable_txs)
}
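/// Locks the accounts for `txs`, processes and records them, and returns the record
/// result along with the retryable transaction indexes offset by `chunk_offset`.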
pub fn process_and_record_transactions(
bank: &Arc<Bank>,
txs: &[SanitizedTransaction],
poh: &TransactionRecorder,
chunk_offset: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> (Result<usize, PohRecorderError>, Vec<usize>) {
let mut lock_time = Measure::start("lock_time");
// Once accounts are locked, other threads cannot encode transactions that will modify the
// same account state
let batch = bank.prepare_sanitized_batch(txs);
lock_time.stop();
let (result, mut retryable_txs) = Self::process_and_record_transactions_locked(
bank,
poh,
&batch,
transaction_status_sender,
gossip_vote_sender,
);
retryable_txs.iter_mut().for_each(|x| *x += chunk_offset);
let mut unlock_time = Measure::start("unlock_time");
// Once the accounts are unlocked, new transactions can enter the pipeline to process them
drop(batch);
unlock_time.stop();
debug!(
"bank: {} lock: {}us unlock: {}us txs_len: {}",
bank.slot(),
lock_time.as_us(),
unlock_time.as_us(),
txs.len(),
);
(result, retryable_txs)
}
/// Sends transactions to the bank.
///
/// Returns the number of transactions successfully processed by the bank, which may be less
/// than the total number if max PoH height was reached and the bank halted
fn process_transactions(
bank: &Arc<Bank>,
bank_creation_time: &Instant,
transactions: &[SanitizedTransaction],
poh: &TransactionRecorder,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
) -> (usize, Vec<usize>) {
let mut chunk_start = 0;
let mut unprocessed_txs = vec![];
while chunk_start != transactions.len() {
let chunk_end = std::cmp::min(
transactions.len(),
chunk_start + MAX_NUM_TRANSACTIONS_PER_BATCH,
);
let (result, retryable_txs_in_chunk) = Self::process_and_record_transactions(
bank,
&transactions[chunk_start..chunk_end],
poh,
chunk_start,
transaction_status_sender.clone(),
gossip_vote_sender,
);
trace!("process_transactions result: {:?}", result);
// Add the retryable txs (transactions that errored in a way that warrants a retry)
// to the list of unprocessed txs.
unprocessed_txs.extend_from_slice(&retryable_txs_in_chunk);
// Check whether the bank is still within its allotted window for
// processing transactions
let should_bank_still_be_processing_txs =
Bank::should_bank_still_be_processing_txs(bank_creation_time, bank.ns_per_slot);
match (result, should_bank_still_be_processing_txs) {
(Err(PohRecorderError::MaxHeightReached), _) | (_, false) => {
info!(
"process transactions: max height reached slot: {} height: {}",
bank.slot(),
bank.tick_height()
);
// process_and_record_transactions has returned all retryable errors in
// transactions[chunk_start..chunk_end], so we just need to push the remaining
// transactions into the unprocessed queue.
unprocessed_txs.extend(chunk_end..transactions.len());
break;
}
_ => (),
}
// Don't exit early on any other type of error, continue processing...
chunk_start = chunk_end;
}
(chunk_start, unprocessed_txs)
}
// This function creates a filter of transaction results with Ok() for every pending
// transaction. The non-pending transactions are marked with TransactionError
fn prepare_filter_for_pending_transactions(
transactions_len: usize,
pending_tx_indexes: &[usize],
) -> Vec<transaction::Result<()>> {
let mut mask = vec![Err(TransactionError::BlockhashNotFound); transactions_len];
pending_tx_indexes.iter().for_each(|x| mask[*x] = Ok(()));
mask
}
// This function returns a vector containing index of all valid transactions. A valid
// transaction has result Ok() as the value
fn filter_valid_transaction_indexes(
valid_txs: &[TransactionCheckResult],
transaction_indexes: &[usize],
) -> Vec<usize> {
valid_txs
.iter()
.enumerate()
.filter_map(|(index, (x, _h))| if x.is_ok() { Some(index) } else { None })
.map(|x| transaction_indexes[x])
.collect_vec()
}
/// Read the transaction message from packet data
fn packet_message(packet: &Packet) -> Option<&[u8]> {
let (sig_len, sig_size) = decode_shortu16_len(&packet.data).ok()?;
let msg_start = sig_len
.checked_mul(size_of::<Signature>())
.and_then(|v| v.checked_add(sig_size))?;
let msg_end = packet.meta.size;
Some(&packet.data[msg_start..msg_end])
}
// This function deserializes packets into transactions, computes the blake3 hash of transaction messages,
// and verifies secp256k1 instructions. A list of valid transactions is returned with their message hashes
// and packet indexes.
// Also returned are the packet indexes of transactions that should be retried due to cost limits.
#[allow(clippy::needless_collect)]
fn transactions_from_packets(
msgs: &Packets,
transaction_indexes: &[usize],
feature_set: &Arc<feature_set::FeatureSet>,
read_cost_tracker: &RwLockReadGuard<CostTracker>,
banking_stage_stats: &BankingStageStats,
demote_program_write_locks: bool,
votes_only: bool,
cost_model: &Arc<RwLock<CostModel>>,
) -> (Vec<SanitizedTransaction>, Vec<usize>, Vec<usize>) {
let mut retryable_transaction_packet_indexes: Vec<usize> = vec![];
let verified_transactions_with_packet_indexes: Vec<_> = transaction_indexes
.iter()
.filter_map(|tx_index| {
let p = &msgs.packets[*tx_index];
if votes_only && !p.meta.is_simple_vote_tx {
return None;
}
let tx: VersionedTransaction = limited_deserialize(&p.data[0..p.meta.size]).ok()?;
let message_bytes = Self::packet_message(p)?;
let message_hash = Message::hash_raw_message(message_bytes);
let tx = SanitizedTransaction::try_create(
tx,
message_hash,
Some(p.meta.is_simple_vote_tx),
|_| Err(TransactionError::UnsupportedVersion),
)
.ok()?;
tx.verify_precompiles(feature_set).ok()?;
Some((tx, *tx_index))
})
.collect();
banking_stage_stats.cost_tracker_check_count.fetch_add(
verified_transactions_with_packet_indexes.len(),
Ordering::Relaxed,
);
let mut cost_tracker_check_time = Measure::start("cost_tracker_check_time");
let (filtered_transactions, filter_transaction_packet_indexes) = {
verified_transactions_with_packet_indexes
.into_iter()
.filter_map(|(tx, tx_index)| {
// excluding vote TX from cost_model, for now
let is_vote = &msgs.packets[tx_index].meta.is_simple_vote_tx;
if !is_vote
&& read_cost_tracker
.would_transaction_fit(
&tx,
&cost_model
.read()
.unwrap()
.calculate_cost(&tx, demote_program_write_locks),
)
.is_err()
{
// put transaction into retry queue if it wouldn't fit
// into current bank
debug!("transaction {:?} would exceed limit", tx);
retryable_transaction_packet_indexes.push(tx_index);
return None;
}
Some((tx, tx_index))
})
.unzip()
};
cost_tracker_check_time.stop();
banking_stage_stats
.cost_tracker_check_elapsed
.fetch_add(cost_tracker_check_time.as_us(), Ordering::Relaxed);
(
filtered_transactions,
filter_transaction_packet_indexes,
retryable_transaction_packet_indexes,
)
}
/// This function filters pending packets that are still valid
/// # Arguments
/// * `transactions` - a batch of transactions deserialized from packets
/// * `transaction_to_packet_indexes` - maps each transaction to a packet index
/// * `pending_indexes` - identifies which indexes in the `transactions` list are still pending
fn filter_pending_packets_from_pending_txs(
bank: &Arc<Bank>,
transactions: &[SanitizedTransaction],
transaction_to_packet_indexes: &[usize],
pending_indexes: &[usize],
) -> Vec<usize> {
let filter =
Self::prepare_filter_for_pending_transactions(transactions.len(), pending_indexes);
let mut error_counters = ErrorCounters::default();
// The following code also checks if the blockhash for a transaction is too old
// The check accounts for
// 1. Transaction forwarding delay
// 2. The slot at which the next leader will actually process the transaction
// Drop the transaction if it will expire by the time the next node receives and processes it
let api = perf_libs::api();
let max_tx_fwd_delay = if api.is_none() {
MAX_TRANSACTION_FORWARDING_DELAY
} else {
MAX_TRANSACTION_FORWARDING_DELAY_GPU
};
let results = bank.check_transactions(
transactions,
&filter,
(MAX_PROCESSING_AGE)
.saturating_sub(max_tx_fwd_delay)
.saturating_sub(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET as usize),
&mut error_counters,
);
Self::filter_valid_transaction_indexes(&results, transaction_to_packet_indexes)
}
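// Convert the packets at `packet_indexes` into sanitized transactions, process them
// against the bank, update the shared cost tracker, and return the processed count,
// the transaction count, and the packet indexes that should be re-buffered.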
#[allow(clippy::too_many_arguments)]
fn process_packets_transactions(
bank: &Arc<Bank>,
bank_creation_time: &Instant,
poh: &TransactionRecorder,
msgs: &Packets,
packet_indexes: Vec<usize>,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
banking_stage_stats: &BankingStageStats,
cost_model: &Arc<RwLock<CostModel>>,
) -> (usize, usize, Vec<usize>) {
let mut packet_conversion_time = Measure::start("packet_conversion");
let (transactions, transaction_to_packet_indexes, retryable_packet_indexes) =
Self::transactions_from_packets(
msgs,
&packet_indexes,
&bank.feature_set,
&bank.read_cost_tracker().unwrap(),
banking_stage_stats,
bank.demote_program_write_locks(),
bank.vote_only_bank(),
cost_model,
);
packet_conversion_time.stop();
inc_new_counter_info!("banking_stage-packet_conversion", 1);
banking_stage_stats
.cost_forced_retry_transactions_count
.fetch_add(retryable_packet_indexes.len(), Ordering::Relaxed);
debug!(
"bank: {} filtered transactions {} cost limited transactions {}",
bank.slot(),
transactions.len(),
retryable_packet_indexes.len()
);
let tx_len = transactions.len();
let mut process_tx_time = Measure::start("process_tx_time");
let (processed, unprocessed_tx_indexes) = Self::process_transactions(
bank,
bank_creation_time,
&transactions,
poh,
transaction_status_sender,
gossip_vote_sender,
);
process_tx_time.stop();
let unprocessed_tx_count = unprocessed_tx_indexes.len();
inc_new_counter_info!(
"banking_stage-unprocessed_transactions",
unprocessed_tx_count
);
// applying cost of processed transactions to shared cost_tracker
let mut cost_tracking_time = Measure::start("cost_tracking_time");
transactions.iter().enumerate().for_each(|(index, tx)| {
if unprocessed_tx_indexes.iter().all(|&i| i != index) {
bank.write_cost_tracker().unwrap().add_transaction_cost(
tx,
&cost_model
.read()
.unwrap()
.calculate_cost(tx, bank.demote_program_write_locks()),
);
}
});
cost_tracking_time.stop();
let mut filter_pending_packets_time = Measure::start("filter_pending_packets_time");
let mut filtered_unprocessed_packet_indexes = Self::filter_pending_packets_from_pending_txs(
bank,
&transactions,
&transaction_to_packet_indexes,
&unprocessed_tx_indexes,
);
filter_pending_packets_time.stop();
inc_new_counter_info!(
"banking_stage-dropped_tx_before_forwarding",
unprocessed_tx_count.saturating_sub(filtered_unprocessed_packet_indexes.len())
);
// combine cost-related unprocessed transactions with bank determined unprocessed for
// buffering
filtered_unprocessed_packet_indexes.extend(retryable_packet_indexes);
banking_stage_stats
.packet_conversion_elapsed
.fetch_add(packet_conversion_time.as_us(), Ordering::Relaxed);
banking_stage_stats
.transaction_processing_elapsed
.fetch_add(process_tx_time.as_us(), Ordering::Relaxed);
banking_stage_stats
.cost_tracker_update_elapsed
.fetch_add(cost_tracking_time.as_us(), Ordering::Relaxed);
banking_stage_stats
.filter_pending_packets_elapsed
.fetch_add(filter_pending_packets_time.as_us(), Ordering::Relaxed);
(processed, tx_len, filtered_unprocessed_packet_indexes)
}
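// Re-check buffered packets against the bank (e.g. blockhash age) before forwarding,
// returning only the packet indexes that are still worth keeping.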
fn filter_unprocessed_packets(
bank: &Arc<Bank>,
msgs: &Packets,
transaction_indexes: &[usize],
my_pubkey: &Pubkey,
next_leader: Option<Pubkey>,
banking_stage_stats: &BankingStageStats,
cost_model: &Arc<RwLock<CostModel>>,
) -> Vec<usize> {
// Check if we are the next leader. If so, let's not filter the packets
// as we'll filter them again while processing the packets.
// Filtering helps if we were going to forward the packets to some other node
if let Some(leader) = next_leader {
if leader == *my_pubkey {
return transaction_indexes.to_vec();
}
}
let mut unprocessed_packet_conversion_time =
Measure::start("unprocessed_packet_conversion");
let (transactions, transaction_to_packet_indexes, retry_packet_indexes) =
Self::transactions_from_packets(
msgs,
transaction_indexes,
&bank.feature_set,
&bank.read_cost_tracker().unwrap(),
banking_stage_stats,
bank.demote_program_write_locks(),
bank.vote_only_bank(),
cost_model,
);
unprocessed_packet_conversion_time.stop();
let tx_count = transaction_to_packet_indexes.len();
let unprocessed_tx_indexes = (0..transactions.len()).collect_vec();
let mut filtered_unprocessed_packet_indexes = Self::filter_pending_packets_from_pending_txs(
bank,
&transactions,
&transaction_to_packet_indexes,
&unprocessed_tx_indexes,
);
filtered_unprocessed_packet_indexes.extend(retry_packet_indexes);
inc_new_counter_info!(
"banking_stage-dropped_tx_before_forwarding",
tx_count.saturating_sub(filtered_unprocessed_packet_indexes.len())
);
banking_stage_stats
.unprocessed_packet_conversion_elapsed
.fetch_add(
unprocessed_packet_conversion_time.as_us(),
Ordering::Relaxed,
);
filtered_unprocessed_packet_indexes
}
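// Return the indexes of packets that were not marked for discard during signature
// verification.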
fn generate_packet_indexes(vers: &PinnedVec<Packet>) -> Vec<usize> {
vers.iter()
.enumerate()
.filter_map(
|(index, ver)| {
if !ver.meta.discard {
Some(index)
} else {
None
}
},
)
.collect()
}
#[allow(clippy::too_many_arguments)]
/// Process the incoming packets
fn process_packets(
my_pubkey: &Pubkey,
verified_receiver: &CrossbeamReceiver<Vec<Packets>>,
poh: &Arc<Mutex<PohRecorder>>,
recv_start: &mut Instant,
recv_timeout: Duration,
id: u32,
batch_limit: usize,
transaction_status_sender: Option<TransactionStatusSender>,
gossip_vote_sender: &ReplayVoteSender,
buffered_packets: &mut UnprocessedPackets,
banking_stage_stats: &BankingStageStats,
duplicates: &Arc<Mutex<(LruCache<u64, ()>, PacketHasher)>>,
recorder: &TransactionRecorder,
cost_model: &Arc<RwLock<CostModel>>,
) -> Result<(), RecvTimeoutError> {
let mut recv_time = Measure::start("process_packets_recv");
let mms = verified_receiver.recv_timeout(recv_timeout)?;
recv_time.stop();
let mms_len = mms.len();
let count: usize = mms.iter().map(|x| x.packets.len()).sum();
debug!(
"@{:?} process start stalled for: {:?}ms txs: {} id: {}",
timestamp(),
duration_as_ms(&recv_start.elapsed()),
count,
id,
);
inc_new_counter_debug!("banking_stage-transactions_received", count);
let mut proc_start = Measure::start("process_packets_transactions_process");
let mut new_tx_count = 0;
let mut mms_iter = mms.into_iter();
let mut dropped_packets_count = 0;
let mut dropped_packet_batches_count = 0;
let mut newly_buffered_packets_count = 0;
while let Some(msgs) = mms_iter.next() {
let packet_indexes = Self::generate_packet_indexes(&msgs.packets);
let poh_recorder_bank = poh.lock().unwrap().get_poh_recorder_bank();
let working_bank_start = poh_recorder_bank.working_bank_start();
if PohRecorder::get_working_bank_if_not_expired(&working_bank_start).is_none() {
Self::push_unprocessed(
buffered_packets,
msgs,
packet_indexes,
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
&mut newly_buffered_packets_count,
batch_limit,
duplicates,
banking_stage_stats,
);
continue;
}
// Destructure the `BankStart` behind an Arc
let BankStart {
working_bank,
bank_creation_time,
} = &*working_bank_start.unwrap();
let (processed, verified_txs_len, unprocessed_indexes) =
Self::process_packets_transactions(
working_bank,
bank_creation_time,
recorder,
&msgs,
packet_indexes,
transaction_status_sender.clone(),
gossip_vote_sender,
banking_stage_stats,
cost_model,
);
new_tx_count += processed;
// Collect any unprocessed transactions in this batch for forwarding
Self::push_unprocessed(
buffered_packets,
msgs,
unprocessed_indexes,
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
&mut newly_buffered_packets_count,
batch_limit,
duplicates,
banking_stage_stats,
);
// If there were retryable transactions, add the unexpired ones to the buffered queue
if processed < verified_txs_len {
let mut handle_retryable_packets_time = Measure::start("handle_retryable_packets");
let next_leader = poh.lock().unwrap().next_slot_leader();
// Walk through the rest of the transactions and filter out the invalid (e.g. too old) ones
#[allow(clippy::while_let_on_iterator)]
while let Some(msgs) = mms_iter.next() {
let packet_indexes = Self::generate_packet_indexes(&msgs.packets);
let unprocessed_indexes = Self::filter_unprocessed_packets(
working_bank,
&msgs,
&packet_indexes,
my_pubkey,
next_leader,
banking_stage_stats,
cost_model,
);
Self::push_unprocessed(
buffered_packets,
msgs,
unprocessed_indexes,
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
&mut newly_buffered_packets_count,
batch_limit,
duplicates,
banking_stage_stats,
);
}
handle_retryable_packets_time.stop();
banking_stage_stats
.handle_retryable_packets_elapsed
.fetch_add(handle_retryable_packets_time.as_us(), Ordering::Relaxed);
}
}
proc_start.stop();
debug!(
"@{:?} done processing transaction batches: {} time: {:?}ms tx count: {} tx/s: {} total count: {} id: {}",
timestamp(),
mms_len,
proc_start.as_ms(),
new_tx_count,
(new_tx_count as f32) / (proc_start.as_s()),
count,
id,
);
banking_stage_stats
.process_packets_elapsed
.fetch_add(proc_start.as_us(), Ordering::Relaxed);
banking_stage_stats
.process_packets_count
.fetch_add(count, Ordering::Relaxed);
banking_stage_stats
.new_tx_count
.fetch_add(new_tx_count, Ordering::Relaxed);
banking_stage_stats
.dropped_packet_batches_count
.fetch_add(dropped_packet_batches_count, Ordering::Relaxed);
banking_stage_stats
.dropped_packets_count
.fetch_add(dropped_packets_count, Ordering::Relaxed);
banking_stage_stats
.newly_buffered_packets_count
.fetch_add(newly_buffered_packets_count, Ordering::Relaxed);
banking_stage_stats
.current_buffered_packet_batches_count
.swap(buffered_packets.len(), Ordering::Relaxed);
banking_stage_stats.current_buffered_packets_count.swap(
buffered_packets.iter().map(|packets| packets.1.len()).sum(),
Ordering::Relaxed,
);
*recv_start = Instant::now();
Ok(())
}
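// Deduplicate the incoming packets against the LRU cache, then buffer the remaining
// ones, evicting the oldest batch if the buffer is already at `batch_limit`.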
fn push_unprocessed(
unprocessed_packets: &mut UnprocessedPackets,
packets: Packets,
mut packet_indexes: Vec<usize>,
dropped_packet_batches_count: &mut usize,
dropped_packets_count: &mut usize,
newly_buffered_packets_count: &mut usize,
batch_limit: usize,
duplicates: &Arc<Mutex<(LruCache<u64, ()>, PacketHasher)>>,
banking_stage_stats: &BankingStageStats,
) {
{
let original_packets_count = packet_indexes.len();
let mut packet_duplicate_check_time = Measure::start("packet_duplicate_check");
let mut duplicates = duplicates.lock().unwrap();
let (cache, hasher) = duplicates.deref_mut();
packet_indexes.retain(|i| {
let packet_hash = hasher.hash_packet(&packets.packets[*i]);
match cache.get_mut(&packet_hash) {
Some(_hash) => false,
None => {
cache.put(packet_hash, ());
true
}
}
});
packet_duplicate_check_time.stop();
banking_stage_stats
.packet_duplicate_check_elapsed
.fetch_add(packet_duplicate_check_time.as_us(), Ordering::Relaxed);
banking_stage_stats
.dropped_duplicated_packets_count
.fetch_add(
original_packets_count.saturating_sub(packet_indexes.len()),
Ordering::Relaxed,
);
}
if Self::packet_has_more_unprocessed_transactions(&packet_indexes) {
if unprocessed_packets.len() >= batch_limit {
*dropped_packet_batches_count += 1;
if let Some(dropped_batch) = unprocessed_packets.pop_front() {
*dropped_packets_count += dropped_batch.1.len();
}
}
*newly_buffered_packets_count += packet_indexes.len();
unprocessed_packets.push_back((packets, packet_indexes, false));
}
}
fn packet_has_more_unprocessed_transactions(packet_indexes: &[usize]) -> bool {
!packet_indexes.is_empty()
}
pub fn join(self) -> thread::Result<()> {
for bank_thread_hdl in self.bank_thread_hdls {
bank_thread_hdl.join()?;
}
Ok(())
}
}
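/// Returns the TPU address of the leader expected a few slots from now
/// (`FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET`), if its contact info is known.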
pub(crate) fn next_leader_tpu(
cluster_info: &ClusterInfo,
poh_recorder: &Mutex<PohRecorder>,
) -> Option<std::net::SocketAddr> {
next_leader_x(cluster_info, poh_recorder, |leader| leader.tpu)
}
fn next_leader_tpu_forwards(
cluster_info: &ClusterInfo,
poh_recorder: &Mutex<PohRecorder>,
) -> Option<std::net::SocketAddr> {
next_leader_x(cluster_info, poh_recorder, |leader| leader.tpu_forwards)
}
pub(crate) fn next_leader_tpu_vote(
cluster_info: &ClusterInfo,
poh_recorder: &Mutex<PohRecorder>,
) -> Option<std::net::SocketAddr> {
next_leader_x(cluster_info, poh_recorder, |leader| leader.tpu_vote)
}
fn next_leader_x<F>(
cluster_info: &ClusterInfo,
poh_recorder: &Mutex<PohRecorder>,
port_selector: F,
) -> Option<std::net::SocketAddr>
where
F: FnOnce(&ContactInfo) -> SocketAddr,
{
if let Some(leader_pubkey) = poh_recorder
.lock()
.unwrap()
.leader_after_n_slots(FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET)
{
cluster_info.lookup_contact_info(&leader_pubkey, port_selector)
} else {
None
}
}
#[cfg(test)]
mod tests {
use super::*;
use crossbeam_channel::unbounded;
use itertools::Itertools;
use analog_entry::entry::{next_entry, Entry, EntrySlice};
use analog_gossip::{cluster_info::Node, contact_info::ContactInfo};
use analog_ledger::{
blockstore::{entries_to_test_shreds, Blockstore},
genesis_utils::{create_genesis_config, GenesisConfigInfo},
get_tmp_ledger_path,
leader_schedule_cache::LeaderScheduleCache,
};
use analog_perf::packet::to_packets_chunked;
use analog_poh::{
poh_recorder::{create_test_recorder, Record, WorkingBankEntry},
poh_service::PohService,
};
use analog_rpc::transaction_status_service::TransactionStatusService;
use analog_runtime::cost_model::CostModel;
use analog_sdk::{
hash::Hash,
instruction::InstructionError,
poh_config::PohConfig,
signature::{Keypair, Signer},
system_instruction::SystemError,
system_transaction,
transaction::{Transaction, TransactionError},
};
use analog_streamer::socket::SocketAddrSpace;
use analog_transaction_status::TransactionWithStatusMeta;
use analog_vote_program::vote_transaction;
use std::{
net::SocketAddr,
path::Path,
sync::{
atomic::{AtomicBool, Ordering},
mpsc::Receiver,
},
thread::sleep,
};
fn new_test_cluster_info(contact_info: ContactInfo) -> ClusterInfo {
ClusterInfo::new(
contact_info,
Arc::new(Keypair::new()),
SocketAddrSpace::Unspecified,
)
}
#[test]
fn test_banking_stage_shutdown1() {
let genesis_config = create_genesis_config(2).genesis_config;
let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
let (verified_sender, verified_receiver) = unbounded();
let (gossip_verified_vote_sender, gossip_verified_vote_receiver) = unbounded();
let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let (exit, poh_recorder, poh_service, _entry_receiver) =
create_test_recorder(&bank, &blockstore, None);
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
verified_receiver,
tpu_vote_receiver,
gossip_verified_vote_receiver,
None,
gossip_vote_sender,
Arc::new(RwLock::new(CostModel::default())),
);
drop(verified_sender);
drop(gossip_verified_vote_sender);
drop(tpu_vote_sender);
exit.store(true, Ordering::Relaxed);
banking_stage.join().unwrap();
poh_service.join().unwrap();
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_banking_stage_tick() {
analog_logger::setup();
let GenesisConfigInfo {
mut genesis_config, ..
} = create_genesis_config(2);
genesis_config.ticks_per_slot = 4;
let num_extra_ticks = 2;
let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
let start_hash = bank.last_blockhash();
let (verified_sender, verified_receiver) = unbounded();
let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let poh_config = PohConfig {
target_tick_count: Some(bank.max_tick_height() + num_extra_ticks),
..PohConfig::default()
};
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let (verified_gossip_vote_sender, verified_gossip_vote_receiver) = unbounded();
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
verified_receiver,
tpu_vote_receiver,
verified_gossip_vote_receiver,
None,
gossip_vote_sender,
Arc::new(RwLock::new(CostModel::default())),
);
trace!("sending bank");
drop(verified_sender);
drop(verified_gossip_vote_sender);
drop(tpu_vote_sender);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
drop(poh_recorder);
trace!("getting entries");
let entries: Vec<_> = entry_receiver
.iter()
.map(|(_bank, (entry, _tick_height))| entry)
.collect();
trace!("done");
assert_eq!(entries.len(), genesis_config.ticks_per_slot as usize);
assert!(entries.verify(&start_hash));
assert_eq!(entries[entries.len() - 1].hash, bank.last_blockhash());
banking_stage.join().unwrap();
}
Blockstore::destroy(&ledger_path).unwrap();
}
pub fn convert_from_old_verified(mut with_vers: Vec<(Packets, Vec<u8>)>) -> Vec<Packets> {
with_vers.iter_mut().for_each(|(b, v)| {
b.packets
.iter_mut()
.zip(v)
.for_each(|(p, f)| p.meta.discard = *f == 0)
});
with_vers.into_iter().map(|(b, _)| b).collect()
}
#[test]
fn test_banking_stage_entries_only() {
analog_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_slow_genesis_config(10);
let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
let start_hash = bank.last_blockhash();
let (verified_sender, verified_receiver) = unbounded();
let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
let (gossip_verified_vote_sender, gossip_verified_vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let poh_config = PohConfig {
// limit tick count to avoid clearing working_bank at PohRecorder and then
// hitting PohRecorderError(MaxHeightReached) at BankingStage
target_tick_count: Some(bank.max_tick_height() - 1),
..PohConfig::default()
};
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let banking_stage = BankingStage::new(
&cluster_info,
&poh_recorder,
verified_receiver,
tpu_vote_receiver,
gossip_verified_vote_receiver,
None,
gossip_vote_sender,
Arc::new(RwLock::new(CostModel::default())),
);
// fund another account so we can send 2 good transactions in a single batch.
let keypair = Keypair::new();
let fund_tx =
system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 2, start_hash);
bank.process_transaction(&fund_tx).unwrap();
// good tx
let to = analog_sdk::pubkey::new_rand();
let tx = system_transaction::transfer(&mint_keypair, &to, 1, start_hash);
// good tx, but no verify
let to2 = analog_sdk::pubkey::new_rand();
let tx_no_ver = system_transaction::transfer(&keypair, &to2, 2, start_hash);
// bad tx, AccountNotFound
let keypair = Keypair::new();
let to3 = analog_sdk::pubkey::new_rand();
let tx_anf = system_transaction::transfer(&keypair, &to3, 1, start_hash);
// send 'em over
let packets = to_packets_chunked(&[tx_no_ver, tx_anf, tx], 3);
// glad they all fit
assert_eq!(packets.len(), 1);
let packets = packets
.into_iter()
.map(|packets| (packets, vec![0u8, 1u8, 1u8]))
.collect();
let packets = convert_from_old_verified(packets);
verified_sender // no_ver, anf, tx
.send(packets)
.unwrap();
drop(verified_sender);
drop(tpu_vote_sender);
drop(gossip_verified_vote_sender);
// wait for banking_stage to finish processing all packets
banking_stage.join().unwrap();
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
drop(poh_recorder);
let mut blockhash = start_hash;
let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
bank.process_transaction(&fund_tx).unwrap();
// receive entries + ticks
loop {
let entries: Vec<Entry> = entry_receiver
.iter()
.map(|(_bank, (entry, _tick_height))| entry)
.collect();
assert!(entries.verify(&blockhash));
if !entries.is_empty() {
blockhash = entries.last().unwrap().hash;
for entry in entries {
bank.process_entry_transactions(entry.transactions)
.iter()
.for_each(|x| assert_eq!(*x, Ok(())));
}
}
if bank.get_balance(&to) == 1 {
break;
}
sleep(Duration::from_millis(200));
}
assert_eq!(bank.get_balance(&to), 1);
assert_eq!(bank.get_balance(&to2), 0);
drop(entry_receiver);
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_banking_stage_entryfication() {
analog_logger::setup();
// In this attack we'll demonstrate that a verifier can interpret the ledger
// differently if either the server doesn't signal the ledger to add an
// Entry OR if the verifier tries to parallelize across multiple Entries.
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_slow_genesis_config(2);
let (verified_sender, verified_receiver) = unbounded();
// Process a batch that includes a transaction that receives two tock.
let alice = Keypair::new();
let tx =
system_transaction::transfer(&mint_keypair, &alice.pubkey(), 2, genesis_config.hash());
let packets = to_packets_chunked(&[tx], 1);
let packets = packets
.into_iter()
.map(|packets| (packets, vec![1u8]))
.collect();
let packets = convert_from_old_verified(packets);
verified_sender.send(packets).unwrap();
// Process a second batch that uses the same from account, so conflicts with above TX
let tx =
system_transaction::transfer(&mint_keypair, &alice.pubkey(), 1, genesis_config.hash());
let packets = to_packets_chunked(&[tx], 1);
let packets = packets
.into_iter()
.map(|packets| (packets, vec![1u8]))
.collect();
let packets = convert_from_old_verified(packets);
verified_sender.send(packets).unwrap();
let (vote_sender, vote_receiver) = unbounded();
let (tpu_vote_sender, tpu_vote_receiver) = unbounded();
let ledger_path = get_tmp_ledger_path!();
{
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let entry_receiver = {
// start a banking_stage to eat verified receiver
let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let poh_config = PohConfig {
// limit tick count to avoid clearing working_bank at PohRecorder and then
// hitting PohRecorderError(MaxHeightReached) at BankingStage
target_tick_count: Some(bank.max_tick_height() - 1),
..PohConfig::default()
};
let (exit, poh_recorder, poh_service, entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let cluster_info = Arc::new(cluster_info);
let _banking_stage = BankingStage::new_num_threads(
&cluster_info,
&poh_recorder,
verified_receiver,
tpu_vote_receiver,
vote_receiver,
3,
None,
gossip_vote_sender,
Arc::new(RwLock::new(CostModel::default())),
);
// wait for banking_stage to eat the packets
while bank.get_balance(&alice.pubkey()) < 2 {
sleep(Duration::from_millis(100));
}
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
entry_receiver
};
drop(verified_sender);
drop(vote_sender);
drop(tpu_vote_sender);
// consume the entire entry_receiver, feed it into a new bank
// check that the balance is what we expect.
let entries: Vec<_> = entry_receiver
.iter()
.map(|(_bank, (entry, _tick_height))| entry)
.collect();
let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config);
for entry in entries {
bank.process_entry_transactions(entry.transactions)
.iter()
.for_each(|x| assert_eq!(*x, Ok(())));
}
// Assert the user holds two tock, not three. If the stage only outputs one
// entry, then the second transaction will be rejected, because it drives
// the account balance below zero before the credit is added.
assert_eq!(bank.get_balance(&alice.pubkey()), 2);
}
Blockstore::destroy(&ledger_path).unwrap();
}
fn sanitize_transactions(txs: Vec<Transaction>) -> Vec<SanitizedTransaction> {
txs.into_iter()
.map(SanitizedTransaction::from_transaction_for_tests)
.collect()
}
#[test]
fn test_bank_record_transactions() {
analog_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(10_000);
let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
// TODO use record_receiver
bank.tick_height(),
bank.last_blockhash(),
bank.clone(),
None,
bank.ticks_per_slot(),
&Pubkey::default(),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
Arc::new(AtomicBool::default()),
);
let recorder = poh_recorder.recorder();
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let poh_simulator = simulate_poh(record_receiver, &poh_recorder);
poh_recorder.lock().unwrap().set_bank(&bank);
let pubkey = analog_sdk::pubkey::new_rand();
let keypair2 = Keypair::new();
let pubkey2 = analog_sdk::pubkey::new_rand();
let txs = sanitize_transactions(vec![
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()),
system_transaction::transfer(&keypair2, &pubkey2, 1, genesis_config.hash()),
]);
let mut results = vec![(Ok(()), None), (Ok(()), None)];
let _ = BankingStage::record_transactions(bank.slot(), &txs, &results, &recorder);
let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
assert_eq!(entry.transactions.len(), txs.len());
// InstructionErrors should still be recorded
results[0] = (
Err(TransactionError::InstructionError(
1,
SystemError::ResultWithNegativeLamports.into(),
)),
None,
);
let (res, retryable) =
BankingStage::record_transactions(bank.slot(), &txs, &results, &recorder);
res.unwrap();
assert!(retryable.is_empty());
let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
assert_eq!(entry.transactions.len(), txs.len());
// Other TransactionErrors should not be recorded
results[0] = (Err(TransactionError::AccountNotFound), None);
let (res, retryable) =
BankingStage::record_transactions(bank.slot(), &txs, &results, &recorder);
res.unwrap();
assert!(retryable.is_empty());
let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap();
assert_eq!(entry.transactions.len(), txs.len() - 1);
// Once bank is set to a new bank (setting bank.slot() + 1 in record_transactions),
// record_transactions should throw MaxHeightReached and return the set of retryable
// txs
let next_slot = bank.slot() + 1;
let (res, retryable) =
BankingStage::record_transactions(next_slot, &txs, &results, &recorder);
assert_matches!(res, Err(PohRecorderError::MaxHeightReached));
// The first result was an error so it's filtered out. The second result was Ok(),
// so it should be marked as retryable
assert_eq!(retryable, vec![1]);
// Should receive nothing from PohRecorder b/c record failed
assert!(entry_receiver.try_recv().is_err());
poh_recorder
.lock()
.unwrap()
.is_exited
.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_bank_prepare_filter_for_pending_transaction() {
assert_eq!(
BankingStage::prepare_filter_for_pending_transactions(6, &[2, 4, 5]),
vec![
Err(TransactionError::BlockhashNotFound),
Err(TransactionError::BlockhashNotFound),
Ok(()),
Err(TransactionError::BlockhashNotFound),
Ok(()),
Ok(())
]
);
assert_eq!(
BankingStage::prepare_filter_for_pending_transactions(6, &[0, 2, 3]),
vec![
Ok(()),
Err(TransactionError::BlockhashNotFound),
Ok(()),
Ok(()),
Err(TransactionError::BlockhashNotFound),
Err(TransactionError::BlockhashNotFound),
]
);
}
#[test]
fn test_bank_filter_valid_transaction_indexes() {
assert_eq!(
BankingStage::filter_valid_transaction_indexes(
&[
(Err(TransactionError::BlockhashNotFound), None),
(Err(TransactionError::BlockhashNotFound), None),
(Ok(()), None),
(Err(TransactionError::BlockhashNotFound), None),
(Ok(()), None),
(Ok(()), None),
],
&[2, 4, 5, 9, 11, 13]
),
[5, 11, 13]
);
assert_eq!(
BankingStage::filter_valid_transaction_indexes(
&[
(Ok(()), None),
(Err(TransactionError::BlockhashNotFound), None),
(Err(TransactionError::BlockhashNotFound), None),
(Ok(()), None),
(Ok(()), None),
(Ok(()), None),
],
&[1, 6, 7, 9, 31, 43]
),
[1, 9, 31, 43]
);
}
#[test]
fn test_should_process_or_forward_packets() {
let my_pubkey = analog_sdk::pubkey::new_rand();
let my_pubkey1 = analog_sdk::pubkey::new_rand();
let bank = Arc::new(Bank::default_for_tests());
assert_matches!(
BankingStage::consume_or_forward_packets(&my_pubkey, None, Some(&bank), false, false),
BufferedPacketsDecision::Hold
);
assert_matches!(
BankingStage::consume_or_forward_packets(&my_pubkey, None, None, false, false),
BufferedPacketsDecision::Hold
);
assert_matches!(
BankingStage::consume_or_forward_packets(&my_pubkey1, None, None, false, false),
BufferedPacketsDecision::Hold
);
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey,
Some(my_pubkey1),
None,
false,
false
),
BufferedPacketsDecision::Forward
);
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey,
Some(my_pubkey1),
None,
true,
true
),
BufferedPacketsDecision::Hold
);
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey,
Some(my_pubkey1),
None,
true,
false
),
BufferedPacketsDecision::ForwardAndHold
);
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey,
Some(my_pubkey1),
Some(&bank),
false,
false
),
BufferedPacketsDecision::Consume(_)
);
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey1,
Some(my_pubkey1),
None,
false,
false
),
BufferedPacketsDecision::Hold
);
assert_matches!(
BankingStage::consume_or_forward_packets(
&my_pubkey1,
Some(my_pubkey1),
Some(&bank),
false,
false
),
BufferedPacketsDecision::Consume(_)
);
}
fn create_slow_genesis_config(tock: u64) -> GenesisConfigInfo {
let mut config_info = create_genesis_config(tock);
// For these tests there's only 1 slot, don't want to run out of ticks
config_info.genesis_config.ticks_per_slot *= 8;
config_info
}
#[test]
fn test_bank_process_and_record_transactions() {
analog_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_slow_genesis_config(10_000);
let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
let pubkey = analog_sdk::pubkey::new_rand();
let transactions = sanitize_transactions(vec![system_transaction::transfer(
&mint_keypair,
&pubkey,
1,
genesis_config.hash(),
)]);
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.clone(),
Some((4, 4)),
bank.ticks_per_slot(),
&pubkey,
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
Arc::new(AtomicBool::default()),
);
let recorder = poh_recorder.recorder();
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let poh_simulator = simulate_poh(record_receiver, &poh_recorder);
poh_recorder.lock().unwrap().set_bank(&bank);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
BankingStage::process_and_record_transactions(
&bank,
&transactions,
&recorder,
0,
None,
&gossip_vote_sender,
)
.0
.unwrap();
// Tick up to max tick height
while poh_recorder.lock().unwrap().tick_height() != bank.max_tick_height() {
poh_recorder.lock().unwrap().tick();
}
let mut done = false;
// read entries until I find mine, might be ticks...
while let Ok((_bank, (entry, _tick_height))) = entry_receiver.recv() {
if !entry.is_tick() {
trace!("got entry");
assert_eq!(entry.transactions.len(), transactions.len());
assert_eq!(bank.get_balance(&pubkey), 1);
done = true;
}
if done {
break;
}
}
trace!("done ticking");
assert!(done);
let transactions = sanitize_transactions(vec![system_transaction::transfer(
&mint_keypair,
&pubkey,
2,
genesis_config.hash(),
)]);
assert_matches!(
BankingStage::process_and_record_transactions(
&bank,
&transactions,
&recorder,
0,
None,
&gossip_vote_sender,
)
.0,
Err(PohRecorderError::MaxHeightReached)
);
poh_recorder
.lock()
.unwrap()
.is_exited
.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
assert_eq!(bank.get_balance(&pubkey), 1);
}
Blockstore::destroy(&ledger_path).unwrap();
}
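    /// Test helper: spawns a thread that services `record_receiver` through the given
    /// `PohRecorder` (via `PohService::read_record_receiver_and_process`) until the
    /// recorder's `is_exited` flag is set.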
fn simulate_poh(
record_receiver: CrossbeamReceiver<Record>,
poh_recorder: &Arc<Mutex<PohRecorder>>,
) -> JoinHandle<()> {
let poh_recorder = poh_recorder.clone();
let is_exited = poh_recorder.lock().unwrap().is_exited.clone();
let tick_producer = Builder::new()
.name("analog-simulate_poh".to_string())
.spawn(move || loop {
PohService::read_record_receiver_and_process(
&poh_recorder,
&record_receiver,
Duration::from_millis(10),
);
if is_exited.load(Ordering::Relaxed) {
break;
}
});
tick_producer.unwrap()
}
#[test]
fn test_bank_process_and_record_transactions_account_in_use() {
analog_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_slow_genesis_config(10_000);
let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
let pubkey = analog_sdk::pubkey::new_rand();
let pubkey1 = analog_sdk::pubkey::new_rand();
let transactions = sanitize_transactions(vec![
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash()),
system_transaction::transfer(&mint_keypair, &pubkey1, 1, genesis_config.hash()),
]);
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.clone(),
Some((4, 4)),
bank.ticks_per_slot(),
&pubkey,
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
Arc::new(AtomicBool::default()),
);
let recorder = poh_recorder.recorder();
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
poh_recorder.lock().unwrap().set_bank(&bank);
let poh_simulator = simulate_poh(record_receiver, &poh_recorder);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let (result, unprocessed) = BankingStage::process_and_record_transactions(
&bank,
&transactions,
&recorder,
0,
None,
&gossip_vote_sender,
);
poh_recorder
.lock()
.unwrap()
.is_exited
.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
assert!(result.is_ok());
assert_eq!(unprocessed.len(), 1);
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_filter_valid_packets() {
analog_logger::setup();
let mut all_packets = (0..16)
.map(|packets_id| {
let packets = Packets::new(
(0..32)
.map(|packet_id| {
let mut p = Packet::default();
p.meta.port = packets_id << 8 | packet_id;
p
})
.collect_vec(),
);
let valid_indexes = (0..32)
.filter_map(|x| if x % 2 != 0 { Some(x as usize) } else { None })
.collect_vec();
(packets, valid_indexes, false)
})
.collect_vec();
let result = BankingStage::filter_valid_packets_for_forwarding(all_packets.iter());
assert_eq!(result.len(), 256);
let _ = result
.into_iter()
.enumerate()
.map(|(index, p)| {
let packets_id = index / 16;
let packet_id = (index % 16) * 2 + 1;
assert_eq!(p.meta.port, (packets_id << 8 | packet_id) as u16);
})
.collect_vec();
all_packets[0].2 = true;
let result = BankingStage::filter_valid_packets_for_forwarding(all_packets.iter());
assert_eq!(result.len(), 240);
}
#[test]
fn test_process_transactions_returns_unprocessed_txs() {
analog_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_slow_genesis_config(10_000);
let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
let pubkey = analog_sdk::pubkey::new_rand();
let transactions = sanitize_transactions(vec![system_transaction::transfer(
&mint_keypair,
&pubkey,
1,
genesis_config.hash(),
)]);
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.clone(),
Some((4, 4)),
bank.ticks_per_slot(),
&analog_sdk::pubkey::new_rand(),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
Arc::new(AtomicBool::default()),
);
// Poh Recorder has no working bank, so should throw MaxHeightReached error on
// record
let recorder = poh_recorder.recorder();
let poh_simulator = simulate_poh(record_receiver, &Arc::new(Mutex::new(poh_recorder)));
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let (processed_transactions_count, mut retryable_txs) =
BankingStage::process_transactions(
&bank,
&Instant::now(),
&transactions,
&recorder,
None,
&gossip_vote_sender,
);
assert_eq!(processed_transactions_count, 0,);
retryable_txs.sort_unstable();
let expected: Vec<usize> = (0..transactions.len()).collect();
assert_eq!(retryable_txs, expected);
recorder.is_exited.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_write_persist_transaction_status() {
analog_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_slow_genesis_config(10_000);
let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
let pubkey = analog_sdk::pubkey::new_rand();
let pubkey1 = analog_sdk::pubkey::new_rand();
let keypair1 = Keypair::new();
let success_tx =
system_transaction::transfer(&mint_keypair, &pubkey, 1, genesis_config.hash());
let success_signature = success_tx.signatures[0];
let entry_1 = next_entry(&genesis_config.hash(), 1, vec![success_tx.clone()]);
let ix_error_tx =
system_transaction::transfer(&keypair1, &pubkey1, 10, genesis_config.hash());
let ix_error_signature = ix_error_tx.signatures[0];
let entry_2 = next_entry(&entry_1.hash, 1, vec![ix_error_tx.clone()]);
let fail_tx =
system_transaction::transfer(&mint_keypair, &pubkey1, 1, genesis_config.hash());
let entry_3 = next_entry(&entry_2.hash, 1, vec![fail_tx.clone()]);
let entries = vec![entry_1, entry_2, entry_3];
let transactions = sanitize_transactions(vec![success_tx, ix_error_tx, fail_tx]);
bank.transfer(4, &mint_keypair, &keypair1.pubkey()).unwrap();
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger");
let blockstore = Arc::new(blockstore);
let (poh_recorder, _entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.clone(),
Some((4, 4)),
bank.ticks_per_slot(),
&pubkey,
&blockstore,
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
Arc::new(AtomicBool::default()),
);
let recorder = poh_recorder.recorder();
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
let poh_simulator = simulate_poh(record_receiver, &poh_recorder);
poh_recorder.lock().unwrap().set_bank(&bank);
let shreds = entries_to_test_shreds(entries, bank.slot(), 0, true, 0);
blockstore.insert_shreds(shreds, None, false).unwrap();
blockstore.set_roots(std::iter::once(&bank.slot())).unwrap();
let (transaction_status_sender, transaction_status_receiver) = unbounded();
let transaction_status_service = TransactionStatusService::new(
transaction_status_receiver,
Arc::new(AtomicU64::default()),
blockstore.clone(),
&Arc::new(AtomicBool::new(false)),
);
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
let _ = BankingStage::process_and_record_transactions(
&bank,
&transactions,
&recorder,
0,
Some(TransactionStatusSender {
sender: transaction_status_sender,
enable_cpi_and_log_storage: false,
}),
&gossip_vote_sender,
);
transaction_status_service.join().unwrap();
let confirmed_block = blockstore.get_rooted_block(bank.slot(), false).unwrap();
assert_eq!(confirmed_block.transactions.len(), 3);
for TransactionWithStatusMeta { transaction, meta } in
confirmed_block.transactions.into_iter()
{
if transaction.signatures[0] == success_signature {
let meta = meta.unwrap();
assert_eq!(meta.status, Ok(()));
} else if transaction.signatures[0] == ix_error_signature {
let meta = meta.unwrap();
assert_eq!(
meta.status,
Err(TransactionError::InstructionError(
0,
InstructionError::Custom(1)
))
);
} else {
assert_eq!(meta, None);
}
}
poh_recorder
.lock()
.unwrap()
.is_exited
.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[allow(clippy::type_complexity)]
fn setup_conflicting_transactions(
ledger_path: &Path,
) -> (
Vec<Transaction>,
Arc<Bank>,
Arc<Mutex<PohRecorder>>,
Receiver<WorkingBankEntry>,
JoinHandle<()>,
) {
Blockstore::destroy(ledger_path).unwrap();
let genesis_config_info = create_slow_genesis_config(10_000);
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = &genesis_config_info;
let blockstore =
Blockstore::open(ledger_path).expect("Expected to be able to open database ledger");
let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(genesis_config));
let exit = Arc::new(AtomicBool::default());
let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new(
bank.tick_height(),
bank.last_blockhash(),
bank.clone(),
Some((4, 4)),
bank.ticks_per_slot(),
&analog_sdk::pubkey::new_rand(),
&Arc::new(blockstore),
&Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
&Arc::new(PohConfig::default()),
exit,
);
let poh_recorder = Arc::new(Mutex::new(poh_recorder));
// Set up unparallelizable conflicting transactions
let pubkey0 = analog_sdk::pubkey::new_rand();
let pubkey1 = analog_sdk::pubkey::new_rand();
let pubkey2 = analog_sdk::pubkey::new_rand();
let transactions = vec![
system_transaction::transfer(mint_keypair, &pubkey0, 1, genesis_config.hash()),
system_transaction::transfer(mint_keypair, &pubkey1, 1, genesis_config.hash()),
system_transaction::transfer(mint_keypair, &pubkey2, 1, genesis_config.hash()),
];
let poh_simulator = simulate_poh(record_receiver, &poh_recorder);
(
transactions,
bank,
poh_recorder,
entry_receiver,
poh_simulator,
)
}
#[test]
fn test_consume_buffered_packets() {
let ledger_path = get_tmp_ledger_path!();
{
let (transactions, bank, poh_recorder, _entry_receiver, poh_simulator) =
setup_conflicting_transactions(&ledger_path);
let recorder = poh_recorder.lock().unwrap().recorder();
let num_conflicting_transactions = transactions.len();
let mut packets_vec = to_packets_chunked(&transactions, num_conflicting_transactions);
assert_eq!(packets_vec.len(), 1);
assert_eq!(packets_vec[0].packets.len(), num_conflicting_transactions);
let all_packets = packets_vec.pop().unwrap();
let mut buffered_packets: UnprocessedPackets = vec![(
all_packets,
(0..num_conflicting_transactions).into_iter().collect(),
false,
)]
.into_iter()
.collect();
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
// When the working bank in poh_recorder is None, no packets should be processed
assert!(!poh_recorder.lock().unwrap().has_bank());
let max_tx_processing_ns = std::u128::MAX;
BankingStage::consume_buffered_packets(
&Pubkey::default(),
max_tx_processing_ns,
&poh_recorder,
&mut buffered_packets,
None,
&gossip_vote_sender,
None::<Box<dyn Fn()>>,
&BankingStageStats::default(),
&recorder,
&Arc::new(RwLock::new(CostModel::default())),
);
assert_eq!(buffered_packets[0].1.len(), num_conflicting_transactions);
            // When the poh recorder has a bank, it should process all non-conflicting buffered packets.
            // Processes one packet per iteration of the loop
for num_expected_unprocessed in (0..num_conflicting_transactions).rev() {
poh_recorder.lock().unwrap().set_bank(&bank);
BankingStage::consume_buffered_packets(
&Pubkey::default(),
max_tx_processing_ns,
&poh_recorder,
&mut buffered_packets,
None,
&gossip_vote_sender,
None::<Box<dyn Fn()>>,
&BankingStageStats::default(),
&recorder,
&Arc::new(RwLock::new(CostModel::default())),
);
if num_expected_unprocessed == 0 {
assert!(buffered_packets.is_empty())
} else {
assert_eq!(buffered_packets[0].1.len(), num_expected_unprocessed);
}
}
poh_recorder
.lock()
.unwrap()
.is_exited
.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_consume_buffered_packets_interrupted() {
let ledger_path = get_tmp_ledger_path!();
{
let (transactions, bank, poh_recorder, _entry_receiver, poh_simulator) =
setup_conflicting_transactions(&ledger_path);
let num_conflicting_transactions = transactions.len();
let packets_vec = to_packets_chunked(&transactions, 1);
assert_eq!(packets_vec.len(), num_conflicting_transactions);
for single_packets in &packets_vec {
assert_eq!(single_packets.packets.len(), 1);
}
let mut buffered_packets: UnprocessedPackets = packets_vec
.clone()
.into_iter()
.map(|single_packets| (single_packets, vec![0], false))
.collect();
let (continue_sender, continue_receiver) = unbounded();
let (finished_packet_sender, finished_packet_receiver) = unbounded();
let test_fn = Some(move || {
finished_packet_sender.send(()).unwrap();
continue_receiver.recv().unwrap();
});
            // When the poh recorder has a bank, it should process all non-conflicting buffered packets.
            // Because each conflicting transaction is in its own `Packet` within `packets_vec`, each
            // iteration of this loop processes one element of `packets_vec`.
let interrupted_iteration = 1;
poh_recorder.lock().unwrap().set_bank(&bank);
let poh_recorder_ = poh_recorder.clone();
let recorder = poh_recorder_.lock().unwrap().recorder();
let (gossip_vote_sender, _gossip_vote_receiver) = unbounded();
// Start up thread to process the banks
let t_consume = Builder::new()
.name("consume-buffered-packets".to_string())
.spawn(move || {
BankingStage::consume_buffered_packets(
&Pubkey::default(),
std::u128::MAX,
&poh_recorder_,
&mut buffered_packets,
None,
&gossip_vote_sender,
test_fn,
&BankingStageStats::default(),
&recorder,
&Arc::new(RwLock::new(CostModel::default())),
);
// Check everything is correct. All indexes after `interrupted_iteration`
// should still be unprocessed
assert_eq!(
buffered_packets.len(),
packets_vec[interrupted_iteration + 1..].len()
);
for ((remaining_unprocessed_packet, _, _forwarded), original_packet) in
buffered_packets
.iter()
.zip(&packets_vec[interrupted_iteration + 1..])
{
assert_eq!(
remaining_unprocessed_packet.packets[0],
original_packet.packets[0]
);
}
})
.unwrap();
for i in 0..=interrupted_iteration {
finished_packet_receiver.recv().unwrap();
if i == interrupted_iteration {
poh_recorder
.lock()
.unwrap()
.schedule_dummy_max_height_reached_failure();
}
continue_sender.send(()).unwrap();
}
t_consume.join().unwrap();
poh_recorder
.lock()
.unwrap()
.is_exited
.store(true, Ordering::Relaxed);
let _ = poh_simulator.join();
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_forwarder_budget() {
analog_logger::setup();
// Create `Packets` with 1 unprocessed element
let single_element_packets = Packets::new(vec![Packet::default()]);
let mut unprocessed_packets: UnprocessedPackets =
vec![(single_element_packets, vec![0], false)]
.into_iter()
.collect();
let cluster_info = new_test_cluster_info(Node::new_localhost().info);
let genesis_config_info = create_slow_genesis_config(10_000);
let GenesisConfigInfo { genesis_config, .. } = &genesis_config_info;
let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(genesis_config));
let ledger_path = get_tmp_ledger_path!();
{
let blockstore = Arc::new(
Blockstore::open(&ledger_path)
.expect("Expected to be able to open database ledger"),
);
let poh_config = PohConfig {
                // limit tick count to avoid clearing working_bank at
                // PohRecorder and then hitting PohRecorderError(MaxHeightReached) at BankingStage
target_tick_count: Some(bank.max_tick_height() - 1),
..PohConfig::default()
};
let (exit, poh_recorder, poh_service, _entry_receiver) =
create_test_recorder(&bank, &blockstore, Some(poh_config));
let socket = UdpSocket::bind("0.0.0.0:0").unwrap();
let data_budget = DataBudget::default();
BankingStage::handle_forwarding(
&ForwardOption::ForwardTransaction,
&cluster_info,
&mut unprocessed_packets,
&poh_recorder,
&socket,
false,
&data_budget,
);
exit.store(true, Ordering::Relaxed);
poh_service.join().unwrap();
}
Blockstore::destroy(&ledger_path).unwrap();
}
#[test]
fn test_push_unprocessed_batch_limit() {
analog_logger::setup();
// Create `Packets` with 2 unprocessed elements
let new_packets = Packets::new(vec![Packet::default(); 2]);
let mut unprocessed_packets: UnprocessedPackets =
vec![(new_packets, vec![0, 1], false)].into_iter().collect();
// Set the limit to 2
let batch_limit = 2;
// Create some new unprocessed packets
let new_packets = Packets::new(vec![Packet::default()]);
let packet_indexes = vec![];
let duplicates = Arc::new(Mutex::new((
LruCache::new(DEFAULT_LRU_SIZE),
PacketHasher::default(),
)));
let mut dropped_packet_batches_count = 0;
let mut dropped_packets_count = 0;
let mut newly_buffered_packets_count = 0;
let banking_stage_stats = BankingStageStats::default();
// Because the set of unprocessed `packet_indexes` is empty, the
// packets are not added to the unprocessed queue
BankingStage::push_unprocessed(
&mut unprocessed_packets,
new_packets.clone(),
packet_indexes,
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
&mut newly_buffered_packets_count,
batch_limit,
&duplicates,
&banking_stage_stats,
);
assert_eq!(unprocessed_packets.len(), 1);
assert_eq!(dropped_packet_batches_count, 0);
assert_eq!(dropped_packets_count, 0);
assert_eq!(newly_buffered_packets_count, 0);
// Because the set of unprocessed `packet_indexes` is non-empty, the
// packets are added to the unprocessed queue
let packet_indexes = vec![0];
BankingStage::push_unprocessed(
&mut unprocessed_packets,
new_packets,
packet_indexes.clone(),
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
&mut newly_buffered_packets_count,
batch_limit,
&duplicates,
&banking_stage_stats,
);
assert_eq!(unprocessed_packets.len(), 2);
assert_eq!(dropped_packet_batches_count, 0);
assert_eq!(dropped_packets_count, 0);
assert_eq!(newly_buffered_packets_count, 1);
// Because we've reached the batch limit, old unprocessed packets are
// dropped and the new one is appended to the end
let new_packets = Packets::new(vec![Packet::from_data(
Some(&SocketAddr::from(([127, 0, 0, 1], 8001))),
42,
)
.unwrap()]);
assert_eq!(unprocessed_packets.len(), batch_limit);
BankingStage::push_unprocessed(
&mut unprocessed_packets,
new_packets.clone(),
packet_indexes.clone(),
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
&mut newly_buffered_packets_count,
batch_limit,
&duplicates,
&banking_stage_stats,
);
assert_eq!(unprocessed_packets.len(), 2);
assert_eq!(unprocessed_packets[1].0.packets[0], new_packets.packets[0]);
assert_eq!(dropped_packet_batches_count, 1);
assert_eq!(dropped_packets_count, 2);
assert_eq!(newly_buffered_packets_count, 2);
// Check duplicates are dropped (newly buffered shouldn't change)
BankingStage::push_unprocessed(
&mut unprocessed_packets,
new_packets.clone(),
packet_indexes,
&mut dropped_packet_batches_count,
&mut dropped_packets_count,
&mut newly_buffered_packets_count,
3,
&duplicates,
&banking_stage_stats,
);
assert_eq!(unprocessed_packets.len(), 2);
assert_eq!(unprocessed_packets[1].0.packets[0], new_packets.packets[0]);
assert_eq!(dropped_packet_batches_count, 1);
assert_eq!(dropped_packets_count, 2);
assert_eq!(newly_buffered_packets_count, 2);
}
#[test]
fn test_packet_message() {
let keypair = Keypair::new();
let pubkey = analog_sdk::pubkey::new_rand();
let blockhash = Hash::new_unique();
let transaction = system_transaction::transfer(&keypair, &pubkey, 1, blockhash);
let packet = Packet::from_data(None, &transaction).unwrap();
assert_eq!(
BankingStage::packet_message(&packet).unwrap().to_vec(),
transaction.message_data()
);
}
#[cfg(test)]
fn make_test_packets(
transactions: Vec<Transaction>,
vote_indexes: Vec<usize>,
) -> (Packets, Vec<usize>) {
let capacity = transactions.len();
let mut packets = Packets::with_capacity(capacity);
let mut packet_indexes = Vec::with_capacity(capacity);
packets.packets.resize(capacity, Packet::default());
for (index, tx) in transactions.iter().enumerate() {
Packet::populate_packet(&mut packets.packets[index], None, tx).ok();
packet_indexes.push(index);
}
for index in vote_indexes.iter() {
packets.packets[*index].meta.is_simple_vote_tx = true;
}
(packets, packet_indexes)
}
#[test]
fn test_transactions_from_packets() {
use analog_sdk::feature_set::FeatureSet;
let keypair = Keypair::new();
let transfer_tx =
system_transaction::transfer(&keypair, &keypair.pubkey(), 1, Hash::default());
let vote_tx = vote_transaction::new_vote_transaction(
vec![42],
Hash::default(),
Hash::default(),
&keypair,
&keypair,
&keypair,
None,
);
// packets with no votes
{
let vote_indexes = vec![];
let (packets, packet_indexes) =
make_test_packets(vec![transfer_tx.clone(), transfer_tx.clone()], vote_indexes);
let mut votes_only = false;
let (txs, tx_packet_index, _retryable_packet_indexes) =
BankingStage::transactions_from_packets(
&packets,
&packet_indexes,
&Arc::new(FeatureSet::default()),
&RwLock::new(CostTracker::default()).read().unwrap(),
&BankingStageStats::default(),
false,
votes_only,
&Arc::new(RwLock::new(CostModel::default())),
);
assert_eq!(2, txs.len());
assert_eq!(vec![0, 1], tx_packet_index);
votes_only = true;
let (txs, tx_packet_index, _retryable_packet_indexes) =
BankingStage::transactions_from_packets(
&packets,
&packet_indexes,
&Arc::new(FeatureSet::default()),
&RwLock::new(CostTracker::default()).read().unwrap(),
&BankingStageStats::default(),
false,
votes_only,
&Arc::new(RwLock::new(CostModel::default())),
);
assert_eq!(0, txs.len());
assert_eq!(0, tx_packet_index.len());
}
// packets with some votes
{
let vote_indexes = vec![0, 2];
let (packets, packet_indexes) = make_test_packets(
vec![vote_tx.clone(), transfer_tx, vote_tx.clone()],
vote_indexes,
);
let mut votes_only = false;
let (txs, tx_packet_index, _retryable_packet_indexes) =
BankingStage::transactions_from_packets(
&packets,
&packet_indexes,
&Arc::new(FeatureSet::default()),
&RwLock::new(CostTracker::default()).read().unwrap(),
&BankingStageStats::default(),
false,
votes_only,
&Arc::new(RwLock::new(CostModel::default())),
);
assert_eq!(3, txs.len());
assert_eq!(vec![0, 1, 2], tx_packet_index);
votes_only = true;
let (txs, tx_packet_index, _retryable_packet_indexes) =
BankingStage::transactions_from_packets(
&packets,
&packet_indexes,
&Arc::new(FeatureSet::default()),
&RwLock::new(CostTracker::default()).read().unwrap(),
&BankingStageStats::default(),
false,
votes_only,
&Arc::new(RwLock::new(CostModel::default())),
);
assert_eq!(2, txs.len());
assert_eq!(vec![0, 2], tx_packet_index);
}
// packets with all votes
{
let vote_indexes = vec![0, 1, 2];
let (packets, packet_indexes) = make_test_packets(
vec![vote_tx.clone(), vote_tx.clone(), vote_tx],
vote_indexes,
);
let mut votes_only = false;
let (txs, tx_packet_index, _retryable_packet_indexes) =
BankingStage::transactions_from_packets(
&packets,
&packet_indexes,
&Arc::new(FeatureSet::default()),
&RwLock::new(CostTracker::default()).read().unwrap(),
&BankingStageStats::default(),
false,
votes_only,
&Arc::new(RwLock::new(CostModel::default())),
);
assert_eq!(3, txs.len());
assert_eq!(vec![0, 1, 2], tx_packet_index);
votes_only = true;
let (txs, tx_packet_index, _retryable_packet_indexes) =
BankingStage::transactions_from_packets(
&packets,
&packet_indexes,
&Arc::new(FeatureSet::default()),
&RwLock::new(CostTracker::default()).read().unwrap(),
&BankingStageStats::default(),
false,
votes_only,
&Arc::new(RwLock::new(CostModel::default())),
);
assert_eq!(3, txs.len());
assert_eq!(vec![0, 1, 2], tx_packet_index);
}
}
}
| filter_pending_packets_from_pending_txs |
helpers.go | package openshift
import (
"context"
"errors"
"fmt"
"strings"
semver "github.com/blang/semver/v4"
configv1 "github.com/openshift/api/config/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/predicate"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/projection"
)
func stripObject(obj client.Object) {
if obj == nil {
return
}
obj.SetResourceVersion("")
obj.SetUID("")
}
func watchNamespace(namespace *string) predicate.Funcs {
return predicate.NewPredicateFuncs(func(object client.Object) bool {
return object.GetNamespace() == *namespace
})
}
func watchName(name *string) predicate.Funcs {
return predicate.NewPredicateFuncs(func(object client.Object) bool {
return object.GetName() == *name
})
}
func conditionsEqual(a, b *configv1.ClusterOperatorStatusCondition) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
return a.Type == b.Type && a.Status == b.Status && a.Message == b.Message && a.Reason == b.Reason
}
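// versionsMatch reports whether a and b contain the same OperandVersions, ignoring order (multiset equality).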
func versionsMatch(a []configv1.OperandVersion, b []configv1.OperandVersion) bool {
if len(a) != len(b) {
return false
}
counts := map[configv1.OperandVersion]int{}
for _, av := range a {
counts[av] += 1
}
for _, bv := range b {
remaining, ok := counts[bv]
if !ok {
return false
}
if remaining == 1 {
delete(counts, bv)
continue
}
counts[bv] -= 1
}
return len(counts) < 1
}
type skews []skew
func (s skews) String() string {
msg := make([]string, len(s))
i, j := 0, len(s)-1
for _, sk := range s {
m := sk.String()
// Partial order: error skews first
if sk.err != nil {
msg[i] = m
i++
continue
}
msg[j] = m
j--
}
return "ClusterServiceVersions blocking cluster upgrade: " + strings.Join(msg, ",")
}
type skew struct {
namespace string
name string
maxOpenShiftVersion string
err error
}
func (s skew) String() string {
if s.err != nil {
return fmt.Sprintf("%s/%s has invalid %s properties: %s", s.namespace, s.name, MaxOpenShiftVersionProperty, s.err)
}
return fmt.Sprintf("%s/%s is incompatible with OpenShift minor versions greater than %s", s.namespace, s.name, s.maxOpenShiftVersion)
}
type transientError struct {
error
}
// transientErrors returns the result of stripping all wrapped errors not of type transientError from the given error.
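// For illustration: a plain, non-transient error is filtered down to nil, while an aggregate
// mixing transient and non-transient errors keeps only the transient ones.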
func transientErrors(err error) error {
return utilerrors.FilterOut(err, func(e error) bool {
return !errors.As(e, new(transientError))
})
}
func incompatibleOperators(ctx context.Context, cli client.Client) (skews, error) {
desired, err := desiredRelease(ctx, cli)
if err != nil {
return nil, err
}
if desired == nil {
// Note: This shouldn't happen
return nil, fmt.Errorf("Failed to determine current OpenShift Y-stream release")
}
next, err := nextY(*desired)
if err != nil {
return nil, err
}
csvList := &operatorsv1alpha1.ClusterServiceVersionList{}
if err := cli.List(ctx, csvList); err != nil {
return nil, &transientError{fmt.Errorf("Failed to list ClusterServiceVersions: %w", err)}
}
var incompatible skews
for _, csv := range csvList.Items {
if csv.IsCopied() {
continue
}
s := skew{
name: csv.GetName(),
namespace: csv.GetNamespace(),
}
max, err := maxOpenShiftVersion(&csv)
if err != nil {
s.err = err
incompatible = append(incompatible, s)
continue
}
if max == nil || max.GTE(next) {
continue
}
s.maxOpenShiftVersion = fmt.Sprintf("%d.%d", max.Major, max.Minor)
incompatible = append(incompatible, s)
}
return incompatible, nil
}
func desiredRelease(ctx context.Context, cli client.Client) (*semver.Version, error) {
cv := configv1.ClusterVersion{}
if err := cli.Get(ctx, client.ObjectKey{Name: "version"}, &cv); err != nil { // "version" is the name of OpenShift's ClusterVersion singleton
return nil, &transientError{fmt.Errorf("Failed to get ClusterVersion: %w", err)}
}
v := cv.Status.Desired.Version
if v == "" {
// The release version hasn't been set yet
return nil, fmt.Errorf("Desired release version missing from ClusterVersion")
}
desired, err := semver.ParseTolerant(v)
if err != nil {
return nil, fmt.Errorf("ClusterVersion has invalid desired release version: %w", err)
}
| }
func nextY(v semver.Version) (semver.Version, error) {
v.Build = nil // Builds are irrelevant
if len(v.Pre) > 0 {
// Dropping pre-releases is equivalent to incrementing Y
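		// e.g. 4.11.0-rc.1 becomes 4.11.0 here, whereas a stable 4.10.3 falls through
		// below and is bumped to 4.11.0 by IncrementMinor.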
v.Pre = nil
v.Patch = 0
return v, nil
}
return v, v.IncrementMinor() // Sets Y=Y+1 and Z=0
}
const (
MaxOpenShiftVersionProperty = "olm.maxOpenShiftVersion"
)
func maxOpenShiftVersion(csv *operatorsv1alpha1.ClusterServiceVersion) (*semver.Version, error) {
// Extract the property from the CSV's annotations if possible
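	// Illustrative shape of that annotation (key name assumed, not verified against this source):
	//   olm.properties: '[{"type": "olm.maxOpenShiftVersion", "value": "4.8"}]'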
annotation, ok := csv.GetAnnotations()[projection.PropertiesAnnotationKey]
if !ok {
return nil, nil
}
properties, err := projection.PropertyListFromPropertiesAnnotation(annotation)
if err != nil {
return nil, err
}
var max *string
for _, property := range properties {
if property.Type != MaxOpenShiftVersionProperty {
continue
}
if max != nil {
return nil, fmt.Errorf(`Defining more than one "%s" property is not allowed`, MaxOpenShiftVersionProperty)
}
max = &property.Value
}
if max == nil {
return nil, nil
}
// Account for any additional quoting
value := strings.Trim(*max, "\"")
if value == "" {
// Handle "" separately, so parse doesn't treat it as a zero
return nil, fmt.Errorf(`Value cannot be "" (an empty string)`)
}
version, err := semver.ParseTolerant(value)
if err != nil {
return nil, fmt.Errorf(`Failed to parse "%s" as semver: %w`, value, err)
}
truncatedVersion := semver.Version{Major: version.Major, Minor: version.Minor}
if !version.EQ(truncatedVersion) {
return nil, fmt.Errorf("property %s must specify only <major>.<minor> version, got invalid value %s", MaxOpenShiftVersionProperty, version)
}
return &truncatedVersion, nil
}
func notCopiedSelector() (labels.Selector, error) {
requirement, err := labels.NewRequirement(operatorsv1alpha1.CopiedLabelKey, selection.DoesNotExist, nil)
if err != nil {
return nil, err
}
return labels.NewSelector().Add(*requirement), nil
}
func olmOperatorRelatedObjects(ctx context.Context, cli client.Client, namespace string) ([]configv1.ObjectReference, error) {
selector, err := notCopiedSelector()
if err != nil {
return nil, err
}
csvList := &operatorsv1alpha1.ClusterServiceVersionList{}
if err := cli.List(ctx, csvList, client.InNamespace(namespace), client.MatchingLabelsSelector{Selector: selector}); err != nil {
return nil, err
}
var refs []configv1.ObjectReference
for _, csv := range csvList.Items {
if csv.IsCopied() {
// Filter out copied CSVs that the label selector missed
continue
}
// TODO: Generalize ObjectReference generation
refs = append(refs, configv1.ObjectReference{
Group: operatorsv1alpha1.GroupName,
Resource: "clusterserviceversions",
Namespace: csv.GetNamespace(),
Name: csv.GetName(),
})
}
return refs, nil
} | return &desired, nil |
interfaces.py | import os
import time
from typing import Dict
from cereal import car
from common.kalman.simple_kalman import KF1D
from common.realtime import DT_CTRL
from selfdrive.car import gen_empty_fingerprint
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX
from selfdrive.controls.lib.events import Events
from selfdrive.controls.lib.vehicle_model import VehicleModel
GearShifter = car.CarState.GearShifter
EventName = car.CarEvent.EventName
# WARNING: this value was determined based on the model's training distribution,
# model predictions above this speed can be unpredictable
MAX_CTRL_SPEED = (V_CRUISE_MAX + 4) * CV.KPH_TO_MS # 135 + 4 = 86 mph
ACCEL_MAX = 2.0
ACCEL_MIN = -3.5
# generic car and radar interfaces
class CarInterfaceBase():
def __init__(self, CP, CarController, CarState):
self.CP = CP
self.VM = VehicleModel(CP)
self.frame = 0
self.steer_warning = 0
self.steering_unpressed = 0
self.low_speed_alert = False
if CarState is not None:
self.CS = CarState(CP)
self.cp = self.CS.get_can_parser(CP)
self.cp_cam = self.CS.get_cam_can_parser(CP)
self.cp_body = self.CS.get_body_can_parser(CP)
self.CC = None
if CarController is not None:
self.CC = CarController(self.cp.dbc_name, CP, self.VM)
@staticmethod
def get_pid_accel_limits(CP, current_speed, cruise_speed):
return ACCEL_MIN, ACCEL_MAX
@staticmethod
def calc_accel_override(a_ego, a_target, v_ego, v_target):
return 1.
@staticmethod
def get_params(candidate, fingerprint=gen_empty_fingerprint(), car_fw=None):
raise NotImplementedError
@staticmethod
def init(CP, logcan, sendcan):
pass
# returns a set of default params to avoid repetition in car specific params
@staticmethod
def get_std_params(candidate, fingerprint):
ret = car.CarParams.new_message()
ret.carFingerprint = candidate
# standard ALC params
ret.steerControlType = car.CarParams.SteerControlType.torque
ret.steerMaxBP = [0.]
ret.steerMaxV = [1.]
ret.minSteerSpeed = 0.
ret.pcmCruise = True # openpilot's state is tied to the PCM's cruise state on most cars
ret.minEnableSpeed = -1. # enable is done by stock ACC, so ignore this
    ret.steerRatioRear = 0.  # no rear steering, at least on the listed cars above
ret.openpilotLongitudinalControl = False
ret.startAccel = 0.0
ret.minSpeedCan = 0.3
ret.stoppingDecelRate = 0.8 # brake_travel/s while trying to stop
ret.startingAccelRate = 3.2 # brake_travel/s while releasing on restart
ret.stoppingControl = True
ret.longitudinalTuning.deadzoneBP = [0.]
ret.longitudinalTuning.deadzoneV = [0.]
ret.longitudinalTuning.kpBP = [0.]
ret.longitudinalTuning.kpV = [1.]
ret.longitudinalTuning.kiBP = [0.]
ret.longitudinalTuning.kiV = [1.]
ret.longitudinalActuatorDelay = 0.15
return ret
# returns a car.CarState, pass in car.CarControl
def update(self, c, can_strings):
raise NotImplementedError
# return sendcan, pass in a car.CarControl
def apply(self, c):
raise NotImplementedError
def create_common_events(self, cs_out, extra_gears=None, gas_resume_speed=-1, pcm_enable=True):
events = Events()
if cs_out.doorOpen:
events.add(EventName.doorOpen)
if cs_out.seatbeltUnlatched:
events.add(EventName.seatbeltNotLatched)
if cs_out.gearShifter != GearShifter.drive and (extra_gears is None or
cs_out.gearShifter not in extra_gears):
events.add(EventName.wrongGear)
if cs_out.gearShifter == GearShifter.reverse:
events.add(EventName.reverseGear)
if not cs_out.cruiseState.available:
events.add(EventName.wrongCarMode)
if cs_out.espDisabled:
events.add(EventName.espDisabled)
if cs_out.gasPressed:
events.add(EventName.gasPressed)
if cs_out.stockFcw:
events.add(EventName.stockFcw)
if cs_out.stockAeb:
events.add(EventName.stockAeb)
if cs_out.vEgo > MAX_CTRL_SPEED:
events.add(EventName.speedTooHigh)
if cs_out.cruiseState.nonAdaptive:
events.add(EventName.wrongCruiseMode)
self.steer_warning = self.steer_warning + 1 if cs_out.steerWarning else 0
self.steering_unpressed = 0 if cs_out.steeringPressed else self.steering_unpressed + 1
# Handle permanent and temporary steering faults
if cs_out.steerError:
events.add(EventName.steerUnavailable)
elif cs_out.steerWarning:
# only escalate to the harsher alert after the condition has
# persisted for 0.5s and we're certain that the user isn't overriding
|
# Disable on rising edge of gas or brake. Also disable on brake when speed > 0.
# Optionally allow to press gas at zero speed to resume.
# e.g. Chrysler does not spam the resume button yet, so resuming with gas is handy. FIXME!
if (cs_out.gasPressed and (not self.CS.out.gasPressed) and cs_out.vEgo > gas_resume_speed) or \
(cs_out.brakePressed and (not self.CS.out.brakePressed or not cs_out.standstill)):
events.add(EventName.pedalPressed)
# we engage when pcm is active (rising edge)
if pcm_enable:
if cs_out.cruiseState.enabled and not self.CS.out.cruiseState.enabled:
events.add(EventName.pcmEnable)
elif not cs_out.cruiseState.enabled:
events.add(EventName.pcmDisable)
return events
class RadarInterfaceBase():
def __init__(self, CP):
self.pts = {}
self.delay = 0
self.radar_ts = CP.radarTimeStep
self.no_radar_sleep = 'NO_RADAR_SLEEP' in os.environ
def update(self, can_strings):
ret = car.RadarData.new_message()
if not self.no_radar_sleep:
time.sleep(self.radar_ts) # radard runs on RI updates
return ret
class CarStateBase:
def __init__(self, CP):
self.CP = CP
self.car_fingerprint = CP.carFingerprint
self.out = car.CarState.new_message()
self.cruise_buttons = 0
self.left_blinker_cnt = 0
self.right_blinker_cnt = 0
self.left_blinker_prev = False
self.right_blinker_prev = False
# Q = np.matrix([[10.0, 0.0], [0.0, 100.0]])
# R = 1e3
self.v_ego_kf = KF1D(x0=[[0.0], [0.0]],
A=[[1.0, DT_CTRL], [0.0, 1.0]],
C=[1.0, 0.0],
K=[[0.12287673], [0.29666309]])
def update_speed_kf(self, v_ego_raw):
if abs(v_ego_raw - self.v_ego_kf.x[0][0]) > 2.0: # Prevent large accelerations when car starts at non zero speed
self.v_ego_kf.x = [[v_ego_raw], [0.0]]
v_ego_x = self.v_ego_kf.update(v_ego_raw)
return float(v_ego_x[0]), float(v_ego_x[1])
def update_blinker_from_lamp(self, blinker_time: int, left_blinker_lamp: bool, right_blinker_lamp: bool):
"""Update blinkers from lights. Enable output when light was seen within the last `blinker_time`
iterations"""
# TODO: Handle case when switching direction. Now both blinkers can be on at the same time
self.left_blinker_cnt = blinker_time if left_blinker_lamp else max(self.left_blinker_cnt - 1, 0)
self.right_blinker_cnt = blinker_time if right_blinker_lamp else max(self.right_blinker_cnt - 1, 0)
return self.left_blinker_cnt > 0, self.right_blinker_cnt > 0
def update_blinker_from_stalk(self, blinker_time: int, left_blinker_stalk: bool, right_blinker_stalk: bool):
"""Update blinkers from stalk position. When stalk is seen the blinker will be on for at least blinker_time,
or until the stalk is turned off, whichever is longer. If the opposite stalk direction is seen the blinker
is forced to the other side. On a rising edge of the stalk the timeout is reset."""
if left_blinker_stalk:
self.right_blinker_cnt = 0
if not self.left_blinker_prev:
self.left_blinker_cnt = blinker_time
if right_blinker_stalk:
self.left_blinker_cnt = 0
if not self.right_blinker_prev:
self.right_blinker_cnt = blinker_time
self.left_blinker_cnt = max(self.left_blinker_cnt - 1, 0)
self.right_blinker_cnt = max(self.right_blinker_cnt - 1, 0)
self.left_blinker_prev = left_blinker_stalk
self.right_blinker_prev = right_blinker_stalk
return bool(left_blinker_stalk or self.left_blinker_cnt > 0), bool(right_blinker_stalk or self.right_blinker_cnt > 0)
@staticmethod
def parse_gear_shifter(gear: str) -> car.CarState.GearShifter:
d: Dict[str, car.CarState.GearShifter] = {
'P': GearShifter.park, 'R': GearShifter.reverse, 'N': GearShifter.neutral,
'E': GearShifter.eco, 'T': GearShifter.manumatic, 'D': GearShifter.drive,
'S': GearShifter.sport, 'L': GearShifter.low, 'B': GearShifter.brake
}
return d.get(gear, GearShifter.unknown)
@staticmethod
def get_cam_can_parser(CP):
return None
@staticmethod
def get_body_can_parser(CP):
return None
| if self.steering_unpressed > int(0.5/DT_CTRL) and self.steer_warning > int(0.5/DT_CTRL):
events.add(EventName.steerTempUnavailable)
else:
events.add(EventName.steerTempUnavailableSilent) |
train_titlegen.py | from titlegen.gen_title import train, sample
if __name__ == "__main__": | # extract_resnet_features()
train()
sample() |
|
selection_sort.py | def selectionSort(array, n):
|
data = [ ]
size = int(input("Enter size of array : "))
print("Enter array elements: ")
for i in range(size):
e=int(input())
data.append(e)
selectionSort(data, size)
print('Sorted Array in Ascending Order:')
print(data) | for i in range(n):
minimum = i
for j in range(i + 1, n):
            # to sort in descending order, change < to > in the comparison below
# select the minimum element in each loop
if array[j] < array[minimum]:
minimum = j
# put min at the correct position
(array[i], array[minimum]) = (array[minimum], array[i]) |
index.test.js | import validateModifier from './index'
it('should pass validation when not all fields are present', async () => {
const wife = {
name: {type: String},
state: {type: String}
}
const schema = {
_id: {type: 'ID'},
wife: {type: wife}
}
await validateModifier(schema, {$set: {'wife.state': 'Full'}})
}) |
it('should throw an error when a not present field is passed', async () => {
const wife = {
name: {type: String},
state: {type: String}
}
const schema = {
_id: {type: 'ID'},
wife: {type: wife}
}
expect.assertions(1)
try {
await validateModifier(schema, {$set: {'mom.name': 'Paula'}})
} catch (error) {
expect(error.code).toBe('validationError')
}
})
it('validate arrays', async () => {
const friend = {
name: {type: String}
}
const schema = {
_id: {type: 'ID'},
friends: {type: [friend]}
}
await validateModifier(schema, {$set: {'friends.0.name': 'Roberto'}})
await validateModifier(schema, {$set: {friends: [{name: 'Roberto'}]}})
expect.assertions(1)
try {
await validateModifier(schema, {$set: {friends: ['Roberto']}})
} catch (error) {
expect(error.code).toBe('validationError')
}
})
it('validate $push operations', async () => {
const friend = {
name: {type: String}
}
const schema = {
_id: {type: 'ID'},
friends: {type: [friend]}
}
await validateModifier(schema, {$push: {friends: {name: 'Roberto'}}})
await validateModifier(schema, {
$push: {friends: {$each: [{name: 'Roberto'}, {name: 'Joaquín'}]}}
})
expect.assertions(1)
try {
await validateModifier(schema, {$set: {friends: ['Roberto']}})
} catch (error) {
expect(error.code).toBe('validationError')
}
})
it('validate $unset operations', async () => {
const schema = {
_id: {type: 'ID'},
name: {type: String, optional: true}
}
await validateModifier(schema, {$unset: {name: ''}})
}) | |
layer.py | import json
import traceback
from mitreattack.navlayers.core.exceptions import UninitializedLayer, BadType, BadInput, handler
from mitreattack.navlayers.core.layerobj import _LayerObj
class Layer:
def __init__(self, init_data={}, name=None, domain=None, strict=True):
|
@property
def layer(self):
if self.__layer is not None:
return self.__layer
return "No Layer Loaded Yet!"
@layer.setter
def layer(self, layer):
if isinstance(layer, _LayerObj):
self.__layer = layer
def from_str(self, init_str):
"""
Loads a raw layer string into the object
:param init_str: the string representing the layer data to
be loaded
"""
self._data = json.loads(init_str)
self._build()
def from_dict(self, init_dict):
"""
        Loads a raw layer dictionary into the object
:param init_dict: the dictionary representing the layer data to
be loaded
"""
self._data = init_dict
if self._data != {}:
self._build()
def from_file(self, filename):
"""
loads input from a layer file specified by filename
:param filename: the target filename to load from
"""
fallback = False
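        # to_file() below writes layers UTF-16 encoded, so try that first and fall back
        # to the platform default encoding for layer files produced elsewhere.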
with open(filename, 'r', encoding='utf-16') as fio:
try:
raw = fio.read()
            except (UnicodeError, UnicodeDecodeError):
fallback = True
if fallback:
with open(filename, 'r')as fio:
raw = fio.read()
self._data = json.loads(raw)
self._build()
def to_file(self, filename):
"""
saves the current state of the layer to a layer file specified by
filename
:param filename: the target filename to save as
"""
if self.__layer is not None:
with open(filename, 'w', encoding='utf-16') as fio:
json.dump(self.__layer.get_dict(), fio, ensure_ascii=False)
else:
raise UninitializedLayer
def _build(self):
"""
Loads the data stored in self.data into a LayerObj (self.layer)
"""
try:
self.__layer = _LayerObj(self._data['name'], self._data['domain'])
        except (BadType, BadInput) as e:
handler(type(self).__name__, 'Layer is malformed: {}. '
'Unable to load.'.format(e))
self.__layer = None
return
except KeyError as e:
handler(type(self).__name__, 'Layer is missing parameters: {}. '
'Unable to load.'.format(e))
self.__layer = None
return
for key in self._data:
if key not in ['name', 'domain']:
try:
self.__layer._linker(key, self._data[key])
except Exception as e:
if self.strict:
handler(type(self).__name__, "{} encountered [{}]. "
"Unable to load."
.format(str(e.__class__.__name__), e))
handler(type(self).__name__, "Full Traceback - {}"
.format(traceback.format_exc()))
self.__layer = None
return
def to_dict(self):
"""
Converts the currently loaded layer file into a dict
:returns: A dict representation of the current layer object
"""
if self.__layer is not None:
return self.__layer.get_dict()
def to_str(self):
"""
Converts the currently loaded layer file into a string
representation of a dictionary
:returns: A string representation of the current layer object
"""
if self.__layer is not None:
return json.dumps(self.to_dict())
| """
Initialization - create a new Layer object
:param init_data: Optionally provide base Layer json or string
            data on initialization
        :param name: Optionally provide a name to create a new layer (requires domain)
        :param domain: Optionally provide a domain to create a new layer (requires name)
        """
self.__layer = None
self.strict = strict
if name and domain:
            self._data = dict(name=name, domain=domain)
self._build()
elif isinstance(init_data, str):
self.from_str(init_data)
else:
self.from_dict(init_data) |
script.js | /**** Header Js ***/
$(function () {
$('[data-toggle="tooltip"]').tooltip()
})
$(window).scroll(function () {
var scroll = $(window).scrollTop();
if (scroll >= 200) {
$(".site-header").addClass("sticky-1");
} else {
$(".site-header").removeClass("sticky-1");
}
if (scroll >= 300) {
$(".site-header").addClass("sticky");
} else {
$(".site-header").removeClass("sticky");
}
});
$(document).ready(function(){
$('.menuOpen').on('click', function () {
$("#navbarNav").addClass('nav-translate');
$("#mainBody").addClass('overflow-hidden');
});
$('.menuClose').on('click', function () {
$("#navbarNav").removeClass('nav-translate');
$("#mainBody").removeClass('overflow-hidden');
});
});
/**** Return To Top Js ***/
$(window).scroll(function() {
$(this).scrollTop() >= 50 ? $("#return-to-top").fadeIn(200) : $("#return-to-top").fadeOut(200)
}), $("#return-to-top").click(function() {
$("body,html").animate({
scrollTop: 0
}, 500)
});
$(function () {
new WOW().init();
$(document).ready(function($) {
$('.tfh-category-slider').slick({
dots: false,
infinite: true,
speed: 800,
slidesToShow: 6,
slidesToScroll: 1,
autoplay: true,
autoplaySpeed: 2000,
arrows: true,
//variableWidth: true,
prevArrow:"<button type='button' class='prev-control slick-arrow-control'><i class='fal fa-chevron-left'></i></button>",
nextArrow:"<button type='button' class='next-control slick-arrow-control'><i class='fal fa-chevron-right'></i></button>",
responsive: [
{
breakpoint: 1200,
settings: {
slidesToShow: 4,
}
},
{
breakpoint: 991,
settings: {
slidesToShow: 3,
}
},
{
breakpoint: 768,
settings: {
slidesToShow: 2,
arrows: false,
}
},
]
});
});
$(document).ready(function($) {
$('.upcoming-event-slider').slick({
dots: false,
infinite: true,
speed: 900,
slidesToShow: 3,
slidesToScroll: 1,
autoplay: true,
autoplaySpeed: 2000,
arrows: true,
//variableWidth: true,
prevArrow:"<button type='button' class='prev-control slick-arrow-control'><i class='fal fa-chevron-left'></i></button>",
nextArrow:"<button type='button' class='next-control slick-arrow-control'><i class='fal fa-chevron-right'></i></button>",
responsive: [
{
breakpoint: 1200,
settings: {
slidesToShow: 2,
} | },
{
breakpoint: 768,
settings: {
slidesToShow: 1,
arrows: false,
}
},
]
});
});
$(document).ready(function($) {
$('.fes-slider').slick({
dots: false,
infinite: true,
//speed: 1000,
slidesToShow: 1,
slidesToScroll: 1,
//autoplay: true,
//autoplaySpeed: 2000,
arrows: true,
variableWidth: true,
centerMode: true,
prevArrow:"<button type='button' class='prev-control slick-arrow-control'><i class='fal fa-chevron-left'></i></button>",
nextArrow:"<button type='button' class='next-control slick-arrow-control'><i class='fal fa-chevron-right'></i></button>",
responsive: [
{
breakpoint: 1200,
settings: {
arrows: false,
}
},
{
breakpoint: 991,
settings: {
variableWidth: false,
centerMode: false,
arrows: false,
}
},
{
breakpoint: 768,
settings: {
variableWidth: false,
centerMode: false,
slidesToShow: 1,
arrows: false,
}
},
]
});
});
});
// Init slick slider + animation
$('.home-hero-slider').slick({
autoplay: true,
speed: 800,
lazyLoad: 'progressive',
arrows: true,
dots: false,
prevArrow: '<div class="slick-nav hh-slick-control prev-arrow"><i></i><svg><use xlink:href="#circle"></svg></div>',
nextArrow: '<div class="slick-nav hh-slick-control next-arrow"><i></i><svg><use xlink:href="#circle"></svg></div>',
responsive: [
{
breakpoint: 991,
settings: {
arrows: false,
}
},
{
breakpoint: 768,
settings: {
arrows: false,
}
},
]
}).slickAnimation();
$('.hh-slick-control').on('click touch', function(e){
e.preventDefault();
var arrow = $(this);
if(!arrow.hasClass('animate')) {
arrow.addClass('animate');
setTimeout(() => {
arrow.removeClass('animate');
}, 1600);
}
});
$(function () {
/*$('.datePicker-from-group').datetimepicker({
format: 'L',
});*/
});
/**** Filter Js *******/
$(".tfsef-filter-btn").click(function(){
$(".catOpen-filter").toggle();
$(this).find(".filterIcon").toggleClass("fa-filter fa-times");
}); | |
multiworld.py | from assembler import ASM
from roomEditor import RoomEditor
import entityData
def | (rom):
# Make a copy of the shop into GrandpaUlrira house
shop_room = RoomEditor(rom, 0x2A1)
re = RoomEditor(rom, 0x2A9)
re.objects = [obj for obj in shop_room.objects if obj.x is not None and obj.type_id != 0xCE] + re.getWarps()
re.entities = [(1, 6, 0x77), (2, 6, 0x77)]
re.animation_id = shop_room.animation_id
re.floor_object = shop_room.floor_object
re.store(rom)
# Fix the tileset
rom.banks[0x20][0x2EB3 + 0x2A9 - 0x100] = rom.banks[0x20][0x2EB3 + 0x2A1 - 0x100]
# Load the shopkeeper sprites instead of Grandpa sprites
entityData.SPRITE_DATA[0x77] = entityData.SPRITE_DATA[0x4D]
labels = {}
rom.patch(0x06, 0x2860, "00" * 0x215, ASM("""
shopItemsHandler:
; Render the shop items
ld h, $00
loop:
; First load links position to render the item at
ldh a, [$98] ; LinkX
ldh [$EE], a ; X
ldh a, [$99] ; LinkY
sub $0E
ldh [$EC], a ; Y
; Check if this is the item we have picked up
ld a, [$C509] ; picked up item in shop
dec a
cp h
jr z, .renderCarry
ld a, h
swap a
add a, $20
ldh [$EE], a ; X
ld a, $30
ldh [$EC], a ; Y
.renderCarry:
ld a, h
push hl
ldh [$F1], a ; variant
cp $03
jr nc, .singleSprite
ld de, ItemsDualSpriteData
call $3BC0 ; render sprite pair
jr .renderDone
.singleSprite:
ld de, ItemsSingleSpriteData
call $3C77 ; render sprite
.renderDone:
pop hl
.skipItem:
inc h
ld a, $07
cp h
jr nz, loop
; check if we want to pickup or drop an item
ldh a, [$CC]
and $30 ; A or B button
call nz, checkForPickup
; check if we have an item
ld a, [$C509] ; carry item
and a
ret z
; Set that link has picked something up
ld a, $01
ld [$C15C], a
call $0CAF ; reset spin attack...
; Check if we are trying to exit the shop and so drop our item.
ldh a, [$99]
cp $78
ret c
xor a
ld [$C509], a
ret
checkForPickup:
ldh a, [$9E] ; direction
cp $02
ret nz
ldh a, [$99] ; LinkY
cp $48
ret nc
ld a, $13
ldh [$F2], a ; play SFX
ld a, [$C509] ; picked up shop item
and a
jr nz, .drop
ldh a, [$98] ; LinkX
sub $08
swap a
and $07
ld [$C509], a ; picked up shop item
ret
.drop:
xor a
ld [$C509], a
ret
ItemsDualSpriteData:
db $60, $08, $60, $28 ; zol
db $68, $09 ; chicken (left)
ItemsSingleSpriteData: ; (first 3 entries are still dual sprites)
db $6A, $09 ; chicken (right)
db $14, $02, $14, $22 ; piece of power
;Real single sprite data starts here
db $00, $0F ; bomb
db $38, $0A ; rupees
db $20, $0C ; medicine
db $28, $0C ; heart
;------------------------------------trying to buy something starts here
talkHandler:
ld a, [$C509] ; carry item
add a, a
ret z ; check if we have something to buy
sub $02
ld hl, itemNames
ld e, a
ld d, b ; b=0
add hl, de
ld e, [hl]
inc hl
ld d, [hl]
ld hl, wCustomMessage
call appendString
dec hl
call padString
ld de, postMessage
call appendString
dec hl
ld a, $fe
ld [hl], a
ld de, $FFEF
add hl, de
ldh a, [$EE]
swap a
and $0F
add a, $30
ld [hl], a
ld a, $C9
call $2385 ; open dialog
call $3B12 ; increase entity state
ret
appendString:
ld a, [de]
inc de
and a
ret z
ldi [hl], a
jr appendString
padString:
ld a, l
and $0F
ret z
ld a, $20
ldi [hl], a
jr padString
itemNames:
dw itemZol
dw itemChicken
dw itemPieceOfPower
dw itemBombs
dw itemRupees
dw itemMedicine
dw itemHealth
postMessage:
db "For player X? Yes No ", $00
itemZol:
db m"Slime storm|100 {RUPEES}", $00
itemChicken:
db m"Coccu party|50 {RUPEES}", $00
itemPieceOfPower:
db m"Piece of Power|50 {RUPEES}", $00
itemBombs:
db m"20 Bombs|50 {RUPEES}", $00
itemRupees:
db m"100 {RUPEES}|200 {RUPEES}", $00
itemMedicine:
db m"Medicine|100 {RUPEES}", $00
itemHealth:
db m"Health refill|10 {RUPEES}", $00
TalkResultHandler:
ld hl, ItemPriceTableBCD
ld a, [$C509]
dec a
add a, a
ld c, a ; b=0
add hl, bc
ldi a, [hl]
ld d, [hl]
ld e, a
ld a, [$DB5D]
cp d
ret c
jr nz, .highEnough
ld a, [$DB5E]
cp e
ret c
.highEnough:
; Got enough money, take it.
ld hl, ItemPriceTableDEC
ld a, [$C509]
dec a
ld c, a ; b=0
add hl, bc
ld a, [hl]
ld [$DB92], a
; No longer picked up item
xor a
ld [$C509], a
ret
ItemPriceTableBCD:
dw $0100, $0050, $0050, $0050, $0200, $0100, $0010
ItemPriceTableDEC:
db $64, $32, $32, $32, $C8, $64, $0A
""", 0x6860, labels), fill_nop=True)
# Patch GrandpaUlrira to work as a multiworld shop
rom.patch(0x06, 0x1C0E, 0x1C89, ASM("""
ld a, $01
ld [$C50A], a ; this stops link from using items
;Draw shopkeeper
ld de, OwnerSpriteData
call $3BC0 ; render sprite pair
ldh a, [$E7] ; frame counter
swap a
and $01
call $3B0C ; set sprite variant
ldh a, [$F0]
and a
jr nz, checkTalkingResult
call $641A ; prevent link from moving into the sprite
call $645D ; check if talking to NPC
call c, ${TALKHANDLER:04x} ; talk handling
ldh a, [$EE] ; X
cp $18
ret nz
; Jump to other code which is placed on the old owl code. As we do not have enough space here.
jp ${SHOPITEMSHANDLER:04x}
checkTalkingResult:
ld a, [$C19F]
and a
    ret nz ; still talking
call $3B12 ; increase entity state
ld [hl], $00
ld a, [$C177] ; dialog selection
and a
ret nz
jp ${TALKRESULTHANDLER:04x}
OwnerSpriteData:
;db $60, $03, $62, $03, $62, $23, $60, $23 ; down
db $64, $03, $66, $03, $66, $23, $64, $23 ; up
;db $68, $03, $6A, $03, $6C, $03, $6E, $03 ; left
;db $6A, $23, $68, $23, $6E, $23, $6C, $23 ; right
""".format(**labels), 0x5C0E), fill_nop=True)
| addMultiworldShop |
test_operators.py | import operator
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
from pandas.tests.arrays.categorical.common import TestCategorical
class | (TestCategorical):
def test_categories_none_comparisons(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True)
tm.assert_categorical_equal(factor, self.factor)
def test_comparisons(self):
result = self.factor[self.factor == "a"]
expected = self.factor[np.asarray(self.factor) == "a"]
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor != "a"]
expected = self.factor[np.asarray(self.factor) != "a"]
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor < "c"]
expected = self.factor[np.asarray(self.factor) < "c"]
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor > "a"]
expected = self.factor[np.asarray(self.factor) > "a"]
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor >= "b"]
expected = self.factor[np.asarray(self.factor) >= "b"]
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor <= "b"]
expected = self.factor[np.asarray(self.factor) <= "b"]
tm.assert_categorical_equal(result, expected)
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
tm.assert_numpy_array_equal(result, expected)
result = self.factor == "d"
expected = np.zeros(len(self.factor), dtype=bool)
tm.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = Categorical(["a", "b", "c"], categories=["c", "b", "a"], ordered=True)
cat_rev_base = Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True
)
cat = Categorical(["a", "b", "c"], ordered=True)
cat_base = Categorical(["b", "b", "b"], categories=cat.categories, ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
tm.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
tm.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
tm.assert_numpy_array_equal(res, exp)
        # Only Categoricals with the same categories can be compared
msg = "Categoricals can only be compared if 'categories' are the same"
with pytest.raises(TypeError, match=msg):
cat > cat_rev
cat_rev_base2 = Categorical(["b", "b", "b"], categories=["c", "b", "a", "d"])
with pytest.raises(TypeError, match=msg):
cat_rev > cat_rev_base2
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
assert not (cat > cat).any()
with pytest.raises(TypeError, match=msg):
cat > cat_unorderd
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
msg = (
"Cannot compare a Categorical for op __gt__ with type "
r"<class 'numpy\.ndarray'>"
)
with pytest.raises(TypeError, match=msg):
cat > s
with pytest.raises(TypeError, match=msg):
cat_rev > s
with pytest.raises(TypeError, match=msg):
s < cat
with pytest.raises(TypeError, match=msg):
s < cat_rev
# comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
with pytest.raises(TypeError, match=msg):
cat > a
with pytest.raises(TypeError, match=msg):
cat_rev > a
# Make sure that unequal comparisons take the categories order into
# account
cat_rev = Categorical(list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
tm.assert_numpy_array_equal(res, exp)
# check that zero-dim array gets unboxed
res = cat_rev > np.array("b")
tm.assert_numpy_array_equal(res, exp)
class TestCategoricalOps:
def test_compare_frame(self):
# GH#24282 check that Categorical.__cmp__(DataFrame) defers to frame
data = ["a", "b", 2, "a"]
cat = Categorical(data)
df = DataFrame(cat)
result = cat == df.T
expected = DataFrame([[True, True, True, True]])
tm.assert_frame_equal(result, expected)
result = cat[::-1] != df.T
expected = DataFrame([[False, True, True, False]])
tm.assert_frame_equal(result, expected)
def test_compare_frame_raises(self, all_compare_operators):
# alignment raises unless we transpose
op = getattr(operator, all_compare_operators)
cat = Categorical(["a", "b", 2, "a"])
df = DataFrame(cat)
msg = "Unable to coerce to Series, length must be 1: given 4"
with pytest.raises(ValueError, match=msg):
op(cat, df)
def test_datetime_categorical_comparison(self):
dt_cat = Categorical(date_range("2014-01-01", periods=3), ordered=True)
tm.assert_numpy_array_equal(dt_cat > dt_cat[0], np.array([False, True, True]))
tm.assert_numpy_array_equal(dt_cat[0] < dt_cat, np.array([False, True, True]))
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = Categorical([1, 2, 3], ordered=True)
tm.assert_numpy_array_equal(cat > cat[0], np.array([False, True, True]))
tm.assert_numpy_array_equal(cat[0] < cat, np.array([False, True, True]))
def test_comparison_with_unknown_scalars(self):
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Categorical([1, 2, 3], ordered=True)
msg = "Invalid comparison between dtype=category and int"
with pytest.raises(TypeError, match=msg):
cat < 4
with pytest.raises(TypeError, match=msg):
cat > 4
with pytest.raises(TypeError, match=msg):
4 < cat
with pytest.raises(TypeError, match=msg):
4 > cat
tm.assert_numpy_array_equal(cat == 4, np.array([False, False, False]))
tm.assert_numpy_array_equal(cat != 4, np.array([True, True, True]))
def test_comparison_with_tuple(self):
cat = Categorical(np.array(["foo", (0, 1), 3, (0, 1)], dtype=object))
result = cat == "foo"
expected = np.array([True, False, False, False], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
result = cat == (0, 1)
expected = np.array([False, True, False, True], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
result = cat != (0, 1)
tm.assert_numpy_array_equal(result, ~expected)
def test_comparison_of_ordered_categorical_with_nan_to_scalar(
self, compare_operators_no_eq_ne
):
# https://github.com/pandas-dev/pandas/issues/26504
# BUG: fix ordered categorical comparison with missing values (#26504)
# and following comparisons with scalars in categories with missing
# values should be evaluated as False
cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)
scalar = 2
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
expected = getattr(np.array(cat), compare_operators_no_eq_ne)(scalar)
actual = getattr(cat, compare_operators_no_eq_ne)(scalar)
tm.assert_numpy_array_equal(actual, expected)
def test_comparison_of_ordered_categorical_with_nan_to_listlike(
self, compare_operators_no_eq_ne
):
# https://github.com/pandas-dev/pandas/issues/26504
# and following comparisons of missing values in ordered Categorical
# with listlike should be evaluated as False
cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)
other = Categorical([2, 2, 2, 2], categories=[1, 2, 3], ordered=True)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
expected = getattr(np.array(cat), compare_operators_no_eq_ne)(2)
actual = getattr(cat, compare_operators_no_eq_ne)(other)
tm.assert_numpy_array_equal(actual, expected)
@pytest.mark.parametrize(
"data,reverse,base",
[(list("abc"), list("cba"), list("bbb")), ([1, 2, 3], [3, 2, 1], [2, 2, 2])],
)
def test_comparisons(self, data, reverse, base):
cat_rev = Series(Categorical(data, categories=reverse, ordered=True))
cat_rev_base = Series(Categorical(base, categories=reverse, ordered=True))
cat = Series(Categorical(data, ordered=True))
cat_base = Series(
Categorical(base, categories=cat.cat.categories, ordered=True)
)
s = Series(base)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
# Only categoricals with the same categories can be compared
msg = "Categoricals can only be compared if 'categories' are the same"
with pytest.raises(TypeError, match=msg):
cat > cat_rev
# categorical cannot be compared to Series or numpy array, and also
# not the other way around
msg = (
"Cannot compare a Categorical for op __gt__ with type "
r"<class 'numpy\.ndarray'>"
)
with pytest.raises(TypeError, match=msg):
cat > s
with pytest.raises(TypeError, match=msg):
cat_rev > s
with pytest.raises(TypeError, match=msg):
cat > a
with pytest.raises(TypeError, match=msg):
cat_rev > a
with pytest.raises(TypeError, match=msg):
s < cat
with pytest.raises(TypeError, match=msg):
s < cat_rev
with pytest.raises(TypeError, match=msg):
a < cat
with pytest.raises(TypeError, match=msg):
a < cat_rev
@pytest.mark.parametrize(
"ctor",
[
lambda *args, **kwargs: Categorical(*args, **kwargs),
lambda *args, **kwargs: Series(Categorical(*args, **kwargs)),
],
)
def test_unordered_different_order_equal(self, ctor):
# https://github.com/pandas-dev/pandas/issues/16014
c1 = ctor(["a", "b"], categories=["a", "b"], ordered=False)
c2 = ctor(["a", "b"], categories=["b", "a"], ordered=False)
assert (c1 == c2).all()
c1 = ctor(["a", "b"], categories=["a", "b"], ordered=False)
c2 = ctor(["b", "a"], categories=["b", "a"], ordered=False)
assert (c1 != c2).all()
c1 = ctor(["a", "a"], categories=["a", "b"], ordered=False)
c2 = ctor(["b", "b"], categories=["b", "a"], ordered=False)
assert (c1 != c2).all()
c1 = ctor(["a", "a"], categories=["a", "b"], ordered=False)
c2 = ctor(["a", "b"], categories=["b", "a"], ordered=False)
result = c1 == c2
tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))
def test_unordered_different_categories_raises(self):
c1 = Categorical(["a", "b"], categories=["a", "b"], ordered=False)
c2 = Categorical(["a", "c"], categories=["c", "a"], ordered=False)
with pytest.raises(TypeError, match=("Categoricals can only be compared")):
c1 == c2
def test_compare_different_lengths(self):
c1 = Categorical([], categories=["a", "b"])
c2 = Categorical([], categories=["a"])
msg = "Categoricals can only be compared if 'categories' are the same."
with pytest.raises(TypeError, match=msg):
c1 == c2
def test_compare_unordered_different_order(self):
# https://github.com/pandas-dev/pandas/issues/16603#issuecomment-
# 349290078
a = Categorical(["a"], categories=["a", "b"])
b = Categorical(["b"], categories=["b", "a"])
assert not a.equals(b)
def test_numeric_like_ops(self):
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
# numeric ops should not succeed
for op, str_rep in [
("__add__", r"\+"),
("__sub__", "-"),
("__mul__", r"\*"),
("__truediv__", "/"),
]:
msg = f"Series cannot perform the operation {str_rep}|unsupported operand"
with pytest.raises(TypeError, match=msg):
getattr(df, op)(df)
# reduction ops should not succeed (unless specifically defined, e.g.
# min/max)
s = df["value_group"]
for op in ["kurt", "skew", "var", "std", "mean", "sum", "median"]:
msg = f"'Categorical' does not implement reduction '{op}'"
with pytest.raises(TypeError, match=msg):
getattr(s, op)(numeric_only=False)
# mad technically works because it always takes the numeric data
# numpy ops
s = Series(Categorical([1, 2, 3, 4]))
with pytest.raises(
TypeError, match="'Categorical' does not implement reduction 'sum'"
):
np.sum(s)
# numeric ops on a Series
for op, str_rep in [
("__add__", r"\+"),
("__sub__", "-"),
("__mul__", r"\*"),
("__truediv__", "/"),
]:
msg = f"Series cannot perform the operation {str_rep}|unsupported operand"
with pytest.raises(TypeError, match=msg):
getattr(s, op)(2)
# invalid ufunc
msg = "Object with dtype category cannot perform the numpy op log"
with pytest.raises(TypeError, match=msg):
np.log(s)
| TestCategoricalOpsWithFactor |
items.py | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class | (scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| DatacrawlerItem |
app.js | 'use strict';
/**
* @ngdoc object
* @name conferenceApp
* @requires $routeProvider
* @requires conferenceControllers
* @requires ui.bootstrap
*
* @description
* Root app, which configures the routes and specifies the partial HTML and controller depending on the URL requested.
*
*/
var app = angular.module('conferenceApp',
['conferenceControllers', 'ngRoute', 'ui.bootstrap']).
config(['$routeProvider',
function ($routeProvider) {
$routeProvider.
when('/conference', {
templateUrl: '/partials/show_conferences.html',
controller: 'ShowConferenceCtrl'
}).
when('/conference/create', {
templateUrl: '/partials/create_conferences.html',
controller: 'CreateConferenceCtrl'
}).
when('/conference/detail/:websafeConferenceKey', {
templateUrl: '/partials/conference_detail.html',
controller: 'ConferenceDetailCtrl'
}).
when('/profile', {
templateUrl: '/partials/profile.html',
controller: 'MyProfileCtrl'
}). | when('/', {
templateUrl: '/partials/home.html'
}).
otherwise({
redirectTo: '/'
});
}]);
/**
* @ngdoc filter
* @name startFrom
*
* @description
* A filter that extracts an array starting from the specified index.
*
*/
app.filter('startFrom', function () {
/**
* Extracts an array starting from the specified index.
*
* @param {Array} data
* @param {Integer} start
* @returns {Array|*}
*/
var filter = function (data, start) {
return data.slice(start);
}
return filter;
});
/**
* @ngdoc constant
* @name HTTP_ERRORS
*
* @description
* Holds the constants that represent HTTP error codes.
*
*/
app.constant('HTTP_ERRORS', {
'UNAUTHORIZED': 401
});
/**
* @ngdoc service
* @name oauth2Provider
*
* @description
* Service that holds the OAuth2 information shared across all the pages.
*
*/
app.factory('oauth2Provider', function ($modal) {
var oauth2Provider = {
CLIENT_ID: '585214976686-uvp5r3ulipj9213d6pibh0dd4u39b1ck.apps.googleusercontent.com',
SCOPES: 'email profile',
signedIn: false
}
/**
* Calls the OAuth2 authentication method.
*/
oauth2Provider.signIn = function (callback) {
gapi.auth.signIn({
'clientid': oauth2Provider.CLIENT_ID,
'cookiepolicy': 'single_host_origin',
'accesstype': 'online',
'approveprompt': 'auto',
'scope': oauth2Provider.SCOPES,
'callback': callback
});
};
/**
* Logs out the user.
*/
oauth2Provider.signOut = function () {
gapi.auth.signOut();
// Explicitly set the invalid access token in order to make the API calls fail.
gapi.auth.setToken({access_token: ''})
oauth2Provider.signedIn = false;
};
/**
* Shows the modal with Google+ sign in button.
*
* @returns {*|Window}
*/
oauth2Provider.showLoginModal = function() {
var modalInstance = $modal.open({
templateUrl: '/partials/login.modal.html',
controller: 'OAuth2LoginModalCtrl'
});
return modalInstance;
};
return oauth2Provider;
}); | |
kubernetes.go | // Copyright Project Contour Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package driver
import (
"context"
"errors"
"log"
"github.com/projectcontour/integration-tester/pkg/filter"
"github.com/projectcontour/integration-tester/pkg/must"
"github.com/projectcontour/integration-tester/pkg/utils"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/discovery"
"k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// KubeClient collects various Kubernetes client interfaces.
type KubeClient struct {
Config *rest.Config // XXX(jpeach): remove this, it's only needed for init
Client *kubernetes.Clientset
Dynamic dynamic.Interface
Discovery discovery.CachedDiscoveryInterface
}
// SetUserAgent sets the HTTP User-Agent on the Client.
func (k *KubeClient) SetUserAgent(ua string) {
// XXX(jpeach): user agent is captured at create time, so keeping the config here doesn't help ...
k.Config.UserAgent = ua
}
// NamespaceExists tests whether the given namespace is present.
func (k *KubeClient) NamespaceExists(nsName string) (bool, error) {
_, err := k.Client.CoreV1().Namespaces().Get(context.Background(), nsName, metav1.GetOptions{})
switch {
case err == nil:
return true, nil
case apierrors.IsNotFound(err):
return false, nil
default:
return true, err
}
}
func (k *KubeClient) findAPIResourceForKind(kind schema.GroupVersionKind) (metav1.APIResource, error) {
resources, err := k.Discovery.ServerResourcesForGroupVersion(
schema.GroupVersion{Group: kind.Group, Version: kind.Version}.String())
if err != nil {
return metav1.APIResource{}, err
}
// The listed resources will have empty Group and Version
// fields, which means that they are the same as that of the
// list. Parse the list's GroupVersion to populate the result.
gv := must.GroupVersion(schema.ParseGroupVersion(resources.GroupVersion))
for _, r := range resources.APIResources {
if r.Kind == kind.Kind {
if r.Group == "" {
r.Group = gv.Group
}
if r.Version == "" {
r.Version = gv.Version
}
return r, nil
}
}
return metav1.APIResource{}, errors.New("no match for kind")
}
// KindIsNamespaced returns whether the given kind can be created within a namespace.
func (k *KubeClient) KindIsNamespaced(kind schema.GroupVersionKind) (bool, error) {
res, err := k.findAPIResourceForKind(kind)
if err != nil {
return false, err
}
return res.Namespaced, nil
}
// ResourceForKind returns the schema.GroupVersionResource corresponding to kind.
func (k *KubeClient) ResourceForKind(kind schema.GroupVersionKind) (schema.GroupVersionResource, error) {
res, err := k.findAPIResourceForKind(kind)
if err != nil {
return schema.GroupVersionResource{}, err
}
return schema.GroupVersionResource{
Group: res.Group,
Version: res.Version,
Resource: res.Name,
}, nil
}
// ResourcesForName returns the possible set of schema.GroupVersionResource
// corresponding to the given resource name.
func (k *KubeClient) ResourcesForName(name string) ([]schema.GroupVersionResource, error) {
apiResources, err := k.ServerResources()
if err != nil {
return nil, err
}
var matched []schema.GroupVersionResource
for _, r := range apiResources {
if r.Name != name {
continue
}
matched = append(matched, schema.GroupVersionResource{
Group: r.Group,
Version: r.Version,
Resource: r.Name,
})
}
return matched, nil
}
// SelectObjects lists the objects matching the given kind and selector.
func (k *KubeClient) SelectObjects(kind schema.GroupVersionKind, selector labels.Selector) (
[]*unstructured.Unstructured, error) {
res, err := k.findAPIResourceForKind(kind)
if err != nil {
return nil, err
}
r := schema.GroupVersionResource{
Group: res.Group,
Version: res.Version,
Resource: res.Name,
}
var results []*unstructured.Unstructured
// TODO(jpeach): set a more reasonable limit and implement Continue.
list, err := k.Dynamic.Resource(r).Namespace(metav1.NamespaceAll).List(
context.Background(), metav1.ListOptions{LabelSelector: selector.String(), Limit: 10000})
if apierrors.IsNotFound(err) {
return results, nil
}
if err != nil {
return nil, err
}
for _, u := range list.Items {
results = append(results, u.DeepCopy())
}
return results, nil
}
// ServerResources returns the list of all the resources supported
// by the API server. Note that this method guarantees to populate the
// Group and Version fields in the result.
func (k *KubeClient) ServerResources() ([]metav1.APIResource, error) {
var resources []metav1.APIResource
_, apiList, err := k.Discovery.ServerGroupsAndResources()
if err != nil {
return nil, err
}
for _, g := range apiList {
// The listed resources will have empty Group and Version
// fields, which means that they are the same as that of the
// list. Parse the list's GroupVersion to populate the result.
gv := must.GroupVersion(schema.ParseGroupVersion(g.GroupVersion))
for _, r := range g.APIResources {
r.Group = gv.Group
r.Version = gv.Version
resources = append(resources, r)
}
}
return resources, nil
}
// SelectObjectsByLabel lists all objects that are labeled as managed.
func (k *KubeClient) SelectObjectsByLabel(label string, value string) ([]*unstructured.Unstructured, error) {
groups, err := k.Discovery.ServerPreferredResources()
if err != nil {
return nil, err
}
var resources []schema.GroupVersionResource
for _, g := range groups {
// The listed resources will have empty Group and Version
// fields, which means that they are the same as that of the
// list. Parse the list's GroupVersion to populate the result.
gv := must.GroupVersion(schema.ParseGroupVersion(g.GroupVersion))
for _, r := range g.APIResources {
// Only choose resources we can list.
if !utils.ContainsString(r.Verbs, "list") {
continue
}
gvr := schema.GroupVersionResource{
Group: gv.Group,
Version: gv.Version,
Resource: r.Name,
}
resources = append(resources, gvr)
}
}
selector := labels.SelectorFromSet(labels.Set{label: value}).String()
var results []*unstructured.Unstructured
for _, r := range resources {
// TODO(jpeach): set a more reasonable limit and implement Continue.
list, err := k.Dynamic.Resource(r).Namespace(metav1.NamespaceAll).List(
context.Background(), metav1.ListOptions{LabelSelector: selector, Limit: 10000})
if apierrors.IsNotFound(err) {
continue
}
if err != nil {
return nil, err
}
for _, u := range list.Items {
results = append(results, u.DeepCopy())
}
}
return results, nil
}
// RunIDFor returns the test run ID for u, if there is one. If there
// is no run ID, it returns "".
func (k *KubeClient) RunIDFor(u *unstructured.Unstructured) (string, error) {
for k, v := range u.GetAnnotations() {
if k == filter.LabelRunID {
return v, nil
}
}
// If this object doesn't have the run ID, walk up the owner
// refs to try to find it.
for range u.GetOwnerReferences() {
// TODO(jpeach) ...
}
return "", nil
}
// NewKubeClient returns a new set of Kubernetes client interfaces
// that are configured to use the default Kubernetes context.
func NewKubeClient() (*KubeClient, error) |
// NewNamespace returns a v1/Namespace object named by nsName and
// converted to an unstructured.Unstructured object.
func NewNamespace(nsName string) *unstructured.Unstructured {
ns := &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: nsName,
},
}
u := &unstructured.Unstructured{}
if err := scheme.Scheme.Convert(ns, u, nil); err != nil {
log.Fatalf("namespace conversion failed: %s", err)
}
return u
}
// ObjectReference uniquely identifies Kubernetes API object.
type ObjectReference struct {
Name string `json:"name"`
Namespace string `json:"namespace"`
Meta struct {
Group string `json:"group"`
Version string `json:"version"`
Kind string `json:"kind"`
} `json:"meta"`
}
// FromUnstructured initializes an ObjectReference from a
// unstructured.Unstructured object.
func (o *ObjectReference) FromUnstructured(u *unstructured.Unstructured) *ObjectReference {
o.Name = u.GetName()
o.Namespace = u.GetNamespace()
// We manually construct a GVK so that we can apply JSON
// field labels to lowercase the names in the Rego data store.
kind := u.GetObjectKind().GroupVersionKind()
o.Meta.Group = kind.Group
o.Meta.Version = kind.Version
o.Meta.Kind = kind.Kind
return o
}
| {
rules := clientcmd.NewDefaultClientConfigLoadingRules()
overrides := &clientcmd.ConfigOverrides{}
config := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)
restConfig, err := config.ClientConfig()
if err != nil {
return nil, err
}
clientSet, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return nil, err
}
dynamicIntf, err := dynamic.NewForConfig(restConfig)
if err != nil {
return nil, err
}
return &KubeClient{
Config: restConfig,
Client: clientSet,
Dynamic: dynamicIntf,
Discovery: memory.NewMemCacheClient(clientSet.Discovery()),
}, nil
} |
index.tsx | import React from 'react';
import { View, ImageBackground, Text } from 'react-native';
import {RectButton} from 'react-native-gesture-handler';
import giveClassesBgImage from '../../assets/images/give-classes-background.png';
import styles from './styles';
import { useNavigation } from '@react-navigation/native';
function GiveClasses(){
const {goBack} = useNavigation();
function handlerNavigateBack(){
goBack();
}
return (
<View style={styles.container}>
<ImageBackground
source={giveClassesBgImage}
style={styles.content}
resizeMode="contain"
>
<Text style={styles.title}>
Quer ser um Proffy?
</Text> | </ImageBackground>
<RectButton onPress={handlerNavigateBack} style={styles.okButton}>
<Text style={styles.okButtonText}>Tudo Bem</Text>
</RectButton>
</View>
)
}
export default GiveClasses; | <Text style={styles.description}>
Para começar, você precisa se cadastrar como professor na nossa plataforma Web.
</Text> |
models.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.core.validators
from aldryn_apphooks_config.fields import AppHookConfigField
from aldryn_categories.fields import CategoryManyToManyField
from aldryn_categories.models import Category
from aldryn_newsblog.utils.utilities import get_valid_languages_from_request
from aldryn_people.models import Person
from aldryn_translation_tools.models import TranslatedAutoSlugifyMixin, TranslationHelperMixin
from cms.models.fields import PlaceholderField
from cms.models.pluginmodel import CMSPlugin
from cms.utils.i18n import get_current_language, get_redirect_on_fallback
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured
try:
from django.core.urlresolvers import reverse
except ModuleNotFoundError:
from django.urls import reverse
from django.db import connection, models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import override, ugettext
from djangocms_text_ckeditor.fields import HTMLField
from filer.fields.image import FilerImageField
from parler.models import TranslatableModel, TranslatedFields
from sortedm2m.fields import SortedManyToManyField
from taggit.managers import TaggableManager
from taggit.models import Tag
from .cms_appconfig import NewsBlogConfig
from .managers import RelatedManager
from .settings import ENABLE_REVERSION
from .utils import get_plugin_index_data, get_request, strip_tags
try:
from django.utils.encoding import force_unicode
except ImportError:
from django.utils.encoding import force_text as force_unicode
if settings.LANGUAGES:
LANGUAGE_CODES = [language[0] for language in settings.LANGUAGES]
elif settings.LANGUAGE:
LANGUAGE_CODES = [settings.LANGUAGE]
else:
raise ImproperlyConfigured(
'Neither LANGUAGES nor LANGUAGE was found in settings.')
# At startup time, SQL_NOW_FUNC will contain the database-appropriate SQL to
# obtain the CURRENT_TIMESTAMP.
SQL_NOW_FUNC = {
'mssql': 'GetDate()', 'mysql': 'NOW()', 'postgresql': 'now()',
'sqlite': 'CURRENT_TIMESTAMP', 'oracle': 'CURRENT_TIMESTAMP'
}[connection.vendor]
SQL_IS_TRUE = {
'mssql': '== TRUE', 'mysql': '= 1', 'postgresql': 'IS TRUE',
'sqlite': '== 1', 'oracle': 'IS TRUE'
}[connection.vendor]
class Article(TranslatedAutoSlugifyMixin,
TranslationHelperMixin,
TranslatableModel):
# TranslatedAutoSlugifyMixin options
slug_source_field_name = 'title'
slug_default = _('untitled-article')
# when True, updates the article's search_data field
# whenever the article is saved or a plugin is saved
# on the article's content placeholder.
update_search_on_save = getattr(
settings,
'ALDRYN_NEWSBLOG_UPDATE_SEARCH_DATA_ON_SAVE',
False
)
translations = TranslatedFields(
title=models.CharField(_('title'), max_length=234),
slug=models.SlugField(
verbose_name=_('slug'),
max_length=255,
db_index=True,
blank=True,
help_text=_(
'Used in the URL. If changed, the URL will change. '
'Clear it to have it re-created automatically.'),
),
lead_in=HTMLField(
verbose_name=_('lead'), default='',
help_text=_(
'The lead gives the reader the main idea of the story, this '
'is useful in overviews, lists or as an introduction to your '
'article.'
),
blank=True,
),
meta_title=models.CharField(
max_length=255, verbose_name=_('meta title'),
blank=True, default=''),
meta_description=models.TextField(
verbose_name=_('meta description'), blank=True, default=''),
meta_keywords=models.TextField(
verbose_name=_('meta keywords'), blank=True, default=''),
meta={'unique_together': (('language_code', 'slug', ), )},
search_data=models.TextField(blank=True, editable=False)
)
content = PlaceholderField('newsblog_article_content',
related_name='newsblog_article_content')
author = models.ForeignKey(Person, null=True, blank=True,
verbose_name=_('author'), on_delete=models.SET_NULL)
owner = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('owner'), on_delete=models.PROTECT)
app_config = AppHookConfigField(NewsBlogConfig,
verbose_name=_('Apphook configuration'))
categories = CategoryManyToManyField('aldryn_categories.Category',
verbose_name=_('categories'),
blank=True)
publishing_date = models.DateTimeField(_('publishing date'),
default=now)
is_published = models.BooleanField(_('is published'), default=False,
db_index=True)
is_featured = models.BooleanField(_('is featured'), default=False,
db_index=True)
featured_image = FilerImageField(
verbose_name=_('featured image'),
null=True,
blank=True,
on_delete=models.SET_NULL,
)
tags = TaggableManager(blank=True)
# Setting "symmetrical" to False since it's a bit unexpected that if you
# set "B relates to A" you immediately have also "A relates to B". It have
# to be forced to False because by default it's True if rel.to is "self":
#
# https://github.com/django/django/blob/1.8.4/django/db/models/fields/related.py#L2144
#
# which in the end causes the reversed related-to entry to be added as well:
#
# https://github.com/django/django/blob/1.8.4/django/db/models/fields/related.py#L977
related = SortedManyToManyField('self', verbose_name=_('related articles'),
blank=True, symmetrical=False)
objects = RelatedManager()
class Meta:
ordering = ['-publishing_date']
@property
def published(self):
"""
Returns True only if the article (is_published == True) AND has a
publishing_date that has passed.
"""
return (self.is_published and self.publishing_date <= now())
@property
def future(self):
"""
Returns True if the article is published but is scheduled for a
future date/time.
"""
return (self.is_published and self.publishing_date > now())
def get_absolute_url(self, language=None):
"""Returns the url for this Article in the selected permalink format."""
if not language:
language = get_current_language()
kwargs = {}
permalink_type = self.app_config.permalink_type
if 'y' in permalink_type:
kwargs.update(year=self.publishing_date.year)
if 'm' in permalink_type:
kwargs.update(month="%02d" % self.publishing_date.month)
if 'd' in permalink_type:
kwargs.update(day="%02d" % self.publishing_date.day)
if 'i' in permalink_type:
kwargs.update(pk=self.pk)
if 's' in permalink_type:
slug, lang = self.known_translation_getter(
'slug', default=None, language_code=language)
if slug and lang:
site_id = getattr(settings, 'SITE_ID', None)
if get_redirect_on_fallback(language, site_id):
language = lang
kwargs.update(slug=slug)
if self.app_config and self.app_config.namespace:
namespace = '{0}:'.format(self.app_config.namespace)
else:
namespace = ''
with override(language):
return reverse('{0}article-detail'.format(namespace), kwargs=kwargs)
def get_search_data(self, language=None, request=None):
"""
Provides an index for use with Haystack, or for populating
Article.translations.search_data.
"""
if not self.pk:
return ''
if language is None:
language = get_current_language()
if request is None:
request = get_request(language=language)
description = self.safe_translation_getter('lead_in', '')
text_bits = [strip_tags(description)]
for category in self.categories.all():
text_bits.append(
force_unicode(category.safe_translation_getter('name')))
for tag in self.tags.all():
text_bits.append(force_unicode(tag.name))
if self.content:
plugins = self.content.cmsplugin_set.filter(language=language)
for base_plugin in plugins:
plugin_text_content = ' '.join(
get_plugin_index_data(base_plugin, request))
text_bits.append(plugin_text_content)
return ' '.join(text_bits)
def save(self, *args, **kwargs):
# Update the search index
if self.update_search_on_save:
self.search_data = self.get_search_data()
# Ensure there is an owner.
if self.app_config.create_authors and self.author is None:
# TODO: With django-parler 1.8 and Django 1.11 get_or_create() is
# not working with translated fields yet:
# https://github.com/django-parler/django-parler/issues/189
self.author = Person.objects.get_or_create(
user=self.owner,
defaults={
'name': ' '.join((
self.owner.first_name,
self.owner.last_name,
)),
})[0]
# slug would be generated by TranslatedAutoSlugifyMixin
super(Article, self).save(*args, **kwargs)
def __str__(self):
return self.safe_translation_getter('title', any_language=True)
class PluginEditModeMixin(object):
def get_edit_mode(self, request):
"""
Returns True only if an operator is logged into the CMS and is in
edit mode.
"""
return (
hasattr(request, 'toolbar') and request.toolbar and
request.toolbar.edit_mode)
class AdjustableCacheModelMixin(models.Model):
# NOTE: This field shouldn't even be displayed in the plugin's change form
# if using django CMS < 3.3.0
cache_duration = models.PositiveSmallIntegerField(
default=0, # not the most sensible, but consistent with older versions
blank=False,
help_text=_(
"The maximum duration (in seconds) that this plugin's content "
"should be cached.")
)
class Meta:
abstract = True
class NewsBlogCMSPlugin(CMSPlugin):
"""AppHookConfig aware abstract CMSPlugin class for Aldryn Newsblog"""
# avoid reverse relation name clashes by not adding a related_name
# to the parent plugin
cmsplugin_ptr = models.OneToOneField(
CMSPlugin, related_name='+', parent_link=True, on_delete=models.CASCADE)
app_config = models.ForeignKey(NewsBlogConfig, verbose_name=_('Apphook configuration'), on_delete=models.PROTECT)
class Meta:
abstract = True
def copy_relations(self, old_instance):
self.app_config = old_instance.app_config
class NewsBlogArchivePlugin(PluginEditModeMixin, AdjustableCacheModelMixin,
NewsBlogCMSPlugin):
# NOTE: the PluginEditModeMixin is eventually used in the cmsplugin, not
# here in the model.
def __str__(self):
return ugettext('%s archive') % (self.app_config.get_app_title(), )
class NewsBlogArticleSearchPlugin(NewsBlogCMSPlugin):
max_articles = models.PositiveIntegerField(
_('max articles'), default=10,
validators=[django.core.validators.MinValueValidator(1)],
help_text=_('The maximum number of found articles to display.')
)
|
class NewsBlogAuthorsPlugin(PluginEditModeMixin, NewsBlogCMSPlugin):
def get_authors(self, request):
"""
Returns a queryset of authors (people who have published an article),
annotated by the number of articles (article_count) that are visible to
the current user. If this user is anonymous, then this will be all
articles that are published and whose publishing_date has passed. If the
user is a logged-in cms operator, then it will be all articles.
"""
# The basic subquery (for logged-in content managers in edit mode)
subquery = """
SELECT COUNT(*)
FROM aldryn_newsblog_article
WHERE
aldryn_newsblog_article.author_id =
aldryn_people_person.id AND
aldryn_newsblog_article.app_config_id = %d"""
# For other users, limit subquery to published articles
if not self.get_edit_mode(request):
subquery += """ AND
aldryn_newsblog_article.is_published %s AND
aldryn_newsblog_article.publishing_date <= %s
""" % (SQL_IS_TRUE, SQL_NOW_FUNC, )
# Now, use this subquery in the construction of the main query.
query = """
SELECT (%s) as article_count, aldryn_people_person.*
FROM aldryn_people_person
""" % (subquery % (self.app_config.pk, ), )
raw_authors = list(Person.objects.raw(query))
authors = [author for author in raw_authors if author.article_count]
return sorted(authors, key=lambda x: x.article_count, reverse=True)
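# For illustration (derived from the SQL strings above, formatting simplified):
# in edit mode with app_config.pk == 1 the executed query looks roughly like
#   SELECT (SELECT COUNT(*) FROM aldryn_newsblog_article
#           WHERE aldryn_newsblog_article.author_id = aldryn_people_person.id
#             AND aldryn_newsblog_article.app_config_id = 1) as article_count,
#          aldryn_people_person.*
#   FROM aldryn_people_person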
def __str__(self):
return ugettext('%s authors') % (self.app_config.get_app_title(), )
class NewsBlogCategoriesPlugin(PluginEditModeMixin, NewsBlogCMSPlugin):
def __str__(self):
return ugettext('%s categories') % (self.app_config.get_app_title(), )
def get_categories(self, request):
"""
Returns a list of categories, annotated by the number of articles
(article_count) that are visible to the current user. If this user is
anonymous, then this will be all articles that are published and whose
publishing_date has passed. If the user is a logged-in cms operator,
then it will be all articles.
"""
subquery = """
SELECT COUNT(*)
FROM aldryn_newsblog_article, aldryn_newsblog_article_categories
WHERE
aldryn_newsblog_article_categories.category_id =
aldryn_categories_category.id AND
aldryn_newsblog_article_categories.article_id =
aldryn_newsblog_article.id AND
aldryn_newsblog_article.app_config_id = %d
""" % (self.app_config.pk, )
if not self.get_edit_mode(request):
subquery += """ AND
aldryn_newsblog_article.is_published %s AND
aldryn_newsblog_article.publishing_date <= %s
""" % (SQL_IS_TRUE, SQL_NOW_FUNC, )
query = """
SELECT (%s) as article_count, aldryn_categories_category.*
FROM aldryn_categories_category
""" % (subquery, )
raw_categories = list(Category.objects.raw(query))
categories = [
category for category in raw_categories if category.article_count]
return sorted(categories, key=lambda x: x.article_count, reverse=True)
class NewsBlogFeaturedArticlesPlugin(PluginEditModeMixin, NewsBlogCMSPlugin):
article_count = models.PositiveIntegerField(
default=1,
validators=[django.core.validators.MinValueValidator(1)],
help_text=_('The maximum number of featured articles to display.')
)
def get_articles(self, request):
if not self.article_count:
return Article.objects.none()
queryset = Article.objects
if not self.get_edit_mode(request):
queryset = queryset.published()
languages = get_valid_languages_from_request(
self.app_config.namespace, request)
if self.language not in languages:
return queryset.none()
queryset = queryset.translated(*languages).filter(
app_config=self.app_config,
is_featured=True)
return queryset[:self.article_count]
def __str__(self):
if not self.pk:
return 'featured articles'
prefix = self.app_config.get_app_title()
if self.article_count == 1:
title = ugettext('featured article')
else:
title = ugettext('featured articles: %(count)s') % {
'count': self.article_count,
}
return '{0} {1}'.format(prefix, title)
class NewsBlogLatestArticlesPlugin(PluginEditModeMixin,
AdjustableCacheModelMixin,
NewsBlogCMSPlugin):
latest_articles = models.IntegerField(
default=5,
help_text=_('The maximum number of latest articles to display.')
)
exclude_featured = models.PositiveSmallIntegerField(
default=0,
blank=True,
help_text=_(
'The maximum number of featured articles to exclude from display. '
'E.g. for use in combination with the featured articles plugin.')
)
def get_articles(self, request):
"""
Returns a queryset of the latest N articles. N is the plugin setting:
latest_articles.
"""
queryset = Article.objects
featured_qs = Article.objects.all().filter(is_featured=True)
if not self.get_edit_mode(request):
queryset = queryset.published()
featured_qs = featured_qs.published()
languages = get_valid_languages_from_request(
self.app_config.namespace, request)
if self.language not in languages:
return queryset.none()
queryset = queryset.translated(*languages).filter(
app_config=self.app_config)
featured_qs = featured_qs.translated(*languages).filter(
app_config=self.app_config)
exclude_featured = featured_qs.values_list(
'pk', flat=True)[:self.exclude_featured]
queryset = queryset.exclude(pk__in=list(exclude_featured))
return queryset[:self.latest_articles]
def __str__(self):
return ugettext('%(app_title)s latest articles: %(latest_articles)s') % {
'app_title': self.app_config.get_app_title(),
'latest_articles': self.latest_articles,
}
class NewsBlogRelatedPlugin(PluginEditModeMixin, AdjustableCacheModelMixin,
CMSPlugin):
# NOTE: This one does NOT subclass NewsBlogCMSPlugin. This is because this
# plugin can really only be placed on the article detail view in an apphook.
cmsplugin_ptr = models.OneToOneField(
CMSPlugin, related_name='+', parent_link=True, on_delete=models.CASCADE)
def get_articles(self, article, request):
"""
Returns a queryset of articles that are related to the given article.
"""
languages = get_valid_languages_from_request(
article.app_config.namespace, request)
if self.language not in languages:
return Article.objects.none()
qs = article.related.translated(*languages)
if not self.get_edit_mode(request):
qs = qs.published()
return qs
def __str__(self):
return ugettext('Related articles')
class NewsBlogTagsPlugin(PluginEditModeMixin, NewsBlogCMSPlugin):
def get_tags(self, request):
"""
Returns a queryset of tags, annotated by the number of articles
(article_count) that are visible to the current user. If this user is
anonymous, then this will be all articles that are published and whose
publishing_date has passed. If the user is a logged-in cms operator,
then it will be all articles.
"""
article_content_type = ContentType.objects.get_for_model(Article)
subquery = """
SELECT COUNT(*)
FROM aldryn_newsblog_article, taggit_taggeditem
WHERE
taggit_taggeditem.tag_id = taggit_tag.id AND
taggit_taggeditem.content_type_id = %d AND
taggit_taggeditem.object_id = aldryn_newsblog_article.id AND
aldryn_newsblog_article.app_config_id = %d"""
if not self.get_edit_mode(request):
subquery += """ AND
aldryn_newsblog_article.is_published %s AND
aldryn_newsblog_article.publishing_date <= %s
""" % (SQL_IS_TRUE, SQL_NOW_FUNC, )
query = """
SELECT (%s) as article_count, taggit_tag.*
FROM taggit_tag
""" % (subquery % (article_content_type.id, self.app_config.pk), )
raw_tags = list(Tag.objects.raw(query))
tags = [tag for tag in raw_tags if tag.article_count]
return sorted(tags, key=lambda x: x.article_count, reverse=True)
def __str__(self):
return ugettext('%s tags') % (self.app_config.get_app_title(), )
@receiver(post_save, dispatch_uid='article_update_search_data')
def update_search_data(sender, instance, **kwargs):
"""
Upon detecting changes in a plugin used in an Article's content
(PlaceholderField), update the article's search_index so that we can
perform simple searches even without Haystack, etc.
"""
is_cms_plugin = issubclass(instance.__class__, CMSPlugin)
if Article.update_search_on_save and is_cms_plugin:
placeholder = (getattr(instance, '_placeholder_cache', None) or
instance.placeholder)
if hasattr(placeholder, '_attached_model_cache'):
if placeholder._attached_model_cache == Article:
article = placeholder._attached_model_cache.objects.language(
instance.language).get(content=placeholder.pk)
article.search_data = article.get_search_data(instance.language)
article.save()
if ENABLE_REVERSION:
from aldryn_reversion.core import version_controlled_content
Article = version_controlled_content(Article, follow=['app_config']) | def __str__(self):
return ugettext('%s archive') % (self.app_config.get_app_title(), )
|
diag_nodes_test.go | // +build integration,!no-etcd
package integration
import (
"testing"
"time"
kapi "k8s.io/kubernetes/pkg/api"
kapierror "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/util/wait"
clusterdiags "github.com/openshift/origin/pkg/diagnostics/cluster"
diagtype "github.com/openshift/origin/pkg/diagnostics/types"
testutil "github.com/openshift/origin/test/util"
)
func | (t *testing.T) {
//masterConfig, nodeConfig, clientFile, err := testutil.StartTestAllInOne()
_, nodeConfig, clientFile, err := testutil.StartTestAllInOne()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
client, err := testutil.GetClusterAdminKubeClient(clientFile)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
nodeDiag := clusterdiags.NodeDefinitions{KubeClient: client}
err = wait.Poll(200*time.Millisecond, 5*time.Second, func() (bool, error) {
if _, err := client.Nodes().Get(nodeConfig.NodeName); kapierror.IsNotFound(err) {
return false, nil
}
return true, err
})
if err != nil {
t.Errorf("unexpected error waiting for all-in-one node: %v", err)
}
// start by testing that the diagnostic passes with all-in-one up.
result := nodeDiag.Check()
if warnings := result.Warnings(); len(warnings) > 0 {
t.Fatalf("expected no warnings with one node ready, but: %#v", warnings)
} else if errors := result.Errors(); len(errors) > 0 {
t.Fatalf("expected no errors with one node ready, but: %#v", errors)
}
// Make the node unschedulable and verify diagnostics notices
err = wait.Poll(200*time.Millisecond, time.Second, func() (bool, error) {
node, err := client.Nodes().Get(nodeConfig.NodeName)
if err != nil {
return false, err
}
node.Spec.Unschedulable = true
if _, err := client.Nodes().Update(node); kapierror.IsConflict(err) {
return false, nil
}
return true, err
})
if err != nil {
t.Errorf("unexpected error making node unschedulable: %v", err)
}
result = nodeDiag.Check()
if errors := result.Errors(); len(errors) != 1 ||
!diagtype.MatchesDiagError(errors[0], "DClu0004") {
t.Fatalf("expected 1 error about not having nodes, but: %#v", errors)
} else if warnings := result.Warnings(); len(warnings) < 1 || !diagtype.MatchesDiagError(warnings[0], "DClu0003") {
t.Fatalf("expected a warning about test-node not being schedulable, but: %#v", warnings)
}
// delete it and check with no nodes defined; should get an error about that.
if err := client.Nodes().Delete(nodeConfig.NodeName); err != nil {
t.Errorf("unexpected error deleting node: %v", err)
}
if errors := nodeDiag.Check().Errors(); len(errors) != 1 ||
!diagtype.MatchesDiagError(errors[0], "DClu0004") {
t.Fatalf("expected 1 error about not having nodes, not: %#v", errors)
}
// Next create a node and leave it in NotReady state. Should get a warning
// about that, plus the previous error as there are still no nodes available.
_, err = client.Nodes().Create(&kapi.Node{ObjectMeta: kapi.ObjectMeta{Name: "test-node"}})
if err != nil {
t.Fatalf("expected no errors creating a node: %#v", err)
}
result = nodeDiag.Check()
if errors := result.Errors(); len(errors) != 1 ||
!diagtype.MatchesDiagError(errors[0], "DClu0004") {
t.Fatalf("expected 1 error about not having nodes, not: %#v", errors)
} else if warnings := result.Warnings(); len(warnings) < 1 || !diagtype.MatchesDiagError(warnings[0], "DClu0002") {
t.Fatalf("expected a warning about test-node not being ready, not: %#v", warnings)
}
}
| TestDiagNodeConditions |
computeDelta.js | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*
*
* @format
*/
"use strict";
function _toConsumableArray(arr) {
return (
_arrayWithoutHoles(arr) ||
_iterableToArray(arr) ||
_unsupportedIterableToArray(arr) ||
_nonIterableSpread()
);
}
function _nonIterableSpread() {
throw new TypeError(
"Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method."
);
}
function _iterableToArray(iter) {
if (typeof Symbol !== "undefined" && Symbol.iterator in Object(iter))
return Array.from(iter);
}
function _arrayWithoutHoles(arr) {
if (Array.isArray(arr)) return _arrayLikeToArray(arr);
}
function _slicedToArray(arr, i) {
return (
_arrayWithHoles(arr) ||
_iterableToArrayLimit(arr, i) ||
_unsupportedIterableToArray(arr, i) ||
_nonIterableRest()
);
}
function _nonIterableRest() {
throw new TypeError(
"Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method."
);
}
function _unsupportedIterableToArray(o, minLen) {
if (!o) return;
if (typeof o === "string") return _arrayLikeToArray(o, minLen);
var n = Object.prototype.toString.call(o).slice(8, -1);
if (n === "Object" && o.constructor) n = o.constructor.name;
if (n === "Map" || n === "Set") return Array.from(o);
if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))
return _arrayLikeToArray(o, minLen);
}
function _arrayLikeToArray(arr, len) {
if (len == null || len > arr.length) len = arr.length;
for (var i = 0, arr2 = new Array(len); i < len; i++) arr2[i] = arr[i]; | }
function _iterableToArrayLimit(arr, i) {
if (typeof Symbol === "undefined" || !(Symbol.iterator in Object(arr)))
return;
var _arr = [];
var _n = true;
var _d = false;
var _e = undefined;
try {
for (
var _i = arr[Symbol.iterator](), _s;
!(_n = (_s = _i.next()).done);
_n = true
) {
_arr.push(_s.value);
if (i && _arr.length === i) break;
}
} catch (err) {
_d = true;
_e = err;
} finally {
try {
if (!_n && _i["return"] != null) _i["return"]();
} finally {
if (_d) throw _e;
}
}
return _arr;
}
function _arrayWithHoles(arr) {
if (Array.isArray(arr)) return arr;
}
function computeDelta(entries1, entries2) {
const modules1 = new Map(entries1);
const modules2 = new Map(entries2);
const added = new Map();
const modified = new Map();
const deleted = new Set();
for (const _ref of modules1.entries()) {
var _ref2 = _slicedToArray(_ref, 2);
const id = _ref2[0];
const code = _ref2[1];
const newCode = modules2.get(id);
if (newCode == null) {
deleted.add(id);
} else if (newCode !== code) {
modified.set(id, newCode);
}
}
for (const _ref3 of modules2.entries()) {
var _ref4 = _slicedToArray(_ref3, 2);
const id = _ref4[0];
const code = _ref4[1];
if (!modules1.has(id)) {
added.set(id, code);
}
}
return {
added: _toConsumableArray(added.entries()),
modified: _toConsumableArray(modified.entries()),
deleted: _toConsumableArray(deleted)
};
}
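// Illustrative usage sketch (the module ids and code strings below are made up
// for the example):
//
//   const before = [[0, "const a = 1;"], [1, "const b = 2;"]];
//   const after = [[0, "const a = 1;"], [1, "const b = 3;"], [2, "const c = 4;"]];
//   const delta = computeDelta(before, after);
//   // delta.added    -> [[2, "const c = 4;"]]
//   // delta.modified -> [[1, "const b = 3;"]]
//   // delta.deleted  -> []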
module.exports = computeDelta; | return arr2; |
relative_direct.rs | use std::{borrow::Borrow, num::NonZeroI32, ops::Deref};
use swift_sys::ptr::RelativeDirectPointerNonNull;
// TODO: Implement methods for `Offset` of `NonZeroIsize` (`intptr_t`)
/// A borrowed value that is referenced at a relative offset from itself.
#[repr(transparent)]
pub struct RelativeDirect<T, Offset = NonZeroI32> {
ptr: RelativeDirectPointerNonNull<T, Offset>,
}
impl<T> Deref for RelativeDirect<T> {
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
unsafe { self.ptr.as_ref() }
}
}
impl<T> Borrow<T> for RelativeDirect<T> {
#[inline]
fn borrow(&self) -> &T {
self
}
}
impl<T> AsRef<T> for RelativeDirect<T> {
#[inline]
fn as_ref(&self) -> &T {
self
}
}
impl<T, Offset> RelativeDirect<T, Offset> {
/// Casts the pointer to a borrow.
///
/// # Safety
///
/// The placement address (`ptr`), when adjusted by the stored offset, must
/// not:
///
/// - Result in a null pointer.
///
/// - Be unaligned with respect to `T`.
#[inline]
pub unsafe fn from_ptr(ptr: &RelativeDirectPointerNonNull<T, Offset>) -> &Self {
&*(ptr as *const _ as *const Self)
}
/// Casts the borrow to a pointer.
#[inline]
pub fn as_ptr(this: &Self) -> &RelativeDirectPointerNonNull<T, Offset> |
}
| {
&this.ptr
} |
cors.go | // Package pcors provides implementation of fully permissive CORS middleware.
//
// This package is intended for web services that do not require strict
// cross-origin resource sharing rules and would like to expose API endpoints to
// all origins, allowing all methods and all request headers, including credentials,
// with minimal configuration. The CORS pre-flight response also hints the client
// to cache it for up to 24 hours.
//
// Example usage:
// package main
//
// import (
// "net/http"
//
// "github.com/fln/pcors"
// )
//
// func main() {
// http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
// w.Write([]byte("Hello world!"))
// })
// http.ListenAndServe(":8123", pcors.Default(http.DefaultServeMux))
// }
package pcors
import (
"net/http"
"strings"
)
type cors struct {
exposeHeaders string
next http.Handler
}
// ExposeHeaders generates permissive CORS middleware similar to the Default one,
// but also exposes the given list of response headers.
func ExposeHeaders(headers ...string) func(http.Handler) http.Handler {
return func(h http.Handler) http.Handler {
return &cors{
exposeHeaders: strings.Join(headers, ", "),
next: h,
}
}
}
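// Example usage (a minimal sketch; the handler and header name are illustrative):
//   mux := http.NewServeMux()
//   mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
//   	w.Header().Set("X-Request-Id", "42")
//   	w.Write([]byte("Hello world!"))
//   })
//   http.ListenAndServe(":8123", pcors.ExposeHeaders("X-Request-Id")(mux))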
// Default is a standard permissive CORS middleware. It will allow all
// cross-origin requests. However, this middleware will not expose any of the
// response headers.
func | (h http.Handler) http.Handler {
return &cors{
exposeHeaders: "",
next: h,
}
}
// ServeHTTP implements http.Handler interface.
func (c *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) {
origin := r.Header.Get("Origin")
if origin == "" {
c.next.ServeHTTP(w, r)
return
}
// Common headers
w.Header().Set("Access-Control-Allow-Origin", origin)
w.Header().Add("Vary", "Origin")
w.Header().Set("Access-Control-Allow-Credentials", "true")
requestMethod := r.Header.Get("Access-Control-Request-Method")
// Preflight request case
if r.Method == "OPTIONS" && requestMethod != "" {
w.Header().Set("Access-Control-Allow-Methods", requestMethod)
w.Header().Add("Vary", "Access-Control-Request-Method")
if headers := r.Header.Get("Access-Control-Request-Headers"); headers != "" {
w.Header().Set("Access-Control-Allow-Headers", headers)
}
w.Header().Add("Vary", "Access-Control-Request-Headers")
w.Header().Set("Access-Control-Max-Age", "86400") // 24 hours
w.WriteHeader(http.StatusNoContent)
return
}
// Normal request case
if c.exposeHeaders != "" {
w.Header().Set("Access-Control-Expose-Headers", c.exposeHeaders)
}
c.next.ServeHTTP(w, r)
}
| Default |
joinaggregate.py | import altair as alt
import pandas as pd
from .visitor import visit
from .aggregate import AGG_REPLACEMENTS
@visit.register(alt.JoinAggregateTransform)
def visit_joinaggregate(
transform: alt.JoinAggregateTransform, df: pd.DataFrame
) -> pd.DataFrame:
transform = transform.to_dict()
groupby = transform.get("groupby")
for aggregate in transform["joinaggregate"]:
op = aggregate["op"]
field = aggregate["field"]
col = aggregate["as"]
op = AGG_REPLACEMENTS.get(op, op)
if field == "*" and field not in df.columns:
field = df.columns[0]
if groupby is None:
df[col] = df[field].aggregate(op)
else:
result = df.groupby(groupby)[field].aggregate(op) | result.name = col
df = df.join(result, on=groupby)
return df |
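# Minimal illustration of the behaviour above (column names are made up): for a
# transform equivalent to
#   {"joinaggregate": [{"op": "mean", "field": "x", "as": "mean_x"}], "groupby": ["g"]}
# applied to a frame with g = ["a", "a", "b"] and x = [1, 3, 5], the visitor joins
# a new "mean_x" column with values [2.0, 2.0, 5.0] while keeping every original row.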
|
unsubscribe.js | /**
* Copyright 2015, Yahoo! Inc. |
var removers = globalVars.removers;
/**
* Unsubscribe from UI events.
* @method unsubscribe
* @param {String} type - The type of event.
* @param {Function} cb - The callback function.
*/
function unsubscribe(type, cb) {
var remover = void 0;
for (var i = removers.length - 1; i >= 0; i -= 1) {
remover = removers[i];
if (remover._cb === cb && remover._type.indexOf(type) >= 0) {
remover.unsubscribe();
removers.splice(i, 1);
}
}
}
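// Illustrative usage (assumes a callback previously registered through the
// library's subscribe API for the same event type):
//   unsubscribe('scroll', onScroll); // removes every matching remover for 'scroll' + onScroll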
export default unsubscribe; | * Copyrights licensed under the New BSD License. See the accompanying LICENSE file for terms.
*/
import globalVars from './globalVars'; |
config.go | package zgrab2
import (
"net"
"net/http"
"os"
"runtime"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
)
// Config holds the high-level framework options that will be parsed
// from the command line.
type Config struct {
OutputFileName string `short:"o" long:"output-file" default:"-" description:"Output filename, use - for stdout"`
InputFileName string `short:"f" long:"input-file" default:"-" description:"Input filename, use - for stdin"`
MetaFileName string `short:"m" long:"metadata-file" default:"-" description:"Metadata filename, use - for stderr"`
LogFileName string `short:"l" long:"log-file" default:"-" description:"Log filename, use - for stderr"`
LocalAddress string `long:"source-ip" description:"Local source IP address to use for making connections"`
Senders int `short:"s" long:"senders" default:"1000" description:"Number of send goroutines to use"`
Debug bool `long:"debug" description:"Include debug fields in the output."`
GOMAXPROCS int `long:"gomaxprocs" default:"0" description:"Set GOMAXPROCS"`
ConnectionsPerHost int `long:"connections-per-host" default:"1" description:"Number of times to connect to each host (results in more output)"`
ReadLimitPerHost int `long:"read-limit-per-host" default:"96" description:"Maximum total kilobytes to read for a single host (default 96kb)"`
Prometheus string `long:"prometheus" description:"Address to use for Prometheus server (e.g. localhost:8080). If empty, Prometheus is disabled."`
Multiple MultipleCommand `command:"multiple" description:"Multiple module actions"`
inputFile *os.File
outputFile *os.File
metaFile *os.File
logFile *os.File
inputTargets InputTargetsFunc
outputResults OutputResultsFunc
localAddr *net.TCPAddr
}
// SetInputFunc sets the target input function to the provided function.
func SetInputFunc(f InputTargetsFunc) {
config.inputTargets = f
}
// SetOutputFunc sets the result output function to the provided function.
func | (f OutputResultsFunc) {
config.outputResults = f
}
func init() {
config.Multiple.ContinueOnError = true // set default for multiple value
config.Multiple.BreakOnSuccess = false // set default for multiple value
}
var config Config
func validateFrameworkConfiguration() {
// validate files
if config.LogFileName == "-" {
config.logFile = os.Stderr
} else {
var err error
if config.logFile, err = os.Create(config.LogFileName); err != nil {
log.Fatal(err)
}
log.SetOutput(config.logFile)
}
SetInputFunc(InputTargetsCSV)
if config.LocalAddress != "" {
parsed := net.ParseIP(config.LocalAddress)
if parsed == nil {
log.Fatalf("Error parsing local interface %s as IP", config.LocalAddress)
}
config.localAddr = &net.TCPAddr{parsed, 0, ""}
}
if config.InputFileName == "-" {
config.inputFile = os.Stdin
} else {
var err error
if config.inputFile, err = os.Open(config.InputFileName); err != nil {
log.Fatal(err)
}
}
if config.OutputFileName == "-" {
config.outputFile = os.Stdout
} else {
var err error
if config.outputFile, err = os.Create(config.OutputFileName); err != nil {
log.Fatal(err)
}
}
outputFunc := OutputResultsWriterFunc(config.outputFile)
SetOutputFunc(outputFunc)
if config.MetaFileName == "-" {
config.metaFile = os.Stderr
} else {
var err error
if config.metaFile, err = os.Create(config.MetaFileName); err != nil {
log.Fatal(err)
}
}
// Validate Go Runtime config
if config.GOMAXPROCS < 0 {
log.Fatal("invalid GOMAXPROCS (must be positive, given %d)", config.GOMAXPROCS)
}
runtime.GOMAXPROCS(config.GOMAXPROCS)
//validate/start prometheus
if config.Prometheus != "" {
go func() {
http.Handle("metrics", promhttp.Handler())
if err := http.ListenAndServe(config.Prometheus, nil); err != nil {
log.Fatalf("could not run prometheus server: %s", err.Error())
}
}()
}
//validate senders
if config.Senders <= 0 {
log.Fatalf("need at least one sender, given %d", config.Senders)
}
// validate connections per host
if config.ConnectionsPerHost <= 0 {
log.Fatalf("need at least one connection, given %d", config.ConnectionsPerHost)
}
// Stop the lowliest idiot from using this to DoS people
if config.ConnectionsPerHost > 50 {
log.Fatalf("connectionsPerHost must be in the range [0,50]")
}
// Stop even third-party libraries from performing unbounded reads on untrusted hosts
if config.ReadLimitPerHost > 0 {
DefaultBytesReadLimit = config.ReadLimitPerHost * 1024
}
}
// GetMetaFile returns the file to which metadata should be output
func GetMetaFile() *os.File {
return config.metaFile
}
func includeDebugOutput() bool {
return config.Debug
}
| SetOutputFunc |
environment.py | import os
import uuid
import yaml
from sceptre_template_fetcher.cli import setup_logging
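# behave environment hooks: each run gets a unique project code, which update_config writes into the Sceptre project's config.yaml.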
def before_all(context):
if context.config.wip:
setup_logging(True)
context.uuid = uuid.uuid1().hex
context.project_code = "sceptre-integration-tests-{0}".format(
context.uuid
)
context.sceptre_dir = os.path.join(
os.getcwd(), "integration-tests", "sceptre-project"
)
update_config(context)
def before_scenario(context, scenario):
context.error = None
context.response = None
context.output = None
def | (context):
config_path = os.path.join(
context.sceptre_dir, "config", "config.yaml"
)
with open(config_path) as config_file:
env_config = yaml.safe_load(config_file)
env_config["project_code"] = context.project_code
with open(config_path, 'w') as config_file:
yaml.safe_dump(env_config, config_file, default_flow_style=False)
def after_all(context):
update_config(context)
| update_config |
rc_mod_fix.rs | use std::rc::Rc;
use std::cell::RefCell;
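// Rc<RefCell<T>> combines shared ownership (Rc) with interior mutability (RefCell),
// so the value can be mutated through any of its clones.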
fn main() {
    // Allocate an i32 with interior mutability on the heap --- (*1)
let a = Rc::new(RefCell::new(1000));
    // Increment the reference count --- (*2)
let b = Rc::clone(&a);
    // Mutate the value --- (*3)
    *b.borrow_mut() += 100; | } | // The value has been changed --- (*4)
    println!("{}", a.borrow()); |
size_defined_by_child_with_border.rs | #[test]
fn size_defined_by_child_with_border() | {
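    // A 10x10 child plus a 10pt border on every side should give a 30x30 parent,
    // with the child offset to (10, 10).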
let layout = stretch::node::Node::new(
stretch::style::Style {
border: stretch::geometry::Rect {
start: stretch::style::Dimension::Points(10f32),
end: stretch::style::Dimension::Points(10f32),
top: stretch::style::Dimension::Points(10f32),
bottom: stretch::style::Dimension::Points(10f32),
..Default::default()
},
..Default::default()
},
vec![&stretch::node::Node::new(
stretch::style::Style {
size: stretch::geometry::Size {
width: stretch::style::Dimension::Points(10f32),
height: stretch::style::Dimension::Points(10f32),
..Default::default()
},
..Default::default()
},
vec![],
)],
)
.compute_layout(stretch::geometry::Size::undefined())
.unwrap();
assert_eq!(layout.size.width, 30f32);
assert_eq!(layout.size.height, 30f32);
assert_eq!(layout.location.x, 0f32);
assert_eq!(layout.location.y, 0f32);
assert_eq!(layout.children[0usize].size.width, 10f32);
assert_eq!(layout.children[0usize].size.height, 10f32);
assert_eq!(layout.children[0usize].location.x, 10f32);
assert_eq!(layout.children[0usize].location.y, 10f32);
} |