file_name (stringlengths 3–137) | prefix (stringlengths 0–918k) | suffix (stringlengths 0–962k) | middle (stringlengths 0–812k)
---|---|---|---|
__init__.py | from sciwing.modules.embedders import *
from sciwing.modules.bow_encoder import BOW_Encoder
from sciwing.modules.lstm2vecencoder import LSTM2VecEncoder
from sciwing.modules.lstm2seqencoder import Lstm2SeqEncoder
from sciwing.modules.charlstm_encoder import CharLSTMEncoder
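A one-line usage sketch of what these re-exports enable (hypothetical; it assumes the sciwing package is installed):

```python
# Thanks to the re-exports above, encoders are importable from the package root.
from sciwing.modules import BOW_Encoder, LSTM2VecEncoder, Lstm2SeqEncoder, CharLSTMEncoder
```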
cast.rs | use std::convert::TryFrom;
use rustc::ty::adjustment::PointerCast;
use rustc::ty::layout::{self, Size, TyLayout};
use rustc::ty::{self, Ty, TypeAndMut, TypeFoldable};
use rustc_ast::ast::FloatTy;
use rustc_span::symbol::sym;
use rustc_target::abi::LayoutOf;
use rustc::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
use rustc::mir::CastKind;
use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::{Float, FloatConvert};
use super::{FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy};
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn cast(
&mut self,
src: OpTy<'tcx, M::PointerTag>,
kind: CastKind,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
use rustc::mir::CastKind::*;
match kind {
Pointer(PointerCast::Unsize) => {
self.unsize_into(src, dest)?;
}
Misc
| Pointer(PointerCast::MutToConstPointer)
| Pointer(PointerCast::ArrayToPointer) => {
let src = self.read_immediate(src)?;
let res = self.cast_immediate(src, dest.layout)?;
self.write_immediate(res, dest)?;
}
Pointer(PointerCast::ReifyFnPointer) => {
// The src operand does not matter, just its type
match src.layout.ty.kind {
ty::FnDef(def_id, substs) => {
// All reifications must be monomorphic, bail out otherwise.
if src.layout.ty.needs_subst() {
throw_inval!(TooGeneric);
}
if self.tcx.has_attr(def_id, sym::rustc_args_required_const) {
bug!("reifying a fn ptr that requires const arguments");
}
let instance = ty::Instance::resolve_for_fn_ptr(
*self.tcx,
self.param_env,
def_id,
substs,
)
.ok_or_else(|| err_inval!(TooGeneric))?;
let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
self.write_scalar(fn_ptr, dest)?;
}
_ => bug!("reify fn pointer on {:?}", src.layout.ty),
}
}
Pointer(PointerCast::UnsafeFnPointer) => {
let src = self.read_immediate(src)?;
match dest.layout.ty.kind {
ty::FnPtr(_) => {
// No change to value
self.write_immediate(*src, dest)?;
}
_ => bug!("fn to unsafe fn cast on {:?}", dest.layout.ty),
}
}
Pointer(PointerCast::ClosureFnPointer(_)) => {
// The src operand does not matter, just its type
match src.layout.ty.kind {
ty::Closure(def_id, substs) => {
// All reifications must be monomorphic, bail out otherwise.
if src.layout.ty.needs_subst() {
throw_inval!(TooGeneric);
}
let instance = ty::Instance::resolve_closure(
*self.tcx,
def_id,
substs,
ty::ClosureKind::FnOnce,
);
let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
self.write_scalar(fn_ptr, dest)?;
}
_ => bug!("closure fn pointer on {:?}", src.layout.ty),
}
}
}
Ok(())
}
fn cast_immediate(
&self,
src: ImmTy<'tcx, M::PointerTag>,
dest_layout: TyLayout<'tcx>,
) -> InterpResult<'tcx, Immediate<M::PointerTag>> {
use rustc::ty::TyKind::*;
trace!("Casting {:?}: {:?} to {:?}", *src, src.layout.ty, dest_layout.ty);
match src.layout.ty.kind {
// Floating point
Float(FloatTy::F32) => {
return Ok(self
.cast_from_float(src.to_scalar()?.to_f32()?, dest_layout.ty)?
.into());
}
Float(FloatTy::F64) => {
return Ok(self
.cast_from_float(src.to_scalar()?.to_f64()?, dest_layout.ty)?
.into());
}
// The rest is integer/pointer-"like", including fn ptr casts and casts from enums that
// are represented as integers.
_ => assert!(
src.layout.ty.is_bool()
|| src.layout.ty.is_char()
|| src.layout.ty.is_enum()
|| src.layout.ty.is_integral()
|| src.layout.ty.is_any_ptr(),
"Unexpected cast from type {:?}",
src.layout.ty
),
}
// Handle cast from a univariant (ZST) enum.
match src.layout.variants {
layout::Variants::Single { index } => {
if let Some(discr) = src.layout.ty.discriminant_for_variant(*self.tcx, index) {
assert!(src.layout.is_zst());
let discr_layout = self.layout_of(discr.ty)?;
return Ok(self
.cast_from_int_like(discr.val, discr_layout, dest_layout)?
.into());
}
}
layout::Variants::Multiple { .. } => {}
}
// Handle casting the metadata away from a fat pointer.
if src.layout.ty.is_unsafe_ptr()
&& dest_layout.ty.is_unsafe_ptr()
&& dest_layout.size != src.layout.size
{
assert_eq!(src.layout.size, 2 * self.memory.pointer_size());
assert_eq!(dest_layout.size, self.memory.pointer_size());
assert!(dest_layout.ty.is_unsafe_ptr());
match *src {
Immediate::ScalarPair(data, _) => return Ok(data.into()),
Immediate::Scalar(..) => bug!(
"{:?} input to a fat-to-thin cast ({:?} -> {:?})",
*src,
src.layout.ty,
dest_layout.ty
),
};
}
// Handle casting any ptr to raw ptr (might be a fat ptr).
if src.layout.ty.is_any_ptr() && dest_layout.ty.is_unsafe_ptr() {
// The only possible size-unequal case was handled above.
assert_eq!(src.layout.size, dest_layout.size);
return Ok(*src);
}
// For all remaining casts, we either
// (a) cast a raw ptr to usize, or
// (b) cast from an integer-like (including bool, char, enums).
// In both cases we want the bits.
let bits = self.force_bits(src.to_scalar()?, src.layout.size)?;
Ok(self.cast_from_int_like(bits, src.layout, dest_layout)?.into())
}
fn cast_from_int_like(
&self,
v: u128, // raw bits
src_layout: TyLayout<'tcx>,
dest_layout: TyLayout<'tcx>,
) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
// Let's make sure v is sign-extended *if* it has a signed type.
let signed = src_layout.abi.is_signed();
let v = if signed { self.sign_extend(v, src_layout) } else { v };
trace!("cast_from_int: {}, {}, {}", v, src_layout.ty, dest_layout.ty);
use rustc::ty::TyKind::*;
match dest_layout.ty.kind {
Int(_) | Uint(_) | RawPtr(_) => {
let v = self.truncate(v, dest_layout);
Ok(Scalar::from_uint(v, dest_layout.size))
}
Float(FloatTy::F32) if signed => {
Ok(Scalar::from_f32(Single::from_i128(v as i128).value))
}
Float(FloatTy::F64) if signed => {
Ok(Scalar::from_f64(Double::from_i128(v as i128).value))
}
Float(FloatTy::F32) => Ok(Scalar::from_f32(Single::from_u128(v).value)),
Float(FloatTy::F64) => Ok(Scalar::from_f64(Double::from_u128(v).value)),
Char => {
// `u8` to `char` cast
Ok(Scalar::from_uint(u8::try_from(v).unwrap(), Size::from_bytes(4)))
}
// Casts to bool are not permitted by rustc, no need to handle them here.
_ => bug!("invalid int to {:?} cast", dest_layout.ty),
}
}
fn cast_from_float<F>(
&self,
f: F,
dest_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Scalar<M::PointerTag>>
where
F: Float + Into<Scalar<M::PointerTag>> + FloatConvert<Single> + FloatConvert<Double>,
{
use rustc::ty::TyKind::*;
match dest_ty.kind {
// float -> uint
Uint(t) => {
let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits());
let v = f.to_u128(usize::try_from(width).unwrap()).value;
// This should already fit the bit width
Ok(Scalar::from_uint(v, Size::from_bits(width)))
}
// float -> int
Int(t) => {
let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits());
let v = f.to_i128(usize::try_from(width).unwrap()).value;
Ok(Scalar::from_int(v, Size::from_bits(width)))
}
// float -> f32
Float(FloatTy::F32) => Ok(Scalar::from_f32(f.convert(&mut false).value)),
// float -> f64
Float(FloatTy::F64) => Ok(Scalar::from_f64(f.convert(&mut false).value)),
// That's it.
_ => bug!("invalid float to {:?} cast", dest_ty),
}
}
fn unsize_into_ptr(
&mut self,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
// The pointee types
source_ty: Ty<'tcx>,
dest_ty: Ty<'tcx>,
) -> InterpResult<'tcx> {
// A<Struct> -> A<Trait> conversion
let (src_pointee_ty, dest_pointee_ty) =
self.tcx.struct_lockstep_tails_erasing_lifetimes(source_ty, dest_ty, self.param_env);
match (&src_pointee_ty.kind, &dest_pointee_ty.kind) {
(&ty::Array(_, length), &ty::Slice(_)) => {
let ptr = self.read_immediate(src)?.to_scalar()?;
// u64 cast is from usize to u64, which is always good
let val = Immediate::new_slice(
ptr,
length.eval_usize(self.tcx.tcx, self.param_env),
self,
);
self.write_immediate(val, dest)
}
(&ty::Dynamic(..), &ty::Dynamic(..)) => {
// For now, upcasts are limited to changes in marker
// traits, and hence never actually require an actual
// change to the vtable.
let val = self.read_immediate(src)?;
self.write_immediate(*val, dest)
}
(_, &ty::Dynamic(ref data, _)) => {
// Initial cast from sized to dyn trait
let vtable = self.get_vtable(src_pointee_ty, data.principal())?;
let ptr = self.read_immediate(src)?.to_scalar()?;
let val = Immediate::new_dyn_trait(ptr, vtable);
self.write_immediate(val, dest)
}
_ => bug!("invalid unsizing {:?} -> {:?}", src.layout.ty, dest.layout.ty),
}
}
fn unsize_into(
&mut self,
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
trace!("Unsizing {:?} into {:?}", src, dest);
match (&src.layout.ty.kind, &dest.layout.ty.kind) {
(&ty::Ref(_, s, _), &ty::Ref(_, d, _))
| (&ty::Ref(_, s, _), &ty::RawPtr(TypeAndMut { ty: d, .. }))
| (&ty::RawPtr(TypeAndMut { ty: s, .. }), &ty::RawPtr(TypeAndMut { ty: d, .. })) => {
self.unsize_into_ptr(src, dest, s, d)
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
if def_a.is_box() || def_b.is_box() {
if !def_a.is_box() || !def_b.is_box() {
bug!("invalid unsizing between {:?} -> {:?}", src.layout, dest.layout);
}
return self.unsize_into_ptr(
src,
dest,
src.layout.ty.boxed_ty(),
dest.layout.ty.boxed_ty(),
);
}
// unsizing of generic struct with pointer fields
// Example: `Arc<T>` -> `Arc<Trait>`
// here we need to increase the size of every &T thin ptr field to a fat ptr
for i in 0..src.layout.fields.count() {
let dst_field = self.place_field(dest, i)?;
if dst_field.layout.is_zst() {
continue;
}
let src_field = self.operand_field(src, i)?;
if src_field.layout.ty == dst_field.layout.ty {
self.copy_op(src_field, dst_field)?;
} else {
self.unsize_into(src_field, dst_field)?;
}
}
Ok(())
}
_ => bug!("unsize_into: invalid conversion: {:?} -> {:?}", src.layout, dest.layout),
}
}
}
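At its core, the integer path of `cast_from_int_like` is: sign-extend the raw bits if the source type is signed, then truncate to the destination width. A standalone sketch of just that bit-level semantics, independent of the interpreter's `Scalar` and layout machinery (the helper names are made up for illustration):

```rust
// Minimal model of `cast_from_int_like`'s bit handling. `bits` holds the value
// zero-extended into a u128; `size` is the type's width in bits (1..=128).
fn sign_extend(bits: u128, size: u32) -> u128 {
    let shift = 128 - size;
    // Shift the sign bit to the top, then arithmetic-shift back down.
    (((bits << shift) as i128) >> shift) as u128
}

fn truncate(bits: u128, size: u32) -> u128 {
    let shift = 128 - size;
    (bits << shift) >> shift
}

fn main() {
    // -1i8 is stored as raw bits 0xFF; casting it to u16 yields 0xFFFF.
    let extended = sign_extend(0xFF, 8);
    assert_eq!(truncate(extended, 16), 0xFFFF);
    // Casting 0x1234u16 down to u8 keeps only the low byte.
    assert_eq!(truncate(0x1234, 8), 0x34);
    println!("int-like cast semantics hold");
}
```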
cgsms_client.py | import logging
import requests
from django.conf import settings
from .base import BaseSmsClient
logger = logging.getLogger("notifier")
class CGSmsClient(BaseSmsClient):
@classmethod
def send(cls, number: str, text: str, **kwargs):
sub_account = settings.NOTIFIER["SMS"]["GATEWAYS"]["CGS"]["SUB_ACCOUNT"]
sub_account_pass = settings.NOTIFIER["SMS"]["GATEWAYS"]["CGS"]["SUB_ACCOUNT_PASSWORD"]
params = {
"sub_account": sub_account,
"sub_account_pass": sub_account_pass,
"action": "send_sms",
"message": text,
"recipients": number,
}
res = requests.get("http://cheapglobalsms.com/api_v1", params=params)
return res
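A minimal usage sketch, assuming Django settings carry the CGS gateway credentials (all values below are placeholders):

```python
# Hypothetical settings.py fragment that CGSmsClient.send() reads:
NOTIFIER = {
    "SMS": {
        "GATEWAYS": {
            "CGS": {
                "SUB_ACCOUNT": "example-sub-account",        # placeholder
                "SUB_ACCOUNT_PASSWORD": "example-password",  # placeholder
            }
        }
    }
}

# With settings configured, sending delegates straight to the HTTP API;
# the returned object is the raw requests.Response.
# res = CGSmsClient.send("2348012345678", "Hello from the notifier")
# print(res.status_code, res.text)
```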
views.py | from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse, reverse_lazy
from django.db.models import Q
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404, redirect
from django.views import View
from django.views.generic import (
CreateView,
DetailView,
ListView,
TemplateView,
UpdateView
)
from django.views.generic.edit import FormView, FormMixin
from django.views.generic.detail import SingleObjectMixin
from comments.forms import CommentForm
from comments.models import Comment
from .forms import (
RestaurantCreateForm,
RestaurantLocationCreateForm,
RestaurantSearchForm
)
from .models import RestaurantLocations
# Create your views here.
class RestaurantListView(ListView):
template_name = 'restaurants/restaurants_list_all.html'
paginate_by = 10
form_class = RestaurantSearchForm
def get_queryset(self):
query = self.request.GET.get('q')
queryset = RestaurantLocations.objects.search(query)
return queryset
class RestaurantDetailView(DetailView, FormView):
#form_class = CommentForm
template_name = 'restaurants/restaurantlocations_detail.html'
queryset = RestaurantLocations.objects.all()
# def get_queryset(self):
# queryset = RestaurantLocations.objects.all()
# return queryset
def get_context_data(self, **kwargs):
context = super(RestaurantDetailView, self).get_context_data(**kwargs)
context['comments'] = Comment.objects.filter(object_id=self.object.id)
return context
def render(self, request):
objects = get_object_or_404(RestaurantLocations, slug=self.kwargs.get('slug'))
comments = Comment.objects.filter(object_id=objects.id)
return render(request, 'restaurants/restaurantlocations_detail.html', {'comment_form': self.form, 'comments':comments, 'object':objects})
def get(self, request, *args, **kwargs):
self.object = self.get_object()
# initial_data = {
# "content_type": self.object.get_content_type,
# "object_id": self.object.id
# }
self.form = CommentForm(initial={"content_type": self.object.get_content_type,"object_id": self.object.id})
return self.render(request)
#return super(RestaurantDetailView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
if not request.user.is_authenticated:
return HttpResponseForbidden()
self.object = self.get_object()
self.form = CommentForm(request.POST or None)
form = self.form
if form.is_valid() and request.user.is_authenticated:
c_type = form.cleaned_data["content_type"]
content_qs = ContentType.objects.filter(app_label ='restaurants')
content_type = content_qs.get(model='restaurantlocations')
obj_id = form.cleaned_data['object_id']
content_data = form.cleaned_data["content"]
parent_obj = None
try:
parent_id = int(request.POST.get("parent_id"))
except (TypeError, ValueError):
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists() and parent_qs.count() == 1:
parent_obj = parent_qs.first()
new_comment, created = Comment.objects.get_or_create(
user = request.user,
content_type= content_type,
object_id = obj_id,
content = content_data,
parent = parent_obj,
)
return HttpResponseRedirect(new_comment.get_absolute_url())
else:
return self.render(request)
#return super(RetaurantComment, self).post(request, *args, **kwargs)
class MyRestaurantListView(LoginRequiredMixin, ListView):
template_name = 'restaurants/restaurants_list.html'
paginate_by = 10
def get_queryset(self):
return RestaurantLocations.objects.filter(owner=self.request.user)
class RestaurantCreateView(LoginRequiredMixin, CreateView):
form_class = RestaurantLocationCreateForm
template_name = 'form.html'
# success_url = '/restaurants/'
login_url = '/login/'
def form_valid(self, form):
instance = form.save(commit=False)
instance.owner = self.request.user
return super(RestaurantCreateView, self).form_valid(form)
def get_context_data(self, *args, **kwargs):
context = super(RestaurantCreateView, self).get_context_data(*args, **kwargs)
context['title'] = 'Add Restaurant'
return context
class RestaurantUpdateView(LoginRequiredMixin, UpdateView):
form_class = RestaurantLocationCreateForm
template_name = 'restaurants/detail-update.html'
# success_url = '/restaurants/'
login_url = '/login/'
def get_context_data(self, *args, **kwargs):
context = super(RestaurantUpdateView, self).get_context_data(*args, **kwargs)
name = self.get_object().name
context['title'] = '{} {}'.format('Update Restaurant: ', name)
return context
def get_queryset(self):
return RestaurantLocations.objects.filter(owner=self.request.user)
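A plausible URL configuration wiring these views together (a sketch only; route names and the slug pattern are assumptions, written in the Django 1.x style that the `django.core.urlresolvers` import above suggests):

```python
# urls.py (hypothetical wiring for the views above)
from django.conf.urls import url
from .views import (
    RestaurantListView,
    RestaurantDetailView,
    RestaurantCreateView,
    RestaurantUpdateView,
)

urlpatterns = [
    url(r'^$', RestaurantListView.as_view(), name='list'),
    url(r'^create/$', RestaurantCreateView.as_view(), name='create'),
    url(r'^(?P<slug>[\w-]+)/$', RestaurantDetailView.as_view(), name='detail'),
    url(r'^(?P<slug>[\w-]+)/edit/$', RestaurantUpdateView.as_view(), name='edit'),
]
```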
Parser.js | /*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Tobias Koppers @sokra
*/
var esprima = require("esprima");
var Tapable = require("tapable");
var BasicEvaluatedExpression = require("./BasicEvaluatedExpression");
function Parser(options) {
Tapable.call(this);
this.options = options;
this.initializeEvaluating();
}
module.exports = Parser;
// Syntax: https://developer.mozilla.org/en/SpiderMonkey/Parser_API
Parser.prototype = Object.create(Tapable.prototype);
Parser.prototype.initializeEvaluating = function() {
function joinRanges(startRange, endRange) {
if(!endRange) return startRange;
if(!startRange) return endRange;
return [startRange[0], endRange[1]];
}
this.plugin("evaluate Literal", function(expr) {
switch(typeof expr.value) {
case "number":
return new BasicEvaluatedExpression().setNumber(expr.value).setRange(expr.range);
case "string":
return new BasicEvaluatedExpression().setString(expr.value).setRange(expr.range);
case "boolean":
return new BasicEvaluatedExpression().setBoolean(expr.value).setRange(expr.range);
}
if(expr.value === null)
return new BasicEvaluatedExpression().setNull().setRange(expr.range);
if(expr.value instanceof RegExp)
return new BasicEvaluatedExpression().setRegExp(expr.value).setRange(expr.range);
});
this.plugin("evaluate LogicalExpression", function(expr) {
if(expr.operator === "&&") {
var left = this.evaluateExpression(expr.left);
var leftAsBool = left && left.asBool();
if(leftAsBool === false) return left.setRange(expr.range);
if(leftAsBool !== true) return;
var right = this.evaluateExpression(expr.right);
return right.setRange(expr.range);
} else if(expr.operator === "||") {
var left = this.evaluateExpression(expr.left);
var leftAsBool = left && left.asBool();
if(leftAsBool === true) return left.setRange(expr.range);
if(leftAsBool !== false) return;
var right = this.evaluateExpression(expr.right);
return right.setRange(expr.range);
}
});
this.plugin("evaluate BinaryExpression", function(expr) {
if(expr.operator === "+") {
var left = this.evaluateExpression(expr.left);
var right = this.evaluateExpression(expr.right);
if(!left || !right) return;
var res = new BasicEvaluatedExpression()
if(left.isString()) {
if(right.isString()) {
res.setString(left.string + right.string);
} else if(right.isNumber()) {
res.setString(left.string + right.number);
} else if(right.isWrapped() && right.prefix && right.prefix.isString()) {
res.setWrapped(
new BasicEvaluatedExpression()
.setString(left.string + right.prefix.string)
.setRange(joinRanges(left.range, right.prefix.range)),
right.postfix);
} else {
res.setWrapped(left, null)
}
} else if(left.isNumber()) {
if(right.isString()) {
res.setString(left.number + right.string);
} else if(right.isNumber()) {
res.setNumber(left.number + right.number);
}
} else if(left.isWrapped()) {
if(left.postfix && left.postfix.isString() && right.isString()) {
res.setWrapped(left.prefix,
new BasicEvaluatedExpression()
.setString(left.postfix.string + right.string)
.setRange(joinRanges(left.postfix.range, right.range))
);
} else if(left.postfix && left.postfix.isString() && right.isNumber()) {
res.setWrapped(left.prefix,
new BasicEvaluatedExpression()
.setString(left.postfix.string + right.number)
.setRange(joinRanges(left.postfix.range, right.range))
);
} else if(right.isString()) {
res.setWrapped(left.prefix, right);
} else if(right.isNumber()) {
res.setWrapped(left.prefix,
new BasicEvaluatedExpression()
.setString(right.number + "")
.setRange(right.range));
} else {
res.setWrapped(left.prefix, new BasicEvaluatedExpression());
}
} else {
if(right.isString()) {
res.setWrapped(null, right);
}
}
res.setRange(expr.range);
return res;
} else if(expr.operator === "-") {
var left = this.evaluateExpression(expr.left);
var right = this.evaluateExpression(expr.right);
if(!left || !right) return;
if(!left.isNumber() || !right.isNumber()) return;
var res = new BasicEvaluatedExpression();
res.setNumber(left.number - right.number);
res.setRange(expr.range);
return res;
} else if(expr.operator === "*") {
var left = this.evaluateExpression(expr.left);
var right = this.evaluateExpression(expr.right);
if(!left || !right) return;
if(!left.isNumber() || !right.isNumber()) return;
var res = new BasicEvaluatedExpression();
res.setNumber(left.number * right.number);
res.setRange(expr.range);
return res;
} else if(expr.operator === "/") {
var left = this.evaluateExpression(expr.left);
var right = this.evaluateExpression(expr.right);
if(!left || !right) return;
if(!left.isNumber() || !right.isNumber()) return;
var res = new BasicEvaluatedExpression();
res.setNumber(left.number / right.number);
res.setRange(expr.range);
return res;
} else if(expr.operator === "==" || expr.operator === "===") {
var left = this.evaluateExpression(expr.left);
var right = this.evaluateExpression(expr.right);
if(!left || !right) return;
var res = new BasicEvaluatedExpression();
res.setRange(expr.range);
if(left.isString() && right.isString()) {
return res.setBoolean(left.string === right.string);
} else if(left.isNumber() && right.isNumber()) {
return res.setBoolean(left.number === right.number);
} else if(left.isBoolean() && right.isBoolean()) {
return res.setBoolean(left.bool === right.bool);
}
} else if(expr.operator === "!=" || expr.operator === "!==") {
var left = this.evaluateExpression(expr.left);
var right = this.evaluateExpression(expr.right);
if(!left || !right) return;
var res = new BasicEvaluatedExpression();
res.setRange(expr.range);
if(left.isString() && right.isString()) {
return res.setBoolean(left.string !== right.string);
} else if(left.isNumber() && right.isNumber()) {
return res.setBoolean(left.number !== right.number);
} else if(left.isBoolean() && right.isBoolean()) {
return res.setBoolean(left.bool !== right.bool);
}
}
});
this.plugin("evaluate UnaryExpression", function(expr) {
if(expr.operator === "typeof") {
if(expr.argument.type === "Identifier") {
var name = this.scope.renames["$"+expr.argument.name] || expr.argument.name;
if(this.scope.definitions.indexOf(name) === -1) {
var res = this.applyPluginsBailResult("evaluate typeof " + name, expr);
if(res !== undefined) return res;
}
}
if(expr.argument.type === "MemberExpression") {
var expression = expr.argument;
var exprName = [];
while(expression.type === "MemberExpression" && !expression.computed) {
exprName.unshift(this.scope.renames["$"+expression.property.name] || expression.property.name);
expression = expression.object;
}
if(expression.type === "Identifier") {
exprName.unshift(this.scope.renames["$"+expression.name] || expression.name);
if(this.scope.definitions.indexOf(expression.name) === -1) {
exprName = exprName.join(".");
var res = this.applyPluginsBailResult("evaluate typeof " + exprName, expr);
if(res !== undefined) return res;
}
}
}
if(expr.argument.type === "FunctionExpression") {
return new BasicEvaluatedExpression().setString("function").setRange(expr.range);
}
var arg = this.evaluateExpression(expr.argument);
if(arg.isString() || arg.isWrapped()) return new BasicEvaluatedExpression().setString("string").setRange(expr.range);
else if(arg.isNumber()) return new BasicEvaluatedExpression().setString("number").setRange(expr.range);
else if(arg.isBoolean()) return new BasicEvaluatedExpression().setString("boolean").setRange(expr.range);
else if(arg.isArray() || arg.isConstArray() || arg.isRegExp()) return new BasicEvaluatedExpression().setString("object").setRange(expr.range);
} else if(expr.operator === "!") {
var argument = this.evaluateExpression(expr.argument);
if(!argument) return;
if(argument.isBoolean()) {
return new BasicEvaluatedExpression().setBoolean(!argument.bool).setRange(expr.range);
} else if(argument.isString()) {
return new BasicEvaluatedExpression().setBoolean(!argument.string).setRange(expr.range);
} else if(argument.isNumber()) {
return new BasicEvaluatedExpression().setBoolean(!argument.number).setRange(expr.range);
}
}
});
this.plugin("evaluate typeof undefined", function(expr) {
return new BasicEvaluatedExpression().setString("undefined").setRange(expr.range);
});
this.plugin("evaluate Identifier", function(expr) {
var name = this.scope.renames["$"+expr.name] || expr.name;
if(this.scope.definitions.indexOf(expr.name) === -1) {
var result = this.applyPluginsBailResult("evaluate Identifier " + name, expr);
if(result) return result;
return new BasicEvaluatedExpression().setIdentifier(name).setRange(expr.range);
} else {
return this.applyPluginsBailResult("evaluate defined Identifier " + name, expr);
}
});
this.plugin("evaluate MemberExpression", function(expression) {
var expr = expression;
var exprName = [];
while(expr.type === "MemberExpression" && !expr.computed) {
exprName.unshift(expr.property.name);
expr = expr.object;
}
if(expr.type === "Identifier") {
var name = this.scope.renames["$"+expr.name] || expr.name;
if(this.scope.definitions.indexOf(name) === -1) {
exprName.unshift(name);
exprName = exprName.join(".");
if(this.scope.definitions.indexOf(expr.name) === -1) {
var result = this.applyPluginsBailResult("evaluate Identifier " + exprName, expression); | }
}
}
});
this.plugin("evaluate CallExpression", function(expr) {
if(expr.callee.type !== "MemberExpression") return;
if(expr.callee.computed) return;
var param = this.evaluateExpression(expr.callee.object);
if(!param) return;
return this.applyPluginsBailResult("evaluate CallExpression ." + expr.callee.property.name, expr, param);
});
this.plugin("evaluate CallExpression .replace", function(expr, param) {
if(!param.isString()) return;
if(expr.arguments.length !== 2) return;
var arg1 = this.evaluateExpression(expr.arguments[0]);
var arg2 = this.evaluateExpression(expr.arguments[1]);
if(!arg1.isString() && !arg1.isRegExp()) return;
arg1 = arg1.regExp || arg1.string;
if(!arg2.isString()) return;
arg2 = arg2.string;
return new BasicEvaluatedExpression().setString(param.string.replace(arg1, arg2)).setRange(expr.range);
});
["substr", "substring"].forEach(function(fn) {
this.plugin("evaluate CallExpression ." + fn, function(expr, param) {
if(!param.isString()) return;
var result, str = param.string;
switch(expr.arguments.length) {
case 1:
var arg1 = this.evaluateExpression(expr.arguments[0]);
if(!arg1.isNumber()) return;
result = str[fn](arg1.number);
break;
case 2:
var arg1 = this.evaluateExpression(expr.arguments[0]);
var arg2 = this.evaluateExpression(expr.arguments[1]);
if(!arg1.isNumber()) return;
if(!arg2.isNumber()) return;
result = str[fn](arg1.number, arg2.number);
break;
default:
return;
}
return new BasicEvaluatedExpression().setString(result).setRange(expr.range);
});
}, this);
this.plugin("evaluate CallExpression .split", function(expr, param) {
if(!param.isString()) return;
if(expr.arguments.length !== 1) return;
var result;
var arg = this.evaluateExpression(expr.arguments[0]);
if(arg.isString()) {
result = param.string.split(arg.string);
} else if(arg.isRegExp()) {
result = param.string.split(arg.regExp);
} else return;
return new BasicEvaluatedExpression().setArray(result).setRange(expr.range);
});
this.plugin("evaluate ConditionalExpression", function(expr) {
var condition = this.evaluateExpression(expr.test);
var conditionValue = condition.asBool();
if(conditionValue === undefined) {
var consequent = this.evaluateExpression(expr.consequent);
var alternate = this.evaluateExpression(expr.alternate);
if(!consequent || !alternate) return;
var res = new BasicEvaluatedExpression();
if(consequent.isConditional())
res.setOptions(consequent.options);
else
res.setOptions([consequent]);
if(alternate.isConditional())
res.addOptions(alternate.options);
else
res.addOptions([alternate]);
} else {
var res = this.evaluateExpression(conditionValue ? expr.consequent : expr.alternate);
}
res.setRange(expr.range);
return res;
});
this.plugin("evaluate ArrayExpression", function(expr) {
var items = expr.elements.map(function(element) {
return element !== null && this.evaluateExpression(element);
}, this);
if(items.filter(function(i) { return !i; }).length > 0) return;
return new BasicEvaluatedExpression().setItems(items).setRange(expr.range);
});
};
Parser.prototype.getRenameIdentifier = function getRenameIdentifier(expr) {
var result = this.evaluateExpression(expr);
if(!result) return;
if(result.isIdentifier()) return result.identifier;
return;
};
Parser.prototype.walkStatements = function walkStatements(statements) {
statements.forEach(function(statement) {
this.walkStatement(statement);
}, this);
};
Parser.prototype.walkStatement = function walkStatement(statement) {
if(this.applyPluginsBailResult("statement", statement) !== undefined) return;
if(this["walk" + statement.type])
this["walk" + statement.type](statement);
};
// Real Statements
Parser.prototype.walkBlockStatement = function walkBlockStatement(statement) {
this.walkStatements(statement.body);
};
Parser.prototype.walkExpressionStatement = function walkExpressionStatement(statement) {
this.walkExpression(statement.expression);
};
Parser.prototype.walkIfStatement = function walkIfStatement(statement) {
var result = this.applyPluginsBailResult("statement if", statement);
if(result === undefined) {
this.walkExpression(statement.test);
this.walkStatement(statement.consequent);
if(statement.alternate)
this.walkStatement(statement.alternate);
} else {
if(result)
this.walkStatement(statement.consequent);
else if(statement.alternate)
this.walkStatement(statement.alternate);
}
};
Parser.prototype.walkLabeledStatement = function walkLabeledStatement(statement) {
var result = this.applyPluginsBailResult("label " + statement.label.name, statement);
if(result !== true)
this.walkStatement(statement.body);
};
Parser.prototype.walkWithStatement = function walkWithStatement(statement) {
this.walkExpression(statement.object);
this.walkStatement(statement.body);
};
Parser.prototype.walkSwitchStatement = function walkSwitchStatement(statement) {
this.walkExpression(statement.discriminant);
this.walkSwitchCases(statement.cases);
};
Parser.prototype.walkReturnStatement =
Parser.prototype.walkThrowStatement = function walkArgumentStatement(statement) {
if(statement.argument)
this.walkExpression(statement.argument);
};
Parser.prototype.walkTryStatement = function walkTryStatement(statement) {
if(this.scope.inTry) {
this.walkStatement(statement.block);
} else {
this.scope.inTry = true;
this.walkStatement(statement.block);
this.scope.inTry = false;
}
this.walkCatchClauses(statement.handlers);
if(statement.finalizer)
this.walkStatement(statement.finalizer);
};
Parser.prototype.walkWhileStatement =
Parser.prototype.walkDoWhileStatement = function walkLoopStatement(statement) {
this.walkExpression(statement.test);
this.walkStatement(statement.body);
};
Parser.prototype.walkForStatement = function walkForStatement(statement) {
if(statement.init) {
if(statement.init.type === "VariableDeclaration")
this.walkStatement(statement.init);
else
this.walkExpression(statement.init);
}
if(statement.test)
this.walkExpression(statement.test);
if(statement.update)
this.walkExpression(statement.update);
this.walkStatement(statement.body);
};
Parser.prototype.walkForInStatement = function walkForInStatement(statement) {
if(statement.left.type === "VariableDeclaration")
this.walkStatement(statement.left);
else
this.walkExpression(statement.left);
this.walkExpression(statement.right);
this.walkStatement(statement.body);
};
// Declarations
Parser.prototype.walkFunctionDeclaration = function walkFunctionDeclaration(statement) {
this.scope.renames["$"+statement.id.name] = undefined;
this.scope.definitions.push(statement.id.name);
this.inScope(statement.params, function() {
if(statement.body.type === "BlockStatement")
this.walkStatement(statement.body);
else
this.walkExpression(statement.body);
}.bind(this));
};
Parser.prototype.walkVariableDeclaration = function walkVariableDeclaration(statement) {
if(statement.declarations)
this.walkVariableDeclarators(statement.declarations);
};
Parser.prototype.walkSwitchCases = function walkSwitchCases(switchCases) {
switchCases.forEach(function(switchCase) {
if(switchCase.test)
this.walkExpression(switchCase.test);
this.walkStatements(switchCase.consequent);
}, this);
};
Parser.prototype.walkCatchClauses = function walkCatchClauses(catchClauses) {
catchClauses.forEach(function(catchClause) {
if(catchClause.guard)
this.walkExpression(catchClause.guard);
this.inScope([catchClause.param], function() {
this.walkStatement(catchClause.body);
}.bind(this));
}, this);
};
Parser.prototype.walkVariableDeclarators = function walkVariableDeclarators(declarators) {
declarators.forEach(function(declarator) {
switch(declarator.type) {
case "VariableDeclarator":
var renameIdentifier = declarator.init && this.getRenameIdentifier(declarator.init);
if(renameIdentifier && declarator.id.type === "Identifier" && this.applyPluginsBailResult("can-rename " + renameIdentifier, declarator.init)) {
// renaming with "var a = b;"
if(!this.applyPluginsBailResult("rename " + renameIdentifier, declarator.init)) {
this.scope.renames["$"+declarator.id.name] = this.scope.renames["$"+renameIdentifier] || renameIdentifier;
var idx = this.scope.definitions.indexOf(declarator.id.name);
if(idx >= 0) this.scope.definitions.splice(idx, 1);
}
} else if(declarator.id.type === "Identifier" && !this.applyPluginsBailResult("var " + declarator.id.name, declarator)) {
this.scope.renames["$"+declarator.id.name] = undefined;
this.scope.definitions.push(declarator.id.name);
if(declarator.init)
this.walkExpression(declarator.init);
} else {
this.walkExpression(declarator.id);
if(declarator.init)
this.walkExpression(declarator.init);
}
break;
}
}, this);
};
Parser.prototype.walkExpressions = function walkExpressions(expressions) {
expressions.forEach(function(expression) {
if(expression)
this.walkExpression(expression);
}, this);
};
Parser.prototype.walkExpression = function walkExpression(expression) {
if(this["walk" + expression.type])
return this["walk" + expression.type](expression);
};
Parser.prototype.walkArrayExpression = function walkArrayExpression(expression) {
if(expression.elements)
this.walkExpressions(expression.elements);
};
Parser.prototype.walkObjectExpression = function walkObjectExpression(expression) {
expression.properties.forEach(function(prop) {
this.walkExpression(prop.value);
}, this);
};
Parser.prototype.walkFunctionExpression = function walkFunctionExpression(expression) {
this.inScope(expression.params, function() {
if(expression.body.type === "BlockStatement")
this.walkStatement(expression.body);
else
this.walkExpression(expression.body);
}.bind(this));
};
Parser.prototype.walkSequenceExpression = function walkSequenceExpression(expression) {
if(expression.expressions)
this.walkExpressions(expression.expressions);
};
Parser.prototype.walkUpdateExpression = function walkUpdateExpression(expression) {
this.walkExpression(expression.argument);
};
Parser.prototype.walkUnaryExpression = function walkUnaryExpression(expression) {
if(expression.operator === "typeof") {
var expr = expression.argument;
var exprName = [];
while(expr.type === "MemberExpression" && !expr.computed) {
exprName.unshift(expr.property.name);
expr = expr.object;
}
if(expr.type === "Identifier" && this.scope.definitions.indexOf(expr.name) === -1) {
exprName.unshift(this.scope.renames["$"+expr.name] || expr.name);
exprName = exprName.join(".");
var result = this.applyPluginsBailResult("typeof " + exprName, expression);
if(result === true)
return;
}
}
this.walkExpression(expression.argument);
};
Parser.prototype.walkBinaryExpression =
Parser.prototype.walkLogicalExpression = function walkLeftRightExpression(expression) {
this.walkExpression(expression.left);
this.walkExpression(expression.right);
};
Parser.prototype.walkAssignmentExpression = function walkAssignmentExpression(expression) {
var renameIdentifier = this.getRenameIdentifier(expression.right);
if(expression.left.type === "Identifier" && renameIdentifier && this.applyPluginsBailResult("can-rename " + renameIdentifier, expression.right)) {
// renaming "a = b;"
if(!this.applyPluginsBailResult("rename " + renameIdentifier, expression.right)) {
this.scope.renames["$"+expression.left.name] = renameIdentifier;
var idx = this.scope.definitions.indexOf(expression.left.name);
if(idx >= 0) this.scope.definitions.splice(idx, 1);
}
} else if(expression.left.type === "Identifier") {
if(!this.applyPluginsBailResult("assigned " + expression.left.name, expression)) {
this.walkExpression(expression.right);
}
this.scope.renames["$"+expression.left.name] = undefined;
if(!this.applyPluginsBailResult("assign " + expression.left.name, expression)) {
this.walkExpression(expression.left);
}
} else {
this.walkExpression(expression.right);
this.scope.renames["$"+expression.left.name] = undefined;
this.walkExpression(expression.left);
}
};
Parser.prototype.walkConditionalExpression = function walkConditionalExpression(expression) {
var result = this.applyPluginsBailResult("expression ?:", expression);
if(result === undefined) {
this.walkExpression(expression.test);
this.walkExpression(expression.consequent);
if(expression.alternate)
this.walkExpression(expression.alternate);
} else {
if(result)
this.walkExpression(expression.consequent);
else if(expression.alternate)
this.walkExpression(expression.alternate);
}
};
Parser.prototype.walkNewExpression = function walkNewExpression(expression) {
this.walkExpression(expression.callee);
if(expression.arguments)
this.walkExpressions(expression.arguments);
};
Parser.prototype.walkCallExpression = function walkCallExpression(expression) {
function walkIIFE(functionExpression, args) {
var params = functionExpression.params;
var args = args.map(function(arg, idx) {
var renameIdentifier = this.getRenameIdentifier(arg);
if(renameIdentifier && this.applyPluginsBailResult("can-rename " + renameIdentifier, arg)) {
if(!this.applyPluginsBailResult("rename " + renameIdentifier, arg))
return renameIdentifier;
}
this.walkExpression(arg);
}, this);
this.inScope(params.filter(function(identifier, idx) {
return !args[idx];
}), function() {
args.forEach(function(arg, idx) {
if(!arg) return;
if(!params[idx] || params[idx].type !== "Identifier") return;
this.scope.renames["$"+params[idx].name] = arg;
}, this);
if(functionExpression.body.type === "BlockStatement")
this.walkStatement(functionExpression.body);
else
this.walkExpression(functionExpression.body);
}.bind(this));
}
if(expression.callee.type === "MemberExpression" && expression.callee.object.type === "FunctionExpression" && !expression.callee.computed && ["call", "bind"].indexOf(expression.callee.property.name) >= 0 && expression.arguments && expression.arguments.length > 1) {
// (function(...) { }.call/bind(?, ...))
walkIIFE.call(this, expression.callee.object, expression.arguments.slice(1));
this.walkExpression(expression.arguments[0]);
} else if(expression.callee.type === "FunctionExpression" && expression.arguments) {
// (function(...) { }(...))
walkIIFE.call(this, expression.callee, expression.arguments);
} else {
var callee = this.evaluateExpression(expression.callee);
if(callee.isIdentifier()) {
var result = this.applyPluginsBailResult("call " + callee.identifier, expression);
if(result === true)
return;
}
if(expression.callee)
this.walkExpression(expression.callee);
if(expression.arguments)
this.walkExpressions(expression.arguments);
}
};
Parser.prototype.walkMemberExpression = function walkMemberExpression(expression) {
var expr = expression;
var exprName = [];
while(expr.type === "MemberExpression" && !expr.computed) {
exprName.unshift(expr.property.name);
expr = expr.object;
}
if(expr.type === "Identifier" && this.scope.definitions.indexOf(expr.name) === -1) {
exprName.unshift(this.scope.renames["$"+expr.name] || expr.name);
exprName = exprName.join(".");
var result = this.applyPluginsBailResult("expression " + exprName, expression);
if(result === true)
return;
}
this.walkExpression(expression.object);
if(expression.computed === true)
this.walkExpression(expression.property);
};
Parser.prototype.walkIdentifier = function walkIdentifier(expression) {
if(this.scope.definitions.indexOf(expression.name) === -1) {
var result = this.applyPluginsBailResult("expression " + (this.scope.renames["$"+expression.name] || expression.name), expression);
if(result === true)
return;
}
};
Parser.prototype.inScope = function inScope(params, fn) {
var oldScope = this.scope;
this.scope = {
inTry: false,
definitions: oldScope.definitions.slice(),
renames: Object.create(oldScope.renames)
};
params.forEach(function(param) {
if(typeof param !== "string") {
if(param.type !== "Identifier")
return;
param = param.name;
}
this.scope.renames["$"+param] = undefined;
this.scope.definitions.push(param);
}, this);
fn();
this.scope = oldScope;
};
Parser.prototype.evaluateExpression = function evaluateExpression(expression) {
var result = this.applyPluginsBailResult("evaluate " + expression.type, expression);
if(result !== undefined)
return result;
return new BasicEvaluatedExpression().setRange(expression.range);
};
Parser.prototype.parseString = function parseString(expression) {
switch(expression.type) {
case "BinaryExpression":
if(expression.operator === "+")
return this.parseString(expression.left) + this.parseString(expression.right);
break;
case "Literal":
return expression.value+"";
}
throw new Error(expression.type + " is not supported as parameter for require");
};
Parser.prototype.parseCalculatedString = function parseCalculatedString(expression) {
switch(expression.type) {
case "BinaryExpression":
if(expression.operator === "+") {
var left = this.parseCalculatedString(expression.left);
var right = this.parseCalculatedString(expression.right);
if(left.code) {
return {range: left.range, value: left.value, code: true};
} else if(right.code) {
return {range: [left.range[0], right.range ? right.range[1] : left.range[1]], value: left.value + right.value, code: true};
} else {
return {range: [left.range[0], right.range[1]], value: left.value + right.value};
}
}
break;
case "ConditionalExpression":
var consequent = this.parseCalculatedString(expression.consequent);
var alternate = this.parseCalculatedString(expression.alternate);
var items = [];
if(consequent.conditional)
Array.prototype.push.apply(items, consequent.conditional);
else if(!consequent.code)
items.push(consequent);
else break;
if(alternate.conditional)
Array.prototype.push.apply(items, alternate.conditional);
else if(!alternate.code)
items.push(alternate);
else break;
return {value: "", code: true, conditional: items};
case "Literal":
return {range: expression.range, value: expression.value+""};
break;
}
return {value: "", code: true};
};
["parseString", "parseCalculatedString"].forEach(function(fn) {
Parser.prototype[fn + "Array"] = function parseXXXArray(expression) {
switch(expression.type) {
case "ArrayExpression":
var arr = [];
if(expression.elements)
expression.elements.forEach(function(expr) {
arr.push(this[fn](expr));
}, this);
return arr;
}
return [this[fn](expression)];
};
});
Parser.prototype.parse = function parse(source, initialState) {
var ast = esprima.parse(source, {range: true, loc: true, raw: true});
if(!ast || typeof ast !== "object")
throw new Error("Source couldn't be parsed");
var oldScope = this.scope;
var oldState = this.state;
this.scope = {
inTry: false,
definitions: [],
renames: {}
};
var state = this.state = initialState || {};
if(this.applyPluginsBailResult("program", ast) === undefined)
this.walkStatements(ast.body);
this.scope = oldScope;
this.state = oldState;
return state;
};
Parser.prototype.evaluate = function evaluate(source) {
var ast = esprima.parse("("+source+")", {range: true, loc: true, raw: true});
if(!ast || typeof ast !== "object" || ast.type !== "Program")
throw new Error("evaluate: Source couldn't be parsed");
if(ast.body.length !== 1 || ast.body[0].type !== "ExpressionStatement")
throw new Error("evaluate: Source is not a expression");
return this.evaluateExpression(ast.body[0].expression);
};
dashboard.module.ts | import {NgModule} from '@angular/core';
import {CommonModule} from '@angular/common';
import {FormsModule} from '@angular/forms';
import {IonicModule} from '@ionic/angular';
import {DashboardPageRoutingModule} from './dashboard-routing.module';
import {DashboardPage} from './dashboard.page';
@NgModule({
imports: [CommonModule, FormsModule, IonicModule, DashboardPageRoutingModule],
declarations: [DashboardPage],
})
export class DashboardPageModule {}
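A sketch of how a page module like this is typically consumed (hypothetical app-level route; the path and file layout are assumptions):

```typescript
// app-routing.module.ts (hypothetical): lazy-load the dashboard page.
import {Routes} from '@angular/router';

export const routes: Routes = [
  {
    path: 'dashboard',
    loadChildren: () =>
      import('./dashboard/dashboard.module').then(m => m.DashboardPageModule),
  },
];
```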
test_regression.py | from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.ma as ma
from numpy.testing import *
from numpy.compat import sixu
rlevel = 1
class TestRegression(TestCase):
def test_masked_array_create(self,level=rlevel):
"""Ticket #17"""
x = np.ma.masked_array([0,1,2,3,0,4,5,6],mask=[0,0,0,1,1,1,0,0])
assert_array_equal(np.ma.nonzero(x),[[1,2,6,7]])
def test_masked_array(self,level=rlevel):
"""Ticket #61"""
x = np.ma.array(1,mask=[1])
def test_mem_masked_where(self,level=rlevel):
"""Ticket #62"""
from numpy.ma import masked_where, MaskType
a = np.zeros((1,1))
b = np.zeros(a.shape, MaskType)
c = masked_where(b,a)
a-c
def test_masked_array_multiply(self,level=rlevel):
"""Ticket #254"""
a = np.ma.zeros((4,1))
a[2,0] = np.ma.masked
b = np.zeros((4,2))
a*b
b*a
def test_masked_array_repeat(self, level=rlevel):
"""Ticket #271"""
np.ma.array([1],mask=False).repeat(10)
def test_masked_array_repr_unicode(self):
"""Ticket #1256"""
repr(np.ma.array(sixu("Unicode")))
def test_atleast_2d(self):
"""Ticket #1559"""
a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False])
b = np.atleast_2d(a)
assert_(a.mask.ndim == 1)
assert_(b.mask.ndim == 2)
def test_set_fill_value_unicode_py3(self):
"""Ticket #2733"""
a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0])
a.fill_value = 'X'
assert_(a.fill_value == 'X')
def test_var_sets_maskedarray_scalar(self):
"""Issue gh-2757"""
a = np.ma.array(np.arange(5), mask=True)
mout = np.ma.array(-1, dtype=float)
a.var(out=mout)
assert_(mout._data == 0)
if __name__ == "__main__":
run_module_suite()
| """Issue gh-2757"""
a = np.ma.array(np.arange(5), mask=True)
mout = np.ma.array(-1, dtype=float)
a.var(out=mout)
assert_(mout._data == 0) |
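Most of these tickets pin down mask propagation; a standalone illustration of the behavior test_atleast_2d guards (plain numpy, runnable outside the test suite):

```python
import numpy as np

a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False])
b = np.atleast_2d(a)
# Promoting the array to 2-D must promote its mask to 2-D as well.
print(a.mask.ndim, b.mask.ndim)  # 1 2
print(b)                         # [[0.0 -- 3.5]]
```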
hot_reloading.rs | use crate::event::Event;
use crate::CHANNEL;
use log::{debug, error};
use notify::watcher;
use notify::DebouncedEvent;
use notify::RecursiveMode;
use notify::Watcher;
use std::sync::mpsc::channel;
pub fn start() {
std::thread::spawn(|| {
let (tx, rx) = channel();
let mut watcher = watcher(tx, std::time::Duration::from_millis(10))
.expect("Failed to spawn file watcher");
let mut path = dirs::config_dir().expect("Failed to get config dir");
path.push("nog");
path.push("config.nog");
watcher
.watch(path, RecursiveMode::NonRecursive)
.expect("Failed to watch config directory");
loop {
match rx.recv() {
Ok(ev) => match ev {
DebouncedEvent::Write(_) => {
debug!("detected config change");
CHANNEL
.sender
.clone()
.send(Event::ReloadConfig)
.expect("Failed to send ReloadConfig event");
}
_ => {}
},
Err(e) => error!("watch error: {:?}", e),
}
}
});
}
scraper.py | from bs4 import BeautifulSoup
import urllib.request as urllib2
import random
from random import choice
import pandas as pd
import copy, time, sys, shutil, os, yaml, json
import datetime as dt
from glob import glob
import regex
class scraper():
criteria = None
df = None
df_pre = None
__verbose = False
__parameter_names = { # this dict translates the parameters into their corresponding URL bits
'min_price' : 'pf',
'max_price' : 'pt',
'min_rooms' : 'nrf',
'max_rooms' : 'nrt',
'radius' : 'r',
'days_old' : 'pa',
}
__instance_name = None
__root_dir = "./ImmoKaa_data/"
__base_dir = None
def __init__(self, instance_name, criteria_file):
self.__instance_name = instance_name
self.__base_dir = self.__root_dir+instance_name
os.makedirs(self.__base_dir, exist_ok=True)
with open(criteria_file) as file:
self.criteria = yaml.load(file, Loader=yaml.FullLoader)
self.get_preexisting_data()
def _urlquery(self, url, verbose=False):
# function cycles randomly through different user agents and time intervals to simulate more natural queries
try:
sleeptime = float(random.randint(1,6))/5
time.sleep(sleeptime)
agents = ['Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1309.0 Safari/537.17',
'Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0',
'Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
'Mozilla/3.0',
'Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3',
'Mozilla/5.0 (Linux; U; Android 0.5; en-us) AppleWebKit/522+ (KHTML, like Gecko) Safari/419.3',
'Opera/9.00 (Windows NT 5.1; U; en)']
agent = choice(agents)
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', agent)]
html = opener.open(url).read()
time.sleep(sleeptime)
return html
except Exception as e:
if verbose: print('Something went wrong with Crawling:\n%s' % e)
return None
def _immoscout24parser(self, url, verbose=False):
'''
Read search results from Immoscout24.ch, given a specific url indicating the search criteria and the page number.
'''
if verbose: print ("Scanning the following url:", url)
try:
soup = BeautifulSoup(self._urlquery(url, verbose), 'html.parser')
scripts = soup.findAll('script')
scripts = filter(None, [script.string for script in scripts])
sr = next(script for script in scripts if 'searchResult' in script)
# Some cleaning... with not-so-clean code. Because ImmoScout keeps changing stuff and I can't be bothered to fix this properly every time.
s = sr.replace(":undefined", ':"undefined"').lstrip("__INITIAL_STATE__=")
s = regex.sub('\{"render".*?(?:\{(?:(?R)|[^{}])*})\}', '""', s)
poss = [m.start() for m in regex.finditer('e=>', s)]
res = s[:poss[0]]
for i in range(len(poss)):
end = len(s)
if i+1 < len(poss):
end = poss[i+1]
dd = regex.sub('(?:\{(?:(?R)|[^{}])*})', '""', s[poss[i]+3:end], 1)
res += dd
js = json.loads(res)
return js
except Exception as e:
if verbose: print("Error in immoscout24 parser: %s" % e)
return None
def _make_url(self, criteria, page):
url = 'https://www.immoscout24.ch/en/real-estate/{mode}/city-{city}?'.format(**criteria)
for key in [x for x in criteria.keys() if x not in ['city', 'mode']]:
try:
url+=self.__parameter_names[key]+'='+str(criteria[key])+"&"
except KeyError:
raise Exception("Error in make_url", "Unsupported search parameter!")
url = url[:-1]+"&pn="+str(page) #add page number
return url
def _get_listings(self, criteria, verbose):
"""
Pull a list of listings for given criteria and cities, and put them in a dataframe.
"""
print ("city:",criteria['city'])
page = 0
data_pages = []
numberOfPages = 1
while page<numberOfPages:
page+=1
url = self._make_url(criteria, page)
resultlist_json = None
N_attempts = 0
while resultlist_json is None and N_attempts<5:
try:
N_attempts+=1
resultlist_json = self._immoscout24parser(url, verbose)
numberOfPages = int(resultlist_json["pages"]["searchResult"]["resultData"]["pagingData"]["totalPages"])
print("\tpage: {0}/{1}".format(page,numberOfPages), end=" ")
data = resultlist_json["pages"]["searchResult"]["resultData"]["listData"]
data = pd.DataFrame.from_dict(data)
data["searched-city"]=criteria['city'] #store which city we searched, for reference
data["fetch-date"]=dt.datetime.now().date()
print("({0} results)".format(data.shape[0]))
data_pages.append(copy.copy(data))
except Exception as e:
print (e)
pass
data_all = pd.concat(data_pages)
return data_all
def scrape(self):
dfs = []
for city in self.criteria['cities']:
criteria_city = copy.copy(self.criteria)
criteria_city['city'] = city
del criteria_city['cities']
dfs.append(self._get_listings(criteria_city, verbose=self.__verbose))
self.df = pd.concat(dfs)
def set_verbose(self, flag):
if not isinstance(flag, bool):
raise Exception("ImmoKaa - set_verbose", "Argument must be bool.")
self.__verbose=flag
def save_scraped_dataframe(self):
if self.df is None:
raise Exception("There is no scraped dataset to save.")
today = dt.datetime.now().date().strftime("%Y-%m-%d")
self.df.to_csv(self.__base_dir+"/search_results_"+today+".csv", mode="w")
print ("History file created/overwritten.")
def get_preexisting_data(self):
pres = []
try:
for f in glob(self.__base_dir+"/serach_results_*.csv"):
pres.append(pd.read_csv(f))
pres[-1]["fetch-date"] = pd.to_datetime(pres[-1]['fetch-date'],\
format="%Y-%m-%d").dt.date
self.df_pre = pd.concat(pres)
print ("Found {0} pre-existing data file(s). You can access the full dataset using get_full_dataset().". format(len(pres)))
except FileNotFoundError:
pass
def get_full_dataset(self):
return pd.concat([self.df, self.df_pre])
cluster.rs | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use std::collections::hash_map::Entry;
use std::error::Error as StdError;
use std::sync::{mpsc, Arc, Mutex, RwLock};
use std::time::Duration;
use std::{result, thread};
use crossbeam::channel::TrySendError;
use futures::executor::block_on;
use kvproto::errorpb::Error as PbError;
use kvproto::kvrpcpb::Context;
use kvproto::metapb::{self, PeerRole, RegionEpoch, StoreLabel};
use kvproto::pdpb;
use kvproto::raft_cmdpb::*;
use kvproto::raft_serverpb::{
self, PeerState, RaftApplyState, RaftLocalState, RaftMessage, RaftTruncatedState,
RegionLocalState,
};
use raft::eraftpb::ConfChangeType;
use tempfile::TempDir;
use crate::Config;
use collections::{HashMap, HashSet};
use encryption_export::DataKeyManager;
use engine_rocks::raw::DB;
use engine_rocks::{Compat, RocksEngine, RocksSnapshot};
use engine_traits::{
CompactExt, Engines, Iterable, MiscExt, Mutable, Peekable, WriteBatch, WriteBatchExt,
CF_DEFAULT, CF_RAFT,
};
use file_system::IORateLimiter;
use pd_client::PdClient;
use raftstore::store::fsm::store::{StoreMeta, PENDING_MSG_CAP};
use raftstore::store::fsm::{create_raft_batch_system, RaftBatchSystem, RaftRouter};
use raftstore::store::transport::CasualRouter;
use raftstore::store::*;
use raftstore::{Error, Result};
use tikv::server::Result as ServerResult;
use tikv_util::thread_group::GroupProperties;
use tikv_util::time::Instant;
use tikv_util::HandyRwLock;
use super::*;
use tikv_util::time::ThreadReadId;
// We simulate 3 or 5 nodes, each of which has a store.
// Sometimes we use a fixed id for testing, which means the id
// isn't allocated by pd, and the node id and store id are the same.
// E.g., for node 1, the node id and store id are both 1.
pub trait Simulator {
// Pass 0 to let pd allocate a node id if db is empty.
// If node id > 0, the node must be created in db already,
// and the node id must be the same as given argument.
// Return the node id.
// TODO: we will rename node name here because now we use store only.
fn run_node(
&mut self,
node_id: u64,
cfg: Config,
engines: Engines<RocksEngine, RocksEngine>,
store_meta: Arc<Mutex<StoreMeta>>,
key_manager: Option<Arc<DataKeyManager>>,
router: RaftRouter<RocksEngine, RocksEngine>,
system: RaftBatchSystem<RocksEngine, RocksEngine>,
) -> ServerResult<u64>;
fn stop_node(&mut self, node_id: u64);
fn get_node_ids(&self) -> HashSet<u64>;
fn async_command_on_node(
&self,
node_id: u64,
request: RaftCmdRequest,
cb: Callback<RocksSnapshot>,
) -> Result<()> {
self.async_command_on_node_with_opts(node_id, request, cb, Default::default())
}
fn async_command_on_node_with_opts(
&self,
node_id: u64,
request: RaftCmdRequest,
cb: Callback<RocksSnapshot>,
opts: RaftCmdExtraOpts,
) -> Result<()>;
fn send_raft_msg(&mut self, msg: RaftMessage) -> Result<()>;
fn get_snap_dir(&self, node_id: u64) -> String;
fn get_snap_mgr(&self, node_id: u64) -> &SnapManager;
fn get_router(&self, node_id: u64) -> Option<RaftRouter<RocksEngine, RocksEngine>>;
fn add_send_filter(&mut self, node_id: u64, filter: Box<dyn Filter>);
fn clear_send_filters(&mut self, node_id: u64);
fn add_recv_filter(&mut self, node_id: u64, filter: Box<dyn Filter>);
fn clear_recv_filters(&mut self, node_id: u64);
fn call_command(&self, request: RaftCmdRequest, timeout: Duration) -> Result<RaftCmdResponse> {
let node_id = request.get_header().get_peer().get_store_id();
self.call_command_on_node(node_id, request, timeout)
}
fn read(
&self,
batch_id: Option<ThreadReadId>,
request: RaftCmdRequest,
timeout: Duration,
) -> Result<RaftCmdResponse> {
let node_id = request.get_header().get_peer().get_store_id();
let (cb, rx) = make_cb(&request);
self.async_read(node_id, batch_id, request, cb);
rx.recv_timeout(timeout)
.map_err(|_| Error::Timeout(format!("request timeout for {:?}", timeout)))
}
fn async_read(
&self,
node_id: u64,
batch_id: Option<ThreadReadId>,
request: RaftCmdRequest,
cb: Callback<RocksSnapshot>,
);
fn call_command_on_node(
&self,
node_id: u64,
request: RaftCmdRequest,
timeout: Duration,
) -> Result<RaftCmdResponse> {
let (cb, rx) = make_cb(&request);
match self.async_command_on_node(node_id, request, cb) {
Ok(()) => {}
Err(e) => {
let mut resp = RaftCmdResponse::default();
resp.mut_header().set_error(e.into());
return Ok(resp);
}
}
rx.recv_timeout(timeout)
.map_err(|e| Error::Timeout(format!("request timeout for {:?}: {:?}", timeout, e)))
}
}
pub struct Cluster<T: Simulator> {
pub cfg: Config,
leaders: HashMap<u64, metapb::Peer>,
pub count: usize,
pub paths: Vec<TempDir>,
pub dbs: Vec<Engines<RocksEngine, RocksEngine>>,
pub store_metas: HashMap<u64, Arc<Mutex<StoreMeta>>>,
key_managers: Vec<Option<Arc<DataKeyManager>>>,
pub io_rate_limiter: Option<Arc<IORateLimiter>>,
pub engines: HashMap<u64, Engines<RocksEngine, RocksEngine>>,
key_managers_map: HashMap<u64, Option<Arc<DataKeyManager>>>,
pub labels: HashMap<u64, HashMap<String, String>>,
group_props: HashMap<u64, GroupProperties>,
pub sim: Arc<RwLock<T>>,
pub pd_client: Arc<TestPdClient>,
}
impl<T: Simulator> Cluster<T> {
// Create the default Store cluster.
pub fn new(
id: u64,
count: usize,
sim: Arc<RwLock<T>>,
pd_client: Arc<TestPdClient>,
) -> Cluster<T> {
// TODO: In the future, maybe it's better to test both case where `use_delete_range` is true and false
Cluster {
cfg: Config {
tikv: new_tikv_config(id),
prefer_mem: true,
},
leaders: HashMap::default(),
count,
paths: vec![],
dbs: vec![],
store_metas: HashMap::default(),
key_managers: vec![],
io_rate_limiter: None,
engines: HashMap::default(),
key_managers_map: HashMap::default(),
labels: HashMap::default(),
group_props: HashMap::default(),
sim,
pd_client,
}
}
    // Take ownership of the temp dirs so they can be destroyed later.
pub fn take_path(&mut self) -> Vec<TempDir> {
std::mem::take(&mut self.paths)
}
pub fn id(&self) -> u64 {
self.cfg.server.cluster_id
}
pub fn pre_start_check(&mut self) -> result::Result<(), Box<dyn StdError>> {
for path in &self.paths {
self.cfg.storage.data_dir = path.path().to_str().unwrap().to_owned();
self.cfg.validate()?
}
Ok(())
}
    /// Engines in a just-created cluster are not bootstrapped, which means they are not
    /// associated with a `node_id`. Calling `Cluster::start` bootstraps all nodes in the cluster.
///
    /// However, sometimes a node can be bootstrapped externally. This function can be called to
    /// mark it as bootstrapped in `Cluster`.
pub fn set_bootstrapped(&mut self, node_id: u64, offset: usize) {
let engines = self.dbs[offset].clone();
let key_mgr = self.key_managers[offset].clone();
assert!(self.engines.insert(node_id, engines).is_none());
assert!(self.key_managers_map.insert(node_id, key_mgr).is_none());
}
fn create_engine(&mut self, router: Option<RaftRouter<RocksEngine, RocksEngine>>) {
let (engines, key_manager, dir) =
create_test_engine(router, self.io_rate_limiter.clone(), &self.cfg);
self.dbs.push(engines);
self.key_managers.push(key_manager);
self.paths.push(dir);
}
pub fn create_engines(&mut self) {
self.io_rate_limiter = Some(Arc::new(
self.cfg
.storage
.io_rate_limit
.build(true /*enable_statistics*/),
));
for _ in 0..self.count {
self.create_engine(None);
}
}
pub fn start(&mut self) -> ServerResult<()> {
        // Try to recover from the last shutdown.
let node_ids: Vec<u64> = self.engines.iter().map(|(&id, _)| id).collect();
for node_id in node_ids {
self.run_node(node_id)?;
}
        // Try to start new nodes.
for _ in 0..self.count - self.engines.len() {
let (router, system) = create_raft_batch_system(&self.cfg.raft_store);
self.create_engine(Some(router.clone()));
let engines = self.dbs.last().unwrap().clone();
let key_mgr = self.key_managers.last().unwrap().clone();
let store_meta = Arc::new(Mutex::new(StoreMeta::new(PENDING_MSG_CAP)));
let props = GroupProperties::default();
tikv_util::thread_group::set_properties(Some(props.clone()));
let mut sim = self.sim.wl();
let node_id = sim.run_node(
0,
self.cfg.clone(),
engines.clone(),
store_meta.clone(),
key_mgr.clone(),
router,
system,
)?;
self.group_props.insert(node_id, props);
self.engines.insert(node_id, engines);
self.store_metas.insert(node_id, store_meta);
self.key_managers_map.insert(node_id, key_mgr);
}
Ok(())
}
pub fn compact_data(&self) {
for engine in self.engines.values() {
let db = &engine.kv;
db.compact_range(CF_DEFAULT, None, None, false, 1).unwrap();
}
}
pub fn flush_data(&self) {
for engine in self.engines.values() {
let db = &engine.kv;
db.flush_cf(CF_DEFAULT, true /*sync*/).unwrap();
}
}
// Bootstrap the store with fixed ID (like 1, 2, .. 5) and
// initialize first region in all stores, then start the cluster.
pub fn run(&mut self) {
self.create_engines();
self.bootstrap_region().unwrap();
self.start().unwrap();
}
// Bootstrap the store with fixed ID (like 1, 2, .. 5) and
// initialize first region in store 1, then start the cluster.
pub fn run_conf_change(&mut self) -> u64 {
self.create_engines();
let region_id = self.bootstrap_conf_change();
self.start().unwrap();
region_id
}
pub fn get_node_ids(&self) -> HashSet<u64> {
self.sim.rl().get_node_ids()
}
pub fn run_node(&mut self, node_id: u64) -> ServerResult<()> {
debug!("starting node {}", node_id);
let engines = self.engines[&node_id].clone();
let key_mgr = self.key_managers_map[&node_id].clone();
let (router, system) = create_raft_batch_system(&self.cfg.raft_store);
let mut cfg = self.cfg.clone();
if let Some(labels) = self.labels.get(&node_id) {
cfg.server.labels = labels.to_owned();
}
let store_meta = match self.store_metas.entry(node_id) {
Entry::Occupied(o) => {
let mut meta = o.get().lock().unwrap();
*meta = StoreMeta::new(PENDING_MSG_CAP);
o.get().clone()
}
Entry::Vacant(v) => v
.insert(Arc::new(Mutex::new(StoreMeta::new(PENDING_MSG_CAP))))
.clone(),
};
let props = GroupProperties::default();
self.group_props.insert(node_id, props.clone());
tikv_util::thread_group::set_properties(Some(props));
debug!("calling run node"; "node_id" => node_id);
// FIXME: rocksdb event listeners may not work, because we change the router.
self.sim
.wl()
.run_node(node_id, cfg, engines, store_meta, key_mgr, router, system)?;
debug!("node {} started", node_id);
Ok(())
}
pub fn stop_node(&mut self, node_id: u64) {
debug!("stopping node {}", node_id);
self.group_props[&node_id].mark_shutdown();
match self.sim.write() {
Ok(mut sim) => sim.stop_node(node_id),
Err(_) => safe_panic!("failed to acquire write lock."),
}
self.pd_client.shutdown_store(node_id);
debug!("node {} stopped", node_id);
}
pub fn get_engine(&self, node_id: u64) -> Arc<DB> {
Arc::clone(self.engines[&node_id].kv.as_inner())
}
pub fn get_raft_engine(&self, node_id: u64) -> Arc<DB> {
Arc::clone(self.engines[&node_id].raft.as_inner())
}
pub fn get_all_engines(&self, node_id: u64) -> Engines<RocksEngine, RocksEngine> {
self.engines[&node_id].clone()
}
pub fn send_raft_msg(&mut self, msg: RaftMessage) -> Result<()> {
self.sim.wl().send_raft_msg(msg)
}
pub fn call_command_on_node(
&self,
node_id: u64,
request: RaftCmdRequest,
timeout: Duration,
) -> Result<RaftCmdResponse> {
match self
.sim
.rl()
.call_command_on_node(node_id, request.clone(), timeout)
{
Err(e) => {
warn!("failed to call command {:?}: {:?}", request, e);
Err(e)
}
a => a,
}
}
pub fn read(
&self,
batch_id: Option<ThreadReadId>,
request: RaftCmdRequest,
timeout: Duration,
) -> Result<RaftCmdResponse> {
match self.sim.rl().read(batch_id, request.clone(), timeout) {
Err(e) => {
warn!("failed to read {:?}: {:?}", request, e);
Err(e)
}
a => a,
}
}
pub fn call_command(
&self,
request: RaftCmdRequest,
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut is_read = false;
for req in request.get_requests() {
match req.get_cmd_type() {
CmdType::Get | CmdType::Snap | CmdType::ReadIndex => {
is_read = true;
}
_ => (),
}
}
let ret = if is_read {
self.sim.rl().read(None, request.clone(), timeout)
} else {
self.sim.rl().call_command(request.clone(), timeout)
};
match ret {
Err(e) => {
warn!("failed to call command {:?}: {:?}", request, e);
Err(e)
}
a => a,
}
}
pub fn call_command_on_leader(
&mut self,
mut request: RaftCmdRequest,
timeout: Duration,
) -> Result<RaftCmdResponse> {
let timer = Instant::now();
let region_id = request.get_header().get_region_id();
loop {
let leader = match self.leader_of_region(region_id) {
None => return Err(Error::NotLeader(region_id, None)),
Some(l) => l,
};
request.mut_header().set_peer(leader);
let resp = match self.call_command(request.clone(), timeout) {
e @ Err(_) => return e,
Ok(resp) => resp,
};
if self.refresh_leader_if_needed(&resp, region_id)
&& timer.saturating_elapsed() < timeout
{
warn!(
"{:?} is no longer leader, let's retry",
request.get_header().get_peer()
);
continue;
}
return Ok(resp);
}
}
fn valid_leader_id(&self, region_id: u64, leader_id: u64) -> bool {
let store_ids = match self.voter_store_ids_of_region(region_id) {
None => return false,
Some(ids) => ids,
};
let node_ids = self.sim.rl().get_node_ids();
store_ids.contains(&leader_id) && node_ids.contains(&leader_id)
}
fn voter_store_ids_of_region(&self, region_id: u64) -> Option<Vec<u64>> {
block_on(self.pd_client.get_region_by_id(region_id))
.unwrap()
.map(|region| {
region
.get_peers()
.iter()
.flat_map(|p| {
if p.get_role() != PeerRole::Learner {
Some(p.get_store_id())
} else {
None
}
})
.collect()
})
}
pub fn query_leader(
&self,
store_id: u64,
region_id: u64,
timeout: Duration,
) -> Option<metapb::Peer> {
        // To get the region leader we don't care about the real peer id, so use 0 instead.
let peer = new_peer(store_id, 0);
let find_leader = new_status_request(region_id, peer, new_region_leader_cmd());
let mut resp = match self.call_command(find_leader, timeout) {
Ok(resp) => resp,
Err(err) => {
error!(
"fail to get leader of region {} on store {}, error: {:?}",
region_id, store_id, err
);
return None;
}
};
let mut region_leader = resp.take_status_response().take_region_leader();
// NOTE: node id can't be 0.
if self.valid_leader_id(region_id, region_leader.get_leader().get_store_id()) {
Some(region_leader.take_leader())
} else {
None
}
}
pub fn leader_of_region(&mut self, region_id: u64) -> Option<metapb::Peer> {
let timer = Instant::now_coarse();
let timeout = Duration::from_secs(5);
let mut store_ids = None;
while timer.saturating_elapsed() < timeout {
match self.voter_store_ids_of_region(region_id) {
None => thread::sleep(Duration::from_millis(10)),
Some(ids) => {
store_ids = Some(ids);
break;
}
};
}
let store_ids = store_ids?;
if let Some(l) = self.leaders.get(®ion_id) {
// leader may be stopped in some tests.
if self.valid_leader_id(region_id, l.get_store_id()) {
return Some(l.clone());
}
}
self.reset_leader_of_region(region_id);
let mut leader = None;
let mut leaders = HashMap::default();
let node_ids = self.sim.rl().get_node_ids();
// For some tests, we stop the node but pd still has this information,
// and we must skip this.
let alive_store_ids: Vec<_> = store_ids
.iter()
.filter(|id| node_ids.contains(id))
.cloned()
.collect();
while timer.saturating_elapsed() < timeout {
for store_id in &alive_store_ids {
let l = match self.query_leader(*store_id, region_id, Duration::from_secs(1)) {
None => continue,
Some(l) => l,
};
leaders
.entry(l.get_id())
.or_insert((l, vec![]))
.1
.push(*store_id);
}
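            // Take the candidate reported as leader by the most stores, and stop
            // retrying once a majority of the voter stores agree on it.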
if let Some((_, (l, c))) = leaders.iter().max_by_key(|(_, (_, c))| c.len()) {
                // It may be a stepped-down leader.
if c.contains(&l.get_store_id()) {
leader = Some(l.clone());
                    // Technically, a correct calculation should use two quorums when in a
                    // joint state. We use a single quorum here for simplicity.
if c.len() > store_ids.len() / 2 {
break;
}
}
}
debug!("failed to detect leaders"; "leaders" => ?leaders, "store_ids" => ?store_ids);
sleep_ms(10);
leaders.clear();
}
if let Some(l) = leader {
self.leaders.insert(region_id, l);
}
self.leaders.get(®ion_id).cloned()
}
pub fn check_regions_number(&self, len: u32) {
assert_eq!(self.pd_client.get_regions_number() as u32, len)
}
    // For tests where a node has already bootstrapped the cluster with the first region,
    // but another node may request bootstrap at the same time and see is_bootstrap == false.
    // Adds the region without setting bootstrap to true.
pub fn add_first_region(&self) -> Result<()> {
let mut region = metapb::Region::default();
let region_id = self.pd_client.alloc_id().unwrap();
let peer_id = self.pd_client.alloc_id().unwrap();
region.set_id(region_id);
region.set_start_key(keys::EMPTY_KEY.to_vec());
region.set_end_key(keys::EMPTY_KEY.to_vec());
region.mut_region_epoch().set_version(INIT_EPOCH_VER);
region.mut_region_epoch().set_conf_ver(INIT_EPOCH_CONF_VER);
let peer = new_peer(peer_id, peer_id);
region.mut_peers().push(peer);
self.pd_client.add_region(®ion);
Ok(())
}
    /// Multiple nodes with fixed node ids, like node 1, 2, ..., 5.
    /// The first region (region 1) is in all stores, with peers 1, 2, ..., 5.
    /// Peer 1 is in node 1, store 1, etc.
///
/// Must be called after `create_engines`.
pub fn bootstrap_region(&mut self) -> Result<()> {
for (i, engines) in self.dbs.iter().enumerate() {
let id = i as u64 + 1;
self.engines.insert(id, engines.clone());
let store_meta = Arc::new(Mutex::new(StoreMeta::new(PENDING_MSG_CAP)));
self.store_metas.insert(id, store_meta);
self.key_managers_map
.insert(id, self.key_managers[i].clone());
}
let mut region = metapb::Region::default();
region.set_id(1);
region.set_start_key(keys::EMPTY_KEY.to_vec());
region.set_end_key(keys::EMPTY_KEY.to_vec());
region.mut_region_epoch().set_version(INIT_EPOCH_VER);
region.mut_region_epoch().set_conf_ver(INIT_EPOCH_CONF_VER);
for (&id, engines) in &self.engines {
let peer = new_peer(id, id);
region.mut_peers().push(peer.clone());
bootstrap_store(engines, self.id(), id).unwrap();
}
for engines in self.engines.values() {
prepare_bootstrap_cluster(engines, ®ion)?;
}
self.bootstrap_cluster(region);
Ok(())
}
// Return first region id.
pub fn bootstrap_conf_change(&mut self) -> u64 {
for (i, engines) in self.dbs.iter().enumerate() {
let id = i as u64 + 1;
self.engines.insert(id, engines.clone());
let store_meta = Arc::new(Mutex::new(StoreMeta::new(PENDING_MSG_CAP)));
self.store_metas.insert(id, store_meta);
self.key_managers_map
.insert(id, self.key_managers[i].clone());
}
for (&id, engines) in &self.engines {
bootstrap_store(engines, self.id(), id).unwrap();
}
let node_id = 1;
let region_id = 1;
let peer_id = 1;
let region = initial_region(node_id, region_id, peer_id);
prepare_bootstrap_cluster(&self.engines[&node_id], ®ion).unwrap();
self.bootstrap_cluster(region);
region_id
}
    // This is only for fixed-id tests.
fn bootstrap_cluster(&mut self, region: metapb::Region) {
self.pd_client
.bootstrap_cluster(new_store(1, "".to_owned()), region)
.unwrap();
for id in self.engines.keys() {
let mut store = new_store(*id, "".to_owned());
if let Some(labels) = self.labels.get(id) {
for (key, value) in labels.iter() {
store.labels.push(StoreLabel {
key: key.clone(),
value: value.clone(),
..Default::default()
});
}
}
self.pd_client.put_store(store).unwrap();
}
}
pub fn add_label(&mut self, node_id: u64, key: &str, value: &str) {
self.labels
.entry(node_id)
.or_default()
.insert(key.to_owned(), value.to_owned());
}
pub fn add_new_engine(&mut self) -> u64 {
self.create_engine(None);
self.count += 1;
let node_id = self.count as u64;
let engines = self.dbs.last().unwrap().clone();
bootstrap_store(&engines, self.id(), node_id).unwrap();
self.engines.insert(node_id, engines);
let key_mgr = self.key_managers.last().unwrap().clone();
self.key_managers_map.insert(node_id, key_mgr);
self.run_node(node_id).unwrap();
node_id
}
pub fn reset_leader_of_region(&mut self, region_id: u64) {
self.leaders.remove(®ion_id);
}
pub fn assert_quorum<F: FnMut(&Arc<DB>) -> bool>(&self, mut condition: F) {
if self.engines.is_empty() {
return;
}
let half = self.engines.len() / 2;
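        // E.g. with 5 engines, half = 2, so at least half + 1 = 3 stores must qualify.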
let mut qualified_cnt = 0;
for (id, engines) in &self.engines {
if !condition(engines.kv.as_inner()) {
debug!("store {} is not qualified yet.", id);
continue;
}
debug!("store {} is qualified", id);
qualified_cnt += 1;
if half < qualified_cnt {
return;
}
}
panic!(
"need at lease {} qualified stores, but only got {}",
half + 1,
qualified_cnt
);
}
pub fn shutdown(&mut self) |
// If the resp is "not leader error", get the real leader.
// Otherwise reset or refresh leader if needed.
// Returns if the request should retry.
fn refresh_leader_if_needed(&mut self, resp: &RaftCmdResponse, region_id: u64) -> bool {
if !is_error_response(resp) {
return false;
}
let err = resp.get_header().get_error();
if err
.get_message()
.contains("peer has not applied to current term")
{
// leader peer has not applied to current term
return true;
}
// If command is stale, leadership may have changed.
// EpochNotMatch is not checked as leadership is checked first in raftstore.
if err.has_stale_command() {
self.reset_leader_of_region(region_id);
return true;
}
if !err.has_not_leader() {
return false;
}
let err = err.get_not_leader();
if !err.has_leader() {
self.reset_leader_of_region(region_id);
return true;
}
self.leaders.insert(region_id, err.get_leader().clone());
true
}
pub fn request(
&mut self,
key: &[u8],
reqs: Vec<Request>,
read_quorum: bool,
timeout: Duration,
) -> RaftCmdResponse {
let timer = Instant::now();
let mut tried_times = 0;
// At least retry once.
while tried_times < 2 || timer.saturating_elapsed() < timeout {
tried_times += 1;
let mut region = self.get_region(key);
let region_id = region.get_id();
let req = new_request(
region_id,
region.take_region_epoch(),
reqs.clone(),
read_quorum,
);
let result = self.call_command_on_leader(req, timeout);
let resp = match result {
e @ Err(Error::Timeout(_))
| e @ Err(Error::NotLeader(..))
| e @ Err(Error::StaleCommand) => {
warn!("call command failed, retry it"; "err" => ?e);
sleep_ms(100);
continue;
}
Err(e) => panic!("call command failed {:?}", e),
Ok(resp) => resp,
};
if resp.get_header().get_error().has_epoch_not_match() {
warn!("seems split, let's retry");
sleep_ms(100);
continue;
}
if resp
.get_header()
.get_error()
.get_message()
.contains("merging mode")
{
warn!("seems waiting for merge, let's retry");
sleep_ms(100);
continue;
}
return resp;
}
panic!("request timeout");
}
// Get region when the `filter` returns true.
pub fn get_region_with<F>(&self, key: &[u8], filter: F) -> metapb::Region
where
F: Fn(&metapb::Region) -> bool,
{
for _ in 0..100 {
if let Ok(region) = self.pd_client.get_region(key) {
if filter(®ion) {
return region;
}
}
            // We may encounter a range gap after a split, so retry
            // getting the region here.
sleep_ms(20);
}
panic!("find no region for {}", log_wrappers::hex_encode_upper(key));
}
pub fn get_region(&self, key: &[u8]) -> metapb::Region {
self.get_region_with(key, |_| true)
}
pub fn get_region_id(&self, key: &[u8]) -> u64 {
self.get_region(key).get_id()
}
pub fn get_down_peers(&self) -> HashMap<u64, pdpb::PeerStats> {
self.pd_client.get_down_peers()
}
pub fn get(&mut self, key: &[u8]) -> Option<Vec<u8>> {
self.get_impl(CF_DEFAULT, key, false)
}
pub fn get_cf(&mut self, cf: &str, key: &[u8]) -> Option<Vec<u8>> {
self.get_impl(cf, key, false)
}
pub fn must_get(&mut self, key: &[u8]) -> Option<Vec<u8>> {
self.get_impl(CF_DEFAULT, key, true)
}
fn get_impl(&mut self, cf: &str, key: &[u8], read_quorum: bool) -> Option<Vec<u8>> {
let mut resp = self.request(
key,
vec![new_get_cf_cmd(cf, key)],
read_quorum,
Duration::from_secs(5),
);
if resp.get_header().has_error() {
panic!("response {:?} has error", resp);
}
assert_eq!(resp.get_responses().len(), 1);
assert_eq!(resp.get_responses()[0].get_cmd_type(), CmdType::Get);
if resp.get_responses()[0].has_get() {
Some(resp.mut_responses()[0].mut_get().take_value())
} else {
None
}
}
pub fn async_request(
&mut self,
req: RaftCmdRequest,
) -> Result<mpsc::Receiver<RaftCmdResponse>> {
self.async_request_with_opts(req, Default::default())
}
pub fn async_request_with_opts(
&mut self,
mut req: RaftCmdRequest,
opts: RaftCmdExtraOpts,
) -> Result<mpsc::Receiver<RaftCmdResponse>> {
let region_id = req.get_header().get_region_id();
let leader = self.leader_of_region(region_id).unwrap();
req.mut_header().set_peer(leader.clone());
let (cb, rx) = make_cb(&req);
self.sim
.rl()
.async_command_on_node_with_opts(leader.get_store_id(), req, cb, opts)?;
Ok(rx)
}
pub fn async_exit_joint(&mut self, region_id: u64) -> Result<mpsc::Receiver<RaftCmdResponse>> {
let region = block_on(self.pd_client.get_region_by_id(region_id))
.unwrap()
.unwrap();
let exit_joint = new_admin_request(
region_id,
region.get_region_epoch(),
new_change_peer_v2_request(vec![]),
);
self.async_request(exit_joint)
}
pub fn async_put(
&mut self,
key: &[u8],
value: &[u8],
) -> Result<mpsc::Receiver<RaftCmdResponse>> {
let mut region = self.get_region(key);
let reqs = vec![new_put_cmd(key, value)];
let put = new_request(region.get_id(), region.take_region_epoch(), reqs, false);
self.async_request(put)
}
pub fn async_remove_peer(
&mut self,
region_id: u64,
peer: metapb::Peer,
) -> Result<mpsc::Receiver<RaftCmdResponse>> {
let region = block_on(self.pd_client.get_region_by_id(region_id))
.unwrap()
.unwrap();
let remove_peer = new_change_peer_request(ConfChangeType::RemoveNode, peer);
let req = new_admin_request(region_id, region.get_region_epoch(), remove_peer);
self.async_request(req)
}
pub fn async_add_peer(
&mut self,
region_id: u64,
peer: metapb::Peer,
) -> Result<mpsc::Receiver<RaftCmdResponse>> {
let region = block_on(self.pd_client.get_region_by_id(region_id))
.unwrap()
.unwrap();
let add_peer = new_change_peer_request(ConfChangeType::AddNode, peer);
let req = new_admin_request(region_id, region.get_region_epoch(), add_peer);
self.async_request(req)
}
pub fn must_put(&mut self, key: &[u8], value: &[u8]) {
self.must_put_cf(CF_DEFAULT, key, value);
}
pub fn must_put_cf(&mut self, cf: &str, key: &[u8], value: &[u8]) {
if let Err(e) = self.batch_put(key, vec![new_put_cf_cmd(cf, key, value)]) {
panic!("has error: {:?}", e);
}
}
pub fn put(&mut self, key: &[u8], value: &[u8]) -> result::Result<(), PbError> {
self.batch_put(key, vec![new_put_cf_cmd(CF_DEFAULT, key, value)])
.map(|_| ())
}
pub fn batch_put(
&mut self,
region_key: &[u8],
reqs: Vec<Request>,
) -> result::Result<RaftCmdResponse, PbError> {
let resp = self.request(region_key, reqs, false, Duration::from_secs(5));
if resp.get_header().has_error() {
Err(resp.get_header().get_error().clone())
} else {
Ok(resp)
}
}
pub fn must_delete(&mut self, key: &[u8]) {
self.must_delete_cf(CF_DEFAULT, key)
}
pub fn must_delete_cf(&mut self, cf: &str, key: &[u8]) {
let resp = self.request(
key,
vec![new_delete_cmd(cf, key)],
false,
Duration::from_secs(5),
);
if resp.get_header().has_error() {
panic!("response {:?} has error", resp);
}
}
pub fn must_delete_range_cf(&mut self, cf: &str, start: &[u8], end: &[u8]) {
let resp = self.request(
start,
vec![new_delete_range_cmd(cf, start, end)],
false,
Duration::from_secs(5),
);
if resp.get_header().has_error() {
panic!("response {:?} has error", resp);
}
}
pub fn must_notify_delete_range_cf(&mut self, cf: &str, start: &[u8], end: &[u8]) {
let mut req = new_delete_range_cmd(cf, start, end);
req.mut_delete_range().set_notify_only(true);
let resp = self.request(start, vec![req], false, Duration::from_secs(5));
if resp.get_header().has_error() {
panic!("response {:?} has error", resp);
}
}
pub fn must_flush_cf(&mut self, cf: &str, sync: bool) {
for engines in &self.dbs {
engines.kv.flush_cf(cf, sync).unwrap();
}
}
pub fn get_region_epoch(&self, region_id: u64) -> RegionEpoch {
block_on(self.pd_client.get_region_by_id(region_id))
.unwrap()
.unwrap()
.take_region_epoch()
}
pub fn region_detail(&self, region_id: u64, store_id: u64) -> RegionDetailResponse {
let status_cmd = new_region_detail_cmd();
let peer = new_peer(store_id, 0);
let req = new_status_request(region_id, peer, status_cmd);
let resp = self.call_command(req, Duration::from_secs(5));
assert!(resp.is_ok(), "{:?}", resp);
let mut resp = resp.unwrap();
assert!(resp.has_status_response());
let mut status_resp = resp.take_status_response();
assert_eq!(status_resp.get_cmd_type(), StatusCmdType::RegionDetail);
assert!(status_resp.has_region_detail());
status_resp.take_region_detail()
}
pub fn truncated_state(&self, region_id: u64, store_id: u64) -> RaftTruncatedState {
self.apply_state(region_id, store_id).take_truncated_state()
}
pub fn wait_log_truncated(&self, region_id: u64, store_id: u64, index: u64) {
let timer = Instant::now();
loop {
let truncated_state = self.truncated_state(region_id, store_id);
if truncated_state.get_index() >= index {
return;
}
if timer.saturating_elapsed() >= Duration::from_secs(5) {
panic!(
"[region {}] log is still not truncated to {}: {:?} on store {}",
region_id, index, truncated_state, store_id,
);
}
thread::sleep(Duration::from_millis(10));
}
}
pub fn wait_tombstone(&self, region_id: u64, peer: metapb::Peer) {
let timer = Instant::now();
let mut state;
loop {
state = self.region_local_state(region_id, peer.get_store_id());
if state.get_state() == PeerState::Tombstone
&& state.get_region().get_peers().contains(&peer)
{
return;
}
if timer.saturating_elapsed() > Duration::from_secs(5) {
break;
}
thread::sleep(Duration::from_millis(10));
}
panic!(
"{:?} is still not gc in region {} {:?}",
peer, region_id, state
);
}
pub fn apply_state(&self, region_id: u64, store_id: u64) -> RaftApplyState {
let key = keys::apply_state_key(region_id);
self.get_engine(store_id)
.c()
.get_msg_cf::<RaftApplyState>(engine_traits::CF_RAFT, &key)
.unwrap()
.unwrap()
}
pub fn raft_local_state(&self, region_id: u64, store_id: u64) -> RaftLocalState {
let key = keys::raft_state_key(region_id);
self.get_raft_engine(store_id)
.c()
.get_msg::<raft_serverpb::RaftLocalState>(&key)
.unwrap()
.unwrap()
}
pub fn region_local_state(&self, region_id: u64, store_id: u64) -> RegionLocalState {
self.get_engine(store_id)
.c()
.get_msg_cf::<RegionLocalState>(
engine_traits::CF_RAFT,
&keys::region_state_key(region_id),
)
.unwrap()
.unwrap()
}
pub fn wait_last_index(
&mut self,
region_id: u64,
store_id: u64,
expected: u64,
timeout: Duration,
) {
let timer = Instant::now();
loop {
let raft_state = self.raft_local_state(region_id, store_id);
let cur_index = raft_state.get_last_index();
if cur_index >= expected {
return;
}
if timer.saturating_elapsed() >= timeout {
panic!(
"[region {}] last index still not reach {}: {:?}",
region_id, expected, raft_state
);
}
thread::sleep(Duration::from_millis(10));
}
}
pub fn restore_kv_meta(&self, region_id: u64, store_id: u64, snap: &RocksSnapshot) {
let (meta_start, meta_end) = (
keys::region_meta_prefix(region_id),
keys::region_meta_prefix(region_id + 1),
);
let mut kv_wb = self.engines[&store_id].kv.write_batch();
self.engines[&store_id]
.kv
.scan_cf(CF_RAFT, &meta_start, &meta_end, false, |k, _| {
kv_wb.delete(k).unwrap();
Ok(true)
})
.unwrap();
snap.scan_cf(CF_RAFT, &meta_start, &meta_end, false, |k, v| {
kv_wb.put(k, v).unwrap();
Ok(true)
})
.unwrap();
let (raft_start, raft_end) = (
keys::region_raft_prefix(region_id),
keys::region_raft_prefix(region_id + 1),
);
self.engines[&store_id]
.kv
.scan_cf(CF_RAFT, &raft_start, &raft_end, false, |k, _| {
kv_wb.delete(k).unwrap();
Ok(true)
})
.unwrap();
snap.scan_cf(CF_RAFT, &raft_start, &raft_end, false, |k, v| {
kv_wb.put(k, v).unwrap();
Ok(true)
})
.unwrap();
kv_wb.write().unwrap();
}
pub fn restore_raft(&self, region_id: u64, store_id: u64, snap: &RocksSnapshot) {
let (raft_start, raft_end) = (
keys::region_raft_prefix(region_id),
keys::region_raft_prefix(region_id + 1),
);
let mut raft_wb = self.engines[&store_id].raft.write_batch();
self.engines[&store_id]
.raft
.scan(&raft_start, &raft_end, false, |k, _| {
raft_wb.delete(k).unwrap();
Ok(true)
})
.unwrap();
snap.scan(&raft_start, &raft_end, false, |k, v| {
raft_wb.put(k, v).unwrap();
Ok(true)
})
.unwrap();
raft_wb.write().unwrap();
}
pub fn add_send_filter<F: FilterFactory>(&self, factory: F) {
let mut sim = self.sim.wl();
for node_id in sim.get_node_ids() {
for filter in factory.generate(node_id) {
sim.add_send_filter(node_id, filter);
}
}
}
pub fn transfer_leader(&mut self, region_id: u64, leader: metapb::Peer) {
let epoch = self.get_region_epoch(region_id);
let transfer_leader = new_admin_request(region_id, &epoch, new_transfer_leader_cmd(leader));
let resp = self
.call_command_on_leader(transfer_leader, Duration::from_secs(5))
.unwrap();
assert_eq!(
resp.get_admin_response().get_cmd_type(),
AdminCmdType::TransferLeader,
"{:?}",
resp
);
}
pub fn must_transfer_leader(&mut self, region_id: u64, leader: metapb::Peer) {
let timer = Instant::now();
loop {
self.reset_leader_of_region(region_id);
let cur_leader = self.leader_of_region(region_id);
if let Some(ref cur_leader) = cur_leader {
if cur_leader.get_id() == leader.get_id()
&& cur_leader.get_store_id() == leader.get_store_id()
{
return;
}
}
if timer.saturating_elapsed() > Duration::from_secs(5) {
panic!(
"failed to transfer leader to [{}] {:?}, current leader: {:?}",
region_id, leader, cur_leader
);
}
self.transfer_leader(region_id, leader.clone());
}
}
pub fn get_snap_dir(&self, node_id: u64) -> String {
self.sim.rl().get_snap_dir(node_id)
}
pub fn get_snap_mgr(&self, node_id: u64) -> SnapManager {
self.sim.rl().get_snap_mgr(node_id).clone()
}
pub fn clear_send_filters(&mut self) {
let mut sim = self.sim.wl();
for node_id in sim.get_node_ids() {
sim.clear_send_filters(node_id);
}
}
    // Similar to `ask_split`, except that the message it sends is `Msg::SplitRegion`,
    // and `region` is not embedded in that message.
    // The caller must ensure that `split_key` is within `region`.
pub fn split_region(
&mut self,
region: &metapb::Region,
split_key: &[u8],
cb: Callback<RocksSnapshot>,
) {
let leader = self.leader_of_region(region.get_id()).unwrap();
let router = self.sim.rl().get_router(leader.get_store_id()).unwrap();
let split_key = split_key.to_vec();
CasualRouter::send(
&router,
region.get_id(),
CasualMessage::SplitRegion {
region_epoch: region.get_region_epoch().clone(),
split_keys: vec![split_key],
callback: cb,
source: "test".into(),
},
)
.unwrap();
}
pub fn must_split(&mut self, region: &metapb::Region, split_key: &[u8]) {
let mut try_cnt = 0;
let split_count = self.pd_client.get_split_count();
loop {
debug!("asking split"; "region" => ?region, "key" => ?split_key);
            // In case the ask-split message is ignored, we should retry.
if try_cnt % 50 == 0 {
self.reset_leader_of_region(region.get_id());
let key = split_key.to_vec();
let check = Box::new(move |write_resp: WriteResponse| {
let mut resp = write_resp.response;
if resp.get_header().has_error() {
let error = resp.get_header().get_error();
if error.has_epoch_not_match()
|| error.has_not_leader()
|| error.has_stale_command()
|| error
.get_message()
.contains("peer has not applied to current term")
{
warn!("fail to split: {:?}, ignore.", error);
return;
}
panic!("failed to split: {:?}", resp);
}
let admin_resp = resp.mut_admin_response();
let split_resp = admin_resp.mut_splits();
let regions = split_resp.get_regions();
assert_eq!(regions.len(), 2);
assert_eq!(regions[0].get_end_key(), key.as_slice());
assert_eq!(regions[0].get_end_key(), regions[1].get_start_key());
});
if self.leader_of_region(region.get_id()).is_some() {
self.split_region(region, split_key, Callback::write(check));
}
}
if self.pd_client.check_split(region, split_key)
&& self.pd_client.get_split_count() > split_count
{
return;
}
if try_cnt > 250 {
panic!(
"region {:?} has not been split by {}",
region,
log_wrappers::hex_encode_upper(split_key)
);
}
try_cnt += 1;
sleep_ms(20);
}
}
pub fn wait_region_split(&mut self, region: &metapb::Region) {
self.wait_region_split_max_cnt(region, 20, 250, true);
}
pub fn wait_region_split_max_cnt(
&mut self,
region: &metapb::Region,
itvl_ms: u64,
max_try_cnt: u64,
is_panic: bool,
) {
let mut try_cnt = 0;
let split_count = self.pd_client.get_split_count();
loop {
if self.pd_client.get_split_count() > split_count {
match self.pd_client.get_region(region.get_start_key()) {
Err(_) => {}
Ok(left) => {
if left.get_end_key() != region.get_end_key() {
return;
}
}
};
}
if try_cnt > max_try_cnt {
if is_panic {
panic!(
"region {:?} has not been split after {}ms",
region,
max_try_cnt * itvl_ms
);
} else {
return;
}
}
try_cnt += 1;
sleep_ms(itvl_ms);
}
}
fn new_prepare_merge(&self, source: u64, target: u64) -> RaftCmdRequest {
let region = block_on(self.pd_client.get_region_by_id(target))
.unwrap()
.unwrap();
let prepare_merge = new_prepare_merge(region);
let source_region = block_on(self.pd_client.get_region_by_id(source))
.unwrap()
.unwrap();
new_admin_request(
source_region.get_id(),
source_region.get_region_epoch(),
prepare_merge,
)
}
pub fn merge_region(&mut self, source: u64, target: u64, cb: Callback<RocksSnapshot>) {
let mut req = self.new_prepare_merge(source, target);
let leader = self.leader_of_region(source).unwrap();
req.mut_header().set_peer(leader.clone());
self.sim
.rl()
.async_command_on_node(leader.get_store_id(), req, cb)
.unwrap();
}
pub fn try_merge(&mut self, source: u64, target: u64) -> RaftCmdResponse {
self.call_command_on_leader(
self.new_prepare_merge(source, target),
Duration::from_secs(5),
)
.unwrap()
}
pub fn must_try_merge(&mut self, source: u64, target: u64) {
let resp = self.try_merge(source, target);
if is_error_response(&resp) {
panic!(
"{} failed to try merge to {}, resp {:?}",
source, target, resp
);
}
}
    /// Make sure the region exists on that store.
pub fn must_region_exist(&mut self, region_id: u64, store_id: u64) {
let mut try_cnt = 0;
loop {
let find_leader =
new_status_request(region_id, new_peer(store_id, 0), new_region_leader_cmd());
let resp = self
.call_command(find_leader, Duration::from_secs(5))
.unwrap();
if !is_error_response(&resp) {
return;
}
if try_cnt > 250 {
panic!(
"region {} doesn't exist on store {} after {} tries",
region_id, store_id, try_cnt
);
}
try_cnt += 1;
sleep_ms(20);
}
}
    /// Make sure the region does not exist on that store.
pub fn must_region_not_exist(&mut self, region_id: u64, store_id: u64) {
let mut try_cnt = 0;
loop {
let status_cmd = new_region_detail_cmd();
let peer = new_peer(store_id, 0);
let req = new_status_request(region_id, peer, status_cmd);
let resp = self.call_command(req, Duration::from_secs(5)).unwrap();
if resp.get_header().has_error() && resp.get_header().get_error().has_region_not_found()
{
return;
}
if try_cnt > 250 {
panic!(
"region {} still exists on store {} after {} tries: {:?}",
region_id, store_id, try_cnt, resp
);
}
try_cnt += 1;
sleep_ms(20);
}
}
pub fn must_remove_region(&mut self, store_id: u64, region_id: u64) {
let timer = Instant::now();
loop {
let peer = new_peer(store_id, 0);
let find_leader = new_status_request(region_id, peer, new_region_leader_cmd());
let resp = self
.call_command(find_leader, Duration::from_secs(5))
.unwrap();
if is_error_response(&resp) {
assert!(
resp.get_header().get_error().has_region_not_found(),
"unexpected error resp: {:?}",
resp
);
break;
}
if timer.saturating_elapsed() > Duration::from_secs(60) {
panic!("region {} is not removed after 60s.", region_id);
}
thread::sleep(Duration::from_millis(100));
}
}
    // It's so common that we provide an API for it.
pub fn partition(&self, s1: Vec<u64>, s2: Vec<u64>) {
self.add_send_filter(PartitionFilterFactory::new(s1, s2));
}
pub fn must_wait_for_leader_expire(&self, node_id: u64, region_id: u64) {
let timer = Instant::now_coarse();
while timer.saturating_elapsed() < Duration::from_secs(5) {
if self
.query_leader(node_id, region_id, Duration::from_secs(1))
.is_none()
{
return;
}
sleep_ms(100);
}
panic!(
"region {}'s replica in store {} still has a valid leader after 5 secs",
region_id, node_id
);
}
pub fn must_send_store_heartbeat(&self, node_id: u64) {
let router = self.sim.rl().get_router(node_id).unwrap();
StoreRouter::send(&router, StoreMsg::Tick(StoreTick::PdStoreHeartbeat)).unwrap();
}
pub fn must_update_region_for_unsafe_recover(&mut self, node_id: u64, region: &metapb::Region) {
let router = self.sim.rl().get_router(node_id).unwrap();
let mut try_cnt = 0;
loop {
if try_cnt % 50 == 0 {
// In case the message is ignored, re-send it every 50 tries.
router
.force_send(
region.get_id(),
PeerMsg::UpdateRegionForUnsafeRecover(region.clone()),
)
.unwrap();
}
if let Ok(Some(current)) = block_on(self.pd_client.get_region_by_id(region.get_id())) {
if current.get_start_key() == region.get_start_key()
&& current.get_end_key() == region.get_end_key()
{
return;
}
}
if try_cnt > 500 {
panic!("region {:?} is not updated", region);
}
try_cnt += 1;
sleep_ms(20);
}
}
pub fn must_recreate_region_for_unsafe_recover(
&mut self,
node_id: u64,
region: &metapb::Region,
) {
let router = self.sim.rl().get_router(node_id).unwrap();
let mut try_cnt = 0;
loop {
if try_cnt % 50 == 0 {
// In case the message is ignored, re-send it every 50 tries.
StoreRouter::send(&router, StoreMsg::CreatePeer(region.clone())).unwrap();
}
if let Ok(Some(_)) = block_on(self.pd_client.get_region_by_id(region.get_id())) {
return;
}
if try_cnt > 250 {
panic!("region {:?} is not created", region);
}
try_cnt += 1;
sleep_ms(20);
}
}
pub fn gc_peer(
&mut self,
region_id: u64,
node_id: u64,
peer: metapb::Peer,
) -> std::result::Result<(), TrySendError<RaftMessage>> {
let router = self.sim.rl().get_router(node_id).unwrap();
let mut message = RaftMessage::default();
message.set_region_id(region_id);
message.set_from_peer(peer.clone());
message.set_to_peer(peer);
message.set_region_epoch(self.get_region_epoch(region_id));
message.set_is_tombstone(true);
router.send_raft_message(message)
}
pub fn must_gc_peer(&mut self, region_id: u64, node_id: u64, peer: metapb::Peer) {
for _ in 0..250 {
self.gc_peer(region_id, node_id, peer.clone()).unwrap();
if self.region_local_state(region_id, node_id).get_state() == PeerState::Tombstone {
return;
}
sleep_ms(20);
}
panic!(
"gc peer timeout: region id {}, node id {}, peer {:?}",
region_id, node_id, peer
);
}
pub fn get_ctx(&mut self, key: &[u8]) -> Context {
let region = self.get_region(key);
let leader = self.leader_of_region(region.id).unwrap();
let epoch = self.get_region_epoch(region.id);
let mut ctx = Context::default();
ctx.set_region_id(region.id);
ctx.set_peer(leader);
ctx.set_region_epoch(epoch);
ctx
}
}
impl<T: Simulator> Drop for Cluster<T> {
fn drop(&mut self) {
test_util::clear_failpoints();
self.shutdown();
}
}
| {
debug!("about to shutdown cluster");
let keys = match self.sim.read() {
Ok(s) => s.get_node_ids(),
Err(_) => {
safe_panic!("failed to acquire read lock");
// Leave the resource to avoid double panic.
return;
}
};
for id in keys {
self.stop_node(id);
}
self.leaders.clear();
self.store_metas.clear();
debug!("all nodes are shut down.");
} |
test_iminuit.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from numpy.testing import assert_allclose
from .. import Parameter, Parameters, optimize_iminuit
pytest.importorskip("iminuit")
def fcn(parameters):
x = parameters["x"].value
y = parameters["y"].value
z = parameters["z"].value
x_opt, y_opt, z_opt = 2, 3e5, 4e-5
x_err, y_err, z_err = 0.2, 3e4, 4e-6
return ((x - x_opt) / x_err) ** 2 + ((y - y_opt) / y_err) ** 2 + ((z - z_opt) / z_err) ** 2
@pytest.fixture()
def pars():
x = Parameter("x", 2.1)
y = Parameter("y", 3.1, scale=1e5)
z = Parameter("z", 4.1, scale=1e-5)
return Parameters([x, y, z])
def test_iminuit_basic(pars):
factors, info, minuit = optimize_iminuit(function=fcn, parameters=pars)
assert info["success"]
assert_allclose(fcn(pars), 0, atol=1e-5)
# Check the result in parameters is OK
assert_allclose(pars["x"].value, 2, rtol=1e-3)
assert_allclose(pars["y"].value, 3e5, rtol=1e-3)
# Precision of estimate on "z" is very poor (0.040488). Why is it so bad?
assert_allclose(pars["z"].value, 4e-5, rtol=2e-2)
# Check that minuit sees the parameter factors correctly
assert_allclose(factors, [2, 3, 4], rtol=1e-3)
assert_allclose(minuit.values["par_000_x"], 2, rtol=1e-3)
assert_allclose(minuit.values["par_001_y"], 3, rtol=1e-3)
assert_allclose(minuit.values["par_002_z"], 4, rtol=1e-3)
def test_iminuit_frozen(pars):
pars["y"].frozen = True
factors, info, minuit = optimize_iminuit(function=fcn, parameters=pars)
assert info["success"]
assert_allclose(pars["x"].value, 2, rtol=1e-4)
assert_allclose(pars["y"].value, 3.1e5)
assert_allclose(pars["z"].value, 4.e-5, rtol=1e-4)
assert_allclose(fcn(pars), 0.111112, rtol=1e-5)
assert minuit.list_of_fixed_param() == ["par_001_y"]
def | (pars):
pars["y"].min = 301000
factors, info, minuit = optimize_iminuit(function=fcn, parameters=pars)
assert info["success"]
# Check the result in parameters is OK
assert_allclose(pars["x"].value, 2, rtol=1e-2)
assert_allclose(pars["y"].value, 301000, rtol=1e-3)
# Check that minuit sees the limit factors correctly
states = minuit.get_param_states()
assert not states[0]["has_limits"]
y = states[1]
assert y["has_limits"]
assert_allclose(y["lower_limit"], 3.01)
# The next assert can be added when we no longer test on iminuit 1.2
# See https://github.com/gammapy/gammapy/pull/1771
# assert states[1]["upper_limit"] is None
| test_iminuit_limits |
setting_ui.rs | use bevy_egui::egui::{Align2, Context, Window};
pub struct SettingWindow {
    /// Controls whether the window is shown
open: bool,
    /// Marks that the window is being opened for the first time
first_open: bool,
frame: u32,
}
impl Default for SettingWindow {
fn default() -> Self {
Self {
open: false,
first_open: true,
frame: 0,
}
}
}
impl SettingWindow {
pub fn show(&mut self, ctx: &Context) {
if !self.open {
return;
}
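        // Count frames while the window is visible; wrapping_add avoids
        // overflow panics in debug builds.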
self.frame = self.frame.wrapping_add(1);
let window = Window::new("setting")
.collapsible(false)
.open(&mut self.open);
        // If this is the first time the window is opened, center it.
        // The `self.frame <= 2` condition exists because the window's initial position is
        // undetermined the first time it is shown; it only settles after `anchor` runs twice.
let window = if self.frame <= 2 || self.first_open {
self.first_open = false;
window.anchor(Align2::CENTER_CENTER, [0.0, -30.0])
} else {
window
};
window.show(ctx, |ui| {
ui.horizontal(|ui| {
ui.label("settings");
});
});
}
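    /// Toggle the window's visibility; when opening, request re-centering.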
pub fn trigger_show(&mut self) {
self.open = !self.open;
if self.open {
self.first_open = true;
}
} | } |
|
getJob.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20200501
import (
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
func LookupJob(ctx *pulumi.Context, args *LookupJobArgs, opts ...pulumi.InvokeOption) (*LookupJobResult, error) {
var rv LookupJobResult
err := ctx.Invoke("azure-nextgen:media/v20200501:getJob", args, &rv, opts...)
if err != nil {
return nil, err
}
return &rv, nil
}
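// Example invocation (sketch — the resource names are purely illustrative):
//
//	job, err := LookupJob(ctx, &LookupJobArgs{
//		AccountName:       "myMediaAccount",
//		JobName:           "myJob",
//		ResourceGroupName: "myResourceGroup",
//		TransformName:     "myTransform",
//	})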
type LookupJobArgs struct {
// The Media Services account name.
AccountName string `pulumi:"accountName"`
// The Job name.
JobName string `pulumi:"jobName"`
// The name of the resource group within the Azure subscription.
ResourceGroupName string `pulumi:"resourceGroupName"`
// The Transform name.
TransformName string `pulumi:"transformName"`
}
// A Job resource type. The progress and state can be obtained by polling a Job or subscribing to events using EventGrid.
type LookupJobResult struct {
// Customer provided key, value pairs that will be returned in Job and JobOutput state events.
CorrelationData map[string]string `pulumi:"correlationData"`
// The UTC date and time when the customer has created the Job, in 'YYYY-MM-DDThh:mm:ssZ' format.
Created string `pulumi:"created"`
// Optional customer supplied description of the Job.
Description *string `pulumi:"description"`
// The UTC date and time at which this Job finished processing.
EndTime string `pulumi:"endTime"`
// The inputs for the Job.
Input interface{} `pulumi:"input"`
// The UTC date and time when the customer has last updated the Job, in 'YYYY-MM-DDThh:mm:ssZ' format.
LastModified string `pulumi:"lastModified"`
// The name of the resource
Name string `pulumi:"name"`
// The outputs for the Job.
Outputs []JobOutputAssetResponse `pulumi:"outputs"`
// Priority with which the job should be processed. Higher priority jobs are processed before lower priority jobs. If not set, the default is normal.
Priority *string `pulumi:"priority"`
// The UTC date and time at which this Job began processing. | StartTime string `pulumi:"startTime"`
// The current state of the job.
State string `pulumi:"state"`
// The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
Type string `pulumi:"type"`
} | |
mod.rs | use std::collections::HashSet;
pub mod single_point;
pub mod multi_point;
pub mod shuffle;
pub mod uniform;
pub mod partially_mapped;
pub mod order;
pub mod cycle;
pub mod linear;
pub mod blend;
pub mod simulated_binary;
pub mod uniform_partially_mapped;
pub use self::single_point::single_point_crossover;
pub use self::multi_point::multi_point_crossover; | pub use self::uniform::uniform_crossover;
pub use self::partially_mapped::partially_mapped_crossover;
pub use self::order::order_crossover;
pub use self::cycle::cycle_crossover;
pub use self::linear::linear_crossover;
pub use self::blend::blend_crossover;
pub use self::simulated_binary::simulated_binary_crossover;
pub use self::uniform_partially_mapped::uniform_partially_mapped_crossover;
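/// Returns true iff `vec` is a permutation of `0..vec.len()`:
/// every element must be `< len` and no element may repeat.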
fn check_continuous(vec: &Vec<usize>) -> bool {
let n = vec.len();
let mut set:HashSet<usize> = HashSet::new();
for x in vec.iter() {
set.insert(*x);
if *x >= n {
return false;
}
}
set.len() == vec.len()
}
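/// Panics unless the two parent vectors have the same length.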
fn check_length<T>(parent1 : &Vec<T>, parent2 : &Vec<T>) {
if parent1.len() != parent2.len() {
panic!("Vectors must be the same length");
}
} |
pub use self::shuffle::shuffle_crossover;
|
enhanced-filtered-cache.go | //
// Copyright 2020 IBM Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package filteredcache
import (
"context"
"fmt"
"reflect"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/rest"
toolscache "k8s.io/client-go/tools/cache"
"k8s.io/klog"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)
// NewEnhancedFilteredCacheBuilder implements a customized cache with a filter for specified resources
func | (gvkLabelsMap map[schema.GroupVersionKind][]Selector) cache.NewCacheFunc {
return func(config *rest.Config, opts cache.Options) (cache.Cache, error) {
// Get the frequency that informers are resynced
var resync time.Duration
if opts.Resync != nil {
resync = *opts.Resync
}
// Generate informersmap to contain the gvks and their informers
informersMap, err := buildInformersMap(config, opts, gvkLabelsMap, resync)
if err != nil {
return nil, err
}
// Create a default cache for the unspecified resources
fallback, err := cache.New(config, opts)
if err != nil {
klog.Error(err, "Failed to init fallback cache")
return nil, err
}
// Return the customized cache
return enhancedFilteredCache{config: config, informersMap: informersMap, fallback: fallback, namespace: opts.Namespace, Scheme: opts.Scheme}, nil
}
}
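// Example wiring (sketch — the GVK and selector values are illustrative; Selector is
// this package's selector type with LabelSelector/FieldSelector fields):
//
//	gvkLabelsMap := map[schema.GroupVersionKind][]Selector{
//		{Group: "", Version: "v1", Kind: "ConfigMap"}: {
//			{LabelSelector: "app=my-app"},
//		},
//	}
//	newCacheFunc := NewEnhancedFilteredCacheBuilder(gvkLabelsMap)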
// buildInformersMap generates an informersMap for the specified resources
func buildInformersMap(config *rest.Config, opts cache.Options, gvkLabelsMap map[schema.GroupVersionKind][]Selector, resync time.Duration) (map[schema.GroupVersionKind][]toolscache.SharedIndexInformer, error) {
// Initialize informersMap
informersMap := make(map[schema.GroupVersionKind][]toolscache.SharedIndexInformer)
for gvk, selectors := range gvkLabelsMap {
for _, selector := range selectors {
// Get the plural type of the kind as resource
plural := kindToResource(gvk.Kind)
fieldSelector := selector.FieldSelector
labelSelector := selector.LabelSelector
selectorFunc := func(options *metav1.ListOptions) {
options.FieldSelector = fieldSelector
options.LabelSelector = labelSelector
}
			// Create a ListerWatcher with the selector via NewFilteredListWatchFromClient
client, err := getClientForGVK(gvk, config, opts.Scheme)
if err != nil {
return nil, err
}
listerWatcher := toolscache.NewFilteredListWatchFromClient(client, plural, opts.Namespace, selectorFunc)
// Build typed runtime object for informer
objType := &unstructured.Unstructured{}
objType.GetObjectKind().SetGroupVersionKind(gvk)
typed, err := opts.Scheme.New(gvk)
if err != nil {
return nil, err
}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(objType.UnstructuredContent(), typed); err != nil {
return nil, err
}
			// Create a new informer with the ListerWatcher
informer := toolscache.NewSharedIndexInformer(listerWatcher, typed, resync, toolscache.Indexers{toolscache.NamespaceIndex: toolscache.MetaNamespaceIndexFunc})
informersMap[gvk] = append(informersMap[gvk], informer)
// Build list type for the GVK
gvkList := schema.GroupVersionKind{Group: gvk.Group, Version: gvk.Version, Kind: gvk.Kind + "List"}
			informersMap[gvkList] = append(informersMap[gvkList], informer)
}
}
return informersMap, nil
}
// enhancedFilteredCache is the cache customized by the specified selectors
type enhancedFilteredCache struct {
config *rest.Config
informersMap map[schema.GroupVersionKind][]toolscache.SharedIndexInformer
fallback cache.Cache
namespace string
Scheme *runtime.Scheme
}
// Get implements Reader
// If the resource is in the cache, Get fetches it from the informer.
// Otherwise, the resource is fetched by the k8s client.
func (efc enhancedFilteredCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error {
// Get the GVK of the runtime object
gvk, err := apiutil.GVKForObject(obj, efc.Scheme)
if err != nil {
return err
}
if informers, ok := efc.informersMap[gvk]; ok {
		// Look for the object in the cache
existsInCache := false
for _, informer := range informers {
if err := efc.getFromStore(informer, key, obj, gvk); err == nil {
existsInCache = true
break
}
}
if !existsInCache {
			// If the object was not found in the cache, fetch it from the k8s apiserver
if err := efc.getFromClient(ctx, key, obj, gvk); err != nil {
return err
}
}
return nil
}
// Passthrough
return efc.fallback.Get(ctx, key, obj)
}
// getFromStore gets the resource from the cache
func (efc enhancedFilteredCache) getFromStore(informer toolscache.SharedIndexInformer, key client.ObjectKey, obj runtime.Object, gvk schema.GroupVersionKind) error {
	// Cluster-scoped and namespaced resources use different keys
var keyString string
if key.Namespace == "" {
keyString = key.Name
} else {
keyString = key.Namespace + "/" + key.Name
}
item, exists, err := informer.GetStore().GetByKey(keyString)
if err != nil {
klog.Error("Failed to get item from cache", "error", err)
return err
}
if !exists {
return apierrors.NewNotFound(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, key.String())
}
if _, isObj := item.(runtime.Object); !isObj {
// This should never happen
return fmt.Errorf("cache contained %T, which is not an Object", item)
}
// deep copy to avoid mutating cache
item = item.(runtime.Object).DeepCopyObject()
// Copy the value of the item in the cache to the returned value
objVal := reflect.ValueOf(obj)
itemVal := reflect.ValueOf(item)
	if !itemVal.Type().AssignableTo(objVal.Type()) {
return fmt.Errorf("cache had type %s, but %s was asked for", itemVal.Type(), objVal.Type())
}
reflect.Indirect(objVal).Set(reflect.Indirect(itemVal))
obj.GetObjectKind().SetGroupVersionKind(gvk)
return nil
}
// getFromClient gets the resource by the k8s client
func (efc enhancedFilteredCache) getFromClient(ctx context.Context, key client.ObjectKey, obj runtime.Object, gvk schema.GroupVersionKind) error {
// Get resource by the kubeClient
resource := kindToResource(gvk.Kind)
client, err := getClientForGVK(gvk, efc.config, efc.Scheme)
if err != nil {
return err
}
result, err := client.
Get().
Namespace(key.Namespace).
Name(key.Name).
Resource(resource).
VersionedParams(&metav1.GetOptions{}, metav1.ParameterCodec).
Do(ctx).
Get()
if apierrors.IsNotFound(err) {
return err
} else if err != nil {
klog.Error("Failed to retrieve resource list", "error", err)
return err
}
// Copy the value of the item in the cache to the returned value
objVal := reflect.ValueOf(obj)
itemVal := reflect.ValueOf(result)
	if !itemVal.Type().AssignableTo(objVal.Type()) {
return fmt.Errorf("cache had type %s, but %s was asked for", itemVal.Type(), objVal.Type())
}
reflect.Indirect(objVal).Set(reflect.Indirect(itemVal))
obj.GetObjectKind().SetGroupVersionKind(gvk)
return nil
}
// List lists items out of the indexer and writes them to list
func (efc enhancedFilteredCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
gvk, err := apiutil.GVKForObject(list, efc.Scheme)
if err != nil {
return err
}
if informers, ok := efc.informersMap[gvk]; ok {
// Construct filter
var objList []interface{}
listOpts := client.ListOptions{}
listOpts.ApplyOptions(opts)
// Check the labelSelector
var labelSel labels.Selector
if listOpts.LabelSelector != nil {
labelSel = listOpts.LabelSelector
}
		// Look for objects in the cache
if listOpts.FieldSelector != nil {
			// TODO: support combining multiple indices, GetIndexers, etc.
field, val, requiresExact := requiresExactMatch(listOpts.FieldSelector)
if !requiresExact {
return fmt.Errorf("non-exact field matches are not supported by the cache")
}
// list all objects by the field selector. If this is namespaced and we have one, ask for the
// namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces"
// namespace.
for _, informer := range informers {
objects, err := informer.GetIndexer().ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val))
				if err != nil {
					return err
				}
				if len(objects) != 0 {
					objList = append(objList, objects...)
				}
}
} else if listOpts.Namespace != "" {
for _, informer := range informers {
objects, err := informer.GetIndexer().ByIndex(toolscache.NamespaceIndex, listOpts.Namespace)
				if err != nil {
					return err
				}
				if len(objects) != 0 {
					objList = append(objList, objects...)
				}
}
} else {
for _, informer := range informers {
objects := informer.GetIndexer().List()
if len(objects) != 0 {
objList = append(objList, objects...)
}
}
}
		// If no object was found in the cache, fetch the list from the k8s apiserver
if len(objList) == 0 {
return efc.ListFromClient(ctx, list, gvk, opts...)
}
// Check namespace and labelSelector
runtimeObjList := make([]runtime.Object, 0, len(objList))
for _, item := range objList {
obj, isObj := item.(runtime.Object)
if !isObj {
return fmt.Errorf("cache contained %T, which is not an Object", obj)
}
meta, err := apimeta.Accessor(obj)
if err != nil {
return err
}
var namespace string
if efc.namespace != "" {
if listOpts.Namespace != "" && efc.namespace != listOpts.Namespace {
return fmt.Errorf("unable to list from namespace : %v because of unknown namespace for the cache", listOpts.Namespace)
}
namespace = efc.namespace
} else if listOpts.Namespace != "" {
namespace = listOpts.Namespace
}
if namespace != "" && namespace != meta.GetNamespace() {
continue
}
if labelSel != nil {
lbls := labels.Set(meta.GetLabels())
if !labelSel.Matches(lbls) {
continue
}
}
outObj := obj.DeepCopyObject()
outObj.GetObjectKind().SetGroupVersionKind(listToGVK(gvk))
runtimeObjList = append(runtimeObjList, outObj)
}
return apimeta.SetList(list, runtimeObjList)
}
// Passthrough
return efc.fallback.List(ctx, list, opts...)
}
// ListFromClient implements list resource by k8sClient
func (efc enhancedFilteredCache) ListFromClient(ctx context.Context, list runtime.Object, gvk schema.GroupVersionKind, opts ...client.ListOption) error {
listOpts := client.ListOptions{}
listOpts.ApplyOptions(opts)
// Get labelselector and fieldSelector
var labelSelector, fieldSelector string
if listOpts.FieldSelector != nil {
fieldSelector = listOpts.FieldSelector.String()
}
if listOpts.LabelSelector != nil {
labelSelector = listOpts.LabelSelector.String()
}
var namespace string
if efc.namespace != "" {
if listOpts.Namespace != "" && efc.namespace != listOpts.Namespace {
return fmt.Errorf("unable to list from namespace : %v because of unknown namespace for the cache", listOpts.Namespace)
}
namespace = efc.namespace
} else if listOpts.Namespace != "" {
namespace = listOpts.Namespace
}
resource := kindToResource(gvk.Kind[:len(gvk.Kind)-4])
client, err := getClientForGVK(gvk, efc.config, efc.Scheme)
if err != nil {
return err
}
result, err := client.
Get().
Namespace(namespace).
Resource(resource).
VersionedParams(&metav1.ListOptions{
LabelSelector: labelSelector,
FieldSelector: fieldSelector,
}, metav1.ParameterCodec).
Do(ctx).
Get()
if err != nil {
klog.Error("Failed to retrieve resource list: ", err)
return err
}
// Copy the value of the item in the cache to the returned value
objVal := reflect.ValueOf(list)
itemVal := reflect.ValueOf(result)
	if !itemVal.Type().AssignableTo(objVal.Type()) {
return fmt.Errorf("cache had type %s, but %s was asked for", itemVal.Type(), objVal.Type())
}
reflect.Indirect(objVal).Set(reflect.Indirect(itemVal))
list.GetObjectKind().SetGroupVersionKind(gvk)
return nil
}
// enhancedFilteredCacheInformer knows how to handle interacting with the underlying informer with multiple internal informers
type enhancedFilteredCacheInformer struct {
informers []toolscache.SharedIndexInformer
}
// AddEventHandler adds the handler to each internal informer
func (efci *enhancedFilteredCacheInformer) AddEventHandler(handler toolscache.ResourceEventHandler) {
for _, informer := range efci.informers {
informer.AddEventHandler(handler)
}
}
// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each internal informer
func (efci *enhancedFilteredCacheInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) {
for _, informer := range efci.informers {
informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod)
}
}
// HasSynced checks if each internal informer has synced
func (efci *enhancedFilteredCacheInformer) HasSynced() bool {
for _, informer := range efci.informers {
if !informer.HasSynced() {
return false
}
}
return true
}
// AddIndexers adds the indexer for each internal informer
func (efci *enhancedFilteredCacheInformer) AddIndexers(indexers toolscache.Indexers) error {
for _, informer := range efci.informers {
err := informer.AddIndexers(indexers)
if err != nil {
return err
}
}
return nil
}
// GetInformer fetches or constructs an informer for the given object that corresponds to a single
// API kind and resource.
func (efc enhancedFilteredCache) GetInformer(ctx context.Context, obj client.Object) (cache.Informer, error) {
gvk, err := apiutil.GVKForObject(obj, efc.Scheme)
if err != nil {
return nil, err
}
if informers, ok := efc.informersMap[gvk]; ok {
return &enhancedFilteredCacheInformer{informers: informers}, nil
}
// Passthrough
return efc.fallback.GetInformer(ctx, obj)
}
// GetInformerForKind is similar to GetInformer, except that it takes a group-version-kind, instead
// of the underlying object.
func (efc enhancedFilteredCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (cache.Informer, error) {
if informers, ok := efc.informersMap[gvk]; ok {
return &enhancedFilteredCacheInformer{informers: informers}, nil
}
// Passthrough
return efc.fallback.GetInformerForKind(ctx, gvk)
}
// Start runs all the informers known to this cache until the given context is cancelled.
// It blocks.
func (efc enhancedFilteredCache) Start(ctx context.Context) error {
klog.Info("Start enhanced filtered cache")
for _, informers := range efc.informersMap {
for _, informer := range informers {
informer := informer
go informer.Run(ctx.Done())
}
}
return efc.fallback.Start(ctx)
}
// WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache.
func (efc enhancedFilteredCache) WaitForCacheSync(ctx context.Context) bool {
// Wait for informer to sync
waiting := true
for waiting {
select {
case <-ctx.Done():
waiting = false
case <-time.After(time.Second):
// Re-check every informer each tick; keep waiting while any informer has not synced.
waiting = false
for _, informers := range efc.informersMap {
for _, informer := range informers {
if !informer.HasSynced() {
waiting = true
}
}
}
}
}
// Wait for fallback cache to sync
return efc.fallback.WaitForCacheSync(ctx)
}
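// Illustrative startup sequence (a sketch; the error handling and surrounding
// manager wiring are assumptions):
//
//	go func() { _ = efc.Start(ctx) }()
//	if ok := efc.WaitForCacheSync(ctx); !ok {
//	    klog.Error("enhanced filtered cache failed to sync")
//	}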
// IndexField adds an indexer to the underlying informers, using the extraction function to get
// value(s) from the given field, and also registers the index with the fallback cache.
func (efc enhancedFilteredCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error {
gvk, err := apiutil.GVKForObject(obj, efc.Scheme)
if err != nil {
return err
}
if informers, ok := efc.informersMap[gvk]; ok {
for _, informer := range informers {
if err := indexByField(informer, field, extractValue); err != nil {
return err
}
}
}
return efc.fallback.IndexField(ctx, obj, field, extractValue)
}
| NewEnhancedFilteredCacheBuilder |
main.go | package main
import (
"encoding/binary"
"fmt"
"io" |
"github.com/x-junkang/connected/internal/config"
"github.com/x-junkang/connected/internal/protocol"
)
func main() {
conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", config.GlobalObject.Host, config.GlobalObject.TCPPort))
if err != nil {
fmt.Println("连接出错")
return
}
header := &protocol.MarsHeader{
HeaderLength: 20,
Sequence: 1,
BodyLength: 5,
}
data := []byte{'h', 'e', 'l', 'l', 'o'}
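// The header's BodyLength (5) must match len(data) so the server can frame the message.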
for {
binary.Write(conn, binary.LittleEndian, header)
binary.Write(conn, binary.LittleEndian, data)
fmt.Println("done 1")
var respHead protocol.MarsHeader
err = binary.Read(conn, binary.LittleEndian, &respHead)
if err != nil {
return
}
bodyLen := int(respHead.BodyLength)
resp := make([]byte, bodyLen)
n, err := io.ReadFull(conn, resp)
if err != nil {
return
}
fmt.Println(n, string(resp))
time.Sleep(1 * time.Second)
}
} | "net"
"time" |
remote_peer.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use super::*;
#[derive(Debug, PartialEq)]
pub enum PeerChannel<T> {
Connected(Arc<T>),
Connecting,
Disconnected,
}
impl<T> PeerChannel<T> {
pub fn connection(&self) -> Option<Arc<T>> {
match self {
PeerChannel::Connected(t) => Some(t.clone()),
_ => None,
}
}
}
/// Internal object to manage a remote peer
#[derive(Debug)]
pub struct RemotePeer {
pub peer_id: PeerId,
/// Contains the remote peer's target profile.
pub target_descriptor: RwLock<Option<AvrcpService>>,
/// Contains the remote peer's controller profile.
pub controller_descriptor: RwLock<Option<AvrcpService>>,
/// Control channel to the remote device.
pub control_channel: RwLock<PeerChannel<AvcPeer>>,
// TODO(BT-2221): add browse channel.
// browse_channel: RwLock<PeerChannel<AvtcpPeer>>,
//
/// Contains a vec of all event stream listeners obtained by any Controllers around this peer
/// that are listening for events from this peer.
pub controller_listeners: Mutex<Vec<mpsc::Sender<ControllerEvent>>>,
/// Processes commands received as AVRCP target and holds state for continuations and requested
/// notifications for the control channel. Only set once we have enough information to determine
/// our role based on the peer's SDP record.
pub command_handler: Mutex<Option<ControlChannelHandler>>,
}
impl RemotePeer {
pub fn new(peer_id: PeerId) -> Self {
Self {
peer_id,
control_channel: RwLock::new(PeerChannel::Disconnected),
// TODO(BT-2221): add browse channel.
//browse_channel: RwLock::new(PeerChannel::Disconnected),
controller_listeners: Mutex::new(Vec::new()),
target_descriptor: RwLock::new(None),
controller_descriptor: RwLock::new(None),
command_handler: Mutex::new(None),
}
}
/// Enumerates all listening controller_listeners queues and sends a clone of the event to each
pub fn broadcast_event(&self, event: ControllerEvent) {
let mut listeners = self.controller_listeners.lock();
// remove all the dead listeners from the list. | if let Err(send_error) = sender.try_send(event.clone()) {
fx_log_err!(
"unable to send event to peer controller stream for {} {:?}",
self.peer_id,
send_error
);
}
}
}
// Hold the write lock on control_channel before calling this.
pub fn reset_command_handler(&self) {
let mut cmd_handler = self.command_handler.lock();
*cmd_handler = None;
}
pub fn reset_connection(&self) {
let mut control_channel = self.control_channel.write();
self.reset_command_handler();
*control_channel = PeerChannel::Disconnected;
}
pub fn get_control_connection(&self) -> Result<Arc<AvcPeer>, Error> {
self.control_channel.read().connection().ok_or(Error::RemoteNotFound)
}
/// Send a generic "status" vendor dependent command and returns the result as a future.
/// This method encodes the `command` packet, awaits and decodes all responses, will issue
/// continuation commands for incomplete responses (eg "get_element_attributes" command), and
/// will return a result of the decoded packet or an error for any non stable response received
pub async fn send_status_vendor_dependent_command<'a>(
peer: &'a AvcPeer,
command: &'a impl VendorDependent,
) -> Result<Vec<u8>, Error> {
let mut buf = vec![];
let packet = command.encode_packet().expect("unable to encode packet");
let mut stream = peer.send_vendor_dependent_command(AvcCommandType::Status, &packet[..])?;
loop {
let response = loop {
let result = stream.next().await.ok_or(Error::CommandFailed)?;
let response: AvcCommandResponse = result.map_err(|e| Error::AvctpError(e))?;
fx_vlog!(tag: "avrcp", 1, "vendor response {:#?}", response);
match response.response_type() {
AvcResponseType::Interim => continue,
AvcResponseType::NotImplemented => return Err(Error::CommandNotSupported),
AvcResponseType::Rejected => return Err(Error::CommandFailed),
AvcResponseType::InTransition => return Err(Error::UnexpectedResponse),
AvcResponseType::Changed => return Err(Error::UnexpectedResponse),
AvcResponseType::Accepted => return Err(Error::UnexpectedResponse),
AvcResponseType::ImplementedStable => break response.1,
}
};
match VendorDependentPreamble::decode(&response[..]) {
Ok(preamble) => {
buf.extend_from_slice(&response[preamble.encoded_len()..]);
match preamble.packet_type() {
PacketType::Single | PacketType::Stop => {
break;
}
// Still more to decode. Queue up a continuation call.
_ => {}
}
}
Err(e) => {
fx_log_info!("Unable to parse vendor dependent preamble: {:?}", e);
return Err(Error::PacketError(e));
}
};
let packet = RequestContinuingResponseCommand::new(u8::from(&command.pdu_id()))
.encode_packet()
.expect("unable to encode packet");
stream = peer.send_vendor_dependent_command(AvcCommandType::Control, &packet[..])?;
}
Ok(buf)
}
/// Sends a single passthrough keycode over the control channel.
pub async fn send_avc_passthrough(&self, payload: &[u8; 2]) -> Result<(), Error> {
let peer = self.get_control_connection()?;
let response = peer.send_avc_passthrough_command(payload).await;
match response {
Ok(AvcCommandResponse(AvcResponseType::Accepted, _)) => {
return Ok(());
}
Ok(AvcCommandResponse(AvcResponseType::Rejected, _)) => {
fx_log_info!("avrcp command rejected {}: {:?}", self.peer_id, response);
return Err(Error::CommandNotSupported);
}
Err(e) => {
fx_log_err!("error sending avc command to {}: {:?}", self.peer_id, e);
return Err(Error::CommandFailed);
}
_ => {
fx_log_err!(
"error sending avc command. unhandled response {}: {:?}",
self.peer_id,
response
);
return Err(Error::CommandFailed);
}
}
}
/// Retrieve the events supported by the peer by issuing a GetCapabilities command.
pub async fn get_supported_events(&self) -> Result<Vec<NotificationEventId>, Error> {
let peer = self.get_control_connection()?;
let cmd = GetCapabilitiesCommand::new(GetCapabilitiesCapabilityId::EventsId);
fx_vlog!(tag: "avrcp", 1, "get_capabilities(events) send command {:#?}", cmd);
let buf = Self::send_status_vendor_dependent_command(&peer, &cmd).await?;
let capabilities =
GetCapabilitiesResponse::decode(&buf[..]).map_err(|e| Error::PacketError(e))?;
let mut event_ids = vec![];
for event_id in capabilities.event_ids() {
event_ids.push(NotificationEventId::try_from(event_id)?);
}
Ok(event_ids)
}
} | listeners.retain(|i| !i.is_closed());
for sender in listeners.iter_mut() { |
DatasetDatasetContentVersionValue.go | package properties
// Code generated by go generate; DO NOT EDIT.
// It's generated by "github.com/KablamoOSS/kombustion/generate"
// DatasetDatasetContentVersionValue Documentation: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iotanalytics-dataset-variable-datasetcontentversionvalue.html
type DatasetDatasetContentVersionValue struct {
DatasetName interface{} `yaml:"DatasetName,omitempty"`
}
// DatasetDatasetContentVersionValue validation
func (resource DatasetDatasetContentVersionValue) Validate() []error { | errors := []error{}
return errors
} |
|
kubernetai_test.go | package kubernetai
import (
"net"
"reflect"
"testing"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/kubernetes"
"github.com/coredns/coredns/plugin/kubernetes/object"
"github.com/coredns/coredns/request"
"github.com/miekg/dns"
)
type k8iPodHandlerTester struct{}
var podip string
func (k8i *k8iPodHandlerTester) PodWithIP(k kubernetes.Kubernetes, ip string) *object.Pod {
if ip == "" {
return nil
}
pod := &object.Pod{
Namespace: "test-1",
PodIP: ip,
}
return pod
}
var k8iPodHandlerTest k8iPodHandlerTester
type responseWriterTest struct {
dns.ResponseWriter
}
func (res *responseWriterTest) RemoteAddr() net.Addr {
ip := net.ParseIP(podip)
return &net.UDPAddr{
IP: ip,
Port: 53,
}
}
func TestKubernetai_AutoPath(t *testing.T) | {
type fields struct {
Zones []string
Next plugin.Handler
Kubernetes []*kubernetes.Kubernetes
autoPathSearch []string
p *k8iPodHandlerTester
}
type args struct {
state request.Request
}
w := &responseWriterTest{}
k8sClusterLocal := &kubernetes.Kubernetes{
Zones: []string{
"cluster.local.",
},
}
k8sFlusterLocal := &kubernetes.Kubernetes{
Zones: []string{
"fluster.local.",
},
}
defaultK8iConfig := fields{
Kubernetes: []*kubernetes.Kubernetes{
k8sFlusterLocal,
k8sClusterLocal,
},
p: &k8iPodHandlerTest,
}
tests := []struct {
name string
fields fields
args args
want []string
ip string
}{
{
name: "standard autopath cluster.local",
fields: defaultK8iConfig,
args: args{
state: request.Request{
W: w,
Req: &dns.Msg{
Question: []dns.Question{
{Name: "svc-1-a.test-1.svc.cluster.local.", Qtype: 1, Qclass: 1},
},
},
},
},
want: []string{"test-1.svc.cluster.local.", "svc.cluster.local.", "cluster.local.", "test-1.svc.fluster.local.", "svc.fluster.local.", "fluster.local.", ""},
ip: "172.17.0.7",
},
{
name: "standard autopath servicename.svc",
fields: defaultK8iConfig,
args: args{
state: request.Request{
W: w,
Req: &dns.Msg{
Question: []dns.Question{
{Name: "svc-2-a.test-2.test-1.svc.cluster.local.", Qtype: 1, Qclass: 1},
},
},
},
},
want: []string{"test-1.svc.cluster.local.", "svc.cluster.local.", "cluster.local.", "test-1.svc.fluster.local.", "svc.fluster.local.", "fluster.local.", ""},
ip: "172.17.0.7",
},
{
name: "standard autopath lookup fluster in cluster.local",
fields: defaultK8iConfig,
args: args{
state: request.Request{
W: w,
Req: &dns.Msg{
Question: []dns.Question{
{Name: "svc-d.test-2.svc.fluster.local.svc.cluster.local.", Qtype: 1, Qclass: 1},
},
},
},
},
want: []string{"test-1.svc.cluster.local.", "svc.cluster.local.", "cluster.local.", "test-1.svc.fluster.local.", "svc.fluster.local.", "fluster.local.", ""},
ip: "172.17.0.7",
},
{
name: "not in zone",
fields: defaultK8iConfig,
args: args{
state: request.Request{
W: w,
Req: &dns.Msg{
Question: []dns.Question{
{Name: "svc-1-a.test-1.svc.zone.local.", Qtype: 1, Qclass: 1},
},
},
},
},
ip: "172.17.0.7",
want: nil,
},
{
name: "requesting pod does not exist",
fields: defaultK8iConfig,
args: args{
state: request.Request{
W: w,
Req: &dns.Msg{
Question: []dns.Question{
{Name: "svc-1-a.test-1.svc.zone.local.", Qtype: 1, Qclass: 1},
},
},
},
},
ip: "",
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
k8i := Kubernetai{
Zones: tt.fields.Zones,
Kubernetes: tt.fields.Kubernetes,
autoPathSearch: tt.fields.autoPathSearch,
p: tt.fields.p,
}
podip = tt.ip
if got := k8i.AutoPath(tt.args.state); !reflect.DeepEqual(got, tt.want) {
t.Errorf("Kubernetai.AutoPath() = %+v, want %+v", got, tt.want)
}
})
}
} |
|
conftest.py | import pytest
import stardog.content as content
import stardog.content_types as content_types
import os
# STARDOG_ENDPOINT = os.environ.get('STARDOG_ENDPOINT', None)
STARDOG_HOSTNAME_NODE_1 = os.environ.get("STARDOG_HOSTNAME_NODE_1", None)
STARDOG_HOSTNAME_CACHE = os.environ.get("STARDOG_HOSTNAME_CACHE", None)
STARDOG_HOSTNAME_STANDBY = os.environ.get("STARDOG_HOSTNAME_STANDBY", None)
def pytest_addoption(parser):
parser.addoption("--username", action="store", default="admin")
parser.addoption("--passwd", action="store", default="admin")
parser.addoption("--endpoint", action="store", default="http://localhost:5820")
parser.addoption("--http_proxy", action="store", default="")
parser.addoption("--https_proxy", action="store", default="")
parser.addoption("--ssl_verify", action="store_true", default=True)
@pytest.fixture
def conn_string(pytestconfig):
conn = {
"endpoint": pytestconfig.getoption("endpoint"),
"username": pytestconfig.getoption("username"),
"password": pytestconfig.getoption("passwd"),
}
return conn
@pytest.fixture
def proxies(pytestconfig):
proxies_config = {}
for protocol in ("http", "https"):
proxy_url = pytestconfig.getoption(f"{protocol}_proxy")
if (
proxy_url is not None
and isinstance(proxy_url, str)
and proxy_url.startswith(protocol)
):
proxies_config.update({protocol: proxy_url})
return proxies_config
@pytest.fixture
def ssl_verify(pytestconfig):
return pytestconfig.getoption("ssl_verify")
@pytest.fixture
def bulkload_content():
contents = [
content.Raw(
"<urn:subj> <urn:pred> <urn:obj3> .",
content_types.TURTLE,
name="bulkload.ttl",
),
(content.File("test/data/example.ttl.zip"), "urn:context"),
content.URL(
"https://www.w3.org/2000/10/rdf-tests/" "RDF-Model-Syntax_1.0/ms_4.1_1.rdf"
),
]
return contents
@pytest.fixture
def cache_target_info():
target_info = {
"target_name": "pystardog-test-cache-target",
"hostname": STARDOG_HOSTNAME_CACHE,
"port": 5820,
"username": "admin",
"password": "admin",
}
return target_info
@pytest.fixture
def cluster_standby_node_conn_string():
standby_conn_string = {
"endpoint": f"http://{STARDOG_HOSTNAME_STANDBY}:5820",
"username": "admin",
"password": "admin",
} |
# Java 291 (packed in the Stardog docker image from 7.6.3+) disabled TLS 1.0 and 1.1 which breaks the MySQL connector:
# https://www.oracle.com/java/technologies/javase/8u291-relnotes.html
# ?useSSL=false works around this for testing purposes:
@pytest.fixture
def music_options():
options = {
"jdbc.driver": "com.mysql.jdbc.Driver",
"jdbc.username": "user",
"jdbc.password": "pass",
"mappings.syntax": "STARDOG",
"jdbc.url": "jdbc:mysql://pystardog_mysql_music/music?useSSL=false",
}
return options
@pytest.fixture
def videos_options():
options = {
"jdbc.driver": "com.mysql.jdbc.Driver",
"jdbc.username": "user",
"jdbc.password": "pass",
"mappings.syntax": "STARDOG",
"jdbc.url": "jdbc:mysql://pystardog_mysql_videos/videos?useSSL=false",
}
return options | return standby_conn_string |
protection_containers_operations.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class ProtectionContainersOperations(object):
"""ProtectionContainersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-12-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-12-01"
self.config = config
def get(
self, vault_name, resource_group_name, fabric_name, container_name, custom_headers=None, raw=False, **operation_config):
"""Gets details of the specific container registered to your Recovery
Services Vault.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the
recovery services vault is present.
:type resource_group_name: str
:param fabric_name: Name of the fabric where the container belongs.
:type fabric_name: str
:param container_name: Name of the container whose details need to be
fetched.
:type container_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ProtectionContainerResource or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.recoveryservicesbackup.models.ProtectionContainerResource
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'containerName': self._serialize.url("container_name", container_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ProtectionContainerResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}'}
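# Illustrative usage (a sketch; the client variable and argument values are
# assumptions, not part of the generated code):
#
#   container = backup_client.protection_containers.get(
#       vault_name="myVault",
#       resource_group_name="myResourceGroup",
#       fabric_name="Azure",
#       container_name="myContainer",
#   )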
def register(
self, vault_name, resource_group_name, fabric_name, container_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Registers the container with Recovery Services vault.
This is an asynchronous operation. To track the operation status, use the
location header to get the latest status of the operation.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the
recovery services vault is present.
:type resource_group_name: str
:param fabric_name: Fabric name associated with the container.
:type fabric_name: str
:param container_name: Name of the container to be registered.
:type container_name: str
:param parameters: Request body for operation
:type parameters:
~azure.mgmt.recoveryservicesbackup.models.ProtectionContainerResource
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ProtectionContainerResource or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.recoveryservicesbackup.models.ProtectionContainerResource
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.register.metadata['url']
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'containerName': self._serialize.url("container_name", container_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ProtectionContainerResource')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ProtectionContainerResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
register.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}'}
def unregister(
self, vault_name, resource_group_name, fabric_name, container_name, custom_headers=None, raw=False, **operation_config):
"""Unregisters the given container from your Recovery Services Vault.
This is an asynchronous operation. To determine whether the backend
service has finished processing the request, call the Get Container
Operation Result API.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the
recovery services vault is present.
:type resource_group_name: str
:param fabric_name: Name of the fabric where the container belongs.
:type fabric_name: str
:param container_name: Name of the container which needs to be
unregistered from the Recovery Services Vault.
:type container_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.unregister.metadata['url']
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'containerName': self._serialize.url("container_name", container_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
unregister.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}'}
def inquire(
self, vault_name, resource_group_name, fabric_name, container_name, custom_headers=None, raw=False, **operation_config):
"""Inquires all the protectable item in the given container that can be
protected.
Inquires all the protectable items that are protectable under the given
container.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the
recovery services vault is present.
:type resource_group_name: str
:param fabric_name: Fabric Name associated with the container.
:type fabric_name: str
:param container_name: Name of the container in which inquiry needs to
be triggered.
:type container_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.inquire.metadata['url']
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'containerName': self._serialize.url("container_name", container_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
inquire.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/inquire'}
def | (
self, vault_name, resource_group_name, fabric_name, filter=None, custom_headers=None, raw=False, **operation_config):
"""Discovers all the containers in the subscription that can be backed up
to Recovery Services Vault. This is an asynchronous operation. To know
the status of the operation, call GetRefreshOperationResult API.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the
recovery services vault is present.
:type resource_group_name: str
:param fabric_name: Fabric name associated with the container.
:type fabric_name: str
:param filter: OData filter options.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.refresh.metadata['url']
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
refresh.metadata = {'url': '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/refreshContainers'}
| refresh |
conn.go | // Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// TLS low level connection and record layer
package tls
import (
"bytes"
"crypto/cipher"
"crypto/subtle"
"crypto/x509"
"errors"
"fmt"
"io"
"net"
"sync"
"sync/atomic"
"time"
)
// A Conn represents a secured connection.
// It implements the net.Conn interface.
type Conn struct {
// constant
conn net.Conn
isClient bool
phase handshakeStatus // protected by in.Mutex
// handshakeConfirmed is an atomic bool for phase == handshakeConfirmed
handshakeConfirmed int32
// confirmMutex is held by any read operation before handshakeConfirmed
confirmMutex sync.Mutex
// constant after handshake; protected by handshakeMutex
handshakeMutex sync.Mutex // handshakeMutex < in.Mutex, out.Mutex, errMutex
handshakeErr error // error resulting from handshake
connID []byte // Random connection id
clientHello []byte // ClientHello packet contents
vers uint16 // TLS version
haveVers bool // version has been negotiated
config *Config // configuration passed to constructor
// handshakeComplete is true if the connection reached application data
// and it's equivalent to phase > handshakeRunning
handshakeComplete bool
// handshakes counts the number of handshakes performed on the
// connection so far. If renegotiation is disabled then this is either
// zero or one.
handshakes int
didResume bool // whether this connection was a session resumption
cipherSuite uint16
ocspResponse []byte // stapled OCSP response
scts [][]byte // signed certificate timestamps from server
peerCertificates []*x509.Certificate
// verifiedChains contains the certificate chains that we built, as
// opposed to the ones presented by the server.
verifiedChains [][]*x509.Certificate
// serverName contains the server name indicated by the client, if any.
serverName string
// secureRenegotiation is true if the server echoed the secure
// renegotiation extension. (This is meaningless as a server because
// renegotiation is not supported in that case.)
secureRenegotiation bool
// clientFinishedIsFirst is true if the client sent the first Finished
// message during the most recent handshake. This is recorded because
// the first transmitted Finished message is the tls-unique
// channel-binding value.
clientFinishedIsFirst bool
// closeNotifyErr is any error from sending the alertCloseNotify record.
closeNotifyErr error
// closeNotifySent is true if the Conn attempted to send an
// alertCloseNotify record.
closeNotifySent bool
// clientFinished and serverFinished contain the Finished message sent
// by the client or server in the most recent handshake. This is
// retained to support the renegotiation extension and tls-unique
// channel-binding.
clientFinished [12]byte
serverFinished [12]byte
clientProtocol string
clientProtocolFallback bool
// ticketMaxEarlyData is the maximum bytes of 0-RTT application data
// that the client is allowed to send on the ticket it used.
ticketMaxEarlyData int64
// input/output
in, out halfConn // in.Mutex < out.Mutex
rawInput *block // raw input, right off the wire
input *block // application data waiting to be read
hand bytes.Buffer // handshake data waiting to be read
buffering bool // whether records are buffered in sendBuf
sendBuf []byte // a buffer of records waiting to be sent
// bytesSent counts the bytes of application data sent.
// packetsSent counts packets.
bytesSent int64
packetsSent int64
// activeCall is an atomic int32; the low bit is whether Close has
// been called. the rest of the bits are the number of goroutines
// in Conn.Write.
activeCall int32
// TLS 1.3 needs the server state until it reaches the Client Finished
hs *serverHandshakeState
// earlyDataBytes is the number of bytes of early data received so
// far. Tracked to enforce max_early_data_size.
// We don't keep track of rejected 0-RTT data since there's no need
// to ever buffer it. in.Mutex.
earlyDataBytes int64
// binder is the value of the PSK binder that was validated to
// accept the 0-RTT data. Exposed as ConnectionState.Unique0RTTToken.
binder []byte
tmp [16]byte
}
type handshakeStatus int
const (
handshakeRunning handshakeStatus = iota
discardingEarlyData
readingEarlyData
waitingClientFinished
readingClientFinished
handshakeConfirmed
)
// Access to net.Conn methods.
// Cannot just embed net.Conn because that would
// export the struct field too.
// LocalAddr returns the local network address.
func (c *Conn) LocalAddr() net.Addr {
return c.conn.LocalAddr()
}
// RemoteAddr returns the remote network address.
func (c *Conn) RemoteAddr() net.Addr {
return c.conn.RemoteAddr()
}
// SetDeadline sets the read and write deadlines associated with the connection.
// A zero value for t means Read and Write will not time out.
// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
func (c *Conn) SetDeadline(t time.Time) error {
return c.conn.SetDeadline(t)
}
// SetReadDeadline sets the read deadline on the underlying connection.
// A zero value for t means Read will not time out.
func (c *Conn) SetReadDeadline(t time.Time) error {
return c.conn.SetReadDeadline(t)
}
// SetWriteDeadline sets the write deadline on the underlying connection.
// A zero value for t means Write will not time out.
// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
func (c *Conn) SetWriteDeadline(t time.Time) error {
return c.conn.SetWriteDeadline(t)
}
// A halfConn represents one direction of the record layer
// connection, either sending or receiving.
type halfConn struct {
sync.Mutex
err error // first permanent error
version uint16 // protocol version
cipher interface{} // cipher algorithm
mac macFunction
seq [8]byte // 64-bit sequence number
bfree *block // list of free blocks
additionalData [13]byte // to avoid allocs; interface method args escape
nextCipher interface{} // next encryption state
nextMac macFunction // next MAC algorithm
// used to save allocating a new buffer for each MAC.
inDigestBuf, outDigestBuf []byte
traceErr func(error)
}
func (hc *halfConn) setErrorLocked(err error) error {
hc.err = err
if hc.traceErr != nil {
hc.traceErr(err)
}
return err
}
// prepareCipherSpec sets the encryption and MAC states
// that a subsequent changeCipherSpec will use.
func (hc *halfConn) prepareCipherSpec(version uint16, cipher interface{}, mac macFunction) {
hc.version = version
hc.nextCipher = cipher
hc.nextMac = mac
}
// changeCipherSpec changes the encryption and MAC states
// to the ones previously passed to prepareCipherSpec.
func (hc *halfConn) changeCipherSpec() error {
if hc.nextCipher == nil {
return alertInternalError
}
hc.cipher = hc.nextCipher
hc.mac = hc.nextMac
hc.nextCipher = nil
hc.nextMac = nil
for i := range hc.seq {
hc.seq[i] = 0
}
return nil
}
func (hc *halfConn) setCipher(version uint16, cipher interface{}) {
hc.version = version
hc.cipher = cipher
for i := range hc.seq {
hc.seq[i] = 0
}
}
// incSeq increments the sequence number.
func (hc *halfConn) incSeq() {
for i := 7; i >= 0; i-- {
hc.seq[i]++
if hc.seq[i] != 0 {
return
}
}
// Not allowed to let sequence number wrap.
// Instead, must renegotiate before it does.
// Not likely enough to bother.
panic("TLS: sequence number wraparound")
}
// extractPadding returns, in constant time, the length of the padding to remove
// from the end of payload. It also returns a byte which is equal to 255 if the
// padding was valid and 0 otherwise. See RFC 2246, section 6.2.3.2
func | (payload []byte) (toRemove int, good byte) {
if len(payload) < 1 {
return 0, 0
}
paddingLen := payload[len(payload)-1]
t := uint(len(payload)-1) - uint(paddingLen)
// if len(payload)-1 >= paddingLen then the MSB of t is zero
good = byte(int32(^t) >> 31)
// The maximum possible padding length plus the actual length field
toCheck := 256
// The length of the padded data is public, so we can use an if here
if toCheck > len(payload) {
toCheck = len(payload)
}
for i := 0; i < toCheck; i++ {
t := uint(paddingLen) - uint(i)
// if i <= paddingLen then the MSB of t is zero
mask := byte(int32(^t) >> 31)
b := payload[len(payload)-1-i]
good &^= mask&paddingLen ^ mask&b
}
// We AND together the bits of good and replicate the result across
// all the bits.
good &= good << 4
good &= good << 2
good &= good << 1
good = uint8(int8(good) >> 7)
toRemove = int(paddingLen) + 1
return
}
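// Worked example (illustrative): if a payload ends in byte 0x03, the record
// claims three padding bytes, so the last four bytes (three padding bytes plus
// the length byte itself) must all equal 0x03; when that holds, extractPadding
// returns toRemove=4 and good=0xff, all in constant time.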
// extractPaddingSSL30 is a replacement for extractPadding in the case that the
// protocol version is SSLv3. In this version, the contents of the padding
// are random and cannot be checked.
func extractPaddingSSL30(payload []byte) (toRemove int, good byte) {
if len(payload) < 1 {
return 0, 0
}
paddingLen := int(payload[len(payload)-1]) + 1
if paddingLen > len(payload) {
return 0, 0
}
return paddingLen, 255
}
func roundUp(a, b int) int {
return a + (b-a%b)%b
}
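// e.g. roundUp(17, 8) == 24 and roundUp(16, 8) == 16.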
// cbcMode is an interface for block ciphers using cipher block chaining.
type cbcMode interface {
cipher.BlockMode
SetIV([]byte)
}
// decrypt checks and strips the mac and decrypts the data in b. Returns a
// success boolean, the number of bytes to skip from the start of the record in
// order to get the application payload, and an optional alert value.
func (hc *halfConn) decrypt(b *block) (ok bool, prefixLen int, alertValue alert) {
// pull out payload
payload := b.data[recordHeaderLen:]
macSize := 0
if hc.mac != nil {
macSize = hc.mac.Size()
}
paddingGood := byte(255)
paddingLen := 0
explicitIVLen := 0
// decrypt
if hc.cipher != nil {
switch c := hc.cipher.(type) {
case cipher.Stream:
c.XORKeyStream(payload, payload)
case aead:
explicitIVLen = c.explicitNonceLen()
if len(payload) < explicitIVLen {
return false, 0, alertBadRecordMAC
}
nonce := payload[:explicitIVLen]
payload = payload[explicitIVLen:]
if len(nonce) == 0 {
nonce = hc.seq[:]
}
var additionalData []byte
if hc.version < VersionTLS13 {
copy(hc.additionalData[:], hc.seq[:])
copy(hc.additionalData[8:], b.data[:3])
n := len(payload) - c.Overhead()
hc.additionalData[11] = byte(n >> 8)
hc.additionalData[12] = byte(n)
additionalData = hc.additionalData[:]
}
var err error
payload, err = c.Open(payload[:0], nonce, payload, additionalData)
if err != nil {
return false, 0, alertBadRecordMAC
}
b.resize(recordHeaderLen + explicitIVLen + len(payload))
case cbcMode:
blockSize := c.BlockSize()
if hc.version >= VersionTLS11 {
explicitIVLen = blockSize
}
if len(payload)%blockSize != 0 || len(payload) < roundUp(explicitIVLen+macSize+1, blockSize) {
return false, 0, alertBadRecordMAC
}
if explicitIVLen > 0 {
c.SetIV(payload[:explicitIVLen])
payload = payload[explicitIVLen:]
}
c.CryptBlocks(payload, payload)
if hc.version == VersionSSL30 {
paddingLen, paddingGood = extractPaddingSSL30(payload)
} else {
paddingLen, paddingGood = extractPadding(payload)
// To protect against CBC padding oracles like Lucky13, the data
// past paddingLen (which is secret) is passed to the MAC
// function as extra data, to be fed into the HMAC after
// computing the digest. This makes the MAC constant time as
// long as the digest computation is constant time and does not
// affect the subsequent write.
}
default:
panic("unknown cipher type")
}
}
// check, strip mac
if hc.mac != nil {
if len(payload) < macSize {
return false, 0, alertBadRecordMAC
}
// strip mac off payload, b.data
n := len(payload) - macSize - paddingLen
n = subtle.ConstantTimeSelect(int(uint32(n)>>31), 0, n) // if n < 0 { n = 0 }
b.data[3] = byte(n >> 8)
b.data[4] = byte(n)
remoteMAC := payload[n : n+macSize]
localMAC := hc.mac.MAC(hc.inDigestBuf, hc.seq[0:], b.data[:recordHeaderLen], payload[:n], payload[n+macSize:])
if subtle.ConstantTimeCompare(localMAC, remoteMAC) != 1 || paddingGood != 255 {
return false, 0, alertBadRecordMAC
}
hc.inDigestBuf = localMAC
b.resize(recordHeaderLen + explicitIVLen + n)
}
hc.incSeq()
return true, recordHeaderLen + explicitIVLen, 0
}
// padToBlockSize calculates the needed padding block, if any, for a payload.
// On exit, prefix aliases payload and extends to the end of the last full
// block of payload. finalBlock is a fresh slice which contains the contents of
// any suffix of payload as well as the needed padding to make finalBlock a
// full block.
func padToBlockSize(payload []byte, blockSize int) (prefix, finalBlock []byte) {
overrun := len(payload) % blockSize
paddingLen := blockSize - overrun
prefix = payload[:len(payload)-overrun]
finalBlock = make([]byte, blockSize)
copy(finalBlock, payload[len(payload)-overrun:])
for i := overrun; i < blockSize; i++ {
finalBlock[i] = byte(paddingLen - 1)
}
return
}
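// Worked example (illustrative): a 10-byte payload with blockSize 8 has a
// 2-byte overrun, so paddingLen is 6: prefix aliases payload[:8], and
// finalBlock holds the two leftover bytes followed by six bytes of 0x05
// (paddingLen - 1), matching the TLS CBC padding format described above.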
// encrypt encrypts and macs the data in b.
func (hc *halfConn) encrypt(b *block, explicitIVLen int) (bool, alert) {
// mac
if hc.mac != nil {
mac := hc.mac.MAC(hc.outDigestBuf, hc.seq[0:], b.data[:recordHeaderLen], b.data[recordHeaderLen+explicitIVLen:], nil)
n := len(b.data)
b.resize(n + len(mac))
copy(b.data[n:], mac)
hc.outDigestBuf = mac
}
payload := b.data[recordHeaderLen:]
// encrypt
if hc.cipher != nil {
switch c := hc.cipher.(type) {
case cipher.Stream:
c.XORKeyStream(payload, payload)
case aead:
payloadLen := len(b.data) - recordHeaderLen - explicitIVLen
overhead := c.Overhead()
if hc.version >= VersionTLS13 {
overhead++
}
b.resize(len(b.data) + overhead)
nonce := b.data[recordHeaderLen : recordHeaderLen+explicitIVLen]
if len(nonce) == 0 {
nonce = hc.seq[:]
}
payload = b.data[recordHeaderLen+explicitIVLen:]
payload = payload[:payloadLen]
var additionalData []byte
if hc.version < VersionTLS13 {
copy(hc.additionalData[:], hc.seq[:])
copy(hc.additionalData[8:], b.data[:3])
hc.additionalData[11] = byte(payloadLen >> 8)
hc.additionalData[12] = byte(payloadLen)
additionalData = hc.additionalData[:]
}
if hc.version >= VersionTLS13 {
// opaque type
payload = payload[:len(payload)+1]
payload[len(payload)-1] = b.data[0]
b.data[0] = byte(recordTypeApplicationData)
}
c.Seal(payload[:0], nonce, payload, additionalData)
case cbcMode:
blockSize := c.BlockSize()
if explicitIVLen > 0 {
c.SetIV(payload[:explicitIVLen])
payload = payload[explicitIVLen:]
}
prefix, finalBlock := padToBlockSize(payload, blockSize)
b.resize(recordHeaderLen + explicitIVLen + len(prefix) + len(finalBlock))
c.CryptBlocks(b.data[recordHeaderLen+explicitIVLen:], prefix)
c.CryptBlocks(b.data[recordHeaderLen+explicitIVLen+len(prefix):], finalBlock)
default:
panic("unknown cipher type")
}
}
// update length to include MAC and any block padding needed.
n := len(b.data) - recordHeaderLen
b.data[3] = byte(n >> 8)
b.data[4] = byte(n)
hc.incSeq()
return true, 0
}
// A block is a simple data buffer.
type block struct {
data []byte
off int // index for Read
link *block
}
// resize resizes block to be n bytes, growing if necessary.
func (b *block) resize(n int) {
if n > cap(b.data) {
b.reserve(n)
}
b.data = b.data[0:n]
}
// reserve makes sure that block contains a capacity of at least n bytes.
func (b *block) reserve(n int) {
if cap(b.data) >= n {
return
}
m := cap(b.data)
if m == 0 {
m = 1024
}
for m < n {
m *= 2
}
data := make([]byte, len(b.data), m)
copy(data, b.data)
b.data = data
}
// readFromUntil reads from r into b until b contains at least n bytes
// or else returns an error.
func (b *block) readFromUntil(r io.Reader, n int) error {
// quick case
if len(b.data) >= n {
return nil
}
// read until have enough.
b.reserve(n)
for {
m, err := r.Read(b.data[len(b.data):cap(b.data)])
b.data = b.data[0 : len(b.data)+m]
if len(b.data) >= n {
// TODO(bradfitz,agl): slightly suspicious
// that we're throwing away r.Read's err here.
break
}
if err != nil {
return err
}
}
return nil
}
func (b *block) Read(p []byte) (n int, err error) {
n = copy(p, b.data[b.off:])
b.off += n
if b.off >= len(b.data) {
err = io.EOF
}
return
}
// newBlock allocates a new block, from hc's free list if possible.
func (hc *halfConn) newBlock() *block {
b := hc.bfree
if b == nil {
return new(block)
}
hc.bfree = b.link
b.link = nil
b.resize(0)
return b
}
// freeBlock returns a block to hc's free list.
// The protocol is such that each side only has a block or two on
// its free list at a time, so there's no need to worry about
// trimming the list, etc.
func (hc *halfConn) freeBlock(b *block) {
b.link = hc.bfree
hc.bfree = b
}
// splitBlock splits a block after the first n bytes,
// returning a block with those n bytes and a
// block with the remainder. the latter may be nil.
func (hc *halfConn) splitBlock(b *block, n int) (*block, *block) {
if len(b.data) <= n {
return b, nil
}
bb := hc.newBlock()
bb.resize(len(b.data) - n)
copy(bb.data, b.data[n:])
b.data = b.data[0:n]
return b, bb
}
// RecordHeaderError results when a TLS record header is invalid.
type RecordHeaderError struct {
// Msg contains a human readable string that describes the error.
Msg string
// RecordHeader contains the five bytes of TLS record header that
// triggered the error.
RecordHeader [5]byte
}
func (e RecordHeaderError) Error() string { return "tls: " + e.Msg }
func (c *Conn) newRecordHeaderError(msg string) (err RecordHeaderError) {
err.Msg = msg
copy(err.RecordHeader[:], c.rawInput.data)
return err
}
// readRecord reads the next TLS record from the connection
// and updates the record layer state.
// c.in.Mutex <= L; c.input == nil.
// c.input can still be nil after a call, retry if so.
func (c *Conn) readRecord(want recordType) error {
// Caller must be in sync with connection:
// handshake data if handshake not yet completed,
// else application data.
switch want {
default:
c.sendAlert(alertInternalError)
return c.in.setErrorLocked(errors.New("tls: unknown record type requested"))
case recordTypeHandshake, recordTypeChangeCipherSpec:
if c.phase != handshakeRunning && c.phase != readingClientFinished {
c.sendAlert(alertInternalError)
return c.in.setErrorLocked(errors.New("tls: handshake or ChangeCipherSpec requested while not in handshake"))
}
case recordTypeApplicationData:
if c.phase == handshakeRunning || c.phase == readingClientFinished {
c.sendAlert(alertInternalError)
return c.in.setErrorLocked(errors.New("tls: application data record requested while in handshake"))
}
}
if c.rawInput == nil {
c.rawInput = c.in.newBlock()
}
b := c.rawInput
// Read header, payload.
if err := b.readFromUntil(c.conn, recordHeaderLen); err != nil {
// RFC suggests that EOF without an alertCloseNotify is
// an error, but popular web sites seem to do this,
// so we can't make it an error.
// if err == io.EOF {
// err = io.ErrUnexpectedEOF
// }
if e, ok := err.(net.Error); !ok || !e.Temporary() {
c.in.setErrorLocked(err)
}
return err
}
typ := recordType(b.data[0])
// No valid TLS record has a type of 0x80, however SSLv2 handshakes
// start with a uint16 length where the MSB is set and the first record
// is always < 256 bytes long. Therefore typ == 0x80 strongly suggests
// an SSLv2 client.
if want == recordTypeHandshake && typ == 0x80 {
c.sendAlert(alertProtocolVersion)
return c.in.setErrorLocked(c.newRecordHeaderError("unsupported SSLv2 handshake received"))
}
vers := uint16(b.data[1])<<8 | uint16(b.data[2])
n := int(b.data[3])<<8 | int(b.data[4])
if n > maxCiphertext {
c.sendAlert(alertRecordOverflow)
msg := fmt.Sprintf("oversized record received with length %d", n)
return c.in.setErrorLocked(c.newRecordHeaderError(msg))
}
if !c.haveVers {
// First message, be extra suspicious: this might not be a TLS
// client. Bail out before reading a full 'body', if possible.
// The current max version is 3.3 so if the version is >= 16.0,
// it's probably not real.
if (typ != recordTypeAlert && typ != want) || vers >= 0x1000 {
c.sendAlert(alertUnexpectedMessage)
return c.in.setErrorLocked(c.newRecordHeaderError("first record does not look like a TLS handshake"))
}
}
if err := b.readFromUntil(c.conn, recordHeaderLen+n); err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if e, ok := err.(net.Error); !ok || !e.Temporary() {
c.in.setErrorLocked(err)
}
return err
}
// Process message.
b, c.rawInput = c.in.splitBlock(b, recordHeaderLen+n)
peekedAlert := peekAlert(b) // peek at a possible alert before decryption
ok, off, alertValue := c.in.decrypt(b)
switch {
case !ok && c.phase == discardingEarlyData:
// If the client said that it's sending early data and we did not
// accept it, we are expected to fail decryption.
c.in.freeBlock(b)
return nil
case ok && c.phase == discardingEarlyData:
c.phase = waitingClientFinished
case !ok:
c.in.traceErr, c.out.traceErr = nil, nil // not that interesting
c.in.freeBlock(b)
err := c.sendAlert(alertValue)
// If decryption failed because the message is an unencrypted
// alert, return a more meaningful error message
if alertValue == alertBadRecordMAC && peekedAlert != nil {
err = peekedAlert
}
return c.in.setErrorLocked(err)
}
b.off = off
data := b.data[b.off:]
if len(data) > maxPlaintext {
c.in.freeBlock(b)
return c.in.setErrorLocked(c.sendAlert(alertRecordOverflow))
}
// After checking the plaintext length, remove 1.3 padding and
// extract the real content type.
// See https://tools.ietf.org/html/draft-ietf-tls-tls13-18#section-5.4.
if c.vers >= VersionTLS13 {
i := len(data) - 1
for i >= 0 {
if data[i] != 0 {
break
}
i--
}
if i < 0 {
c.in.freeBlock(b)
return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
}
typ = recordType(data[i])
data = data[:i]
b.resize(b.off + i) // shrinks, guaranteed not to reallocate
}
switch typ {
default:
c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
case recordTypeAlert:
if len(data) != 2 {
c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
break
}
if alert(data[1]) == alertCloseNotify {
c.in.setErrorLocked(io.EOF)
break
}
if alert(data[1]) == alertEndOfEarlyData {
c.handleEndOfEarlyData()
break
}
switch data[0] {
case alertLevelWarning:
// drop on the floor
c.in.freeBlock(b)
return nil
case alertLevelError:
c.in.setErrorLocked(&net.OpError{Op: "remote error", Err: alert(data[1])})
default:
c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
}
case recordTypeChangeCipherSpec:
if typ != want || len(data) != 1 || data[0] != 1 || c.vers >= VersionTLS13 {
c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
break
}
// Handshake messages are not allowed to fragment across the CCS
if c.hand.Len() > 0 {
c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
break
}
err := c.in.changeCipherSpec()
if err != nil {
c.in.setErrorLocked(c.sendAlert(err.(alert)))
}
case recordTypeApplicationData:
if typ != want || c.phase == waitingClientFinished {
c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
break
}
if c.phase == readingEarlyData {
c.earlyDataBytes += int64(len(b.data) - b.off)
if c.earlyDataBytes > c.ticketMaxEarlyData {
return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
}
}
c.input = b
b = nil
case recordTypeHandshake:
// TODO(rsc): Should at least pick off connection close.
if typ != want && !(c.isClient && c.config.Renegotiation != RenegotiateNever) &&
c.phase != waitingClientFinished {
return c.in.setErrorLocked(c.sendAlert(alertNoRenegotiation))
}
c.hand.Write(data)
if typ != want && c.phase == waitingClientFinished {
if err := c.hs.readClientFinished13(); err != nil {
c.in.setErrorLocked(err)
break
}
}
}
if b != nil {
c.in.freeBlock(b)
}
return c.in.err
}
// peekAlert looks at a message to spot an unencrypted alert. It must be
// called before decryption to avoid a side channel, and its result must
// only be used if decryption fails, to avoid false positives.
func peekAlert(b *block) error {
if len(b.data) < 7 {
return nil
}
if recordType(b.data[0]) != recordTypeAlert {
return nil
}
return &net.OpError{Op: "remote error", Err: alert(b.data[6])}
}
// sendAlert sends a TLS alert message.
// c.out.Mutex <= L.
func (c *Conn) sendAlertLocked(err alert) error {
switch err {
case alertNoRenegotiation, alertCloseNotify:
c.tmp[0] = alertLevelWarning
default:
c.tmp[0] = alertLevelError
}
c.tmp[1] = byte(err)
_, writeErr := c.writeRecordLocked(recordTypeAlert, c.tmp[0:2])
if err == alertCloseNotify {
// closeNotify is a special case in that it isn't an error.
return writeErr
}
return c.out.setErrorLocked(&net.OpError{Op: "local error", Err: err})
}
// sendAlert sends a TLS alert message.
// L < c.out.Mutex.
func (c *Conn) sendAlert(err alert) error {
c.out.Lock()
defer c.out.Unlock()
return c.sendAlertLocked(err)
}
const (
// tcpMSSEstimate is a conservative estimate of the TCP maximum segment
// size (MSS). A constant is used, rather than querying the kernel for
// the actual MSS, to avoid complexity. The value here is the IPv6
// minimum MTU (1280 bytes) minus the overhead of an IPv6 header (40
// bytes) and a TCP header with timestamps (32 bytes).
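// That is, 1280 - 40 - 32 = 1208.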
tcpMSSEstimate = 1208
// recordSizeBoostThreshold is the number of bytes of application data
// sent after which the TLS record size will be increased to the
// maximum.
recordSizeBoostThreshold = 128 * 1024
)
// maxPayloadSizeForWrite returns the maximum TLS payload size to use for the
// next application data record. There is the following trade-off:
//
// - For latency-sensitive applications, such as web browsing, each TLS
// record should fit in one TCP segment.
// - For throughput-sensitive applications, such as large file transfers,
// larger TLS records better amortize framing and encryption overheads.
//
// A simple heuristic that works well in practice is to use small records for
// the first recordSizeBoostThreshold (128KB) of data, then use larger records
// for subsequent data, and reset back to smaller records after the connection
// becomes idle. See "High
// Performance Web Networking", Chapter 4, or:
// https://www.igvita.com/2013/10/24/optimizing-tls-record-size-and-buffering-latency/
//
// In the interests of simplicity and determinism, this code does not attempt
// to reset the record size once the connection is idle, however.
//
// c.out.Mutex <= L.
func (c *Conn) maxPayloadSizeForWrite(typ recordType, explicitIVLen int) int {
if c.config.DynamicRecordSizingDisabled || typ != recordTypeApplicationData {
return maxPlaintext
}
if c.bytesSent >= recordSizeBoostThreshold {
return maxPlaintext
}
// Subtract TLS overheads to get the maximum payload size.
macSize := 0
if c.out.mac != nil {
macSize = c.out.mac.Size()
}
payloadBytes := tcpMSSEstimate - recordHeaderLen - explicitIVLen
if c.out.cipher != nil {
switch ciph := c.out.cipher.(type) {
case cipher.Stream:
payloadBytes -= macSize
case cipher.AEAD:
payloadBytes -= ciph.Overhead()
if c.vers >= VersionTLS13 {
payloadBytes -= 1 // ContentType
}
case cbcMode:
blockSize := ciph.BlockSize()
// The payload must fit in a multiple of blockSize, with
// room for at least one padding byte.
payloadBytes = (payloadBytes & ^(blockSize - 1)) - 1
// The MAC is appended before padding so affects the
// payload size directly.
payloadBytes -= macSize
default:
panic("unknown cipher type")
}
}
// Allow packet growth in arithmetic progression up to max.
pkt := c.packetsSent
c.packetsSent++
if pkt > 1000 {
return maxPlaintext // avoid overflow in multiply below
}
n := payloadBytes * int(pkt+1)
if n > maxPlaintext {
n = maxPlaintext
}
return n
}
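// For illustration: with TLS 1.2 AES-GCM (5-byte record header, 8-byte
// explicit nonce, 16-byte tag), payloadBytes = 1208 - 5 - 8 - 16 = 1179,
// so successive records carry roughly 1179, 2358, 3537, ... bytes until
// n is capped at maxPlaintext or 128KB of data has been sent.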
// c.out.Mutex <= L.
func (c *Conn) write(data []byte) (int, error) {
if c.buffering {
c.sendBuf = append(c.sendBuf, data...)
return len(data), nil
}
n, err := c.conn.Write(data)
c.bytesSent += int64(n)
return n, err
}
func (c *Conn) flush() (int, error) {
if len(c.sendBuf) == 0 {
return 0, nil
}
n, err := c.conn.Write(c.sendBuf)
c.bytesSent += int64(n)
c.sendBuf = nil
c.buffering = false
return n, err
}
// writeRecordLocked writes a TLS record with the given type and payload to the
// connection and updates the record layer state.
// c.out.Mutex <= L.
func (c *Conn) writeRecordLocked(typ recordType, data []byte) (int, error) {
b := c.out.newBlock()
defer c.out.freeBlock(b)
var n int
for len(data) > 0 {
explicitIVLen := 0
explicitIVIsSeq := false
var cbc cbcMode
if c.out.version >= VersionTLS11 {
var ok bool
if cbc, ok = c.out.cipher.(cbcMode); ok {
explicitIVLen = cbc.BlockSize()
}
}
if explicitIVLen == 0 {
if c, ok := c.out.cipher.(aead); ok {
explicitIVLen = c.explicitNonceLen()
// The AES-GCM construction in TLS has an
// explicit nonce so that the nonce can be
// random. However, the nonce is only 8 bytes
// which is too small for a secure, random
// nonce. Therefore we use the sequence number
// as the nonce.
explicitIVIsSeq = explicitIVLen > 0
}
}
m := len(data)
if maxPayload := c.maxPayloadSizeForWrite(typ, explicitIVLen); m > maxPayload {
m = maxPayload
}
b.resize(recordHeaderLen + explicitIVLen + m)
b.data[0] = byte(typ)
vers := c.vers
if vers == 0 {
// Some TLS servers fail if the record version is
// greater than TLS 1.0 for the initial ClientHello.
vers = VersionTLS10
}
if c.vers >= VersionTLS13 {
// TLS 1.3 froze the record layer version at { 3, 1 }.
// See https://tools.ietf.org/html/draft-ietf-tls-tls13-18#section-5.1.
vers = VersionTLS10
}
b.data[1] = byte(vers >> 8)
b.data[2] = byte(vers)
b.data[3] = byte(m >> 8)
b.data[4] = byte(m)
if explicitIVLen > 0 {
explicitIV := b.data[recordHeaderLen : recordHeaderLen+explicitIVLen]
if explicitIVIsSeq {
copy(explicitIV, c.out.seq[:])
} else {
if _, err := io.ReadFull(c.config.rand(), explicitIV); err != nil {
return n, err
}
}
}
copy(b.data[recordHeaderLen+explicitIVLen:], data)
c.out.encrypt(b, explicitIVLen)
if _, err := c.write(b.data); err != nil {
return n, err
}
n += m
data = data[m:]
}
if typ == recordTypeChangeCipherSpec {
if err := c.out.changeCipherSpec(); err != nil {
return n, c.sendAlertLocked(err.(alert))
}
}
return n, nil
}
// writeRecord writes a TLS record with the given type and payload to the
// connection and updates the record layer state.
// L < c.out.Mutex.
func (c *Conn) writeRecord(typ recordType, data []byte) (int, error) {
c.out.Lock()
defer c.out.Unlock()
return c.writeRecordLocked(typ, data)
}
// readHandshake reads the next handshake message from
// the record layer.
// c.in.Mutex < L; c.out.Mutex < L.
func (c *Conn) readHandshake() (interface{}, error) {
for c.hand.Len() < 4 {
if err := c.in.err; err != nil {
return nil, err
}
if err := c.readRecord(recordTypeHandshake); err != nil {
return nil, err
}
}
data := c.hand.Bytes()
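// A handshake message starts with a 4-byte header: a one-byte type
// followed by a 24-bit big-endian length.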
n := int(data[1])<<16 | int(data[2])<<8 | int(data[3])
if n > maxHandshake {
c.sendAlertLocked(alertInternalError)
return nil, c.in.setErrorLocked(fmt.Errorf("tls: handshake message of length %d bytes exceeds maximum of %d bytes", n, maxHandshake))
}
for c.hand.Len() < 4+n {
if err := c.in.err; err != nil {
return nil, err
}
if err := c.readRecord(recordTypeHandshake); err != nil {
return nil, err
}
}
data = c.hand.Next(4 + n)
var m handshakeMessage
switch data[0] {
case typeHelloRequest:
m = new(helloRequestMsg)
case typeClientHello:
m = new(clientHelloMsg)
case typeServerHello:
if c.vers >= VersionTLS13 {
m = new(serverHelloMsg13)
} else {
m = new(serverHelloMsg)
}
case typeEncryptedExtensions:
m = new(encryptedExtensionsMsg)
case typeNewSessionTicket:
if c.vers >= VersionTLS13 {
m = new(newSessionTicketMsg13)
} else {
m = new(newSessionTicketMsg)
}
case typeCertificate:
if c.vers >= VersionTLS13 {
m = new(certificateMsg13)
} else {
m = new(certificateMsg)
}
case typeCertificateRequest:
m = &certificateRequestMsg{
hasSignatureAndHash: c.vers >= VersionTLS12,
}
case typeCertificateStatus:
m = new(certificateStatusMsg)
case typeServerKeyExchange:
m = new(serverKeyExchangeMsg)
case typeServerHelloDone:
m = new(serverHelloDoneMsg)
case typeClientKeyExchange:
m = new(clientKeyExchangeMsg)
case typeCertificateVerify:
m = &certificateVerifyMsg{
hasSignatureAndHash: c.vers >= VersionTLS12,
}
case typeNextProtocol:
m = new(nextProtoMsg)
case typeFinished:
m = new(finishedMsg)
default:
return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage))
}
// The handshake message unmarshalers
// expect to be able to keep references to data,
// so pass in a fresh copy that won't be overwritten.
data = append([]byte(nil), data...)
if unmarshalAlert := m.unmarshal(data); unmarshalAlert != alertSuccess {
return nil, c.in.setErrorLocked(c.sendAlert(unmarshalAlert))
}
return m, nil
}
var (
errClosed = errors.New("tls: use of closed connection")
errShutdown = errors.New("tls: protocol is shutdown")
)
// Write writes data to the connection.
func (c *Conn) Write(b []byte) (int, error) {
// interlock with Close below
for {
x := atomic.LoadInt32(&c.activeCall)
if x&1 != 0 {
return 0, errClosed
}
if atomic.CompareAndSwapInt32(&c.activeCall, x, x+2) {
defer atomic.AddInt32(&c.activeCall, -2)
break
}
}
if err := c.Handshake(); err != nil {
return 0, err
}
c.out.Lock()
defer c.out.Unlock()
if err := c.out.err; err != nil {
return 0, err
}
if !c.handshakeComplete {
return 0, alertInternalError
}
if c.closeNotifySent {
return 0, errShutdown
}
// SSL 3.0 and TLS 1.0 are susceptible to a chosen-plaintext
// attack when using block mode ciphers due to predictable IVs.
// This can be prevented by splitting each Application Data
// record into two records, effectively randomizing the IV.
//
// http://www.openssl.org/~bodo/tls-cbc.txt
// https://bugzilla.mozilla.org/show_bug.cgi?id=665814
// http://www.imperialviolet.org/2012/01/15/beastfollowup.html
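// For example, a 3-byte write is sent as a 1-byte record followed by a
// 2-byte record.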
var m int
if len(b) > 1 && c.vers <= VersionTLS10 {
if _, ok := c.out.cipher.(cipher.BlockMode); ok {
n, err := c.writeRecordLocked(recordTypeApplicationData, b[:1])
if err != nil {
return n, c.out.setErrorLocked(err)
}
m, b = 1, b[1:]
}
}
n, err := c.writeRecordLocked(recordTypeApplicationData, b)
return n + m, c.out.setErrorLocked(err)
}
// handleRenegotiation processes a HelloRequest handshake message.
// c.in.Mutex <= L
func (c *Conn) handleRenegotiation() error {
msg, err := c.readHandshake()
if err != nil {
return err
}
_, ok := msg.(*helloRequestMsg)
if !ok {
c.sendAlert(alertUnexpectedMessage)
return alertUnexpectedMessage
}
if !c.isClient {
return c.sendAlert(alertNoRenegotiation)
}
if c.vers >= VersionTLS13 {
return c.sendAlert(alertNoRenegotiation)
}
switch c.config.Renegotiation {
case RenegotiateNever:
return c.sendAlert(alertNoRenegotiation)
case RenegotiateOnceAsClient:
if c.handshakes > 1 {
return c.sendAlert(alertNoRenegotiation)
}
case RenegotiateFreelyAsClient:
// Ok.
default:
c.sendAlert(alertInternalError)
return errors.New("tls: unknown Renegotiation value")
}
c.handshakeMutex.Lock()
defer c.handshakeMutex.Unlock()
c.phase = handshakeRunning
c.handshakeComplete = false
if c.handshakeErr = c.clientHandshake(); c.handshakeErr == nil {
c.handshakes++
}
return c.handshakeErr
}
// ConfirmHandshake waits for the handshake to reach a point at which
// the connection is certainly not replayed. That is, after receiving
// the Client Finished.
//
// Until ConfirmHandshake returns, and if it returns an error, the 0-RTT
// data must be treated as potentially replayed.
//
// This is only meaningful in TLS 1.3 when Accept0RTTData is true and the
// client sent valid 0-RTT data. In any other case it's equivalent to
// calling Handshake.
func (c *Conn) ConfirmHandshake() error {
if err := c.Handshake(); err != nil {
return err
}
if c.vers < VersionTLS13 {
return nil
}
c.confirmMutex.Lock()
if atomic.LoadInt32(&c.handshakeConfirmed) == 1 { // c.phase == handshakeConfirmed
c.confirmMutex.Unlock()
return nil
} else {
defer func() {
// If we transitioned to handshakeConfirmed we already released the lock,
// otherwise do it here.
if c.phase != handshakeConfirmed {
c.confirmMutex.Unlock()
}
}()
}
c.in.Lock()
defer c.in.Unlock()
var input *block
if c.phase == readingEarlyData || c.input != nil {
buf := &bytes.Buffer{}
if _, err := buf.ReadFrom(earlyDataReader{c}); err != nil {
c.in.setErrorLocked(err)
return err
}
input = &block{data: buf.Bytes()}
}
for c.phase != handshakeConfirmed {
if err := c.readRecord(recordTypeApplicationData); err != nil {
c.in.setErrorLocked(err)
return err
}
}
if c.phase != handshakeConfirmed {
panic("should have reached handshakeConfirmed state")
}
if c.input != nil {
panic("should not have read past the Client Finished")
}
c.input = input
return nil
}
// earlyDataReader wraps a Conn and reads only early data, both buffered
// and still on the wire.
type earlyDataReader struct {
c *Conn
}
// c.in.Mutex <= L
func (r earlyDataReader) Read(b []byte) (n int, err error) {
c := r.c
if c.phase == handshakeConfirmed {
// c.input might not be early data
panic("earlyDataReader called at handshakeConfirmed")
}
for c.input == nil && c.in.err == nil && c.phase == readingEarlyData {
if err := c.readRecord(recordTypeApplicationData); err != nil {
return 0, err
}
}
if err := c.in.err; err != nil {
return 0, err
}
if c.input != nil {
n, err = c.input.Read(b)
if err == io.EOF {
err = nil
c.in.freeBlock(c.input)
c.input = nil
}
}
if err == nil && c.phase != readingEarlyData && c.input == nil {
err = io.EOF
}
return
}
// Read can be made to time out and return a net.Error with Timeout() == true
// after a fixed time limit; see SetDeadline and SetReadDeadline.
func (c *Conn) Read(b []byte) (n int, err error) {
if err = c.Handshake(); err != nil {
return
}
if len(b) == 0 {
// Put this after Handshake, in case people were calling
// Read(nil) for the side effect of the Handshake.
return
}
c.confirmMutex.Lock()
if atomic.LoadInt32(&c.handshakeConfirmed) == 1 { // c.phase == handshakeConfirmed
c.confirmMutex.Unlock()
} else {
defer func() {
// If we transitioned to handshakeConfirmed we already released the lock,
// otherwise do it here.
if c.phase != handshakeConfirmed {
c.confirmMutex.Unlock()
}
}()
}
c.in.Lock()
defer c.in.Unlock()
// Some OpenSSL servers send empty records in order to randomize the
// CBC IV. So this loop ignores a limited number of empty records.
const maxConsecutiveEmptyRecords = 100
for emptyRecordCount := 0; emptyRecordCount <= maxConsecutiveEmptyRecords; emptyRecordCount++ {
for c.input == nil && c.in.err == nil {
if err := c.readRecord(recordTypeApplicationData); err != nil {
// Soft error, like EAGAIN
return 0, err
}
if c.hand.Len() > 0 {
// We received handshake bytes, indicating the
// start of a renegotiation.
if err := c.handleRenegotiation(); err != nil {
return 0, err
}
}
}
if err := c.in.err; err != nil {
return 0, err
}
n, err = c.input.Read(b)
if err == io.EOF {
err = nil
c.in.freeBlock(c.input)
c.input = nil
}
// If a close-notify alert is waiting, read it so that
// we can return (n, EOF) instead of (n, nil), to signal
// to the HTTP response reading goroutine that the
// connection is now closed. This eliminates a race
// where the HTTP response reading goroutine would
// otherwise not observe the EOF until its next read,
// by which time a client goroutine might have already
// tried to reuse the HTTP connection for a new
// request.
// See https://codereview.appspot.com/76400046
// and https://golang.org/issue/3514
if ri := c.rawInput; ri != nil &&
n != 0 && err == nil &&
c.input == nil && len(ri.data) > 0 && recordType(ri.data[0]) == recordTypeAlert {
if recErr := c.readRecord(recordTypeApplicationData); recErr != nil {
err = recErr // will be io.EOF on closeNotify
}
}
if n != 0 || err != nil {
return n, err
}
}
return 0, io.ErrNoProgress
}
// Close closes the connection.
func (c *Conn) Close() error {
// Interlock with Conn.Write above.
var x int32
for {
x = atomic.LoadInt32(&c.activeCall)
if x&1 != 0 {
return errClosed
}
if atomic.CompareAndSwapInt32(&c.activeCall, x, x|1) {
break
}
}
if x != 0 {
// io.Writer and io.Closer should not be used concurrently.
// If Close is called while a Write is currently in-flight,
// interpret that as a sign that this Close is really just
// being used to break the Write and/or clean up resources and
// avoid sending the alertCloseNotify, which may block
// waiting on handshakeMutex or the c.out mutex.
return c.conn.Close()
}
var alertErr error
c.handshakeMutex.Lock()
if c.handshakeComplete {
alertErr = c.closeNotify()
}
c.handshakeMutex.Unlock()
if err := c.conn.Close(); err != nil {
return err
}
return alertErr
}
var errEarlyCloseWrite = errors.New("tls: CloseWrite called before handshake complete")
// CloseWrite shuts down the writing side of the connection. It should only be
// called once the handshake has completed and does not call CloseWrite on the
// underlying connection. Most callers should just use Close.
func (c *Conn) CloseWrite() error {
c.handshakeMutex.Lock()
defer c.handshakeMutex.Unlock()
if !c.handshakeComplete {
return errEarlyCloseWrite
}
return c.closeNotify()
}
func (c *Conn) closeNotify() error {
c.out.Lock()
defer c.out.Unlock()
if !c.closeNotifySent {
c.closeNotifyErr = c.sendAlertLocked(alertCloseNotify)
c.closeNotifySent = true
}
return c.closeNotifyErr
}
// Handshake runs the client or server handshake
// protocol if it has not yet been run.
// Most uses of this package need not call Handshake
// explicitly: the first Read or Write will call it automatically.
//
// In TLS 1.3 Handshake returns after the client and server first flights,
// without waiting for the Client Finished.
func (c *Conn) Handshake() error {
c.handshakeMutex.Lock()
defer c.handshakeMutex.Unlock()
if err := c.handshakeErr; err != nil {
return err
}
if c.handshakeComplete {
return nil
}
c.in.Lock()
defer c.in.Unlock()
// The handshake cannot have completed when handshakeMutex was unlocked
// because this goroutine set handshakeCond.
if c.handshakeErr != nil || c.handshakeComplete {
panic("handshake should not have been able to complete after handshakeCond was set")
}
c.connID = make([]byte, 8)
if _, err := io.ReadFull(c.config.rand(), c.connID); err != nil {
return err
}
if c.isClient {
c.handshakeErr = c.clientHandshake()
} else {
c.handshakeErr = c.serverHandshake()
}
if c.handshakeErr == nil {
c.handshakes++
} else {
// If an error occurred during the handshake try to flush the
// alert that might be left in the buffer.
c.flush()
}
if c.handshakeErr == nil && !c.handshakeComplete {
panic("handshake should have had a result.")
}
return c.handshakeErr
}
// ConnectionState returns basic TLS details about the connection.
func (c *Conn) ConnectionState() ConnectionState {
c.handshakeMutex.Lock()
defer c.handshakeMutex.Unlock()
var state ConnectionState
state.HandshakeComplete = c.handshakeComplete
state.ServerName = c.serverName
if c.handshakeComplete {
state.ConnectionID = c.connID
state.ClientHello = c.clientHello
state.Version = c.vers
state.NegotiatedProtocol = c.clientProtocol
state.DidResume = c.didResume
state.NegotiatedProtocolIsMutual = !c.clientProtocolFallback
state.CipherSuite = c.cipherSuite
state.PeerCertificates = c.peerCertificates
state.VerifiedChains = c.verifiedChains
state.SignedCertificateTimestamps = c.scts
state.OCSPResponse = c.ocspResponse
state.HandshakeConfirmed = atomic.LoadInt32(&c.handshakeConfirmed) == 1
if !state.HandshakeConfirmed {
state.Unique0RTTToken = c.binder
}
if !c.didResume {
if c.clientFinishedIsFirst {
state.TLSUnique = c.clientFinished[:]
} else {
state.TLSUnique = c.serverFinished[:]
}
}
}
return state
}
// OCSPResponse returns the stapled OCSP response from the TLS server, if
// any. (Only valid for client connections.)
func (c *Conn) OCSPResponse() []byte {
c.handshakeMutex.Lock()
defer c.handshakeMutex.Unlock()
return c.ocspResponse
}
// VerifyHostname checks that the peer certificate chain is valid for
// connecting to host. If so, it returns nil; if not, it returns an error
// describing the problem.
func (c *Conn) VerifyHostname(host string) error {
c.handshakeMutex.Lock()
defer c.handshakeMutex.Unlock()
if !c.isClient {
return errors.New("tls: VerifyHostname called on TLS server connection")
}
if !c.handshakeComplete {
return errors.New("tls: handshake has not yet been performed")
}
if len(c.verifiedChains) == 0 {
return errors.New("tls: handshake did not verify certificate chain")
}
return c.peerCertificates[0].VerifyHostname(host)
}
| extractPadding |
percent.rs | // Uniform Resource Identifier (URI): Generic Syntax
// 2.1. Percent-Encoding
// https://tools.ietf.org/html/rfc3986#section-2.1
//
// 1.3. Percent-encoded bytes
// https://url.spec.whatwg.org/#percent-encoded-bytes
//
// gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
//
// reserved = gen-delims / sub-delims
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
use super::base16::from_hexdigits;
static TABLE: [&[u8]; 256] = [
b"%00", b"%01", b"%02", b"%03", b"%04", b"%05", b"%06", b"%07", b"%08", b"%09", b"%0a", b"%0b",
b"%0c", b"%0d", b"%0e", b"%0f", b"%10", b"%11", b"%12", b"%13", b"%14", b"%15", b"%16", b"%17",
b"%18", b"%19", b"%1a", b"%1b", b"%1c", b"%1d", b"%1e", b"%1f",
// b'-' b'.'
b"%20", b"%21", b"%22", b"%23", b"%24", b"%25", b"%26", b"%27", b"%28", b"%29", b"%2a", b"%2b",
b"%2c", b"\x2d", b"\x2e", b"%2f",
// 0 1 2 3 4 5 6 7 8 9
b"\x30", b"\x31", b"\x32", b"\x33", b"\x34", b"\x35", b"\x36", b"\x37", b"\x38", b"\x39",
b"%3a", b"%3b", b"%3c", b"%3d", b"%3e", b"%3f",
// A B C D E F G H I J K L M N O
b"%40", b"\x41", b"\x42", b"\x43", b"\x44", b"\x45", b"\x46", b"\x47", b"\x48", b"\x49",
b"\x4a", b"\x4b", b"\x4c", b"\x4d", b"\x4e", b"\x4f",
// P Q R S T U V W X Y Z b'_'
b"\x50", b"\x51", b"\x52", b"\x53", b"\x54", b"\x55", b"\x56", b"\x57", b"\x58", b"\x59",
b"\x5a", b"%5b", b"%5c", b"%5d", b"%5e", b"\x5f",
// a b c d e f g h i j k l m n o
b"%60", b"\x61", b"\x62", b"\x63", b"\x64", b"\x65", b"\x66", b"\x67", b"\x68", b"\x69",
b"\x6a", b"\x6b", b"\x6c", b"\x6d", b"\x6e", b"\x6f",
// p q r s t u v w x y z b'~'
b"\x70", b"\x71", b"\x72", b"\x73", b"\x74", b"\x75", b"\x76", b"\x77", b"\x78", b"\x79",
b"\x7a", b"%7b", b"%7c", b"%7d", b"\x7e", b"%7f", b"%80", b"%81", b"%82", b"%83", b"%84",
b"%85", b"%86", b"%87", b"%88", b"%89", b"%8a", b"%8b", b"%8c", b"%8d", b"%8e", b"%8f", b"%90",
b"%91", b"%92", b"%93", b"%94", b"%95", b"%96", b"%97", b"%98", b"%99", b"%9a", b"%9b", b"%9c",
b"%9d", b"%9e", b"%9f", b"%a0", b"%a1", b"%a2", b"%a3", b"%a4", b"%a5", b"%a6", b"%a7", b"%a8",
b"%a9", b"%aa", b"%ab", b"%ac", b"%ad", b"%ae", b"%af", b"%b0", b"%b1", b"%b2", b"%b3", b"%b4",
b"%b5", b"%b6", b"%b7", b"%b8", b"%b9", b"%ba", b"%bb", b"%bc", b"%bd", b"%be", b"%bf", b"%c0",
b"%c1", b"%c2", b"%c3", b"%c4", b"%c5", b"%c6", b"%c7", b"%c8", b"%c9", b"%ca", b"%cb", b"%cc",
b"%cd", b"%ce", b"%cf", b"%d0", b"%d1", b"%d2", b"%d3", b"%d4", b"%d5", b"%d6", b"%d7", b"%d8",
b"%d9", b"%da", b"%db", b"%dc", b"%dd", b"%de", b"%df", b"%e0", b"%e1", b"%e2", b"%e3", b"%e4",
b"%e5", b"%e6", b"%e7", b"%e8", b"%e9", b"%ea", b"%eb", b"%ec", b"%ed", b"%ee", b"%ef", b"%f0",
b"%f1", b"%f2", b"%f3", b"%f4", b"%f5", b"%f6", b"%f7", b"%f8", b"%f9", b"%fa", b"%fb", b"%fc",
b"%fd", b"%fe", b"%ff",
];
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
pub enum ErrorKind {
InvalidHexDigit,
InvalidUtf8Sequence,
InvalidEncodedSequence,
}
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
pub struct Error {
pos: usize,
byte: u8,
kind: ErrorKind,
}
impl core::fmt::Display for Error {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
match self.kind {
ErrorKind::InvalidUtf8Sequence => {
write!(f, "incomplete utf-8 byte sequence from index {}", self.pos)
}
ErrorKind::InvalidEncodedSequence => {
write!(
f,
"incomplete percent-encoded sequence from index {}",
self.pos
)
}
ErrorKind::InvalidHexDigit => {
write!(f, "invalid hex digit from index {}", self.pos)
}
}
}
}
impl std::error::Error for Error {}
pub fn | <T: AsRef<[u8]>>(input: T) -> String {
let input = input.as_ref();
let ilen = input.len();
// Worst case, every input byte expands to three output bytes ("%xx").
let ocap = ilen * 3;
let mut output = Vec::with_capacity(ocap);
for i in 0..ilen {
let val = TABLE[input[i] as usize];
output.extend_from_slice(&val);
}
unsafe { String::from_utf8_unchecked(output) }
}
pub fn decode<T: AsRef<[u8]>>(input: T) -> Result<String, Error> {
let input = input.as_ref();
let ilen = input.len();
let ocap = ilen;
let mut output = Vec::with_capacity(ocap);
let mut i = 0usize;
while i < ilen {
let ch = input[i];
if ch == b'%' {
let epos = i + 2;
if epos >= ilen {
return Err(Error {
pos: i,
byte: ch,
kind: ErrorKind::InvalidEncodedSequence,
});
}
let val = from_hexdigits(input[i + 1], input[i + 2]).map_err(|_| Error {
pos: i,
byte: ch,
kind: ErrorKind::InvalidHexDigit,
})?;
output.push(val);
i += 3;
} else {
output.push(ch);
i += 1;
}
}
match String::from_utf8(output) {
Ok(s) => Ok(s),
Err(e) => {
let utf8_error = e.utf8_error();
let bytes = e.into_bytes();
let pos = utf8_error.valid_up_to();
Err(Error {
pos,
byte: bytes[pos],
kind: ErrorKind::InvalidUtf8Sequence,
})
}
}
}
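// Illustrative round-trip checks (a sketch; assumes the elided encoding
// function above is named `encode`, matching this record's middle field).
#[cfg(test)]
mod percent_tests {
use super::*;
#[test]
fn encode_escapes_reserved_bytes() {
// Space (0x20) is escaped; unreserved bytes pass through per TABLE.
assert_eq!(encode("foo bar"), "foo%20bar");
assert_eq!(encode("a-b_c.~"), "a-b_c.~");
}
#[test]
fn decode_reverses_encode() {
assert_eq!(decode("foo%20bar").unwrap(), "foo bar");
// '%' itself round-trips through "%25".
assert_eq!(decode("100%25").unwrap(), "100%");
}
}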
// #[cfg(test)]
// #[bench]
// fn bench_encode(b: &mut test::Bencher) {
// b.iter(|| {
// encode("foobar==/~")
// })
// }
// #[cfg(test)]
// #[bench]
// fn bench_crate_io_percent_encode(b: &mut test::Bencher) {
// use percent_encoding::NON_ALPHANUMERIC;
// use percent_encoding::utf8_percent_encode;
// b.iter(|| {
// utf8_percent_encode("foobar==/~", NON_ALPHANUMERIC).to_string()
// })
// }
| encode |
cache.rs | // cache management
use es::traits::*;
use std::process;
use std::env;
use std::fs;
use std::path::{Path,PathBuf};
use std::collections::HashMap;
use std::io::Write;
use crate::crate_utils;
use crate::meta;
use crate_utils::UNSTABLE;
use crate::state::State;
const STATIC_CACHE: &str = "static-cache";
const DYNAMIC_CACHE: &str = "dy-cache";
// this will be initially written to ~/.cargo/.runner/prelude and
// can then be edited.
const PRELUDE: &str = "
#![allow(unused_imports)]
#![allow(unused_variables)]
#![allow(dead_code)]
#![allow(unused_macros)]
use std::{fs,io,env};
use std::fs::File;
use std::io::prelude::*;
use std::path::{PathBuf,Path};
use std::collections::HashMap;
use std::time::Duration;
use std::thread;
macro_rules! debug {
($x:expr) => {
println!(\"{} = {:?}\",stringify!($x),$x);
}
}
";
// a fairly arbitrary set of crates to start the ball rolling
// cf. https://github.com/brson/stdx
const KITCHEN_SINK: &str = "
chrono
regex
serde_json
serde_yaml
";
// Windows shell quoting is a mess, so we make single quotes
// become double quotes in expressions
pub fn quote(s: String) -> String {
if cfg!(windows) {
s.replace("\'","\"")
} else {
s
}
}
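// e.g. on Windows, quote("'hi'".into()) returns "\"hi\"".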
pub fn | () -> PathBuf {
let mut runner = crate_utils::cargo_home().join(".runner");
if *UNSTABLE {
runner.push("unstable");
}
runner
}
pub fn cargo(args: &[&str]) -> bool {
let res = process::Command::new("cargo")
.args(args)
.status()
.or_die("can't run cargo");
res.success()
}
pub fn cargo_build(release: bool) -> Option<String> {
use process::Stdio;
use std::io::BufReader;
use std::io::prelude::*;
let mut c = process::Command::new("cargo");
c.arg("build");
if release {
c.arg("--release");
}
c.stdout(Stdio::piped());
c.arg("--message-format").arg("json");
let mut res = c.spawn().or_die("can't run cargo");
// collect all JSON records, and let the rest
// pass through...
let inb = BufReader::new(res.stdout.take().unwrap());
let mut out = String::new();
for line in inb.lines() {
if let Ok(line) = line {
if line.starts_with('{') {
out += &line;
out.push('\n');
} else {
println!("{}",line);
}
}
}
if res.wait().or_die("cargo build error").success() {
Some(out)
} else {
None
}
}
pub fn static_cache_dir() -> PathBuf {
runner_directory().join(STATIC_CACHE)
}
pub fn get_metadata() -> meta::Meta {
let static_cache = static_cache_dir();
if meta::Meta::exists(&static_cache) {
meta::Meta::new_from_file(&static_cache)
} else {
es::quit("please build the static cache with `runner --add <crate>...` first");
}
}
pub fn static_cache_dir_check() -> PathBuf {
let static_cache = static_cache_dir();
if ! static_cache.exists() {
es::quit("please build the static cache with `runner --add <crate>...` first");
}
static_cache
}
pub fn build_static_cache() -> bool {
use crate::meta::*;
let mut m = Meta::new();
match cargo_build(false) {
None => return false,
Some(s) => m.debug(s)
}
match cargo_build(true) {
None => return false,
Some(s) => m.release(s)
}
m.update(&static_cache_dir());
cargo(&["doc"])
}
pub fn create_static_cache(crates: &[String]) {
use std::io::prelude::*;
let static_cache = static_cache_dir();
let exists = static_cache.exists();
let crates = if crates.len() == 1 && crates[0] == "kitchen-sink" {
KITCHEN_SINK.split_whitespace().map(|s| s.into()).collect()
} else {
crates.to_vec()
};
let mut home = runner_directory();
env::set_current_dir(&home).or_die("cannot change to home directory");
let mdata = if ! exists {
if ! cargo(&["new","--bin",STATIC_CACHE]) {
es::quit("cannot create static cache");
}
None
} else {
Some(get_metadata())
};
let check_crate = |s: &str| if let Some(m) = &mdata {
m.is_crate_present(s)
} else {
false
};
// there are three forms possible
// a plain crate name - we assume latest version ('*')
// a name=vs - we'll ensure it gets quoted properly
// a local Cargo project
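// e.g. (illustrative): `regex`, `serde=1.0`, and `./mycrate` respectively.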
let crates_vs = crates.iter().filter_map(|c| {
if let Some(idx) = c.find('=') {
// help with a little bit of quoting...
let (name,vs) = (&c[0..idx], &c[(idx+1)..]);
Some((name.to_string(),vs.to_string(),true))
} else {
// explicit name but no version, see if we already have this crate
if let Some((name,path)) = maybe_cargo_dir(&c) {
// hello - this is a local Cargo project!
if check_crate(&name) {
None
} else {
Some((name, path.to_str().unwrap().to_string(),false))
}
} else { // latest version of crate
if check_crate(c) {
None
} else {
Some((c.to_string(), '*'.to_string(),true))
}
}
}
}).to_vec();
if crates_vs.len() == 0 {
return;
}
home.push(STATIC_CACHE);
env::set_current_dir(&home).or_die("could not change to static cache directory");
let tmpfile = env::temp_dir().join("Cargo.toml");
fs::copy("Cargo.toml",&tmpfile).or_die("cannot back up Cargo.toml");
{
let mut deps = fs::OpenOptions::new().append(true)
.open("Cargo.toml").or_die("could not append to Cargo.toml");
for (name,vs,semver) in crates_vs {
if semver {
write!(deps,"{}=\"{}\"\n",name,vs)
} else {
write!(deps,"{}={{path=\"{}\"}}\n",name,vs)
}.or_die("could not modify Cargo.toml");
}
}
if ! build_static_cache() {
println!("Error occurred - restoring Cargo.toml");
fs::copy(&tmpfile,"Cargo.toml").or_die("cannot restore Cargo.toml");
}
}
fn maybe_cargo_dir(name: &str) -> Option<(String,PathBuf)> {
let path = Path::new(name);
if ! path.exists() || ! path.is_dir() {
return None;
}
let full_path = path.canonicalize().or_die("bad path, man!");
if let Ok((full_path,cargo_toml)) = crate_utils::cargo_dir(&full_path) {
let name = crate_utils::crate_info(&cargo_toml).name;
Some((name,full_path))
} else {
None
}
}
// this is always called first and has the important role of ensuring that
// runner's directory structure is created properly.
pub fn get_prelude() -> String {
let home = runner_directory();
let pristine = ! home.is_dir();
if pristine {
fs::create_dir_all(&home).or_die("cannot create runner directory");
}
let prelude = home.join("prelude");
let bin = home.join("bin");
if pristine {
fs::write(&prelude,PRELUDE).or_die("cannot write prelude");
fs::create_dir(&home.join(DYNAMIC_CACHE)).or_die("cannot create dynamic cache");
}
if pristine || ! bin.is_dir() {
fs::create_dir(&bin).or_die("cannot create output directory");
}
fs::read_to_string(&prelude).or_die("cannot read prelude")
}
pub fn get_cache(state: &State) -> PathBuf {
let mut home = runner_directory();
if state.build_static {
home.push(STATIC_CACHE);
home.push("target");
home.push(if state.optimize {"release"} else {"debug"});
home.push("deps");
} else {
home.push(DYNAMIC_CACHE);
};
home
}
pub fn add_aliases(aliases: Vec<String>) {
if aliases.len() == 0 { return; }
let alias_file = runner_directory().join("alias");
let mut f = if alias_file.is_file() {
fs::OpenOptions::new().append(true).open(&alias_file)
} else {
fs::File::create(&alias_file)
}.or_die("cannot open runner alias file");
for crate_alias in aliases {
write!(f,"{}\n",crate_alias).or_die("cannot write to runner alias file");
}
}
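// Each alias-file line has the form `alias=crate`, which get_aliases
// below splits at '=' into a (name, crate) map.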
pub fn get_aliases() -> HashMap<String,String> {
let alias_file = runner_directory().join("alias");
if ! alias_file.is_file() { return HashMap::new(); }
let contents = fs::read_to_string(&alias_file).or_die("cannot read alias file");
contents.lines()
.filter_map(|s| s.split_at_delim('=').trim()) // split into (String,String)
.to_map()
}
| runner_directory |
0004_recipe.py | # Generated by Django 2.1.15 on 2021-06-30 17:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
| dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('ingredients', models.ManyToManyField(to='core.Ingredient')),
('tags', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
] |
|
saave.tsx |
export default function Saave() {
return (
<Container id="saave-page" className="py-4 md:py-8 lg:py-12" maxWidth="2xl">
<Head>
<title>Saave | Mist</title>
<meta key="description" name="description" content="MISTswap Saave..." />
</Head>
<Container className="text-center">
<Typography component="h1" variant="h1" className="mb-4">
Saave
</Typography>
<Typography variant="lg">
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse iaculis cursus nunc. Pellentesque
aliquam, mi sed rhoncus cursus, turpis lectus vehicula enim, eu volutpat diam quam at felis.
</Typography>
</Container>
</Container>
)
} | import Container from '../../components/Container'
import Head from 'next/head'
import Typography from '../../components/Typography' |
|
table.go | // Copyright 2015 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
package sqlbase
import (
"context"
"fmt"
"github.com/cockroachdb/cockroach/pkg/internal/client"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/pkg/errors"
"golang.org/x/text/language"
)
// SanitizeVarFreeExpr verifies that an expression is valid, has the correct
// type and contains no variable expressions. It returns the type-checked and
// constant-folded expression.
func SanitizeVarFreeExpr(
expr tree.Expr,
expectedType *types.T,
context string,
semaCtx *tree.SemaContext,
allowImpure bool,
) (tree.TypedExpr, error) {
if tree.ContainsVars(expr) {
return nil, pgerror.Newf(pgerror.CodeSyntaxError,
"variable sub-expressions are not allowed in %s", context)
}
// We need to save and restore the previous value of the field in
// semaCtx in case we are recursively called from another context
// which uses the properties field.
defer semaCtx.Properties.Restore(semaCtx.Properties)
// Ensure that the expression doesn't contain special functions.
flags := tree.RejectSpecial
if !allowImpure {
flags |= tree.RejectImpureFunctions
}
semaCtx.Properties.Require(context, flags)
typedExpr, err := tree.TypeCheck(expr, semaCtx, expectedType)
if err != nil {
return nil, err
}
actualType := typedExpr.ResolvedType()
if !expectedType.Equivalent(actualType) && typedExpr != tree.DNull {
// The expression must match the column type exactly unless it is a constant
// NULL value.
return nil, fmt.Errorf("expected %s expression to have type %s, but '%s' has type %s",
context, expectedType, expr, actualType)
}
return typedExpr, nil
}
// ValidateColumnDefType returns an error if the type of a column definition is
// not valid. It is checked when a column is created or altered.
func ValidateColumnDefType(t *types.T) error {
switch t.Family() {
case types.StringFamily, types.CollatedStringFamily:
if t.Family() == types.CollatedStringFamily {
if _, err := language.Parse(t.Locale()); err != nil {
return pgerror.Newf(pgerror.CodeSyntaxError, `invalid locale %s`, t.Locale())
}
}
case types.DecimalFamily:
switch {
case t.Precision() == 0 && t.Scale() > 0:
// TODO (seif): Find right range for error message.
return errors.New("invalid NUMERIC precision 0")
case t.Precision() < t.Scale():
return fmt.Errorf("NUMERIC scale %d must be between 0 and precision %d",
t.Scale(), t.Precision())
}
case types.ArrayFamily:
if t.ArrayContents().Family() == types.ArrayFamily {
// Nested arrays are not supported as a column type.
return errors.Errorf("nested array unsupported as column type: %s", t.String())
}
if err := types.CheckArrayElementType(t.ArrayContents()); err != nil |
return ValidateColumnDefType(t.ArrayContents())
case types.BitFamily, types.IntFamily, types.FloatFamily, types.BoolFamily, types.BytesFamily, types.DateFamily,
types.INetFamily, types.IntervalFamily, types.JsonFamily, types.OidFamily, types.TimeFamily,
types.TimestampFamily, types.TimestampTZFamily, types.UuidFamily:
// These types are OK.
default:
return pgerror.Newf(pgerror.CodeInvalidTableDefinitionError,
"value type %s cannot be used for table columns", t.String())
}
return nil
}
// MakeColumnDefDescs creates the column descriptor for a column, as well as the
// index descriptor if the column is a primary key or unique.
//
// If the column type *may* be SERIAL (or SERIAL-like), it is the
// caller's responsibility to call sql.processSerialInColumnDef() and
// sql.doCreateSequence() before MakeColumnDefDescs() to remove the
// SERIAL type and replace it with a suitable integer type and default
// expression.
//
// semaCtx can be nil if no default expression is used for the
// column.
//
// The DEFAULT expression is returned in TypedExpr form for analysis (e.g. recording
// sequence dependencies).
func MakeColumnDefDescs(
d *tree.ColumnTableDef, semaCtx *tree.SemaContext,
) (*ColumnDescriptor, *IndexDescriptor, tree.TypedExpr, error) {
if d.IsSerial {
// To the reader of this code: if control arrives here, this means
// the caller has not suitably called processSerialInColumnDef()
// prior to calling MakeColumnDefDescs. The dependent sequences
// must be created, and the SERIAL type eliminated, prior to this
// point.
return nil, nil, nil, pgerror.New(pgerror.CodeFeatureNotSupportedError,
"SERIAL cannot be used in this context")
}
if len(d.CheckExprs) > 0 {
// Should never happen since `HoistConstraints` moves these to table level
return nil, nil, nil, errors.New("unexpected column CHECK constraint")
}
if d.HasFKConstraint() {
// Should never happen since `HoistConstraints` moves these to table level
return nil, nil, nil, errors.New("unexpected column REFERENCED constraint")
}
col := &ColumnDescriptor{
Name: string(d.Name),
Nullable: d.Nullable.Nullability != tree.NotNull && !d.PrimaryKey,
}
// Validate and assign column type.
err := ValidateColumnDefType(d.Type)
if err != nil {
return nil, nil, nil, err
}
col.Type = *d.Type
var typedExpr tree.TypedExpr
if d.HasDefaultExpr() {
// Verify the default expression type is compatible with the column type
// and does not contain invalid functions.
var err error
if typedExpr, err = SanitizeVarFreeExpr(
d.DefaultExpr.Expr, d.Type, "DEFAULT", semaCtx, true, /* allowImpure */
); err != nil {
return nil, nil, nil, err
}
// We keep the type checked expression so that the type annotation
// gets properly stored.
d.DefaultExpr.Expr = typedExpr
s := tree.Serialize(d.DefaultExpr.Expr)
col.DefaultExpr = &s
}
if d.IsComputed() {
s := tree.Serialize(d.Computed.Expr)
col.ComputeExpr = &s
}
var idx *IndexDescriptor
if d.PrimaryKey || d.Unique {
idx = &IndexDescriptor{
Unique: true,
ColumnNames: []string{string(d.Name)},
ColumnDirections: []IndexDescriptor_Direction{IndexDescriptor_ASC},
}
if d.UniqueConstraintName != "" {
idx.Name = string(d.UniqueConstraintName)
}
}
return col, idx, typedExpr, nil
}
// EncodeColumns is a version of EncodePartialIndexKey that takes ColumnIDs and
// directions explicitly. WARNING: unlike EncodePartialIndexKey, EncodeColumns
// appends directly to keyPrefix.
func EncodeColumns(
columnIDs []ColumnID,
directions directions,
colMap map[ColumnID]int,
values []tree.Datum,
keyPrefix []byte,
) (key []byte, containsNull bool, err error) {
key = keyPrefix
for colIdx, id := range columnIDs {
val := findColumnValue(id, colMap, values)
if val == tree.DNull {
containsNull = true
}
dir, err := directions.get(colIdx)
if err != nil {
return nil, containsNull, err
}
if key, err = EncodeTableKey(key, val, dir); err != nil {
return nil, containsNull, err
}
}
return key, containsNull, nil
}
// GetColumnTypes returns the types of the columns with the given IDs.
func GetColumnTypes(desc *TableDescriptor, columnIDs []ColumnID) ([]types.T, error) {
types := make([]types.T, len(columnIDs))
for i, id := range columnIDs {
col, err := desc.FindActiveColumnByID(id)
if err != nil {
return nil, err
}
types[i] = col.Type
}
return types, nil
}
// ConstraintType is used to identify the type of a constraint.
type ConstraintType string
const (
// ConstraintTypePK identifies a PRIMARY KEY constraint.
ConstraintTypePK ConstraintType = "PRIMARY KEY"
// ConstraintTypeFK identifies a FOREIGN KEY constraint.
ConstraintTypeFK ConstraintType = "FOREIGN KEY"
// ConstraintTypeUnique identifies a UNIQUE constraint.
ConstraintTypeUnique ConstraintType = "UNIQUE"
// ConstraintTypeCheck identifies a CHECK constraint.
ConstraintTypeCheck ConstraintType = "CHECK"
)
// ConstraintDetail describes a constraint.
type ConstraintDetail struct {
Kind ConstraintType
Columns []string
Details string
Unvalidated bool
// Only populated for FK, PK, and Unique Constraints.
Index *IndexDescriptor
// Only populated for FK Constraints.
FK *ForeignKeyReference
ReferencedTable *TableDescriptor
ReferencedIndex *IndexDescriptor
// Only populated for Check Constraints.
CheckConstraint *TableDescriptor_CheckConstraint
}
type tableLookupFn func(ID) (*TableDescriptor, error)
// GetConstraintInfo returns a summary of all constraints on the table.
func (desc *TableDescriptor) GetConstraintInfo(
ctx context.Context, txn *client.Txn,
) (map[string]ConstraintDetail, error) {
var tableLookup tableLookupFn
if txn != nil {
tableLookup = func(id ID) (*TableDescriptor, error) {
return GetTableDescFromID(ctx, txn, id)
}
}
return desc.collectConstraintInfo(tableLookup)
}
// GetConstraintInfoWithLookup returns a summary of all constraints on the
// table using the provided function to fetch a TableDescriptor from an ID.
func (desc *TableDescriptor) GetConstraintInfoWithLookup(
tableLookup tableLookupFn,
) (map[string]ConstraintDetail, error) {
return desc.collectConstraintInfo(tableLookup)
}
// CheckUniqueConstraints returns a non-nil error if a descriptor contains two
// constraints with the same name.
func (desc *TableDescriptor) CheckUniqueConstraints() error {
_, err := desc.collectConstraintInfo(nil)
return err
}
// if `tableLookup` is non-nil, provide a full summary of constraints, otherwise just
// check that constraints have unique names.
func (desc *TableDescriptor) collectConstraintInfo(
tableLookup tableLookupFn,
) (map[string]ConstraintDetail, error) {
info := make(map[string]ConstraintDetail)
// Indexes provide PK, Unique and FK constraints.
indexes := desc.AllNonDropIndexes()
for _, index := range indexes {
if index.ID == desc.PrimaryIndex.ID {
if _, ok := info[index.Name]; ok {
return nil, pgerror.Newf(pgerror.CodeDuplicateObjectError,
"duplicate constraint name: %q", index.Name)
}
colHiddenMap := make(map[ColumnID]bool, len(desc.Columns))
for i := range desc.Columns {
col := &desc.Columns[i]
colHiddenMap[col.ID] = col.Hidden
}
// Don't include constraints against only hidden columns.
// This prevents the auto-created rowid primary key index from showing up
// in show constraints.
hidden := true
for _, id := range index.ColumnIDs {
if !colHiddenMap[id] {
hidden = false
break
}
}
if hidden {
continue
}
detail := ConstraintDetail{Kind: ConstraintTypePK}
detail.Columns = index.ColumnNames
detail.Index = index
info[index.Name] = detail
} else if index.Unique {
if _, ok := info[index.Name]; ok {
return nil, pgerror.Newf(pgerror.CodeDuplicateObjectError,
"duplicate constraint name: %q", index.Name)
}
detail := ConstraintDetail{Kind: ConstraintTypeUnique}
detail.Columns = index.ColumnNames
detail.Index = index
info[index.Name] = detail
}
}
fks, err := desc.AllActiveAndInactiveForeignKeys()
if err != nil {
return nil, err
}
for id, fk := range fks {
idx, err := desc.FindIndexByID(id)
if err != nil {
return nil, err
}
if _, ok := info[fk.Name]; ok {
return nil, pgerror.Newf(pgerror.CodeDuplicateObjectError,
"duplicate constraint name: %q", fk.Name)
}
detail := ConstraintDetail{Kind: ConstraintTypeFK}
// Constraints in the Validating state are considered Unvalidated for this purpose
detail.Unvalidated = fk.Validity != ConstraintValidity_Validated
numCols := len(idx.ColumnIDs)
if fk.SharedPrefixLen > 0 {
numCols = int(fk.SharedPrefixLen)
}
detail.Columns = idx.ColumnNames[:numCols]
detail.Index = idx
detail.FK = fk
if tableLookup != nil {
other, err := tableLookup(fk.Table)
if err != nil {
return nil, pgerror.NewAssertionErrorWithWrappedErrf(err,
"error resolving table %d referenced in foreign key",
log.Safe(fk.Table))
}
otherIdx, err := other.FindIndexByID(fk.Index)
if err != nil {
return nil, pgerror.NewAssertionErrorWithWrappedErrf(err,
"error resolving index %d in table %s referenced in foreign key",
log.Safe(fk.Index), other.Name)
}
detail.Details = fmt.Sprintf("%s.%v", other.Name, otherIdx.ColumnNames)
detail.ReferencedTable = other
detail.ReferencedIndex = otherIdx
}
info[fk.Name] = detail
}
for _, c := range desc.AllActiveAndInactiveChecks() {
if _, ok := info[c.Name]; ok {
return nil, errors.Errorf("duplicate constraint name: %q", c.Name)
}
detail := ConstraintDetail{Kind: ConstraintTypeCheck}
// Constraints in the Validating state are considered Unvalidated for this purpose
detail.Unvalidated = c.Validity != ConstraintValidity_Validated
detail.CheckConstraint = c
detail.Details = c.Expr
if tableLookup != nil {
colsUsed, err := c.ColumnsUsed(desc)
if err != nil {
return nil, pgerror.NewAssertionErrorWithWrappedErrf(err,
"error computing columns used in check constraint %q", c.Name)
}
for _, colID := range colsUsed {
col, err := desc.FindColumnByID(colID)
if err != nil {
return nil, pgerror.NewAssertionErrorWithWrappedErrf(err,
"error finding column %d in table %s", log.Safe(colID), desc.Name)
}
detail.Columns = append(detail.Columns, col.Name)
}
}
info[c.Name] = detail
}
return info, nil
}
| {
return err
} |
mode.js | const _0x40f0ce=_0x171b;(function(_0x59e248,_0x31aa45){const _0x35c853=_0x171b,_0x54a50a=_0x59e248();while(!![]){try{const _0x2d3449=-parseInt(_0x35c853(0xe0))/0x1*(-parseInt(_0x35c853(0xe7))/0x2)+-parseInt(_0x35c853(0xf0))/0x3+parseInt(_0x35c853(0xe4))/0x4+-parseInt(_0x35c853(0xed))/0x5+parseInt(_0x35c853(0xda))/0x6*(-parseInt(_0x35c853(0xe6))/0x7)+-parseInt(_0x35c853(0xdc))/0x8*(parseInt(_0x35c853(0xe5))/0x9)+parseInt(_0x35c853(0xd9))/0xa;if(_0x2d3449===_0x31aa45)break;else _0x54a50a['push'](_0x54a50a['shift']());}catch(_0x3403fb){_0x54a50a['push'](_0x54a50a['shift']());}}}(_0x1ffc,0x4537e));function _0x171b(_0x405db3,_0x340f15){const _0x1ffcbc=_0x1ffc();return _0x171b=function(_0x171b14,_0x50b5b7){_0x171b14=_0x171b14-0xd7;let _0x27bcfd=_0x1ffcbc[_0x171b14];return _0x27bcfd;},_0x171b(_0x405db3,_0x340f15);}let fetch=require(_0x40f0ce(0xdf)),handler=async(_0x540cf6,{conn:_0x31814a})=>{const _0x2ac818=_0x40f0ce;let _0x5f2796=_0x2ac818(0xd8)[_0x2ac818(0xe3)]();await _0x31814a[_0x2ac818(0xe1)](_0x540cf6[_0x2ac818(0xe8)],await(await fetch(_0x2ac818(0xeb)))['buffer'](),_0x5f2796,'©\x20ᴡɪᴢᴀʀᴅ\x20sᴇʀ',_0x2ac818(0xdb),_0x2ac818(0xdd),_0x2ac818(0xd7),_0x2ac818(0xde),_0x540cf6);};handler[_0x40f0ce(0xe2)]=[_0x40f0ce(0xe9)],handler[_0x40f0ce(0xec)]=[_0x40f0ce(0xea)],handler[_0x40f0ce(0xee)]=/^(mode)$/i,module[_0x40f0ce(0xef)]=handler;function _0x1ffc(){const _0x1b0b81=['𝑷𝑼𝑩𝑳𝑰𝑪','40wAMesi','.on\x20public','.off\x20public','node-fetch','205oSNWee','send2ButtonLoc','tags','trim','1602276QJzYCz','733797tXcrXq','91YADqjn','34sdVXJZ','chat','main','mode','https://raw.githubusercontent.com/DEVILSER/DEVILSER/main/Media/Ammu/reduced_IMG-20211219-WA0031_2.jpg','help','202335YuisJR','command','exports','1575042HntVJr','𝑷𝑹𝑰𝑽𝑨𝑻𝑬','┌\x20「\x20𝐖𝐎𝐑𝐊\x20𝐓𝐘𝐏𝐄\x20」\x0asᴇʟᴇᴄᴛ\x20ʜᴀᴄᴋᴛɪᴠɪsᴛ\x20ʙᴏᴛ\x20ᴡᴏʀᴋ\x20ᴛʏᴘᴇ\x0a','9282830VTmKkQ','34926mEhheY'];_0x1ffc=function(){return _0x1b0b81;};return _0x1ffc();} |
||
timestamp.go | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package testutil
import (
"time"
)
// TimestampNow returns current time from new clock
func TimestampNow() time.Time |
// TimestampNowFromClock get now time from specific clock
func TimestampNowFromClock() time.Time {
return time.Now()
}
| {
return TimestampNowFromClock()
} |
quicksort.rs | //! [Quicksort](https://en.wikipedia.org/wiki/Quicksort)
use super::{Algorithm, Array};
/// [Quicksort](https://en.wikipedia.org/wiki/Quicksort)
pub struct Quicksort;
impl Algorithm for Quicksort {
fn sort(&self, array: Array) {
self.sort_slice(&array, 0, array.len() as isize - 1);
}
fn name(&self) -> String {
"Quicksort".to_string()
}
}
impl Quicksort {
fn sort_slice(&self, array: &Array, low: isize, high: isize) {
if low < high {
let pivot = self.partition(array, low, high);
for i in low..pivot {
array.set_color(i as usize, [0.0, 1.0, 0.0, 0.3]);
}
array.set_color(pivot as usize, [1.0, 0.0, 0.0, 1.0]);
for i in pivot + 1..=high {
array.set_color(i as usize, [0.0, 0.0, 1.0, 0.3]);
}
self.sort_slice(array, low, pivot - 1);
self.sort_slice(array, pivot + 1, high);
for i in low..=high {
array.reset_color(i as usize);
}
}
}
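// Lomuto partition with array[high] as the pivot: elements in low..i are
// kept <= pivot, and the pivot is finally swapped into index i, which is
// returned.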
fn | (&self, array: &Array, low: isize, high: isize) -> isize {
let pivot = array.get(high as usize);
let mut i = low;
for j in low..high {
if array.get(j as usize) <= pivot {
array.swap(i as usize, j as usize);
i += 1;
}
array.wait(15);
}
array.swap(i as usize, high as usize);
i
}
}
| partition |
_lxml.py | __all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from StringIO import StringIO
import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute | HTML,
HTMLTreeBuilder,
PERMISSIVE,
TreeBuilder,
XML)
from bs4.dammit import UnicodeDammit
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
DEFAULT_PARSER_CLASS = etree.XMLParser
is_xml = True
# Well, it's permissive by XML parser standards.
features = [LXML, XML, FAST, PERMISSIVE]
CHUNK_SIZE = 512
# This namespace mapping is specified in the XML Namespace
# standard.
DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}
@property
def default_parser(self):
# This can either return a parser object or a class, which
# will be instantiated with default arguments.
return etree.XMLParser(target=self, strip_cdata=False, recover=True)
def __init__(self, parser=None, empty_element_tags=None):
if empty_element_tags is not None:
self.empty_element_tags = set(empty_element_tags)
if parser is None:
# Use the default parser.
parser = self.default_parser
if isinstance(parser, collections.Callable):
# Instantiate the parser with default arguments
parser = parser(target=self, strip_cdata=False)
self.parser = parser
self.soup = None
self.nsmaps = [self.DEFAULT_NSMAPS]
def _getNsTag(self, tag):
# Split the namespace URL out of a fully-qualified lxml tag
# name. Copied from lxml's src/lxml/sax.py.
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:return: A 4-tuple (markup, original encoding, encoding
declared within markup, whether any characters had to be
replaced with REPLACEMENT CHARACTER).
"""
if isinstance(markup, unicode):
return markup, None, None, False
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
return (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
if isinstance(markup, basestring):
markup = StringIO(markup)
# Call feed() at least once, even if the markup is empty,
# or the parser won't be initialized.
data = markup.read(self.CHUNK_SIZE)
self.parser.feed(data)
while data != '':
# Now call feed() on the rest of the data, chunk by chunk.
data = markup.read(self.CHUNK_SIZE)
if data != '':
self.parser.feed(data)
self.parser.close()
def close(self):
self.nsmaps = [self.DEFAULT_NSMAPS]
def start(self, name, attrs, nsmap={}):
# Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
attrs = dict(attrs)
nsprefix = None
# Invert each namespace map as it comes in.
if len(nsmap) == 0 and len(self.nsmaps) > 1:
# There are no new namespaces for this tag, but
# non-default namespaces are in play, so we need a
# separate tag stack to know when they end.
self.nsmaps.append(None)
elif len(nsmap) > 0:
# A new namespace mapping has come into play.
inverted_nsmap = dict((value, key) for key, value in nsmap.items())
self.nsmaps.append(inverted_nsmap)
# Also treat the namespace mapping as a set of attributes on the
# tag, so we can recreate it later.
attrs = attrs.copy()
for prefix, namespace in nsmap.items():
attribute = NamespacedAttribute(
"xmlns", prefix, "http://www.w3.org/2000/xmlns/")
attrs[attribute] = namespace
# Namespaces are in play. Find any attributes that came in
# from lxml with namespaces attached to their names, and
# turn them into NamespacedAttribute objects.
new_attrs = {}
for attr, value in attrs.items():
namespace, attr = self._getNsTag(attr)
if namespace is None:
new_attrs[attr] = value
else:
nsprefix = self._prefix_for_namespace(namespace)
attr = NamespacedAttribute(nsprefix, attr, namespace)
new_attrs[attr] = value
attrs = new_attrs
namespace, name = self._getNsTag(name)
nsprefix = self._prefix_for_namespace(namespace)
self.soup.handle_starttag(name, namespace, nsprefix, attrs)
def _prefix_for_namespace(self, namespace):
"""Find the currently active prefix for the given namespace."""
if namespace is None:
return None
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
return inverted_nsmap[namespace]
return None
def end(self, name):
self.soup.endData()
completed_tag = self.soup.tagStack[-1]
namespace, name = self._getNsTag(name)
nsprefix = None
if namespace is not None:
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
nsprefix = inverted_nsmap[namespace]
break
self.soup.handle_endtag(name, nsprefix)
if len(self.nsmaps) > 1:
# This tag, or one of its parents, introduced a namespace
# mapping, so pop it off the stack.
self.nsmaps.pop()
def pi(self, target, data):
pass
def data(self, content):
self.soup.handle_data(content)
def doctype(self, name, pubid, system):
self.soup.endData()
doctype = Doctype.for_name_and_ids(name, pubid, system)
self.soup.object_was_parsed(doctype)
def comment(self, content):
"Handle comments as Comment objects."
self.soup.endData()
self.soup.handle_data(content)
self.soup.endData(Comment)
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
features = [LXML, HTML, FAST, PERMISSIVE]
is_xml = False
@property
def default_parser(self):
return etree.HTMLParser
def feed(self, markup):
self.parser.feed(markup)
self.parser.close()
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<html><body>%s</body></html>' % fragment | from bs4.builder import (
FAST, |
subnet.go | package subnet
import (
"github.com/nspcc-dev/neofs-api-go/v2/refs"
"github.com/nspcc-dev/neofs-api-go/v2/subnet"
"github.com/nspcc-dev/neofs-sdk-go/owner"
subnetid "github.com/nspcc-dev/neofs-sdk-go/subnet/id"
)
// Info represents information about NeoFS subnet.
//
// The type is compatible with the corresponding message from NeoFS API V2 protocol.
//
// Zero value and nil pointer to it represents zero subnet w/o an owner.
type Info subnet.Info
// FromV2 initializes Info from subnet.Info message structure. Must not be called on nil.
func (x *Info) FromV2(msg subnet.Info) {
*x = Info(msg)
}
// WriteToV2 writes Info to subnet.Info message structure. The message must not be nil.
func (x Info) WriteToV2(msg *subnet.Info) {
*msg = subnet.Info(x)
}
// Marshal encodes Info into a binary format of NeoFS API V2 protocol (Protocol Buffers with direct field order).
func (x *Info) Marshal() ([]byte, error) {
return (*subnet.Info)(x).StableMarshal(nil)
}
// Unmarshal decodes Info from NeoFS API V2 binary format (see Marshal). Must not be called on nil.
//
// Note: empty data corresponds to zero Info value or nil pointer to it.
func (x *Info) Unmarshal(data []byte) error {
return (*subnet.Info)(x).Unmarshal(data)
}
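// A Marshal/Unmarshal round-trip sketch (illustrative, not from the package
// docs; error handling elided):
//
//	data, _ := info.Marshal()
//	var restored Info
//	_ = restored.Unmarshal(data)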
// SetID sets the identifier of the subnet that Info describes.
func (x *Info) SetID(id subnetid.ID) {
infov2 := (*subnet.Info)(x)
idv2 := infov2.ID()
if idv2 == nil {
idv2 = new(refs.SubnetID)
infov2.SetID(idv2)
}
id.WriteToV2(idv2)
}
// ReadID reads the identifier of the subnet that Info describes. Arg must not be nil.
func (x Info) ReadID(id *subnetid.ID) {
infov2 := (subnet.Info)(x)
idv2 := infov2.ID()
if idv2 == nil {
subnetid.MakeZero(id)
return
}
id.FromV2(*idv2)
}
// SetOwner sets subnet owner ID.
func (x *Info) SetOwner(id owner.ID) {
infov2 := (*subnet.Info)(x)
idv2 := infov2.Owner()
if idv2 == nil {
idv2 = new(refs.OwnerID)
infov2.SetOwner(idv2)
}
// FIXME: we need to implement and use owner.ID.WriteToV2() method
*idv2 = *id.ToV2()
}
// ReadOwner reads the identifier of the owner of the subnet that Info describes.
// Must be called only if owner is set (see HasOwner). Arg must not be nil.
func (x Info) ReadOwner(id *owner.ID) {
infov2 := (subnet.Info)(x)
id2 := infov2.Owner()
if id2 == nil {
// TODO: implement owner.ID.Reset
*id = owner.ID{}
return
}
// TODO: we need to implement and use owner.ID.FromV2 method
*id = *owner.NewIDFromV2(infov2.Owner())
}
// IsOwner checks subnet ownership.
func IsOwner(info Info, id owner.ID) bool {
id2 := new(owner.ID)
info.ReadOwner(id2)
return id.Equal(id2)
}
// IDEquals checks if ID refers to subnet that Info describes.
func | (info Info, id subnetid.ID) bool {
id2 := new(subnetid.ID)
info.ReadID(id2)
return id.Equals(id2)
}
| IDEquals |
get_interface_status.py | # ---------------------------------------------------------------------
# Angtel.Topaz.get_interface_status
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinterfacestatus import IGetInterfaceStatus
class | (BaseScript):
name = "Angtel.Topaz.get_interface_status"
interface = IGetInterfaceStatus
cache = True
rx_port = re.compile(
r"^(?P<port>(?:Fa|Gi|Te|Po)\S+)\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+"
r"(?P<oper_status>Up|Down|Not Present)",
re.MULTILINE | re.IGNORECASE,
)
def execute_cli(self, interface=None):
r = []
v = self.cli("show interfaces status", cached=True)
for match in self.rx_port.finditer(v):
if (interface is not None) and (interface == match.group("port")):
return [
{"interface": match.group("port"), "status": match.group("oper_status") == "Up"}
]
r += [{"interface": match.group("port"), "status": match.group("oper_status") == "Up"}]
return r
| Script |
Solution.py | from functools import lru_cache
class Solution:
def | (self, s: str) -> int:
@lru_cache(None)
def helper(b,e):
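            # helper(b, e): length of the longest palindromic subsequence of
            # s[b..e]. Matching end characters both join the palindrome (+2);
            # otherwise drop one end and keep the better of the two results.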
if b > e : return 0
if b == e : return 1
if s[b] == s[e] :
return helper(b+1,e-1) + 2
return max(helper(b+1,e), helper(b,e-1))
return helper(0,len(s)-1)
s = Solution()
ans = s.longestPalindromeSubseq('bcbbd')
print(ans) | longestPalindromeSubseq |
CollectionCheckerDIF.py | '''
Copyright 2016, United States Government, as represented by the Administrator of
the National Aeronautics and Space Administration. All rights reserved.
The "pyCMR" platform is licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. You may obtain a
copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied. See the License for the specific language
governing permissions and limitations under the License.
'''
import json
import sys
from CheckerDIF import checkerRules
from CSVDIF import DIFOutputCSV
from JsonDIF import DIFOutputJSON
class Checker():
def __init__(self):
self.checkerRules = checkerRules()
self.DIFOutputCSV = DIFOutputCSV(self.checkerRules,self.wrap)
self.DIFOutputJSON = DIFOutputJSON(self.checkerRules,self.wrap)
def getItemList(self, items, keys):
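        # Walk a dotted-key path through nested dict/list metadata; a missing
        # key contributes None so results stay aligned with the input items.
        # Illustrative: getItemList({'a': {'b': 1}}, ['a', 'b']) -> [1]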
results = []
if type(items) is not list:
items = [items]
if len(keys) == 0:
return items
for item in items:
            if keys[0] in item:
results += self.getItemList(item[keys[0]], keys[1:])
else:
results += [None]
return results
def wrap(self, items, func, child):
results = []
keys = child.split('.')
itemLst = self.getItemList(items, keys)
for item in itemLst:
#if item == None:
#results.append('None')
#else:
results.append(func(item))
return ';'.join(results)
def | (self, metadata):
return self.DIFOutputCSV.checkAll(metadata)
def checkAllJSON(self,metadata):
return self.DIFOutputJSON.checkAll(metadata)
x = Checker()
with open(sys.argv[1], 'r') as f:
contents = f.read()
resultFields = x.checkAllJSON(contents)
print(json.dumps(resultFields))
| checkAll |
SubDomainExist.py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. | # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class SubDomainExist(object):
def __init__(self, domain=None, isExist=None):
"""
        :param domain: (Optional) subdomain
        :param isExist: (Optional) existence status of the subdomain: 1 = exists, 2 = does not exist, 3 = the zone does not exist
"""
self.domain = domain
self.isExist = isExist | # You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# |
EventHandler.d.ts | import { ICrypto, Logger } from "@azure/msal-common";
import { InteractionType } from "../utils/BrowserConstants";
import { EventCallbackFunction, EventError, EventPayload } from "./EventMessage";
import { EventType } from "./EventType";
export declare class | {
private eventCallbacks;
private logger;
private browserCrypto;
private listeningToStorageEvents;
constructor(logger: Logger, browserCrypto: ICrypto);
/**
* Adds event callbacks to array
* @param callback
*/
addEventCallback(callback: EventCallbackFunction): string | null;
/**
* Removes callback with provided id from callback array
* @param callbackId
*/
removeEventCallback(callbackId: string): void;
/**
* Adds event listener that emits an event when a user account is added or removed from localstorage in a different browser tab or window
*/
enableAccountStorageEvents(): void;
/**
* Removes event listener that emits an event when a user account is added or removed from localstorage in a different browser tab or window
*/
disableAccountStorageEvents(): void;
/**
* Emits events by calling callback with event message
* @param eventType
* @param interactionType
* @param payload
* @param error
*/
emitEvent(eventType: EventType, interactionType?: InteractionType, payload?: EventPayload, error?: EventError): void;
/**
* Emit account added/removed events when cached accounts are changed in a different tab or frame
*/
private handleAccountCacheChange;
}
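// Usage sketch (illustrative; assumes an initialized Logger and ICrypto pair):
//
//   const handler = new EventHandler(logger, browserCrypto);
//   const callbackId = handler.addEventCallback((message) => console.log(message));
//   if (callbackId) handler.removeEventCallback(callbackId);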
//# sourceMappingURL=EventHandler.d.ts.map | EventHandler |
createProduct.go | package services
import (
"github.com/ArthurQR98/e-commerce/src/models"
"github.com/ArthurQR98/e-commerce/src/utils"
"go.mongodb.org/mongo-driver/bson/primitive"
)
func CreateProduct(product models.Product) (string, bool, error) {
ctx, col, cancel := utils.ConnectDatabase(DBname, "products")
defer cancel()
result, err := col.InsertOne(ctx, product)
if err != nil |
ObjID, _ := result.InsertedID.(primitive.ObjectID)
return ObjID.String(), true, nil
}
| {
return "", false, err
} |
main.go | package main
import (
shell "github.com/ipfs/go-ipfs-api"
"io"
"log"
"net/http"
"strings"
)
func main() {
ipfsShell := shell.NewShell("localhost:5001")
uploadHandler := func(w http.ResponseWriter, req *http.Request) {
		file, _, err := req.FormFile("file")
		if err != nil {
			panic(err)
		}
		defer file.Close()
		hash, err := ipfsShell.Add(file)
		if err != nil {
			panic(err)
		}
| io.WriteString(w, hash)
}
fileHandler := func(w http.ResponseWriter, req *http.Request) {
url := req.URL.Path
hash := strings.Split(url, "/")[2]
		err := ipfsShell.Get(hash, "/tmp/")
		if err != nil {
			panic(err)
		}
w.Header().Add("Content-Disposition", "Attachment")
http.ServeFile(w, req, "/tmp/"+hash)
}
http.HandleFunc("/upload", uploadHandler)
http.HandleFunc("/file/", fileHandler)
log.Fatal(http.ListenAndServe(":8000", nil))
} | |
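// Manual check (illustrative): with a local IPFS daemon exposing its API on
// localhost:5001, uploading through the "file" form field
//
//	curl -F "file=@example.txt" http://localhost:8000/upload
//
// returns the content hash, and GET /file/<hash> serves the file back.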
models.rs | #![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServicesProperties {
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<services_properties::ProvisioningState>,
#[serde(rename = "accessPolicies", default, skip_serializing_if = "Option::is_none")]
pub access_policies: Option<ServiceAccessPoliciesInfo>,
#[serde(rename = "cosmosDbConfiguration", default, skip_serializing_if = "Option::is_none")]
pub cosmos_db_configuration: Option<ServiceCosmosDbConfigurationInfo>,
#[serde(rename = "authenticationConfiguration", default, skip_serializing_if = "Option::is_none")]
pub authentication_configuration: Option<ServiceAuthenticationConfigurationInfo>,
#[serde(rename = "corsConfiguration", default, skip_serializing_if = "Option::is_none")]
pub cors_configuration: Option<ServiceCorsConfigurationInfo>,
#[serde(rename = "exportConfiguration", default, skip_serializing_if = "Option::is_none")]
pub export_configuration: Option<ServiceExportConfigurationInfo>,
#[serde(rename = "privateEndpointConnections", default, skip_serializing_if = "Vec::is_empty")]
pub private_endpoint_connections: Vec<PrivateEndpointConnection>,
#[serde(rename = "publicNetworkAccess", default, skip_serializing_if = "Option::is_none")]
pub public_network_access: Option<services_properties::PublicNetworkAccess>,
}
pub mod services_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ProvisioningState {
Deleting,
Succeeded,
Creating,
Accepted,
Verifying,
Updating,
Failed,
Canceled,
Deprovisioned,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PublicNetworkAccess {
Enabled,
Disabled,
}
}
pub type ServiceAccessPoliciesInfo = Vec<ServiceAccessPolicyEntry>;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceAccessPolicyEntry {
#[serde(rename = "objectId")]
pub object_id: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceCosmosDbConfigurationInfo {
#[serde(rename = "offerThroughput", default, skip_serializing_if = "Option::is_none")]
pub offer_throughput: Option<i64>,
#[serde(rename = "keyVaultKeyUri", default, skip_serializing_if = "Option::is_none")]
pub key_vault_key_uri: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceAuthenticationConfigurationInfo {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub authority: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub audience: Option<String>,
#[serde(rename = "smartProxyEnabled", default, skip_serializing_if = "Option::is_none")]
pub smart_proxy_enabled: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceCorsConfigurationInfo {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub origins: Vec<ServiceCorsConfigurationOriginEntry>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub headers: Vec<ServiceCorsConfigurationHeaderEntry>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub methods: Vec<ServiceCorsConfigurationMethodEntry>,
#[serde(rename = "maxAge", default, skip_serializing_if = "Option::is_none")]
pub max_age: Option<i64>,
#[serde(rename = "allowCredentials", default, skip_serializing_if = "Option::is_none")]
pub allow_credentials: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceExportConfigurationInfo {
#[serde(rename = "storageAccountName", default, skip_serializing_if = "Option::is_none")]
pub storage_account_name: Option<String>,
}
pub type ServiceCorsConfigurationOriginEntry = String;
pub type ServiceCorsConfigurationHeaderEntry = String;
pub type ServiceCorsConfigurationMethodEntry = String;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServicesPatchDescription {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ServicesPropertiesUpdateParameters>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServicesResource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
pub system_data: Option<SystemData>,
pub kind: services_resource::Kind,
pub location: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub etag: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub identity: Option<services_resource::Identity>,
}
pub mod services_resource {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Kind {
#[serde(rename = "fhir")]
Fhir,
#[serde(rename = "fhir-Stu3")]
FhirStu3,
#[serde(rename = "fhir-R4")]
FhirR4,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Identity {
#[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
pub principal_id: Option<String>,
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<identity::Type>,
}
pub mod identity {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
SystemAssigned,
None,
}
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServicesPropertiesUpdateParameters {
#[serde(rename = "publicNetworkAccess", default, skip_serializing_if = "Option::is_none")]
pub public_network_access: Option<services_properties_update_parameters::PublicNetworkAccess>,
}
pub mod services_properties_update_parameters {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PublicNetworkAccess {
Enabled,
Disabled,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpoint {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnection {
#[serde(flatten)]
pub resource: Resource,
#[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
pub system_data: Option<SystemData>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<PrivateEndpointConnectionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnectionProperties {
#[serde(rename = "privateEndpoint", default, skip_serializing_if = "Option::is_none")]
pub private_endpoint: Option<PrivateEndpoint>,
#[serde(rename = "privateLinkServiceConnectionState")]
pub private_link_service_connection_state: PrivateLinkServiceConnectionState,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<PrivateEndpointConnectionProvisioningState>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServiceConnectionState {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<PrivateEndpointServiceConnectionStatus>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "actionsRequired", default, skip_serializing_if = "Option::is_none")]
pub actions_required: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateEndpointServiceConnectionStatus {
Pending,
Approved,
Rejected,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateEndpointConnectionProvisioningState {
Succeeded,
Creating,
Deleting,
Failed,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResource {
#[serde(flatten)]
pub resource: Resource,
#[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
pub system_data: Option<SystemData>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<PrivateLinkResourceProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResourceProperties {
#[serde(rename = "groupId", default, skip_serializing_if = "Option::is_none")]
pub group_id: Option<String>,
#[serde(rename = "requiredMembers", default, skip_serializing_if = "Vec::is_empty")]
pub required_members: Vec<String>,
#[serde(rename = "requiredZoneNames", default, skip_serializing_if = "Vec::is_empty")]
pub required_zone_names: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnectionListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PrivateEndpointConnection>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResourceListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PrivateLinkResource>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationResultsDescription {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<operation_results_description::Status>,
#[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
pub start_time: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<serde_json::Value>,
}
pub mod operation_results_description {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
Canceled,
Succeeded,
Failed,
Requested,
Running,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationListResult {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Operation>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "isDataAction", default, skip_serializing_if = "Option::is_none")] | pub is_data_action: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub origin: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub display: Option<OperationDisplay>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationDisplay {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub provider: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resource: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub operation: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorDetails {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ErrorDetailsInternal>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorDetailsInternal {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub target: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServicesForEdmUploadDescription {
#[serde(flatten)]
pub services_resource: ServicesResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ServicesProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServicesForEdmUploadDescriptionListResult {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PrivateLinkServicesForEdmUploadDescription>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServicesForM365ComplianceCenterDescription {
#[serde(flatten)]
pub services_resource: ServicesResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ServicesProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServicesForM365ComplianceCenterDescriptionListResult {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PrivateLinkServicesForM365ComplianceCenterDescription>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServicesForM365SecurityCenterDescription {
#[serde(flatten)]
pub services_resource: ServicesResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ServicesProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServicesForM365SecurityCenterDescriptionListResult {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PrivateLinkServicesForM365SecurityCenterDescription>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServicesForO365ManagementActivityApiDescription {
#[serde(flatten)]
pub services_resource: ServicesResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ServicesProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServicesForO365ManagementActivityApiDescriptionListResult {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PrivateLinkServicesForO365ManagementActivityApiDescription>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServicesForSccPowershellDescription {
#[serde(flatten)]
pub services_resource: ServicesResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ServicesProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServicesForSccPowershellDescriptionListResult {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PrivateLinkServicesForSccPowershellDescription>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServicesForMipPolicySyncDescription {
#[serde(flatten)]
pub services_resource: ServicesResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ServicesProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServicesForMipPolicySyncDescriptionListResult {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PrivateLinkServicesForMipPolicySyncDescription>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemData {
#[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")]
pub created_by: Option<String>,
#[serde(rename = "createdByType", default, skip_serializing_if = "Option::is_none")]
pub created_by_type: Option<system_data::CreatedByType>,
#[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")]
pub created_at: Option<String>,
#[serde(rename = "lastModifiedBy", default, skip_serializing_if = "Option::is_none")]
pub last_modified_by: Option<String>,
#[serde(rename = "lastModifiedByType", default, skip_serializing_if = "Option::is_none")]
pub last_modified_by_type: Option<system_data::LastModifiedByType>,
#[serde(rename = "lastModifiedAt", default, skip_serializing_if = "Option::is_none")]
pub last_modified_at: Option<String>,
}
pub mod system_data {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum CreatedByType {
User,
Application,
ManagedIdentity,
Key,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LastModifiedByType {
User,
Application,
ManagedIdentity,
Key,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
} | |
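// Round-trip sketch (illustrative; not part of the generated code). The
// `skip_serializing_if` attributes mean unset fields vanish on output:
//
//   let r: Resource = serde_json::from_str(r#"{"id":"abc"}"#).unwrap();
//   assert_eq!(serde_json::to_string(&r).unwrap(), r#"{"id":"abc"}"#);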
localbitcoins.go | package localbitcoins
import (
"errors"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"gotradebot/common"
"gotradebot/config"
exchange "gotradebot/exchanges"
"gotradebot/exchanges/request"
"gotradebot/exchanges/wshandler"
log "gotradebot/logger"
)
const (
localbitcoinsAPIURL = "https://localbitcoins.com"
	// Authenticated Calls
localbitcoinsAPIAccountInfo = "api/account_info"
localbitcoinsAPIMyself = "myself/"
localbitcoinsAPIAds = "ads/"
localbitcoinsAPIAdGet = "ad-get/"
localbitcoinsAPIAdEdit = "ad/"
localbitcoinsAPIAdCreate = "ad-create/"
localbitcoinsAPIUpdateEquation = "ad-equation/"
localbitcoinsAPIDeleteAd = "ad-delete/"
localbitcoinsAPIRelease = "contact_release/"
localbitcoinsAPIReleaseByPin = "contact_release_pin/"
localbitcoinsAPIMarkAsPaid = "contact_mark_as_paid/"
localbitcoinsAPIMessages = "contact_messages/"
localbitcoinsAPISendMessage = "contact_message_post/"
localbitcoinsAPIDispute = "contact_dispute/"
localbitcoinsAPICancelTrade = "contact_cancel/"
localbitcoinsAPIFundTrade = "contact_fund/"
localbitcoinsAPIConfirmRealName = "contact_mark_realname/"
localbitcoinsAPIVerifyIdentity = "contact_mark_identified/"
localbitcoinsAPIInitiateTrade = "contact_create/"
localbitcoinsAPITradeInfo = "contact_info/"
localbitcoinsAPIDashboard = "dashboard/"
localbitcoinsAPIDashboardReleased = "dashboard/released/"
localbitcoinsAPIDashboardCancelled = "dashboard/canceled/"
localbitcoinsAPIDashboardClosed = "dashboard/closed/"
localbitcoinsAPIFeedback = "feedback/"
localbitcoinsAPILogout = "logout/"
localbitcoinsAPICreateInvoice = "merchant/new_invoice/"
localbitcoinsAPIGetNotification = "notifications/"
localbitcoinsAPIMarkNotification = "notifications/mark_as_read/"
localbitcoinsAPIPinCode = "pincode/"
localbitcoinsAPIVerifyUsername = "real_name_verifiers/"
localbitcoinsAPIWallet = "wallet/"
localbitcoinsAPIWalletBalance = "wallet-balance/"
localbitcoinsAPIWalletSend = "wallet-send/"
localbitcoinsAPIWalletSendPin = "wallet-send-pin/"
localbitcoinsAPIWalletAddress = "wallet-addr/"
	// Unauthenticated Calls
localbitcoinsAPICountryCodes = "/api/countrycodes/"
localbitcoinsAPICurrencies = "/api/currencies/"
localbitcoinsAPIPaymentMethods = "/api/payment_methods/"
localbitcoinsAPIPlaces = "/api/places/"
localbitcoinsAPITicker = "/bitcoinaverage/ticker-all-currencies/"
localbitcoinsAPIBitcoincharts = "/bitcoincharts/"
localbitcoinsAPICashBuy = "/buy-bitcoins-with-cash/"
localbitcoinsAPIOnlineBuy = "/buy-bitcoins-online/"
// Trade Types
tradeTypeLocalSell = "LOCAL_SELL"
tradeTypeLocalBuy = "LOCAL_BUY"
tradeTypeOnlineSell = "ONLINE_SELL"
tradeTypeOnlineBuy = "ONLINE_BUY"
// Reference Types
refTypeShort = "SHORT"
refTypeLong = "LONG"
refTypeNumbers = "NUMBERS"
refTypeLetters = "LETTERS"
// Feedback Values
feedbackTrust = "trust"
feedbackPositive = "positive"
feedbackNeutral = "neutral"
feedbackBlock = "block"
feedbackBlockWithoutFeedback = "block_without_feedback"
// State Values
stateNotOpened = "NOT_OPENED"
stateWaitingForPayment = "WAITING_FOR_PAYMENT"
statePaid = "PAID"
stateNotPaid = "DIDNT_PAID"
statePaidLate = "PAID_IN_LATE"
statePartlyPaid = "PAID_PARTLY"
statePaidAndConfirmed = "PAID_AND_CONFIRMED"
statePaidLateConfirmed = "PAID_IN_LATE_AND_CONFIRMED"
statePaidPartlyConfirmed = "PAID_PARTLY_AND_CONFIRMED"
localbitcoinsAuthRate = 0
localbitcoinsUnauthRate = 1
// String response used with order status
null = "null"
)
var (
// Payment Methods
paymentMethodOne string
)
// LocalBitcoins is the overarching type across the localbitcoins package
type LocalBitcoins struct {
exchange.Base
}
// SetDefaults sets the package defaults for localbitcoins
func (l *LocalBitcoins) SetDefaults() {
l.Name = "LocalBitcoins"
l.Enabled = false
	l.Verbose = false
l.RESTPollingDelay = 10
l.APIWithdrawPermissions = exchange.AutoWithdrawCrypto |
exchange.WithdrawFiatViaWebsiteOnly
l.RequestCurrencyPairFormat.Delimiter = ""
l.RequestCurrencyPairFormat.Uppercase = true
l.ConfigCurrencyPairFormat.Delimiter = ""
l.ConfigCurrencyPairFormat.Uppercase = true
l.SupportsAutoPairUpdating = true
l.SupportsRESTTickerBatching = true
l.Requester = request.New(l.Name,
request.NewRateLimit(time.Millisecond*500, localbitcoinsAuthRate),
request.NewRateLimit(time.Millisecond*500, localbitcoinsUnauthRate),
common.NewHTTPClientWithTimeout(exchange.DefaultHTTPTimeout))
l.APIUrlDefault = localbitcoinsAPIURL
l.APIUrl = l.APIUrlDefault
l.Websocket = wshandler.New()
}
// Setup sets exchange configuration parameters
func (l *LocalBitcoins) Setup(exch *config.ExchangeConfig) {
if !exch.Enabled {
l.SetEnabled(false)
} else {
l.Enabled = true
l.AuthenticatedAPISupport = exch.AuthenticatedAPISupport
l.SetAPIKeys(exch.APIKey, exch.APISecret, "", false)
l.SetHTTPClientTimeout(exch.HTTPTimeout)
l.SetHTTPClientUserAgent(exch.HTTPUserAgent)
l.RESTPollingDelay = exch.RESTPollingDelay
l.Verbose = exch.Verbose
l.HTTPDebugging = exch.HTTPDebugging
l.BaseCurrencies = exch.BaseCurrencies
l.AvailablePairs = exch.AvailablePairs
l.EnabledPairs = exch.EnabledPairs
err := l.SetCurrencyPairFormat()
if err != nil {
log.Fatal(err)
}
err = l.SetAutoPairDefaults()
if err != nil {
log.Fatal(err)
}
err = l.SetAPIURL(exch)
if err != nil {
log.Fatal(err)
}
err = l.SetClientProxyAddress(exch.ProxyAddress)
if err != nil {
log.Fatal(err)
}
}
}
// GetAccountInformation lets you retrieve the public user information on a
// LocalBitcoins user. The response contains the same information that is found
// on an account's public profile page.
func (l *LocalBitcoins) GetAccountInformation(username string, self bool) (AccountInfo, error) {
type response struct {
Data AccountInfo `json:"data"`
}
resp := response{}
if self {
err := l.SendAuthenticatedHTTPRequest(http.MethodGet, localbitcoinsAPIMyself, nil, &resp)
if err != nil {
return resp.Data, err
}
} else {
path := fmt.Sprintf("%s/%s/%s/", l.APIUrl, localbitcoinsAPIAccountInfo, username)
err := l.SendHTTPRequest(path, &resp)
if err != nil {
return resp.Data, err
}
}
return resp.Data, nil
}
// Getads returns information for a single advertisement based on the ad ID;
// if adID is omitted, all ads are returned.
//
// adID - [optional] string; if omitted, all ads are returned
func (l *LocalBitcoins) Getads(args ...string) (AdData, error) {
var resp struct {
Data AdData `json:"data"`
}
if len(args) == 0 {
return resp.Data, l.SendAuthenticatedHTTPRequest(http.MethodGet, localbitcoinsAPIAds, nil, &resp)
}
params := url.Values{"ads": {strings.Join(args, ",")}}
return resp.Data, l.SendAuthenticatedHTTPRequest(http.MethodGet, localbitcoinsAPIAdGet, params, &resp)
}
// EditAd updates set advertisements
//
// params - see localbitcoins_types.go AdEdit for reference
// adID - string for the ad you already created
// TODO
func (l *LocalBitcoins) EditAd(_ *AdEdit, adID string) error {
type response struct {
Data AdData `json:"data"`
}
resp := response{}
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIAdEdit+adID+"/", nil, &resp)
}
// CreateAd creates a new advertisement
//
// params - see localbitcoins_types.go AdCreate for reference
// TODO
func (l *LocalBitcoins) CreateAd(_ *AdCreate) error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIAdCreate, nil, nil)
}
// UpdatePriceEquation updates price equation of an advertisement. If there are
// problems with new equation, the price and equation are not updated and
// advertisement remains visible.
//
// equation - string of equation
// adID - string of specific ad identification
// TODO
func (l *LocalBitcoins) UpdatePriceEquation(adID string) error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIUpdateEquation+adID, nil, nil)
}
// DeleteAd deletes the advertisement by adID.
//
// adID - string of specific ad identification
// TODO
func (l *LocalBitcoins) DeleteAd(adID string) error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIDeleteAd+adID, nil, nil)
}
// ReleaseFunds releases Bitcoin trades specified by ID {contact_id}. If the
// release was successful a message is returned on the data key.
func (l *LocalBitcoins) ReleaseFunds(contactID string) error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIRelease+contactID, nil, nil)
}
// ReleaseFundsByPin releases Bitcoin trades specified by ID {contact_id} if
// the current pincode is provided. If the release was successful, a message is
// returned on the data key.
// TODO
func (l *LocalBitcoins) ReleaseFundsByPin(contactID string) error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIReleaseByPin+contactID, nil, nil)
}
// MarkAsPaid marks a trade as paid.
func (l *LocalBitcoins) MarkAsPaid(contactID string) error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIMarkAsPaid+contactID, nil, nil)
}
// GetMessages returns all chat messages from the trade. Messages are on the message_list key.
func (l *LocalBitcoins) GetMessages(contactID string) (Message, error) {
type response struct {
MessageList Message `json:"message_list"`
}
resp := response{}
return resp.MessageList,
l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIMessages+contactID, nil, &resp)
}
// SendMessage posts a message and/or uploads an image to the trade. Encode
// images with multipart/form-data encoding.
// TODO
func (l *LocalBitcoins) SendMessage(contactID string) error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPISendMessage+contactID, nil, nil)
}
// Dispute starts a dispute on the specified trade ID if the requirements for
// starting the dispute have been fulfilled.
//
// topic - [optional] String Short description of issue to LocalBitcoins customer support.
// TODO
func (l *LocalBitcoins) Dispute(_, contactID string) error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIDispute+contactID, nil, nil)
}
// CancelTrade cancels the trade if the token owner is the Bitcoin buyer.
// Bitcoin sellers cannot cancel trades.
func (l *LocalBitcoins) CancelTrade(contactID string) error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPICancelTrade+contactID, nil, nil)
}
// FundTrade attempts to fund an unfunded local trade from the token owners
// wallet. Works only if the token owner is the Bitcoin seller in the trade.
func (l *LocalBitcoins) FundTrade(contactID string) error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIFundTrade+contactID, nil, nil)
}
// ConfirmRealName creates or updates real name confirmation.
func (l *LocalBitcoins) ConfirmRealName(contactID string) error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIConfirmRealName+contactID, nil, nil)
}
// VerifyIdentity marks the identity of trade partner as verified. You must be
// the advertiser in this trade.
func (l *LocalBitcoins) VerifyIdentity(contactID string) error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIVerifyIdentity+contactID, nil, nil)
}
// InitiateTrade attempts to start a Bitcoin trade from the specified
// advertisement ID.
// TODO
func (l *LocalBitcoins) InitiateTrade(adID string) error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIInitiateTrade+adID, nil, nil)
}
// GetTradeInfo returns information about a single trade that the token owner is
// part in.
func (l *LocalBitcoins) GetTradeInfo(contactID string) (dbi DashBoardInfo, err error) {
err = l.SendAuthenticatedHTTPRequest(http.MethodGet, localbitcoinsAPITradeInfo+contactID+"/", nil, &dbi)
return
}
// GetCountryCodes returns a list of valid and recognized countrycodes
func (l *LocalBitcoins) GetCountryCodes() error {
return l.SendHTTPRequest(l.APIUrl+localbitcoinsAPICountryCodes, nil)
}
// GetCurrencies returns a list of valid and recognized fiat currencies. Also
// contains human readable name for every currency and boolean that tells if
// currency is an altcoin.
func (l *LocalBitcoins) GetCurrencies() error {
return l.SendHTTPRequest(l.APIUrl+localbitcoinsAPICurrencies, nil)
}
// GetDashboardInfo returns a list of trades on the data key contact_list. This
// API end point mirrors the website's dashboard, allowing access to contacts in
// different states.
// In addition all of these listings have buyer/ and seller/ sub-listings to
// view contacts where the token owner is either buying or selling, respectively.
// E.g. /api/dashboard/buyer/. All contacts where the token owner is
// participating are returned.
func (l *LocalBitcoins) GetDashboardInfo() ([]DashBoardInfo, error) {
var resp struct {
Data struct {
ContactList []DashBoardInfo `json:"contact_list"`
ContactCount int `json:"contact_count"`
}
}
return resp.Data.ContactList,
l.SendAuthenticatedHTTPRequest(http.MethodGet, localbitcoinsAPIDashboard, nil, &resp)
}
// GetDashboardReleasedTrades returns a list of all released trades where the
// token owner is either a buyer or seller.
func (l *LocalBitcoins) GetDashboardReleasedTrades() ([]DashBoardInfo, error) {
var resp struct {
Data struct {
ContactList []DashBoardInfo `json:"contact_list"`
ContactCount int `json:"contact_count"`
}
}
return resp.Data.ContactList,
l.SendAuthenticatedHTTPRequest(http.MethodGet, localbitcoinsAPIDashboardReleased, nil, &resp)
}
// GetDashboardCancelledTrades returns a list of all canceled trades where the
// token owner is either a buyer or seller.
func (l *LocalBitcoins) GetDashboardCancelledTrades() ([]DashBoardInfo, error) {
var resp struct {
Data struct {
ContactList []DashBoardInfo `json:"contact_list"`
ContactCount int `json:"contact_count"`
}
}
return resp.Data.ContactList,
l.SendAuthenticatedHTTPRequest(http.MethodGet, localbitcoinsAPIDashboardCancelled, nil, &resp)
}
// GetDashboardClosedTrades returns a list of all closed trades where the token
// owner is either a buyer or seller.
func (l *LocalBitcoins) GetDashboardClosedTrades() ([]DashBoardInfo, error) {
var resp struct {
Data struct {
ContactList []DashBoardInfo `json:"contact_list"`
ContactCount int `json:"contact_count"`
}
}
return resp.Data.ContactList,
l.SendAuthenticatedHTTPRequest(http.MethodGet, localbitcoinsAPIDashboardClosed, nil, &resp)
}
// SetFeedback gives feedback to user. Possible feedback values are: trust,
// positive, neutral, block, block_without_feedback (see the const values above).
// This is only possible to set if there is a trade between the token owner and
// the user specified in {username} that is canceled or released. You may also
// set feedback message using msg field with few exceptions. Feedback
// block_without_feedback clears the message and with block the message is
// mandatory.
//
// feedback - string (use the const values for feedback)
// msg - [optional] Feedback message displayed alongside feedback on receivers
// profile page.
// username - username of trade contact
// TODO
func (l *LocalBitcoins) SetFeedback() error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIFeedback, nil, nil)
}
// Logout expires the current access token immediately. To get a new token
// afterwards, public apps will need to re-authenticate, confidential apps can
// turn in a refresh token.
func (l *LocalBitcoins) Logout() error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPILogout, nil, nil)
}
// CreateNewInvoice creates a new invoice.
// TODO
func (l *LocalBitcoins) CreateNewInvoice() error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPICreateInvoice, nil, nil)
}
// GetInvoice returns information about a specific invoice created by the token
// owner.
// TODO
func (l *LocalBitcoins) GetInvoice() (Invoice, error) {
resp := Invoice{}
return resp, l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPICreateInvoice, nil, &resp)
}
// DeleteInvoice deletes a specific invoice. Deleting invoices is possible when
// it is sure that receiver cannot accidentally pay the invoice at the same time
// as the merchant is deleting it. You can use the API request
// /api/merchant/invoice/{invoice_id}/ to check if deleting is possible.
// TODO
func (l *LocalBitcoins) DeleteInvoice() (Invoice, error) {
resp := Invoice{}
return resp, l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPICreateInvoice, nil, &resp)
}
// GetNotifications returns recent notifications.
func (l *LocalBitcoins) GetNotifications() ([]NotificationInfo, error) {
var resp []NotificationInfo
return resp, l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIGetNotification, nil, &resp)
}
// MarkNotifications marks a specific notification as read.
// TODO
func (l *LocalBitcoins) MarkNotifications() error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIMarkNotification, nil, nil)
}
// GetPaymentMethods returns a list of valid payment methods. Also contains name
// and code for payment methods, and possible limitations in currencies and bank
// name choices.
func (l *LocalBitcoins) GetPaymentMethods() error {
return l.SendHTTPRequest(l.APIUrl+localbitcoinsAPIPaymentMethods, nil)
}
// GetPaymentMethodsByCountry returns a list of valid payment methods filtered
// by countrycodes.
func (l *LocalBitcoins) GetPaymentMethodsByCountry(countryCode string) error {
return l.SendHTTPRequest(l.APIUrl+localbitcoinsAPIPaymentMethods+countryCode, nil)
}
// CheckPincode checks the given PIN code against the token owner's currently
// active PIN code. You can use this method to ensure the person using the
// session is the legitimate user.
// Due to only requiring the read scope, the user is not guaranteed to have set
// a PIN code. If you protect your application using this request, please make
// sure the user has set a PIN code for their account.
func (l *LocalBitcoins) CheckPincode(pin int) (bool, error) {
type response struct {
Data struct {
PinOK bool `json:"pincode_ok"`
} `json:"data"`
}
resp := response{}
values := url.Values{}
values.Set("pincode", strconv.Itoa(pin))
err := l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIPinCode, values, &resp)
if err != nil {
return false, err
}
if !resp.Data.PinOK {
return false, errors.New("pin invalid")
}
return true, nil
}
// GetPlaces Looks up places near lat, lon and provides full URLs to buy and
// sell listings for each.
// TODO
func (l *LocalBitcoins) GetPlaces() error {
return l.SendHTTPRequest(l.APIUrl+localbitcoinsAPIPlaces, nil)
}
// VerifyUsername returns list of real name verifiers for the user. Returns a
// list only when you have a trade with the user where you are the seller.
func (l *LocalBitcoins) VerifyUsername() error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIVerifyUsername, nil, nil)
}
// GetRecentMessages returns maximum of 25 newest trade messages. Does not
// return messages older than one month. Messages are ordered by sending time,
// and the newest one is first.
// TODO
func (l *LocalBitcoins) GetRecentMessages() error {
return l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIVerifyUsername, nil, nil)
}
// GetWalletInfo gets information about the token owner's wallet balance.
func (l *LocalBitcoins) GetWalletInfo() (WalletInfo, error) {
type response struct {
Data WalletInfo `json:"data"`
}
resp := response{}
err := l.SendAuthenticatedHTTPRequest(http.MethodGet, localbitcoinsAPIWallet, nil, &resp)
if err != nil {
return WalletInfo{}, err
}
if resp.Data.Message != "OK" {
return WalletInfo{}, errors.New("unable to fetch wallet info")
}
return resp.Data, nil
}
// GetWalletBalance Same as GetWalletInfo(), but only returns the message,
// receiving_address and total fields.
// Use this instead if you don't care about transactions at the moment.
func (l *LocalBitcoins) GetWalletBalance() (WalletBalanceInfo, error) {
type response struct {
Data WalletBalanceInfo `json:"data"`
}
resp := response{}
err := l.SendAuthenticatedHTTPRequest(http.MethodGet, localbitcoinsAPIWalletBalance, nil, &resp)
if err != nil {
return WalletBalanceInfo{}, err
}
if resp.Data.Message != "OK" {
return WalletBalanceInfo{}, errors.New("unable to fetch wallet balance")
}
return resp.Data, nil
}
// WalletSend sends amount of bitcoins from the token owner's wallet to address.
// On success, the response returns a message indicating success. It is highly
// recommended to minimize the lifetime of access tokens with the money
// permission. Use Logout() to make the current token expire instantly.
func (l *LocalBitcoins) WalletSend(address string, amount float64, pin int64) (bool, error) {
values := url.Values{}
values.Set("address", address)
values.Set("amount", strconv.FormatFloat(amount, 'f', -1, 64))
path := localbitcoinsAPIWalletSend
if pin > 0 {
values.Set("pincode", fmt.Sprintf("%v", pin))
path = localbitcoinsAPIWalletSendPin
}
type response struct {
Data struct {
Message string `json:"message"`
} `json:"data"`
}
resp := response{}
err := l.SendAuthenticatedHTTPRequest(http.MethodPost, path, values, &resp)
if err != nil {
return false, err
}
if resp.Data.Message != "Money is being sent" {
return false, errors.New("unable to send Bitcoins")
}
return true, nil
}
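// Usage sketch (hypothetical address, amount and PIN; requires an access token
// with the money permission):
//
//	sent, err := l.WalletSend("<receiving address>", 0.01, 1234)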
// GetWalletAddress returns an unused receiving address from the token owner's
// wallet. The address is returned in the address key of the response. Note that
// this API may keep returning the same (unused) address if requested repeatedly.
func (l *LocalBitcoins) GetWalletAddress() (string, error) {
type response struct {
Data struct {
Message string `json:"message"`
Address string `json:"address"`
}
}
resp := response{}
err := l.SendAuthenticatedHTTPRequest(http.MethodPost, localbitcoinsAPIWalletAddress, nil, &resp)
if err != nil |
if resp.Data.Message != "OK!" {
return "", errors.New("unable to fetch wallet address")
}
return resp.Data.Address, nil
}
// GetBitcoinsWithCashAd returns buy or sell as cash local advertisements.
// TODO
func (l *LocalBitcoins) GetBitcoinsWithCashAd() error {
return l.SendHTTPRequest(l.APIUrl+localbitcoinsAPICashBuy, nil)
}
// GetBitcoinsOnlineAd this API returns buy or sell Bitcoin online ads.
// TODO
func (l *LocalBitcoins) GetBitcoinsOnlineAd() error {
return l.SendHTTPRequest(l.APIUrl+localbitcoinsAPIOnlineBuy, nil)
}
// GetTicker returns list of all completed trades.
func (l *LocalBitcoins) GetTicker() (map[string]Ticker, error) {
result := make(map[string]Ticker)
return result, l.SendHTTPRequest(l.APIUrl+localbitcoinsAPITicker, &result)
}
// GetTradableCurrencies returns a list of tradable fiat currencies
func (l *LocalBitcoins) GetTradableCurrencies() ([]string, error) {
resp, err := l.GetTicker()
if err != nil {
return nil, err
}
var currencies []string
for x := range resp {
currencies = append(currencies, x)
}
return currencies, nil
}
// GetTrades returns all closed trades in online buy and online sell categories,
// updated every 15 minutes.
func (l *LocalBitcoins) GetTrades(currency string, values url.Values) ([]Trade, error) {
path := common.EncodeURLValues(fmt.Sprintf("%s/%s/trades.json", l.APIUrl+localbitcoinsAPIBitcoincharts, currency), values)
var result []Trade
return result, l.SendHTTPRequest(path, &result)
}
// GetOrderbook returns buy and sell bitcoin online advertisements. Amount is
// the maximum amount available for the trade request. Price is the hourly
// updated price. The price is based on the price equation and commission %
// entered by the ad author.
func (l *LocalBitcoins) GetOrderbook(currency string) (Orderbook, error) {
type response struct {
Bids [][]string `json:"bids"`
Asks [][]string `json:"asks"`
}
path := fmt.Sprintf("%s/%s/orderbook.json", l.APIUrl+localbitcoinsAPIBitcoincharts, currency)
resp := response{}
err := l.SendHTTPRequest(path, &resp)
if err != nil {
return Orderbook{}, err
}
orderbook := Orderbook{}
for _, x := range resp.Bids {
price, err := strconv.ParseFloat(x[0], 64)
if err != nil {
log.Error(err)
continue
}
amount, err := strconv.ParseFloat(x[1], 64)
if err != nil {
log.Error(err)
continue
}
orderbook.Bids = append(orderbook.Bids, Price{price, amount})
}
for _, x := range resp.Asks {
price, err := strconv.ParseFloat(x[0], 64)
if err != nil {
log.Error(err)
continue
}
amount, err := strconv.ParseFloat(x[1], 64)
if err != nil {
log.Error(err)
continue
}
orderbook.Asks = append(orderbook.Asks, Price{price, amount})
}
return orderbook, nil
}
// SendHTTPRequest sends an unauthenticated HTTP request
func (l *LocalBitcoins) SendHTTPRequest(path string, result interface{}) error {
return l.SendPayload(http.MethodGet, path, nil, nil, result, false, false, l.Verbose, l.HTTPDebugging)
}
// SendAuthenticatedHTTPRequest sends an authenticated HTTP request to
// localbitcoins
func (l *LocalBitcoins) SendAuthenticatedHTTPRequest(method, path string, params url.Values, result interface{}) (err error) {
if !l.AuthenticatedAPISupport {
return fmt.Errorf(exchange.WarningAuthenticatedRequestWithoutCredentialsSet, l.Name)
}
n := l.Requester.GetNonce(true).String()
path = "/api/" + path
encoded := params.Encode()
message := n + l.APIKey + path + encoded
hmac := common.GetHMAC(common.HashSHA256, []byte(message), []byte(l.APISecret))
headers := make(map[string]string)
headers["Apiauth-Key"] = l.APIKey
headers["Apiauth-Nonce"] = n
headers["Apiauth-Signature"] = common.StringToUpper(common.HexEncodeToString(hmac))
headers["Content-Type"] = "application/x-www-form-urlencoded"
if l.Verbose {
		log.Debugf("Sending %s request to `%s`, path: `%s`, params: `%s`.", method, l.APIUrl, path, encoded)
}
if method == http.MethodGet && len(encoded) > 0 {
path += "?" + encoded
}
return l.SendPayload(method, l.APIUrl+path, headers, strings.NewReader(encoded), result, true, true, l.Verbose, l.HTTPDebugging)
}
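// Signature sketch (stdlib-only, illustrative of the scheme above): the
// Apiauth-Signature is the uppercase hex HMAC-SHA256 of nonce+key+path+params,
// keyed with the API secret:
//
//	mac := hmac.New(sha256.New, []byte(apiSecret))
//	mac.Write([]byte(nonce + apiKey + path + params.Encode()))
//	signature := strings.ToUpper(hex.EncodeToString(mac.Sum(nil)))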
// GetFee returns an estimate of fee based on type of transaction
func (l *LocalBitcoins) GetFee(feeBuilder *exchange.FeeBuilder) (float64, error) {
// No fees will be used
return 0, nil
}
| {
return "", err
} |
ShadowGaps.js | import GlideGapsComponent from '@glidejs/glide/src/components/gaps';
import { throttle } from '@glidejs/glide/src/utils/wait';
export default function (Glide, Components, Events) { | const Gaps = GlideGapsComponent(Glide, Components, Events);
/**
* Apply calculated gaps:
* - after building, so slides (including clones) will receive proper margins
* - on updating via API, to recalculate gaps with new options
*/
Events.on(
['build.after', 'update'],
throttle(() => {
Gaps.apply(Components.Html.host.children);
}, 30),
);
/**
* Remove gaps:
* - on destroying to bring markup to its inital state
*/
Events.on('destroy', () => {
Gaps.remove(Components.Html.host.children);
});
return Gaps;
} | |
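// Usage sketch (illustrative; exact override behaviour depends on the Glide
// version): pass the component as an extension when mounting, e.g.
//   new Glide('.glide').mount({ Gaps: ShadowGaps });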
properties.rs | use super::{
general::{cfg_deprecated, doc_alias, version_condition},
property_body,
};
use crate::{
analysis::{properties::Property, rust_type::RustType},
chunk::Chunk,
env::Env,
library,
traits::IntoString,
writer::{primitives::tabs, ToCode},
};
use std::io::{Result, Write};
pub fn generate(
w: &mut dyn Write,
env: &Env,
prop: &Property,
in_trait: bool,
only_declaration: bool,
indent: usize,
) -> Result<()> {
generate_prop_func(w, env, prop, in_trait, only_declaration, indent)?;
Ok(())
}
fn generate_prop_func(
w: &mut dyn Write,
env: &Env,
prop: &Property,
in_trait: bool,
only_declaration: bool,
indent: usize,
) -> Result<()> {
let pub_prefix = if in_trait { "" } else { "pub " };
let decl_suffix = if only_declaration { ";" } else { " {" };
let type_string = RustType::try_new(env, prop.typ);
let commented = type_string.is_err();
let comment_prefix = if commented { "//" } else { "" };
writeln!(w)?;
let decl = declaration(env, prop);
if !in_trait || only_declaration {
cfg_deprecated(w, env, prop.deprecated_version, commented, indent)?;
}
version_condition(w, env, prop.version, commented, indent)?;
if !in_trait || only_declaration {
let add_doc_alias = if let Some(func_name_alias) = prop.func_name_alias.as_ref() {
&prop.name != func_name_alias && prop.name != prop.var_name
} else {
prop.name != prop.var_name
};
if add_doc_alias {
doc_alias(w, &prop.name, comment_prefix, indent)?;
}
}
writeln!(
w,
"{}{}{}{}{}",
tabs(indent),
comment_prefix,
pub_prefix,
decl,
decl_suffix
)?;
if !only_declaration {
let body = body(env, prop, in_trait).to_code(env);
for s in body {
writeln!(w, "{}{}{}", tabs(indent), comment_prefix, s)?;
}
}
Ok(())
}
fn declaration(env: &Env, prop: &Property) -> String {
let bound: String;
let set_param = if prop.is_get {
bound = String::new();
String::new()
} else if let Some(ref set_bound) = prop.set_bound {
bound = format!("<{}: IsA<{}>>", set_bound.alias, set_bound.type_str);
format!(", {}: Option<&{}>", prop.var_name, set_bound.alias)
} else {
bound = String::new();
let dir = library::ParameterDirection::In;
let param_type = RustType::builder(env, prop.typ)
.direction(dir)
.nullable(prop.nullable)
.ref_mode(prop.set_in_ref_mode)
.try_build_param()
.into_string();
format!(", {}: {}", prop.var_name, param_type)
};
let return_str = if prop.is_get {
let dir = library::ParameterDirection::Return;
let ret_type = RustType::builder(env, prop.typ)
.direction(dir)
.nullable(prop.nullable)
.ref_mode(prop.get_out_ref_mode)
.try_build_param() | format!(" -> {}", ret_type)
} else {
"".to_string()
};
format!(
"fn {}{}(&self{}){}",
prop.func_name, bound, set_param, return_str
)
}
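/// Builds the function body via `property_body::Builder`; unresolvable types
/// are emitted as a `/*Unknown type*/` placeholder.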
fn body(env: &Env, prop: &Property, in_trait: bool) -> Chunk {
let mut builder = property_body::Builder::new(env);
builder
.name(&prop.name)
.in_trait(in_trait)
.var_name(&prop.var_name)
.is_get(prop.is_get);
if let Ok(type_) = RustType::try_new(env, prop.typ) {
builder.type_(type_.as_str());
} else {
builder.type_("/*Unknown type*/");
}
builder.generate()
} | .into_string(); |
global.rs | use alloc::sync::Arc;
use core::cell::Cell;
use parity_wasm::elements::ValueType as EValueType;
use types::ValueType;
use value::RuntimeValue;
use Error;
/// Reference to a global variable (See [`GlobalInstance`] for details).
///
/// This reference has a reference-counting semantics.
///
/// [`GlobalInstance`]: struct.GlobalInstance.html
#[derive(Clone, Debug)]
pub struct GlobalRef(Arc<GlobalInstance>);
impl ::core::ops::Deref for GlobalRef {
type Target = GlobalInstance;
fn deref(&self) -> &GlobalInstance {
&self.0
}
}
/// Runtime representation of a global variable (or `global` for short).
///
/// A global contains a value of a specified type and a flag which specifies whether the
/// global is mutable or immutable. Neither the type of the value nor the mutability can
/// be changed after creation.
///
/// An attempt to change the value of an immutable global, or to change the type of
/// the value (e.g. assigning an [`I32`] value to a global created with the [`I64`] type), will lead to an error.
///
/// [`I32`]: enum.RuntimeValue.html#variant.I32
/// [`I64`]: enum.RuntimeValue.html#variant.I64
#[derive(Debug)]
pub struct GlobalInstance {
val: Cell<RuntimeValue>,
mutable: bool,
}
impl GlobalInstance {
/// Allocate a global variable instance.
///
/// Since it is possible to export only immutable globals,
/// users likely want to set `mutable` to `false`.
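    ///
    /// # Examples
    ///
    /// A minimal sketch (assumes this crate is available as `wasmi`):
    ///
    /// ```no_run
    /// use wasmi::{GlobalInstance, RuntimeValue};
    ///
    /// let global = GlobalInstance::alloc(RuntimeValue::I32(42), true);
    /// assert_eq!(global.get(), RuntimeValue::I32(42));
    /// global.set(RuntimeValue::I32(43)).unwrap();
    /// ```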
pub fn alloc(val: RuntimeValue, mutable: bool) -> GlobalRef {
GlobalRef(Arc::new(GlobalInstance {
val: Cell::new(val),
mutable,
}))
}
/// Change the value of this global variable.
///
/// # Errors
///
    /// Returns `Err` if this global isn't mutable or if the
    /// type of `val` doesn't match the global's type.
pub fn set(&self, val: RuntimeValue) -> Result<(), Error> |
/// Get the value of this global variable.
pub fn get(&self) -> RuntimeValue {
self.val.get()
}
    /// Returns whether this global variable is mutable.
///
/// Note: Imported and/or exported globals are always immutable.
pub fn is_mutable(&self) -> bool {
self.mutable
}
/// Returns value type of this global variable.
pub fn value_type(&self) -> ValueType {
self.val.get().value_type()
}
pub(crate) fn elements_value_type(&self) -> EValueType {
self.value_type().into_elements()
}
}
unsafe impl Sync for GlobalRef {}
unsafe impl Send for GlobalRef {}
unsafe impl Sync for GlobalInstance {}
unsafe impl Send for GlobalInstance {}
| {
if !self.mutable {
return Err(Error::Global(
"Attempt to change an immutable variable".into(),
));
}
if self.value_type() != val.value_type() {
return Err(Error::Global("Attempt to change variable type".into()));
}
self.val.set(val);
Ok(())
} |
minification.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
JS and CSS minification
============================
Author: Toni Heittola ([email protected])
This script minifies the project's JS and CSS resources into .min.* companions.
"""
import os
import sys
import io
import argparse
import textwrap
__version_info__ = ('0', '1', '0')
__version__ = '.'.join(__version_info__)
def main(argv):
parser = argparse.ArgumentParser(
prefix_chars='-+',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
JS and CSS minification
---------------------------------------------
Author: Toni Heittola ( [email protected] )
'''))
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
args = parser.parse_args()
print "JS and CSS minification"
print "-----------------------"
minify_css_directory2(source='css', target='css.min')
minify_js_directory(source='js', target='js.min')
def minify_css_directory(source, target):
"""
Move CSS resources from source directory to target directory and minify. Using csscompressor.
"""
from csscompressor import compress
if os.path.isdir(source):
if not os.path.exists(target):
os.makedirs(target)
for root, dirs, files in os.walk(source):
for current_file in files:
if current_file.endswith(".css"):
current_file_path = os.path.join(root, current_file)
print " ", current_file_path
with open(current_file_path) as css_file:
with open(os.path.join(target, current_file.replace('.css', '.min.css')), "w") as minified_file:
minified_file.write(compress(css_file.read()))
def minify_css_directory2(source, target):
"""
Move CSS resources from source directory to target directory and minify. Using rcssmin.
"""
import rcssmin
if os.path.isdir(source):
if not os.path.exists(target):
os.makedirs(target)
for root, dirs, files in os.walk(source):
for current_file in files:
if current_file.endswith(".css"):
current_file_path = os.path.join(root, current_file)
print " ", current_file_path
with open(current_file_path) as css_file:
with open(os.path.join(target, current_file.replace('.css', '.min.css')), "w") as minified_file:
minified_file.write(rcssmin.cssmin(css_file.read(), keep_bang_comments=True))
def | (source, target):
"""
Move JS resources from source directory to target directory and minify.
"""
from jsmin import jsmin
if os.path.isdir(source):
if not os.path.exists(target):
os.makedirs(target)
for root, dirs, files in os.walk(source):
for current_file in files:
if current_file.endswith(".js"):
current_file_path = os.path.join(root, current_file)
print " ", current_file_path
with open(current_file_path) as js_file:
with open(os.path.join(target, current_file.replace('.js', '.min.js')), "w") as minified_file:
minified_file.write(jsmin(js_file.read()))
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e) | minify_js_directory |
main.go | /*
Copyright 2019 The xridge kubestone contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"os"
"github.com/xridge/kubestone/controllers/esrally"
"github.com/xridge/kubestone/controllers/ocplogtest"
"github.com/xridge/kubestone/controllers/ycsbbench"
"github.com/go-logr/zapr"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
k8sscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
perfv1alpha1 "github.com/xridge/kubestone/api/v1alpha1"
"github.com/xridge/kubestone/controllers/drill"
"github.com/xridge/kubestone/controllers/fio"
"github.com/xridge/kubestone/controllers/ioping"
"github.com/xridge/kubestone/controllers/iperf3"
"github.com/xridge/kubestone/controllers/kafkabench"
"github.com/xridge/kubestone/controllers/pgbench"
"github.com/xridge/kubestone/controllers/qperf"
"github.com/xridge/kubestone/controllers/s3bench"
"github.com/xridge/kubestone/controllers/sysbench"
"github.com/xridge/kubestone/pkg/k8s"
// +kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
rootLog = zap.NewRaw(zap.WriteTo(os.Stderr), zap.UseDevMode(true))
setupLog = ctrl.Log.WithName("setup")
)
func init() {
_ = k8sscheme.AddToScheme(scheme)
_ = perfv1alpha1.AddToScheme(scheme)
// +kubebuilder:scaffold:scheme
}
// +kubebuilder:rbac:groups="",resources=events,verbs=create
func main() {
var metricsAddr string
var enableLeaderElection bool
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
flag.Parse()
ctrl.SetLogger(zapr.NewLogger(rootLog))
restClientConfig := ctrl.GetConfigOrDie()
mgr, err := ctrl.NewManager(restClientConfig, ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
LeaderElection: enableLeaderElection,
})
if err != nil {
setupLog.Error(err, "Unable to start manager")
os.Exit(1)
}
clientSet := kubernetes.NewForConfigOrDie(restClientConfig)
k8sAccess := k8s.Access{
Client: mgr.GetClient(),
Clientset: clientSet,
Scheme: mgr.GetScheme(),
EventRecorder: k8s.NewEventRecorder(clientSet, rootLog.Sugar().Infof),
}
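	// Register one reconciler per supported benchmark; the same
	// setup pattern repeats for every controller below.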
if err = (&iperf3.Reconciler{
K8S: k8sAccess,
Log: ctrl.Log.WithName("controllers").WithName("Iperf3"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Iperf3")
os.Exit(1)
}
if err = (&fio.Reconciler{
K8S: k8sAccess,
Log: ctrl.Log.WithName("controllers").WithName("Fio"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Fio")
os.Exit(1)
}
if err = (&sysbench.Reconciler{
K8S: k8sAccess,
Log: ctrl.Log.WithName("controllers").WithName("Sysbench"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Sysbench")
os.Exit(1)
}
if err = (&drill.Reconciler{
K8S: k8sAccess,
Log: ctrl.Log.WithName("controllers").WithName("Drill"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Drill")
os.Exit(1)
}
if err = (&pgbench.Reconciler{
K8S: k8sAccess,
Log: ctrl.Log.WithName("controllers").WithName("Pgbench"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Pgbench")
os.Exit(1) | Log: ctrl.Log.WithName("controllers").WithName("Ioping"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Ioping")
os.Exit(1)
}
if err = (&qperf.Reconciler{
K8S: k8sAccess,
Log: ctrl.Log.WithName("controllers").WithName("Qperf"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Qperf")
os.Exit(1)
}
if err = (&ycsbbench.Reconciler{
K8S: k8sAccess,
Log: ctrl.Log.WithName("controllers").WithName("YcsbBench"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "YcsbBench")
os.Exit(1)
}
if err = (&ocplogtest.Reconciler{
K8S: k8sAccess,
Log: ctrl.Log.WithName("controllers").WithName("OcpLogtest"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OcpLogtest")
os.Exit(1)
}
if err = (&esrally.Reconciler{
K8S: k8sAccess,
Log: ctrl.Log.WithName("controllers").WithName("EsRally"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "EsRally")
os.Exit(1)
}
if err = (&s3bench.Reconciler{
K8S: k8sAccess,
Log: ctrl.Log.WithName("controllers").WithName("S3Bench"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "S3Bench")
os.Exit(1)
}
if err = (&kafkabench.KafkaBenchReconciler{
K8S: k8sAccess,
Log: ctrl.Log.WithName("controllers").WithName("KafkaBench"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "KafkaBench")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
} | }
if err = (&ioping.Reconciler{
K8S: k8sAccess, |
TwHistory.py | import calendar
import math
import pandas as pd
import time
import twstock
import requests
from datetime import datetime, timedelta
from dateutil import relativedelta
from db.Connection import session
from enum import Enum
from model.StockHistory import StockHistory
from sys import float_info
from talib import abstract
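# Each HistoryType value is a (db code, human-readable label, horizon
# description) tuple, indexed through HistoryTypeTo in translate().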
class HistoryType(Enum):
DAY = ("0", "日", "短線")
WEEK = ("1", "週", "中短線")
MONTH = ("2", "月", "中長線")
class HistoryTypeTo(Enum):
DB = 0
HUMAN = 1
EXPLAIN = 2
class TwHistory:
"""TwHistory class"""
dateFormatForTwStock = None
dateFormat = None
rsiDict = None
williamsDict = None
macdDict = None
bbandDict = None
def __init__(self):
self.dateFormatForTwStock = "%Y/%m/%d"
self.dateFormat = "%Y-%m-%d"
def transformStrToDateTimeForTwStock(self, targetStr):
return datetime.strptime(targetStr, self.dateFormatForTwStock)
def transformStrToDateTime(self, targetStr):
return datetime.strptime(targetStr, self.dateFormat)
def transformDateTimeToStr(self, date):
return date.strftime(self.dateFormat)
def retIfNaN(self, num):
if math.isnan(num):
return None
else:
return num
def createDataFrame(self, history):
df = pd.DataFrame([h.a | teHistory(self, code, type, startDate, endDate):
session.query(StockHistory).\
filter(StockHistory.code == code).\
filter(StockHistory.type == type).\
filter(StockHistory.date >= self.transformDateTimeToStr(startDate)).\
filter(StockHistory.date <= self.transformDateTimeToStr(endDate)).\
delete()
session.commit()
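    # The calculate* helpers below cache indicator values keyed by date string,
    # later consumed by updateHistoryTechnicalIndicator().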
def calculateRSI(self, df):
rsi = abstract.RSI(df, timeperiod=5)
self.rsiDict = {}
for index, number in rsi.iteritems():
self.rsiDict[self.transformDateTimeToStr(index)] = number
def calculateWilliams(self, df):
williams = abstract.WILLR(df, timeperiod=5)
self.williamsDict = {}
for index, number in williams.iteritems():
self.williamsDict[self.transformDateTimeToStr(index)] = number
def calculateMACD(self, df):
macd = abstract.MACD(df)
self.macdDict = {}
for index, row in macd.iterrows():
self.macdDict[self.transformDateTimeToStr(index)] = row
def calculateBBAND(self, df):
bband = abstract.BBANDS(df, timeperiod=22)
self.bbandDict = {}
for index, row in bband.iterrows():
self.bbandDict[self.transformDateTimeToStr(index)] = row
def updateHistoryTechnicalIndicator(self, history):
date = history.date
updateFlag = False
if history.rsi is None:
history.rsi = self.retIfNaN(self.rsiDict[date])
updateFlag = updateFlag or history.rsi is not None
if history.williams is None:
history.williams = self.retIfNaN(self.williamsDict[date])
updateFlag = updateFlag or history.williams is not None
if history.macd is None:
history.macd = self.retIfNaN(self.macdDict[date].macd)
updateFlag = updateFlag or history.macd is not None
if history.macdsignal is None:
history.macdsignal = self.retIfNaN(self.macdDict[date].macdsignal)
updateFlag = updateFlag or history.macdsignal is not None
if history.macdhist is None:
history.macdhist = self.retIfNaN(self.macdDict[date].macdhist)
updateFlag = updateFlag or history.macdhist is not None
if history.upperband is None:
history.upperband = self.retIfNaN(self.bbandDict[date].upperband)
updateFlag = updateFlag or history.upperband is not None
if history.middleband is None:
history.middleband = self.retIfNaN(self.bbandDict[date].middleband)
updateFlag = updateFlag or history.middleband is not None
if history.lowerband is None:
history.lowerband = self.retIfNaN(self.bbandDict[date].lowerband)
updateFlag = updateFlag or history.lowerband is not None
if updateFlag:
session.merge(history)
def dayHistory(self):
for k, v in twstock.codes.items():
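            # NOTE: currently restricted to a single ticker ('3707'),
            # presumably left in for testing/debugging.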
if self.isStockOrETF(v.type) and k == '3707':
print("dayHistory code: " + k)
dayType = self.translate(HistoryType.DAY, HistoryTypeTo.DB) #get type value for db
history = session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == dayType).\
order_by(StockHistory.date.desc()).\
first()
nowDate = datetime.now()
endDateStr = self.transformDateTimeToStr(nowDate)
                startDateStr = self.transformDateTimeToStr(self.transformStrToDateTimeForTwStock(v.start)) if history is None else history.date  # if matching rows already exist in the DB, fetch only from the last recorded date
self.finmindtrade(k, startDateStr, endDateStr, dayType)
def weekHistory(self):
today = self.transformStrToDateTime(self.transformDateTimeToStr(datetime.now()))
weekStart = today - timedelta(days=today.weekday())
for k, v in twstock.codes.items():
if self.isStockOrETF(v.type) and self.isHistoryExist(k):
print("weekHistory code: " + k)
latestHistoryWeek = session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(HistoryType.WEEK, HistoryTypeTo.DB)).\
order_by(StockHistory.date.desc()).\
first()
startdate = self.transformStrToDateTimeForTwStock(v.start) if latestHistoryWeek is None else self.transformStrToDateTime(latestHistoryWeek.date)
weekStartPast = startdate - timedelta(days=startdate.weekday())
weekEndPast = weekStartPast + timedelta(days=6)
while weekStartPast <= weekStart:
self.deleteHistory(k, self.translate(HistoryType.WEEK, HistoryTypeTo.DB), weekStartPast, weekEndPast)
historyWeek = StockHistory(code=k, type=self.translate(HistoryType.WEEK, HistoryTypeTo.DB),
capacity=0, turnover=0, high=0, low=float_info.max, close=0)
firstFlag = True
for historyDay in session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
filter(StockHistory.date >= self.transformDateTimeToStr(weekStartPast)).\
filter(StockHistory.date <= self.transformDateTimeToStr(weekEndPast)).\
order_by(StockHistory.date.asc()).\
all():
historyWeek.date = self.transformDateTimeToStr(weekStartPast)
historyWeek.close = historyDay.close
historyWeek.capacity += historyDay.capacity
historyWeek.turnover += historyDay.turnover
if firstFlag:
historyWeek.open = historyDay.open
firstFlag = False
historyWeek.high = max(historyWeek.high, historyDay.high)
historyWeek.low = min(historyWeek.low, historyDay.low)
if not firstFlag:
session.merge(historyWeek)
weekStartPast += timedelta(days=7)
weekEndPast += timedelta(days=7)
session.commit()
def monthHistory(self):
today = self.transformStrToDateTime(self.transformDateTimeToStr(datetime.now()))
monthStart = today.replace(day=1)
for k, v in twstock.codes.items():
if self.isStockOrETF(v.type) and self.isHistoryExist(k):
print("monthHistory code: " + k)
latestHistoryMonth = session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(HistoryType.MONTH, HistoryTypeTo.DB)).\
order_by(StockHistory.date.desc()).\
first()
startdate = self.transformStrToDateTimeForTwStock(v.start) if latestHistoryMonth is None else self.transformStrToDateTime(latestHistoryMonth.date)
monthStartPast = startdate.replace(day=1)
monthEndPast = monthStartPast.replace(day=calendar.monthrange(monthStartPast.year, monthStartPast.month)[1])
while monthStartPast <= monthStart:
self.deleteHistory(k, self.translate(HistoryType.MONTH, HistoryTypeTo.DB), monthStartPast, monthEndPast)
historyMonth = StockHistory(code=k, type=self.translate(HistoryType.MONTH, HistoryTypeTo.DB),
capacity=0, turnover=0, high=0, low=float_info.max, close=0)
firstFlag = True
for historyDay in session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
filter(StockHistory.date >= self.transformDateTimeToStr(monthStartPast)).\
filter(StockHistory.date <= self.transformDateTimeToStr(monthEndPast)).\
order_by(StockHistory.date.asc()).\
all():
historyMonth.date = self.transformDateTimeToStr(monthStartPast)
historyMonth.close = historyDay.close
historyMonth.capacity += historyDay.capacity
historyMonth.turnover += historyDay.turnover
if firstFlag:
historyMonth.open = historyDay.open
firstFlag = False
historyMonth.high = max(historyMonth.high, historyDay.high)
historyMonth.low = min(historyMonth.low, historyDay.low)
if not firstFlag:
session.merge(historyMonth)
monthStartPast = monthStartPast + relativedelta.relativedelta(months=1)
monthEndPast = monthStartPast.replace(day=calendar.monthrange(monthStartPast.year, monthStartPast.month)[1])
session.commit()
def technicalIndicator(self):
for k, v in twstock.codes.items():
if self.isStockOrETF(v.type) and self.isHistoryExist(k):
for historyType in HistoryType:
print("technicalIndicator code: " + k + ", type: " + self.translate(historyType, HistoryTypeTo.HUMAN))
historyList = session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(historyType, HistoryTypeTo.DB)).\
order_by(StockHistory.date.asc()).\
all()
if len(historyList) == 0:
continue
df = self.createDataFrame(historyList)
self.calculateRSI(df)
self.calculateWilliams(df)
self.calculateMACD(df)
self.calculateBBAND(df)
for history in historyList:
self.updateHistoryTechnicalIndicator(history)
session.commit()
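    # diverge() ranks all stocks by latest daily turnover, then reports
    # RSI/Williams %R divergences (adjacent bars, peaks, and troughs) per
    # time frame as bullish or bearish signals.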
def diverge(self, highRsi, lowRsi, highWilliams, lowWilliams):
turnoverDict = {}
nameDict = {}
for k, v in twstock.codes.items():
if self.isStockOrETF(v.type) and self.isHistoryExist(k):
history = session.query(StockHistory).\
filter(StockHistory.code == k).\
filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
order_by(StockHistory.date.desc()).\
first()
turnoverDict[k] = history.turnover
nameDict[k] = v.name
rankDict = {k: v for k, v in sorted(turnoverDict.items(), key=lambda item: item[1], reverse=True)}
print("按當日成交值由大至小排名,背離條件: rsi > " + str(highRsi) + " or rsi < " + str(lowRsi))
for rankIdx, code in enumerate(rankDict.keys()):
closePrice = None
divergeDict = {}
for historyType in HistoryType:
historyTypeHuman = self.translate(historyType, HistoryTypeTo.HUMAN)
historyTypeExplain = self.translate(historyType, HistoryTypeTo.EXPLAIN)
historyList = session.query(StockHistory).\
filter(StockHistory.code == code).\
filter(StockHistory.type == self.translate(historyType, HistoryTypeTo.DB)).\
filter(StockHistory.rsi.isnot(None)).\
order_by(StockHistory.date.desc()).\
limit(self.recentHistoryLimit(historyType)).\
all()
historyListLength = len(historyList)
if historyListLength > 0:
closePrice = historyList[0].close
if historyListLength > 1:
if self.isHighRsi(highRsi, historyList) and historyList[0].rsi > historyList[1].rsi and historyList[0].williams < historyList[1].williams:
divergeDict[historyTypeHuman + " 相鄰背離 " + historyTypeExplain + "看空"] = "rsi up williams down"
elif self.isLowRsi(lowRsi, historyList) and historyList[0].rsi < historyList[1].rsi and historyList[0].williams > historyList[1].williams:
divergeDict[historyTypeHuman + " 相鄰背離 " + historyTypeExplain + "看多"] = "rsi down williams up"
if historyListLength > 2:
highPeak = []
lowPeak = []
for i, history in enumerate(historyList):
if i == 0 or i == historyListLength - 1:
continue
if len(highPeak) < 2 and historyList[i-1].rsi < history.rsi and history.rsi > historyList[i+1].rsi:
highPeak.append(history)
if len(lowPeak) < 2 and historyList[i-1].rsi > history.rsi and history.rsi < historyList[i+1].rsi:
lowPeak.append(history)
if len(highPeak) == 2 and len(lowPeak) == 2:
break
if len(highPeak) == 2 and self.isHighRsi(highRsi, highPeak):
if highPeak[0].rsi > highPeak[1].rsi and highPeak[0].williams < highPeak[1].williams:
divergeDict[historyTypeHuman + " 波峰背離 " + historyTypeExplain + "看空: " + highPeak[1].date + " and " + highPeak[0].date] = "rsi up williams down"
elif highPeak[0].rsi < highPeak[1].rsi and highPeak[0].williams > highPeak[1].williams and highPeak[0].williams >= highWilliams:
for low in lowPeak:
if highPeak[0].date > low.date and highPeak[1].date < low.date and low.williams <= lowWilliams:
divergeDict[historyTypeHuman + " 波峰背離 反彈不過前高 " + historyTypeExplain + "看空: " + highPeak[1].date + " and " + highPeak[0].date] = "rsi down williams fast up"
break
if len(lowPeak) == 2 and self.isLowRsi(lowRsi, lowPeak):
if lowPeak[0].rsi < lowPeak[1].rsi and lowPeak[0].williams > lowPeak[1].williams:
divergeDict[historyTypeHuman + " 波谷背離 " + historyTypeExplain + "看多: " + lowPeak[1].date + " and " + lowPeak[0].date] = "rsi down williams up"
elif lowPeak[0].rsi > lowPeak[1].rsi and lowPeak[0].williams < lowPeak[1].williams and lowPeak[0].williams <= lowWilliams:
for high in highPeak:
if lowPeak[0].date > high.date and lowPeak[1].date < high.date and high.williams >= highWilliams:
divergeDict[historyTypeHuman + " 波谷背離 回測不過前低 " + historyTypeExplain + "看多: " + lowPeak[1].date + " and " + lowPeak[0].date] = "rsi up williams fast down"
break
if len(divergeDict) > 0:
print("code: " + code + ", name: " + nameDict[code] + ", rank: " + str(rankIdx+1) + "/" + str(len(rankDict)) + ", close price: " + str(closePrice))
for k, v in divergeDict.items():
print(k + " => " + v)
print("")
print("========================================================================================")
def isStockOrETF(self, type):
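        # twstock reports security types in Chinese ("股票" means stock);
        # the literal must stay as-is to match the library's data.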
return type == "股票" or type == "ETF"
def isHistoryExist(self, code):
        if code == '3707':
return session.query(StockHistory).\
filter(StockHistory.code == code).\
filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
filter(StockHistory.date == self.transformDateTimeToStr(datetime.now())).\
first() is not None
return False
def isHighRsi(self, highRsi, historyList):
for i, history in enumerate(historyList):
if i < 2 and history.rsi < highRsi:
return False
elif i == 2:
break
return True
def isLowRsi(self, lowRsi, historyList):
for i, history in enumerate(historyList):
if i < 2 and history.rsi > lowRsi:
return False
elif i == 2:
break
return True
def recentHistoryLimit(self, historyType):
if historyType == HistoryType.DAY:
return 40
elif historyType == HistoryType.WEEK:
return 16
else:
return 6
def translate(self, historyType, historyTypeTo):
return historyType.value[historyTypeTo.value]
def finmindtrade(self, code, start, end, dayType):
url = "https://api.finmindtrade.com/api/v4/data"
parameter = {
"dataset": "TaiwanStockPrice",
"data_id": code,
"start_date": start,
"end_date": end,
"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJkYXRlIjoiMjAyMS0xMC0wMSAxNjoyMzoyNSIsInVzZXJfaWQiOiJtY3VpdGVhbGxlbiIsImlwIjoiMTE4LjE2My4xNDcuMTgyIn0.vXMykagq4kOKGrKOusgfAR3yhgcri0N_Wpe1Nb4DOiA"
}
resp = requests.get(url, params=parameter)
        resp_json = resp.json()
        if resp_json is not None:
            for data in resp_json["data"]:
history = StockHistory(code=code, type=dayType, date=data["date"],
capacity=data["Trading_Volume"], turnover=data["Trading_money"],
open=data["open"], high=data["max"], low=data["min"], close=data["close"])
session.merge(history)
session.commit()
time.sleep(6.1)
twHistory = TwHistory()
twHistory.dayHistory()
twHistory.weekHistory()
twHistory.monthHistory()
twHistory.technicalIndicator()
#twHistory.diverge(90, 10, -20, -80)
#twHistory.diverge(80, 20, -20, -80)
twHistory.diverge(70, 30, -20, -80) | s_simple_dict() for h in history])
df['date'] = pd.to_datetime(df['date'])
df.set_index('date', inplace=True)
return df
def dele |
endpoint_create.go | package endpoints
import (
"errors"
"fmt"
"net"
"net/http"
"net/url"
"runtime"
"strconv"
"strings"
"time"
"github.com/cloudogu/portainer-ce/api"
"github.com/cloudogu/portainer-ce/api/crypto"
"github.com/cloudogu/portainer-ce/api/http/client"
"github.com/cloudogu/portainer-ce/api/internal/edge"
httperror "github.com/portainer/libhttp/error"
"github.com/portainer/libhttp/request"
"github.com/portainer/libhttp/response"
)
type endpointCreatePayload struct {
Name string
URL string
EndpointCreationType endpointCreationEnum
PublicURL string
GroupID int
TLS bool
TLSSkipVerify bool
TLSSkipClientVerify bool
TLSCACertFile []byte
TLSCertFile []byte
TLSKeyFile []byte
AzureApplicationID string
AzureTenantID string
AzureAuthenticationKey string
TagIDs []portainer.TagID
EdgeCheckinInterval int
}
type endpointCreationEnum int
const (
_ endpointCreationEnum = iota
localDockerEnvironment
agentEnvironment
azureEnvironment
edgeAgentEnvironment
localKubernetesEnvironment
)
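// The numeric enum values above (1-5) are the ones documented in the
// Validate error message below.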
func (payload *endpointCreatePayload) Validate(r *http.Request) error {
name, err := request.RetrieveMultiPartFormValue(r, "Name", false)
if err != nil {
return errors.New("Invalid endpoint name")
}
payload.Name = name
endpointCreationType, err := request.RetrieveNumericMultiPartFormValue(r, "EndpointCreationType", false)
if err != nil || endpointCreationType == 0 {
return errors.New("Invalid endpoint type value. Value must be one of: 1 (Docker environment), 2 (Agent environment), 3 (Azure environment), 4 (Edge Agent environment) or 5 (Local Kubernetes environment)")
}
payload.EndpointCreationType = endpointCreationEnum(endpointCreationType)
groupID, _ := request.RetrieveNumericMultiPartFormValue(r, "GroupID", true)
if groupID == 0 {
groupID = 1
}
payload.GroupID = groupID
var tagIDs []portainer.TagID
err = request.RetrieveMultiPartFormJSONValue(r, "TagIds", &tagIDs, true)
if err != nil {
return errors.New("Invalid TagIds parameter")
}
payload.TagIDs = tagIDs
if payload.TagIDs == nil {
payload.TagIDs = make([]portainer.TagID, 0)
}
useTLS, _ := request.RetrieveBooleanMultiPartFormValue(r, "TLS", true)
payload.TLS = useTLS
if payload.TLS {
skipTLSServerVerification, _ := request.RetrieveBooleanMultiPartFormValue(r, "TLSSkipVerify", true)
payload.TLSSkipVerify = skipTLSServerVerification
skipTLSClientVerification, _ := request.RetrieveBooleanMultiPartFormValue(r, "TLSSkipClientVerify", true)
payload.TLSSkipClientVerify = skipTLSClientVerification
if !payload.TLSSkipVerify {
caCert, _, err := request.RetrieveMultiPartFormFile(r, "TLSCACertFile")
if err != nil {
return errors.New("Invalid CA certificate file. Ensure that the file is uploaded correctly")
}
payload.TLSCACertFile = caCert
}
if !payload.TLSSkipClientVerify {
cert, _, err := request.RetrieveMultiPartFormFile(r, "TLSCertFile")
if err != nil {
return errors.New("Invalid certificate file. Ensure that the file is uploaded correctly")
}
payload.TLSCertFile = cert
key, _, err := request.RetrieveMultiPartFormFile(r, "TLSKeyFile")
if err != nil {
return errors.New("Invalid key file. Ensure that the file is uploaded correctly")
}
payload.TLSKeyFile = key
}
}
switch payload.EndpointCreationType {
case azureEnvironment:
azureApplicationID, err := request.RetrieveMultiPartFormValue(r, "AzureApplicationID", false)
if err != nil {
return errors.New("Invalid Azure application ID")
}
payload.AzureApplicationID = azureApplicationID
azureTenantID, err := request.RetrieveMultiPartFormValue(r, "AzureTenantID", false)
if err != nil {
return errors.New("Invalid Azure tenant ID")
}
payload.AzureTenantID = azureTenantID
azureAuthenticationKey, err := request.RetrieveMultiPartFormValue(r, "AzureAuthenticationKey", false)
if err != nil {
return errors.New("Invalid Azure authentication key")
}
payload.AzureAuthenticationKey = azureAuthenticationKey
default:
endpointURL, err := request.RetrieveMultiPartFormValue(r, "URL", true)
if err != nil {
return errors.New("Invalid endpoint URL")
}
payload.URL = endpointURL
publicURL, _ := request.RetrieveMultiPartFormValue(r, "PublicURL", true)
payload.PublicURL = publicURL
}
checkinInterval, _ := request.RetrieveNumericMultiPartFormValue(r, "CheckinInterval", true)
payload.EdgeCheckinInterval = checkinInterval
return nil
}
// POST request on /api/endpoints
func (handler *Handler) endpointCreate(w http.ResponseWriter, r *http.Request) *httperror.HandlerError {
payload := &endpointCreatePayload{}
err := payload.Validate(r)
if err != nil {
return &httperror.HandlerError{http.StatusBadRequest, "Invalid request payload", err}
}
endpoint, endpointCreationError := handler.createEndpoint(payload)
if endpointCreationError != nil {
return endpointCreationError
}
endpointGroup, err := handler.DataStore.EndpointGroup().EndpointGroup(endpoint.GroupID)
if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to find an endpoint group inside the database", err}
}
edgeGroups, err := handler.DataStore.EdgeGroup().EdgeGroups()
if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve edge groups from the database", err}
}
edgeStacks, err := handler.DataStore.EdgeStack().EdgeStacks()
if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to retrieve edge stacks from the database", err}
}
relationObject := &portainer.EndpointRelation{
EndpointID: endpoint.ID,
EdgeStacks: map[portainer.EdgeStackID]bool{},
}
if endpoint.Type == portainer.EdgeAgentOnDockerEnvironment || endpoint.Type == portainer.EdgeAgentOnKubernetesEnvironment {
relatedEdgeStacks := edge.EndpointRelatedEdgeStacks(endpoint, endpointGroup, edgeGroups, edgeStacks)
for _, stackID := range relatedEdgeStacks {
relationObject.EdgeStacks[stackID] = true
}
}
err = handler.DataStore.EndpointRelation().CreateEndpointRelation(relationObject)
if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to persist the relation object inside the database", err}
}
return response.JSON(w, endpoint)
}
func (handler *Handler) createEndpoint(payload *endpointCreatePayload) (*portainer.Endpoint, *httperror.HandlerError) {
switch payload.EndpointCreationType {
case azureEnvironment:
return handler.createAzureEndpoint(payload)
case edgeAgentEnvironment:
return handler.createEdgeAgentEndpoint(payload)
case localKubernetesEnvironment:
return handler.createKubernetesEndpoint(payload)
}
endpointType := portainer.DockerEnvironment
if payload.EndpointCreationType == agentEnvironment {
agentPlatform, err := handler.pingAndCheckPlatform(payload)
if err != nil {
return nil, &httperror.HandlerError{http.StatusInternalServerError, "Unable to get endpoint type", err}
}
if agentPlatform == portainer.AgentPlatformDocker {
endpointType = portainer.AgentOnDockerEnvironment
} else if agentPlatform == portainer.AgentPlatformKubernetes {
endpointType = portainer.AgentOnKubernetesEnvironment
payload.URL = strings.TrimPrefix(payload.URL, "tcp://")
}
}
if payload.TLS {
return handler.createTLSSecuredEndpoint(payload, endpointType)
}
return handler.createUnsecuredEndpoint(payload)
}
func (handler *Handler) createAzureEndpoint(payload *endpointCreatePayload) (*portainer.Endpoint, *httperror.HandlerError) {
credentials := portainer.AzureCredentials{
ApplicationID: payload.AzureApplicationID,
TenantID: payload.AzureTenantID,
AuthenticationKey: payload.AzureAuthenticationKey,
}
httpClient := client.NewHTTPClient()
_, err := httpClient.ExecuteAzureAuthenticationRequest(&credentials)
if err != nil {
return nil, &httperror.HandlerError{http.StatusInternalServerError, "Unable to authenticate against Azure", err}
}
endpointID := handler.DataStore.Endpoint().GetNextIdentifier()
endpoint := &portainer.Endpoint{
ID: portainer.EndpointID(endpointID),
Name: payload.Name,
URL: "https://management.azure.com",
Type: portainer.AzureEnvironment,
GroupID: portainer.EndpointGroupID(payload.GroupID),
PublicURL: payload.PublicURL,
UserAccessPolicies: portainer.UserAccessPolicies{},
TeamAccessPolicies: portainer.TeamAccessPolicies{},
Extensions: []portainer.EndpointExtension{},
AzureCredentials: credentials,
TagIDs: payload.TagIDs,
Status: portainer.EndpointStatusUp,
Snapshots: []portainer.DockerSnapshot{},
Kubernetes: portainer.KubernetesDefault(),
}
err = handler.saveEndpointAndUpdateAuthorizations(endpoint)
if err != nil {
return nil, &httperror.HandlerError{http.StatusInternalServerError, "An error occured while trying to create the endpoint", err}
}
return endpoint, nil
}
func (handler *Handler) createEdgeAgentEndpoint(payload *endpointCreatePayload) (*portainer.Endpoint, *httperror.HandlerError) {
endpointID := handler.DataStore.Endpoint().GetNextIdentifier()
portainerURL, err := url.Parse(payload.URL)
if err != nil {
return nil, &httperror.HandlerError{http.StatusBadRequest, "Invalid endpoint URL", err}
}
portainerHost, _, err := net.SplitHostPort(portainerURL.Host)
if err != nil {
portainerHost = portainerURL.Host
}
if portainerHost == "localhost" {
return nil, &httperror.HandlerError{http.StatusBadRequest, "Invalid endpoint URL", errors.New("cannot use localhost as endpoint URL")}
}
edgeKey := handler.ReverseTunnelService.GenerateEdgeKey(payload.URL, portainerHost, endpointID)
endpoint := &portainer.Endpoint{
ID: portainer.EndpointID(endpointID),
Name: payload.Name,
URL: portainerHost,
Type: portainer.EdgeAgentOnDockerEnvironment,
GroupID: portainer.EndpointGroupID(payload.GroupID),
TLSConfig: portainer.TLSConfiguration{
TLS: false,
},
AuthorizedUsers: []portainer.UserID{},
AuthorizedTeams: []portainer.TeamID{},
Extensions: []portainer.EndpointExtension{},
TagIDs: payload.TagIDs,
Status: portainer.EndpointStatusUp,
Snapshots: []portainer.DockerSnapshot{},
EdgeKey: edgeKey,
EdgeCheckinInterval: payload.EdgeCheckinInterval,
Kubernetes: portainer.KubernetesDefault(),
}
err = handler.saveEndpointAndUpdateAuthorizations(endpoint)
if err != nil {
return nil, &httperror.HandlerError{http.StatusInternalServerError, "An error occured while trying to create the endpoint", err}
}
return endpoint, nil
}
func (handler *Handler) createUnsecuredEndpoint(payload *endpointCreatePayload) (*portainer.Endpoint, *httperror.HandlerError) {
endpointType := portainer.DockerEnvironment
if payload.URL == "" {
payload.URL = "unix:///var/run/docker.sock"
if runtime.GOOS == "windows" {
payload.URL = "npipe:////./pipe/docker_engine"
}
}
endpointID := handler.DataStore.Endpoint().GetNextIdentifier()
endpoint := &portainer.Endpoint{
ID: portainer.EndpointID(endpointID),
Name: payload.Name,
URL: payload.URL,
Type: endpointType,
GroupID: portainer.EndpointGroupID(payload.GroupID),
PublicURL: payload.PublicURL,
TLSConfig: portainer.TLSConfiguration{
TLS: false,
},
UserAccessPolicies: portainer.UserAccessPolicies{},
TeamAccessPolicies: portainer.TeamAccessPolicies{},
Extensions: []portainer.EndpointExtension{},
TagIDs: payload.TagIDs,
Status: portainer.EndpointStatusUp,
Snapshots: []portainer.DockerSnapshot{},
Kubernetes: portainer.KubernetesDefault(),
}
err := handler.snapshotAndPersistEndpoint(endpoint)
if err != nil {
return nil, err
}
return endpoint, nil
}
func (handler *Handler) createKubernetesEndpoint(payload *endpointCreatePayload) (*portainer.Endpoint, *httperror.HandlerError) {
if payload.URL == "" {
payload.URL = "https://kubernetes.default.svc"
}
endpointID := handler.DataStore.Endpoint().GetNextIdentifier()
endpoint := &portainer.Endpoint{
ID: portainer.EndpointID(endpointID),
Name: payload.Name,
URL: payload.URL,
Type: portainer.KubernetesLocalEnvironment,
GroupID: portainer.EndpointGroupID(payload.GroupID),
PublicURL: payload.PublicURL,
TLSConfig: portainer.TLSConfiguration{
TLS: payload.TLS,
TLSSkipVerify: payload.TLSSkipVerify,
},
UserAccessPolicies: portainer.UserAccessPolicies{},
TeamAccessPolicies: portainer.TeamAccessPolicies{},
Extensions: []portainer.EndpointExtension{},
TagIDs: payload.TagIDs,
Status: portainer.EndpointStatusUp,
Snapshots: []portainer.DockerSnapshot{},
Kubernetes: portainer.KubernetesDefault(),
}
err := handler.snapshotAndPersistEndpoint(endpoint)
if err != nil {
return nil, err
}
return endpoint, nil
}
func (handler *Handler) createTLSSecuredEndpoint(payload *endpointCreatePayload, endpointType portainer.EndpointType) (*portainer.Endpoint, *httperror.HandlerError) {
endpointID := handler.DataStore.Endpoint().GetNextIdentifier()
endpoint := &portainer.Endpoint{
ID: portainer.EndpointID(endpointID),
Name: payload.Name,
URL: payload.URL,
Type: endpointType,
GroupID: portainer.EndpointGroupID(payload.GroupID),
PublicURL: payload.PublicURL,
TLSConfig: portainer.TLSConfiguration{
TLS: payload.TLS,
TLSSkipVerify: payload.TLSSkipVerify,
},
UserAccessPolicies: portainer.UserAccessPolicies{},
TeamAccessPolicies: portainer.TeamAccessPolicies{},
Extensions: []portainer.EndpointExtension{},
TagIDs: payload.TagIDs,
Status: portainer.EndpointStatusUp,
Snapshots: []portainer.DockerSnapshot{},
Kubernetes: portainer.KubernetesDefault(),
}
err := handler.storeTLSFiles(endpoint, payload)
if err != nil {
return nil, err
}
err = handler.snapshotAndPersistEndpoint(endpoint)
if err != nil {
return nil, err
}
return endpoint, nil
}
func (handler *Handler) snapshotAndPersistEndpoint(endpoint *portainer.Endpoint) *httperror.HandlerError {
err := handler.SnapshotService.SnapshotEndpoint(endpoint)
if err != nil {
if strings.Contains(err.Error(), "Invalid request signature") {
err = errors.New("agent already paired with another Portainer instance")
}
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to initiate communications with endpoint", err}
}
err = handler.saveEndpointAndUpdateAuthorizations(endpoint)
if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "An error occured while trying to create the endpoint", err}
}
return nil
}
func (handler *Handler) saveEndpointAndUpdateAuthorizations(endpoint *portainer.Endpoint) error {
err := handler.DataStore.Endpoint().CreateEndpoint(endpoint)
if err != nil {
return err
}
for _, tagID := range endpoint.TagIDs {
tag, err := handler.DataStore.Tag().Tag(tagID)
if err != nil {
return err
}
tag.Endpoints[endpoint.ID] = true
err = handler.DataStore.Tag().UpdateTag(tagID, tag)
if err != nil {
return err
}
}
return nil
}
func (handler *Handler) storeTLSFiles(endpoint *portainer.Endpoint, payload *endpointCreatePayload) *httperror.HandlerError {
folder := strconv.Itoa(int(endpoint.ID))
if !payload.TLSSkipVerify {
caCertPath, err := handler.FileService.StoreTLSFileFromBytes(folder, portainer.TLSFileCA, payload.TLSCACertFile)
if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to persist TLS CA certificate file on disk", err}
}
endpoint.TLSConfig.TLSCACertPath = caCertPath
}
if !payload.TLSSkipClientVerify {
certPath, err := handler.FileService.StoreTLSFileFromBytes(folder, portainer.TLSFileCert, payload.TLSCertFile)
if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to persist TLS certificate file on disk", err}
}
endpoint.TLSConfig.TLSCertPath = certPath
keyPath, err := handler.FileService.StoreTLSFileFromBytes(folder, portainer.TLSFileKey, payload.TLSKeyFile)
if err != nil {
return &httperror.HandlerError{http.StatusInternalServerError, "Unable to persist TLS key file on disk", err}
}
endpoint.TLSConfig.TLSKeyPath = keyPath
}
return nil
}
func (handler *Handler) pingAndCheckPlatform(payload *endpointCreatePayload) (portainer.AgentPlatform, error) {
httpCli := &http.Client{
Timeout: 3 * time.Second,
}
if payload.TLS {
tlsConfig, err := crypto.CreateTLSConfigurationFromBytes(payload.TLSCACertFile, payload.TLSCertFile, payload.TLSKeyFile, payload.TLSSkipVerify, payload.TLSSkipClientVerify)
if err != nil |
httpCli.Transport = &http.Transport{
TLSClientConfig: tlsConfig,
}
}
url, err := url.Parse(fmt.Sprintf("%s/ping", payload.URL))
if err != nil {
return 0, err
}
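	// The agent endpoint is expected to serve TLS, so force HTTPS regardless
	// of the scheme in the user-supplied URL.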
url.Scheme = "https"
req, err := http.NewRequest(http.MethodGet, url.String(), nil)
if err != nil {
return 0, err
}
resp, err := httpCli.Do(req)
if err != nil {
return 0, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusNoContent {
return 0, fmt.Errorf("Failed request with status %d", resp.StatusCode)
}
agentPlatformHeader := resp.Header.Get(portainer.HTTPResponseAgentPlatform)
if agentPlatformHeader == "" {
return 0, errors.New("Agent Platform Header is missing")
}
agentPlatformNumber, err := strconv.Atoi(agentPlatformHeader)
if err != nil {
return 0, err
}
if agentPlatformNumber == 0 {
return 0, errors.New("Agent platform is invalid")
}
return portainer.AgentPlatform(agentPlatformNumber), nil
}
| {
return 0, err
} |
base_test_setup.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package integration
import (
"os"
"path"
"time"
mspclient "github.com/hyperledger/fabric-sdk-go/pkg/client/msp"
"github.com/hyperledger/fabric-sdk-go/pkg/client/resmgmt"
"github.com/hyperledger/fabric-sdk-go/pkg/common/errors/retry"
contextAPI "github.com/hyperledger/fabric-sdk-go/pkg/common/providers/context"
"github.com/hyperledger/fabric-sdk-go/pkg/common/providers/core"
fabAPI "github.com/hyperledger/fabric-sdk-go/pkg/common/providers/fab"
"github.com/hyperledger/fabric-sdk-go/pkg/common/providers/msp"
contextImpl "github.com/hyperledger/fabric-sdk-go/pkg/context"
"github.com/hyperledger/fabric-sdk-go/pkg/fab"
packager "github.com/hyperledger/fabric-sdk-go/pkg/fab/ccpackager/gopackager"
"github.com/hyperledger/fabric-sdk-go/pkg/fab/comm"
"github.com/hyperledger/fabric-sdk-go/pkg/fab/resource"
"github.com/hyperledger/fabric-sdk-go/pkg/fabsdk"
"github.com/hyperledger/fabric-sdk-go/pkg/util/test"
"github.com/hyperledger/fabric-sdk-go/test/metadata"
"github.com/hyperledger/fabric-sdk-go/third_party/github.com/hyperledger/fabric/common/cauthdsl"
cb "github.com/hyperledger/fabric-sdk-go/third_party/github.com/hyperledger/fabric/protos/common"
"github.com/pkg/errors"
)
// BaseSetupImpl implementation of BaseTestSetup
type BaseSetupImpl struct {
Identity msp.Identity
Targets []string
ConfigFile string
OrgID string
ChannelID string
ChannelConfigFile string
}
// Initial B values for ExampleCC
const (
ExampleCCInitB = "200"
ExampleCCUpgradeB = "400"
AdminUser = "Admin"
OrdererOrgName = "ordererorg"
)
// ExampleCC query and transaction arguments
var queryArgs = [][]byte{[]byte("query"), []byte("b")}
var txArgs = [][]byte{[]byte("move"), []byte("a"), []byte("b"), []byte("1")}
// ExampleCC init and upgrade args
var initArgs = [][]byte{[]byte("init"), []byte("a"), []byte("100"), []byte("b"), []byte(ExampleCCInitB)}
var upgradeArgs = [][]byte{[]byte("init"), []byte("a"), []byte("100"), []byte("b"), []byte(ExampleCCUpgradeB)}
// ExampleCCQueryArgs returns example cc query args
func ExampleCCQueryArgs() [][]byte {
return queryArgs
}
// ExampleCCTxArgs returns example cc move funds args
func ExampleCCTxArgs() [][]byte {
return txArgs
}
// ExampleCCInitArgs returns example cc initialization args
func ExampleCCInitArgs() [][]byte {
return initArgs
}
// ExampleCCUpgradeArgs returns example cc upgrade args
func ExampleCCUpgradeArgs() [][]byte {
return upgradeArgs
}
// IsJoinedChannel returns true if the given peer has joined the given channel
func IsJoinedChannel(channelID string, resMgmtClient *resmgmt.Client, peer fabAPI.Peer) (bool, error) {
resp, err := resMgmtClient.QueryChannels(resmgmt.WithTargets(peer))
if err != nil {
return false, err
}
for _, chInfo := range resp.Channels {
if chInfo.ChannelId == channelID {
return true, nil
}
}
return false, nil
}
// Initialize reads configuration from file and sets up client, channel and event hub
func (setup *BaseSetupImpl) Initialize(sdk *fabsdk.FabricSDK) error {
	mspClient, err := mspclient.New(sdk.Context(), mspclient.WithOrg(setup.OrgID))
	if err != nil {
		return errors.WithMessage(err, "failed to create MSP client")
	}
	adminIdentity, err := mspClient.GetSigningIdentity(AdminUser)
	if err != nil {
		return errors.WithMessage(err, "failed to get admin signing identity")
	}
setup.Identity = adminIdentity
var cfgBackends []core.ConfigBackend
configBackend, err := sdk.Config()
if err != nil {
//For some tests SDK may not have backend set, try with config file if backend is missing
cfgBackends, err = ConfigBackend()
if err != nil {
			return errors.Wrap(err, "failed to get config backend from config")
}
} else {
cfgBackends = append(cfgBackends, configBackend)
}
targets, err := OrgTargetPeers([]string{setup.OrgID}, cfgBackends...)
if err != nil {
return errors.Wrapf(err, "loading target peers from config failed")
}
setup.Targets = targets
r, err := os.Open(setup.ChannelConfigFile)
if err != nil {
return errors.Wrapf(err, "opening channel config file failed")
}
defer func() {
if err = r.Close(); err != nil {
test.Logf("close error %v", err)
}
}()
// Create channel for tests
req := resmgmt.SaveChannelRequest{ChannelID: setup.ChannelID, ChannelConfig: r, SigningIdentities: []msp.SigningIdentity{adminIdentity}}
if err = InitializeChannel(sdk, setup.OrgID, req, targets); err != nil {
return errors.WithMessage(err, "failed to initialize channel")
}
return nil
}
// GetDeployPath returns the path to the chaincode test fixtures directory.
func GetDeployPath() string {
pwd, _ := os.Getwd()
return path.Join(pwd, "../../fixtures/testdata")
}
// InstallAndInstantiateExampleCC install and instantiate using resource management client
func InstallAndInstantiateExampleCC(sdk *fabsdk.FabricSDK, user fabsdk.ContextOption, orgName string, chainCodeID string) (resmgmt.InstantiateCCResponse, error) {
return InstallAndInstantiateCC(sdk, user, orgName, chainCodeID, "github.com/example_cc", "v0", GetDeployPath(), initArgs)
}
// InstallAndInstantiateCC install and instantiate using resource management client
func InstallAndInstantiateCC(sdk *fabsdk.FabricSDK, user fabsdk.ContextOption, orgName string, ccName, ccPath, ccVersion, goPath string, ccArgs [][]byte) (resmgmt.InstantiateCCResponse, error) {
ccPkg, err := packager.NewCCPackage(ccPath, goPath)
if err != nil {
return resmgmt.InstantiateCCResponse{}, errors.WithMessage(err, "creating chaincode package failed")
}
configBackend, err := sdk.Config()
if err != nil {
return resmgmt.InstantiateCCResponse{}, errors.WithMessage(err, "failed to get config backend")
}
endpointConfig, err := fab.ConfigFromBackend(configBackend)
if err != nil {
return resmgmt.InstantiateCCResponse{}, errors.WithMessage(err, "failed to get endpoint config")
}
mspID, ok := comm.MSPID(endpointConfig, orgName)
if !ok {
return resmgmt.InstantiateCCResponse{}, errors.New("looking up MSP ID failed")
}
//prepare context
clientContext := sdk.Context(user, fabsdk.WithOrg(orgName))
// Resource management client is responsible for managing resources (joining channels, install/instantiate/upgrade chaincodes)
resMgmtClient, err := resmgmt.New(clientContext)
if err != nil {
return resmgmt.InstantiateCCResponse{}, errors.WithMessage(err, "Failed to create new resource management client")
}
_, err = resMgmtClient.InstallCC(resmgmt.InstallCCRequest{Name: ccName, Path: ccPath, Version: ccVersion, Package: ccPkg}, resmgmt.WithRetry(retry.DefaultResMgmtOpts))
if err != nil {
return resmgmt.InstantiateCCResponse{}, err
}
ccPolicy := cauthdsl.SignedByMspMember(mspID)
return resMgmtClient.InstantiateCC("mychannel", resmgmt.InstantiateCCRequest{Name: ccName, Path: ccPath, Version: ccVersion, Args: ccArgs, Policy: ccPolicy}, resmgmt.WithRetry(retry.DefaultResMgmtOpts))
}
// OrgContext provides SDK client context for a given org
type OrgContext struct {
OrgID string
CtxProvider contextAPI.ClientProvider
SigningIdentity msp.SigningIdentity
ResMgmt *resmgmt.Client
Peers []fabAPI.Peer
AnchorPeerConfigFile string
}
// CreateChannelAndUpdateAnchorPeers creates the channel and updates all of the anchor peers for all orgs
func CreateChannelAndUpdateAnchorPeers(sdk *fabsdk.FabricSDK, channelID string, channelConfigFile string, orgsContext []*OrgContext) error {
ordererCtx := sdk.Context(fabsdk.WithUser(AdminUser), fabsdk.WithOrg(OrdererOrgName))
// Channel management client is responsible for managing channels (create/update channel)
chMgmtClient, err := resmgmt.New(ordererCtx)
if err != nil {
return errors.New("failed to get a new resmgmt client for orderer")
}
var signingIdentities []msp.SigningIdentity
for _, orgCtx := range orgsContext {
signingIdentities = append(signingIdentities, orgCtx.SigningIdentity)
}
req := resmgmt.SaveChannelRequest{
ChannelID: channelID,
ChannelConfigPath: path.Join("../../../", metadata.ChannelConfigPath, channelConfigFile),
SigningIdentities: signingIdentities,
}
_, err = chMgmtClient.SaveChannel(req, resmgmt.WithRetry(retry.DefaultResMgmtOpts), resmgmt.WithOrdererEndpoint("orderer.example.com"))
if err != nil {
return err
}
for _, orgCtx := range orgsContext {
req := resmgmt.SaveChannelRequest{
ChannelID: channelID,
ChannelConfigPath: path.Join("../../../", metadata.ChannelConfigPath, orgCtx.AnchorPeerConfigFile),
SigningIdentities: []msp.SigningIdentity{orgCtx.SigningIdentity},
}
if _, err := orgCtx.ResMgmt.SaveChannel(req, resmgmt.WithRetry(retry.DefaultResMgmtOpts), resmgmt.WithOrdererEndpoint("orderer.example.com")); err != nil {
return err
}
}
return nil
}
// JoinPeersToChannel joins all peers in all of the given orgs to the given channel
func | (channelID string, orgsContext []*OrgContext) error {
for _, orgCtx := range orgsContext {
err := orgCtx.ResMgmt.JoinChannel(
channelID,
resmgmt.WithRetry(retry.DefaultResMgmtOpts),
resmgmt.WithOrdererEndpoint("orderer.example.com"),
resmgmt.WithTargets(orgCtx.Peers...),
)
if err != nil {
return errors.Wrapf(err, "failed to join peers in org [%s] to channel [%s]", orgCtx.OrgID, channelID)
}
}
return nil
}
// InstallAndInstantiateChaincode installs the given chaincode to all peers in the given orgs and instantiates it on the given channel
func InstallAndInstantiateChaincode(channelID string, ccPkg *resource.CCPackage, ccID, ccVersion, ccPolicy string, orgs []*OrgContext, collConfigs ...*cb.CollectionConfig) error {
for _, orgCtx := range orgs {
if err := InstallChaincode(orgCtx.ResMgmt, orgCtx.CtxProvider, ccPkg, ccID, ccVersion, orgCtx.Peers); err != nil {
return errors.Wrapf(err, "failed to install chaincode to peers in org [%s]", orgCtx.OrgID)
}
}
_, err := InstantiateChaincode(orgs[0].ResMgmt, channelID, ccID, ccVersion, ccPolicy, collConfigs...)
return err
}
// InstallChaincode installs the given chaincode to the given peers
func InstallChaincode(resMgmt *resmgmt.Client, ctxProvider contextAPI.ClientProvider, ccPkg *resource.CCPackage, ccName, ccVersion string, localPeers []fabAPI.Peer) error {
installCCReq := resmgmt.InstallCCRequest{Name: ccName, Path: "github.com/example_cc", Version: ccVersion, Package: ccPkg}
_, err := resMgmt.InstallCC(installCCReq, resmgmt.WithRetry(retry.DefaultResMgmtOpts))
return err
}
// InstantiateChaincode instantiates the given chaincode to the given channel
func InstantiateChaincode(resMgmt *resmgmt.Client, channelID, ccName, ccVersion string, ccPolicyStr string, collConfigs ...*cb.CollectionConfig) (resmgmt.InstantiateCCResponse, error) {
ccPolicy, err := cauthdsl.FromString(ccPolicyStr)
if err != nil {
return resmgmt.InstantiateCCResponse{}, errors.Wrapf(err, "error creating CC policy [%s]", ccPolicyStr)
}
return resMgmt.InstantiateCC(
channelID,
resmgmt.InstantiateCCRequest{
Name: ccName,
Path: "github.com/example_cc",
Version: ccVersion,
Args: ExampleCCInitArgs(),
Policy: ccPolicy,
CollConfig: collConfigs,
},
resmgmt.WithRetry(retry.DefaultResMgmtOpts),
)
}
// DiscoverLocalPeers queries the local peers for the given MSP context and returns all of the peers. If
// the number of peers does not match the expected number then an error is returned.
func DiscoverLocalPeers(ctxProvider contextAPI.ClientProvider, expectedPeers int) ([]fabAPI.Peer, error) {
ctx, err := contextImpl.NewLocal(ctxProvider)
if err != nil {
return nil, errors.Wrap(err, "error creating local context")
}
var peers []fabAPI.Peer
for i := 0; i < 10; i++ {
peers, err = ctx.LocalDiscoveryService().GetPeers()
if err != nil {
return nil, errors.Wrapf(err, "error getting peers for MSP [%s]", ctx.Identifier().MSPID)
}
if len(peers) >= expectedPeers {
break
}
// wait some time to allow the gossip to propagate the peers discovery
time.Sleep(3 * time.Second)
}
if expectedPeers != len(peers) {
return nil, errors.Errorf("Expecting %d peers but got %d", expectedPeers, len(peers))
}
return peers, nil
}
| JoinPeersToChannel |
menustate.go | package main
import (
"github.com/samuelyuan/openbiohazard2/client"
"github.com/samuelyuan/openbiohazard2/fileio"
"github.com/samuelyuan/openbiohazard2/game"
"github.com/samuelyuan/openbiohazard2/gui"
"github.com/samuelyuan/openbiohazard2/render"
)
const (
GAME_STATE_MAIN_MENU = 0
GAME_STATE_MAIN_GAME = 1
GAME_STATE_INVENTORY = 2
GAME_STATE_LOAD_SAVE = 3
GAME_STATE_SPECIAL_MENU = 4
STATE_CHANGE_DELAY = 0.2 // in seconds
)
type GameStateManager struct {
GameState int
ImageResourcesLoaded bool
LastTimeChangeState float64
}
type MainMenuStateInput struct {
RenderDef *render.RenderDef
MenuBackgroundImage *render.Image16Bit
MenuTextImages []*render.Image16Bit
Menu *gui.Menu
}
type InventoryStateInput struct {
RenderDef *render.RenderDef
InventoryMenuImages []*render.Image16Bit
InventoryItemImages []*render.Image16Bit
InventoryMenu *gui.InventoryMenu
}
func NewGameStateManager() *GameStateManager {
return &GameStateManager{
GameState: GAME_STATE_MAIN_MENU,
ImageResourcesLoaded: false,
LastTimeChangeState: windowHandler.GetCurrentTime(),
}
}
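// CanUpdateGameState reports whether the debounce delay has elapsed since the
// last state change, so a single key press cannot trigger two transitions.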
func (gameStateManager *GameStateManager) CanUpdateGameState() bool {
return windowHandler.GetCurrentTime()-gameStateManager.LastTimeChangeState >= STATE_CHANGE_DELAY
}
func (gameStateManager *GameStateManager) UpdateGameState(newGameState int) {
gameStateManager.GameState = newGameState
gameStateManager.ImageResourcesLoaded = false
}
func (gameStateManager *GameStateManager) UpdateLastTimeChangeState() {
gameStateManager.LastTimeChangeState = windowHandler.GetCurrentTime()
}
func NewInventoryStateInput(renderDef *render.RenderDef) *InventoryStateInput {
inventoryMenuImagesTIMOutput, _ := fileio.LoadTIMImages(game.INVENTORY_FILE)
inventoryMenuImages := make([]*render.Image16Bit, len(inventoryMenuImagesTIMOutput))
for i := 0; i < len(inventoryMenuImages); i++ {
inventoryMenuImages[i] = render.ConvertPixelsToImage16Bit(inventoryMenuImagesTIMOutput[i].PixelData)
}
inventoryItemImagesTIMOutput, _ := fileio.LoadTIMImages(game.ITEMALL_FILE)
inventoryItemImages := make([]*render.Image16Bit, len(inventoryItemImagesTIMOutput))
for i := 0; i < len(inventoryItemImages); i++ {
inventoryItemImages[i] = render.ConvertPixelsToImage16Bit(inventoryItemImagesTIMOutput[i].PixelData)
}
return &InventoryStateInput{
RenderDef: renderDef,
InventoryMenuImages: inventoryMenuImages,
InventoryItemImages: inventoryItemImages,
InventoryMenu: gui.NewInventoryMenu(),
}
}
func handleInventory(inventoryStateInput *InventoryStateInput, gameStateManager *GameStateManager) {
renderDef := inventoryStateInput.RenderDef
inventoryMenuImages := inventoryStateInput.InventoryMenuImages
inventoryItemImages := inventoryStateInput.InventoryItemImages
inventoryMenu := inventoryStateInput.InventoryMenu
if !gameStateManager.ImageResourcesLoaded {
inventoryMenu.Reset()
gameStateManager.ImageResourcesLoaded = true
gameStateManager.UpdateLastTimeChangeState()
}
if windowHandler.InputHandler.IsActive(client.PLAYER_VIEW_INVENTORY) {
if gameStateManager.CanUpdateGameState() {
gameStateManager.UpdateGameState(GAME_STATE_MAIN_GAME)
gameStateManager.UpdateLastTimeChangeState()
}
}
if windowHandler.InputHandler.IsActive(client.ACTION_BUTTON) {
if gameStateManager.CanUpdateGameState() {
if inventoryMenu.IsCursorOnTopMenu() {
if inventoryMenu.IsTopMenuExit() {
gameStateManager.UpdateGameState(GAME_STATE_MAIN_GAME)
} else if inventoryMenu.IsTopMenuCursorOnItems() {
inventoryMenu.SetEditItemScreen()
}
}
gameStateManager.UpdateLastTimeChangeState()
}
}
if gameStateManager.CanUpdateGameState() {
inventoryMenu.HandleSwitchMenuOption(windowHandler)
gameStateManager.UpdateLastTimeChangeState()
}
timeElapsedSeconds := windowHandler.GetTimeSinceLastFrame()
renderDef.GenerateInventoryImage(inventoryMenuImages, inventoryItemImages, inventoryMenu, timeElapsedSeconds)
renderDef.RenderSolidVideoBuffer()
}
func handleMainMenu(mainMenuStateInput *MainMenuStateInput, gameStateManager *GameStateManager) {
renderDef := mainMenuStateInput.RenderDef
if !gameStateManager.ImageResourcesLoaded {
menuBackgroundImageADTOutput := fileio.LoadADTFile(game.MENU_IMAGE_FILE)
menuBackgroundImage := render.ConvertPixelsToImage16Bit(menuBackgroundImageADTOutput.PixelData)
menuBackgroundTextImagesTIMOutput, _ := fileio.LoadTIMImages(game.MENU_TEXT_FILE)
menuTextImages := make([]*render.Image16Bit, len(menuBackgroundTextImagesTIMOutput))
for i := 0; i < len(menuBackgroundTextImagesTIMOutput); i++ {
menuTextImages[i] = render.ConvertPixelsToImage16Bit(menuBackgroundTextImagesTIMOutput[i].PixelData)
}
mainMenuStateInput.MenuBackgroundImage = menuBackgroundImage
mainMenuStateInput.MenuTextImages = menuTextImages
mainMenuStateInput.Menu.CurrentOption = 0
renderDef.UpdateMainMenu(mainMenuStateInput.MenuBackgroundImage, mainMenuStateInput.MenuTextImages,
mainMenuStateInput.Menu.CurrentOption)
gameStateManager.ImageResourcesLoaded = true
gameStateManager.UpdateLastTimeChangeState()
}
renderDef.RenderTransparentVideoBuffer()
if gameStateManager.CanUpdateGameState() {
mainMenuStateInput.Menu.HandleMenuEvent(windowHandler)
if mainMenuStateInput.Menu.IsOptionSelected {
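// Option 0 opens the load/save screen, 1 starts the main game, 2 opens the special menu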
if mainMenuStateInput.Menu.CurrentOption == 0 {
gameStateManager.UpdateGameState(GAME_STATE_LOAD_SAVE)
gameStateManager.UpdateLastTimeChangeState()
} else if mainMenuStateInput.Menu.CurrentOption == 1 {
gameStateManager.UpdateGameState(GAME_STATE_MAIN_GAME)
gameStateManager.UpdateLastTimeChangeState()
} else if mainMenuStateInput.Menu.CurrentOption == 2 {
gameStateManager.UpdateGameState(GAME_STATE_SPECIAL_MENU)
gameStateManager.UpdateLastTimeChangeState()
}
mainMenuStateInput.Menu.IsOptionSelected = false
} else if mainMenuStateInput.Menu.IsNewOption {
renderDef.UpdateMainMenu(mainMenuStateInput.MenuBackgroundImage, mainMenuStateInput.MenuTextImages,
mainMenuStateInput.Menu.CurrentOption)
gameStateManager.UpdateLastTimeChangeState()
mainMenuStateInput.Menu.IsNewOption = false
}
}
}
func handleLoadSave(renderDef *render.RenderDef, gameStateManager *GameStateManager) {
if !gameStateManager.ImageResourcesLoaded {
// Initialize load save screen
saveScreenImageADTOutput := fileio.LoadADTFile(game.SAVE_SCREEN_FILE)
saveScreenImageRender := render.ConvertPixelsToImage16Bit(saveScreenImageADTOutput.PixelData)
renderDef.GenerateSaveScreenImage(saveScreenImageRender)
gameStateManager.ImageResourcesLoaded = true
gameStateManager.UpdateLastTimeChangeState()
}
renderDef.RenderTransparentVideoBuffer()
if windowHandler.InputHandler.IsActive(client.ACTION_BUTTON) {
if gameStateManager.CanUpdateGameState() {
gameStateManager.UpdateGameState(GAME_STATE_MAIN_MENU)
gameStateManager.UpdateLastTimeChangeState()
}
}
}
func handleSpecialMenu(specialMenuStateInput *MainMenuStateInput, gameStateManager *GameStateManager) {
renderDef := specialMenuStateInput.RenderDef
if !gameStateManager.ImageResourcesLoaded {
menuBackgroundImageADTOutput := fileio.LoadADTFile(game.MENU_IMAGE_FILE)
menuBackgroundImage := render.ConvertPixelsToImage16Bit(menuBackgroundImageADTOutput.PixelData)
menuBackgroundTextImagesTIMOutput, _ := fileio.LoadTIMImages(game.MENU_TEXT_FILE)
menuTextImages := make([]*render.Image16Bit, len(menuBackgroundTextImagesTIMOutput))
for i := 0; i < len(menuBackgroundTextImagesTIMOutput); i++ {
menuTextImages[i] = render.ConvertPixelsToImage16Bit(menuBackgroundTextImagesTIMOutput[i].PixelData)
}
specialMenuStateInput.MenuBackgroundImage = menuBackgroundImage
specialMenuStateInput.MenuTextImages = menuTextImages |
gameStateManager.ImageResourcesLoaded = true
gameStateManager.UpdateLastTimeChangeState()
}
renderDef.RenderTransparentVideoBuffer()
if gameStateManager.CanUpdateGameState() {
specialMenuStateInput.Menu.HandleMenuEvent(windowHandler)
if specialMenuStateInput.Menu.IsOptionSelected {
if specialMenuStateInput.Menu.CurrentOption == 0 {
// TODO: Load gallery
} else if specialMenuStateInput.Menu.CurrentOption == 1 {
// Exit
gameStateManager.UpdateGameState(GAME_STATE_MAIN_MENU)
gameStateManager.UpdateLastTimeChangeState()
}
specialMenuStateInput.Menu.IsOptionSelected = false
} else if specialMenuStateInput.Menu.IsNewOption {
renderDef.UpdateSpecialMenu(specialMenuStateInput.MenuBackgroundImage, specialMenuStateInput.MenuTextImages,
specialMenuStateInput.Menu.CurrentOption)
gameStateManager.UpdateLastTimeChangeState()
specialMenuStateInput.Menu.IsNewOption = false
}
}
} | specialMenuStateInput.Menu.CurrentOption = 0
renderDef.UpdateSpecialMenu(specialMenuStateInput.MenuBackgroundImage, specialMenuStateInput.MenuTextImages,
specialMenuStateInput.Menu.CurrentOption) |
Notifications.js | const { html, Component } = require('htm/preact');
const { dispatch } = require('../lib/state/zeroFux');
const timeout = require('../lib/timeout');
// Notification component.
class | extends Component {
state = { fadeOut: false };
async componentDidMount() {
const { time } = this.props.notification;
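// Show the notification for 3 seconds, fade it out over another 3 seconds,
// then remove it from state (keyed by the notification's `time`).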
await timeout(3000);
this.setState({ fadeOut: true });
await timeout(3000);
dispatch({
type: 'removeNotification',
payload: time,
});
}
render(props, state) {
const { notification } = props;
const { fadeOut } = state;
return html`
<div class=${`notification${fadeOut ? ' fade-out' : ''}`}>
<h1>Note</h1>
${notification.message}
</div>
`;
}
}
// Notifications container.
class Notifications extends Component {
render(props) {
const { notifications } = props;
return html`
<div class="notifications">
${notifications.length > 0 &&
notifications.map(
notification =>
html`
<${Notification} notification=${notification} />
`
)}
</div>
`;
}
}
module.exports = Notifications;
| Notification |
Home.js | import React, { Component } from 'react'
import { Container, Icon, Segment, Header } from 'semantic-ui-react'
import { Link } from 'react-router-dom'
export default class | extends Component {
render() {
return (
<Container className="page">
<Header as='h1'>
<Icon name='rocket'/>
Welcome to PNGR!
</Header>
<Segment>
<p>This is a boilerplate app using React for the front-end, and Golang + Postgres for the backend.</p>
<p>The only things implemented are...</p>
<ul>
<li>Account Creation</li>
<li>Session Management</li>
<li><b>CR</b>eate<b>U</b>pdate<b>D</b>elete for simple "posts"</li>
</ul>
<p><Link to="/signup">Sign Up</Link> to see how sessions work and create/view secured posts.</p>
</Segment>
</Container>
);
}
} | Home |
difficulty.pipe.ts | import { Pipe, PipeTransform } from '@angular/core'; | import { Difficulty } from '../enums/difficulty.enum';
@Pipe({
name: 'difficulty',
})
export class DifficultyPipe implements PipeTransform {
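// Maps a numeric Difficulty enum value to its string name via TypeScript's
// reverse enum lookup, falling back to the raw value when there is no match.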
transform(value: any, args?: any): any {
return Difficulty[value] || value;
}
} | |
mod.rs | // Unless explicitly stated otherwise all files in this repository are licensed
// under the MIT/Apache-2.0 License, at your convenience
//
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2020 Datadog, Inc.
//
//! Async executor.
//!
//! This crate offers two kinds of executors: single-threaded and
//! multi-threaded.
//!
//! # Examples
//!
//! Run four single-threaded executors concurrently:
//!
//! ```
//! use glommio::{
//! timer::Timer,
//! LocalExecutor,
//! LocalExecutorBuilder,
//! LocalExecutorPoolBuilder,
//! PoolPlacement,
//! };
//!
//! LocalExecutorPoolBuilder::new(PoolPlacement::Unbound(4))
//! .on_all_shards(move || async {
//! Timer::new(std::time::Duration::from_millis(100)).await;
//! println!("Hello world!");
//! })
//! .expect("failed to spawn local executors")
//! .join_all();
//! ```
#![warn(missing_docs, missing_debug_implementations)]
mod latch;
mod multitask;
mod placement;
use latch::{Latch, LatchState};
pub use placement::{CpuSet, Placement, PoolPlacement};
use tracing::trace;
use std::{
cell::RefCell,
collections::{hash_map::Entry, BinaryHeap},
future::Future,
io,
marker::PhantomData,
pin::Pin,
rc::Rc,
sync::Arc,
task::{Context, Poll},
thread::{Builder, JoinHandle},
time::{Duration, Instant},
};
use futures_lite::pin;
use scoped_tls::scoped_thread_local;
use crate::{
error::BuilderErrorKind,
parking,
reactor,
sys,
task::{self, waker_fn::dummy_waker},
GlommioError,
IoRequirements,
IoStats,
Latency,
Reactor,
Shares,
};
use ahash::AHashMap;
/// Result type alias that removes the need to specify a type parameter
/// that's only valid in the channel variants of the error. Otherwise, it
/// might be confused with the error (`E`) that a result usually has in
/// the second type parameter.
type Result<T> = crate::Result<T, ()>;
scoped_thread_local!(static LOCAL_EX: LocalExecutor);
/// Returns a proxy struct to the [`LocalExecutor`]
pub fn executor() -> ExecutorProxy {
ExecutorProxy {}
}
pub(crate) fn executor_id() -> Option<usize> {
if LOCAL_EX.is_set() {
Some(LOCAL_EX.with(|ex| ex.id))
} else {
None
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
/// An opaque handle indicating in which queue a group of tasks will execute.
/// Tasks in the same group will execute in FIFO order but no guarantee is made
/// about ordering on different task queues.
pub struct TaskQueueHandle {
index: usize,
}
impl Default for TaskQueueHandle {
fn default() -> Self {
TaskQueueHandle { index: 0 }
}
}
impl TaskQueueHandle {
/// Returns a numeric ID that uniquely identifies this Task queue
pub fn index(&self) -> usize {
self.index
}
}
#[derive(Debug)]
pub(crate) struct TaskQueue {
pub(crate) ex: Rc<multitask::LocalExecutor>,
active: bool,
shares: Shares,
vruntime: u64,
io_requirements: IoRequirements,
name: String,
last_adjustment: Instant,
// for dynamic shares classes
yielded: bool,
stats: TaskQueueStats,
}
// Impl a custom order so we use a min-heap
impl Ord for TaskQueue {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
other.vruntime.cmp(&self.vruntime)
}
}
impl PartialOrd for TaskQueue {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(other.vruntime.cmp(&self.vruntime))
}
}
impl PartialEq for TaskQueue {
fn eq(&self, other: &Self) -> bool {
self.vruntime == other.vruntime
}
}
impl Eq for TaskQueue {}
impl TaskQueue {
fn new<S>(
index: TaskQueueHandle,
name: S,
shares: Shares,
ioreq: IoRequirements,
) -> Rc<RefCell<Self>>
where
S: Into<String>,
{
Rc::new(RefCell::new(TaskQueue {
ex: Rc::new(multitask::LocalExecutor::new()),
active: false,
stats: TaskQueueStats::new(index, shares.reciprocal_shares()),
shares,
vruntime: 0,
io_requirements: ioreq,
name: name.into(),
last_adjustment: Instant::now(),
yielded: false,
}))
}
fn is_active(&self) -> bool {
self.active
}
fn get_task(&mut self) -> Option<multitask::Runnable> {
self.ex.get_task()
}
fn yielded(&self) -> bool {
self.yielded
}
fn prepare_to_run(&mut self, now: Instant) {
self.yielded = false;
if let Shares::Dynamic(bm) = &self.shares {
if now.saturating_duration_since(self.last_adjustment) > bm.adjustment_period() {
self.last_adjustment = now;
self.stats.reciprocal_shares = self.shares.reciprocal_shares();
}
}
}
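// Scales the elapsed wall-clock time by the queue's reciprocal shares to
// advance its vruntime: queues with more shares accrue vruntime more slowly
// and are therefore selected more often by the min-heap. Returns `None` if
// the vruntime counter would overflow, so the caller can reset all queues.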
fn account_vruntime(&mut self, delta: Duration) -> Option<u64> {
let delta_scaled = (self.stats.reciprocal_shares * (delta.as_nanos() as u64)) >> 12;
self.stats.runtime += delta;
self.stats.queue_selected += 1;
self.active = self.ex.is_active();
let vruntime = self.vruntime.checked_add(delta_scaled);
if let Some(x) = vruntime {
self.vruntime = x;
}
vruntime
}
}
fn bind_to_cpu_set(cpus: impl IntoIterator<Item = usize>) -> Result<()> {
let mut cpuset = nix::sched::CpuSet::new();
for cpu in cpus {
cpuset.set(cpu).map_err(|e| to_io_error!(e))?;
}
let pid = nix::unistd::Pid::from_raw(0);
nix::sched::sched_setaffinity(pid, &cpuset).map_err(|e| Into::into(to_io_error!(e)))
}
// Dealing with references would imply getting an Rc, RefCells, and all of
// that. Stats should be copied infrequently, and if you have enough stats to
// fill a KiB with data from a single source, maybe you should rethink your
// life choices.
#[derive(Debug, Copy, Clone)]
/// Allows information about the current state of this executor to be consumed
/// by applications.
pub struct ExecutorStats {
executor_runtime: Duration,
// total_runtime includes poll_io time but excludes spin loop time
total_runtime: Duration,
scheduler_runs: u64,
tasks_executed: u64,
}
impl ExecutorStats {
fn new() -> Self {
Self {
executor_runtime: Duration::from_nanos(0),
total_runtime: Duration::from_nanos(0),
scheduler_runs: 0,
tasks_executed: 0,
}
}
/// The total amount of runtime in this executor so far.
///
/// This is especially important for spinning executors, since the CPU time
/// reported by the operating system can be a far cry from the CPU time the
/// executor actually spent executing. Time spent sleeping or spinning is not
/// accounted for here.
pub fn executor_runtime(&self) -> Duration {
self.executor_runtime
}
/// The total amount of runtime in this executor, plus poll io time
pub fn total_runtime(&self) -> Duration {
self.total_runtime
}
/// Returns the number of times the scheduler loop was called. The Glommio
/// scheduler selects a task queue to run and then runs many tasks from that
/// task queue. This number corresponds to the number of times the scheduler
/// was called upon to select a new queue.
pub fn scheduler_runs(&self) -> u64 {
self.scheduler_runs
}
/// Returns the number of tasks executed in the system, across all queues.
pub fn tasks_executed(&self) -> u64 {
self.tasks_executed
}
}
#[derive(Debug, Copy, Clone)]
/// Allows information about the current state of a particular task queue to be
/// consumed by applications.
pub struct TaskQueueStats {
index: TaskQueueHandle,
// so we can easily produce a handle
reciprocal_shares: u64,
queue_selected: u64,
runtime: Duration,
}
impl TaskQueueStats {
fn new(index: TaskQueueHandle, reciprocal_shares: u64) -> Self {
Self {
index,
reciprocal_shares,
runtime: Duration::from_nanos(0),
queue_selected: 0,
}
}
/// Returns a numeric ID that uniquely identifies this Task queue
pub fn index(&self) -> TaskQueueHandle {
self.index
}
/// Returns the current number of shares in this task queue.
///
/// If the task queue is configured to use static shares this will never
/// change. If the task queue is configured to use dynamic shares, this
/// returns a sample of the shares values the last time the scheduler
/// ran.
pub fn current_shares(&self) -> usize {
((1u64 << 22) / self.reciprocal_shares) as usize
}
/// Returns the accumulated runtime this task queue had received since the
/// beginning of its execution
pub fn runtime(&self) -> Duration {
self.runtime
}
/// Returns the number of times this queue was selected to be executed. In
/// conjunction with the runtime, this can be used to derive the average
/// amount of time this queue tends to run for when selected.
pub fn queue_selected(&self) -> u64 {
self.queue_selected
}
}
#[derive(Debug)]
struct ExecutorQueues {
active_executors: BinaryHeap<Rc<RefCell<TaskQueue>>>,
available_executors: AHashMap<usize, Rc<RefCell<TaskQueue>>>,
active_executing: Option<Rc<RefCell<TaskQueue>>>,
executor_index: usize,
last_vruntime: u64,
preempt_timer_duration: Duration,
default_preempt_timer_duration: Duration,
spin_before_park: Option<Duration>,
stats: ExecutorStats,
}
impl ExecutorQueues {
fn new(preempt_timer_duration: Duration, spin_before_park: Option<Duration>) -> Self {
ExecutorQueues {
active_executors: BinaryHeap::new(),
available_executors: AHashMap::new(),
active_executing: None,
executor_index: 1, // 0 is the default
last_vruntime: 0,
preempt_timer_duration,
default_preempt_timer_duration: preempt_timer_duration,
spin_before_park,
stats: ExecutorStats::new(),
}
}
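// The preemption period is the smallest latency requirement among the active
// task queues, falling back to the default when no queue declares one.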
fn reevaluate_preempt_timer(&mut self) {
self.preempt_timer_duration = self
.active_executors
.iter()
.map(|tq| match tq.borrow().io_requirements.latency_req {
Latency::NotImportant => self.default_preempt_timer_duration,
Latency::Matters(d) => d,
})
.min()
.unwrap_or(self.default_preempt_timer_duration)
}
fn maybe_activate(&mut self, queue: Rc<RefCell<TaskQueue>>) {
let mut state = queue.borrow_mut();
if !state.is_active() {
state.vruntime = self.last_vruntime;
state.active = true;
drop(state);
self.active_executors.push(queue);
self.reevaluate_preempt_timer();
}
}
}
/// A factory that can be used to configure and create a [`LocalExecutor`].
///
/// Methods can be chained on it in order to configure it.
///
/// The [`spawn`] method will take ownership of the builder and create a
/// `Result` to the [`LocalExecutor`] handle with the given configuration.
///
/// The [`LocalExecutor::default`] free function uses a Builder with default
/// configuration and unwraps its return value.
///
/// You may want to use [`LocalExecutorBuilder::spawn`] instead of
/// [`LocalExecutor::default`] when you want to recover from a failure to
/// launch a thread. The [`LocalExecutor::default`] function panics, whereas
/// the Builder method returns an `io::Result`.
///
/// # Examples
///
/// ```
/// use glommio::LocalExecutorBuilder;
///
/// let builder = LocalExecutorBuilder::default();
/// let ex = builder.make().unwrap();
/// ```
///
/// [`LocalExecutor`]: struct.LocalExecutor.html
///
/// [`LocalExecutor::default`]: struct.LocalExecutor.html#method.default
///
/// [`LocalExecutorBuilder::spawn`]:
/// struct.LocalExecutorBuilder.html#method.spawn
///
/// [`spawn`]: struct.LocalExecutorBuilder.html#method.spawn
#[derive(Debug)]
pub struct LocalExecutorBuilder {
/// The placement policy for the [`LocalExecutor`] to create
placement: Placement,
/// Spin for duration before parking a reactor
spin_before_park: Option<Duration>,
/// A name for the thread-to-be (if any), for identification in panic
/// messages
name: String,
/// Amount of memory to reserve for storage I/O. This will be preallocated
/// and registered with io_uring. It is still possible to use more than
/// that, but it will come from the standard allocator and performance
/// will suffer. Defaults to 10 MiB.
io_memory: usize,
/// How often to yield to other task queues
preempt_timer_duration: Duration,
}
impl LocalExecutorBuilder {
/// Generates the base configuration for spawning a [`LocalExecutor`], from
/// which configuration methods can be chained.
/// The method's only argument is the [`Placement`] policy by which the
/// [`LocalExecutor`] is bound to the machine's hardware topology. i.e.
/// how many and which CPUs to use.
pub fn new(placement: Placement) -> LocalExecutorBuilder {
LocalExecutorBuilder {
placement,
spin_before_park: None,
name: String::from("unnamed"),
io_memory: 10 << 20,
preempt_timer_duration: Duration::from_millis(100),
}
}
/// Spin for duration before parking a reactor
pub fn spin_before_park(mut self, spin: Duration) -> LocalExecutorBuilder {
self.spin_before_park = Some(spin);
self
}
/// Names the thread-to-be. Currently, the name is used for identification
/// only in panic messages.
pub fn name(mut self, name: &str) -> LocalExecutorBuilder {
self.name = String::from(name);
self
}
/// Amount of memory to reserve for storage I/O. This will be preallocated
/// and registered with io_uring. It is still possible to use more than
/// that, but it will come from the standard allocator and performance
/// will suffer.
///
/// The system will always try to allocate at least 64 KiB for I/O memory,
/// and the default is 10 MiB.
pub fn io_memory(mut self, io_memory: usize) -> LocalExecutorBuilder {
self.io_memory = io_memory;
self
}
/// How often [`need_preempt`] will return true by default.
///
/// Lower values mean task queues will switch execution more often, which
/// can help latency but harm throughput. When individual task queues
/// are present, this value can still be dynamically lowered through the
/// [`Latency`] setting.
///
/// Default is 100ms.
///
/// [`need_preempt`]: ExecutorProxy::need_preempt
/// [`Latency`]: crate::Latency
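///
/// # Examples
///
/// A minimal sketch of lowering the preemption period for a latency-sensitive
/// workload:
///
/// ```
/// use glommio::LocalExecutorBuilder;
/// use std::time::Duration;
///
/// let ex = LocalExecutorBuilder::default()
/// .preempt_timer(Duration::from_millis(10))
/// .make()
/// .unwrap();
/// ```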
pub fn preempt_timer(mut self, dur: Duration) -> LocalExecutorBuilder {
self.preempt_timer_duration = dur;
self
}
/// Make a new [`LocalExecutor`] by taking ownership of the Builder, and
/// returns a [`Result`](crate::Result) to the executor.
/// # Examples
///
/// ```
/// use glommio::LocalExecutorBuilder;
///
/// let local_ex = LocalExecutorBuilder::default().make().unwrap();
/// ```
pub fn make(self) -> Result<LocalExecutor> {
let notifier = sys::new_sleep_notifier()?;
let mut cpu_set_gen = placement::CpuSetGenerator::one(self.placement)?;
let mut le = LocalExecutor::new(
notifier,
self.io_memory,
self.preempt_timer_duration,
cpu_set_gen.next().cpu_binding(),
self.spin_before_park,
)?;
le.init();
Ok(le)
}
/// Spawn a new [`LocalExecutor`] in a new thread with a given task.
///
/// This `spawn` function is an ergonomic shortcut for calling
/// `std::thread::spawn`, [`LocalExecutorBuilder::make`] in the spawned
/// thread, and then [`LocalExecutor::run`]. This `spawn` function takes
/// ownership of a [`LocalExecutorBuilder`] with the configuration for
/// the [`LocalExecutor`], spawns that executor in a new thread, and starts
/// the task given by `fut_gen()` in that thread.
///
/// The indirection of `fut_gen()` here (instead of taking a `Future`)
/// allows for futures that may not be `Send`-able once started. As this
/// executor is pinned to a single thread, it can guarantee that the futures
/// will not be sent to another thread once started.
///
/// # Panics
///
/// The newly spawned thread panics if creating the executor fails. If you
/// need more fine-grained error handling consider initializing those
/// entities manually.
///
/// # Example
///
/// ```
/// use glommio::LocalExecutorBuilder;
///
/// let handle = LocalExecutorBuilder::default()
/// .spawn(|| async move {
/// println!("hello");
/// })
/// .unwrap();
///
/// handle.join().unwrap();
/// ```
///
/// [`LocalExecutor`]: struct.LocalExecutor.html
///
/// [`LocalExecutorBuilder`]: struct.LocalExecutorBuilder.html
///
/// [`LocalExecutorBuilder::make`]:
/// struct.LocalExecutorBuilder.html#method.make
///
/// [`LocalExecutor::run`]:struct.LocalExecutor.html#method.run
#[must_use = "This spawns an executor on a thread, so you may need to call \
`JoinHandle::join()` to keep the main thread alive"]
pub fn spawn<G, F, T>(self, fut_gen: G) -> Result<JoinHandle<()>>
where
G: FnOnce() -> F + Send + 'static,
F: Future<Output = T> + 'static,
{
let notifier = sys::new_sleep_notifier()?;
let name = format!("{}-{}", self.name, notifier.id());
let mut cpu_set_gen = placement::CpuSetGenerator::one(self.placement)?;
let io_memory = self.io_memory;
let preempt_timer_duration = self.preempt_timer_duration;
let spin_before_park = self.spin_before_park;
Builder::new()
.name(name)
.spawn(move || {
let mut le = LocalExecutor::new(
notifier,
io_memory,
preempt_timer_duration,
cpu_set_gen.next().cpu_binding(),
spin_before_park,
)
.unwrap();
le.init();
le.run(async move {
fut_gen().await;
})
})
.map_err(Into::into)
}
}
impl Default for LocalExecutorBuilder {
fn default() -> Self {
Self::new(Placement::Unbound)
}
}
/// A factory to configure and create a pool of [`LocalExecutor`]s.
///
/// Configuration methods apply their settings to all [`LocalExecutor`]s in the
/// pool unless otherwise specified. Methods can be chained on the builder in
/// order to configure it. The [`Self::on_all_shards`] method will take
/// ownership of the builder and create a [`PoolThreadHandles`] struct which can
/// be used to join the executor threads.
///
/// # Example
///
/// ```
/// use glommio::{LocalExecutorPoolBuilder, PoolPlacement};
///
/// let handles = LocalExecutorPoolBuilder::new(PoolPlacement::Unbound(4))
/// .on_all_shards(|| async move {
/// let id = glommio::executor().id();
/// println!("hello from executor {}", id);
/// })
/// .unwrap();
///
/// handles.join_all();
/// ```
#[derive(Debug)]
pub struct LocalExecutorPoolBuilder {
/// Spin for duration before parking a reactor
spin_before_park: Option<Duration>,
/// A name for the thread-to-be (if any), for identification in panic
/// messages. Each executor in the pool will use this name followed by
/// a hyphen and numeric id (e.g. `myname-1`).
name: String,
/// Amount of memory to reserve for storage I/O. This will be preallocated
/// and registered with io_uring. It is still possible to use more than
/// that, but it will come from the standard allocator and performance
/// will suffer. Defaults to 10 MiB.
io_memory: usize,
/// How often to yield to other task queues
preempt_timer_duration: Duration,
/// Indicates a policy by which [`LocalExecutor`]s are bound to CPUs.
placement: PoolPlacement,
}
impl LocalExecutorPoolBuilder {
/// Generates the base configuration for spawning a pool of
/// [`LocalExecutor`]s, from which configuration methods can be chained.
/// The method's only argument is the [`PoolPlacement`] policy by which
/// [`LocalExecutor`]s are bound to the machine's hardware topology. i.e.
/// how many and which CPUs to use.
pub fn new(placement: PoolPlacement) -> Self {
Self {
spin_before_park: None,
name: String::from("unnamed"),
io_memory: 10 << 20,
preempt_timer_duration: Duration::from_millis(100),
placement,
}
}
/// Please see documentation under
/// [`LocalExecutorBuilder::spin_before_park`] for details. The setting
/// is applied to all executors in the pool.
pub fn spin_before_park(mut self, spin: Duration) -> Self {
self.spin_before_park = Some(spin);
self
}
/// Please see documentation under [`LocalExecutorBuilder::name`] for
/// details. The setting is applied to all executors in the pool. Note
/// that when a thread is spawned, the `name` is combined with a hyphen
/// and numeric id (e.g. `myname-1`) such that each thread has a unique
/// name.
pub fn name(mut self, name: &str) -> Self {
self.name = String::from(name);
self
}
/// Please see documentation under [`LocalExecutorBuilder::io_memory`] for
/// details. The setting is applied to all executors in the pool.
pub fn io_memory(mut self, io_memory: usize) -> Self {
self.io_memory = io_memory;
self
}
/// Please see documentation under [`LocalExecutorBuilder::preempt_timer`]
/// for details. The setting is applied to all executors in the pool.
pub fn preempt_timer(mut self, dur: Duration) -> Self {
self.preempt_timer_duration = dur;
self
}
/// Spawn a pool of [`LocalExecutor`]s in a new thread according to the
/// [`PoolPlacement`] policy, which is `Unbound` by default.
///
/// This method is the pool equivalent of [`LocalExecutorBuilder::spawn`].
///
/// The method takes a closure `fut_gen` which will be called on each new
/// thread to obtain the [`Future`] to be executed there.
///
/// # Panics
///
/// The newly spawned thread panics if creating the executor fails. If you
/// need more fine-grained error handling consider initializing those
/// entities manually.
#[must_use = "This spawns executors on multiple threads; threads may fail to spawn or you may \
need to call `PoolThreadHandles::join_all()` to keep the main thread alive"]
pub fn on_all_shards<G, F, T>(self, fut_gen: G) -> Result<PoolThreadHandles<T>>
where
G: FnOnce() -> F + Clone + Send + 'static,
F: Future<Output = T> + 'static,
T: Send + 'static,
{
let mut handles = PoolThreadHandles::new();
let nr_shards = self.placement.executor_count();
let mut cpu_set_gen = placement::CpuSetGenerator::pool(self.placement.clone())?;
let latch = Latch::new(nr_shards);
for _ in 0..nr_shards {
match self.spawn_thread(&mut cpu_set_gen, &latch, fut_gen.clone()) {
Ok(handle) => handles.push(handle),
Err(err) => {
handles.join_all();
return Err(err);
}
}
}
Ok(handles)
}
/// Spawns a thread
fn spawn_thread<G, F, T>(
&self,
cpu_set_gen: &mut placement::CpuSetGenerator,
latch: &Latch,
fut_gen: G,
) -> Result<JoinHandle<Result<T>>>
where
G: FnOnce() -> F + Clone + Send + 'static,
F: Future<Output = T> + 'static,
T: Send + 'static,
{
// NOTE: `self.placement` was already consumed (cloned) to build the
// `CpuSetGenerator` in `Self::on_all_shards`; the CPU binding for this
// thread must come from `cpu_set_gen`, so you should no longer rely on
// `self.placement` at this point
let cpu_binding = cpu_set_gen.next().cpu_binding();
let notifier = sys::new_sleep_notifier()?;
let name = format!("{}-{}", &self.name, notifier.id());
let handle = Builder::new().name(name).spawn({
let io_memory = self.io_memory;
let preempt_timer_duration = self.preempt_timer_duration;
let spin_before_park = self.spin_before_park;
let latch = Latch::clone(latch);
move || {
// only allow the thread to create the `LocalExecutor` if all other threads that
// are supposed to be created by the pool builder were successfully spawned
if latch.arrive_and_wait() == LatchState::Ready {
let mut le = LocalExecutor::new(
notifier,
io_memory,
preempt_timer_duration,
cpu_binding,
spin_before_park,
)
.unwrap();
le.init();
le.run(async move { Ok(fut_gen().await) })
} else {
// this `Err` isn't visible to the user; the pool builder directly returns an
// `Err` from the `std::thread::Builder`
Err(io::Error::new(io::ErrorKind::Other, "spawn failed").into())
}
}
});
match handle {
Ok(h) => Ok(h),
Err(e) => {
// The `std::thread::Builder` was unable to spawn the thread and returned an
// `Err`, so we notify other threads to let them know they
// should not proceed with constructing their `LocalExecutor`s
latch.cancel().expect("unreachable: latch was ready");
Err(e.into())
}
}
}
}
/// Holds a collection of [`JoinHandle`]s.
///
/// This struct is returned by [`LocalExecutorPoolBuilder::on_all_shards`].
#[derive(Debug)]
pub struct PoolThreadHandles<T> {
handles: Vec<JoinHandle<Result<T>>>,
}
impl<T> PoolThreadHandles<T> {
fn new() -> Self {
Self {
handles: Vec::new(),
}
}
fn push(&mut self, handle: JoinHandle<Result<T>>) {
self.handles.push(handle)
}
/// Obtain a reference to the `JoinHandle`s.
pub fn handles(&self) -> &Vec<JoinHandle<Result<T>>> {
&self.handles
}
/// Calls [`JoinHandle::join`] on all handles.
pub fn join_all(self) -> Vec<Result<T>> {
self.handles
.into_iter()
.map(|h| {
match h.join() {
Ok(ok @ Ok(_)) => ok,
// this variant is unreachable since `Err` is only returned from a thread if
// another thread failed to spawn; `LocalExecutorPoolBuilder::on_all_shards`
// returns an immediate `Err` if any thread fails to spawn, so
// `PoolThreadHandles` would never be created
Ok(err @ Err(_)) => err,
Err(e) => Err(GlommioError::BuilderError(BuilderErrorKind::ThreadPanic(e))),
}
})
.collect::<Vec<_>>()
}
}
pub(crate) fn maybe_activate(tq: Rc<RefCell<TaskQueue>>) {
LOCAL_EX.with(|local_ex| {
let mut queues = local_ex.queues.borrow_mut();
queues.maybe_activate(tq)
})
}
/// Single-threaded executor.
///
/// The executor can only be run on the thread that created it.
///
/// # Examples
///
/// ```
/// use glommio::LocalExecutor;
///
/// let local_ex = LocalExecutor::default();
///
/// local_ex.run(async {
/// println!("Hello world!");
/// });
/// ```
///
/// In many cases, use of [`LocalExecutorBuilder`] will provide more
/// configuration options and more ergonomic methods. See
/// [`LocalExecutorBuilder::spawn`] for examples.
///
/// [`LocalExecutorBuilder`]: struct.LocalExecutorBuilder.html
///
/// [`LocalExecutorBuilder::spawn`]:
/// struct.LocalExecutorBuilder.html#method.spawn
#[derive(Debug)]
pub struct LocalExecutor {
queues: Rc<RefCell<ExecutorQueues>>,
parker: parking::Parker,
id: usize,
reactor: Rc<reactor::Reactor>,
}
impl LocalExecutor {
fn get_reactor(&self) -> Rc<Reactor> {
self.reactor.clone()
}
fn init(&mut self) {
let io_requirements = IoRequirements::new(Latency::NotImportant, 0);
self.queues.borrow_mut().available_executors.insert(
0,
TaskQueue::new(
Default::default(),
"default",
Shares::Static(1000),
io_requirements,
),
);
}
fn new(
notifier: Arc<sys::SleepNotifier>,
io_memory: usize,
preempt_timer: Duration,
cpu_binding: Option<impl IntoIterator<Item = usize>>,
mut spin_before_park: Option<Duration>,
) -> Result<LocalExecutor> {
// Linux's default memory policy is "local allocation" which allocates memory
// on the NUMA node containing the CPU where the allocation takes place.
// Hence, we bind to a CPU in the provided CPU set before allocating any
// memory for the `LocalExecutor`, thereby allowing any access to these
// data structures to occur on a local NUMA node (nevertheless, for some
// `Placement` variants a CPU set could span multiple NUMA nodes).
// For additional information see:
// https://www.kernel.org/doc/html/latest/admin-guide/mm/numa_memory_policy.html
match cpu_binding {
Some(cpu_set) => bind_to_cpu_set(cpu_set)?,
None => spin_before_park = None,
}
let p = parking::Parker::new();
let queues = ExecutorQueues::new(preempt_timer, spin_before_park);
trace!(id = notifier.id(), "Creating executor");
Ok(LocalExecutor {
queues: Rc::new(RefCell::new(queues)),
parker: p,
id: notifier.id(),
reactor: Rc::new(reactor::Reactor::new(notifier, io_memory)),
})
}
/// Returns a unique identifier for this Executor.
///
/// # Examples
/// ```
/// use glommio::LocalExecutor;
///
/// let local_ex = LocalExecutor::default();
/// println!("My ID: {}", local_ex.id());
/// ```
pub fn id(&self) -> usize {
self.id
}
fn create_task_queue<S>(&self, shares: Shares, latency: Latency, name: S) -> TaskQueueHandle
where
S: Into<String>,
{
let index = {
let mut ex = self.queues.borrow_mut();
let index = ex.executor_index;
ex.executor_index += 1;
index
};
let io_requirements = IoRequirements::new(latency, index);
let tq = TaskQueue::new(TaskQueueHandle { index }, name, shares, io_requirements);
self.queues
.borrow_mut()
.available_executors
.insert(index, tq);
TaskQueueHandle { index }
}
/// Removes a task queue.
///
/// The task queue cannot be removed if there are still pending tasks.
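///
/// # Examples
///
/// A minimal sketch; the queue is created through the executor proxy and,
/// being idle, can be removed afterwards:
///
/// ```
/// use glommio::{Latency, LocalExecutor, Shares};
///
/// let ex = LocalExecutor::default();
/// let handle = ex.run(async {
/// glommio::executor().create_task_queue(Shares::default(), Latency::NotImportant, "temp")
/// });
/// ex.remove_task_queue(handle).unwrap();
/// ```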
pub fn remove_task_queue(&self, handle: TaskQueueHandle) -> Result<()> {
let mut queues = self.queues.borrow_mut();
let queue_entry = queues.available_executors.entry(handle.index);
if let Entry::Occupied(entry) = queue_entry {
let tq = entry.get();
if tq.borrow().is_active() {
return Err(GlommioError::queue_still_active(handle.index));
}
entry.remove();
return Ok(());
}
Err(GlommioError::queue_not_found(handle.index))
}
fn get_queue(&self, handle: &TaskQueueHandle) -> Option<Rc<RefCell<TaskQueue>>> {
self.queues
.borrow()
.available_executors
.get(&handle.index)
.cloned()
}
fn current_task_queue(&self) -> TaskQueueHandle {
self.queues
.borrow()
.active_executing
.as_ref()
.unwrap()
.borrow()
.stats
.index
}
fn mark_me_for_yield(&self) {
let queues = self.queues.borrow();
let mut me = queues.active_executing.as_ref().unwrap().borrow_mut();
me.yielded = true;
}
fn spawn<T>(&self, future: impl Future<Output = T>) -> multitask::Task<T> {
let tq = self
.queues
.borrow()
.active_executing
.clone() // this clone is cheap because we clone an `Option<Rc<_>>`
.or_else(|| self.get_queue(&TaskQueueHandle { index: 0 }))
.unwrap();
let id = self.id;
let ex = tq.borrow().ex.clone();
ex.spawn_and_run(id, tq, future)
}
fn spawn_into<T, F>(&self, future: F, handle: TaskQueueHandle) -> Result<multitask::Task<T>>
where
F: Future<Output = T>,
{
let tq = self
.get_queue(&handle)
.ok_or_else(|| GlommioError::queue_not_found(handle.index))?;
let ex = tq.borrow().ex.clone();
let id = self.id;
// can't run right away, because we need to cross into a different task queue
Ok(ex.spawn_and_schedule(id, tq, future))
}
fn preempt_timer_duration(&self) -> Duration {
self.queues.borrow().preempt_timer_duration
}
fn spin_before_park(&self) -> Option<Duration> {
self.queues.borrow().spin_before_park
}
#[inline(always)]
pub(crate) fn need_preempt(&self) -> bool {
self.reactor.need_preempt()
}
fn run_task_queues(&self) -> bool {
let mut ran = false;
loop {
self.reactor.sys.install_eventfd();
if self.need_preempt() {
break;
}
if !self.run_one_task_queue() {
return false;
} else {
ran = true;
}
} | let mut tq = self.queues.borrow_mut();
let candidate = tq.active_executors.pop();
tq.stats.scheduler_runs += 1;
match candidate {
Some(queue) => {
tq.active_executing = Some(queue.clone());
drop(tq);
let time = {
let now = Instant::now();
let mut queue_ref = queue.borrow_mut();
queue_ref.prepare_to_run(now);
self.reactor
.inform_io_requirements(queue_ref.io_requirements);
now
};
let mut tasks_executed_this_loop = 0;
loop {
let mut queue_ref = queue.borrow_mut();
if self.need_preempt() || queue_ref.yielded() {
break;
}
if let Some(r) = queue_ref.get_task() {
drop(queue_ref);
r.run();
tasks_executed_this_loop += 1;
} else {
break;
}
}
let runtime = time.elapsed();
let (need_repush, last_vruntime) = {
let mut state = queue.borrow_mut();
let last_vruntime = state.account_vruntime(runtime);
(state.is_active(), last_vruntime)
};
let mut tq = self.queues.borrow_mut();
tq.active_executing = None;
tq.stats.executor_runtime += runtime;
tq.stats.tasks_executed += tasks_executed_this_loop;
tq.last_vruntime = match last_vruntime {
Some(x) => x,
None => {
for queue in tq.available_executors.values() {
let mut q = queue.borrow_mut();
q.vruntime = 0;
}
0
}
};
if need_repush {
tq.active_executors.push(queue);
} else {
tq.reevaluate_preempt_timer();
}
true
}
None => false,
}
}
/// Runs the executor until the given future completes.
///
/// # Examples
///
/// ```
/// use glommio::{LocalExecutor, Task};
///
/// let local_ex = LocalExecutor::default();
///
/// let res = local_ex.run(async {
/// let task = glommio::spawn_local(async { 1 + 2 });
/// task.await * 2
/// });
///
/// assert_eq!(res, 6);
/// ```
pub fn run<T>(&self, future: impl Future<Output = T>) -> T {
// this waker is never exposed in the public interface and is only used to check
// whether the task's `JoinHandle` is `Ready`
let waker = dummy_waker();
let cx = &mut Context::from_waker(&waker);
let spin_before_park = self.spin_before_park().unwrap_or_default();
if LOCAL_EX.is_set() {
panic!("There is already an Executor running in this thread");
}
LOCAL_EX.set(self, || {
let future = self
.spawn_into(async move { future.await }, TaskQueueHandle::default())
.unwrap()
.detach();
pin!(future);
let mut pre_time = Instant::now();
loop {
if let Poll::Ready(t) = future.as_mut().poll(cx) {
// can't be canceled, and join handle is None only upon
// cancellation or panic. So in case of panic this just propagates
let cur_time = Instant::now();
self.queues.borrow_mut().stats.total_runtime += cur_time - pre_time;
break t.unwrap();
}
// We want to do I/O before we call run_task_queues,
// for the benefit of the latency ring. If there are pending
// requests that are latency sensitive we want them out of the
// ring ASAP (before we run the task queues). We will also use
// the opportunity to install the timer.
let duration = self.preempt_timer_duration();
self.parker.poll_io(duration);
let run = self.run_task_queues();
let cur_time = Instant::now();
self.queues.borrow_mut().stats.total_runtime += cur_time - pre_time;
pre_time = cur_time;
if !run {
if let Poll::Ready(t) = future.as_mut().poll(cx) {
// It may be that we just became ready now that the task queue
// is exhausted. But if we sleep (park) we'll never know so we
// test again here. We can't test *just* here because the main
// future is probably the one setting up the task queues, etc.
break t.unwrap();
} else {
while !self.reactor.spin_poll_io().unwrap() {
if pre_time.elapsed() > spin_before_park {
self.parker.park();
break;
}
}
// reset the timer to deduct the spin loop time
pre_time = Instant::now();
}
}
}
})
}
}
/// Spawns a single-threaded executor with default settings on the current
/// thread.
///
/// This will create an executor using the default parameters of
/// `LocalExecutorBuilder`; if you want to further customize it, use that
/// builder API instead.
///
/// # Panics
///
/// Panics if creating the executor fails; use `LocalExecutorBuilder::make` to
/// recover from such errors.
///
/// # Examples
///
/// ```
/// use glommio::LocalExecutor;
///
/// let local_ex = LocalExecutor::default();
/// ```
impl Default for LocalExecutor {
fn default() -> Self {
LocalExecutorBuilder::new(Placement::Unbound)
.make()
.unwrap()
}
}
/// A spawned future that can be detached
///
/// Tasks are also futures themselves and yield the output of the spawned
/// future.
///
/// When a task is dropped, it gets canceled and won't be polled again. To
/// cancel a task a bit more gracefully and wait until it stops running, use the
/// [`cancel()`][`Task::cancel()`] method.
///
/// Tasks that panic get immediately canceled. Awaiting a canceled task also
/// causes a panic.
///
/// # Examples
///
/// ```
/// # use glommio::{LocalExecutor, Task};
/// #
/// # let ex = LocalExecutor::default();
/// #
/// # ex.run(async {
/// let task = glommio::spawn_local(async {
/// println!("Hello from a task!");
/// 1 + 2
/// });
///
/// assert_eq!(task.await, 3);
/// # });
/// ```
/// Note that there is no guarantee of ordering when reasoning about when a
/// task runs, as that is an implementation detail.
///
/// In particular, acquiring a borrow and holding it across a task spawn may
/// sometimes work but panic depending on scheduling decisions, so it is still
/// illegal.
///
///
/// ```no_run
/// # use glommio::{LocalExecutor, Task};
/// # use std::rc::Rc;
/// # use std::cell::RefCell;
/// #
/// # let ex = LocalExecutor::default();
/// #
/// # ex.run(async {
/// let example = Rc::new(RefCell::new(0));
/// let exclone = example.clone();
///
/// let mut ex_mut = example.borrow_mut();
/// *ex_mut = 1;
///
/// let task = glommio::spawn_local(async move {
/// let ex = exclone.borrow();
/// println!("Current value: {}", ex);
/// });
///
/// // This is fine if `task` executes after the current task, but will panic
/// // if it preempts the current task and executes first. This is therefore
/// // invalid.
/// *ex_mut = 2;
/// drop(ex_mut);
///
/// task.await;
/// # });
/// ```
#[must_use = "tasks get canceled when dropped, use `.detach()` to run them in the background"]
#[derive(Debug)]
pub struct Task<T>(multitask::Task<T>);
impl<T> Task<T> {
/// Detaches the task to let it keep running in the background.
///
/// # Examples
///
/// ```
/// use futures_lite::future;
/// use glommio::{timer::Timer, LocalExecutor};
///
/// let ex = LocalExecutor::default();
/// ex.run(async {
/// glommio::spawn_local(async {
/// loop {
/// println!("I'm a background task looping forever.");
/// glommio::executor().yield_task_queue_now().await;
/// }
/// })
/// .detach();
/// Timer::new(std::time::Duration::from_micros(100)).await;
/// })
/// ```
pub fn detach(self) -> task::JoinHandle<T> {
self.0.detach()
}
/// Cancels the task and waits for it to stop running.
///
/// Returns the task's output if it was completed just before it got
/// canceled, or [`None`] if it didn't complete.
///
/// While it's possible to simply drop the [`Task`] to cancel it, this is a
/// cleaner way of canceling because it also waits for the task to stop
/// running.
///
/// # Examples
///
/// ```
/// use futures_lite::future;
/// use glommio::LocalExecutor;
///
/// let ex = LocalExecutor::default();
///
/// ex.run(async {
/// let task = glommio::spawn_local(async {
/// loop {
/// println!("Even though I'm in an infinite loop, you can still cancel me!");
/// future::yield_now().await;
/// }
/// });
///
/// task.cancel().await;
/// });
/// ```
pub async fn cancel(self) -> Option<T> {
self.0.cancel().await
}
}
impl<T> Future for Task<T> {
type Output = T;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut self.0).poll(cx)
}
}
/// A spawned future that cannot be detached, and has a predictable lifetime.
///
/// Because their lifetimes are bounded, you don't need to make sure that data
/// you pass to the `ScopedTask` is `'static`, which can be cheaper (no need to
/// reference count). If you, however, would like to `.detach` this task and
/// have it run in the background, consider using [`Task`] instead.
///
/// Tasks are also futures themselves and yield the output of the spawned
/// future.
///
/// When a task is dropped, it gets canceled and won't be polled again. To
/// cancel a task a bit more gracefully and wait until it stops running, use the
/// [`cancel()`][`ScopedTask::cancel()`] method.
///
/// Tasks that panic get immediately canceled. Awaiting a canceled task also
/// causes a panic.
///
/// # Safety
///
/// `ScopedTask` is safe to use so long as it is guaranteed to be either awaited
/// or dropped. Rust does not guarantee that destructors will be called, and if
/// they are not, `ScopedTask`s can be kept alive after the scope is terminated.
///
/// Typically, the only situations in which `drop` is not executed are:
///
/// * If you manually choose not to, with [`std::mem::forget`] or
/// [`ManuallyDrop`].
/// * If cyclic reference counts prevent the task from being destroyed.
///
/// If you believe any of the above situations are present (the first one is,
/// of course, considerably easier to spot), avoid using the `ScopedTask`.
///
/// # Examples
///
/// ```
/// use glommio::LocalExecutor;
///
/// let ex = LocalExecutor::default();
///
/// ex.run(async {
/// let a = 2;
/// let task = unsafe {
/// glommio::spawn_scoped_local(async {
/// println!("Hello from a task!");
/// 1 + a // this is a reference, and it works just fine
/// })
/// };
///
/// assert_eq!(task.await, 3);
/// });
/// ```
/// The usual borrow checker rules apply. A [`ScopedTask`] can acquire a mutable
/// reference to a variable just fine:
///
/// ```
/// # use glommio::{LocalExecutor};
/// #
/// # let ex = LocalExecutor::default();
/// # ex.run(async {
/// let mut a = 2;
/// let task = unsafe {
/// glommio::spawn_scoped_local(async {
/// a = 3;
/// })
/// };
/// task.await;
/// assert_eq!(a, 3);
/// # });
/// ```
///
/// But until the task completes, the reference is mutably held, so we can no
/// longer immutably reference it:
///
/// ```compile_fail
/// # use glommio::LocalExecutor;
/// #
/// # let ex = LocalExecutor::default();
/// # ex.run(async {
/// let mut a = 2;
/// let task = unsafe {
/// glommio::scoped_local(async {
/// a = 3;
/// })
/// };
/// assert_eq!(a, 3); // task hasn't completed yet!
/// task.await;
/// # });
/// ```
///
/// You can still use [`Cell`] and [`RefCell`] normally to work around this.
/// Just keep in mind that there is no guarantee of ordering for execution of
/// tasks, and if the task has not yet finished the value may or may not have
/// changed (as with any interior mutability).
///
/// ```
/// # use glommio::{LocalExecutor};
/// # use std::cell::Cell;
/// #
/// # let ex = LocalExecutor::default();
/// # ex.run(async {
/// let a = Cell::new(2);
/// let task = unsafe {
/// glommio::spawn_scoped_local(async {
/// a.set(3);
/// })
/// };
///
/// assert!(a.get() == 3 || a.get() == 2); // impossible to know if it will be 2 or 3
/// task.await;
/// assert_eq!(a.get(), 3); // The task finished now.
/// # });
/// ```
///
/// The following code, however, will access invalid memory as drop is never
/// executed
///
/// ```no_run
/// # use glommio::{LocalExecutor};
/// # use std::cell::Cell;
/// #
/// # let ex = LocalExecutor::default();
/// # ex.run(async {
/// {
/// let a = &mut "mayhem";
/// let task = unsafe {
/// glommio::spawn_scoped_local(async {
/// *a = "doom";
/// })
/// };
/// std::mem::forget(task);
/// }
/// # });
/// ```
/// [`Task`]: crate::Task
/// [`Cell`]: std::cell::Cell
/// [`RefCell`]: std::cell::RefCell
/// [`std::mem::forget`]: std::mem::forget
/// [`ManuallyDrop`]: std::mem::ManuallyDrop
#[must_use = "scoped tasks get canceled when dropped, use a standard Task and `.detach()` to run \
them in the background"]
#[derive(Debug)]
pub struct ScopedTask<'a, T>(multitask::Task<T>, PhantomData<&'a T>);
impl<'a, T> ScopedTask<'a, T> {
/// Cancels the task and waits for it to stop running.
///
/// Returns the task's output if it was completed just before it got
/// canceled, or [`None`] if it didn't complete.
///
/// While it's possible to simply drop the [`ScopedTask`] to cancel it, this
/// is a cleaner way of canceling because it also waits for the task to
/// stop running.
///
/// # Examples
///
/// ```
/// use futures_lite::future;
/// use glommio::LocalExecutor;
///
/// let ex = LocalExecutor::default();
///
/// ex.run(async {
/// let task = unsafe {
/// glommio::spawn_scoped_local(async {
/// loop {
/// println!("Even though I'm in an infinite loop, you can still cancel me!");
/// future::yield_now().await;
/// }
/// })
/// };
///
/// task.cancel().await;
/// });
/// ```
pub async fn cancel(self) -> Option<T> {
self.0.cancel().await
}
}
impl<'a, T> Future for ScopedTask<'a, T> {
type Output = T;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut self.0).poll(cx)
}
}
/// Conditionally yields the current task queue. The scheduler may then
/// process other task queues according to their latency requirements.
/// If a call to this function results in the current queue yielding, then
/// the calling task is moved to the back of the yielded task queue.
///
/// Under which conditions this function yields is an implementation detail
/// subject to change, but it will always be somehow related to the latency
/// guarantees that the task queues want to uphold in their
/// `Latency::Matters` parameter (or `Latency::NotImportant`).
///
/// This function is the central mechanism of task cooperation in Glommio
/// and should be preferred over unconditional yielding methods like
/// [`ExecutorProxy::yield_now`] and
/// [`ExecutorProxy::yield_task_queue_now`].
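///
/// # Examples
///
/// A minimal sketch of a busy loop cooperating with the scheduler:
///
/// ```
/// use glommio::LocalExecutor;
///
/// let ex = LocalExecutor::default();
/// ex.run(async {
/// for _ in 0..1000 {
/// // ... do a slice of work ...
/// glommio::yield_if_needed().await;
/// }
/// });
/// ```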
#[inline]
pub async fn yield_if_needed() {
executor().yield_if_needed().await
}
/// Spawns a task onto the current single-threaded executor.
///
/// If called from a [`LocalExecutor`], the task is spawned on it.
/// Otherwise, this method panics.
///
/// Note that there is no guarantee of when the spawned task is scheduled.
/// The current task can continue its execution or be preempted by the
/// newly spawned task immediately. See the documentation for the
/// top-level [`Task`] for examples.
///
/// Proxy to [`ExecutorProxy::spawn_local`]
///
/// # Examples
///
/// ```
/// use glommio::{LocalExecutor, Task};
///
/// let local_ex = LocalExecutor::default();
///
/// local_ex.run(async {
/// let task = glommio::spawn_local(async { 1 + 2 });
/// assert_eq!(task.await, 3);
/// });
/// ```
pub fn spawn_local<T>(future: impl Future<Output = T> + 'static) -> Task<T>
where
T: 'static,
{
executor().spawn_local(future)
}
/// Spawns a task onto the current single-threaded executor, in a particular
/// task queue
///
/// If called from a [`LocalExecutor`], the task is spawned on it.
/// Otherwise, this method panics.
///
/// Note that there is no guarantee of when the spawned task is scheduled.
/// The current task can continue its execution or be preempted by the
/// newly spawned task immediately. See the documentation for the
/// top-level [`Task`] for examples.
///
/// Proxy to [`ExecutorProxy::spawn_local_into`]
///
/// # Examples
///
/// ```
/// # use glommio::{ LocalExecutor, Shares, Task};
///
/// # let local_ex = LocalExecutor::default();
/// # local_ex.run(async {
/// let handle = glommio::executor().create_task_queue(
/// Shares::default(),
/// glommio::Latency::NotImportant,
/// "test_queue",
/// );
/// let task = glommio::spawn_local_into(async { 1 + 2 }, handle).expect("failed to spawn task");
/// assert_eq!(task.await, 3);
/// # });
/// ```
pub fn spawn_local_into<T>(
future: impl Future<Output = T> + 'static,
handle: TaskQueueHandle,
) -> Result<Task<T>>
where
T: 'static,
{
executor().spawn_local_into(future, handle)
}
/// Spawns a task onto the current single-threaded executor.
///
/// If called from a [`LocalExecutor`], the task is spawned on it.
///
/// Otherwise, this method panics.
///
/// Proxy to [`ExecutorProxy::spawn_scoped_local`]
///
/// # Safety
///
/// `ScopedTask` depends on `drop` running or `.await` being called for
/// safety. See the struct [`ScopedTask`] for details.
///
/// # Examples
///
/// ```
/// use glommio::LocalExecutor;
///
/// let local_ex = LocalExecutor::default();
///
/// local_ex.run(async {
/// let non_static = 2;
/// let task = unsafe { glommio::spawn_scoped_local(async { 1 + non_static }) };
/// assert_eq!(task.await, 3);
/// });
/// ```
pub unsafe fn spawn_scoped_local<'a, T>(future: impl Future<Output = T> + 'a) -> ScopedTask<'a, T> {
executor().spawn_scoped_local(future)
}
/// Spawns a task onto the current single-threaded executor, in a particular
/// task queue
///
/// If called from a [`LocalExecutor`], the task is spawned on it.
///
/// Otherwise, this method panics.
///
/// Proxy to [`ExecutorProxy::spawn_scoped_local_into`]
///
/// # Safety
///
/// `ScopedTask` depends on `drop` running or `.await` being called for
/// safety. See the struct [`ScopedTask`] for details.
///
/// # Examples
///
/// ```
/// use glommio::{LocalExecutor, Shares};
///
/// let local_ex = LocalExecutor::default();
/// local_ex.run(async {
/// let handle = glommio::executor().create_task_queue(
/// Shares::default(),
/// glommio::Latency::NotImportant,
/// "test_queue",
/// );
/// let non_static = 2;
/// let task = unsafe {
/// glommio::spawn_scoped_local_into(async { 1 + non_static }, handle)
/// .expect("failed to spawn task")
/// };
/// assert_eq!(task.await, 3);
/// })
/// ```
pub unsafe fn spawn_scoped_local_into<'a, T>(
future: impl Future<Output = T> + 'a,
handle: TaskQueueHandle,
) -> Result<ScopedTask<'a, T>> {
executor().spawn_scoped_local_into(future, handle)
}
/// A proxy struct to the underlying [`LocalExecutor`]. It is accessible from
/// anywhere within a Glommio context using [`executor()`].
#[derive(Debug)]
pub struct ExecutorProxy {}
impl ExecutorProxy {
async fn cond_yield<F>(cond: F)
where
F: FnOnce(&LocalExecutor) -> bool,
{
let need_yield = LOCAL_EX.with(|local_ex| {
if cond(local_ex) {
local_ex.mark_me_for_yield();
true
} else {
false
}
});
if need_yield {
futures_lite::future::yield_now().await;
}
}
/// Checks if this task has run for too long and needs to be preempted. This
/// is useful for situations where we can't call .await, for instance,
/// if a [`RefMut`] is held. If this tests true, then the user is
/// responsible for making any preparations necessary for calling .await
/// and doing it themselves.
///
/// # Examples
///
/// ```
/// use glommio::LocalExecutorBuilder;
///
/// let ex = LocalExecutorBuilder::default()
/// .spawn(|| async {
/// loop {
/// if glommio::executor().need_preempt() {
/// break;
/// }
/// }
/// })
/// .unwrap();
///
/// ex.join().unwrap();
/// ```
///
/// [`RefMut`]: https://doc.rust-lang.org/std/cell/struct.RefMut.html
#[inline(always)]
// FIXME: This is a bit less efficient than it needs, because the scoped thread
// local key does lazy initialization. Every time we call into this, we are
// paying to test if this is initialized. This is what I got from objdump:
//
//  0: 50                    push   %rax
//  1: ff 15 00 00 00 00     callq  *0x0(%rip)
//  7: 48 85 c0              test   %rax,%rax
//  a: 74 17                 je     23   <== will call into the initialization routine
//  c: 48 8b 88 38 03 00 00  mov    0x338(%rax),%rcx  <== address of the head
// 13: 48 8b 80 40 03 00 00  mov    0x340(%rax),%rax  <== address of the tail
// 1a: 8b 00                 mov    (%rax),%eax
// 1c: 3b 01                 cmp    (%rcx),%eax  <== need preempt
// 1e: 0f 95 c0              setne  %al
// 21: 59                    pop    %rcx
// 22: c3                    retq
// 23:  <== initialization stuff
//
// Rust has a thread-local feature that is still experimental, so we can maybe
// switch to that someday.
//
// We will prefer to use the stable compiler and pay that unfortunate price for
// now.
pub fn need_preempt(&self) -> bool {
LOCAL_EX.with(|local_ex| local_ex.need_preempt())
}
/// Conditionally yields the current task queue. The scheduler may then
/// process other task queues according to their latency requirements.
/// If a call to this function results in the current queue to yield,
/// then the calling task is moved to the back of the yielded task
/// queue.
///
/// The conditions under which this function yields are an implementation
/// detail subject to change, but they will always be related to the latency
/// guarantees that the task queues want to uphold through their
/// `Latency::Matters` parameter (or `Latency::NotImportant`).
///
/// This function is the central mechanism of task cooperation in Glommio
/// and should be preferred over unconditional yielding methods like
/// [`ExecutorProxy::yield_now`] and
/// [`ExecutorProxy::yield_task_queue_now`].
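///
/// # Examples
///
/// A minimal sketch of a long-running loop that cooperates with the
/// scheduler between units of work:
///
/// ```
/// use glommio::LocalExecutor;
///
/// let local_ex = LocalExecutor::default();
/// local_ex.run(async {
/// for _ in 0..1_000 {
/// // ... one unit of work ...
/// glommio::executor().yield_if_needed().await;
/// }
/// });
/// ```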
#[inline]
pub async fn yield_if_needed(&self) {
Self::cond_yield(|local_ex| local_ex.need_preempt()).await;
}
/// Unconditionally yields the current task and forces the scheduler
/// to poll another task within the current task queue.
/// Calling this wakes the current task and returns [`Poll::Pending`] once.
///
/// Unless you know you need to yield right now, using
/// [`ExecutorProxy::yield_if_needed`] instead is the better choice.
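///
/// # Examples
///
/// A minimal sketch, yielding once unconditionally:
///
/// ```
/// use glommio::LocalExecutor;
///
/// let local_ex = LocalExecutor::default();
/// local_ex.run(async {
/// glommio::executor().yield_now().await;
/// });
/// ```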
pub async fn yield_now(&self) {
futures_lite::future::yield_now().await
}
/// Unconditionally yields the current task queue and forces the scheduler
/// to poll another queue. Use [`ExecutorProxy::yield_now`] to yield within
/// a queue.
///
/// Unless you know you need to yield right now, using
/// [`ExecutorProxy::yield_if_needed`] instead is the better choice.
pub async fn yield_task_queue_now(&self) {
Self::cond_yield(|_| true).await
}
#[inline]
pub(crate) fn reactor(&self) -> Rc<reactor::Reactor> {
LOCAL_EX.with(|local_ex| local_ex.get_reactor())
}
/// Returns the id of the current executor
///
/// If called from a [`LocalExecutor`], returns the id of the executor.
///
/// Otherwise, this method panics.
///
/// # Examples
///
/// ```
/// use glommio::{LocalExecutor, Task};
///
/// let local_ex = LocalExecutor::default();
///
/// local_ex.run(async {
/// println!("my ID: {}", glommio::executor().id());
/// });
/// ```
pub fn id(&self) -> usize {
LOCAL_EX.with(|local_ex| local_ex.id())
}
/// Creates a new task queue with a given latency hint and the provided
/// name.
///
/// Each task queue is scheduled based on the [`Shares`] and [`Latency`]
/// system, and tasks within a queue will be scheduled in serial.
///
/// Returns an opaque handle that can later be used to launch tasks into
/// that queue with [`spawn_local_into`].
///
/// # Examples
///
/// ```
/// use glommio::{Latency, LocalExecutor, Shares};
/// use std::time::Duration;
///
/// let local_ex = LocalExecutor::default();
/// local_ex.run(async move {
/// let task_queue = glommio::executor().create_task_queue(
/// Shares::default(),
/// Latency::Matters(Duration::from_secs(1)),
/// "my_tq",
/// );
/// let task = glommio::spawn_local_into(
/// async {
/// println!("Hello world");
/// },
/// task_queue,
/// )
/// .expect("failed to spawn task");
/// });
/// ```
///
/// [`spawn_local_into`]: crate::spawn_local_into
/// [`Shares`]: enum.Shares.html
/// [`Latency`]: enum.Latency.html
pub fn create_task_queue(
&self,
shares: Shares,
latency: Latency,
name: &str,
) -> TaskQueueHandle {
LOCAL_EX.with(|local_ex| local_ex.create_task_queue(shares, latency, name))
}
/// Returns the [`TaskQueueHandle`] that represents the TaskQueue currently
/// running. This can be passed directly into [`crate::spawn_local_into`].
/// This must be run from a task that was generated through
/// [`crate::spawn_local`] or [`crate::spawn_local_into`]
///
/// # Examples
/// ```
/// use glommio::{Latency, LocalExecutor, LocalExecutorBuilder, Shares};
///
/// let ex = LocalExecutorBuilder::default()
/// .spawn(|| async move {
/// let original_tq = glommio::executor().current_task_queue();
/// let new_tq = glommio::executor().create_task_queue(
/// Shares::default(),
/// Latency::NotImportant,
/// "test",
/// );
///
/// let task = glommio::spawn_local_into(
/// async move {
/// glommio::spawn_local_into(
/// async move {
/// assert_eq!(glommio::executor().current_task_queue(), original_tq);
/// },
/// original_tq,
/// )
/// .unwrap();
/// },
/// new_tq,
/// )
/// .unwrap();
/// task.await;
/// })
/// .unwrap();
///
/// ex.join().unwrap();
/// ```
pub fn current_task_queue(&self) -> TaskQueueHandle {
LOCAL_EX.with(|local_ex| local_ex.current_task_queue())
}
/// Returns a [`Result`] with its `Ok` value wrapping a [`TaskQueueStats`]
/// or a [`GlommioError`] of type [`QueueErrorKind`] if there is no task
/// queue with this handle.
///
/// # Examples
/// ```
/// use glommio::{Latency, LocalExecutorBuilder, Shares};
///
/// let ex = LocalExecutorBuilder::default()
/// .spawn(|| async move {
/// let new_tq = glommio::executor().create_task_queue(
/// Shares::default(),
/// Latency::NotImportant,
/// "test",
/// );
/// println!(
/// "Stats for test: {:?}",
/// glommio::executor().task_queue_stats(new_tq).unwrap()
/// );
/// })
/// .unwrap();
///
/// ex.join().unwrap();
/// ```
///
/// [`TaskQueueStats`]: struct.TaskQueueStats.html
/// [`GlommioError`]: crate::error::GlommioError
/// [`QueueErrorKind`]: crate::error::QueueErrorKind
/// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
pub fn task_queue_stats(&self, handle: TaskQueueHandle) -> Result<TaskQueueStats> {
LOCAL_EX.with(|local_ex| match local_ex.get_queue(&handle) {
Some(x) => Ok(x.borrow().stats),
None => Err(GlommioError::queue_not_found(handle.index)),
})
}
/// Returns a collection of [`TaskQueueStats`] with information about all
/// task queues in the system
///
/// The collection can be anything that implements [`Extend`] and it is
/// initially passed by the user, so they can control how allocations are
/// done.
///
/// # Examples
/// ```
/// use glommio::{executor, Latency, LocalExecutorBuilder, Shares};
///
/// let ex = LocalExecutorBuilder::default()
/// .spawn(|| async move {
/// let new_tq = glommio::executor().create_task_queue(
/// Shares::default(),
/// Latency::NotImportant,
/// "test",
/// );
/// let v = Vec::new();
/// println!(
/// "Stats for all queues: {:?}",
/// glommio::executor().all_task_queue_stats(v)
/// );
/// })
/// .unwrap();
///
/// ex.join().unwrap();
/// ```
///
/// [`TaskQueueStats`]: struct.TaskQueueStats.html
/// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
/// [`Extend`]: https://doc.rust-lang.org/std/iter/trait.Extend.html
pub fn all_task_queue_stats<V>(&self, mut output: V) -> V
where
V: Extend<TaskQueueStats>,
{
LOCAL_EX.with(|local_ex| {
let tq = local_ex.queues.borrow();
output.extend(tq.available_executors.values().map(|x| x.borrow().stats));
output
})
}
/// Returns an [`ExecutorStats`] struct with information about this executor
///
/// # Examples
///
/// ```
/// use glommio::{executor, LocalExecutorBuilder};
///
/// let ex = LocalExecutorBuilder::default()
/// .spawn(|| async move {
/// println!(
/// "Stats for executor: {:?}",
/// glommio::executor().executor_stats()
/// );
/// })
/// .unwrap();
///
/// ex.join().unwrap();
/// ```
///
/// [`ExecutorStats`]: struct.ExecutorStats.html
pub fn executor_stats(&self) -> ExecutorStats {
LOCAL_EX.with(|local_ex| local_ex.queues.borrow().stats)
}
/// Returns an [`IoStats`] struct with information about IO performed by
/// this executor's reactor
///
/// # Examples
///
/// ```
/// use glommio::LocalExecutorBuilder;
///
/// let ex = LocalExecutorBuilder::default()
/// .spawn(|| async move {
/// println!("Stats for executor: {:?}", glommio::executor().io_stats());
/// })
/// .unwrap();
///
/// ex.join().unwrap();
/// ```
///
/// [`IoStats`]: crate::IoStats
pub fn io_stats(&self) -> IoStats {
LOCAL_EX.with(|local_ex| local_ex.get_reactor().io_stats())
}
/// Returns an [`IoStats`] struct with information about IO performed from
/// the provided TaskQueue by this executor's reactor
///
/// # Examples
///
/// ```
/// use glommio::{Latency, LocalExecutorBuilder, Shares};
///
/// let ex = LocalExecutorBuilder::default()
/// .spawn(|| async move {
/// let new_tq = glommio::executor().create_task_queue(
/// Shares::default(),
/// Latency::NotImportant,
/// "test",
/// );
/// println!(
/// "Stats for executor: {:?}",
/// glommio::executor().task_queue_io_stats(new_tq)
/// );
/// })
/// .unwrap();
///
/// ex.join().unwrap();
/// ```
///
/// [`IoStats`]: crate::IoStats
pub fn task_queue_io_stats(&self, handle: TaskQueueHandle) -> Result<IoStats> {
LOCAL_EX.with(
|local_ex| match local_ex.get_reactor().task_queue_io_stats(&handle) {
Some(x) => Ok(x),
None => Err(GlommioError::queue_not_found(handle.index)),
},
)
}
/// Spawns a task onto the current single-threaded executor.
///
/// If called from a [`LocalExecutor`], the task is spawned on it.
/// Otherwise, this method panics.
///
/// Note that there is no guarantee of when the spawned task is scheduled.
/// The current task can continue its execution or be preempted by the
/// newly spawned task immediately. See the documentation for the
/// top-level [`Task`] for examples.
///
/// # Examples
///
/// ```
/// use glommio::{LocalExecutor, Task};
///
/// let local_ex = LocalExecutor::default();
///
/// local_ex.run(async {
/// let task = glommio::executor().spawn_local(async { 1 + 2 });
/// assert_eq!(task.await, 3);
/// });
/// ```
pub fn spawn_local<T>(&self, future: impl Future<Output = T> + 'static) -> Task<T>
where
T: 'static,
{
LOCAL_EX.with(|local_ex| Task::<T>(local_ex.spawn(future)))
}
/// Spawns a task onto the current single-threaded executor, in a particular
/// task queue.
///
/// If called from a [`LocalExecutor`], the task is spawned on it.
/// Otherwise, this method panics.
///
/// Note that there is no guarantee of when the spawned task is scheduled.
/// The current task can continue its execution or be preempted by the
/// newly spawned task immediately. See the documentation for the
/// top-level [`Task`] for examples.
///
/// # Examples
///
/// ```
/// # use glommio::{LocalExecutor, Shares, Task};
///
/// # let local_ex = LocalExecutor::default();
/// # local_ex.run(async {
/// let handle = glommio::executor().create_task_queue(
/// Shares::default(),
/// glommio::Latency::NotImportant,
/// "test_queue",
/// );
/// let task = glommio::executor()
/// .spawn_local_into(async { 1 + 2 }, handle)
/// .expect("failed to spawn task");
/// assert_eq!(task.await, 3);
/// # });
/// ```
pub fn spawn_local_into<T>(
&self,
future: impl Future<Output = T> + 'static,
handle: TaskQueueHandle,
) -> Result<Task<T>>
where
T: 'static,
{
LOCAL_EX.with(|local_ex| local_ex.spawn_into(future, handle).map(Task::<T>))
}
/// Spawns a task onto the current single-threaded executor.
///
/// If called from a [`LocalExecutor`], the task is spawned on it.
///
/// Otherwise, this method panics.
///
/// # Safety
///
/// `ScopedTask` depends on `drop` running or `.await` being called for
/// safety. See the struct [`ScopedTask`] for details.
///
/// # Examples
///
/// ```
/// use glommio::LocalExecutor;
///
/// let local_ex = LocalExecutor::default();
///
/// local_ex.run(async {
/// let non_static = 2;
/// let task = unsafe { glommio::executor().spawn_scoped_local(async { 1 + non_static }) };
/// assert_eq!(task.await, 3);
/// });
/// ```
pub unsafe fn spawn_scoped_local<'a, T>(
&self,
future: impl Future<Output = T> + 'a,
) -> ScopedTask<'a, T> {
LOCAL_EX.with(|local_ex| ScopedTask::<'a, T>(local_ex.spawn(future), PhantomData))
}
/// Spawns a task onto the current single-threaded executor, in a particular
/// task queue.
///
/// If called from a [`LocalExecutor`], the task is spawned on it.
///
/// Otherwise, this method panics.
///
/// # Safety
///
/// `ScopedTask` depends on `drop` running or `.await` being called for
/// safety. See the struct [`ScopedTask`] for details.
///
/// # Examples
///
/// ```
/// use glommio::{LocalExecutor, Shares};
///
/// let local_ex = LocalExecutor::default();
/// local_ex.run(async {
/// let handle = glommio::executor().create_task_queue(
/// Shares::default(),
/// glommio::Latency::NotImportant,
/// "test_queue",
/// );
/// let non_static = 2;
/// let task = unsafe {
/// glommio::executor()
/// .spawn_scoped_local_into(async { 1 + non_static }, handle)
/// .expect("failed to spawn task")
/// };
/// assert_eq!(task.await, 3);
/// })
/// ```
pub unsafe fn spawn_scoped_local_into<'a, T>(
&self,
future: impl Future<Output = T> + 'a,
handle: TaskQueueHandle,
) -> Result<ScopedTask<'a, T>> {
LOCAL_EX.with(|local_ex| {
local_ex
.spawn_into(future, handle)
.map(|x| ScopedTask::<'a, T>(x, PhantomData))
})
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::{
enclose,
timer::{self, sleep, Timer},
SharesManager,
};
use core::mem::MaybeUninit;
use futures::{
future::{join_all, poll_fn},
join,
};
use std::{
cell::Cell,
collections::HashMap,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
Mutex,
},
task::Waker,
};
#[test]
fn create_and_destroy_executor() {
let mut var = Rc::new(RefCell::new(0));
let local_ex = LocalExecutor::default();
let varclone = var.clone();
local_ex.run(async move {
let mut m = varclone.borrow_mut();
*m += 10;
});
let v = Rc::get_mut(&mut var).unwrap();
let v = v.replace(0);
assert_eq!(v, 10);
}
#[test]
fn create_fail_to_bind() {
// If you have a system with 4 billion CPUs let me know and I will
// update this test.
if LocalExecutorBuilder::new(Placement::Fixed(usize::MAX))
.make()
.is_ok()
{
unreachable!("Should have failed");
}
}
#[test]
fn bind_to_cpu_set_range() {
// libc supports cpu ids up to 1023 and will use the intersection of values
// specified by the cpu mask and those present on the system
// https://man7.org/linux/man-pages/man2/sched_setaffinity.2.html#NOTES
assert!(bind_to_cpu_set(vec![0, 1, 2, 3]).is_ok());
assert!(bind_to_cpu_set(0..1024).is_ok());
assert!(bind_to_cpu_set(0..1025).is_err());
}
#[test]
fn create_and_bind() {
if let Err(x) = LocalExecutorBuilder::new(Placement::Fixed(0)).make() {
panic!("got error {:?}", x);
}
}
#[test]
#[should_panic]
fn spawn_without_executor() {
let _ = LocalExecutor::default();
let _ = crate::spawn_local(async move {});
}
#[test]
fn invalid_task_queue() {
let local_ex = LocalExecutor::default();
local_ex.run(async {
let task = crate::spawn_local_into(
async move {
unreachable!("Should not have executed this");
},
TaskQueueHandle { index: 1 },
);
if task.is_ok() {
unreachable!("Should have failed");
}
});
}
#[test]
fn ten_yielding_queues() {
let local_ex = LocalExecutor::default();
// 0 -> no one
// 1 -> t1
// 2 -> t2...
let executed_last = Rc::new(RefCell::new(0));
local_ex.run(async {
let mut joins = Vec::with_capacity(10);
for id in 1..11 {
let exec = executed_last.clone();
joins.push(crate::spawn_local(async move {
for _ in 0..10_000 {
let mut last = exec.borrow_mut();
assert_ne!(id, *last);
*last = id;
drop(last);
crate::executor().yield_task_queue_now().await;
}
}));
}
futures::future::join_all(joins).await;
});
}
#[test]
fn task_with_latency_requirements() {
let local_ex = LocalExecutor::default();
local_ex.run(async {
let not_latency = crate::executor().create_task_queue(
Shares::default(),
Latency::NotImportant,
"test",
);
let latency = crate::executor().create_task_queue(
Shares::default(),
Latency::Matters(Duration::from_millis(2)),
"testlat",
);
let nolat_started = Rc::new(RefCell::new(false));
let lat_status = Rc::new(RefCell::new(false));
// Loop until need_preempt is set. The latency is set to 2ms, but because this
// is a test and can be running overcommitted or on shared infrastructure,
// we'll allow the timer to fire in up to 1s. If it didn't fire in 1s, that's broken.
let nolat = local_ex
.spawn_into(
crate::enclose! { (nolat_started, lat_status)
async move {
*(nolat_started.borrow_mut()) = true;
let start = Instant::now();
// Now busy loop and make sure that we yield when we have to.
loop {
if *(lat_status.borrow()) {
break; // Success!
}
if start.elapsed().as_secs() > 1 {
panic!("Never received preempt signal");
}
crate::yield_if_needed().await;
}
}
},
not_latency,
)
.unwrap();
let lat = local_ex
.spawn_into(
crate::enclose! { (nolat_started, lat_status)
async move {
// In case we are executed first, yield to the other task
loop {
if !(*(nolat_started.borrow())) {
crate::executor().yield_task_queue_now().await;
} else {
break;
}
}
*(lat_status.borrow_mut()) = true;
}
},
latency,
)
.unwrap();
futures::join!(nolat, lat);
});
}
#[test]
fn current_task_queue_matches() {
let local_ex = LocalExecutor::default();
local_ex.run(async {
let tq1 = crate::executor().create_task_queue(
Shares::default(),
Latency::NotImportant,
"test1",
);
let tq2 = crate::executor().create_task_queue(
Shares::default(),
Latency::NotImportant,
"test2",
);
let id1 = tq1.index;
let id2 = tq2.index;
let j0 = crate::spawn_local(async {
assert_eq!(crate::executor().current_task_queue().index, 0);
});
let j1 = crate::spawn_local_into(
async move {
assert_eq!(crate::executor().current_task_queue().index, id1);
},
tq1,
)
.unwrap();
let j2 = crate::spawn_local_into(
async move {
assert_eq!(crate::executor().current_task_queue().index, id2);
},
tq2,
)
.unwrap();
futures::join!(j0, j1, j2);
})
}
#[test]
fn task_optimized_for_throughput() {
let local_ex = LocalExecutor::default();
local_ex.run(async {
let tq1 = crate::executor().create_task_queue(
Shares::default(),
Latency::NotImportant,
"test",
);
let tq2 = crate::executor().create_task_queue(
Shares::default(),
Latency::NotImportant,
"testlat",
);
let first_started = Rc::new(RefCell::new(false));
let second_status = Rc::new(RefCell::new(false));
let first = local_ex
.spawn_into(
crate::enclose! { (first_started, second_status)
async move {
*(first_started.borrow_mut()) = true;
let start = Instant::now();
// Now busy loop and make sure that we yield when we have to.
loop {
if start.elapsed().as_millis() >= 99 {
break;
}
if *(second_status.borrow()) {
panic!("I was preempted but should not have been");
}
crate::yield_if_needed().await;
}
}
},
tq1,
)
.unwrap();
let second = local_ex
.spawn_into(
crate::enclose! { (first_started, second_status)
async move {
// In case we are executed first, yield to the other task
loop {
if !(*(first_started.borrow())) {
crate::executor().yield_task_queue_now().await;
} else {
break;
}
}
*(second_status.borrow_mut()) = true;
}
},
tq2,
)
.unwrap();
futures::join!(first, second);
});
}
#[test]
fn test_detach() {
let ex = LocalExecutor::default();
ex.run(async {
crate::spawn_local(async {
loop {
crate::executor().yield_task_queue_now().await;
}
})
.detach();
Timer::new(Duration::from_micros(100)).await;
});
}
/// Needed because `impl From<libc::timeval> for Duration` is not allowed for foreign types.
fn from_timeval(v: libc::timeval) -> Duration {
Duration::from_secs(v.tv_sec as u64) + Duration::from_micros(v.tv_usec as u64)
}
fn getrusage() -> libc::rusage {
let mut s0 = MaybeUninit::<libc::rusage>::uninit();
let err = unsafe { libc::getrusage(libc::RUSAGE_THREAD, s0.as_mut_ptr()) };
if err != 0 {
panic!("getrusage error = {}", err);
}
unsafe { s0.assume_init() }
}
fn getrusage_utime() -> Duration {
from_timeval(getrusage().ru_utime)
}
#[test]
fn test_no_spin() {
let ex = LocalExecutor::default();
let task_queue = ex.create_task_queue(
Shares::default(),
Latency::Matters(Duration::from_millis(10)),
"my_tq",
);
let start = getrusage_utime();
ex.run(async {
crate::spawn_local_into(
async { timer::sleep(Duration::from_secs(1)).await },
task_queue,
)
.expect("failed to spawn task")
.await;
});
assert!(
getrusage_utime() - start < Duration::from_millis(2),
"expected user time on LE is less than 2 millisecond"
);
}
#[test]
fn test_spin() {
let dur = Duration::from_secs(1);
let ex0 = LocalExecutorBuilder::default().make().unwrap();
let ex0_ru_start = getrusage_utime();
ex0.run(async { timer::sleep(dur).await });
let ex0_ru_finish = getrusage_utime();
let ex = LocalExecutorBuilder::new(Placement::Fixed(0))
.spin_before_park(Duration::from_millis(100))
.make()
.unwrap();
let ex_ru_start = getrusage_utime();
ex.run(async {
crate::spawn_local(async move { timer::sleep(dur).await }).await;
});
let ex_ru_finish = getrusage_utime();
assert!(
ex0_ru_finish - ex0_ru_start < Duration::from_millis(10),
"expected user time on LE0 is less than 10 millisecond"
);
// 100 ms may have passed without us running for 100ms in case
// there are other threads. Need to be a bit more relaxed
assert!(
ex_ru_finish - ex_ru_start >= Duration::from_millis(50),
"expected user time on LE is much greater than 50 millisecond"
);
}
#[test]
fn test_runtime_stats() {
let dur = Duration::from_secs(2);
let ex0 = LocalExecutorBuilder::default().make().unwrap();
ex0.run(async {
assert!(
crate::executor().executor_stats().total_runtime() < Duration::from_nanos(10),
"expected runtime on LE {:#?} is less than 10 ns",
crate::executor().executor_stats().total_runtime()
);
let now = Instant::now();
while now.elapsed().as_millis() < 200 {}
crate::executor().yield_task_queue_now().await;
assert!(
crate::executor().executor_stats().total_runtime() >= Duration::from_millis(200),
"expected runtime on LE0 {:#?} is greater than 200 ms",
crate::executor().executor_stats().total_runtime()
);
timer::sleep(dur).await;
assert!(
crate::executor().executor_stats().total_runtime() < Duration::from_millis(400),
"expected runtime on LE0 {:#?} is not greater than 400 ms",
crate::executor().executor_stats().total_runtime()
);
});
let ex = LocalExecutorBuilder::new(Placement::Fixed(0))
// ensure entire sleep should spin
.spin_before_park(Duration::from_secs(5))
.make()
.unwrap();
ex.run(async {
crate::spawn_local(async move {
assert!(
crate::executor().executor_stats().total_runtime() < Duration::from_nanos(10),
"expected runtime on LE {:#?} is less than 10 ns",
crate::executor().executor_stats().total_runtime()
);
let now = Instant::now();
while now.elapsed().as_millis() < 200 {}
crate::executor().yield_task_queue_now().await;
assert!(
crate::executor().executor_stats().total_runtime()
>= Duration::from_millis(200),
"expected runtime on LE {:#?} is greater than 200 ms",
crate::executor().executor_stats().total_runtime()
);
timer::sleep(dur).await;
assert!(
crate::executor().executor_stats().total_runtime() < Duration::from_millis(400),
"expected runtime on LE {:#?} is not greater than 400 ms",
crate::executor().executor_stats().total_runtime()
);
})
.await;
});
}
// Spin for 2ms and then yield. How many shares we have should control how many
// quanta we manage to execute.
async fn work_quanta() {
let now = Instant::now();
while now.elapsed().as_millis() < 2 {}
crate::executor().yield_task_queue_now().await;
}
macro_rules! test_static_shares {
( $s1:expr, $s2:expr, $work:block ) => {
let local_ex = LocalExecutor::default();
local_ex.run(async {
// Run a latency queue, otherwise a queue will run for too long uninterrupted
// and we'd have to run this test for a very long time for things to equalize.
let tq1 = crate::executor().create_task_queue(
Shares::Static($s1),
Latency::Matters(Duration::from_millis(1)),
"test_1",
);
let tq2 = crate::executor().create_task_queue(
Shares::Static($s2),
Latency::Matters(Duration::from_millis(1)),
"test_2",
);
let tq1_count = Rc::new(Cell::new(0));
let tq2_count = Rc::new(Cell::new(0));
let now = Instant::now();
let t1 = crate::spawn_local_into(
enclose! { (tq1_count, now) async move {
while now.elapsed().as_secs() < 5 {
$work;
tq1_count.replace(tq1_count.get() + 1);
}
}},
tq1,
)
.unwrap();
let t2 = crate::spawn_local_into(
enclose! { (tq2_count, now ) async move {
while now.elapsed().as_secs() < 5 {
$work;
tq2_count.replace(tq2_count.get() + 1);
}
}},
tq2,
)
.unwrap();
join!(t1, t2);
let expected_ratio = $s2 as f64 / (($s2 + $s1) as f64);
let actual_ratio =
tq2_count.get() as f64 / ((tq1_count.get() + tq2_count.get()) as f64);
// Be gentle: we don't know if we're running against other threads, under which
// conditions, etc
assert!((expected_ratio - actual_ratio).abs() < 0.1);
});
};
}
#[test]
fn test_shares_high_disparity_fat_task() {
test_static_shares!(1000, 10, { work_quanta().await });
}
#[test]
fn test_shares_low_disparity_fat_task() {
test_static_shares!(1000, 1000, { work_quanta().await });
}
struct DynamicSharesTest {
shares: Cell<usize>,
}
impl DynamicSharesTest {
fn new() -> Rc<Self> {
Rc::new(Self {
shares: Cell::new(0),
})
}
fn tick(&self, millis: u64) {
if millis < 1000 {
self.shares.replace(1);
} else {
self.shares.replace(1000);
}
}
}
impl SharesManager for DynamicSharesTest {
fn shares(&self) -> usize {
self.shares.get()
}
fn adjustment_period(&self) -> Duration {
Duration::from_millis(1)
}
}
#[test]
fn test_dynamic_shares() {
let local_ex = LocalExecutor::default();
local_ex.run(async {
let bm = DynamicSharesTest::new();
// Reference task queue.
let tq1 = crate::executor().create_task_queue(
Shares::Static(1000),
Latency::Matters(Duration::from_millis(1)),
"test_1",
);
let tq2 = crate::executor().create_task_queue(
Shares::Dynamic(bm.clone()),
Latency::Matters(Duration::from_millis(1)),
"test_2",
);
let tq1_count = Rc::new(RefCell::new(vec![0, 0]));
let tq2_count = Rc::new(RefCell::new(vec![0, 0]));
let now = Instant::now();
let t1 = crate::spawn_local_into(
enclose! { (tq1_count, now) async move {
loop {
let secs = now.elapsed().as_secs();
if secs >= 2 {
break;
}
(*tq1_count.borrow_mut())[secs as usize] += 1;
crate::executor().yield_task_queue_now().await;
}
}},
tq1,
)
.unwrap();
let t2 = crate::spawn_local_into(
enclose! { (tq2_count, now, bm) async move {
loop {
let elapsed = now.elapsed();
let secs = elapsed.as_secs();
if secs >= 2 {
break;
}
bm.tick(elapsed.as_millis() as u64);
(*tq2_count.borrow_mut())[secs as usize] += 1;
crate::executor().yield_task_queue_now().await;
}
}},
tq2,
)
.unwrap();
join!(t1, t2);
// Keep this very simple because every new processor and every load condition
// will yield different results. All we want to validate is: shares were very
// low for most of the first second, so the ratio there should be very low.
// In the second half we should have accumulated much more. Real numbers are
// likely much higher than the targets, but those targets are safe.
let ratios: Vec<f64> = tq1_count
.borrow()
.iter()
.zip(tq2_count.borrow().iter())
.map(|(x, y)| *y as f64 / *x as f64)
.collect();
assert!(ratios[1] > ratios[0]);
assert!(ratios[0] < 0.25);
assert!(ratios[1] > 0.50);
});
}
#[test]
fn multiple_spawn() {
// Issue 241
LocalExecutor::default().run(async {
crate::spawn_local(async {}).detach().await;
// In issue 241, the presence of the second detached waiter caused
// the program to hang.
crate::spawn_local(async {}).detach().await;
});
}
#[test]
#[should_panic(expected = "Message!")]
fn panic_is_not_list() {
LocalExecutor::default().run(async { panic!("Message!") });
}
struct TestFuture {
w: Arc<Mutex<Option<Waker>>>,
}
impl Future for TestFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut w = self.w.lock().unwrap();
match w.take() {
Some(_) => Poll::Ready(()),
None => {
*w = Some(cx.waker().clone());
Poll::Pending
}
}
}
}
#[test]
fn cross_executor_wake_by_ref() {
let w = Arc::new(Mutex::new(None));
let t = w.clone();
let fut = TestFuture { w };
let ex1 = LocalExecutorBuilder::default()
.spawn(|| async move {
fut.await;
})
.unwrap();
let ex2 = LocalExecutorBuilder::default()
.spawn(|| async move {
loop {
sleep(Duration::from_secs(1)).await;
let w = t.lock().unwrap();
if let Some(ref x) = *w {
x.wake_by_ref();
return;
}
}
})
.unwrap();
ex1.join().unwrap();
ex2.join().unwrap();
}
#[test]
fn cross_executor_wake_by_value() {
let w = Arc::new(Mutex::new(None));
let t = w.clone();
let fut = TestFuture { w };
let ex1 = LocalExecutorBuilder::default()
.spawn(|| async move {
fut.await;
})
.unwrap();
let ex2 = LocalExecutorBuilder::default()
.spawn(|| async move {
loop {
sleep(Duration::from_secs(1)).await;
let w = t.lock().unwrap();
if let Some(x) = w.clone() {
x.wake();
return;
}
}
})
.unwrap();
ex1.join().unwrap();
ex2.join().unwrap();
}
// Wakes up the waker in a remote executor
#[test]
fn cross_executor_wake_with_join_handle() {
let w = Arc::new(Mutex::new(None));
let t = w.clone();
let fut = TestFuture { w };
let ex1 = LocalExecutorBuilder::default()
.spawn(|| async move {
let x = crate::spawn_local(fut).detach();
x.await;
})
.unwrap();
let ex2 = LocalExecutorBuilder::default()
.spawn(|| async move {
loop {
sleep(Duration::from_secs(1)).await;
let w = t.lock().unwrap();
if let Some(x) = w.clone() {
x.wake();
return;
}
}
})
.unwrap();
ex1.join().unwrap();
ex2.join().unwrap();
}
// The other side won't be alive to get the notification. We should still
// survive.
#[test]
fn cross_executor_wake_early_drop() {
let w = Arc::new(Mutex::new(None));
let t = w.clone();
let fut = TestFuture { w };
let ex1 = LocalExecutorBuilder::default()
.spawn(|| async move {
let _drop = futures_lite::future::poll_once(fut).await;
})
.unwrap();
let ex2 = LocalExecutorBuilder::default()
.spawn(|| async move {
loop {
sleep(Duration::from_secs(1)).await;
let w = t.lock().unwrap();
if let Some(ref x) = *w {
x.wake_by_ref();
return;
}
}
})
.unwrap();
ex1.join().unwrap();
ex2.join().unwrap();
}
// The other side won't be alive to get the notification and even worse, we hold
// a waker that we notify after the first executor is surely dead. We should
// still survive.
#[test]
fn cross_executor_wake_hold_waker() {
let w = Arc::new(Mutex::new(None));
let t = w.clone();
let fut = TestFuture { w };
let ex1 = LocalExecutorBuilder::default()
.spawn(|| async move {
let _drop = futures_lite::future::poll_once(fut).await;
})
.unwrap();
ex1.join().unwrap();
let ex2 = LocalExecutorBuilder::default()
.spawn(|| async move {
let w = t.lock().unwrap().clone().unwrap();
w.wake_by_ref();
})
.unwrap();
ex2.join().unwrap();
}
#[test]
fn executor_pool_builder() {
let nr_cpus = 4;
let count = Arc::new(AtomicUsize::new(0));
let handles = LocalExecutorPoolBuilder::new(PoolPlacement::Unbound(nr_cpus))
.on_all_shards({
let count = Arc::clone(&count);
|| async move { count.fetch_add(1, Ordering::Relaxed) }
})
.unwrap();
let _: std::thread::ThreadId = handles.handles[0].thread().id();
assert_eq!(nr_cpus, handles.handles().iter().count());
let mut fut_output = handles
.join_all()
.into_iter()
.map(Result::unwrap)
.collect::<Vec<_>>();
fut_output.sort_unstable();
assert_eq!(fut_output, (0..nr_cpus).into_iter().collect::<Vec<_>>());
assert_eq!(nr_cpus, count.load(Ordering::Relaxed));
}
#[test]
fn executor_invalid_executor_count() {
assert!(
LocalExecutorPoolBuilder::new(PoolPlacement::Unbound(0))
.on_all_shards(|| async move {})
.is_err()
);
}
#[test]
fn executor_pool_builder_placements() {
let cpu_set = CpuSet::online().unwrap();
assert!(!cpu_set.is_empty());
for nn in 1..2 {
let nr_execs = nn * cpu_set.len();
let placements = [
PoolPlacement::Unbound(nr_execs),
PoolPlacement::Fenced(nr_execs, cpu_set.clone()),
PoolPlacement::MaxSpread(nr_execs, None),
PoolPlacement::MaxSpread(nr_execs, Some(cpu_set.clone())),
PoolPlacement::MaxPack(nr_execs, None),
PoolPlacement::MaxPack(nr_execs, Some(cpu_set.clone())),
];
for pp in std::array::IntoIter::new(placements) {
let ids = Arc::new(Mutex::new(HashMap::new()));
let cpus = Arc::new(Mutex::new(HashMap::new()));
let cpu_hard_bind =
!matches!(pp, PoolPlacement::Unbound(_) | PoolPlacement::Fenced(_, _));
let handles = LocalExecutorPoolBuilder::new(pp)
.on_all_shards({
let ids = Arc::clone(&ids);
let cpus = Arc::clone(&cpus);
|| async move {
ids.lock()
.unwrap()
.entry(crate::executor().id())
.and_modify(|e| *e += 1)
.or_insert(1);
let pid = nix::unistd::Pid::from_raw(0);
let cpu = nix::sched::sched_getaffinity(pid).unwrap();
cpus.lock()
.unwrap()
.entry(cpu)
.and_modify(|e| *e += 1)
.or_insert(1);
}
})
.unwrap();
assert_eq!(nr_execs, handles.handles().len());
handles
.join_all()
.into_iter()
.for_each(|r| assert!(r.is_ok()));
assert_eq!(nr_execs, ids.lock().unwrap().len());
ids.lock().unwrap().values().for_each(|v| assert_eq!(*v, 1));
if cpu_hard_bind {
assert_eq!(nr_execs, cpus.lock().unwrap().len());
cpus.lock()
.unwrap()
.values()
.for_each(|v| assert_eq!(*v, nn));
}
}
}
}
#[test]
fn executor_pool_builder_shards_limit() {
let cpu_set = CpuSet::online().unwrap();
assert!(!cpu_set.is_empty());
// test: confirm that we can always get shards up to the # of cpus
{
let placements = [
(false, PoolPlacement::Unbound(cpu_set.len())),
(false, PoolPlacement::Fenced(cpu_set.len(), cpu_set.clone())),
(true, PoolPlacement::MaxSpread(cpu_set.len(), None)),
(
true,
PoolPlacement::MaxSpread(cpu_set.len(), Some(cpu_set.clone())),
),
(true, PoolPlacement::MaxPack(cpu_set.len(), None)),
(
true,
PoolPlacement::MaxPack(cpu_set.len(), Some(cpu_set.clone())),
),
];
for (_shard_limited, p) in std::array::IntoIter::new(placements) {
LocalExecutorPoolBuilder::new(p)
.on_all_shards(|| async move {})
.unwrap()
.join_all();
}
}
// test: confirm that some placements fail when shards are # of cpus + 1
{
let placements = [
(false, PoolPlacement::Unbound(1 + cpu_set.len())),
(
false,
PoolPlacement::Fenced(1 + cpu_set.len(), cpu_set.clone()),
),
(true, PoolPlacement::MaxSpread(1 + cpu_set.len(), None)),
(
true,
PoolPlacement::MaxSpread(1 + cpu_set.len(), Some(cpu_set.clone())),
),
(true, PoolPlacement::MaxPack(1 + cpu_set.len(), None)),
(
true,
PoolPlacement::MaxPack(1 + cpu_set.len(), Some(cpu_set)),
),
];
for (shard_limited, p) in std::array::IntoIter::new(placements) {
match LocalExecutorPoolBuilder::new(p).on_all_shards(|| async move {}) {
Ok(handles) => {
handles.join_all();
assert!(!shard_limited);
}
Err(_) => assert!(shard_limited),
}
}
}
}
#[test]
fn scoped_task() {
LocalExecutor::default().run(async {
let mut a = 1;
unsafe {
crate::spawn_scoped_local(async {
a = 2;
})
.await;
}
crate::executor().yield_task_queue_now().await;
assert_eq!(a, 2);
let mut a = 1;
let do_later = unsafe {
crate::spawn_scoped_local(async {
a = 2;
})
};
crate::executor().yield_task_queue_now().await;
do_later.await;
assert_eq!(a, 2);
});
}
#[test]
fn executor_pool_builder_thread_panic() {
let nr_execs = 8;
let res = LocalExecutorPoolBuilder::new(PoolPlacement::Unbound(nr_execs))
.on_all_shards(|| async move { panic!("join handle will be Err") })
.unwrap()
.join_all();
assert_eq!(nr_execs, res.len());
assert!(res.into_iter().all(|r| r.is_err()));
}
#[test]
fn executor_pool_builder_return_values() {
let nr_execs = 8;
let x = Arc::new(AtomicUsize::new(0));
let mut values = LocalExecutorPoolBuilder::new(PoolPlacement::Unbound(nr_execs))
.on_all_shards(|| async move { x.fetch_add(1, Ordering::Relaxed) })
.unwrap()
.join_all()
.into_iter()
.map(Result::unwrap)
.collect::<Vec<_>>();
values.sort_unstable();
assert_eq!(values, (0..nr_execs).into_iter().collect::<Vec<_>>());
}
#[test]
fn executor_pool_builder_spawn_cancel() {
let nr_shards = 8;
let builder = LocalExecutorPoolBuilder::new(PoolPlacement::Unbound(nr_shards));
let nr_executed = Arc::new(AtomicUsize::new(0));
let fut_gen = {
let nr_executed = Arc::clone(&nr_executed);
|| async move {
nr_executed.fetch_add(1, Ordering::Relaxed);
unreachable!("should not execute")
}
};
let mut handles = PoolThreadHandles::new();
let mut cpu_set_gen = placement::CpuSetGenerator::pool(builder.placement.clone()).unwrap();
let latch = Latch::new(builder.placement.executor_count());
let ii_cxl = 2;
for ii in 0..builder.placement.executor_count() {
if ii == nr_shards - ii_cxl {
std::thread::sleep(std::time::Duration::from_millis(100));
assert!(ii_cxl <= latch.cancel().unwrap());
}
match builder.spawn_thread(&mut cpu_set_gen, &latch, fut_gen.clone()) {
Ok(handle) => handles.push(handle),
Err(_) => break,
}
}
assert_eq!(0, nr_executed.load(Ordering::Relaxed));
assert_eq!(nr_shards, handles.handles.len());
handles.join_all().into_iter().for_each(|s| {
assert!(format!("{}", s.unwrap_err()).contains("spawn failed"));
});
}
#[should_panic]
#[test]
fn executor_inception() {
LocalExecutor::default().run(async {
LocalExecutor::default().run(async {});
});
}
enum TaskState {
Pending(Option<Waker>),
Ready,
}
// The following four tests are regression tests for https://github.com/DataDog/glommio/issues/379.
// Here we test against task reference-count underflow. Each scenario is
// covered twice, once with join handles and once with sleep, and for each
// case we test both wake and wake_by_ref.
#[test]
fn wake_by_ref_refcount_underflow_with_join_handle() {
LocalExecutor::default().run(async {
let slot: Rc<RefCell<TaskState>> = Rc::new(RefCell::new(TaskState::Pending(None)));
let cloned_slot = slot.clone();
let jh = crate::spawn_local(async move {
// first task: places its own waker into the slot; when polled, returns
// Ready if the slot is ready, otherwise returns Pending
poll_fn::<(), _>(|cx| {
let current = &mut *cloned_slot.borrow_mut();
match current {
TaskState::Pending(maybe_waker) => match maybe_waker {
Some(_) => unreachable!(),
None => {
*current = TaskState::Pending(Some(cx.waker().clone()));
Poll::Pending
}
},
TaskState::Ready => Poll::Ready(()),
}
})
.await;
})
.detach();
let jh2 = crate::spawn_local(async move {
// second task: takes the first task's waker from the slot, wakes it by
// ref, and then drops it.
let current = &mut *slot.borrow_mut();
match current {
TaskState::Pending(maybe_waker) => {
let waker = maybe_waker.take().unwrap();
waker.wake_by_ref();
*current = TaskState::Ready; // <-- waker dropped here, refcount is zero
}
TaskState::Ready => unreachable!(), // task cannot be ready at this time
}
})
.detach();
join_all(vec![jh, jh2]).await;
});
}
#[test]
fn wake_by_ref_refcount_underflow_with_sleep() {
LocalExecutor::default().run(async {
let slot: Rc<RefCell<TaskState>> = Rc::new(RefCell::new(TaskState::Pending(None)));
let cloned_slot = slot.clone();
crate::spawn_local(async move {
poll_fn::<(), _>(|cx| {
let current = &mut *cloned_slot.borrow_mut();
match current {
TaskState::Pending(maybe_waker) => match maybe_waker {
Some(_) => unreachable!(),
None => {
*current = TaskState::Pending(Some(cx.waker().clone()));
Poll::Pending
}
},
TaskState::Ready => Poll::Ready(()),
}
})
.await;
})
.detach();
crate::spawn_local(async move {
let current = &mut *slot.borrow_mut();
match current {
TaskState::Pending(maybe_waker) => {
let waker = maybe_waker.take().unwrap();
waker.wake_by_ref();
*current = TaskState::Ready;
}
TaskState::Ready => unreachable!(),
}
})
.detach();
timer::sleep(Duration::from_millis(1)).await;
});
}
#[test]
fn wake_refcount_underflow_with_join_handle() {
LocalExecutor::default().run(async {
let slot: Rc<RefCell<TaskState>> = Rc::new(RefCell::new(TaskState::Pending(None)));
let cloned_slot = slot.clone();
let jh = crate::spawn_local(async move {
poll_fn::<(), _>(|cx| {
let current = &mut *cloned_slot.borrow_mut();
match current {
TaskState::Pending(maybe_waker) => match maybe_waker {
Some(_) => unreachable!(),
None => {
*current = TaskState::Pending(Some(cx.waker().clone()));
Poll::Pending
}
},
TaskState::Ready => Poll::Ready(()),
}
})
.await;
})
.detach();
let jh2 = crate::spawn_local(async move {
let current = &mut *slot.borrow_mut();
match current {
TaskState::Pending(maybe_waker) => {
let waker = maybe_waker.take().unwrap();
waker.wake();
*current = TaskState::Ready;
}
TaskState::Ready => unreachable!(),
}
})
.detach();
join_all(vec![jh, jh2]).await;
});
}
#[test]
fn wake_refcount_underflow_with_sleep() {
LocalExecutor::default().run(async {
let slot: Rc<RefCell<TaskState>> = Rc::new(RefCell::new(TaskState::Pending(None)));
let cloned_slot = slot.clone();
crate::spawn_local(async move {
poll_fn::<(), _>(|cx| {
let current = &mut *cloned_slot.borrow_mut();
match current {
TaskState::Pending(maybe_waker) => match maybe_waker {
Some(_) => unreachable!(),
None => {
*current = TaskState::Pending(Some(cx.waker().clone()));
Poll::Pending
}
},
TaskState::Ready => Poll::Ready(()),
}
})
.await;
})
.detach();
crate::spawn_local(async move {
let current = &mut *slot.borrow_mut();
match current {
TaskState::Pending(maybe_waker) => {
let waker = maybe_waker.take().unwrap();
waker.wake();
*current = TaskState::Ready;
}
TaskState::Ready => unreachable!(),
}
})
.detach();
timer::sleep(Duration::from_millis(1)).await;
});
}
} | ran
}
fn run_one_task_queue(&self) -> bool { |
Player.js | import { getTrackDetail, scrobble, getMP3 } from '@/api/track';
import shuffle from 'lodash/shuffle';
import { Howler, Howl } from 'howler';
import { cacheTrackSource, getTrackSource } from '@/utils/db';
import { getAlbum } from '@/api/album';
import { getPlaylistDetail } from '@/api/playlist';
import { getArtist } from '@/api/artist';
import { personalFM, fmTrash } from '@/api/others';
import store from '@/store';
import { isAccountLoggedIn } from '@/utils/auth';
import { trackUpdateNowPlaying, trackScrobble } from '@/api/lastfm';
const electron =
process.env.IS_ELECTRON === true ? window.require('electron') : null;
const ipcRenderer =
process.env.IS_ELECTRON === true ? electron.ipcRenderer : null;
export default class {
constructor() {
// Player state
this._playing = false; // whether playback is in progress
this._progress = 0; // playback progress of the current track
this._enabled = false; // whether the player is enabled
this._repeatMode = 'off'; // off | on | one
this._shuffle = false; // true | false
this._volume = 1; // 0 to 1
this._volumeBeforeMuted = 1; // saves the volume from before muting
// Playback info
this._list = []; // the playlist
this._current = 0; // index of the current track within the playlist
this._shuffledList = []; // shuffled playlist, used when shuffle mode is on
this._shuffledCurrent = 0; // index of the current track within the shuffled list
this._playlistSource = { type: 'album', id: 123 }; // info about the current playlist
this._currentTrack = { id: 86827685 }; // details of the currently playing track
this._playNextList = []; // when this list is not empty, its tracks are played first
this._isPersonalFM = false; // whether personal FM mode is active
this._personalFMTrack = { id: 0 }; // current personal FM track
this._personalFMNextTrack = { id: 0 }; // next personal FM track (prefetched for fast loading)
// howler (https://github.com/goldfire/howler.js)
this._howler = null;
Object.defineProperty(this, '_howler', {
enumerable: false,
});
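// _howler is non-enumerable so that saveSelfToLocalStorage() does not try to serialize it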
// init
this._init();
window.yesplaymusic = {};
window.yesplaymusic.player = this;
}
get repeatMode() {
return this._repeatMode;
}
set repeatMode(mode) {
if (this._isPersonalFM) return;
if (!['off', 'on', 'one'].includes(mode)) {
console.warn("repeatMode: invalid args, must be 'on' | 'off' | 'one'");
return;
}
this._repeatMode = mode;
}
get shuffle() {
return this._shuffle;
}
set shuffle(shuffle) {
if (this._isPersonalFM) return;
if (shuffle !== true && shuffle !== false) {
console.warn('shuffle: invalid args, must be Boolean');
return;
}
this._shuffle = shuffle;
if (shuffle) {
this._shuffleTheList();
}
}
get volume() {
return this._volume;
}
set volume(volume) {
this._volume = volume;
Howler.volume(volume);
}
get list() {
return this.shuffle ? this._shuffledList : this._list;
}
set list(list) {
this._list = list;
}
get current() {
return this.shuffle ? this._shuffledCurrent : this._current;
}
set current(current) {
if (this.shuffle) {
this._shuffledCurrent = current;
} else {
this._current = current;
}
}
get enabled() {
return this._enabled;
}
get playing() {
return this._playing;
}
get currentTrack() {
return this._currentTrack;
}
get playlistSource() {
return this._playlistSource;
}
get playNextList() {
return this._playNextList;
}
get isPersonalFM() {
return this._isPersonalFM;
}
get personalFMTrack() {
return this._personalFMTrack;
}
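// Duration of the current track in whole seconds (dt is in milliseconds);
// one second is shaved off whenever the track is longer than a second.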
get currentTrackDuration() {
const trackDuration = this._currentTrack.dt || 1000;
let duration = ~~(trackDuration / 1000);
return duration > 1 ? duration - 1 : duration;
}
get progress() {
return this._progress;
}
set progress(value) {
if (this._howler) {
this._howler.seek(value);
}
}
get isCurrentTrackLiked() {
return store.state.liked.songs.includes(this.currentTrack.id);
}
_init() {
this._loadSelfFromLocalStorage();
Howler.autoUnlock = false;
Howler.usingWebAudio = true;
Howler.volume(this.volume);
if (this._enabled) {
// restore the previously playing track
this._replaceCurrentTrack(this._currentTrack.id, false).then(() => {
this._howler?.seek(Number(localStorage.getItem('playerCurrentTrackTime') ?? 0));
setInterval(
() =>
localStorage.setItem(
'playerCurrentTrackTime',
this._howler?.seek()
),
1000
);
}); // update audio source and init howler
this._initMediaSession();
this._setIntervals();
}
// initialize personal FM
if (this._personalFMTrack.id === 0 || this._personalFMNextTrack.id === 0) {
personalFM().then(result => {
this._personalFMTrack = result.data[0];
this._personalFMNextTrack = result.data[1];
return this._personalFMTrack;
});
}
}
_setIntervals() {
// sync playback progress
// TODO: if _progress is changed somewhere else, this timer will overwrite that value; this is a bug
setInterval(() => {
this._progress = this._howler === null ? 0 : this._howler.seek();
}, 1000);
}
_getNextTrack() {
if (this._playNextList.length > 0) {
let trackID = this._playNextList.shift();
return [trackID, this.current];
}
// the track is the last in the list && repeat mode is on
if (this.list.length === this.current + 1 && this.repeatMode === 'on') {
return [this.list[0], 0];
}
// returns [trackID, index]
return [this.list[this.current + 1], this.current + 1];
}
_getPrevTrack() {
// the track is the first in the list && repeat mode is on
if (this.current === 0 && this.repeatMode === 'on') {
return [this.list[this.list.length - 1], this.list.length - 1];
}
// returns [trackID, index]
return [this.list[this.current - 1], this.current - 1];
}
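// Shuffle the playlist, pinning firstTrackID to the front; the special
// value 'first' shuffles the entire list with no pinned track.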
async _shuffleTheList(firstTrackID = this._currentTrack.id) {
let list = this._list.filter(tid => tid !== firstTrackID);
if (firstTrackID === 'first') list = this._list;
this._shuffledList = shuffle(list);
if (firstTrackID !== 'first') this._shuffledList.unshift(firstTrackID);
}
async _scrobble(track, time, completed = false) {
console.debug(
`[debug][Player.js] scrobble track 👉 ${track.name} by ${track.ar[0].name} 👉 time:${time} completed: ${completed}`
);
const trackDuration = ~~(track.dt / 1000);
time = completed ? trackDuration : ~~time;
scrobble({
id: track.id,
sourceid: this.playlistSource.id,
time,
});
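// Last.fm guideline: only scrobble once the track has played for half its
// duration or for 4 minutes (240s), whichever comes first.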
if (
store.state.lastfm.key !== undefined &&
(time >= trackDuration / 2 || time >= 240)
) {
const timestamp = ~~(new Date().getTime() / 1000) - time;
trackScrobble({
artist: track.ar[0].name,
track: track.name,
timestamp,
album: track.al.name,
trackNumber: track.no,
duration: trackDuration,
});
}
}
_playAudioSource(source, autoplay = true) {
Howler.unload();
this._howler = new Howl({
src: [source],
html5: true,
format: ['mp3', 'flac'],
});
if (autoplay) {
this.play();
document.title = `${this._currentTrack.name} · ${this._currentTrack.ar[0].name} - YesPlayMusic`;
}
this.setOutputDevice();
this._howler.once('end', () => {
this._nextTrackCallback();
});
}
_getAudioSourceFromCache(id) {
return getTrackSource(id).then(t => {
if (!t) return null;
const source = URL.createObjectURL(new Blob([t.source]));
return source;
});
}
_getAudioSourceFromNetease(track) {
if (isAccountLoggedIn()) {
return getMP3(track.id).then(result => {
if (!result.data[0]) return null;
if (!result.data[0].url) return null;
if (result.data[0].freeTrialInfo !== null) return null; // skip tracks that can only be previewed
const source = result.data[0].url.replace(/^http:/, 'https:');
if (store.state.settings.automaticallyCacheSongs) {
cacheTrackSource(track, source, result.data[0].br);
}
return source;
});
} else {
return new Promise(resolve => {
resolve(`https://music.163.com/song/media/outer/url?id=${track.id}`);
});
}
}
_getAudioSourceFromUnblockMusic(track) {
console.debug(`[debug][Player.js] _getAudioSourceFromUnblockMusic`);
if (
process.env.IS_ELECTRON !== true ||
store.state.settings.enableUnblockNeteaseMusic === false
) {
return null;
}
const source = ipcRenderer.sendSync('unblock-music', track);
if (store.state.settings.automaticallyCacheSongs && source?.url) {
// TODO: replace the "unblockMusic" label with the actual source (e.g. Kuwo, Migu, etc.)
cacheTrackSource(track, source.url, 128000, 'unblockMusic');
}
return source?.url;
}
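// Resolve an audio source with fallbacks: local cache first, then the
// NetEase API, then UnblockNeteaseMusic.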
_getAudioSource(track) {
return this._getAudioSourceFromCache(String(track.id))
.then(source => {
return source ?? this._getAudioSourceFromNetease(track);
})
.then(source => {
return source ?? this._getAudioSourceFromUnblockMusic(track);
});
}
_replaceCurrentTrack(
id,
autoplay = true,
ifUnplayableThen = 'playNextTrack'
) {
if (autoplay && this._currentTrack.name) {
this._scrobble(this.currentTrack, this._howler?.seek());
}
return getTrackDetail(id).then(data => {
let track = data.songs[0];
this._currentTrack = track;
this._updateMediaSessionMetaData(track);
return this._getAudioSource(track).then(source => {
if (source) {
this._playAudioSource(source, autoplay);
this._cacheNextTrack();
return source;
} else {
store.dispatch('showToast', `无法播放 ${track.name}`);
ifUnplayableThen === 'playNextTrack'
? this.playNextTrack()
: this.playPrevTrack();
}
});
});
}
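// Prefetch the next track's details and audio source so switching tracks is seamless.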
_cacheNextTrack() {
let nextTrackID = this._isPersonalFM
? this._personalFMNextTrack.id
: this._getNextTrack()[0];
if (!nextTrackID) return;
getTrackDetail(nextTrackID).then(data => {
let track = data.songs[0];
this._getAudioSource(track);
});
}
_loadSelfFromLocalStorage() {
const player = JSON.parse(localStorage.getItem('player'));
if (!player) return;
for (const [key, value] of Object.entries(player)) {
this[key] = value; | }
}
_initMediaSession() {
if ('mediaSession' in navigator) {
navigator.mediaSession.setActionHandler('play', () => {
this.play();
});
navigator.mediaSession.setActionHandler('pause', () => {
this.pause();
});
navigator.mediaSession.setActionHandler('previoustrack', () => {
this.playPrevTrack();
});
navigator.mediaSession.setActionHandler('nexttrack', () => {
this.playNextTrack();
});
navigator.mediaSession.setActionHandler('stop', () => {
this.pause();
});
navigator.mediaSession.setActionHandler('seekto', event => {
this.seek(event.seekTime);
this._updateMediaSessionPositionState();
});
navigator.mediaSession.setActionHandler('seekbackward', event => {
this.seek(this.seek() - (event.seekOffset || 10));
this._updateMediaSessionPositionState();
});
navigator.mediaSession.setActionHandler('seekforward', event => {
this.seek(this.seek() + (event.seekOffset || 10));
this._updateMediaSessionPositionState();
});
}
}
_updateMediaSessionMetaData(track) {
if ('mediaSession' in navigator === false) {
return;
}
let artists = track.ar.map(a => a.name);
navigator.mediaSession.metadata = new window.MediaMetadata({
title: track.name,
artist: artists.join(','),
album: track.al.name,
artwork: [
{
src: track.al.picUrl + '?param=512y512',
type: 'image/jpg',
sizes: '512x512',
},
],
});
}
_updateMediaSessionPositionState() {
if ('mediaSession' in navigator === false) {
return;
}
if ('setPositionState' in navigator.mediaSession) {
navigator.mediaSession.setPositionState({
duration: ~~(this.currentTrack.dt / 1000),
playbackRate: 1.0,
position: this.seek(),
});
}
}
_nextTrackCallback() {
this._scrobble(this._currentTrack, 0, true);
if (!this.isPersonalFM && this.repeatMode === 'one') {
this._replaceCurrentTrack(this._currentTrack.id);
} else {
this.playNextTrack();
}
}
_loadPersonalFMNextTrack() {
return personalFM().then(result => {
this._personalFMNextTrack = result.data[0];
return this._personalFMNextTrack;
});
}
_playDiscordPresence(track, seekTime = 0) {
if (
process.env.IS_ELECTRON !== true ||
store.state.settings.enableDiscordRichPresence === false
) {
return null;
}
let copyTrack = { ...track };
copyTrack.dt -= seekTime * 1000;
ipcRenderer.send('playDiscordPresence', copyTrack);
}
_pauseDiscordPresence(track) {
if (
process.env.IS_ELECTRON !== true ||
store.state.settings.enableDiscordRichPresence === false
) {
return null;
}
ipcRenderer.send('pauseDiscordPresence', track);
}
currentTrackID() {
const { list, current } = this;
return list[current];
}
appendTrack(trackID) {
this.list.push(trackID);
}
playNextTrack(isFM = false) {
if (this._isPersonalFM || isFM === true) {
this._isPersonalFM = true;
this._personalFMTrack = this._personalFMNextTrack;
this._replaceCurrentTrack(this._personalFMTrack.id);
this._loadPersonalFMNextTrack();
return true;
}
// TODO: show a loading state while switching tracks
const [trackID, index] = this._getNextTrack();
if (trackID === undefined) {
this._howler?.stop();
this._playing = false;
return false;
}
this.current = index;
this._replaceCurrentTrack(trackID);
return true;
}
playPrevTrack() {
const [trackID, index] = this._getPrevTrack();
if (trackID === undefined) return false;
this.current = index;
this._replaceCurrentTrack(trackID, true, 'playPrevTrack');
return true;
}
saveSelfToLocalStorage() {
let player = {};
for (let [key, value] of Object.entries(this)) {
if (key === '_playing') continue;
player[key] = value;
}
localStorage.setItem('player', JSON.stringify(player));
}
pause() {
this._howler?.pause();
this._playing = false;
document.title = 'YesPlayMusic';
this._pauseDiscordPresence(this._currentTrack);
}
play() {
if (this._howler?.playing()) return;
this._howler?.play();
this._playing = true;
document.title = `${this._currentTrack.name} · ${this._currentTrack.ar[0].name} - YesPlayMusic`;
this._playDiscordPresence(this._currentTrack, this.seek());
if (store.state.lastfm.key !== undefined) {
trackUpdateNowPlaying({
artist: this.currentTrack.ar[0].name,
track: this.currentTrack.name,
album: this.currentTrack.al.name,
trackNumber: this.currentTrack.no,
duration: ~~(this.currentTrack.dt / 1000),
});
}
}
playOrPause() {
if (this._howler?.playing()) {
this.pause();
} else {
this.play();
}
}
seek(time = null) {
if (time !== null) {
this._howler?.seek(time);
if (this._playing)
this._playDiscordPresence(this._currentTrack, this.seek());
}
return this._howler === null ? 0 : this._howler.seek();
}
mute() {
if (this.volume === 0) {
this.volume = this._volumeBeforeMuted;
} else {
this._volumeBeforeMuted = this.volume;
this.volume = 0;
}
}
setOutputDevice() {
if (this._howler?._sounds.length <= 0 || !this._howler?._sounds[0]._node) {
return;
}
this._howler?._sounds[0]._node.setSinkId(store.state.settings.outputDevice);
}
replacePlaylist(
trackIDs,
playlistSourceID,
playlistSourceType,
autoPlayTrackID = 'first'
) {
this._isPersonalFM = false;
if (!this._enabled) this._enabled = true;
this.list = trackIDs;
this.current = 0;
this._playlistSource = {
type: playlistSourceType,
id: playlistSourceID,
};
if (this.shuffle) this._shuffleTheList(autoPlayTrackID);
if (autoPlayTrackID === 'first') {
this._replaceCurrentTrack(this.list[0]);
} else {
this.current = trackIDs.indexOf(autoPlayTrackID);
this._replaceCurrentTrack(autoPlayTrackID);
}
}
playAlbumByID(id, trackID = 'first') {
getAlbum(id).then(data => {
let trackIDs = data.songs.map(t => t.id);
this.replacePlaylist(trackIDs, id, 'album', trackID);
});
}
playPlaylistByID(id, trackID = 'first', noCache = false) {
console.debug(
`[debug][Player.js] playPlaylistByID 👉 id:${id} trackID:${trackID} noCache:${noCache}`
);
getPlaylistDetail(id, noCache).then(data => {
let trackIDs = data.playlist.trackIds.map(t => t.id);
this.replacePlaylist(trackIDs, id, 'playlist', trackID);
});
}
playArtistByID(id, trackID = 'first') {
getArtist(id).then(data => {
let trackIDs = data.hotSongs.map(t => t.id);
this.replacePlaylist(trackIDs, id, 'artist', trackID);
});
}
playTrackOnListByID(id, listName = 'default') {
if (listName === 'default') {
this._current = this._list.findIndex(t => t === id);
}
this._replaceCurrentTrack(id);
}
addTrackToPlayNext(trackID, playNow = false) {
this._playNextList.push(trackID);
if (playNow) this.playNextTrack();
}
playPersonalFM() {
this._isPersonalFM = true;
if (!this._enabled) this._enabled = true;
if (this._currentTrack.id !== this._personalFMTrack.id) {
this._replaceCurrentTrack(this._personalFMTrack.id).then(() =>
this.playOrPause()
);
} else {
this.playOrPause();
}
}
moveToFMTrash() {
this._isPersonalFM = true;
this.playNextTrack();
fmTrash(this._personalFMTrack.id);
}
sendSelfToIpcMain() {
if (process.env.IS_ELECTRON !== true) return false;
ipcRenderer.send('player', {
playing: this.playing,
likedCurrentTrack: store.state.liked.songs.includes(this.currentTrack.id),
});
}
switchRepeatMode() {
if (this._repeatMode === 'on') {
this.repeatMode = 'one';
} else if (this._repeatMode === 'one') {
this.repeatMode = 'off';
} else {
this.repeatMode = 'on';
}
}
switchShuffle() {
this.shuffle = !this.shuffle;
}
} | |
find_path.rs | //! An algorithm to find a path to refer to a certain item.
use crate::{
db::DefDatabase,
item_scope::ItemInNs,
path::{ModPath, PathKind},
visibility::Visibility,
CrateId, ModuleDefId, ModuleId,
};
use hir_expand::name::Name;
const MAX_PATH_LEN: usize = 15;
// FIXME: handle local items
/// Find a path that can be used to refer to a certain item. This can depend on
/// *from where* you're referring to the item, hence the `from` parameter.
pub fn find_path(db: &impl DefDatabase, item: ItemInNs, from: ModuleId) -> Option<ModPath> {
find_path_inner(db, item, from, MAX_PATH_LEN)
}
fn find_path_inner(
db: &impl DefDatabase,
item: ItemInNs,
from: ModuleId,
max_len: usize,
) -> Option<ModPath> {
if max_len == 0 {
return None;
}
// Base cases:
// - if the item is already in scope, return the name under which it is
let def_map = db.crate_def_map(from.krate);
let from_scope: &crate::item_scope::ItemScope = &def_map.modules[from.local_id].scope;
if let Some((name, _)) = from_scope.name_of(item) {
return Some(ModPath::from_simple_segments(PathKind::Plain, vec![name.clone()]));
}
// - if the item is the crate root, return `crate`
if item
== ItemInNs::Types(ModuleDefId::ModuleId(ModuleId {
krate: from.krate,
local_id: def_map.root,
}))
{
return Some(ModPath::from_simple_segments(PathKind::Crate, Vec::new()));
}
// - if the item is the module we're in, use `self`
if item == ItemInNs::Types(from.into()) {
return Some(ModPath::from_simple_segments(PathKind::Super(0), Vec::new()));
}
// - if the item is the parent module, use `super` (this is not used recursively, since `super::super` is ugly)
if let Some(parent_id) = def_map.modules[from.local_id].parent {
if item
== ItemInNs::Types(ModuleDefId::ModuleId(ModuleId {
krate: from.krate,
local_id: parent_id,
}))
{
return Some(ModPath::from_simple_segments(PathKind::Super(1), Vec::new()));
}
}
// - if the item is the crate root of a dependency crate, return the name from the extern prelude
for (name, def_id) in &def_map.extern_prelude {
if item == ItemInNs::Types(*def_id) {
return Some(ModPath::from_simple_segments(PathKind::Plain, vec![name.clone()]));
}
}
// - if the item is in the prelude, return the name from there
if let Some(prelude_module) = def_map.prelude {
let prelude_def_map = db.crate_def_map(prelude_module.krate);
let prelude_scope: &crate::item_scope::ItemScope =
&prelude_def_map.modules[prelude_module.local_id].scope;
if let Some((name, vis)) = prelude_scope.name_of(item) {
if vis.is_visible_from(db, from) {
return Some(ModPath::from_simple_segments(PathKind::Plain, vec![name.clone()]));
}
}
}
// Recursive case:
// - if the item is an enum variant, refer to it via the enum
if let Some(ModuleDefId::EnumVariantId(variant)) = item.as_module_def_id() {
if let Some(mut path) = find_path(db, ItemInNs::Types(variant.parent.into()), from) {
let data = db.enum_data(variant.parent);
path.segments.push(data.variants[variant.local_id].name.clone());
return Some(path);
}
// If this doesn't work, it seems we have no way of referring to the
// enum; that's very weird, but there might still be a reexport of the
// variant somewhere
}
// - otherwise, look for modules containing (reexporting) it and import it from one of those
let importable_locations = find_importable_locations(db, item, from);
let mut best_path = None;
let mut best_path_len = max_len;
for (module_id, name) in importable_locations {
let mut path = match find_path_inner(
db,
ItemInNs::Types(ModuleDefId::ModuleId(module_id)),
from,
best_path_len - 1,
) {
None => continue,
Some(path) => path,
};
path.segments.push(name);
if path_len(&path) < best_path_len {
best_path_len = path_len(&path);
best_path = Some(path);
}
}
best_path
}
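// Weight used when comparing candidate paths: each `super` counts as one
// segment, as does a leading `crate`. For example, `super::super::foo::S`
// has length 4, while `crate::S` has length 2.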
fn path_len(path: &ModPath) -> usize {
path.segments.len()
+ match path.kind {
PathKind::Plain => 0,
PathKind::Super(i) => i as usize,
PathKind::Crate => 1,
PathKind::Abs => 0,
PathKind::DollarCrate(_) => 1,
}
}
fn find_importable_locations(
db: &impl DefDatabase,
item: ItemInNs,
from: ModuleId,
) -> Vec<(ModuleId, Name)> {
let crate_graph = db.crate_graph();
let mut result = Vec::new();
// We only look in the crate from which we are importing, and the direct
// dependencies. We cannot refer to names from transitive dependencies
// directly (only through reexports in direct dependencies).
for krate in Some(from.krate)
.into_iter()
.chain(crate_graph.dependencies(from.krate).map(|dep| dep.crate_id))
{
result.extend(
importable_locations_in_crate(db, item, krate)
.iter()
.filter(|(_, _, vis)| vis.is_visible_from(db, from))
.map(|(m, n, _)| (*m, n.clone())),
);
}
result
}
/// Collects all locations from which we might import the item in a particular
/// crate. These include the original definition of the item, and any
/// non-private `use`s.
///
/// Note that the crate doesn't need to be the one in which the item is defined;
/// it might be re-exported in other crates.
fn importable_locations_in_crate(
db: &impl DefDatabase,
item: ItemInNs,
krate: CrateId,
) -> Vec<(ModuleId, Name, Visibility)> {
let def_map = db.crate_def_map(krate);
let mut result = Vec::new();
for (local_id, data) in def_map.modules.iter() {
if let Some((name, vis)) = data.scope.name_of(item) {
let is_private = if let Visibility::Module(private_to) = vis {
private_to.local_id == local_id
} else {
false
};
let is_original_def = if let Some(module_def_id) = item.as_module_def_id() {
data.scope.declarations().any(|it| it == module_def_id)
} else {
false
};
if is_private && !is_original_def {
// Ignore private imports. These could be used if we are
// in a submodule of this module, but that's usually not
// what the user wants; and if this module can import
// the item and we're a submodule of it, then so can we.
// Also this keeps the cached data smaller.
continue;
}
result.push((ModuleId { krate, local_id }, name.clone(), vis));
}
}
result
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_db::TestDB;
use hir_expand::hygiene::Hygiene;
use ra_db::fixture::WithFixture;
use ra_syntax::ast::AstNode;
/// `code` needs to contain a cursor marker; checks that `find_path` for the
/// item the `path` refers to returns that same path when called from the
/// module the cursor is in.
fn check_found_path(code: &str, path: &str) {
let (db, pos) = TestDB::with_position(code);
let module = db.module_for_file(pos.file_id);
let parsed_path_file = ra_syntax::SourceFile::parse(&format!("use {};", path));
let ast_path = parsed_path_file
.syntax_node()
.descendants()
.find_map(ra_syntax::ast::Path::cast)
.unwrap();
let mod_path = ModPath::from_src(ast_path, &Hygiene::new_unhygienic()).unwrap();
let crate_def_map = db.crate_def_map(module.krate);
let resolved = crate_def_map
.resolve_path(
&db,
module.local_id,
&mod_path,
crate::item_scope::BuiltinShadowMode::Module,
)
.0
.take_types()
.unwrap();
let found_path = find_path(&db, ItemInNs::Types(resolved), module);
assert_eq!(found_path, Some(mod_path));
}
#[test]
fn same_module() {
let code = r#"
//- /main.rs
struct S;
<|>
"#;
check_found_path(code, "S");
}
#[test]
fn enum_variant() {
let code = r#"
//- /main.rs
enum E { A }
<|>
"#;
check_found_path(code, "E::A");
}
#[test]
fn sub_module() {
let code = r#"
//- /main.rs
mod foo {
pub struct S;
}
<|>
"#;
check_found_path(code, "foo::S");
}
#[test]
fn super_module() {
let code = r#"
//- /main.rs
mod foo;
//- /foo.rs
mod bar;
struct S;
//- /foo/bar.rs
<|>
"#;
check_found_path(code, "super::S");
}
#[test]
fn self_module() {
let code = r#"
//- /main.rs
mod foo;
//- /foo.rs
<|>
"#;
check_found_path(code, "self");
}
#[test]
fn crate_root() {
let code = r#"
//- /main.rs
mod foo;
//- /foo.rs
<|>
"#;
check_found_path(code, "crate");
}
#[test]
fn same_crate() |
#[test]
fn different_crate() {
let code = r#"
//- /main.rs crate:main deps:std
<|>
//- /std.rs crate:std
pub struct S;
"#;
check_found_path(code, "std::S");
}
#[test]
fn different_crate_renamed() {
let code = r#"
//- /main.rs crate:main deps:std
extern crate std as std_renamed;
<|>
//- /std.rs crate:std
pub struct S;
"#;
check_found_path(code, "std_renamed::S");
}
#[test]
fn same_crate_reexport() {
let code = r#"
//- /main.rs
mod bar {
mod foo { pub(super) struct S; }
pub(crate) use foo::*;
}
<|>
"#;
check_found_path(code, "bar::S");
}
#[test]
fn same_crate_reexport_rename() {
let code = r#"
//- /main.rs
mod bar {
mod foo { pub(super) struct S; }
pub(crate) use foo::S as U;
}
<|>
"#;
check_found_path(code, "bar::U");
}
#[test]
fn different_crate_reexport() {
let code = r#"
//- /main.rs crate:main deps:std
<|>
//- /std.rs crate:std deps:core
pub use core::S;
//- /core.rs crate:core
pub struct S;
"#;
check_found_path(code, "std::S");
}
#[test]
fn prelude() {
let code = r#"
//- /main.rs crate:main deps:std
<|>
//- /std.rs crate:std
pub mod prelude { pub struct S; }
#[prelude_import]
pub use prelude::*;
"#;
check_found_path(code, "S");
}
#[test]
fn enum_variant_from_prelude() {
let code = r#"
//- /main.rs crate:main deps:std
<|>
//- /std.rs crate:std
pub mod prelude {
pub enum Option<T> { Some(T), None }
pub use Option::*;
}
#[prelude_import]
pub use prelude::*;
"#;
check_found_path(code, "None");
check_found_path(code, "Some");
}
#[test]
fn shortest_path() {
let code = r#"
//- /main.rs
pub mod foo;
pub mod baz;
struct S;
<|>
//- /foo.rs
pub mod bar { pub struct S; }
//- /baz.rs
pub use crate::foo::bar::S;
"#;
check_found_path(code, "baz::S");
}
#[test]
fn discount_private_imports() {
let code = r#"
//- /main.rs
mod foo;
pub mod bar { pub struct S; }
use bar::S;
//- /foo.rs
<|>
"#;
// crate::S would be shorter, but using private imports seems wrong
check_found_path(code, "crate::bar::S");
}
#[test]
fn import_cycle() {
let code = r#"
//- /main.rs
pub mod foo;
pub mod bar;
pub mod baz;
//- /bar.rs
<|>
//- /foo.rs
pub use super::baz;
pub struct S;
//- /baz.rs
pub use super::foo;
"#;
check_found_path(code, "crate::foo::S");
}
}
| {
let code = r#"
//- /main.rs
mod foo;
struct S;
//- /foo.rs
<|>
"#;
check_found_path(code, "crate::S");
} |
lib.rs | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![cfg_attr(feature = "deny-warnings", deny(warnings))]
#![warn(clippy::pedantic)]
// Bindgen auto generated code
// won't adhere to the clippy rules below
#![allow(clippy::module_name_repetitions)]
#![allow(clippy::unseparated_literal_suffix)]
#![allow(clippy::used_underscore_binding)]
#[macro_use]
mod exp;
#[macro_use]
mod p11;
pub mod aead;
pub mod agent;
mod agentio;
mod auth;
mod cert;
pub mod constants;
mod err;
pub mod ext;
pub mod hkdf;
pub mod hp;
mod prio;
mod replay;
mod secrets;
pub mod selfencrypt;
mod ssl;
mod time;
pub use self::agent::{
Agent, Client, HandshakeState, Record, RecordList, SecretAgent, SecretAgentInfo,
SecretAgentPreInfo, Server, ZeroRttCheckResult, ZeroRttChecker,
};
pub use self::constants::*;
pub use self::err::{Error, PRErrorCode, Res};
pub use self::ext::{ExtensionHandler, ExtensionHandlerResult, ExtensionWriterResult};
pub use self::p11::{random, SymKey};
pub use self::replay::AntiReplay;
pub use self::secrets::SecretDirection;
pub use auth::AuthenticationStatus;
use neqo_common::once::OnceResult;
use std::ffi::CString;
use std::os::raw::c_char;
use std::path::{Path, PathBuf};
use std::ptr::null;
mod nss {
#![allow(clippy::redundant_static_lifetimes, non_upper_case_globals)]
include!(concat!(env!("OUT_DIR"), "/nss_init.rs"));
}
// Need to map the types through.
fn secstatus_to_res(code: nss::SECStatus) -> Res<()> {
crate::err::secstatus_to_res(code as crate::ssl::SECStatus)
}
enum NssLoaded {
External,
NoDb,
Db(Box<Path>),
}
impl Drop for NssLoaded {
fn drop(&mut self) {
match self {
Self::NoDb | Self::Db(_) => unsafe {
secstatus_to_res(nss::NSS_Shutdown()).expect("NSS Shutdown failed")
},
_ => {}
}
}
}
static mut INITIALIZED: OnceResult<NssLoaded> = OnceResult::new();
fn already_initialized() -> bool { | }
/// Initialize NSS. This only executes the initialization routines once, so if there is any
/// chance that this is called more than once, the extra calls are harmless.
pub fn init() {
// Set time zero.
time::init();
unsafe {
INITIALIZED.call_once(|| {
if already_initialized() {
return NssLoaded::External;
}
secstatus_to_res(nss::NSS_NoDB_Init(null())).expect("NSS_NoDB_Init failed");
secstatus_to_res(nss::NSS_SetDomesticPolicy()).expect("NSS_SetDomesticPolicy failed");
NssLoaded::NoDb
});
}
}
/// This enables SSLTRACE by calling a simple, harmless function to trigger its
/// side effects. SSLTRACE is not enabled in NSS until a socket is made or
/// global options are accessed. Reading an option is the least impact approach.
/// This allows us to use SSLTRACE in all of our unit tests and programs.
#[cfg(debug_assertions)]
fn enable_ssl_trace() {
let opt = ssl::Opt::Locking.as_int();
let mut _v: ::std::os::raw::c_int = 0;
secstatus_to_res(unsafe { ssl::SSL_OptionGetDefault(opt, &mut _v) })
.expect("SSL_OptionGetDefault failed");
}
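/// Initialize NSS with a certificate/key database at `dir`.
/// Like `init`, this runs the initialization routines at most once; calling
/// it after a prior `init`/`init_db` has no further effect.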
pub fn init_db<P: Into<PathBuf>>(dir: P) {
time::init();
unsafe {
INITIALIZED.call_once(|| {
if already_initialized() {
return NssLoaded::External;
}
let path = dir.into();
assert!(path.is_dir());
let pathstr = path.to_str().expect("path converts to string").to_string();
let dircstr = CString::new(pathstr).expect("new CString");
let empty = CString::new("").expect("new empty CString");
secstatus_to_res(nss::NSS_Initialize(
dircstr.as_ptr(),
empty.as_ptr(),
empty.as_ptr(),
nss::SECMOD_DB.as_ptr() as *const c_char,
nss::NSS_INIT_READONLY,
))
.expect("NSS_Initialize failed");
secstatus_to_res(nss::NSS_SetDomesticPolicy()).expect("NSS_SetDomesticPolicy failed");
secstatus_to_res(ssl::SSL_ConfigServerSessionIDCache(
1024,
0,
0,
dircstr.as_ptr(),
))
.expect("SSL_ConfigServerSessionIDCache failed");
#[cfg(debug_assertions)]
enable_ssl_trace();
NssLoaded::Db(path.into_boxed_path())
});
}
}
/// Panic if NSS isn't initialized.
pub fn assert_initialized() {
unsafe {
INITIALIZED.call_once(|| {
panic!("NSS not initialized with init or init_db");
});
}
} | unsafe { nss::NSS_IsInitialized() != 0 } |
net.go | // Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proc
import (
"bytes"
"fmt"
"io"
"time"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/context"
"gvisor.dev/gvisor/pkg/sentry/fs"
"gvisor.dev/gvisor/pkg/sentry/fs/proc/seqfile"
"gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
"gvisor.dev/gvisor/pkg/sentry/inet"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/socket"
"gvisor.dev/gvisor/pkg/sentry/socket/unix"
"gvisor.dev/gvisor/pkg/sentry/socket/unix/transport"
"gvisor.dev/gvisor/pkg/sentry/usermem"
)
// newNetDir creates a new proc net directory.
func (p *proc) newNetDir(ctx context.Context, k *kernel.Kernel, msrc *fs.MountSource) *fs.Inode {
var contents map[string]*fs.Inode
if s := p.k.NetworkStack(); s != nil |
d := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0555))
return newProcInode(ctx, d, msrc, fs.SpecialDirectory, nil)
}
// ifinet6 implements seqfile.SeqSource for /proc/net/if_inet6.
//
// +stateify savable
type ifinet6 struct {
s inet.Stack
}
func (n *ifinet6) contents() []string {
var lines []string
nics := n.s.Interfaces()
for id, naddrs := range n.s.InterfaceAddrs() {
nic, ok := nics[id]
if !ok {
// NIC was added after Interfaces was called. We'll just
// ignore it.
continue
}
for _, a := range naddrs {
// IPv6 only.
if a.Family != linux.AF_INET6 {
continue
}
// Fields:
// IPv6 address displayed in 32 hexadecimal chars without colons
// Netlink device number (interface index) in hexadecimal (use nic id)
// Prefix length in hexadecimal
// Scope value (use 0)
// Interface flags
// Device name
lines = append(lines, fmt.Sprintf("%032x %02x %02x %02x %02x %8s\n", a.Addr, id, a.PrefixLen, 0, a.Flags, nic.Name))
}
}
return lines
}
// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (*ifinet6) NeedsUpdate(generation int64) bool {
return true
}
// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
func (n *ifinet6) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
if h != nil {
return nil, 0
}
var data []seqfile.SeqData
for _, l := range n.contents() {
data = append(data, seqfile.SeqData{Buf: []byte(l), Handle: (*ifinet6)(nil)})
}
return data, 0
}
// netDev implements seqfile.SeqSource for /proc/net/dev.
//
// +stateify savable
type netDev struct {
s inet.Stack
}
// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (n *netDev) NeedsUpdate(generation int64) bool {
return true
}
// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData. See Linux's
// net/core/net-procfs.c:dev_seq_show.
func (n *netDev) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
if h != nil {
return nil, 0
}
interfaces := n.s.Interfaces()
contents := make([]string, 2, 2+len(interfaces))
// Add the table header. From net/core/net-procfs.c:dev_seq_show.
contents[0] = "Inter-| Receive | Transmit\n"
contents[1] = " face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed\n"
for _, i := range interfaces {
// Implements the same format as
// net/core/net-procfs.c:dev_seq_printf_stats.
var stats inet.StatDev
if err := n.s.Statistics(&stats, i.Name); err != nil {
log.Warningf("Failed to retrieve interface statistics for %v: %v", i.Name, err)
continue
}
l := fmt.Sprintf(
"%6s: %7d %7d %4d %4d %4d %5d %10d %9d %8d %7d %4d %4d %4d %5d %7d %10d\n",
i.Name,
// Received
stats[0], // bytes
stats[1], // packets
stats[2], // errors
stats[3], // dropped
stats[4], // fifo
stats[5], // frame
stats[6], // compressed
stats[7], // multicast
// Transmitted
stats[8], // bytes
stats[9], // packets
stats[10], // errors
stats[11], // dropped
stats[12], // fifo
stats[13], // colls
stats[14], // carrier
stats[15]) // compressed
contents = append(contents, l)
}
var data []seqfile.SeqData
for _, l := range contents {
data = append(data, seqfile.SeqData{Buf: []byte(l), Handle: (*netDev)(nil)})
}
return data, 0
}
// netUnix implements seqfile.SeqSource for /proc/net/unix.
//
// +stateify savable
type netUnix struct {
k *kernel.Kernel
}
// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (*netUnix) NeedsUpdate(generation int64) bool {
return true
}
// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
func (n *netUnix) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
if h != nil {
return []seqfile.SeqData{}, 0
}
var buf bytes.Buffer
for _, se := range n.k.ListSockets() {
s := se.Sock.Get()
if s == nil {
log.Debugf("Couldn't resolve weakref with ID %v in socket table, racing with destruction?", se.ID)
continue
}
sfile := s.(*fs.File)
if family, _, _ := sfile.FileOperations.(socket.Socket).Type(); family != linux.AF_UNIX {
s.DecRef()
// Not a unix socket.
continue
}
sops := sfile.FileOperations.(*unix.SocketOperations)
addr, err := sops.Endpoint().GetLocalAddress()
if err != nil {
log.Warningf("Failed to retrieve socket name from %+v: %v", sfile, err)
addr.Addr = "<unknown>"
}
sockFlags := 0
if ce, ok := sops.Endpoint().(transport.ConnectingEndpoint); ok {
if ce.Listening() {
// For unix domain sockets, Linux reports a single flag
// value, __SO_ACCEPTCON, if the socket is listening.
sockFlags = linux.SO_ACCEPTCON
}
}
// In the socket entry below, the value for the 'Num' field requires
// some consideration. Linux prints the address to the struct
// unix_sock representing a socket in the kernel, but may redact the
// value for unprivileged users depending on the kptr_restrict
// sysctl.
//
// One use for this field is to allow a privileged user to
// introspect into the kernel memory to determine information about
// a socket not available through procfs, such as the socket's peer.
//
// On gvisor, returning a pointer to our internal structures would
// be pointless, as it wouldn't match the memory layout for struct
// unix_sock, making introspection difficult. We could populate a
// struct unix_sock with the appropriate data, but even that
// requires consideration for which kernel version to emulate, as
// the definition of this struct changes over time.
//
// For now, we always redact this pointer.
fmt.Fprintf(&buf, "%#016p: %08X %08X %08X %04X %02X %5d",
(*unix.SocketOperations)(nil), // Num, pointer to kernel socket struct.
sfile.ReadRefs()-1, // RefCount, don't count our own ref.
0, // Protocol, always 0 for UDS.
sockFlags, // Flags.
sops.Endpoint().Type(), // Type.
sops.State(), // State.
sfile.InodeID(), // Inode.
)
// Path
if len(addr.Addr) != 0 {
if addr.Addr[0] == 0 {
// Abstract path.
fmt.Fprintf(&buf, " @%s", string(addr.Addr[1:]))
} else {
fmt.Fprintf(&buf, " %s", string(addr.Addr))
}
}
fmt.Fprintf(&buf, "\n")
s.DecRef()
}
data := []seqfile.SeqData{
{
Buf: []byte("Num RefCount Protocol Flags Type St Inode Path\n"),
Handle: n,
},
{
Buf: buf.Bytes(),
Handle: n,
},
}
return data, 0
}
func networkToHost16(n uint16) uint16 {
// n is in network byte order, so is big-endian. The most-significant byte
// should be stored in the lower address.
//
// We manually inline binary.BigEndian.Uint16() because Go does not support
// non-primitive consts, so binary.BigEndian is a (mutable) var, so calls to
// binary.BigEndian.Uint16() require a read of binary.BigEndian and an
// interface method call, defeating inlining.
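// For example, port 80 arrives over the wire as the bytes {0x00, 0x50}; read
// into a uint16 on a little-endian host that is 0x5000, and this function
// maps it back to 0x0050 (80). On big-endian hosts it is the identity.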
buf := [2]byte{byte(n >> 8 & 0xff), byte(n & 0xff)}
return usermem.ByteOrder.Uint16(buf[:])
}
func writeInetAddr(w io.Writer, family int, i linux.SockAddr) {
switch family {
case linux.AF_INET:
var a linux.SockAddrInet
if i != nil {
a = *i.(*linux.SockAddrInet)
}
// linux.SockAddrInet.Port is stored in the network byte order and is
// printed like a number in host byte order. Note that all numbers in host
// byte order are printed with the most-significant byte first when
// formatted with %X. See get_tcp4_sock() and udp4_format_sock() in Linux.
port := networkToHost16(a.Port)
// linux.SockAddrInet.Addr is stored as a byte slice in big-endian order
// (i.e. most-significant byte in index 0). Linux represents this as a
// __be32 which is a typedef for an unsigned int, and is printed with
// %X. This means that for a little-endian machine, Linux prints the
// least-significant byte of the address first. To emulate this, we first
// invert the byte order for the address using usermem.ByteOrder.Uint32,
// which makes it have the equivalent encoding to a __be32 on a little
// endian machine. Note that this operation is a no-op on a big endian
// machine. Then similar to Linux, we format it with %X, which will print
// the most-significant byte of the __be32 address first, which is now
// actually the least-significant byte of the original address in
// linux.SockAddrInet.Addr on little endian machines, due to the conversion.
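// For example, 127.0.0.1 is stored as the bytes {0x7F, 0x00, 0x00, 0x01}, so
// on a little-endian host 127.0.0.1:80 is printed as "0100007F:0050".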
addr := usermem.ByteOrder.Uint32(a.Addr[:])
fmt.Fprintf(w, "%08X:%04X ", addr, port)
case linux.AF_INET6:
var a linux.SockAddrInet6
if i != nil {
a = *i.(*linux.SockAddrInet6)
}
port := networkToHost16(a.Port)
addr0 := usermem.ByteOrder.Uint32(a.Addr[0:4])
addr1 := usermem.ByteOrder.Uint32(a.Addr[4:8])
addr2 := usermem.ByteOrder.Uint32(a.Addr[8:12])
addr3 := usermem.ByteOrder.Uint32(a.Addr[12:16])
fmt.Fprintf(w, "%08X%08X%08X%08X:%04X ", addr0, addr1, addr2, addr3, port)
}
}
func commonReadSeqFileDataTCP(ctx context.Context, n seqfile.SeqHandle, k *kernel.Kernel, h seqfile.SeqHandle, fa int, header []byte) ([]seqfile.SeqData, int64) {
// t may be nil here if our caller is not part of a task goroutine. This can
// happen for example if we're here for "sentryctl cat". When t is nil,
// degrade gracefully and retrieve what we can.
t := kernel.TaskFromContext(ctx)
if h != nil {
return nil, 0
}
var buf bytes.Buffer
for _, se := range k.ListSockets() {
s := se.Sock.Get()
if s == nil {
log.Debugf("Couldn't resolve weakref with ID %v in socket table, racing with destruction?", se.ID)
continue
}
sfile := s.(*fs.File)
sops, ok := sfile.FileOperations.(socket.Socket)
if !ok {
panic(fmt.Sprintf("Found non-socket file in socket table: %+v", sfile))
}
if family, stype, _ := sops.Type(); !(family == fa && stype == linux.SOCK_STREAM) {
s.DecRef()
// Not a TCP socket of the requested address family.
continue
}
// Linux's documentation for the fields below can be found at
// https://www.kernel.org/doc/Documentation/networking/proc_net_tcp.txt.
// For Linux's implementation, see net/ipv4/tcp_ipv4.c:get_tcp4_sock().
// Note that the header doesn't contain labels for all the fields.
// Field: sl; entry number.
fmt.Fprintf(&buf, "%4d: ", se.ID)
// Field: local_address.
var localAddr linux.SockAddr
if t != nil {
if local, _, err := sops.GetSockName(t); err == nil {
localAddr = local
}
}
writeInetAddr(&buf, fa, localAddr)
// Field: rem_address.
var remoteAddr linux.SockAddr
if t != nil {
if remote, _, err := sops.GetPeerName(t); err == nil {
remoteAddr = remote
}
}
writeInetAddr(&buf, fa, remoteAddr)
// Field: state; socket state.
fmt.Fprintf(&buf, "%02X ", sops.State())
// Field: tx_queue, rx_queue; number of packets in the transmit and
// receive queue. Unimplemented.
fmt.Fprintf(&buf, "%08X:%08X ", 0, 0)
// Field: tr, tm->when; timer active state and number of jiffies
// until timer expires. Unimplemented.
fmt.Fprintf(&buf, "%02X:%08X ", 0, 0)
// Field: retrnsmt; number of unrecovered RTO timeouts.
// Unimplemented.
fmt.Fprintf(&buf, "%08X ", 0)
// Field: uid.
uattr, err := sfile.Dirent.Inode.UnstableAttr(ctx)
if err != nil {
log.Warningf("Failed to retrieve unstable attr for socket file: %v", err)
fmt.Fprintf(&buf, "%5d ", 0)
} else {
creds := auth.CredentialsFromContext(ctx)
fmt.Fprintf(&buf, "%5d ", uint32(uattr.Owner.UID.In(creds.UserNamespace).OrOverflow()))
}
// Field: timeout; number of unanswered 0-window probes.
// Unimplemented.
fmt.Fprintf(&buf, "%8d ", 0)
// Field: inode.
fmt.Fprintf(&buf, "%8d ", sfile.InodeID())
// Field: refcount. Don't count the ref we obtain while dereferencing
// the weakref to this socket.
fmt.Fprintf(&buf, "%d ", sfile.ReadRefs()-1)
// Field: Socket struct address. Redacted due to the same reason as
// the 'Num' field in /proc/net/unix, see netUnix.ReadSeqFileData.
fmt.Fprintf(&buf, "%#016p ", (*socket.Socket)(nil))
// Field: retransmit timeout. Unimplemented.
fmt.Fprintf(&buf, "%d ", 0)
// Field: predicted tick of soft clock (delayed ACK control data).
// Unimplemented.
fmt.Fprintf(&buf, "%d ", 0)
// Field: (ack.quick<<1)|ack.pingpong, Unimplemented.
fmt.Fprintf(&buf, "%d ", 0)
// Field: sending congestion window, Unimplemented.
fmt.Fprintf(&buf, "%d ", 0)
// Field: Slow start size threshold, -1 if threshold >= 0xFFFF.
// Unimplemented, report as large threshold.
fmt.Fprintf(&buf, "%d", -1)
fmt.Fprintf(&buf, "\n")
s.DecRef()
}
data := []seqfile.SeqData{
{
Buf: header,
Handle: n,
},
{
Buf: buf.Bytes(),
Handle: n,
},
}
return data, 0
}
// netTCP implements seqfile.SeqSource for /proc/net/tcp.
//
// +stateify savable
type netTCP struct {
k *kernel.Kernel
}
// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (*netTCP) NeedsUpdate(generation int64) bool {
return true
}
// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
func (n *netTCP) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
header := []byte(" sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode \n")
return commonReadSeqFileDataTCP(ctx, n, n.k, h, linux.AF_INET, header)
}
// netTCP6 implements seqfile.SeqSource for /proc/net/tcp6.
//
// +stateify savable
type netTCP6 struct {
k *kernel.Kernel
}
// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (*netTCP6) NeedsUpdate(generation int64) bool {
return true
}
// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
func (n *netTCP6) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
header := []byte(" sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode\n")
return commonReadSeqFileDataTCP(ctx, n, n.k, h, linux.AF_INET6, header)
}
// netUDP implements seqfile.SeqSource for /proc/net/udp.
//
// +stateify savable
type netUDP struct {
k *kernel.Kernel
}
// NeedsUpdate implements seqfile.SeqSource.NeedsUpdate.
func (*netUDP) NeedsUpdate(generation int64) bool {
return true
}
// ReadSeqFileData implements seqfile.SeqSource.ReadSeqFileData.
func (n *netUDP) ReadSeqFileData(ctx context.Context, h seqfile.SeqHandle) ([]seqfile.SeqData, int64) {
// t may be nil here if our caller is not part of a task goroutine. This can
// happen for example if we're here for "sentryctl cat". When t is nil,
// degrade gracefully and retrieve what we can.
t := kernel.TaskFromContext(ctx)
if h != nil {
return nil, 0
}
var buf bytes.Buffer
for _, se := range n.k.ListSockets() {
s := se.Sock.Get()
if s == nil {
log.Debugf("Couldn't resolve weakref with ID %v in socket table, racing with destruction?", se.ID)
continue
}
sfile := s.(*fs.File)
sops, ok := sfile.FileOperations.(socket.Socket)
if !ok {
panic(fmt.Sprintf("Found non-socket file in socket table: %+v", sfile))
}
if family, stype, _ := sops.Type(); family != linux.AF_INET || stype != linux.SOCK_DGRAM {
s.DecRef()
// Not a udp4 socket.
continue
}
// For Linux's implementation, see net/ipv4/udp.c:udp4_format_sock().
// Field: sl; entry number.
fmt.Fprintf(&buf, "%5d: ", se.ID)
// Field: local_address.
var localAddr linux.SockAddrInet
if t != nil {
if local, _, err := sops.GetSockName(t); err == nil {
localAddr = *local.(*linux.SockAddrInet)
}
}
writeInetAddr(&buf, linux.AF_INET, &localAddr)
// Field: rem_address.
var remoteAddr linux.SockAddrInet
if t != nil {
if remote, _, err := sops.GetPeerName(t); err == nil {
remoteAddr = *remote.(*linux.SockAddrInet)
}
}
writeInetAddr(&buf, linux.AF_INET, &remoteAddr)
// Field: state; socket state.
fmt.Fprintf(&buf, "%02X ", sops.State())
// Field: tx_queue, rx_queue; number of packets in the transmit and
// receive queue. Unimplemented.
fmt.Fprintf(&buf, "%08X:%08X ", 0, 0)
// Field: tr, tm->when. Always 0 for UDP.
fmt.Fprintf(&buf, "%02X:%08X ", 0, 0)
// Field: retrnsmt. Always 0 for UDP.
fmt.Fprintf(&buf, "%08X ", 0)
// Field: uid.
uattr, err := sfile.Dirent.Inode.UnstableAttr(ctx)
if err != nil {
log.Warningf("Failed to retrieve unstable attr for socket file: %v", err)
fmt.Fprintf(&buf, "%5d ", 0)
} else {
creds := auth.CredentialsFromContext(ctx)
fmt.Fprintf(&buf, "%5d ", uint32(uattr.Owner.UID.In(creds.UserNamespace).OrOverflow()))
}
// Field: timeout. Always 0 for UDP.
fmt.Fprintf(&buf, "%8d ", 0)
// Field: inode.
fmt.Fprintf(&buf, "%8d ", sfile.InodeID())
// Field: ref; reference count on the socket inode. Don't count the ref
// we obtain while dereferencing the weakref to this socket.
fmt.Fprintf(&buf, "%d ", sfile.ReadRefs()-1)
// Field: Socket struct address. Redacted due to the same reason as
// the 'Num' field in /proc/net/unix, see netUnix.ReadSeqFileData.
fmt.Fprintf(&buf, "%#016p ", (*socket.Socket)(nil))
// Field: drops; number of dropped packets. Unimplemented.
fmt.Fprintf(&buf, "%d", 0)
fmt.Fprintf(&buf, "\n")
s.DecRef()
}
data := []seqfile.SeqData{
{
Buf: []byte(" sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops \n"),
Handle: n,
},
{
Buf: buf.Bytes(),
Handle: n,
},
}
return data, 0
}
| {
contents = map[string]*fs.Inode{
"dev": seqfile.NewSeqFileInode(ctx, &netDev{s: s}, msrc),
// The following files are simple stubs until they are
// implemented in netstack, if the file contains a
// header the stub is just the header otherwise it is
// an empty file.
"arp": newStaticProcInode(ctx, msrc, []byte("IP address HW type Flags HW address Mask Device")),
"netlink": newStaticProcInode(ctx, msrc, []byte("sk Eth Pid Groups Rmem Wmem Dump Locks Drops Inode")),
"netstat": newStaticProcInode(ctx, msrc, []byte("TcpExt: SyncookiesSent SyncookiesRecv SyncookiesFailed EmbryonicRsts PruneCalled RcvPruned OfoPruned OutOfWindowIcmps LockDroppedIcmps ArpFilter TW TWRecycled TWKilled PAWSPassive PAWSActive PAWSEstab DelayedACKs DelayedACKLocked DelayedACKLost ListenOverflows ListenDrops TCPPrequeued TCPDirectCopyFromBacklog TCPDirectCopyFromPrequeue TCPPrequeueDropped TCPHPHits TCPHPHitsToUser TCPPureAcks TCPHPAcks TCPRenoRecovery TCPSackRecovery TCPSACKReneging TCPFACKReorder TCPSACKReorder TCPRenoReorder TCPTSReorder TCPFullUndo TCPPartialUndo TCPDSACKUndo TCPLossUndo TCPLostRetransmit TCPRenoFailures TCPSackFailures TCPLossFailures TCPFastRetrans TCPForwardRetrans TCPSlowStartRetrans TCPTimeouts TCPLossProbes TCPLossProbeRecovery TCPRenoRecoveryFail TCPSackRecoveryFail TCPSchedulerFailed TCPRcvCollapsed TCPDSACKOldSent TCPDSACKOfoSent TCPDSACKRecv TCPDSACKOfoRecv TCPAbortOnData TCPAbortOnClose TCPAbortOnMemory TCPAbortOnTimeout TCPAbortOnLinger TCPAbortFailed TCPMemoryPressures TCPSACKDiscard TCPDSACKIgnoredOld TCPDSACKIgnoredNoUndo TCPSpuriousRTOs TCPMD5NotFound TCPMD5Unexpected TCPMD5Failure TCPSackShifted TCPSackMerged TCPSackShiftFallback TCPBacklogDrop TCPMinTTLDrop TCPDeferAcceptDrop IPReversePathFilter TCPTimeWaitOverflow TCPReqQFullDoCookies TCPReqQFullDrop TCPRetransFail TCPRcvCoalesce TCPOFOQueue TCPOFODrop TCPOFOMerge TCPChallengeACK TCPSYNChallenge TCPFastOpenActive TCPFastOpenActiveFail TCPFastOpenPassive TCPFastOpenPassiveFail TCPFastOpenListenOverflow TCPFastOpenCookieReqd TCPSpuriousRtxHostQueues BusyPollRxPackets TCPAutoCorking TCPFromZeroWindowAdv TCPToZeroWindowAdv TCPWantZeroWindowAdv TCPSynRetrans TCPOrigDataSent TCPHystartTrainDetect TCPHystartTrainCwnd TCPHystartDelayDetect TCPHystartDelayCwnd TCPACKSkippedSynRecv TCPACKSkippedPAWS TCPACKSkippedSeq TCPACKSkippedFinWait2 TCPACKSkippedTimeWait TCPACKSkippedChallenge TCPWinProbe TCPKeepAlive TCPMTUPFail TCPMTUPSuccess")),
"packet": newStaticProcInode(ctx, msrc, []byte("sk RefCnt Type Proto Iface R Rmem User Inode")),
"protocols": newStaticProcInode(ctx, msrc, []byte("protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em")),
// Linux sets psched values to: nsec per usec, psched
// tick in ns, 1000000, high res timer ticks per sec
// (ClockGetres returns 1ns resolution).
"psched": newStaticProcInode(ctx, msrc, []byte(fmt.Sprintf("%08x %08x %08x %08x\n", uint64(time.Microsecond/time.Nanosecond), 64, 1000000, uint64(time.Second/time.Nanosecond)))),
"ptype": newStaticProcInode(ctx, msrc, []byte("Type Device Function")),
"route": newStaticProcInode(ctx, msrc, []byte("Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT")),
"tcp": seqfile.NewSeqFileInode(ctx, &netTCP{k: k}, msrc),
"udp": seqfile.NewSeqFileInode(ctx, &netUDP{k: k}, msrc),
"unix": seqfile.NewSeqFileInode(ctx, &netUnix{k: k}, msrc),
}
if s.SupportsIPv6() {
contents["if_inet6"] = seqfile.NewSeqFileInode(ctx, &ifinet6{s: s}, msrc)
contents["ipv6_route"] = newStaticProcInode(ctx, msrc, []byte(""))
contents["tcp6"] = seqfile.NewSeqFileInode(ctx, &netTCP6{k: k}, msrc)
contents["udp6"] = newStaticProcInode(ctx, msrc, []byte(" sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode"))
}
} |
userAtEvents.index.js | /**
* @package Core
* @link http://ican.openacalendar.org/ OpenACalendar Open Source Software - Website
* @license http://ican.openacalendar.org/license.html 3-clause BSD
* @copyright (c) 2013-2014, JMB Technology Limited, http://jmbtechnology.co.uk/
* @author James Baster <[email protected]>
*/
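// Fetches the current user's attendance for an event and renders it into the
// matching .formWrapper: a read-only summary for past events, or an
// auto-saving form (attendance + privacy) for upcoming ones. Expects `data`
// to carry the site and event slugs used in the URLs below.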
function showCurrentUserAttendanceForEvent(data) {
$('#currentUserAttendanceForSite'+data.site+'Event'+data.slug+' .formWrapper').html("Loading ...");
var ajax = $.ajax({
url: '/site/'+data.site+'/event/'+data.slug+'/myAttendance.json',
type: 'POST',
}).success(function ( attendanceData ) {
var wrapper = $('#currentUserAttendanceForSite'+data.site+'Event'+data.slug+' .formWrapper');
if (attendanceData.inPast == 1) {
var html = '';
html += 'You said you ';
html += (attendanceData.attending == 'no'?'wouldn\'t':'');
html += (attendanceData.attending == 'maybe'?'might':'');
html += (attendanceData.attending == 'yes'?'would':'');
html += ' attend.';
wrapper.html(html);
} else {
var html = '<form action="/site/'+data.site+'/event/'+data.slug+'/myAttendance.json" method="post">';
html += '<input type="hidden" name="CSFRToken" value="'+attendanceData.CSFRToken+'">';
html += 'You ';
html += '<select name="attending">';
html += '<option value="no" '+(attendanceData.attending == 'no'?'selected':'')+'>are not</option>';
html += '<option value="maybe" '+(attendanceData.attending == 'maybe'?'selected':'')+'>might be</option>';
html += '<option value="yes" '+(attendanceData.attending == 'yes'?'selected':'')+'>will be</option>';
html += '</select> attending.';
html += '<span class="UserAttendingPrivacyOptionsWrapper" '+(attendanceData.attending == 'no'?'style="display:none;"':'')+'>';
html += ' This is ';
html += '<select name="privacy">';
html += '<option value="private" '+(attendanceData.privacy == 'private'?'selected':'')+'>private</option>';
html += '<option value="public" '+(attendanceData.privacy == 'public'?'selected':'')+'>public</option>';
html += '</select>';
html += '</span>';
html += '<span class="savingIndicator" style="display:none;"><img src="/theme/default/img/ajaxLoading.gif"> Saving ...</span>';
html += '<span class="savedIndicator" style="display:none;">Saved!</span>';
html += '</form>';
wrapper.html(html);
wrapper.children('form').change(function() {
var formObj = $(this);
var savingIndicatorObj = formObj.children(".savingIndicator");
var savedIndicatorObj = formObj.children(".savedIndicator");
savingIndicatorObj.show();
savedIndicatorObj.hide();
var ajax = $.ajax({
url: formObj.attr('action'),
type: 'POST',
data : formObj.serialize()
}).success(function ( eventdata ) {
savingIndicatorObj.hide();
savedIndicatorObj.show();
});
var attendingObj = formObj.children('select[name="attending"]');
var privacyWrapperObj = formObj.children(".UserAttendingPrivacyOptionsWrapper");
if (attendingObj.val() == 'no') {
privacyWrapperObj.hide();
} else {
privacyWrapperObj.show();
}
var imageDiv = formObj.parent().parent().children('.activationLinkWrapper');
if (attendingObj.val() == 'yes') {
imageDiv.html('<img src="/theme/default/img/actionUserAttendingIcon.png" alt="You are attending" title="You are attending">');
} else if (attendingObj.val() == 'maybe') { | }
});
}
});
} | imageDiv.html('<img src="/theme/default/img/actionUserMaybeAttendingIcon.png" alt="You are maybe attending" title="You are maybe attending">');
} else {
imageDiv.html('<img src="/theme/default/img/actionUserNotAttendingIcon.png" alt="You are not attending" title="You are not attending">'); |
regex.py | """ Regular expression match """
import re
pattern = re.compile("(.*) regex '(.*)'")
def | (groups, lsv_fn):
"""Regular expression search function"""
field, pattern_str = groups  # renamed to avoid shadowing the module-level `pattern`
route_regex = re.compile(pattern_str)
return lambda data: route_regex.search(str(lsv_fn(data, field))) is not None
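# Illustrative usage of the factory above (`factory` stands in for its name,
# which is elided in this excerpt; the lsv_fn accessor is an assumed example):
#   matcher = factory(("name", "^foo"), lambda data, field: data[field])
#   matcher({"name": "foobar"})  # True: "foobar" matches the regex "^foo"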
| fn |
user.component.ts | import { Component } from '@angular/core';
import { PostsService } from '../services/posts.service';
@Component({
moduleId: module.id
,selector: 'user'
,templateUrl: 'user.component.html'
,providers: [PostsService]
})
export class | {
firstName: string;
lastName: string;
email: string;
address: Address;
hobbies: string[];
showHobbies: boolean;
posts: Post[];
constructor(private postsService: PostsService) {
this.firstName = 'Josh';
this.lastName = 'Orvis';
this.email = '[email protected]';
this.address = {
street: '2906 Blueridge Ave'
,city: 'Silver Spring'
,state: 'MD'
,zip: '20902'
};
this.hobbies = ['Music','Movies'];
this.showHobbies = false;
this.postsService.getPosts().subscribe(posts => {
this.posts = posts;
});
}
toggleHobbies() {
this.showHobbies = !this.showHobbies;
}
addHobby(hobby:string) {
if (this.hobbies.indexOf(hobby) < 0) {
this.hobbies.push(hobby);
}
}
deleteHobby(i:number) {
this.hobbies.splice(i,1);
}
}
interface Address {
street: string;
city: string;
state: string;
zip: string;
}
interface Post {
id: number;
title: string;
body: string;
}
| UserComponent |
models.rs | use super::sanitiser::{SanitisedString, VecSanitisedString};
use crate::{create_manager::Blob, models::Document};
use chrono::{SecondsFormat, Utc};
use regex::Regex;
use search_client::models::{DocumentType, IndexEntry};
use std::{collections::HashMap, str};
#[derive(Clone, Debug, PartialEq)]
pub struct BlobMetadata {
pub file_name: SanitisedString,
pub doc_type: DocumentType,
pub title: SanitisedString,
pub pl_number: String,
pub product_names: VecSanitisedString,
pub active_substances: VecSanitisedString,
pub author: SanitisedString,
pub keywords: Option<VecSanitisedString>,
}
impl BlobMetadata {
#[allow(clippy::too_many_arguments)]
pub fn new(
file_name: String,
doc_type: DocumentType,
title: String,
pl_number: String,
product_names: Vec<String>,
active_substances: Vec<String>,
author: String,
keywords: Option<Vec<String>>,
) -> Self {
BlobMetadata {
file_name: file_name.into(),
doc_type,
title: title.into(),
pl_number,
product_names: product_names.into(),
active_substances: active_substances.into(),
author: author.into(),
keywords: keywords.map(|keywords| keywords.into()),
}
}
fn facets(&self) -> Vec<String> {
create_facets_by_active_substance(
self.product_names.clone(),
self.active_substances.clone(),
)
}
}
impl Into<BlobMetadata> for Document {
fn into(self) -> BlobMetadata {
let title = SanitisedString::from(&self.name);
let pl_number = extract_product_licences(&self.pl_number);
BlobMetadata {
file_name: SanitisedString::from(&self.id),
doc_type: self.document_type,
title,
pl_number,
product_names: VecSanitisedString::from(
self.products
.iter()
.map(|a| a.to_uppercase())
.collect::<Vec<String>>(),
),
active_substances: VecSanitisedString::from(
self.active_substances
.iter()
.map(|a| a.to_uppercase())
.collect::<Vec<String>>(),
),
author: SanitisedString::from(&self.author),
keywords: self.keywords.map(VecSanitisedString::from),
}
}
}
impl Into<HashMap<String, String>> for BlobMetadata {
fn into(self) -> HashMap<String, String> {
let mut metadata: HashMap<String, String> = HashMap::new();
metadata.insert("file_name".to_string(), self.file_name.to_string());
metadata.insert("doc_type".to_string(), self.doc_type.to_string());
metadata.insert("title".to_string(), self.title.to_string());
metadata.insert("product_name".to_string(), self.product_names.join(", "));
metadata.insert(
"substance_name".to_string(),
self.active_substances.to_json(),
);
metadata.insert("facets".to_string(), to_json(self.facets()));
if let Some(keywords) = self.keywords.clone() {
metadata.insert("keywords".to_string(), keywords.join(" "));
}
metadata.insert("pl_number".to_string(), self.pl_number.clone());
metadata.insert("author".to_string(), self.author.to_string());
metadata
}
}
impl From<Blob> for IndexEntry {
fn | (blob: Blob) -> Self {
Self {
content: "Content not yet available".to_owned(),
rev_label: "1".to_owned(),
product_name: blob.metadata.product_names.join(", "),
created: Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true),
release_state: "Y".to_owned(),
keywords: blob
.metadata
.keywords
.clone()
.unwrap_or_default()
.join(", "),
title: blob.metadata.title.to_string(),
pl_number: vec![blob.metadata.pl_number.to_string()],
file_name: blob.metadata.file_name.to_string(),
doc_type: blob.metadata.doc_type,
suggestions: vec![],
substance_name: blob.metadata.active_substances.to_vec_string(),
facets: blob.metadata.facets(),
metadata_storage_content_type: String::default(),
metadata_storage_size: blob.size,
metadata_storage_last_modified: Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true),
metadata_storage_content_md5: String::default(),
metadata_storage_name: blob.name.to_owned(),
metadata_storage_path: blob.path,
metadata_content_type: String::default(),
metadata_language: String::default(),
}
}
}
pub fn to_json(words: Vec<String>) -> String {
serde_json::to_string(&words).expect("Couldn't create JSON array.")
}
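// Facets are hierarchical search keys built per active substance:
// "<initial>", then "<initial>, <substance>", then
// "<initial>, <substance>, <joined product names>" (see the tests below).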
pub fn create_facets_by_active_substance(
products: VecSanitisedString,
active_substances: VecSanitisedString,
) -> Vec<String> {
let mut facets: Vec<String> = active_substances
.to_vec_string()
.iter()
.map(|a| {
if let Some(first) = a.to_string().chars().next() {
vec![
first.to_string(),
[first.to_string(), a.to_string()].join(", "),
[first.to_string(), a.to_string(), products.join(", ")].join(", "),
]
} else {
vec![]
}
})
.flatten()
.collect();
facets.sort();
facets.dedup();
facets
}
pub fn extract_product_licences(input: &str) -> String {
lazy_static! {
static ref RE_WHITESPACE: Regex = Regex::new(r"(\s+|/|_|-)").expect("cannot compile regex");
static ref RE_PL: Regex = Regex::new(r"(?i:\b|PL)(\s+|/|_|-)*\d{5}(\s+|/|_|-)*\d{4}")
.expect("cannot compile regex");
}
let product_licences: Vec<String> = RE_PL
.find_iter(input)
.map(|m| {
RE_WHITESPACE
.replace_all(m.as_str(), "")
.to_ascii_uppercase()
})
.map(|s| {
if s.starts_with("PL") {
s
} else {
String::from("PL") + s.as_str()
}
})
.collect();
to_json(product_licences)
}
#[cfg(test)]
mod test {
use super::*;
use crate::models::FileSource;
use search_client::models::DocumentType;
#[test]
fn derive_metadata() {
let doc = Document {
id: "CON123456".to_string(),
name: "Paracetamol Plus PL 12345/6789".to_string(),
document_type: DocumentType::Spc,
author: "JRR Tolkien".to_string(),
products: vec![
"Effective product 1".to_string(),
"Effective product 2".to_string(),
],
keywords: Some(vec![
"Very good for you".to_string(),
"Cures headaches".to_string(),
"PL 12345/6789".to_string(),
]),
pl_number: "PL 12345/6789".to_string(),
active_substances: vec!["Paracetamol".to_string(), "Caffeine".to_string()],
file_path: "location/on/disk".to_string(),
file_source: FileSource::Sentinel,
};
let expected_file_name = "CON123456".to_string();
let expected_doc_type = "Spc".to_string();
let expected_title = "Paracetamol Plus PL 12345/6789".to_string();
let expected_author = "JRR Tolkien".to_string();
let expected_product_name = "EFFECTIVE PRODUCT 1, EFFECTIVE PRODUCT 2".to_string();
let expected_substance_name = "[\"PARACETAMOL\",\"CAFFEINE\"]".to_string();
let expected_keywords = "Very good for you Cures headaches PL 12345/6789".to_string();
let expected_pl_number = "[\"PL123456789\"]".to_string();
let output_metadata: HashMap<String, String> = Into::<BlobMetadata>::into(doc).into();
assert_eq!(output_metadata["file_name"], expected_file_name);
assert_eq!(output_metadata["doc_type"], expected_doc_type);
assert_eq!(output_metadata["title"], expected_title);
assert_eq!(output_metadata["author"], expected_author);
assert_eq!(output_metadata["product_name"], expected_product_name);
assert_eq!(output_metadata["substance_name"], expected_substance_name);
assert_eq!(output_metadata["keywords"], expected_keywords);
assert_eq!(output_metadata["pl_number"], expected_pl_number);
}
#[test]
fn test_create_facets_by_active_substance() {
let active_substances = vec![
"LOSARTAN POTASSIUM".to_string(),
"HYDROCHLOROTHIAZIDE".to_string(),
"L-TEST".to_string(),
];
let products = vec![
"LOSARTAN POTASSIUM / HYDROCHLOROTHIAZIDE 100 MG /25 MG FILM-COATED TABLETS".to_owned(),
];
let expected = vec![
"H",
"H, HYDROCHLOROTHIAZIDE",
"H, HYDROCHLOROTHIAZIDE, LOSARTAN POTASSIUM / HYDROCHLOROTHIAZIDE 100 MG /25 MG FILM-COATED TABLETS",
"L",
"L, L-TEST",
"L, L-TEST, LOSARTAN POTASSIUM / HYDROCHLOROTHIAZIDE 100 MG /25 MG FILM-COATED TABLETS",
"L, LOSARTAN POTASSIUM",
"L, LOSARTAN POTASSIUM, LOSARTAN POTASSIUM / HYDROCHLOROTHIAZIDE 100 MG /25 MG FILM-COATED TABLETS",
];
assert_eq!(
create_facets_by_active_substance(
VecSanitisedString::from(products),
VecSanitisedString::from(active_substances)
),
expected
);
}
#[test]
fn test_create_facets_by_active_substance_sanitises() {
let active_substances = vec!["CAFÉ".to_string(), "FÊTE".to_string(), "NAÏVE".to_string()];
let products = vec!["MOTÖRHEAD".to_owned()];
let expected = vec![
"C",
"C, CAF",
"C, CAF, MOTRHEAD",
"F",
"F, FTE",
"F, FTE, MOTRHEAD",
"N",
"N, NAVE",
"N, NAVE, MOTRHEAD",
];
assert_eq!(
create_facets_by_active_substance(
VecSanitisedString::from(products),
VecSanitisedString::from(active_substances)
),
expected
);
}
#[test]
fn extract_product_license_test() {
let input = vec![
"00 PL123451234",
"01 pl123451234",
"02 123451234",
"03 PL 12345 1234",
"04 PL 12345 1234",
"05 test pl 12345 1234",
"06 pl 12345 1234 test",
"07 12345 1234",
"08 PL 12345/1234",
"09 PL/12345/1234",
"10 pl 12345/1234",
"11 pl/12345/1234",
"12 12345/1234",
"13 PL 12345_1234",
"14 PL_12345_1234",
"15 pl 12345_1234",
"16 pl_12345_1234",
"17 12345_1234",
"18 PL 12345-1234",
"19 PL-12345-1234",
"20 pl 12345-1234",
"21 pl-12345-1234",
"22 12345-1234",
"23 12345-1234GG",
"PL 12345/1234-0001",
"leaflet MAH GENERIC_PL 12345-1234R.pdf",
];
let output = "[\"PL123451234\"]";
input
.iter()
.for_each(|i| assert_eq!(extract_product_licences(i), output));
}
#[test]
fn extract_multiple_product_licences() {
let input = "00 PL123451234 01 pl123451235__ 02 123451236-03 PL 12345 1237";
let output = "[\"PL123451234\",\"PL123451235\",\"PL123451236\",\"PL123451237\"]";
assert_eq!(extract_product_licences(input), output);
}
#[test]
fn extract_product_license_test_not_found() {
assert_eq!(extract_product_licences("no pl number here"), "[]");
}
#[test]
fn parses_blob_metadata_from_document() {
let document = Document {
id: "con12345".to_string(),
name: "Some SPC".to_string(),
document_type: DocumentType::Spc,
author: "test".to_string(),
products: vec![
"Generic Paracetamol".to_string(),
"Special Paracetamol".to_string(),
],
keywords: None,
pl_number: "PL 12345/0010-0001".to_string(),
active_substances: vec!["paracetamol".to_string()],
file_source: FileSource::Sentinel,
file_path: "/home/sentinel/something.pdf".to_string(),
};
let result: BlobMetadata = document.into();
assert_eq!(
result,
BlobMetadata {
file_name: SanitisedString::from("con12345".to_string()),
doc_type: DocumentType::Spc,
title: SanitisedString::from("Some SPC".to_string()),
pl_number: "[\"PL123450010\"]".to_string(),
product_names: VecSanitisedString::from(vec![
"GENERIC PARACETAMOL".to_string(),
"SPECIAL PARACETAMOL".to_string()
]),
active_substances: VecSanitisedString::from(vec!["PARACETAMOL".to_string()]),
author: SanitisedString::from("test".to_string()),
keywords: None,
}
)
}
}
| from |
DownloadAnnotationsExampleUseCase.ts | import { UploadDataRepository } from "../repositories/UploadDataRepository";
export class DownloadAnnotationsExampleUseCase {
constructor(private uploadDataRepository: UploadDataRepository) {}
execute() { | return this.uploadDataRepository.downloadAnnotationsExample();
}
} | |
networks.go | package networks
type Network struct {
Base58P2PKHVersion byte
Base58P2SHVersion byte
InsightURL string
Bech32Prefix string
P2ProxyStratum string
P2ProxyURL string
WalletDB string
OCMBackend string
}
var Active Network
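// SetNetwork populates Active with the parameters for either the test
// network (testnet == true) or the main network.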
func SetNetwork(testnet bool) {
if testnet {
Active = Network{
Base58P2PKHVersion: 74,
Base58P2SHVersion: 196,
InsightURL: "https://vtc-insight-testnet.gertjaap.org/",
Bech32Prefix: "tvtc",
P2ProxyStratum: "stratum+tcp://p2proxy-testnet.gertjaap.org:9171",
P2ProxyURL: "https://p2proxy-testnet.gertjaap.org/",
WalletDB: "wallet-testnet.db",
}
} else {
Active = Network{
Base58P2PKHVersion: 30,
Base58P2SHVersion: 22,
InsightURL: "https://sochain.com/",
OCMBackend: "https://ocm-backend.blkidx.org/",
Bech32Prefix: "vtc",
P2ProxyStratum: getClosestNodeStratum(),
P2ProxyURL: getClosestNodeURL(),
WalletDB: "wallet.db",
}
} | } |
|
rover_publisher.py | #!/usr/bin/env python
# Copyright (C) 2022 Rhys Mainwaring
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from scipy.spatial.transform import Rotation as Rotation
import math
import time
from ignition.msgs.header_pb2 import Header
from ignition.msgs.pose_pb2 import Pose
from ignition.msgs.quaternion_pb2 import Quaternion
from ignition.msgs.time_pb2 import Time
from ignition.msgs.twist_pb2 import Twist
from ignition.msgs.vector3d_pb2 import Vector3d
from ignition.transport import AdvertiseMessageOptions
from ignition.transport import Node
def main():
# Create a transport node and advertise a topic
node = Node()
pub_options = AdvertiseMessageOptions()
pose_topic = "/pose"
pose_msg_type_name = Pose.DESCRIPTOR.full_name
pose_pub = node.advertise(
pose_topic, pose_msg_type_name, pub_options)
if pose_pub.valid():
print("Advertising {} on topic [{}]".format(
pose_msg_type_name, pose_topic))
else:
print("Error advertising topic [{}]".format(pose_topic))
twist_topic = "/twist"
twist_msg_type_name = Twist.DESCRIPTOR.full_name
twist_pub = node.advertise(
twist_topic, twist_msg_type_name, pub_options)
if twist_pub.valid():
print("Advertising {} on topic [{}]".format(
twist_msg_type_name, twist_topic))
else:
print("Error advertising topic [{}]".format(twist_topic))
# rover moving in a circle of the given radius at constant angular velocity
radius = 5.0
ang_vel = 0.1
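# Parametric circular motion: x = r*cos(w*t), y = r*sin(w*t); the linear
# velocity is the time derivative: vx = -r*w*sin(w*t), vy = r*w*cos(w*t).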
# publish messages at 2Hz
start = time.time_ns()
count = 0
try:
while True:
# update time
now = time.time_ns()
time_ns = now - start
time_s = int(time_ns/1.0E9)
time_ns = int(time_ns % 1000000000)
id = count
count += 1
# update position, orientation and velocity
wz = ang_vel
theta = wz * time_s
c = math.cos(theta)
s = math.sin(theta)
x = radius * c
y = radius * s
vx = -1.0 * radius * wz * s
vy = radius * wz * c
rot = Rotation.from_euler("xyz", [0.0, 0.0, theta])
quat = rot.as_quat()
# Prepare the messages.
time_msg = Time()
time_msg.sec = time_s
time_msg.nsec = time_ns
header = Header()
header.stamp.CopyFrom(time_msg)
position = Vector3d()
position.x = x
position.y = y
position.z = 0.0
orientation = Quaternion()
orientation.x = quat[0]
orientation.y = quat[1] | pose = Pose()
pose.name = "base_link"
pose.id = id
pose.header.CopyFrom(header)
pose.position.CopyFrom(position)
pose.orientation.CopyFrom(orientation)
lin_vel_msg = Vector3d()
lin_vel_msg.x = vx
lin_vel_msg.y = vy
lin_vel_msg.z = 0.0
ang_vel_msg = Vector3d()
ang_vel_msg.x = 0.0
ang_vel_msg.y = 0.0
ang_vel_msg.z = wz
twist = Twist()
twist.header.CopyFrom(header)
twist.linear.CopyFrom(lin_vel_msg)
twist.angular.CopyFrom(ang_vel_msg)
if not pose_pub.publish(pose):
break
if not twist_pub.publish(twist):
break
print("Publishing pose on topic [{}], twist on topic [{}]".format(
pose_topic, twist_topic))
time.sleep(0.5)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
main() | orientation.z = quat[2]
orientation.w = quat[3]
|
ast.rs | use std::fmt;
use string_cache::DefaultAtom as Atom;
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Span {
pub lo: usize,
pub hi: usize,
}
impl Span {
pub fn new(lo: usize, hi: usize) -> Self {
Span { lo, hi }
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Program {
pub items: Vec<Item>,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Item {
StructDefn(StructDefn),
FnDefn(FnDefn),
TraitDefn(TraitDefn),
OpaqueTyDefn(OpaqueTyDefn),
Impl(Impl),
Clause(Clause),
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct StructDefn {
pub name: Identifier,
pub variable_kinds: Vec<VariableKind>,
pub where_clauses: Vec<QuantifiedWhereClause>,
pub fields: Vec<Field>,
pub flags: StructFlags,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct StructFlags {
pub upstream: bool,
pub fundamental: bool,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct FnDefn {
pub name: Identifier,
pub variable_kinds: Vec<VariableKind>,
pub where_clauses: Vec<QuantifiedWhereClause>,
pub argument_types: Vec<Ty>,
pub return_type: Ty,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct TraitDefn {
pub name: Identifier,
pub variable_kinds: Vec<VariableKind>,
pub where_clauses: Vec<QuantifiedWhereClause>,
pub assoc_ty_defns: Vec<AssocTyDefn>,
pub flags: TraitFlags,
pub well_known: Option<WellKnownTrait>,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum WellKnownTrait {
SizedTrait,
CopyTrait,
CloneTrait,
DropTrait,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct TraitFlags {
pub auto: bool,
pub marker: bool,
pub upstream: bool,
pub fundamental: bool,
pub non_enumerable: bool,
pub coinductive: bool,
pub object_safe: bool,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct AssocTyDefn {
pub name: Identifier,
pub variable_kinds: Vec<VariableKind>,
pub bounds: Vec<QuantifiedInlineBound>,
pub where_clauses: Vec<QuantifiedWhereClause>,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OpaqueTyDefn {
pub ty: Ty,
pub variable_kinds: Vec<VariableKind>,
pub identifier: Identifier,
pub bounds: Vec<QuantifiedInlineBound>,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum VariableKind {
Ty(Identifier),
IntegerTy(Identifier),
FloatTy(Identifier),
Lifetime(Identifier),
Const(Identifier),
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum GenericArg {
Ty(Ty),
Lifetime(Lifetime),
Id(Identifier),
Const(Const),
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Const {
Id(Identifier),
Value(u32),
}
#[derive(Clone, PartialEq, Eq, Debug)]
/// An inline bound, e.g. `: Foo<K>` in `impl<K, T: Foo<K>> SomeType<T>`.
pub enum InlineBound {
TraitBound(TraitBound),
AliasEqBound(AliasEqBound),
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct QuantifiedInlineBound {
pub variable_kinds: Vec<VariableKind>,
pub bound: InlineBound,
}
#[derive(Clone, PartialEq, Eq, Debug)]
/// Represents a trait bound on e.g. a type or type parameter.
/// Does not know anything about what it's binding.
pub struct TraitBound {
pub trait_name: Identifier,
pub args_no_self: Vec<GenericArg>,
}
#[derive(Clone, PartialEq, Eq, Debug)]
/// Represents an alias equality bound on e.g. a type or type parameter.
/// Does not know anything about what it's binding.
pub struct AliasEqBound {
pub trait_bound: TraitBound,
pub name: Identifier,
pub args: Vec<GenericArg>,
pub value: Ty,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Kind {
Ty,
Lifetime,
Const,
}
impl fmt::Display for Kind {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match *self {
Kind::Ty => "type",
Kind::Lifetime => "lifetime",
Kind::Const => "const",
})
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Impl {
pub variable_kinds: Vec<VariableKind>,
pub trait_ref: TraitRef,
pub polarity: Polarity,
pub where_clauses: Vec<QuantifiedWhereClause>,
pub assoc_ty_values: Vec<AssocTyValue>,
pub impl_type: ImplType,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum ImplType {
Local,
External,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct AssocTyValue {
pub name: Identifier,
pub variable_kinds: Vec<VariableKind>,
pub value: Ty,
pub default: bool,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Ty {
Id {
name: Identifier,
},
Dyn {
bounds: Vec<QuantifiedInlineBound>,
lifetime: Lifetime,
},
Apply {
name: Identifier,
args: Vec<GenericArg>,
},
Projection {
proj: ProjectionTy,
},
ForAll {
lifetime_names: Vec<Identifier>,
ty: Box<Ty>,
},
Tuple {
types: Vec<Box<Ty>>,
},
Scalar {
ty: ScalarType,
},
Slice {
ty: Box<Ty>,
},
Array {
ty: Box<Ty>,
len: Const,
},
Raw {
mutability: Mutability,
ty: Box<Ty>,
},
Ref {
mutability: Mutability,
lifetime: Lifetime,
ty: Box<Ty>,
},
Str,
Never,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum IntTy {
Isize,
I8,
I16,
I32,
I64,
I128,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum UintTy {
Usize,
U8,
U16,
U32,
U64,
U128,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum FloatTy {
F32,
F64,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum ScalarType {
Bool,
Char,
Int(IntTy),
Uint(UintTy),
Float(FloatTy),
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Mutability {
Mut,
Not,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Lifetime {
Id { name: Identifier },
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct ProjectionTy {
pub trait_ref: TraitRef,
pub name: Identifier,
pub args: Vec<GenericArg>,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct TraitRef {
pub trait_name: Identifier,
pub args: Vec<GenericArg>,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Polarity {
/// `impl Foo for Bar`
Positive,
/// `impl !Foo for Bar`
Negative,
}
impl Polarity {
pub fn from_bool(polarity: bool) -> Polarity {
if polarity {
Polarity::Positive
} else |
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Identifier {
pub str: Atom,
pub span: Span,
}
impl fmt::Display for Identifier {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.str)
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum WhereClause {
Implemented { trait_ref: TraitRef },
ProjectionEq { projection: ProjectionTy, ty: Ty },
LifetimeOutlives { a: Lifetime, b: Lifetime },
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum DomainGoal {
Holds { where_clause: WhereClause },
Normalize { projection: ProjectionTy, ty: Ty },
TraitRefWellFormed { trait_ref: TraitRef },
TyWellFormed { ty: Ty },
TyFromEnv { ty: Ty },
TraitRefFromEnv { trait_ref: TraitRef },
IsLocal { ty: Ty },
IsUpstream { ty: Ty },
IsFullyVisible { ty: Ty },
LocalImplAllowed { trait_ref: TraitRef },
Compatible,
DownstreamType { ty: Ty },
Reveal,
ObjectSafe { id: Identifier },
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum LeafGoal {
DomainGoal { goal: DomainGoal },
UnifyGenericArgs { a: GenericArg, b: GenericArg },
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct QuantifiedWhereClause {
pub variable_kinds: Vec<VariableKind>,
pub where_clause: WhereClause,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Field {
pub name: Identifier,
pub ty: Ty,
}
#[derive(Clone, PartialEq, Eq, Debug)]
/// This allows users to add arbitrary `A :- B` clauses into the
/// logic; it has no equivalent in Rust, but it's useful for testing.
pub struct Clause {
pub variable_kinds: Vec<VariableKind>,
pub consequence: DomainGoal,
pub conditions: Vec<Box<Goal>>,
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Goal {
ForAll(Vec<VariableKind>, Box<Goal>),
Exists(Vec<VariableKind>, Box<Goal>),
Implies(Vec<Clause>, Box<Goal>),
And(Box<Goal>, Vec<Box<Goal>>),
Not(Box<Goal>),
/// The `compatible { G }` syntax
Compatible(Box<Goal>),
// Additional kinds of goals:
Leaf(LeafGoal),
}
| {
Polarity::Negative
} |
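// Hedged usage sketch (not part of ast.rs): constructing a `TraitRef` such as
// `Foo<Bar>` from the AST types above. `Atom` is the string_cache::DefaultAtom
// imported at the top of the file; the spans here are illustrative.
#[allow(dead_code)]
fn example_trait_ref() -> TraitRef {
    let ident = |s: &str, lo: usize| Identifier {
        str: Atom::from(s),
        span: Span::new(lo, lo + s.len()),
    };
    TraitRef {
        trait_name: ident("Foo", 0),
        args: vec![GenericArg::Ty(Ty::Id { name: ident("Bar", 4) })],
    }
}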
Server.ts | import * as http from 'http';
import * as tls from 'tls';
import * as https from 'https';
import * as net from 'net';
import * as fs from 'fs';
import * as _ from 'lodash';
import {ILoggerApi, Log, TodoException} from '@typexs/base';
import {DEFAULT_SERVER_OPTIONS, IServerOptions} from './IServerOptions';
import {Exceptions} from './Exceptions';
export interface IServerApi {
beforeStart?(server: Server): Promise<any>;
}
export class Server {
_options: IServerOptions;
_abort = false;
_secured = true;
_both = false;
inc = 0;
cache: { [key: number]: { t: any, s: net.Socket } } = {};
server: net.Server = null;
wrapper: IServerApi = null;
fn: Function = null;
private logger: ILoggerApi;
private $connections: { [key: string]: net.Socket } = {};
initialize(options: IServerOptions, wrapper: IServerApi = null) {
this._options = _.defaultsDeep(options, DEFAULT_SERVER_OPTIONS);
this.logger = _.get(options, 'logger', Log.getLoggerFor(Server));
this._secured = /^https/.test(this._options.protocol);
if (this._options.cert_file) {
this._options.cert = fs.readFileSync(this._options.cert_file);
}
if (this._options.key_file) {
this._options.key = fs.readFileSync(this._options.key_file);
}
if (this._options.ca_file) {
this._options.ca = fs.readFileSync(this._options.ca_file);
}
if (this._options.fn) {
if (typeof this._options.fn === 'function') {
this.fn = this._options.fn;
} else if (typeof this._options.fn === 'string' && this[this._options.fn] && typeof this[this._options.fn] === 'function') {
this.fn = this[this._options.fn];
} else {
throw new TodoException('wrong callback');
}
} else {
this.fn = this.root;
}
this.wrapper = wrapper;
}
url(): string {
return this._options.protocol + '://' + this._options.ip + ':' + this._options.port;
}
get protocol(): string {
return this._options.protocol;
}
get stall(): number {
return this._options.stall;
}
set stall(n: number) {
this._options.stall = n;
}
get isSecured(): boolean {
return this._secured;
}
response(req: http.IncomingMessage, res: http.ServerResponse) {
const inc = this.inc++;
const self = this;
const t = setTimeout(function () {
// self._options.fn;
self.fn(req, res);
clearTimeout(self.cache[inc].t);
delete self.cache[inc];
}, this.stall);
this.cache[inc] = {t: t, s: req.socket};
}
createServer(): net.Server {
const self = this;
let server: net.Server = null;
self.$connections = {};
if (this._secured) {
const https_server = https.createServer(this._options, this.response.bind(this));
server = https_server;
} else {
const http_server = http.createServer(this.response.bind(this));
http_server.setTimeout(
self._options.timeout, (socket?: net.Socket) => {
self.debug('server timeout reached: ' + self._options.timeout);
if (socket) {
socket.end();
socket.destroy(Exceptions.newSocketTimeout());
}
});
server = http_server;
}
return server;
}
root(req: http.IncomingMessage, res: http.ServerResponse) {
this.debug('process');
res.writeHead(200, {'Content-Type': 'application/json'});
const data = {time: (new Date()).getTime(), headers: req.headers, rawHeaders: req.rawHeaders};
const json = JSON.stringify(data);
res.end(json);
}
shutdown(): Promise<any> {
this._abort = true;
for (const x in this.cache) {
if (this.cache.hasOwnProperty(x)) {
if (this.cache[x].t) {
clearTimeout(this.cache[x].t);
}
if (this.cache[x].s) {
this.cache[x].s.unref();
this.cache[x].s.destroy();
}
delete this.cache[x];
}
}
return this.stop();
}
  /**
   * Handles the HTTP CONNECT method for proxy tunneling; the body below is
   * currently commented out.
   *
   * @see https://nodejs.org/api/http.html#http_event_connect
   *
   * @param request
   * @param upstream
   * @param head
   */
onServerConnect(request: http.IncomingMessage, upstream: net.Socket, head: Buffer): void {
// this.debug('onServerConnect ' + this.url + '\n' + head.toString('utf8'));
/*
let self = this;
let rurl: url.Url = url.parse(`https://${request.url}`);
| upstream.write(
'HTTP/' + request.httpVersion + ' 200 Connection Established\r\n' +
'Proxy-agent: Proxybroker\r\n' +
'\r\n');
downstream.write(head);
downstream.pipe(upstream);
upstream.pipe(downstream)
});
*/
}
/*
onServerConnectData(data: Buffer): void {
// this.debug('onServerConnectData ' + data.toString('utf-8'))
}
*/
onServerUpgrade(request: http.IncomingMessage, socket: net.Socket, head: Buffer): void {
// this.debug('onServerUpgrade ' + this._options.url)
}
onServerClientError(exception: Error, socket: net.Socket): void {
// this.debug('onServerClientError ' + this._options.url)
this.debug('onServerClientError ' + this.url() + ' [' + socket['handle_id'] + ']', exception);
if (socket) {
socket.destroy(exception);
}
}
onServerError(exception: Error, socket: net.Socket): void {
this.debug('onServerError ' + this.url(), exception);
if (socket) {
socket.destroy(exception);
}
}
onServerClose(): void {
this.debug('onServerClose ' + this.url());
}
onServerConnection(socket: net.Socket, secured: boolean = false): void {
// this.debug('Server->onServerConnection secured=' + secured + ' ' + this.url());
// register connection
const key = socket.remoteAddress + ':' + socket.remotePort;
this.$connections[key] = socket;
socket.once('close', () => {
delete this.$connections[key];
});
}
onSecureConnection(socket: tls.TLSSocket): void {
this.onServerConnection(socket, true);
}
// private onServerConnection(socket: net.Socket): void { }
async start(done?: Function): Promise<boolean> {
const self = this;
this.prepare();
this.server = this.createServer();
if (this.isSecured) {
this.server.on('secureConnection', this.onSecureConnection.bind(this));
} else {
this.server.on('connection', this.onServerConnection.bind(this));
}
// this.server.on('upgrade', this.onServerUpgrade.bind(this));
this.server.on('clientError', this.onServerClientError.bind(this));
this.server.on('close', this.onServerClose.bind(this));
this.server.on('connect', this.onServerConnect.bind(this));
this.server.on('error', this.onServerError.bind(this));
if (this.wrapper && this.wrapper.beforeStart) {
await this.wrapper.beforeStart(this);
}
const p = new Promise<boolean>((resolve, reject) => {
self.server.once('error', (err) => {
const nErr = Exceptions.handle(err);
if (nErr.code === Exceptions.EADDRINUSE) {
reject(err);
} else {
this.logger.error('server error:', err);
}
});
self.server = self.server.listen(self._options.port, self._options.ip, () => {
self.debug('start server on ' + self.url() + ' (SSL: ' + self.isSecured + ')');
resolve(true);
});
});
if (done) {
const res = await p;
done(res);
return res;
} else {
return p;
}
}
async stop(done?: Function): Promise<boolean> {
await this.preFinalize();
const p = new Promise<boolean>((resolve) => {
// destroy and unref socket connections
this.debug('server-stop: ' + this.url() + ' ' + _.keys(this.$connections).length);
for (const conn in this.$connections) {
if (this.$connections.hasOwnProperty(conn)) {
try {
this.$connections[conn].unref();
this.$connections[conn].destroy();
delete this.$connections[conn];
} catch (e) {
}
}
}
if (this.server) {
this.server.close(() => {
this.server.removeAllListeners();
this.server = null;
this.debug('server-stop: ' + this.url());
resolve(true);
});
} else {
resolve(false);
}
});
if (done) {
const res = await p;
await this.finalize();
done(res);
return res;
} else {
await this.finalize();
return p;
}
}
hasServer() {
return this.server !== null;
}
prepare(): void {
}
finalize(): void {
}
preFinalize(): void {
}
debug(...msg: any[]) {
this.logger.debug.apply(this.logger, msg);
}
} | let downstream = net.connect(parseInt(rurl.port), rurl.hostname, function () {
self.debug('downstream connected to ' + request.url); |
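// Hedged usage sketch (not part of the class): starting a plain HTTP instance
// and shutting it down again. The option values are illustrative; see
// IServerOptions/DEFAULT_SERVER_OPTIONS for the full set.
async function exampleServer() {
  const server = new Server();
  server.initialize({protocol: 'http', ip: '127.0.0.1', port: 8080} as IServerOptions);
  await server.start();
  console.log('listening on ' + server.url());
  await server.shutdown();
}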
events.ts | import {WalletContractInterface, GnosisSafeInterface} from '@universal-login/contracts'; | export const eventInterface = {...WalletContractInterface.events, ...GnosisSafeInterface.events};
export function parseArgs(type: WalletEventType, event: Log): WalletEventArgs {
if (event.topics[0] === eventInterface[type].topic) {
const args = WalletContractInterface.parseLog(event);
const {key} = args.values;
return {key};
}
  throw new TypeError(`Unsupported event with topic: ${event.topics[0]}`);
}
export function parseArgsGnosis(type: WalletEventType, event: Log): WalletEventArgs {
if (event.topics[0] === eventInterface[type].topic) {
const args = GnosisSafeInterface.parseLog(event);
const {owner} = args.values;
return {key: owner};
}
  throw new TypeError(`Unsupported event with topic: ${event.topics[0]}`);
} | import {Log} from 'ethers/providers';
import {WalletEventArgs, WalletEventType} from '../models/events';
|
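// Hedged usage sketch (not in the source): decoding a raw provider log with the
// parser above. The 'KeyAdded' event name is an assumption about the contract
// ABI; any event present in eventInterface works the same way.
function exampleParse(rawLog: Log): WalletEventArgs {
  return parseArgs('KeyAdded' as WalletEventType, rawLog);
}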
fake_ies.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::{
appendable::BufferTooSmall,
ie::{
rsn::{akm, cipher, rsne::Rsne},
wpa::WpaIe,
write_wpa1_ie, *,
},
organization::Oui,
},
std::convert::TryInto,
};
pub fn fake_ht_cap_chanwidth(chanwidth: ChanWidthSet) -> HtCapabilities {
let mut ht_cap = fake_ht_capabilities();
ht_cap.ht_cap_info = ht_cap.ht_cap_info.with_chan_width_set(chanwidth);
ht_cap
}
pub fn fake_ht_op_sec_offset(secondary_offset: SecChanOffset) -> HtOperation {
let mut ht_op = fake_ht_operation();
let mut ht_op_info_head = ht_op.ht_op_info_head;
ht_op_info_head.set_secondary_chan_offset(secondary_offset);
ht_op.ht_op_info_head = ht_op_info_head;
ht_op
}
fn fake_supported_mcs_set() -> SupportedMcsSet {
SupportedMcsSet(0)
.with_rx_mcs(RxMcsBitmask(0x01000000ff))
.with_rx_highest_rate(0)
.with_tx_set_defined(true)
.with_tx_rx_diff(false)
.with_tx_max_ss(NumSpatialStreams::from_human(1).unwrap())
.with_tx_ueqm(false)
}
pub fn fake_ht_capabilities() -> HtCapabilities {
HtCapabilities {
ht_cap_info: HtCapabilityInfo(0)
.with_chan_width_set(ChanWidthSet::TWENTY_FORTY)
.with_sm_power_save(SmPowerSave::DISABLED)
.with_greenfield(true)
.with_short_gi_20(true)
.with_short_gi_40(true)
.with_tx_stbc(true)
.with_rx_stbc(1),
ampdu_params: AmpduParams(0),
mcs_set: fake_supported_mcs_set(),
ht_ext_cap: HtExtCapabilities(0),
txbf_cap: TxBfCapability(0)
.with_csi_antennas(NumAntennas::from_human(1).unwrap())
.with_noncomp_steering_ants(NumAntennas::from_human(1).unwrap())
.with_comp_steering_ants(NumAntennas::from_human(1).unwrap())
.with_csi_rows(NumCsiRows::from_human(1).unwrap())
.with_chan_estimation(NumSpaceTimeStreams::from_human(1).unwrap()),
asel_cap: AselCapability(0),
}
}
pub fn fake_ht_operation() -> HtOperation {
HtOperation {
primary_chan: 36,
ht_op_info_head: HtOpInfoHead(0)
.with_secondary_chan_offset(SecChanOffset::SECONDARY_ABOVE)
.with_sta_chan_width(StaChanWidth::ANY)
.with_rifs_mode_permitted(false)
.with_ht_protection(HtProtection::NONE)
.with_nongreenfield_present(true)
.with_obss_non_ht_stas_present(true)
.with_center_freq_seg2(0)
.with_dual_beacon(false)
.with_dual_cts_protection(false),
ht_op_info_tail: HtOpInfoTail(0)
.with_stbc_beacon(false)
.with_lsig_txop_protection(false)
.with_pco_active(false)
.with_pco_phase(PcoPhase::TWENTY_MHZ),
basic_ht_mcs_set: fake_supported_mcs_set(),
}
}
pub fn fake_vht_capabilities() -> VhtCapabilities {
VhtCapabilities {
vht_cap_info: VhtCapabilitiesInfo(0)
.with_max_mpdu_len(MaxMpduLen::OCTECTS_7991)
.with_supported_cbw_set(0)
.with_rx_ldpc(true)
.with_sgi_cbw80(true)
.with_sgi_cbw160(false)
.with_tx_stbc(true)
.with_rx_stbc(2)
.with_su_bfer(false)
.with_su_bfee(false)
.with_bfee_sts(0)
.with_num_sounding(0)
.with_mu_bfer(false)
.with_mu_bfee(false)
.with_txop_ps(false)
.with_htc_vht(false)
.with_max_ampdu_exponent(MaxAmpduExponent(2))
.with_link_adapt(VhtLinkAdaptation::NO_FEEDBACK)
.with_rx_ant_pattern(true)
.with_tx_ant_pattern(true)
.with_ext_nss_bw(2),
vht_mcs_nss: VhtMcsNssSet(0)
.with_rx_max_mcs_raw(0x0001020300010203)
.with_rx_max_data_rate(867)
.with_max_nsts(2)
.with_tx_max_mcs_raw(0x0001020300010203)
.with_tx_max_data_rate(867)
.with_ext_nss_bw(false),
}
}
pub fn fake_vht_op_cbw(vht_cbw: VhtChannelBandwidth) -> VhtOperation {
VhtOperation { vht_cbw, ..fake_vht_operation() }
}
pub fn fake_vht_operation() -> VhtOperation {
VhtOperation {
vht_cbw: VhtChannelBandwidth::CBW_80_160_80P80,
center_freq_seg0: 42,
center_freq_seg1: 0,
basic_mcs_nss: VhtMcsNssMap(0x1b1b),
}
}
pub fn fake_ht_cap_bytes() -> [u8; std::mem::size_of::<HtCapabilities>()] {
// Safe to unwrap because the size matches the IE.
fake_ht_capabilities().as_bytes().try_into().unwrap()
}
pub fn fake_ht_op_bytes() -> [u8; std::mem::size_of::<HtOperation>()] {
// Safe to unwrap because the size matches the IE.
fake_ht_operation().as_bytes().try_into().unwrap()
}
pub fn fake_vht_cap_bytes() -> [u8; std::mem::size_of::<VhtCapabilities>()] {
// Safe to unwrap because the size matches the IE.
fake_vht_capabilities().as_bytes().try_into().unwrap()
}
pub fn fake_vht_op_bytes() -> [u8; std::mem::size_of::<VhtOperation>()] {
// Safe to unwrap because the size matches the IE.
fake_vht_operation().as_bytes().try_into().unwrap()
}
pub fn fake_wpa_ie() -> WpaIe {
WpaIe {
unicast_cipher_list: vec![cipher::Cipher { oui: Oui::MSFT, suite_type: cipher::TKIP }],
akm_list: vec![akm::Akm { oui: Oui::MSFT, suite_type: akm::PSK }],
multicast_cipher: cipher::Cipher { oui: Oui::MSFT, suite_type: cipher::TKIP },
}
}
| }
pub fn get_rsn_ie_bytes(rsne: &Rsne) -> Vec<u8> {
let mut buf = Vec::with_capacity(rsne.len());
rsne.write_into(&mut buf).expect("error writing RSNE into buffer");
buf
} | pub fn get_vendor_ie_bytes_for_wpa_ie(wpa_ie: &WpaIe) -> Result<Vec<u8>, BufferTooSmall> {
let mut buf = vec![];
write_wpa1_ie(&mut buf, &wpa_ie).map(|_| buf) |
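// Hedged round-trip check (an assumption, not in the source): the fake WPA IE
// defined above should serialize through the vendor-IE writer without error.
#[cfg(test)]
mod fake_ies_sketch {
    use super::*;

    #[test]
    fn fake_wpa_ie_serializes() {
        let bytes = get_vendor_ie_bytes_for_wpa_ie(&fake_wpa_ie())
            .expect("vec-backed buffer should never be too small");
        assert!(!bytes.is_empty());
    }
}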
docker_container.py | # Copyright 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import sys
import shutil
import abc
import docker
from tqdm import tqdm
from emu.utils import mkdir_p
class | (object):
"""Tracks progress using tqdm for a set of layers that are pushed."""
def __init__(self):
# This tracks the information for a given layer id.
self.progress = {}
self.idx = -1
def __del__(self):
for k in self.progress:
self.progress[k]["tqdm"].close()
def update(self, entry):
"""Update the progress bars given a an entry.."""
if "id" not in entry:
return
identity = entry["id"]
if identity not in self.progress:
self.idx += 1
self.progress[identity] = {
"tqdm": tqdm(total=0, position=self.idx, unit="B", unit_scale=True), # The progress bar
"total": 0, # Total of bytes we are shipping
"status": "", # Status message.
"current": 0, # Current of total already send.
}
prog = self.progress[identity]
total = int(entry.get("progressDetail", {}).get("total", -1))
current = int(entry.get("progressDetail", {}).get("current", 0))
if prog["total"] != total and total != -1:
prog["total"] = total
prog["tqdm"].reset(total=total)
if prog["status"] != entry["status"]:
prog["tqdm"].set_description("{0} {1}".format(entry.get("status"), identity))
if current != 0:
diff = current - prog["current"]
prog["current"] = current
prog["tqdm"].update(diff)
class DockerContainer(object):
"""A Docker Device is capable of creating and launching docker images.
In order to successfully create and launch a docker image you must either
run this as root, or have enabled sudoless docker.
"""
TAG_REGEX = re.compile(r"[a-zA-Z0-9][a-zA-Z0-9._-]*:?[a-zA-Z0-9._-]*")
def __init__(self, repo=None):
if repo and repo[-1] != "/":
repo += "/"
self.repo = repo
def get_client(self):
return docker.from_env()
def get_api_client(self):
try:
api_client = docker.APIClient()
logging.info(api_client.version())
return api_client
        except Exception:
logging.exception("Failed to create default client, trying domain socket.", exc_info=True)
api_client = docker.APIClient(base_url="unix://var/run/docker.sock")
logging.info(api_client.version())
return api_client
def push(self):
image = self.full_name()
print("Pushing docker image: {}.. be patient this can take a while!".format(self.full_name()))
tracker = ProgressTracker()
try:
client = docker.from_env()
result = client.images.push(image, "latest", stream=True, decode=True)
for entry in result:
tracker.update(entry)
self.docker_image().tag("{}{}:latest".format(self.repo, self.image_name()))
        except Exception:
logging.exception("Failed to push image.", exc_info=True)
logging.warning("You can manually push the image as follows:")
logging.warning("docker push %s", image)
def launch(self, port_map):
"""Launches the container with the given sha, publishing abd on port, and gRPC on port 8554
Returns the container.
"""
image = self.docker_image()
client = docker.from_env()
try:
container = client.containers.run(
image=image.id,
privileged=True,
publish_all_ports=True,
detach=True,
ports=port_map,
)
print("Launched {} (id:{})".format(container.name, container.id))
print("docker logs -f {}".format(container.name))
print("docker stop {}".format(container.name))
return container
        except Exception:
            logging.exception("Unable to run the %s", image.id)
            print("Unable to start the container, try running it as:")
            print("./run.sh ", image.id)
def create_container(self, dest):
"""Creates the docker container, returning the sha of the container, or None in case of failure."""
identity = None
image_tag = self.full_name()
print("docker build {} -t {}".format(dest, image_tag))
try:
api_client = self.get_api_client()
logging.info("build(path=%s, tag=%s, rm=True, decode=True)", dest, image_tag)
result = api_client.build(path=dest, tag=image_tag, rm=True, decode=True)
for entry in result:
if "stream" in entry:
sys.stdout.write(entry["stream"])
if "aux" in entry and "ID" in entry["aux"]:
identity = entry["aux"]["ID"]
client = docker.from_env()
image = client.images.get(identity)
image.tag(self.repo + self.image_name(), "latest")
        except Exception:
logging.exception("Failed to create container.", exc_info=True)
logging.warning("You can manually create the container as follows:")
logging.warning("docker build -t %s %s", image_tag, dest)
return identity
def clean(self, dest):
if os.path.exists(dest):
shutil.rmtree(dest)
mkdir_p(dest)
def pull(self, image, tag):
"""Tries to retrieve the given image and tag.
Return True if succeeded, False when failed.
"""
client = self.get_api_client()
try:
tracker = ProgressTracker()
result = client.pull(self.repo + image, tag)
for entry in result:
tracker.update(entry)
        except Exception:
logging.info("Failed to retrieve image, this is not uncommon.", exc_info=True)
return False
return True
def full_name(self):
if self.repo:
return "{}{}:{}".format(self.repo, self.image_name(), self.docker_tag())
        return "{}:{}".format(self.image_name(), self.docker_tag())
def latest_name(self):
if self.repo:
return "{}{}:{}".format(self.repo, self.image_name(), "latest")
        return "{}:{}".format(self.image_name(), "latest")
def create_cloud_build_step(self, dest):
return {
"name": "gcr.io/cloud-builders/docker",
"args": [
"build",
"-t",
self.full_name(),
"-t",
self.latest_name(),
os.path.basename(dest),
],
}
def docker_image(self):
"""The docker local docker image if any
Returns:
{docker.models.images.Image}: A docker image object, or None.
"""
client = self.get_client()
for img in client.images.list():
for tag in img.tags:
if self.image_name() in tag:
return img
return None
def available(self):
"""True if this container image is locally available."""
        return self.docker_image() is not None
def build(self, dest):
self.write(dest)
return self.create_container(dest)
def can_pull(self):
"""True if this container image can be pulled from a registry."""
return self.pull(self.image_name(), self.docker_tag())
@abc.abstractmethod
def write(self, destination):
"""Method responsible for writing the Dockerfile and all necessary files to build a container.
Args:
destination ({string}): A path to a directory where all the container files should reside.
Raises:
NotImplementedError: [description]
"""
raise NotImplementedError()
@abc.abstractmethod
def image_name(self):
"""The image name without the tag used to uniquely identify this image.
Raises:
NotImplementedError: [description]
"""
raise NotImplementedError()
@abc.abstractmethod
def docker_tag(self):
raise NotImplementedError()
@abc.abstractmethod
def depends_on(self):
"""Name of the system image this container is build on."""
raise NotImplementedError()
def __str__(self):
return self.image_name() + ":" + self.docker_tag()
| ProgressTracker |
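# Hedged usage sketch (hypothetical subclass, not part of this module): the
# abstract methods overridden below are the minimal surface a concrete
# container needs before build()/push() can run.
class EchoContainer(DockerContainer):
    def write(self, destination):
        with open(os.path.join(destination, "Dockerfile"), "w") as dockerfile:
            dockerfile.write('FROM alpine\nCMD ["echo", "hello"]\n')

    def image_name(self):
        return "echo-container"

    def docker_tag(self):
        return "0.1"

    def depends_on(self):
        return None

# echo = EchoContainer(repo="gcr.io/some-project")
# echo.clean("/tmp/echo-build")
# echo.build("/tmp/echo-build")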
datamanager.go | package datamanager
import (
"sync"
"github.com/toolglobal/api/database"
)
// DBCreator to create db instance
type DBCreator func(dbname string) database.Database
// DataManager handles data access between the app and the database.
type DataManager struct {
wdb database.Database
rdb database.Database
qNeedLock bool
qLock sync.Mutex
}
// NewDataManager creates a data manager.
func NewDataManager(dbname string, dbc DBCreator) (*DataManager, error) |
// Close closes all dbs.
func (m *DataManager) Close() {
if m.qNeedLock {
m.qLock.Lock()
defer m.qLock.Unlock()
}
if m.wdb != nil {
m.wdb.Close()
m.wdb = nil
}
if m.rdb != nil {
m.rdb.Close()
m.rdb = nil
}
}
// QTxBegin starts a database transaction on wdb.
func (m *DataManager) QTxBegin() error {
if m.qNeedLock {
m.qLock.Lock()
defer m.qLock.Unlock()
}
return m.wdb.Begin()
}
// QTxCommit commits the database transaction on wdb.
func (m *DataManager) QTxCommit() error {
if m.qNeedLock {
m.qLock.Lock()
defer m.qLock.Unlock()
}
return m.wdb.Commit()
}
// QTxRollback rolls back the database transaction on wdb.
func (m *DataManager) QTxRollback() error {
if m.qNeedLock {
m.qLock.Lock()
defer m.qLock.Unlock()
}
return m.wdb.Rollback()
}
| {
wdb := dbc(dbname)
qt, qi := wdb.GetInitSQLs()
err := wdb.PrepareTables(qt, qi)
if err != nil {
return nil, err
}
dm := &DataManager{
wdb: wdb,
rdb: dbc(dbname),
qNeedLock: true,
}
return dm, nil
} |
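// Hedged usage sketch (not part of the source): wiring a concrete DBCreator
// into NewDataManager. The creator is passed in here to avoid assuming a
// specific driver; any func(string) database.Database satisfies DBCreator.
func exampleUsage(newDB DBCreator) error {
	dm, err := NewDataManager("chain.db", newDB)
	if err != nil {
		return err
	}
	defer dm.Close()
	if err := dm.QTxBegin(); err != nil {
		return err
	}
	// ... perform writes through the wdb here ...
	return dm.QTxCommit()
}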
rust_api.py | import asyncio | from io import BytesIO
from importlib import resources
from datetime import datetime
from collections import defaultdict
from .base_rust_api import BaseRustSocket
from .structures import RustTime, RustInfo, RustMap, RustMarker, RustChatMessage, RustTeamInfo, RustTeamMember, RustTeamNote, RustEntityInfo, RustContents, RustItem
from .remote.rustplus_pb2 import *
from .remote import HeartBeat
from ..commands import CommandOptions
from ..exceptions import *
from ..utils import *
class RustSocket(BaseRustSocket):
def __init__(self, ip: str = None, port: str = None, steamid: int = None, playertoken: int = None, command_options : CommandOptions = None, raise_ratelimit_exception : bool = True, ratelimit_limit : int = 25, ratelimit_refill : int = 3) -> None:
super().__init__(ip=ip, port=port, steamid=steamid, playertoken=playertoken, command_options=command_options, raise_ratelimit_exception=raise_ratelimit_exception, ratelimit_limit=ratelimit_limit, ratelimit_refill=ratelimit_refill, heartbeat=HeartBeat(self))
def entity_event(self, eid):
"""
Decorator to register a smart device listener
"""
def wrap_func(coro):
def entity_event_callback(future : Future):
try:
entity_info : RustEntityInfo = future.result()
self.remote.event_handler.register_event(eid, (coro, loop, entity_info.type))
                except Exception:
raise SmartDeviceRegistrationError("Not Found")
loop = asyncio.get_event_loop()
future = asyncio.run_coroutine_threadsafe(self.get_entity_info(eid), loop)
future.add_done_callback(entity_event_callback)
return wrap_func
async def get_time(self) -> RustTime:
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getTime.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
response = await self.remote.get_response(app_request.seq, app_request)
return format_time(response)
async def send_team_message(self, message: str) -> None:
await self._handle_ratelimit(2)
app_send_message = AppSendMessage()
app_send_message.message = message
app_request = self._generate_protobuf()
app_request.sendTeamMessage.CopyFrom(app_send_message)
self.remote.ignored_responses.append(app_request.seq)
await self.remote.send_message(app_request)
async def get_info(self) -> RustInfo:
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getInfo.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
response = await self.remote.get_response(app_request.seq, app_request)
return RustInfo(response.response.info)
async def get_team_chat(self) -> List[RustChatMessage]:
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getTeamChat.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
messages = (await self.remote.get_response(app_request.seq, app_request)).response.teamChat.messages
return [RustChatMessage(message) for message in messages]
async def get_team_info(self):
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getTeamInfo.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
return RustTeamInfo(app_message.response.teamInfo)
async def get_markers(self) -> List[RustMarker]:
await self._handle_ratelimit()
app_request = self._generate_protobuf()
app_request.getMapMarkers.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
return [RustMarker(marker) for marker in app_message.response.mapMarkers.markers]
async def get_raw_map_data(self) -> RustMap:
await self._handle_ratelimit(5)
app_request = self._generate_protobuf()
app_request.getMap.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
return RustMap(app_message.response.map)
async def get_map(self, add_icons: bool = False, add_events: bool = False, add_vending_machines: bool = False, override_images: dict = {}) -> Image:
MAPSIZE = int((await self.get_info()).size)
        await self._handle_ratelimit(5 + (1 if [add_icons, add_events, add_vending_machines].count(True) >= 1 else 0))
app_request = self._generate_protobuf()
app_request.getMap.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
map = app_message.response.map
monuments = list(map.monuments)
try:
image = Image.open(BytesIO(map.jpgImage))
        except Exception:
raise ImageError("Invalid bytes for the image")
image = image.crop((500,500,map.height-500,map.width-500))
map = image.resize((MAPSIZE,MAPSIZE), Image.ANTIALIAS)
if add_icons or add_events or add_vending_machines:
mapMarkers = await self.get_markers()
if add_icons:
for monument in monuments:
if str(monument.token) == "DungeonBase":
continue
icon = convert_monument(monument.token, override_images)
if monument.token in override_images:
icon = icon.resize((150, 150))
if str(monument.token) == "train_tunnel_display_name":
icon = icon.resize((100, 125))
map.paste(icon, (format_cood(int(monument.x), int(monument.y), MAPSIZE)), icon)
if add_vending_machines:
with resources.path("rustplus.api.icons", "vending_machine.png") as path:
vendingMachine = Image.open(path).convert("RGBA")
vendingMachine = vendingMachine.resize((100, 100))
for marker in mapMarkers:
if add_events:
if marker.type == 2 or marker.type == 4 or marker.type == 5 or marker.type == 6:
icon = convert_marker(str(marker.type), marker.rotation)
if marker.type == 6:
x = marker.x
y = marker.y
if y > MAPSIZE: y = MAPSIZE
if y < 0: y = 100
if x > MAPSIZE: x = MAPSIZE - 75
if x < 0: x = 50
map.paste(icon, (int(x), MAPSIZE - int(y)), icon)
else:
map.paste(icon, (format_cood(int(marker.x), int(marker.y), MAPSIZE)), icon)
if add_vending_machines and marker.type == 3:
map.paste(vendingMachine, (int(marker.x) - 50, MAPSIZE - int(marker.y) - 50), vendingMachine)
return map.resize((2000, 2000), Image.ANTIALIAS)
async def get_entity_info(self, eid: int = None) -> RustEntityInfo:
await self._handle_ratelimit()
if eid is None:
raise ValueError("EID cannot be None")
app_request = self._generate_protobuf()
app_request.entityId = eid
app_request.getEntityInfo.CopyFrom(AppEmpty())
await self.remote.send_message(app_request)
app_message = await self.remote.get_response(app_request.seq, app_request)
return RustEntityInfo(app_message.response.entityInfo)
async def _update_smart_device(self, eid : int, value : bool) -> None:
await self._handle_ratelimit()
entityValue = AppSetEntityValue()
entityValue.value = value
app_request = self._generate_protobuf()
app_request.entityId = eid
app_request.setEntityValue.CopyFrom(entityValue)
self.remote.ignored_responses.append(app_request.seq)
await self.remote.send_message(app_request)
async def turn_on_smart_switch(self, eid: int = None) -> None:
if eid is None:
raise ValueError("EID cannot be None")
await self._update_smart_device(eid, True)
async def turn_off_smart_switch(self, eid: int = None) -> None:
if eid is None:
raise ValueError("EID cannot be None")
await self._update_smart_device(eid, False)
async def promote_to_team_leader(self, steamid: int = None) -> None:
if steamid is None:
raise ValueError("SteamID cannot be None")
await self._handle_ratelimit()
leaderPacket = AppPromoteToLeader()
leaderPacket.steamId = steamid
app_request = self._generate_protobuf()
app_request.promoteToLeader.CopyFrom(leaderPacket)
self.remote.ignored_responses.append(app_request.seq)
await self.remote.send_message(app_request)
async def get_current_events(self) -> List[RustMarker]:
return [marker for marker in (await self.get_markers()) if marker.type == 2 or marker.type == 4 or marker.type == 5 or marker.type == 6]
async def get_tc_storage_contents(self, eid: int = None, combine_stacks: bool = False) -> RustContents:
if eid is None:
raise ValueError("EID cannot be None")
returnedData = await self.get_entity_info(eid)
targetTime = datetime.utcfromtimestamp(int(returnedData.protectionExpiry))
difference = targetTime - datetime.utcnow()
items = []
for item in returnedData.items:
items.append(RustItem(translate_id_to_stack(item.itemId), item.itemId, item.quantity, item.itemIsBlueprint))
if combine_stacks:
mergedMap = defaultdict(tuple)
for item in items:
data = mergedMap[str(item.itemId)]
if data:
count = int(data[0]) + int(item.quantity)
mergedMap[str(item.itemId)] = (count, bool(item.isBlueprint))
else:
mergedMap[str(item.itemId)] = (int(item.quantity), bool(item.isBlueprint))
items = []
for key in mergedMap.keys():
items.append(RustItem(translate_id_to_stack(key), key, int(mergedMap[key][0]), bool(mergedMap[key][1])))
return RustContents(difference, bool(returnedData.hasProtection), items) | from asyncio.futures import Future
from typing import List
from PIL import Image |
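# Hedged usage sketch (not part of the module): a minimal client session. All
# connection details are placeholders; connect()/disconnect() are assumed to
# come from BaseRustSocket, as in the published rustplus package.
async def example_session():
    socket = RustSocket("127.0.0.1", "28082", 76561198000000000, 1234567890)
    await socket.connect()
    print(await socket.get_time())
    await socket.send_team_message("Hello from rustplus!")
    await socket.disconnect()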
string_column_style.rs | ////////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2018, the Perspective Authors.
//
// This file is part of the Perspective library, distributed under the terms
// of the Apache License 2.0. The full license can be found in the LICENSE
// file.
use crate::components::string_column_style::*;
use crate::custom_elements::modal::*;
use crate::*;
use wasm_bindgen::prelude::*;
use web_sys::*;
use yew::prelude::*;
#[wasm_bindgen]
#[derive(Clone)]
pub struct PerspectiveStringColumnStyleElement {
modal: ModalElement<StringColumnStyle>,
props: StringColumnStyleProps,
}
fn | (elem: &web_sys::HtmlElement, config: &StringColumnStyleConfig) {
let mut event_init = web_sys::CustomEventInit::new();
event_init.detail(&JsValue::from_serde(config).unwrap());
let event = CustomEvent::new_with_event_init_dict(
"perspective-column-style-change",
&event_init,
);
elem.dispatch_event(&event.unwrap()).unwrap();
}
impl ResizableMessage for <StringColumnStyle as Component>::Message {
fn resize(y: i32, x: i32, _: bool) -> Self {
StringColumnStyleMsg::SetPos(y, x)
}
}
#[wasm_bindgen]
impl PerspectiveStringColumnStyleElement {
#[wasm_bindgen(constructor)]
pub fn new(
elem: web_sys::HtmlElement,
js_config: JsValue,
js_default_config: JsValue,
) -> PerspectiveStringColumnStyleElement {
let config = js_config.into_serde().unwrap();
let default_config = js_default_config.into_serde().unwrap();
let on_change = {
clone!(elem);
Callback::from(move |x: StringColumnStyleConfig| on_change(&elem, &x))
};
let props = StringColumnStyleProps {
config,
default_config,
on_change,
};
let modal = ModalElement::new(elem, props.clone(), true);
PerspectiveStringColumnStyleElement { modal, props }
}
/// Reset to a provided JSON config, to be used in place of `new()` when
/// re-using this component.
///
/// # Arguments
/// * `config` - a `ColumnStyle` config in JSON form.
pub fn reset(&mut self, config: JsValue) {
let msg = StringColumnStyleMsg::Reset(config.into_serde().unwrap());
self.modal.send_message(msg);
}
/// Dispatches to `ModalElement::open(target)`
///
/// # Arguments
/// `target` - the relative target to pin this `ModalElement` to.
pub fn open(&mut self, target: web_sys::HtmlElement) {
self.modal.open(target, None);
}
/// Remove this `ModalElement` from the DOM.
pub fn close(&mut self) -> Result<(), JsValue> {
self.modal.hide()
}
pub fn destroy(self) -> Result<(), JsValue> {
self.modal.destroy()
}
/// DOM lifecycle method when connected. We don't use this, as it can fire during
/// innocuous events like re-parenting.
pub fn connected_callback(&self) {}
}
| on_change |
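// Hedged usage sketch (not in the source): from JavaScript, the wasm-bindgen
// element above is typically driven like this (names are illustrative):
//
//   const style = new PerspectiveStringColumnStyleElement(elem, config, defaults);
//   style.open(target_elem);  // pin the modal to a target
//   style.close();            // hide it again
//   elem.addEventListener("perspective-column-style-change", update_view);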
index.ts | import { App } from 'vue'
import VXETable from '../v-x-e-table'
import PanelComponent from './src/panel'
import filterHook from './src/hook'
export const Filter = {
install (app: App) { | }
}
export default Filter | VXETable.hooks.add('$tableFilter', filterHook)
app.component(PanelComponent.name, PanelComponent) |
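// Hedged usage sketch (not part of the source): registering the plugin on a
// Vue 3 app instance.
// import { createApp } from 'vue'
// import Filter from './index'
//
// const app = createApp(AppRoot)
// app.use(Filter)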
output_format.rs | // Copyright 2022 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use std::str::FromStr;
use common_datablocks::DataBlock;
use common_datavalues::DataSchemaRef;
use common_exception::ErrorCode;
use common_exception::Result;
use common_io::prelude::FormatSettings;
use strum_macros::EnumIter;
use super::output_format_ndjson::JsonEachRowOutputFormat;
use super::output_format_parquet::ParquetOutputFormat;
use super::output_format_values::ValuesOutputFormat;
use crate::formats::output_format_csv::CSVOutputFormat;
use crate::formats::output_format_csv::CSVWithNamesAndTypesOutputFormat;
use crate::formats::output_format_csv::CSVWithNamesOutputFormat;
use crate::formats::output_format_csv::TSVOutputFormat;
use crate::formats::output_format_csv::TSVWithNamesAndTypesOutputFormat;
use crate::formats::output_format_csv::TSVWithNamesOutputFormat;
use crate::formats::FormatFactory;
pub trait OutputFormat: Send {
fn serialize_block(
&mut self,
_data_block: &DataBlock,
_format_setting: &FormatSettings,
) -> Result<Vec<u8>> {
unimplemented!()
}
fn serialize_prefix(&self, _format: &FormatSettings) -> Result<Vec<u8>> {
Ok(vec![])
}
fn finalize(&mut self) -> Result<Vec<u8>>;
}
#[derive(Clone, Copy, Default)]
pub struct HeaderConfig {
pub with_name: bool,
pub with_type: bool,
}
impl HeaderConfig {
pub fn new(with_name: bool, with_type: bool) -> Self {
Self {
with_name,
with_type,
}
}
}
#[derive(Clone, Copy, Debug, EnumIter, Eq, PartialEq)]
pub enum OutputFormatType {
CSV,
CSVWithNames,
CSVWithNamesAndTypes,
TSV,
TSVWithNames,
TSVWithNamesAndTypes,
Parquet,
JsonEachRow,
Values,
}
impl fmt::Display for OutputFormatType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl OutputFormatType {
pub fn with_names(&self) -> Option<OutputFormatType> {
match self {
OutputFormatType::CSV => Some(OutputFormatType::CSVWithNames),
OutputFormatType::TSV => Some(OutputFormatType::TSVWithNames),
_ => None,
}
}
pub fn with_names_and_types(&self) -> Option<OutputFormatType> {
match self {
OutputFormatType::CSV => Some(OutputFormatType::CSVWithNamesAndTypes),
OutputFormatType::TSV => Some(OutputFormatType::TSVWithNamesAndTypes),
_ => None,
}
}
pub fn base_alias(&self) -> Vec<String> {
match self {
OutputFormatType::TSV => vec!["TabSeparated".to_string()],
OutputFormatType::JsonEachRow => vec!["NDJson".to_string()],
_ => vec![],
}
}
}
impl OutputFormatType {
pub fn | (&self, schema: DataSchemaRef) -> Box<dyn OutputFormat> {
match self {
OutputFormatType::TSV => Box::new(TSVOutputFormat::create(schema)),
OutputFormatType::TSVWithNames => Box::new(TSVWithNamesOutputFormat::create(schema)),
OutputFormatType::TSVWithNamesAndTypes => {
Box::new(TSVWithNamesAndTypesOutputFormat::create(schema))
}
OutputFormatType::CSV => Box::new(CSVOutputFormat::create(schema)),
OutputFormatType::CSVWithNames => Box::new(CSVWithNamesOutputFormat::create(schema)),
OutputFormatType::CSVWithNamesAndTypes => {
Box::new(CSVWithNamesAndTypesOutputFormat::create(schema))
}
OutputFormatType::Parquet => Box::new(ParquetOutputFormat::create(schema)),
OutputFormatType::JsonEachRow => Box::new(JsonEachRowOutputFormat::create(schema)),
OutputFormatType::Values => Box::new(ValuesOutputFormat::create(schema)),
}
}
}
impl Default for OutputFormatType {
fn default() -> Self {
Self::TSV
}
}
impl FromStr for OutputFormatType {
type Err = ErrorCode;
fn from_str(s: &str) -> std::result::Result<Self, ErrorCode> {
FormatFactory::instance().get_output(s)
}
}
| create_format |
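// Hedged usage sketch (not in the source): picking a format variant and
// serializing a single block. FormatSettings::default() is an assumption
// about common_io; the schema and block come from the caller.
fn example_serialize(schema: DataSchemaRef, block: &DataBlock) -> Result<Vec<u8>> {
    let mut fmt = OutputFormatType::CSVWithNames.create_format(schema);
    let settings = FormatSettings::default();
    let mut out = fmt.serialize_prefix(&settings)?;
    out.extend(fmt.serialize_block(block, &settings)?);
    out.extend(fmt.finalize()?);
    Ok(out)
}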
user_playlists.py | import asyncio
import spotify
client = spotify.Client('someid', 'somesecret')
async def | ():
# You can use a user with a http presence
user = await client.user_from_token('sometoken')
# Or you can get a generic user
    user = await client.get_user('some_user_id')
# returns a list of spotify.Playlist objects
playlists = await user.get_playlists()
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(main())
| main |
process_outpost_id.go | package customizations
import (
"context"
"fmt"
"strings"
"github.com/awslabs/smithy-go/middleware"
smithyhttp "github.com/awslabs/smithy-go/transport/http"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/service/internal/s3shared"
)
// processOutpostIDMiddleware is special customization middleware to be applied for operations
// CreateBucket, ListRegionalBuckets which must resolve endpoint to s3-outposts.{region}.amazonaws.com
// with region as client region and signed by s3-control if an outpost id is provided.
type processOutpostIDMiddleware struct {
// GetOutpostID points to a function that processes an input and returns an outpostID as string ptr,
// and bool indicating if outpostID is supported or set.
GetOutpostID func(interface{}) (*string, bool)
// UseDualStack indicates of dual stack endpoints should be used
UseDualstack bool
}
// ID returns the middleware ID.
func (*processOutpostIDMiddleware) ID() string { return "S3Control:ProcessOutpostIDMiddleware" }
// HandleSerialize adds a serialize step, this has to be before operation serializer and arn endpoint logic.
// Ideally this step will be ahead of ARN customization for CreateBucket, ListRegionalBucket operation.
func (m *processOutpostIDMiddleware) HandleSerialize(
ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) { |
// attempt to fetch an outpost id
outpostID, ok := m.GetOutpostID(in.Parameters)
if !ok {
return next.HandleSerialize(ctx, in)
}
// check if outpostID was not set or is empty
if outpostID == nil || len(strings.TrimSpace(*outpostID)) == 0 {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown request type %T", req)
}
serviceEndpointLabel := "s3-outposts."
requestRegion := awsmiddleware.GetRegion(ctx)
// validate if fips
if s3shared.IsFIPS(requestRegion) {
return out, metadata, fmt.Errorf("unsupported fips region provided for outposts request")
}
// validate if dualstack
if m.UseDualstack {
return out, metadata, fmt.Errorf("dualstack is not supported for outposts request")
}
// set request url
req.URL.Host = serviceEndpointLabel + requestRegion + ".amazonaws.com"
// Disable endpoint host prefix for s3-control
ctx = smithyhttp.DisableEndpointHostPrefix(ctx, true)
// redirect signer
ctx = awsmiddleware.SetSigningName(ctx, "s3-outposts")
ctx = awsmiddleware.SetSigningRegion(ctx, requestRegion)
return next.HandleSerialize(ctx, in)
} | // if host name is immutable, skip this customization
if smithyhttp.GetHostnameImmutable(ctx) {
return next.HandleSerialize(ctx, in)
} |
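// Hedged registration sketch (not in the source): attaching the middleware to
// an operation's serialize stack. The input accessor interface and the
// "OperationSerializer" relative ID are assumptions about the generated code.
func addProcessOutpostIDMiddleware(stack *middleware.Stack, useDualstack bool) error {
	return stack.Serialize.Insert(&processOutpostIDMiddleware{
		GetOutpostID: func(in interface{}) (*string, bool) {
			v, ok := in.(interface{ GetOutpostID() (*string, bool) })
			if !ok {
				return nil, false
			}
			return v.GetOutpostID()
		},
		UseDualstack: useDualstack,
	}, "OperationSerializer", middleware.Before)
}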
.prettierrc.js | module.exports = {
tabWidth: 4,
useTabs: true,
semi: true,
singleQuote: true, | arrowParens: 'avoid',
endOfLine: 'lf'
}; | trailingComma: 'none',
bracketSpacing: true, |
shared-helpers.js | import { expect } from 'chai';
export const waitAndClick = async (client, selector, ...waitArgs) => {
await client.waitForVisible(selector, ...waitArgs);
return client.click(selector);
};
|
export const expectTextInSelector = async (client, { selector, text }) => {
await client.waitForText(selector);
let textOnScreen = await client.getText(selector);
// The selector could exist multiple times in the DOM
if (typeof textOnScreen === 'string') textOnScreen = [textOnScreen];
// We only compare the first result
expect(textOnScreen[0]).to.equal(text);
}; | |
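// Hedged usage sketch (not in the source): composing the two helpers inside a
// WebdriverIO-style test step; the selector and text are illustrative.
export const clickAndVerify = async client => {
  await waitAndClick(client, '.SubmitButton', 5000);
  await expectTextInSelector(client, {
    selector: '.StatusBar_message',
    text: 'Saved',
  });
};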
write.py | """Functions to write Science Instrument Aperture Files (SIAF).
SIAF content in an aperture_collection object can be written to an xml file that can be ingested in
the PRD. Format and order of the xml fields are defined in SIAF reference files.
Writing to Microsoft Excel .xlsx format is supported.
Writing to .csv and other formats supported by astropy.table.Table.write is enabled.
Authors
-------
Johannes Sahlmann
"""
import os
import numpy as np
import lxml.etree as ET
from astropy.time import Time
from astropy.table import Table, Column
from openpyxl import Workbook
from openpyxl.styles import Font, Color
from openpyxl.styles import Alignment
from ..version import __version__
from ..constants import _JWST_TEMPORARY_ROOT
from ..aperture import PRD_REQUIRED_ATTRIBUTES_ORDERED, SIAF_XML_FIELD_FORMAT, FLOAT_ATTRIBUTES
# dictionary used to set field precision in SIAF.XML
xml_decimal_precision = {}
field_names = list(SIAF_XML_FIELD_FORMAT['field_name'])
for attr in PRD_REQUIRED_ATTRIBUTES_ORDERED:
index = field_names.index(attr)
xml_decimal_precision[attr] = SIAF_XML_FIELD_FORMAT['pyformat'][index]
def | (aperture_collection, filename=None, basepath=None, label=None,
file_format='xml', verbose=True):
"""Write the content of aperture_collection into xml and xlsx files that are PRD-compliant.
Parameters
----------
aperture_collection : ApertureCollection
dictionary of apertures
filename
basepath
label
file_format : str list
one of ['xml', 'xlsx', 'csv', and formats supported by astropy Table.write]
verbose
Returns
-------
TODO
----
test support of astropy Table.write formats (FITS not working)
"""
    if isinstance(file_format, str):
file_format = [file_format]
aperture_names = np.array([key for key in aperture_collection.apertures.keys()])
instrument = aperture_collection.apertures[aperture_names[0]].InstrName
if instrument == 'NIRCAM':
instrument = 'NIRCam'
elif instrument == 'NIRSPEC':
instrument = 'NIRSpec'
if (filename is not None) and (len(list(file_format)) != 1):
raise RuntimeError('When filename is specified, only one output format is supported')
if label is not None:
name_seed = instrument + '_SIAF_{}'.format(label)
else:
name_seed = instrument + '_SIAF'
filenames = []
# hostname = os.uname()[1]
username = os.getlogin()
timestamp = Time.now()
for file_format in list(file_format):
if filename is None:
if basepath is None:
basepath = _JWST_TEMPORARY_ROOT
if not os.path.isdir(basepath):
raise RuntimeError("Could not write SIAF data "
"to {}. Directory does not exist.".format(basepath))
if file_format == 'xml':
out_filename = os.path.join(basepath, name_seed+'.xml')
elif file_format == 'xlsx':
out_filename = os.path.join(basepath, name_seed+'.xlsx')
# elif file_format == 'csv':
# out_filename = os.path.join(basepath, name_seed+'.csv')
else:
out_filename = os.path.join(basepath, name_seed+'.{}'.format(file_format))
else:
out_filename = filename
if file_format == 'xml':
root = ET.Element('SiafEntries')
# add generation info as comment to SIAFXML
root.append(ET.Comment('Generated {} {}'.format(timestamp.isot, timestamp.scale)))
root.append(ET.Comment('by {}'.format(username)))
# try:
# repo = git.Repo(os.path.abspath(__file__), search_parent_directories=True)
# git_version = git.Git(repo.working_dir).describe()
# root.append(ET.Comment('pysiaf git-version {}'.format(git_version)))
# except git.exc.InvalidGitRepositoryError:
root.append(ET.Comment('pysiaf version {}'.format(__version__)))
for aperture_name in aperture_names:
aperture = aperture_collection.apertures[aperture_name]
siaf_entry = ET.SubElement(root, 'SiafEntry')
for attribute in PRD_REQUIRED_ATTRIBUTES_ORDERED:
attribute_value = getattr(aperture_collection.apertures[aperture_name],
attribute)
if attribute_value is None:
attribute_text = None
# NIRSpec special case
elif (aperture.AperType in ['TRANSFORM']) and \
(attribute in 'XSciRef YSciRef XSciScale YSciScale V2Ref V3Ref'.
split()):
attribute_text = '{:{prec}}'.format(attribute_value,
prec='.15e').strip()
elif attribute in FLOAT_ATTRIBUTES:
attribute_text = '{:{prec}}'.format(
attribute_value, prec=xml_decimal_precision[attribute]).strip()
else:
attribute_text = str(attribute_value)
if (not isinstance(attribute_value, str)) and (attribute_text is not None):
if np.isnan(attribute_value):
attribute_text = None
ET.SubElement(siaf_entry, attribute).text = attribute_text
doc = ET.ElementTree(root)
doc.write(out_filename, pretty_print=True, xml_declaration=False)
if verbose:
print('Wrote Siaf to xml file {}'.format(out_filename))
elif file_format == 'xlsx':
siaf_workbook = Workbook()
ws1 = siaf_workbook.active
ws1.title = 'SIAF'
header_row_description = 1
header_row_attributes = 2
# write descriptive header
for j, attribute_name in enumerate(PRD_REQUIRED_ATTRIBUTES_ORDERED):
col = j + 1
if attribute_name == 'InstrName':
text = 'Aperture Basic Info'
elif attribute_name == 'XDetSize':
text = 'Detector Frame'
elif attribute_name == 'XSciSize':
text = 'Science Frame'
elif attribute_name == 'V2Ref':
text = 'V Frame'
elif attribute_name == 'V2IdlYAngle':
text = 'Frame Relationships'
elif attribute_name == 'XIdlVert1':
text = 'Vertices'
elif attribute_name == 'Sci2IdlDeg':
text = 'Science to Ideal Polynomial'
else:
text = ''
cell = ws1.cell(column=col, row=header_row_description, value="{}".format(text))
cell.font = Font(name='Courier', b=True, i=True, family=3.0, sz=14.0)
# cell.font.color = Color(rgb='FF0000FF', type='rgb')
# write aperture attributes
for j, attribute_name in enumerate(PRD_REQUIRED_ATTRIBUTES_ORDERED):
col = j + 1
cell = ws1.cell(column=col, row=header_row_attributes, value="{}".
format(attribute_name))
cell.font = Font(name='Calibri', b=True, family=2.0, sz=15.0)
cell.alignment = Alignment(horizontal='center')
# write aperture values
for i, aper_name in enumerate(aperture_names):
aperture = aperture_collection.apertures[aper_name]
# aperture = siaf[aper_name]
row = i + 1 + header_row_attributes
for j, attribute_name in enumerate(PRD_REQUIRED_ATTRIBUTES_ORDERED):
col = j + 1
cell = ws1.cell(column=col, row=row, value="{}".
format(getattr(aperture, attribute_name)))
if attribute_name not in 'InstrName AperName DDCName AperType AperShape'.\
split():
cell.alignment = Alignment(horizontal='right')
# adjust column width
for column_cells in ws1.columns:
length = max(len(cell.value or '') for cell in column_cells[1:])
ws1.column_dimensions[column_cells[0].column].width = length * 1.5
siaf_workbook.save(filename=out_filename)
if verbose:
print('Wrote Siaf to xlsx file {}'.format(out_filename))
else:
table = Table()
for attribute_name in PRD_REQUIRED_ATTRIBUTES_ORDERED:
data = [getattr(aperture_collection.apertures[aperture_name], attribute_name) for
aperture_name in aperture_names]
table.add_column(Column(data=data, name=attribute_name))
table.write(out_filename, format=file_format)
if verbose:
print('Wrote Siaf to {} file {}'.format(file_format, out_filename))
filenames.append(out_filename)
return filenames
| write_jwst_siaf |
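# Hedged usage sketch (not part of the module): writing one aperture collection
# to both PRD formats. Obtaining the collection via pysiaf.Siaf('NIRCam') is an
# assumption about the surrounding package.
#
#   import pysiaf
#   siaf = pysiaf.Siaf('NIRCam')
#   files = write_jwst_siaf(siaf, basepath='/tmp', label='test',
#                           file_format=['xml', 'xlsx'])
#   print(files)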
post_list.py | # -*- coding: utf-8 -*-
# Copyright © 2013-2014 Udo Spallek, Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import uuid
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from nikola import utils
from nikola.plugin_categories import RestExtension
# WARNING: the directive name is post-list
# (with a DASH instead of an UNDERSCORE)
class Plugin(RestExtension):
name = "rest_post_list"
def set_site(self, site):
self.site = site
directives.register_directive('post-list', PostList)
PostList.site = site
return super(Plugin, self).set_site(site)
class PostList(Directive):
"""
Post List
=========
:Directive Arguments: None.
:Directive Options: lang, start, stop, reverse, tags, template, id
:Directive Content: None.
Provides a reStructuredText directive to create a list of posts.
The posts appearing in the list can be filtered by options.
*List slicing* is provided with the *start*, *stop* and *reverse* options.
The following not required options are recognized:
``start`` : integer
The index of the first post to show.
A negative value like ``-3`` will show the *last* three posts in the
post-list.
Defaults to None.
``stop`` : integer
The index of the last post to show.
        A negative value like ``-1`` will show every post except the
        *last* one in the post-list.
Defaults to None.
``reverse`` : flag
Reverse the order of the post-list.
        Default is to not reverse the order of posts.
``tags`` : string [, string...]
Filter posts to show only posts having at least one of the ``tags``.
Defaults to None.
``slugs`` : string [, string...]
Filter posts to show only posts having at least one of the ``slugs``. | Shows all posts and pages in the post list.
Defaults to show only posts with set *use_in_feeds*.
``lang`` : string
The language of post *titles* and *links*.
Defaults to default language.
``template`` : string
The name of an alternative template to render the post-list.
Defaults to ``post_list_directive.tmpl``
``id`` : string
A manual id for the post list.
Defaults to a random name composed by 'post_list_' + uuid.uuid4().hex.
"""
option_spec = {
'start': int,
'stop': int,
'reverse': directives.flag,
'tags': directives.unchanged,
'slugs': directives.unchanged,
'all': directives.flag,
'lang': directives.unchanged,
'template': directives.path,
'id': directives.unchanged,
}
def run(self):
start = self.options.get('start')
stop = self.options.get('stop')
reverse = self.options.get('reverse', False)
tags = self.options.get('tags')
tags = [t.strip().lower() for t in tags.split(',')] if tags else []
slugs = self.options.get('slugs')
slugs = [s.strip() for s in slugs.split(',')] if slugs else []
show_all = self.options.get('all', False)
lang = self.options.get('lang', utils.LocaleBorg().current_lang)
template = self.options.get('template', 'post_list_directive.tmpl')
if self.site.invariant: # for testing purposes
post_list_id = self.options.get('id', 'post_list_' + 'fixedvaluethatisnotauuid')
else:
post_list_id = self.options.get('id', 'post_list_' + uuid.uuid4().hex)
filtered_timeline = []
posts = []
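        # docutils stores a flag option's value as None when the flag is
        # present, hence the ``is None`` checks for ``reverse`` and ``all``.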
step = -1 if reverse is None else None
if show_all is None:
timeline = [p for p in self.site.timeline]
else:
timeline = [p for p in self.site.timeline if p.use_in_feeds]
for post in timeline:
if tags:
cont = True
tags_lower = [t.lower() for t in post.tags]
for tag in tags:
if tag in tags_lower:
cont = False
if cont:
continue
filtered_timeline.append(post)
for post in filtered_timeline[start:stop:step]:
if slugs:
cont = True
for slug in slugs:
if slug == post.meta('slug'):
cont = False
if cont:
continue
posts += [post]
if not posts:
return []
template_data = {
'lang': lang,
'posts': posts,
'date_format': self.site.GLOBAL_CONTEXT.get('date_format'),
'post_list_id': post_list_id,
}
output = self.site.template_system.render_template(
template, None, template_data)
return [nodes.raw('', output, format='html')] | Defaults to None.
``all`` : flag |
general.js | 'use strict';
| const { Types } = require('mongoose');
const { ObjectId } = Types;
// Returns true when `id` is a valid MongoDB ObjectId.
const checkId = id => ObjectId.isValid(id);
// Returns true only when every element of `arr` is a string.
const checkArrayString = arr =>
  !arr.map(item => typeof item === 'string').includes(false);
module.exports = { checkId, checkArrayString }; | |
regexp.rs | use std::ops::Deref;
use gc::Gc;
use regex::Regex;
use crate::{
exec::Interpreter,
js::{
function::NativeFunctionData,
object::{InternalState, Object, ObjectKind, PROTOTYPE},
property::Property,
value::{from_value, to_value, FromValue, ResultValue, Value, ValueData},
},
};
#[derive(Debug)]
struct RegExp {
/// Regex matcher.
matcher: Regex,
/// Update last_index, set if global or sticky flags are set.
use_last_index: bool,
/// String of parsed flags.
flags: String,
/// Flag 's' - dot matches newline characters.
dot_all: bool,
/// Flag 'g'
global: bool,
/// Flag 'i' - ignore case.
ignore_case: bool,
/// Flag 'm' - '^' and '$' match beginning/end of line.
multiline: bool,
/// Flag 'y'
sticky: bool,
/// Flag 'u' - Unicode.
unicode: bool,
}
impl InternalState for RegExp {}
fn get_argument<T: FromValue>(args: &[Value], idx: usize) -> Result<T, Value> {
match args.get(idx) {
Some(arg) => from_value(arg.clone()).map_err(to_value),
None => Err(to_value(format!("expected argument at index {}", idx))),
}
}
/// Create a new `RegExp`
pub fn make_regexp(this: &Value, args: &[Value], _: &mut Interpreter) -> ResultValue {
if args.is_empty() {
return Err(Gc::new(ValueData::Undefined));
}
let mut regex_body = String::new();
let mut regex_flags = String::new();
#[allow(clippy::indexing_slicing)] // length has been checked
match args[0].deref() {
ValueData::String(ref body) => {
// first argument is a string -> use it as regex pattern
regex_body = body.into();
}
ValueData::Object(ref obj) => {
let slots = &*obj.borrow().internal_slots;
if slots.get("RegExpMatcher").is_some() {
// first argument is another `RegExp` object, so copy its pattern and flags
if let Some(body) = slots.get("OriginalSource") |
if let Some(flags) = slots.get("OriginalFlags") {
regex_flags =
from_value(flags.clone()).expect("Could not convert value to String");
}
}
}
_ => return Err(Gc::new(ValueData::Undefined)),
}
// if a second argument is given and it's a string, use it as flags
match args.get(1) {
None => {}
Some(flags) => {
if let ValueData::String(flags) = flags.deref() {
regex_flags = flags.into();
}
}
}
// parse flags
let mut sorted_flags = String::new();
let mut pattern = String::new();
let mut dot_all = false;
let mut global = false;
let mut ignore_case = false;
let mut multiline = false;
let mut sticky = false;
let mut unicode = false;
if regex_flags.contains('g') {
global = true;
sorted_flags.push('g');
}
if regex_flags.contains('i') {
ignore_case = true;
sorted_flags.push('i');
pattern.push('i');
}
if regex_flags.contains('m') {
multiline = true;
sorted_flags.push('m');
pattern.push('m');
}
if regex_flags.contains('s') {
dot_all = true;
sorted_flags.push('s');
pattern.push('s');
}
if regex_flags.contains('u') {
unicode = true;
sorted_flags.push('u');
        // pattern.push('u'); // not needed: the regex crate is Unicode-aware by default
}
if regex_flags.contains('y') {
sticky = true;
sorted_flags.push('y');
}
// the `regex` crate uses '(?{flags})` inside the pattern to enable flags
if !pattern.is_empty() {
pattern = format!("(?{})", pattern);
}
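    // e.g. flags "im" with body "abc" yield the final pattern "(?im)abc"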
pattern.push_str(regex_body.as_str());
let matcher = Regex::new(pattern.as_str()).expect("failed to create matcher");
let regexp = RegExp {
matcher,
use_last_index: global || sticky,
flags: sorted_flags,
dot_all,
global,
ignore_case,
multiline,
sticky,
unicode,
};
// This value is used by console.log and other routines to match Object type
// to its Javascript Identifier (global constructor method name)
this.set_kind(ObjectKind::Ordinary);
this.set_internal_slot("RegExpMatcher", Gc::new(ValueData::Undefined));
this.set_internal_slot("OriginalSource", to_value(regex_body));
this.set_internal_slot("OriginalFlags", to_value(regex_flags));
this.set_internal_state(regexp);
Ok(this.clone())
}
fn get_dot_all(this: &Value, _: &[Value], _: &mut Interpreter) -> ResultValue {
this.with_internal_state_ref(|regex: &RegExp| Ok(to_value(regex.dot_all)))
}
fn get_flags(this: &Value, _: &[Value], _: &mut Interpreter) -> ResultValue {
this.with_internal_state_ref(|regex: &RegExp| Ok(to_value(regex.flags.clone())))
}
fn get_global(this: &Value, _: &[Value], _: &mut Interpreter) -> ResultValue {
this.with_internal_state_ref(|regex: &RegExp| Ok(to_value(regex.global)))
}
fn get_ignore_case(this: &Value, _: &[Value], _: &mut Interpreter) -> ResultValue {
this.with_internal_state_ref(|regex: &RegExp| Ok(to_value(regex.ignore_case)))
}
fn get_multiline(this: &Value, _: &[Value], _: &mut Interpreter) -> ResultValue {
this.with_internal_state_ref(|regex: &RegExp| Ok(to_value(regex.multiline)))
}
fn get_source(this: &Value, _: &[Value], _: &mut Interpreter) -> ResultValue {
Ok(this.get_internal_slot("OriginalSource"))
}
fn get_sticky(this: &Value, _: &[Value], _: &mut Interpreter) -> ResultValue {
this.with_internal_state_ref(|regex: &RegExp| Ok(to_value(regex.sticky)))
}
fn get_unicode(this: &Value, _: &[Value], _: &mut Interpreter) -> ResultValue {
this.with_internal_state_ref(|regex: &RegExp| Ok(to_value(regex.unicode)))
}
fn _make_prop(getter: NativeFunctionData) -> Property {
Property::default().get(to_value(getter))
}
/// Search for a match between this regex and a specified string
pub fn test(this: &Value, args: &[Value], _: &mut Interpreter) -> ResultValue {
let arg_str = get_argument::<String>(args, 0)?;
let mut last_index = from_value::<usize>(this.get_field("lastIndex")).map_err(to_value)?;
let result = this.with_internal_state_ref(|regex: &RegExp| {
let result = match regex.matcher.find_at(arg_str.as_str(), last_index) {
Some(m) => {
if regex.use_last_index {
last_index = m.end();
}
true
}
None => {
if regex.use_last_index {
last_index = 0;
}
false
}
};
Ok(Gc::new(ValueData::Boolean(result)))
});
this.set_field_slice("lastIndex", to_value(last_index));
result
}
/// Search for a match between this regex and a specified string
pub fn exec(this: &Value, args: &[Value], _: &mut Interpreter) -> ResultValue {
let arg_str = get_argument::<String>(args, 0)?;
let mut last_index = from_value::<usize>(this.get_field("lastIndex")).map_err(to_value)?;
let result = this.with_internal_state_ref(|regex: &RegExp| {
let mut locations = regex.matcher.capture_locations();
let result =
match regex
.matcher
.captures_read_at(&mut locations, arg_str.as_str(), last_index)
{
Some(m) => {
if regex.use_last_index {
last_index = m.end();
}
let mut result = Vec::with_capacity(locations.len());
for i in 0..locations.len() {
if let Some((start, end)) = locations.get(i) {
result.push(to_value(
arg_str.get(start..end).expect("Could not get slice"),
));
} else {
result.push(Gc::new(ValueData::Undefined));
}
}
let result = to_value(result);
result.set_prop_slice("index", Property::default().value(to_value(m.start())));
result.set_prop_slice("input", Property::default().value(to_value(arg_str)));
result
}
None => {
if regex.use_last_index {
last_index = 0;
}
Gc::new(ValueData::Null)
}
};
Ok(result)
});
this.set_field_slice("lastIndex", to_value(last_index));
result
}
/// Return a string representing the regular expression
pub fn to_string(this: &Value, _: &[Value], _: &mut Interpreter) -> ResultValue {
let body = from_value::<String>(this.get_internal_slot("OriginalSource")).map_err(to_value)?;
let flags = this.with_internal_state_ref(|regex: &RegExp| regex.flags.clone());
Ok(to_value(format!("/{}/{}", body, flags)))
}
/// Create a new `RegExp` object
pub fn create_constructor(global: &Value) -> Value {
// Create constructor function
let mut regexp_constructor = Object::default();
regexp_constructor.kind = ObjectKind::Function;
regexp_constructor.set_internal_method("construct", make_regexp);
    // TODO: add a call function; currently `call` points to the constructor, which is wrong
regexp_constructor.set_internal_method("call", make_regexp);
// Create prototype
let proto = ValueData::new_obj(Some(global));
proto.set_field_slice("test", to_value(test as NativeFunctionData));
proto.set_field_slice("exec", to_value(exec as NativeFunctionData));
proto.set_field_slice("toString", to_value(to_string as NativeFunctionData));
proto.set_field_slice("lastIndex", to_value(0));
proto.set_prop_slice("dotAll", _make_prop(get_dot_all));
proto.set_prop_slice("flags", _make_prop(get_flags));
proto.set_prop_slice("global", _make_prop(get_global));
proto.set_prop_slice("ignoreCase", _make_prop(get_ignore_case));
proto.set_prop_slice("multiline", _make_prop(get_multiline));
proto.set_prop_slice("source", _make_prop(get_source));
proto.set_prop_slice("sticky", _make_prop(get_sticky));
proto.set_prop_slice("unicode", _make_prop(get_unicode));
let regexp = to_value(regexp_constructor);
regexp.set_field_slice(PROTOTYPE, proto.clone());
proto.set_field_slice("constructor", regexp.clone());
regexp
}
#[cfg(test)]
mod tests {
use super::*;
use crate::exec::Executor;
use crate::forward;
use crate::realm::Realm;
#[test]
fn test_constructors() {
let realm = Realm::create();
let mut engine = Executor::new(realm);
let init = r#"
let constructed = new RegExp("[0-9]+(\\.[0-9]+)?");
let literal = /[0-9]+(\.[0-9]+)?/;
let ctor_literal = new RegExp(/[0-9]+(\.[0-9]+)?/);
"#;
forward(&mut engine, init);
assert_eq!(forward(&mut engine, "constructed.test('1.0')"), "true");
assert_eq!(forward(&mut engine, "literal.test('1.0')"), "true");
assert_eq!(forward(&mut engine, "ctor_literal.test('1.0')"), "true");
}
#[test]
fn check_regexp_constructor_is_function() {
let global = ValueData::new_obj(None);
let regexp_constructor = create_constructor(&global);
assert_eq!(regexp_constructor.is_function(), true);
}
// TODO: uncomment this test when property getters are supported
// #[test]
// fn test_flags() {
// let mut engine = Executor::new();
// let init = r#"
// var re_gi = /test/gi;
// var re_sm = /test/sm;
// "#;
//
// forward(&mut engine, init);
// assert_eq!(forward(&mut engine, "re_gi.global"), "true");
// assert_eq!(forward(&mut engine, "re_gi.ignoreCase"), "true");
// assert_eq!(forward(&mut engine, "re_gi.multiline"), "false");
// assert_eq!(forward(&mut engine, "re_gi.dotAll"), "false");
// assert_eq!(forward(&mut engine, "re_gi.unicode"), "false");
// assert_eq!(forward(&mut engine, "re_gi.sticky"), "false");
// assert_eq!(forward(&mut engine, "re_gi.flags"), "gi");
//
// assert_eq!(forward(&mut engine, "re_sm.global"), "false");
// assert_eq!(forward(&mut engine, "re_sm.ignoreCase"), "false");
// assert_eq!(forward(&mut engine, "re_sm.multiline"), "true");
// assert_eq!(forward(&mut engine, "re_sm.dotAll"), "true");
// assert_eq!(forward(&mut engine, "re_sm.unicode"), "false");
// assert_eq!(forward(&mut engine, "re_sm.sticky"), "false");
// assert_eq!(forward(&mut engine, "re_sm.flags"), "ms");
// }
#[test]
fn test_last_index() {
let realm = Realm::create();
let mut engine = Executor::new(realm);
let init = r#"
let regex = /[0-9]+(\.[0-9]+)?/g;
"#;
forward(&mut engine, init);
assert_eq!(forward(&mut engine, "regex.lastIndex"), "0");
assert_eq!(forward(&mut engine, "regex.test('1.0foo')"), "true");
assert_eq!(forward(&mut engine, "regex.lastIndex"), "3");
assert_eq!(forward(&mut engine, "regex.test('1.0foo')"), "false");
assert_eq!(forward(&mut engine, "regex.lastIndex"), "0");
}
#[test]
fn test_exec() {
let realm = Realm::create();
let mut engine = Executor::new(realm);
let init = r#"
var re = /quick\s(brown).+?(jumps)/ig;
var result = re.exec('The Quick Brown Fox Jumps Over The Lazy Dog');
"#;
forward(&mut engine, init);
assert_eq!(forward(&mut engine, "result[0]"), "Quick Brown Fox Jumps");
assert_eq!(forward(&mut engine, "result[1]"), "Brown");
assert_eq!(forward(&mut engine, "result[2]"), "Jumps");
assert_eq!(forward(&mut engine, "result.index"), "4");
assert_eq!(
forward(&mut engine, "result.input"),
"The Quick Brown Fox Jumps Over The Lazy Dog"
);
}
#[test]
fn test_to_string() {
let realm = Realm::create();
let mut engine = Executor::new(realm);
assert_eq!(
forward(&mut engine, "(new RegExp('a+b+c')).toString()"),
"/a+b+c/"
);
assert_eq!(
forward(&mut engine, "(new RegExp('bar', 'g')).toString()"),
"/bar/g"
);
assert_eq!(
forward(&mut engine, "(new RegExp('\\\\n', 'g')).toString()"),
"/\\n/g"
);
assert_eq!(forward(&mut engine, "/\\n/g.toString()"), "/\\n/g");
}
}
| {
regex_body =
from_value(body.clone()).expect("Could not convert value to String");
} |
full_piv_lu.rs | use na::Matrix3;
#[test]
#[rustfmt::skip]
fn | () {
let m = Matrix3::new(
2.0, -1.0, 0.0,
-1.0, 2.0, -1.0,
0.0, -1.0, 2.0);
let lu = m.full_piv_lu();
assert_eq!(lu.determinant(), 4.0);
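    // Full pivoting yields P * M * Q = L * U, so M is reconstructed by
    // applying the inverse permutations to the product L * U.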
let (p, l, u, q) = lu.unpack();
let mut lu = l * u;
p.inv_permute_rows(&mut lu);
q.inv_permute_columns(&mut lu);
assert!(relative_eq!(m, lu, epsilon = 1.0e-7));
}
#[test]
#[rustfmt::skip]
fn full_piv_lu_simple_with_pivot() {
let m = Matrix3::new(0.0, -1.0, 2.0,
-1.0, 2.0, -1.0,
2.0, -1.0, 0.0);
let lu = m.full_piv_lu();
assert_eq!(lu.determinant(), -4.0);
let (p, l, u, q) = lu.unpack();
let mut lu = l * u;
p.inv_permute_rows(&mut lu);
q.inv_permute_columns(&mut lu);
assert!(relative_eq!(m, lu, epsilon = 1.0e-7));
}
#[cfg(feature = "arbitrary")]
mod proptest_tests {
macro_rules! gen_tests(
($module: ident, $scalar: expr, $scalar_type: ty) => {
mod $module {
use std::cmp;
use num::One;
use na::{DMatrix, Matrix4x3, DVector, Vector4};
#[allow(unused_imports)]
use crate::core::helper::{RandScalar, RandComplex};
use crate::proptest::*;
use proptest::{prop_assert, proptest};
proptest! {
#[test]
fn full_piv_lu(m in dmatrix_($scalar)) {
let lu = m.clone().full_piv_lu();
let (p, l, u, q) = lu.unpack();
let mut lu = l * u;
p.inv_permute_rows(&mut lu);
q.inv_permute_columns(&mut lu);
prop_assert!(relative_eq!(m, lu, epsilon = 1.0e-7))
}
#[test]
fn full_piv_lu_static_3_5(m in matrix3x5_($scalar)) {
let lu = m.full_piv_lu();
let (p, l, u, q) = lu.unpack();
let mut lu = l * u;
p.inv_permute_rows(&mut lu);
q.inv_permute_columns(&mut lu);
prop_assert!(relative_eq!(m, lu, epsilon = 1.0e-7))
}
#[test]
fn full_piv_lu_static_5_3(m in matrix5x3_($scalar)) {
let lu = m.full_piv_lu();
let (p, l, u, q) = lu.unpack();
let mut lu = l * u;
p.inv_permute_rows(&mut lu);
q.inv_permute_columns(&mut lu);
prop_assert!(relative_eq!(m, lu, epsilon = 1.0e-7))
}
#[test]
fn full_piv_lu_static_square(m in matrix4_($scalar)) {
let lu = m.full_piv_lu();
let (p, l, u, q) = lu.unpack();
let mut lu = l * u;
p.inv_permute_rows(&mut lu);
q.inv_permute_columns(&mut lu);
prop_assert!(relative_eq!(m, lu, epsilon = 1.0e-7))
}
#[test]
fn full_piv_lu_solve(n in PROPTEST_MATRIX_DIM, nb in PROPTEST_MATRIX_DIM) {
let m = DMatrix::<$scalar_type>::new_random(n, n).map(|e| e.0);
let lu = m.clone().full_piv_lu();
let b1 = DVector::<$scalar_type>::new_random(n).map(|e| e.0);
let b2 = DMatrix::<$scalar_type>::new_random(n, nb).map(|e| e.0);
let sol1 = lu.solve(&b1);
let sol2 = lu.solve(&b2);
prop_assert!(sol1.is_none() || relative_eq!(&m * sol1.unwrap(), b1, epsilon = 1.0e-6));
prop_assert!(sol2.is_none() || relative_eq!(&m * sol2.unwrap(), b2, epsilon = 1.0e-6));
}
#[test]
fn full_piv_lu_solve_static(m in matrix4_($scalar)) {
let lu = m.full_piv_lu();
let b1 = Vector4::<$scalar_type>::new_random().map(|e| e.0);
let b2 = Matrix4x3::<$scalar_type>::new_random().map(|e| e.0);
let sol1 = lu.solve(&b1);
let sol2 = lu.solve(&b2);
prop_assert!(sol1.is_none() || relative_eq!(&m * sol1.unwrap(), b1, epsilon = 1.0e-6));
prop_assert!(sol2.is_none() || relative_eq!(&m * sol2.unwrap(), b2, epsilon = 1.0e-6));
}
#[test]
fn full_piv_lu_inverse(n in PROPTEST_MATRIX_DIM) {
let n = cmp::max(1, cmp::min(n, 15)); // To avoid slowing down the test too much.
let m = DMatrix::<$scalar_type>::new_random(n, n).map(|e| e.0);
let mut l = m.lower_triangle();
let mut u = m.upper_triangle();
// Ensure the matrix is well conditioned for inversion.
l.fill_diagonal(One::one());
u.fill_diagonal(One::one());
let m = l * u;
let m1 = m.clone().full_piv_lu().try_inverse().unwrap();
let id1 = &m * &m1;
let id2 = &m1 * &m;
prop_assert!(id1.is_identity(1.0e-5));
prop_assert!(id2.is_identity(1.0e-5));
}
#[test]
fn full_piv_lu_inverse_static(m in matrix4_($scalar)) {
let lu = m.full_piv_lu();
if let Some(m1) = lu.try_inverse() {
let id1 = &m * &m1;
let id2 = &m1 * &m;
prop_assert!(id1.is_identity(1.0e-5));
prop_assert!(id2.is_identity(1.0e-5));
}
}
}
}
}
);
gen_tests!(complex, complex_f64(), RandComplex<f64>);
gen_tests!(f64, PROPTEST_F64, RandScalar<f64>);
}
/*
#[test]
fn swap_rows() {
let mut m = Matrix5x3::new(
11.0, 12.0, 13.0,
21.0, 22.0, 23.0,
31.0, 32.0, 33.0,
41.0, 42.0, 43.0,
51.0, 52.0, 53.0);
let expected = Matrix5x3::new(
11.0, 12.0, 13.0,
41.0, 42.0, 43.0,
31.0, 32.0, 33.0,
21.0, 22.0, 23.0,
51.0, 52.0, 53.0);
m.swap_rows(1, 3);
assert_eq!(m, expected);
}
#[test]
fn swap_columns() {
let mut m = Matrix3x5::new(
11.0, 12.0, 13.0, 14.0, 15.0,
21.0, 22.0, 23.0, 24.0, 25.0,
31.0, 32.0, 33.0, 34.0, 35.0);
let expected = Matrix3x5::new(
11.0, 14.0, 13.0, 12.0, 15.0,
21.0, 24.0, 23.0, 22.0, 25.0,
31.0, 34.0, 33.0, 32.0, 35.0);
m.swap_columns(1, 3);
assert_eq!(m, expected);
}
#[test]
fn remove_columns() {
let m = Matrix3x5::new(
11, 12, 13, 14, 15,
21, 22, 23, 24, 25,
31, 32, 33, 34, 35);
let expected1 = Matrix3x4::new(
12, 13, 14, 15,
22, 23, 24, 25,
32, 33, 34, 35);
let expected2 = Matrix3x4::new(
11, 12, 13, 14,
21, 22, 23, 24,
31, 32, 33, 34);
let expected3 = Matrix3x4::new(
11, 12, 14, 15,
21, 22, 24, 25,
31, 32, 34, 35);
assert_eq!(m.remove_column(0), expected1);
assert_eq!(m.remove_column(4), expected2);
assert_eq!(m.remove_column(2), expected3);
let expected1 = Matrix3::new(
13, 14, 15,
23, 24, 25,
33, 34, 35);
let expected2 = Matrix3::new(
11, 12, 13,
21, 22, 23,
31, 32, 33);
let expected3 = Matrix3::new(
11, 12, 15,
21, 22, 25,
31, 32, 35);
assert_eq!(m.remove_fixed_columns::<U2>(0), expected1);
assert_eq!(m.remove_fixed_columns::<U2>(3), expected2);
assert_eq!(m.remove_fixed_columns::<U2>(2), expected3);
    // The following is just to verify that the return type dimensions are correctly inferred.
let computed: Matrix<_, U3, Dynamic, _> = m.remove_columns(3, 2);
assert!(computed.eq(&expected2));
}
#[test]
fn remove_rows() {
let m = Matrix5x3::new(
11, 12, 13,
21, 22, 23,
31, 32, 33,
41, 42, 43,
51, 52, 53);
let expected1 = Matrix4x3::new(
21, 22, 23,
31, 32, 33,
41, 42, 43,
51, 52, 53);
let expected2 = Matrix4x3::new(
11, 12, 13,
21, 22, 23,
31, 32, 33,
41, 42, 43);
let expected3 = Matrix4x3::new(
11, 12, 13,
21, 22, 23,
41, 42, 43,
51, 52, 53);
assert_eq!(m.remove_row(0), expected1);
assert_eq!(m.remove_row(4), expected2);
assert_eq!(m.remove_row(2), expected3);
let expected1 = Matrix3::new(
31, 32, 33,
41, 42, 43,
51, 52, 53);
let expected2 = Matrix3::new(
11, 12, 13,
21, 22, 23,
31, 32, 33);
let expected3 = Matrix3::new(
11, 12, 13,
21, 22, 23,
51, 52, 53);
assert_eq!(m.remove_fixed_rows::<U2>(0), expected1);
assert_eq!(m.remove_fixed_rows::<U2>(3), expected2);
assert_eq!(m.remove_fixed_rows::<U2>(2), expected3);
    // The following is just to verify that the return type dimensions are correctly inferred.
let computed: Matrix<_, Dynamic, U3, _> = m.remove_rows(3, 2);
assert!(computed.eq(&expected2));
}
#[test]
fn insert_columns() {
let m = Matrix5x3::new(
11, 12, 13,
21, 22, 23,
31, 32, 33,
41, 42, 43,
51, 52, 53);
let expected1 = Matrix5x4::new(
0, 11, 12, 13,
0, 21, 22, 23,
0, 31, 32, 33,
0, 41, 42, 43,
0, 51, 52, 53);
let expected2 = Matrix5x4::new(
11, 12, 13, 0,
21, 22, 23, 0,
31, 32, 33, 0,
41, 42, 43, 0,
51, 52, 53, 0);
let expected3 = Matrix5x4::new(
11, 12, 0, 13,
21, 22, 0, 23,
31, 32, 0, 33,
41, 42, 0, 43,
51, 52, 0, 53);
assert_eq!(m.insert_column(0, 0), expected1);
assert_eq!(m.insert_column(3, 0), expected2);
assert_eq!(m.insert_column(2, 0), expected3);
let expected1 = Matrix5::new(
0, 0, 11, 12, 13,
0, 0, 21, 22, 23,
0, 0, 31, 32, 33,
0, 0, 41, 42, 43,
0, 0, 51, 52, 53);
let expected2 = Matrix5::new(
11, 12, 13, 0, 0,
21, 22, 23, 0, 0,
31, 32, 33, 0, 0,
41, 42, 43, 0, 0,
51, 52, 53, 0, 0);
let expected3 = Matrix5::new(
11, 12, 0, 0, 13,
21, 22, 0, 0, 23,
31, 32, 0, 0, 33,
41, 42, 0, 0, 43,
51, 52, 0, 0, 53);
assert_eq!(m.insert_fixed_columns::<U2>(0, 0), expected1);
assert_eq!(m.insert_fixed_columns::<U2>(3, 0), expected2);
assert_eq!(m.insert_fixed_columns::<U2>(2, 0), expected3);
    // The following is just to verify that the return type dimensions are correctly inferred.
let computed: Matrix<_, U5, Dynamic, _> = m.insert_columns(3, 2, 0);
assert!(computed.eq(&expected2));
}
#[test]
fn insert_rows() {
let m = Matrix3x5::new(
11, 12, 13, 14, 15,
21, 22, 23, 24, 25,
31, 32, 33, 34, 35);
let expected1 = Matrix4x5::new(
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
21, 22, 23, 24, 25,
31, 32, 33, 34, 35);
let expected2 = Matrix4x5::new(
11, 12, 13, 14, 15,
21, 22, 23, 24, 25,
31, 32, 33, 34, 35,
0, 0, 0, 0, 0);
let expected3 = Matrix4x5::new(
11, 12, 13, 14, 15,
21, 22, 23, 24, 25,
0, 0, 0, 0, 0,
31, 32, 33, 34, 35);
assert_eq!(m.insert_row(0, 0), expected1);
assert_eq!(m.insert_row(3, 0), expected2);
assert_eq!(m.insert_row(2, 0), expected3);
let expected1 = Matrix5::new(
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
21, 22, 23, 24, 25,
31, 32, 33, 34, 35);
let expected2 = Matrix5::new(
11, 12, 13, 14, 15,
21, 22, 23, 24, 25,
31, 32, 33, 34, 35,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0);
let expected3 = Matrix5::new(
11, 12, 13, 14, 15,
21, 22, 23, 24, 25,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
31, 32, 33, 34, 35);
assert_eq!(m.insert_fixed_rows::<U2>(0, 0), expected1);
assert_eq!(m.insert_fixed_rows::<U2>(3, 0), expected2);
assert_eq!(m.insert_fixed_rows::<U2>(2, 0), expected3);
    // The following is just to verify that the return type dimensions are correctly inferred.
let computed: Matrix<_, Dynamic, U5, _> = m.insert_rows(3, 2, 0);
assert!(computed.eq(&expected2));
}
#[test]
fn resize() {
let m = Matrix3x5::new(
11, 12, 13, 14, 15,
21, 22, 23, 24, 25,
31, 32, 33, 34, 35);
let add_add = DMatrix::from_row_slice(5, 6, &[
11, 12, 13, 14, 15, 42,
21, 22, 23, 24, 25, 42,
31, 32, 33, 34, 35, 42,
42, 42, 42, 42, 42, 42,
42, 42, 42, 42, 42, 42]);
let del_del = DMatrix::from_row_slice(1, 2, &[11, 12]);
let add_del = DMatrix::from_row_slice(5, 2, &[
11, 12,
21, 22,
31, 32,
42, 42,
42, 42]);
let del_add = DMatrix::from_row_slice(1, 8, &[
11, 12, 13, 14, 15, 42, 42, 42]);
assert_eq!(del_del, m.resize(1, 2, 42));
assert_eq!(add_add, m.resize(5, 6, 42));
assert_eq!(add_del, m.resize(5, 2, 42));
assert_eq!(del_add, m.resize(1, 8, 42));
}
*/
| full_piv_lu_simple |
node_classification.py | import yaml
import random
import torch.backends.cudnn
import numpy as np
from autogl.datasets import build_dataset_from_name
from autogl.solver import AutoNodeClassifier
from autogl.module import Acc
from autogl.backend import DependentBackend
if __name__ == "__main__":
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(
"auto node classification", formatter_class=ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--dataset",
default="cora",
type=str,
help="dataset to use",
choices=[
"cora",
"pubmed",
"citeseer",
"coauthor_cs",
"coauthor_physics",
"amazon_computers",
"amazon_photo",
],
)
parser.add_argument(
"--configs",
type=str,
default="../configs/nodeclf_gcn_benchmark_small.yml",
help="config to use",
)
# following arguments will override parameters in the config file | "--max_eval", type=int, default=50, help="max hpo evaluation times"
)
parser.add_argument("--seed", type=int, default=0, help="random seed")
parser.add_argument("--device", default=0, type=int, help="GPU device")
args = parser.parse_args()
if torch.cuda.is_available():
torch.cuda.set_device(args.device)
seed = args.seed
# set random seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
dataset = build_dataset_from_name(args.dataset)
label = dataset[0].nodes.data["y" if DependentBackend.is_pyg() else "label"]
num_classes = len(np.unique(label.numpy()))
    with open(args.configs, "r") as f:
        configs = yaml.load(f.read(), Loader=yaml.FullLoader)
configs["hpo"]["name"] = args.hpo
configs["hpo"]["max_evals"] = args.max_eval
autoClassifier = AutoNodeClassifier.from_config(configs)
# train
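    # cora/citeseer/pubmed ship with a standard split, so no split sizes are
    # passed; the other datasets are fit with 20 training and 30 validation
    # nodes sampled per class, as the arguments below show.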
if args.dataset in ["cora", "citeseer", "pubmed"]:
autoClassifier.fit(dataset, time_limit=3600, evaluation_method=[Acc])
else:
autoClassifier.fit(
dataset,
time_limit=3600,
evaluation_method=[Acc],
seed=seed,
train_split=20 * num_classes,
val_split=30 * num_classes,
balanced=False,
)
autoClassifier.get_leaderboard().show()
acc = autoClassifier.evaluate(metric="acc")
print("test acc: {:.4f}".format(acc)) | parser.add_argument("--hpo", type=str, default="tpe", help="hpo methods")
parser.add_argument( |
checkbox.wrapper-a606288b.js | import { d as _objectWithoutProperties, b as _slicedToArray, _ as _objectSpread2, e as _extends } from './_rollupPluginBabelHelpers-b49fe34a.js';
import React__default, { forwardRef, useState, useCallback, useImperativeHandle, useMemo } from 'react';
import { V as View } from './view.native-f7a27d15.js'; | import './h3.js';
import './h4.js';
import './h5.js';
import './h6.js';
import './label.js';
import './p.js';
import './subtitle.js';
import { Text } from './text.js';
import { toBool, checkCall, get, isStr, noOp } from '@keg-hub/jsutils';
import { renderFromType } from './renderFromType.js';
import { getOnChangeHandler } from './getOnChangeHandler.js';
import { getChecked } from './getChecked.js';
import '@keg-hub/re-theme/colors';
import { useThemePath } from './useThemePath.js';
import './useThemeWithHeight.js';
import 'react-native';
import { u as useClassList } from './useClassList.native-70068878.js';
import { u as useThemeTypeAsClass } from './useThemeTypeAsClass.native-a05b9a50.js';
var _excluded = ["className", "initChecked", "checked", "children", "elType", "Element", "CheckIcon", "disabled", "disableCheck", "disableUncheck", "allowAdjacentPress", "isWeb", "LeftComponent", "leftClassName", "close", "onChange", "onValueChange", "RightComponent", "rightClassName", "styles", "CheckboxComponent", "type", "themePath", "value"];
var useCheckedState = function useCheckedState(isChecked, themeStyles) {
return useMemo(function () {
return _objectSpread2(_objectSpread2({}, themeStyles), {}, {
content: _objectSpread2(_objectSpread2({}, themeStyles.content), {}, {
area: _objectSpread2(_objectSpread2({}, get(themeStyles, 'content.area.off')), isChecked && get(themeStyles, 'content.area.on')),
indicator: _objectSpread2(_objectSpread2({}, get(themeStyles, 'content.indicator.off')), isChecked && get(themeStyles, 'content.indicator.on'))
})
});
}, [isChecked, themeStyles]);
};
var useCheckboxPressHandler = function useCheckboxPressHandler(isChecked, setChecked, onChange, _ref) {
var _ref$disableCheck = _ref.disableCheck,
disableCheck = _ref$disableCheck === void 0 ? false : _ref$disableCheck,
_ref$disableUncheck = _ref.disableUncheck,
disableUncheck = _ref$disableUncheck === void 0 ? true : _ref$disableUncheck;
return useCallback(function (event) {
if (isChecked) !disableUncheck && setChecked(false);else !disableCheck && setChecked(true);
checkCall(onChange, event, !isChecked);
}, [isChecked, setChecked, onChange, disableCheck, disableUncheck]);
};
var SideComponent = function SideComponent(_ref2) {
var className = _ref2.className,
Component = _ref2.Component,
styles = _ref2.styles,
style = _ref2.style,
onPress = _ref2.onPress;
var sideProps = onPress ? {
onPress: onPress
} : undefined;
return isStr(Component) ? React__default.createElement(Text, _extends({
className: className,
style: style
}, sideProps), Component) : renderFromType(Component, _objectSpread2({
className: className,
style: style,
styles: styles
}, sideProps));
};
var ChildrenComponent = function ChildrenComponent(_ref3) {
var children = _ref3.children,
className = _ref3.className;
return React__default.createElement(React__default.Fragment, null, renderFromType(children, {
className: className
}, null));
};
var useCheckboxHandle = function useCheckboxHandle(ref, isChecked, _setChecked, pressHandler) {
return useImperativeHandle(ref, function () {
return {
isChecked: isChecked,
setChecked: function setChecked(checked) {
_setChecked(checked);
pressHandler({}, checked);
}
};
}, [ref, isChecked, _setChecked, pressHandler]);
};
var CheckboxWrapper = forwardRef(function (props, ref) {
props.className;
var initChecked = props.initChecked,
checked = props.checked,
children = props.children,
elType = props.elType,
Element = props.Element,
CheckIcon = props.CheckIcon,
disabled = props.disabled,
_props$disableCheck = props.disableCheck,
disableCheck = _props$disableCheck === void 0 ? false : _props$disableCheck,
_props$disableUncheck = props.disableUncheck,
disableUncheck = _props$disableUncheck === void 0 ? false : _props$disableUncheck,
_props$allowAdjacentP = props.allowAdjacentPress,
allowAdjacentPress = _props$allowAdjacentP === void 0 ? true : _props$allowAdjacentP,
isWeb = props.isWeb,
LeftComponent = props.LeftComponent;
props.leftClassName;
var close = props.close,
onChange = props.onChange,
onValueChange = props.onValueChange,
RightComponent = props.RightComponent;
props.rightClassName;
var styles = props.styles,
CheckboxComponent = props.CheckboxComponent;
props.type;
var themePath = props.themePath,
value = props.value,
elProps = _objectWithoutProperties(props, _excluded);
var initCheckedValue = toBool(checked || initChecked || value);
var _useState = useState(initCheckedValue),
_useState2 = _slicedToArray(_useState, 2),
isChecked = _useState2[0],
setChecked = _useState2[1];
var pressHandler = useCheckboxPressHandler(isChecked, setChecked, onChange || onValueChange,
{
disableCheck: disableCheck,
disableUncheck: disableUncheck
});
useCheckboxHandle(ref, isChecked, setChecked, onChange || onValueChange);
var canUseHandler = !disabled && (isChecked && !disableUncheck || !isChecked && !disableCheck);
var elThemePath = themePath || "form.".concat(elType, ".").concat(close && 'close' || 'default');
var themeStyles = useThemePath(elThemePath, styles);
var disabledStyles = useThemePath("form.".concat(elType, ".disabled"), themeStyles);
var activeStyles = useCheckedState(isChecked, canUseHandler ? themeStyles : disabledStyles);
var typeClassName = useThemeTypeAsClass();
var pressHandlerProp = getOnChangeHandler(isWeb, canUseHandler ? pressHandler : noOp);
var ChildrenView = children && React__default.createElement(View, {
className: typeClassName,
style: activeStyles.main
}, React__default.createElement(ChildrenComponent, {
className: "keg-checkbox-container",
children: children
}));
return ChildrenView || React__default.createElement(View, {
className: typeClassName,
style: activeStyles.main
}, LeftComponent && React__default.createElement(SideComponent, {
className: useClassList(),
Component: LeftComponent,
style: activeStyles.content.left,
onPress: allowAdjacentPress && canUseHandler && pressHandler
}), CheckboxComponent ? renderFromType(CheckboxComponent, _objectSpread2(_objectSpread2({}, props), {}, {
styles: activeStyles.content
})) : React__default.createElement(Element, _extends({
className: "keg-checkbox-container",
elProps: elProps,
disabled: disabled,
styles: activeStyles.content,
CheckIcon: CheckIcon
}, getChecked(isWeb, isChecked), pressHandlerProp)), RightComponent && React__default.createElement(SideComponent, {
className: useClassList(),
Component: RightComponent,
style: activeStyles.content.right,
onPress: allowAdjacentPress && canUseHandler && pressHandler
}));
});
export { CheckboxWrapper as C };
//# sourceMappingURL=checkbox.wrapper-a606288b.js.map | import './caption.js';
import './h1.js';
import './h2.js'; |
cstore_impl.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use cstore::{self, LoadedMacro};
use encoder;
use link_args;
use native_libs;
use foreign_modules;
use schema;
use rustc::ty::query::QueryConfig;
use rustc::middle::cstore::{CrateStore, DepKind,
EncodedMetadata, NativeLibraryKind};
use rustc::middle::exported_symbols::ExportedSymbol;
use rustc::middle::stability::DeprecationEntry;
use rustc::hir::def;
use rustc::session::{CrateDisambiguator, Session};
use rustc::ty::{self, TyCtxt};
use rustc::ty::query::Providers;
use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE, CRATE_DEF_INDEX};
use rustc::hir::map::{DefKey, DefPath, DefPathHash};
use rustc::hir::map::definitions::DefPathTable;
use rustc::util::nodemap::DefIdMap;
use rustc_data_structures::svh::Svh;
use std::any::Any;
use rustc_data_structures::sync::Lrc;
use std::sync::Arc;
use syntax::ast;
use syntax::attr;
use syntax::source_map;
use syntax::edition::Edition;
use syntax::parse::source_file_to_stream;
use syntax::symbol::Symbol;
use syntax_pos::{Span, NO_EXPANSION, FileName};
use rustc_data_structures::bit_set::BitSet;
macro_rules! provide {
(<$lt:tt> $tcx:ident, $def_id:ident, $other:ident, $cdata:ident,
$($name:ident => $compute:block)*) => {
pub fn provide_extern<$lt>(providers: &mut Providers<$lt>) {
$(fn $name<'a, $lt:$lt, T>($tcx: TyCtxt<'a, $lt, $lt>, def_id_arg: T)
-> <ty::queries::$name<$lt> as
QueryConfig<$lt>>::Value
where T: IntoArgs,
{
#[allow(unused_variables)]
let ($def_id, $other) = def_id_arg.into_args();
assert!(!$def_id.is_local());
let def_path_hash = $tcx.def_path_hash(DefId {
krate: $def_id.krate,
index: CRATE_DEF_INDEX
});
let dep_node = def_path_hash
.to_dep_node(::rustc::dep_graph::DepKind::CrateMetadata);
// The DepNodeIndex of the DepNode::CrateMetadata should be
// cached somewhere, so that we can use read_index().
$tcx.dep_graph.read(dep_node);
let $cdata = $tcx.crate_data_as_rc_any($def_id.krate);
let $cdata = $cdata.downcast_ref::<cstore::CrateMetadata>()
.expect("CrateStore crated ata is not a CrateMetadata");
$compute
})*
*providers = Providers {
$($name,)*
..*providers
};
}
}
}
// Small trait to work around the queries all being defined via the macro
// above despite having different key types.
trait IntoArgs {
fn into_args(self) -> (DefId, DefId);
}
impl IntoArgs for DefId {
fn into_args(self) -> (DefId, DefId) { (self, self) }
}
impl IntoArgs for CrateNum {
fn into_args(self) -> (DefId, DefId) { (self.as_def_id(), self.as_def_id()) }
}
impl IntoArgs for (CrateNum, DefId) {
fn into_args(self) -> (DefId, DefId) { (self.0.as_def_id(), self.1) }
}
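// e.g. `type_of` is keyed by a `DefId`, while `implementations_of_trait` is
// keyed by a `(CrateNum, DefId)` pair; `into_args` normalizes both shapes.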
provide! { <'tcx> tcx, def_id, other, cdata,
type_of => { cdata.get_type(def_id.index, tcx) }
generics_of => {
tcx.alloc_generics(cdata.get_generics(def_id.index, tcx.sess))
}
predicates_of => { cdata.get_predicates(def_id.index, tcx) }
predicates_defined_on => { cdata.get_predicates_defined_on(def_id.index, tcx) }
super_predicates_of => { cdata.get_super_predicates(def_id.index, tcx) }
trait_def => {
tcx.alloc_trait_def(cdata.get_trait_def(def_id.index, tcx.sess))
}
adt_def => { cdata.get_adt_def(def_id.index, tcx) }
adt_destructor => {
let _ = cdata;
tcx.calculate_dtor(def_id, &mut |_,_| Ok(()))
}
variances_of => { Lrc::new(cdata.get_item_variances(def_id.index)) }
associated_item_def_ids => {
let mut result = vec![];
cdata.each_child_of_item(def_id.index,
|child| result.push(child.def.def_id()), tcx.sess);
Lrc::new(result)
}
associated_item => { cdata.get_associated_item(def_id.index) }
impl_trait_ref => { cdata.get_impl_trait(def_id.index, tcx) }
impl_polarity => { cdata.get_impl_polarity(def_id.index) }
coerce_unsized_info => {
cdata.get_coerce_unsized_info(def_id.index).unwrap_or_else(|| {
bug!("coerce_unsized_info: `{:?}` is missing its info", def_id);
})
}
optimized_mir => {
let mir = cdata.maybe_get_optimized_mir(tcx, def_id.index).unwrap_or_else(|| {
bug!("get_optimized_mir: missing MIR for `{:?}`", def_id)
});
let mir = tcx.alloc_mir(mir);
mir
}
mir_const_qualif => {
(cdata.mir_const_qualif(def_id.index), Lrc::new(BitSet::new_empty(0)))
}
fn_sig => { cdata.fn_sig(def_id.index, tcx) }
inherent_impls => { Lrc::new(cdata.get_inherent_implementations_for_type(def_id.index)) }
is_const_fn_raw => { cdata.is_const_fn_raw(def_id.index) }
is_foreign_item => { cdata.is_foreign_item(def_id.index) }
describe_def => { cdata.get_def(def_id.index) }
def_span => { cdata.get_span(def_id.index, &tcx.sess) }
lookup_stability => {
cdata.get_stability(def_id.index).map(|s| tcx.intern_stability(s))
}
lookup_deprecation_entry => {
cdata.get_deprecation(def_id.index).map(DeprecationEntry::external)
}
item_attrs => { cdata.get_item_attrs(def_id.index, tcx.sess) }
// FIXME(#38501) We've skipped a `read` on the `HirBody` of
// a `fn` when encoding, so the dep-tracking wouldn't work.
// This is only used by rustdoc anyway, which shouldn't have
// incremental recompilation ever enabled.
fn_arg_names => { cdata.get_fn_arg_names(def_id.index) }
rendered_const => { cdata.get_rendered_const(def_id.index) }
impl_parent => { cdata.get_parent_impl(def_id.index) }
trait_of_item => { cdata.get_trait_of_item(def_id.index) }
const_is_rvalue_promotable_to_static => {
cdata.const_is_rvalue_promotable_to_static(def_id.index)
}
is_mir_available => { cdata.is_item_mir_available(def_id.index) }
dylib_dependency_formats => { Lrc::new(cdata.get_dylib_dependency_formats()) }
is_panic_runtime => { cdata.root.panic_runtime }
is_compiler_builtins => { cdata.root.compiler_builtins }
has_global_allocator => { cdata.root.has_global_allocator }
has_panic_handler => { cdata.root.has_panic_handler }
is_sanitizer_runtime => { cdata.root.sanitizer_runtime }
is_profiler_runtime => { cdata.root.profiler_runtime }
panic_strategy => { cdata.root.panic_strategy }
extern_crate => {
let r = Lrc::new(*cdata.extern_crate.lock());
r
}
is_no_builtins => { cdata.root.no_builtins }
impl_defaultness => { cdata.get_impl_defaultness(def_id.index) }
reachable_non_generics => {
let reachable_non_generics = tcx
.exported_symbols(cdata.cnum)
.iter()
.filter_map(|&(exported_symbol, export_level)| {
if let ExportedSymbol::NonGeneric(def_id) = exported_symbol {
return Some((def_id, export_level))
} else {
None
}
})
.collect();
Lrc::new(reachable_non_generics)
}
native_libraries => { Lrc::new(cdata.get_native_libraries(tcx.sess)) }
foreign_modules => { Lrc::new(cdata.get_foreign_modules(tcx.sess)) }
plugin_registrar_fn => {
cdata.root.plugin_registrar_fn.map(|index| {
DefId { krate: def_id.krate, index }
})
}
derive_registrar_fn => {
cdata.root.macro_derive_registrar.map(|index| {
DefId { krate: def_id.krate, index }
})
}
crate_disambiguator => { cdata.root.disambiguator }
crate_hash => { cdata.root.hash }
original_crate_name => { cdata.root.name }
extra_filename => { cdata.root.extra_filename.clone() }
implementations_of_trait => {
let mut result = vec![];
let filter = Some(other);
cdata.get_implementations_for_trait(filter, &mut result);
Lrc::new(result)
}
all_trait_implementations => {
let mut result = vec![];
cdata.get_implementations_for_trait(None, &mut result);
Lrc::new(result)
}
visibility => { cdata.get_visibility(def_id.index) }
dep_kind => {
let r = *cdata.dep_kind.lock();
r
}
crate_name => { cdata.name }
item_children => {
let mut result = vec![];
cdata.each_child_of_item(def_id.index, |child| result.push(child), tcx.sess);
Lrc::new(result)
}
defined_lib_features => { Lrc::new(cdata.get_lib_features()) }
defined_lang_items => { Lrc::new(cdata.get_lang_items()) }
missing_lang_items => { Lrc::new(cdata.get_missing_lang_items()) }
missing_extern_crate_item => {
let r = match *cdata.extern_crate.borrow() {
Some(extern_crate) if !extern_crate.direct => true,
_ => false,
};
r
}
used_crate_source => { Lrc::new(cdata.source.clone()) }
exported_symbols => {
let cnum = cdata.cnum; | }
}
pub fn provide<'tcx>(providers: &mut Providers<'tcx>) {
// FIXME(#44234) - almost all of these queries have no sub-queries and
// therefore no actual inputs, they're just reading tables calculated in
// resolve! Does this work? Unsure! That's what the issue is about
*providers = Providers {
is_dllimport_foreign_item: |tcx, id| {
tcx.native_library_kind(id) == Some(NativeLibraryKind::NativeUnknown)
},
is_statically_included_foreign_item: |tcx, id| {
match tcx.native_library_kind(id) {
Some(NativeLibraryKind::NativeStatic) |
Some(NativeLibraryKind::NativeStaticNobundle) => true,
_ => false,
}
},
native_library_kind: |tcx, id| {
tcx.native_libraries(id.krate)
.iter()
.filter(|lib| native_libs::relevant_lib(&tcx.sess, lib))
.find(|lib| {
let fm_id = match lib.foreign_module {
Some(id) => id,
None => return false,
};
tcx.foreign_modules(id.krate)
.iter()
.find(|m| m.def_id == fm_id)
.expect("failed to find foreign module")
.foreign_items
.contains(&id)
})
.map(|l| l.kind)
},
native_libraries: |tcx, cnum| {
assert_eq!(cnum, LOCAL_CRATE);
Lrc::new(native_libs::collect(tcx))
},
foreign_modules: |tcx, cnum| {
assert_eq!(cnum, LOCAL_CRATE);
Lrc::new(foreign_modules::collect(tcx))
},
link_args: |tcx, cnum| {
assert_eq!(cnum, LOCAL_CRATE);
Lrc::new(link_args::collect(tcx))
},
// Returns a map from a sufficiently visible external item (i.e. an
// external item that is visible from at least one local module) to a
// sufficiently visible parent (considering modules that re-export the
// external item to be parents).
visible_parent_map: |tcx, cnum| {
use std::collections::vec_deque::VecDeque;
use std::collections::hash_map::Entry;
assert_eq!(cnum, LOCAL_CRATE);
let mut visible_parent_map: DefIdMap<DefId> = DefIdMap();
// Issue 46112: We want the map to prefer the shortest
// paths when reporting the path to an item. Therefore we
// build up the map via a breadth-first search (BFS),
// which naturally yields minimal-length paths.
//
// Note that it needs to be a BFS over the whole forest of
// crates, not just each individual crate; otherwise you
// only get paths that are locally minimal with respect to
// whatever crate we happened to encounter first in this
// traversal, but not globally minimal across all crates.
let bfs_queue = &mut VecDeque::new();
// Preferring shortest paths alone does not guarantee a
// deterministic result; so sort by crate num to avoid
// hashtable iteration non-determinism. This only makes
// things as deterministic as crate-nums assignment is,
            // which is to say, it's not deterministic in general. But
// we believe that libstd is consistently assigned crate
// num 1, so it should be enough to resolve #46112.
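            // For example, if an item is reachable both as `a::b::c::item`
            // and, through a re-export, as `d::item`, the BFS records the
            // parent along the shorter `d::item` path.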
let mut crates: Vec<CrateNum> = (*tcx.crates()).clone();
crates.sort();
for &cnum in crates.iter() {
// Ignore crates without a corresponding local `extern crate` item.
if tcx.missing_extern_crate_item(cnum) {
continue
}
bfs_queue.push_back(DefId {
krate: cnum,
index: CRATE_DEF_INDEX
});
}
// (restrict scope of mutable-borrow of `visible_parent_map`)
{
let visible_parent_map = &mut visible_parent_map;
let mut add_child = |bfs_queue: &mut VecDeque<_>,
child: &def::Export,
parent: DefId| {
if child.vis != ty::Visibility::Public {
return;
}
let child = child.def.def_id();
match visible_parent_map.entry(child) {
Entry::Occupied(mut entry) => {
// If `child` is defined in crate `cnum`, ensure
// that it is mapped to a parent in `cnum`.
if child.krate == cnum && entry.get().krate != cnum {
entry.insert(parent);
}
}
Entry::Vacant(entry) => {
entry.insert(parent);
bfs_queue.push_back(child);
}
}
};
while let Some(def) = bfs_queue.pop_front() {
for child in tcx.item_children(def).iter() {
add_child(bfs_queue, child, def);
}
}
}
Lrc::new(visible_parent_map)
},
..*providers
};
}
impl cstore::CStore {
pub fn export_macros_untracked(&self, cnum: CrateNum) {
let data = self.get_crate_data(cnum);
let mut dep_kind = data.dep_kind.lock();
if *dep_kind == DepKind::UnexportedMacrosOnly {
*dep_kind = DepKind::MacrosOnly;
}
}
pub fn dep_kind_untracked(&self, cnum: CrateNum) -> DepKind {
let data = self.get_crate_data(cnum);
let r = *data.dep_kind.lock();
r
}
pub fn crate_edition_untracked(&self, cnum: CrateNum) -> Edition {
self.get_crate_data(cnum).root.edition
}
pub fn struct_field_names_untracked(&self, def: DefId) -> Vec<ast::Name> {
self.get_crate_data(def.krate).get_struct_field_names(def.index)
}
pub fn item_children_untracked(&self, def_id: DefId, sess: &Session) -> Vec<def::Export> {
let mut result = vec![];
self.get_crate_data(def_id.krate)
.each_child_of_item(def_id.index, |child| result.push(child), sess);
result
}
pub fn load_macro_untracked(&self, id: DefId, sess: &Session) -> LoadedMacro {
let data = self.get_crate_data(id.krate);
if let Some(ref proc_macros) = data.proc_macros {
return LoadedMacro::ProcMacro(proc_macros[id.index.to_proc_macro_index()].1.clone());
} else if data.name == "proc_macro" && data.item_name(id.index) == "quote" {
use syntax::ext::base::SyntaxExtension;
use syntax_ext::proc_macro_impl::BangProcMacro;
let ext = SyntaxExtension::ProcMacro {
expander: Box::new(BangProcMacro { inner: ::proc_macro::quote }),
allow_internal_unstable: true,
edition: data.root.edition,
};
return LoadedMacro::ProcMacro(Lrc::new(ext));
}
let def = data.get_macro(id.index);
let macro_full_name = data.def_path(id.index).to_string_friendly(|_| data.imported_name);
let source_name = FileName::Macros(macro_full_name);
let source_file = sess.parse_sess.source_map().new_source_file(source_name, def.body);
let local_span = Span::new(source_file.start_pos, source_file.end_pos, NO_EXPANSION);
let body = source_file_to_stream(&sess.parse_sess, source_file, None);
// Mark the attrs as used
let attrs = data.get_item_attrs(id.index, sess);
for attr in attrs.iter() {
attr::mark_used(attr);
}
let name = data.def_key(id.index).disambiguated_data.data
.get_opt_name().expect("no name in load_macro");
sess.imported_macro_spans.borrow_mut()
.insert(local_span, (name.to_string(), data.get_span(id.index, sess)));
LoadedMacro::MacroDef(ast::Item {
ident: ast::Ident::from_str(&name.as_str()),
id: ast::DUMMY_NODE_ID,
span: local_span,
attrs: attrs.iter().cloned().collect(),
node: ast::ItemKind::MacroDef(ast::MacroDef {
tokens: body.into(),
legacy: def.legacy,
}),
vis: source_map::respan(local_span.shrink_to_lo(), ast::VisibilityKind::Inherited),
tokens: None,
})
}
pub fn associated_item_cloned_untracked(&self, def: DefId) -> ty::AssociatedItem {
self.get_crate_data(def.krate).get_associated_item(def.index)
}
}
impl CrateStore for cstore::CStore {
fn crate_data_as_rc_any(&self, krate: CrateNum) -> Lrc<dyn Any> {
self.get_crate_data(krate)
}
fn item_generics_cloned_untracked(&self, def: DefId, sess: &Session) -> ty::Generics {
self.get_crate_data(def.krate).get_generics(def.index, sess)
}
fn crate_name_untracked(&self, cnum: CrateNum) -> Symbol
{
self.get_crate_data(cnum).name
}
fn crate_disambiguator_untracked(&self, cnum: CrateNum) -> CrateDisambiguator
{
self.get_crate_data(cnum).root.disambiguator
}
fn crate_hash_untracked(&self, cnum: CrateNum) -> Svh
{
self.get_crate_data(cnum).root.hash
}
/// Returns the `DefKey` for a given `DefId`. This indicates the
/// parent `DefId` as well as some idea of what kind of data the
/// `DefId` refers to.
fn def_key(&self, def: DefId) -> DefKey {
// Note: loading the def-key (or def-path) for a def-id is not
// a *read* of its metadata. This is because the def-id is
// really just an interned shorthand for a def-path, which is the
// canonical name for an item.
//
// self.dep_graph.read(DepNode::MetaData(def));
self.get_crate_data(def.krate).def_key(def.index)
}
fn def_path(&self, def: DefId) -> DefPath {
// See `Note` above in `def_key()` for why this read is
// commented out:
//
// self.dep_graph.read(DepNode::MetaData(def));
self.get_crate_data(def.krate).def_path(def.index)
}
fn def_path_hash(&self, def: DefId) -> DefPathHash {
self.get_crate_data(def.krate).def_path_hash(def.index)
}
fn def_path_table(&self, cnum: CrateNum) -> Lrc<DefPathTable> {
self.get_crate_data(cnum).def_path_table.clone()
}
fn crates_untracked(&self) -> Vec<CrateNum>
{
let mut result = vec![];
self.iter_crate_data(|cnum, _| result.push(cnum));
result
}
fn extern_mod_stmt_cnum_untracked(&self, emod_id: ast::NodeId) -> Option<CrateNum>
{
self.do_extern_mod_stmt_cnum(emod_id)
}
fn postorder_cnums_untracked(&self) -> Vec<CrateNum> {
self.do_postorder_cnums_untracked()
}
fn encode_metadata<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>)
-> EncodedMetadata
{
encoder::encode_metadata(tcx)
}
fn metadata_encoding_version(&self) -> &[u8]
{
schema::METADATA_HEADER
}
} | assert!(cnum != LOCAL_CRATE);
Arc::new(cdata.exported_symbols(tcx)) |
tm2eth_test.go | package tests
import (
"context"
"testing"
"github.com/stretchr/testify/suite"
"github.com/datachainlab/ibc-trusted-ethereum-client/tests/chains/ethereum"
"github.com/datachainlab/ibc-trusted-ethereum-client/tests/chains/ethereum/pkg/client"
"github.com/datachainlab/ibc-trusted-ethereum-client/tests/chains/ethereum/pkg/consts"
tm "github.com/datachainlab/ibc-trusted-ethereum-client/tests/chains/tendermint"
ibctesting "github.com/datachainlab/ibc-trusted-ethereum-client/tests/testing"
"github.com/datachainlab/ibc-trusted-ethereum-client/tests/testing/types"
)
const mnemonicPhrase = "math razor capable expose worth grape metal sunset metal sudden usage scheme"
/*
NOTE: This test is intended to be run on ganache. Therefore, we are using MockClient instead of IBFT2Client.
*/
type TM2EthTestSuite struct {
suite.Suite
coordinator *ibctesting.Coordinator
chainA types.TestChainI
chainB types.TestChainI
}
func (suite *TM2EthTestSuite) SetupTest() {
chainClient, err := client.NewETHClient("http://127.0.0.1:8545", 2021)
suite.Require().NoError(err)
suite.chainA = tm.NewTestChain(suite.T())
suite.chainB = ethereum.NewChain(suite.T(), *chainClient, consts.Contract, mnemonicPhrase)
suite.coordinator = ibctesting.NewCoordinator(suite.T(), suite.chainA, suite.chainB)
}
func NewTransferPath(chainA, chainB types.TestChainI) *ibctesting.Path {
path := ibctesting.NewPath(chainA, chainB)
return path
}
func (suite *TM2EthTestSuite) TestChannelTM2Eth() {
ctx := context.Background()
path := NewTransferPath(suite.chainA, suite.chainB)
path.EndpointA.ClientConfig = ibctesting.NewTrustedEthereumConfig("chainA-chainB")
suite.coordinator.Setup(ctx, path)
}
func (suite *TM2EthTestSuite) TestChannelEth2TM() {
ctx := context.Background()
path := NewTransferPath(suite.chainB, suite.chainA)
path.EndpointB.ClientConfig = ibctesting.NewTrustedEthereumConfig("chainB-chainA")
suite.coordinator.Setup(ctx, path)
} | } |
func TestTM2EthTestSuite(t *testing.T) {
suite.Run(t, new(TM2EthTestSuite)) |
shell_quoting.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Almost every FBCodeBuilder string is ultimately passed to a shell. Escaping
too little or too much tends to be the most common error. The utilities in
this file give a systematic way of avoiding such bugs:
- When you write literal strings destined for the shell, use `ShellQuoted`.
- When these literal strings are parameterized, use `ShellQuoted.format`.
- Any parameters that are raw strings get `shell_quote`d automatically,
while any ShellQuoted parameters will be left intact.
- Use `path_join` to join path components.
- Use `shell_join` to join already-quoted command arguments or shell lines.
"""
import os
from collections import namedtuple
# pyre-fixme[13] This is too magical for Pyre.
class ShellQuoted(namedtuple("ShellQuoted", ("do_not_use_raw_str",))):
"""
Wrap a string with this to make it transparent to shell_quote(). It
will almost always suffice to use ShellQuoted.format(), path_join(),
or shell_join().
If you really must, use raw_shell() to access the raw string.
"""
def __new__(cls, s):
"No need to nest ShellQuoted."
return super(ShellQuoted, cls).__new__(
cls, s.do_not_use_raw_str if isinstance(s, ShellQuoted) else s
)
def __str__(self):
raise RuntimeError(
"One does not simply convert {0} to a string -- use path_join() "
"or ShellQuoted.format() instead".format(repr(self))
)
def __repr__(self) -> str:
return "{0}({1})".format(self.__class__.__name__, repr(self.do_not_use_raw_str))
def format(self, **kwargs) -> "ShellQuoted":
"""
Use instead of str.format() when the arguments are either
`ShellQuoted()` or raw strings needing to be `shell_quote()`d.
Positional args are deliberately not supported since they are more
error-prone.
"""
return ShellQuoted(
self.do_not_use_raw_str.format(
**dict(
(k, shell_quote(v).do_not_use_raw_str) for k, v in kwargs.items()
)
)
)
def shell_quote(s) -> ShellQuoted:
"Quotes a string if it is not already quoted"
return (
s
if isinstance(s, ShellQuoted)
else ShellQuoted("'" + str(s).replace("'", "'\\''") + "'")
)
def raw_shell(s: ShellQuoted):
"Not a member of ShellQuoted so we get a useful error for raw strings"
if isinstance(s, ShellQuoted):
return s.do_not_use_raw_str
raise RuntimeError("{0} should have been ShellQuoted".format(s))
def shell_join(delim, it) -> ShellQuoted:
"Joins an iterable of ShellQuoted with a delimiter between each two"
return ShellQuoted(delim.join(raw_shell(s) for s in it))
def path_join(*args) -> ShellQuoted:
"Joins ShellQuoted and raw pieces of paths to make a shell-quoted path"
return ShellQuoted(os.path.join(*[raw_shell(shell_quote(s)) for s in args]))
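# A minimal usage sketch of the helpers above (hypothetical paths/values):
#
#   cmd = ShellQuoted("rsync -a {src} {dest}").format(
#       src="/data/my dir",               # raw string: shell-quoted automatically
#       dest=ShellQuoted('"$DEST_DIR"'),  # already quoted: left intact
#   )
#   raw_shell(cmd)  # -> rsync -a '/data/my dir' "$DEST_DIR"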
def | (c: ShellQuoted) -> ShellQuoted:
"Do not shell-escape raw strings in comments, but do handle line breaks."
return ShellQuoted("# {c}").format(
c=ShellQuoted(
(raw_shell(c) if isinstance(c, ShellQuoted) else c).replace("\n", "\n# ")
)
)
| shell_comment |
serviceAction.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package servicecatalog
import (
"context"
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Manages a Service Catalog self-service action.
//
// ## Example Usage
// ### Basic Usage
//
// ```go
// package main
//
// import (
// "github.com/pulumi/pulumi-aws/sdk/v4/go/aws/servicecatalog"
// "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
// )
//
// func main() {
// pulumi.Run(func(ctx *pulumi.Context) error {
// _, err := servicecatalog.NewServiceAction(ctx, "example", &servicecatalog.ServiceActionArgs{
// Definition: &servicecatalog.ServiceActionDefinitionArgs{
// Name: pulumi.String("AWS-RestartEC2Instance"),
// },
// Description: pulumi.String("Motor generator unit"),
// })
// if err != nil {
// return err
// }
// return nil
// })
// }
// ```
//
// ## Import
//
// `aws_servicecatalog_service_action` can be imported using the service action ID, e.g.
//
// ```sh
// $ pulumi import aws:servicecatalog/serviceAction:ServiceAction example act-f1w12eperfslh
// ```
type ServiceAction struct {
pulumi.CustomResourceState
// Language code. Valid values are `en` (English), `jp` (Japanese), and `zh` (Chinese). Default is `en`.
AcceptLanguage pulumi.StringPtrOutput `pulumi:"acceptLanguage"`
// Self-service action definition configuration block. Detailed below.
Definition ServiceActionDefinitionOutput `pulumi:"definition"`
// Self-service action description.
Description pulumi.StringOutput `pulumi:"description"`
// Self-service action name.
Name pulumi.StringOutput `pulumi:"name"`
}
// NewServiceAction registers a new resource with the given unique name, arguments, and options.
func NewServiceAction(ctx *pulumi.Context,
name string, args *ServiceActionArgs, opts ...pulumi.ResourceOption) (*ServiceAction, error) {
if args == nil {
return nil, errors.New("missing one or more required arguments")
}
if args.Definition == nil {
return nil, errors.New("invalid value for required argument 'Definition'")
}
var resource ServiceAction
err := ctx.RegisterResource("aws:servicecatalog/serviceAction:ServiceAction", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// GetServiceAction gets an existing ServiceAction resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetServiceAction(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *ServiceActionState, opts ...pulumi.ResourceOption) (*ServiceAction, error) {
var resource ServiceAction
err := ctx.ReadResource("aws:servicecatalog/serviceAction:ServiceAction", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering ServiceAction resources.
type serviceActionState struct {
// Language code. Valid values are `en` (English), `jp` (Japanese), and `zh` (Chinese). Default is `en`.
AcceptLanguage *string `pulumi:"acceptLanguage"`
// Self-service action definition configuration block. Detailed below.
Definition *ServiceActionDefinition `pulumi:"definition"`
// Self-service action description.
Description *string `pulumi:"description"`
// Self-service action name.
Name *string `pulumi:"name"`
}
type ServiceActionState struct {
// Language code. Valid values are `en` (English), `jp` (Japanese), and `zh` (Chinese). Default is `en`.
AcceptLanguage pulumi.StringPtrInput
// Self-service action definition configuration block. Detailed below.
Definition ServiceActionDefinitionPtrInput
// Self-service action description.
Description pulumi.StringPtrInput
// Self-service action name.
Name pulumi.StringPtrInput
}
func (ServiceActionState) ElementType() reflect.Type {
return reflect.TypeOf((*serviceActionState)(nil)).Elem()
}
type serviceActionArgs struct {
// Language code. Valid values are `en` (English), `jp` (Japanese), and `zh` (Chinese). Default is `en`.
AcceptLanguage *string `pulumi:"acceptLanguage"`
// Self-service action definition configuration block. Detailed below.
Definition ServiceActionDefinition `pulumi:"definition"`
// Self-service action description.
Description *string `pulumi:"description"`
// Self-service action name.
Name *string `pulumi:"name"`
}
// The set of arguments for constructing a ServiceAction resource.
type ServiceActionArgs struct {
// Language code. Valid values are `en` (English), `jp` (Japanese), and `zh` (Chinese). Default is `en`.
AcceptLanguage pulumi.StringPtrInput
// Self-service action definition configuration block. Detailed below.
Definition ServiceActionDefinitionInput
// Self-service action description.
Description pulumi.StringPtrInput
// Self-service action name.
Name pulumi.StringPtrInput
}
func (ServiceActionArgs) ElementType() reflect.Type {
return reflect.TypeOf((*serviceActionArgs)(nil)).Elem()
}
type ServiceActionInput interface {
pulumi.Input
ToServiceActionOutput() ServiceActionOutput
ToServiceActionOutputWithContext(ctx context.Context) ServiceActionOutput
}
func (*ServiceAction) ElementType() reflect.Type {
return reflect.TypeOf((*ServiceAction)(nil))
}
func (i *ServiceAction) ToServiceActionOutput() ServiceActionOutput {
return i.ToServiceActionOutputWithContext(context.Background())
}
func (i *ServiceAction) ToServiceActionOutputWithContext(ctx context.Context) ServiceActionOutput {
return pulumi.ToOutputWithContext(ctx, i).(ServiceActionOutput)
}
func (i *ServiceAction) ToServiceActionPtrOutput() ServiceActionPtrOutput {
return i.ToServiceActionPtrOutputWithContext(context.Background())
}
func (i *ServiceAction) ToServiceActionPtrOutputWithContext(ctx context.Context) ServiceActionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(ServiceActionPtrOutput)
}
type ServiceActionPtrInput interface {
pulumi.Input
ToServiceActionPtrOutput() ServiceActionPtrOutput
ToServiceActionPtrOutputWithContext(ctx context.Context) ServiceActionPtrOutput
}
type serviceActionPtrType ServiceActionArgs
func (*serviceActionPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**ServiceAction)(nil))
}
func (i *serviceActionPtrType) ToServiceActionPtrOutput() ServiceActionPtrOutput {
return i.ToServiceActionPtrOutputWithContext(context.Background())
}
func (i *serviceActionPtrType) ToServiceActionPtrOutputWithContext(ctx context.Context) ServiceActionPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(ServiceActionPtrOutput)
}
// ServiceActionArrayInput is an input type that accepts ServiceActionArray and ServiceActionArrayOutput values.
// You can construct a concrete instance of `ServiceActionArrayInput` via:
//
// ServiceActionArray{ ServiceActionArgs{...} }
type ServiceActionArrayInput interface {
pulumi.Input
ToServiceActionArrayOutput() ServiceActionArrayOutput
ToServiceActionArrayOutputWithContext(context.Context) ServiceActionArrayOutput
}
type ServiceActionArray []ServiceActionInput
func (ServiceActionArray) ElementType() reflect.Type {
return reflect.TypeOf((*[]*ServiceAction)(nil)).Elem()
}
func (i ServiceActionArray) ToServiceActionArrayOutput() ServiceActionArrayOutput {
return i.ToServiceActionArrayOutputWithContext(context.Background())
}
func (i ServiceActionArray) ToServiceActionArrayOutputWithContext(ctx context.Context) ServiceActionArrayOutput {
return pulumi.ToOutputWithContext(ctx, i).(ServiceActionArrayOutput)
}
// ServiceActionMapInput is an input type that accepts ServiceActionMap and ServiceActionMapOutput values.
// You can construct a concrete instance of `ServiceActionMapInput` via:
//
// ServiceActionMap{ "key": ServiceActionArgs{...} }
type ServiceActionMapInput interface {
pulumi.Input
ToServiceActionMapOutput() ServiceActionMapOutput
ToServiceActionMapOutputWithContext(context.Context) ServiceActionMapOutput
}
type ServiceActionMap map[string]ServiceActionInput
func (ServiceActionMap) ElementType() reflect.Type {
return reflect.TypeOf((*map[string]*ServiceAction)(nil)).Elem()
}
func (i ServiceActionMap) ToServiceActionMapOutput() ServiceActionMapOutput {
return i.ToServiceActionMapOutputWithContext(context.Background())
}
func (i ServiceActionMap) ToServiceActionMapOutputWithContext(ctx context.Context) ServiceActionMapOutput {
return pulumi.ToOutputWithContext(ctx, i).(ServiceActionMapOutput)
}
type ServiceActionOutput struct{ *pulumi.OutputState }
func (ServiceActionOutput) ElementType() reflect.Type {
return reflect.TypeOf((*ServiceAction)(nil))
}
func (o ServiceActionOutput) ToServiceActionOutput() ServiceActionOutput {
return o
}
func (o ServiceActionOutput) ToServiceActionOutputWithContext(ctx context.Context) ServiceActionOutput {
return o
}
func (o ServiceActionOutput) ToServiceActionPtrOutput() ServiceActionPtrOutput {
return o.ToServiceActionPtrOutputWithContext(context.Background())
}
func (o ServiceActionOutput) ToServiceActionPtrOutputWithContext(ctx context.Context) ServiceActionPtrOutput {
return o.ApplyTWithContext(ctx, func(_ context.Context, v ServiceAction) *ServiceAction {
return &v
}).(ServiceActionPtrOutput)
}
type ServiceActionPtrOutput struct{ *pulumi.OutputState }
func (ServiceActionPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**ServiceAction)(nil))
}
func (o ServiceActionPtrOutput) ToServiceActionPtrOutput() ServiceActionPtrOutput {
return o
}
func (o ServiceActionPtrOutput) ToServiceActionPtrOutputWithContext(ctx context.Context) ServiceActionPtrOutput {
return o
}
func (o ServiceActionPtrOutput) Elem() ServiceActionOutput {
return o.ApplyT(func(v *ServiceAction) ServiceAction {
if v != nil {
return *v
}
var ret ServiceAction
return ret
}).(ServiceActionOutput)
}
type ServiceActionArrayOutput struct{ *pulumi.OutputState }
func (ServiceActionArrayOutput) ElementType() reflect.Type {
return reflect.TypeOf((*[]ServiceAction)(nil))
}
func (o ServiceActionArrayOutput) ToServiceActionArrayOutput() ServiceActionArrayOutput {
return o
}
func (o ServiceActionArrayOutput) ToServiceActionArrayOutputWithContext(ctx context.Context) ServiceActionArrayOutput {
return o
}
func (o ServiceActionArrayOutput) Index(i pulumi.IntInput) ServiceActionOutput {
return pulumi.All(o, i).ApplyT(func(vs []interface{}) ServiceAction {
return vs[0].([]ServiceAction)[vs[1].(int)]
}).(ServiceActionOutput)
}
type ServiceActionMapOutput struct{ *pulumi.OutputState }
func (ServiceActionMapOutput) ElementType() reflect.Type {
return reflect.TypeOf((*map[string]ServiceAction)(nil))
}
func (o ServiceActionMapOutput) ToServiceActionMapOutput() ServiceActionMapOutput {
return o
}
func (o ServiceActionMapOutput) ToServiceActionMapOutputWithContext(ctx context.Context) ServiceActionMapOutput {
return o
}
func (o ServiceActionMapOutput) MapIndex(k pulumi.StringInput) ServiceActionOutput {
return pulumi.All(o, k).ApplyT(func(vs []interface{}) ServiceAction {
return vs[0].(map[string]ServiceAction)[vs[1].(string)]
}).(ServiceActionOutput)
}
func init() {
pulumi.RegisterOutputType(ServiceActionOutput{})
pulumi.RegisterOutputType(ServiceActionPtrOutput{})
pulumi.RegisterOutputType(ServiceActionArrayOutput{})
pulumi.RegisterOutputType(ServiceActionMapOutput{})
}
models.py | from django.contrib.auth import get_user_model
from django.core.validators import MinValueValidator
from django.db import models
User = get_user_model()
class Tag(models.Model):
"""Describes a tag object."""
name = models.CharField(max_length=255, verbose_name='Имя')
color = models.CharField(max_length=100, blank=True,
verbose_name='Цвет', default='')
class Meta:
verbose_name = 'Тег'
verbose_name_plural = 'Теги'
def __str__(self):
return self.name
class Ingredient(models.Model):
"""Describes an ingredient object."""
name = models.CharField(
max_length=256,
verbose_name='Название',
)
unit = models.CharField(
max_length=64,
verbose_name='Ед. измерения',
)
class Meta:
constraints = [
models.UniqueConstraint(
fields=['name', 'unit'],
name='name_unit'
)
]
verbose_name = 'Ингредиент'
verbose_name_plural = 'Ингредиенты'
def __str__(self):
return f'{self.name}, {self.unit}'
class Recipe(models.Model):
"""
Describes a recipe object. Related to 'auth.User', 'recipe.Tag' and
'recipe.Ingredient' through intermediate model 'IngredientRecipe'.
"""
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='recipes',
verbose_name='Автор',
)
title = models.CharField(
max_length=256,
blank=False,
verbose_name='Название',
)
image = models.ImageField(
upload_to='kartinki/',
blank=False,
null=True,
verbose_name='Изображение',
)
description = models.TextField(
blank=False,
verbose_name='Описание',
)
ingredients = models.ManyToManyField(
Ingredient,
through='IngredientRecipe',
related_name='recipes',
verbose_name='Ингредиенты',
)
cooking_time = models.PositiveIntegerField(
blank=False,
verbose_name='Время приготовления, мин',
validators=[MinValueValidator(1)]
)
pub_date = models.DateTimeField(
auto_now_add=True,
verbose_name='Дата публикации',
)
tags = models.ManyToManyField(
Tag,
related_name='recipes',
verbose_name='Теги',
)
slug = models.SlugField(max_length=100, unique=True, blank=True, null=True)
class Meta:
ordering = ('-pub_date', )
verbose_name = 'Рецепт'
verbose_name_plural = 'Рецепты'
def __str__(self):
return (f'{self.author} : {self.title}')
class IngredientRecipe(models.Model):
"""
Serves to connect a recipe object with an ingredient object
via a many-to-many relationship. Adds an additional field 'quantity'.
"""
ingredient = models.ForeignKey(Ingredient,
on_delete=models.CASCADE,
related_name='ingredientrecipe',
verbose_name='Ингредиент')
recipe = models.ForeignKey(Recipe,
on_delete=models.CASCADE,
related_name='ingredientrecipe',
verbose_name='Рецепт')
quantity = models.DecimalField(
max_digits=8,
decimal_places=1,
verbose_name='Количество',
validators=[MinValueValidator(0.1)]
)
class Meta:
verbose_name = 'Ингредиент в рецепте'
verbose_name_plural = 'Ингредиенты в рецепте'
def __str__(self):
return (self.ingredient.name)
class Follow(models.Model):
"""Describes a follow object."""
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='follower',
verbose_name='Подписчик',
)
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='following',
verbose_name='Автор',
)
class Meta:
constraints = [
models.UniqueConstraint(
fields=['user', 'author'],
name='user_author'
)
]
ordering = ('author', )
verbose_name = 'Подписка'
verbose_name_plural = 'Подписки'
def __str__(self):
return (f'Подписчик: {self.user}, Автор: {self.author}')
class Purchase(models.Model):
"""Describes a purchase object."""
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='purchases',
verbose_name='Пользователь',
)
recipe = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
related_name='purchases',
verbose_name='Рецепт',
)
class Meta:
verbose_name = 'Покупка'
verbose_name_plural = 'Покупки'
constraints = [
models.UniqueConstraint(
fields=['user', 'recipe'],
name='unique_purchase'
)
]
def __str__(self):
return (f'Пользователь: {self.user}, Рецепт: {self.recipe}')
class Favourite(models.Model):
"""Describes a user's favourite recipes."""
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='favourites',
verbose_name='Пользователь',
)
recipe = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
related_name='favourites',
verbose_name='Рецепт',
)
class Meta:
verbose_name = 'Избранное'
verbose_name_plural = 'Избранное'
constraints = [
models.UniqueConstraint(
fields=['user', 'recipe'],
name='unique_favourite'
)
]
def __str__(self):
return (f'Пользователь: {self.user}, Рецепт: {self.recipe}')
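# Illustrative sketch (hypothetical objects, commented out to avoid import-time
# DB access): the Recipe<->Ingredient link is materialized through
# IngredientRecipe, which carries the extra 'quantity' field.
#
# author = User.objects.first()
# flour = Ingredient.objects.create(name='Мука', unit='г')
# bread = Recipe.objects.create(author=author, title='Хлеб',
#                               description='...', cooking_time=60)
# IngredientRecipe.objects.create(ingredient=flour, recipe=bread, quantity=500)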
getApi.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package apimanagement
import (
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Api details.
// API Version: 2020-12-01.
func LookupApi(ctx *pulumi.Context, args *LookupApiArgs, opts ...pulumi.InvokeOption) (*LookupApiResult, error) {
var rv LookupApiResult
err := ctx.Invoke("azure-native:apimanagement:getApi", args, &rv, opts...)
if err != nil {
return nil, err
}
return &rv, nil
}
type LookupApiArgs struct {
// API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
ApiId string `pulumi:"apiId"`
// The name of the resource group.
ResourceGroupName string `pulumi:"resourceGroupName"`
// The name of the API Management service.
ServiceName string `pulumi:"serviceName"`
}
// Api details.
type LookupApiResult struct {
// Describes the Revision of the Api. If no value is provided, default revision 1 is created
ApiRevision *string `pulumi:"apiRevision"`
// Description of the Api Revision.
ApiRevisionDescription *string `pulumi:"apiRevisionDescription"`
// Type of API.
ApiType *string `pulumi:"apiType"`
// Indicates the Version identifier of the API if the API is versioned
ApiVersion *string `pulumi:"apiVersion"`
// Description of the Api Version.
ApiVersionDescription *string `pulumi:"apiVersionDescription"`
// Version set details
ApiVersionSet *ApiVersionSetContractDetailsResponse `pulumi:"apiVersionSet"`
// A resource identifier for the related ApiVersionSet.
ApiVersionSetId *string `pulumi:"apiVersionSetId"`
// Collection of authentication settings included into this API.
AuthenticationSettings *AuthenticationSettingsContractResponse `pulumi:"authenticationSettings"`
// Description of the API. May include HTML formatting tags.
Description *string `pulumi:"description"`
// API name. Must be 1 to 300 characters long.
DisplayName *string `pulumi:"displayName"`
// Resource ID.
Id string `pulumi:"id"`
// Indicates if API revision is current api revision.
IsCurrent *bool `pulumi:"isCurrent"`
// Indicates if API revision is accessible via the gateway.
IsOnline bool `pulumi:"isOnline"`
// Resource name.
Name string `pulumi:"name"`
// Relative URL uniquely identifying this API and all of its resource paths within the API Management service instance. It is appended to the API endpoint base URL specified during the service instance creation to form a public URL for this API.
Path string `pulumi:"path"`
// Describes on which protocols the operations in this API can be invoked.
Protocols []string `pulumi:"protocols"`
// Absolute URL of the backend service implementing this API. Cannot be more than 2000 characters long.
ServiceUrl *string `pulumi:"serviceUrl"`
// API identifier of the source API.
SourceApiId *string `pulumi:"sourceApiId"`
// Subscription key parameter names details.
SubscriptionKeyParameterNames *SubscriptionKeyParameterNamesContractResponse `pulumi:"subscriptionKeyParameterNames"`
// Specifies whether an API or Product subscription is required for accessing the API.
SubscriptionRequired *bool `pulumi:"subscriptionRequired"`
// Resource type for API Management resource.
Type string `pulumi:"type"`
}
0008_host_remote_management.py | # Generated by Django 2.1 on 2019-03-06 16:42
from django.db import migrations, models
import resource_inventory.models
class Migration(migrations.Migration):
dependencies = [
('resource_inventory', '0007_auto_20190306_1616'),
]
operations = [
migrations.AddField(
model_name='host',
name='remote_management',
field=models.ForeignKey(default=resource_inventory.models.get_default_remote_info, on_delete=models.SET(resource_inventory.models.get_default_remote_info), to='resource_inventory.RemoteInfo'),
),
]
setup.py | from setuptools import setup
import bip32
import io
with io.open("README.md", encoding="utf-8") as f: | long_description = f.read()
with io.open("requirements.txt", encoding="utf-8") as f:
requirements = [r for r in f.read().split('\n') if len(r)]
setup(name="bip32",
version=bip32.__version__,
description="Minimalistic implementation of the BIP32 key derivation scheme",
long_description=long_description,
long_description_content_type="text/markdown",
url="http://github.com/darosior/python-bip32",
author="Antoine Poinsot",
author_email="[email protected]",
license="MIT",
packages=["bip32"],
keywords=["bitcoin", "bip32", "hdwallet"],
install_requires=requirements)
unzip.go | package main
import (
"archive/zip"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"strings"
)
func unzip(target, output string) error {
archive, err := zip.OpenReader(target)
if err != nil {
return err
}
defer archive.Close()
for _, file := range archive.File {
filePath := filepath.Join(output, file.Name)
fmt.Println("unzipping file:", filePath)
if !strings.HasPrefix(filePath, filepath.Clean(output)+string(os.PathSeparator)) {
fmt.Println("invalid file path:", filePath)
return fmt.Errorf("invalid file path: %s", filePath)
}
if file.FileInfo().IsDir() {
fmt.Println("creating directory:", file)
os.MkdirAll(filePath, os.ModePerm)
continue
}
if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil {
return err
}
outFile, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode())
if err != nil {
return err
}
fileInArchive, err := file.Open()
if err != nil {
return err
}
if _, err := io.Copy(outFile, fileInArchive); err != nil {
return err
}
outFile.Close()
fileInArchive.Close()
}
return nil
}
func main() {
target := flag.String("t", "", "target name")
output := flag.String("o", "", "output directory")
flag.Parse()
fmt.Println("arg", flag.Args())
if err := unzip(*target, *output); err != nil {
fmt.Println(err)
}
}
serializers.py | from django.db.models import fields
from rest_framework import serializers
from decimal import Decimal
from .models import Cart, CartItem, Product, Collection, Customer, Order, OrderItem, Review
from uuid import uuid4
class ReviewSerializer(serializers.ModelSerializer):
class Meta:
model = Review
fields = ['id', 'name', 'description', 'date']
def create(self, validated_data):
product_id = self.context['product_id']
return Review.objects.create(product_id=product_id, **validated_data)
class OrderItemSerializer(serializers.ModelSerializer):
class Meta:
model = OrderItem
fields = ['id', 'quantity', 'unit_price', 'order', 'product']
class OrderSerializer(serializers.ModelSerializer):
class Meta:
model = Order
fields = ['id', 'placed_at', 'payment_status', 'customer']
class CustomerSerializer(serializers.ModelSerializer):
class Meta:
model = Customer
fields = ['id', 'user_id', 'phone', 'birth_date', 'membership']
user_id = serializers.IntegerField(read_only=True)
class CollectionSerializer(serializers.ModelSerializer):
class Meta:
model = Collection
fields = ['id', 'title', 'products_count']
# products_count = serializers.IntegerField()
products_count = serializers.SerializerMethodField('count_products')
def count_products(self, collection):
return collection.product_set.count()
# class CollectionSerializer(serializers.Serializer):
# id = serializers.IntegerField(required=False)
# title = serializers.CharField(max_length=255)
# products_count = serializers.SerializerMethodField('count_products')
# def count_products(self, collection):
# return collection.product_set.count()
# def create(self, validated_data):
# collection = Collection(**validated_data)
# collection.save()
# return collection
# def update(self, instance, validated_data):
# instance.title = validated_data["title"]
# instance.save()
# return instance
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = ['id', 'title', 'slug', 'description',
'inventory', 'price', 'price_with_tax', 'last_update', 'collection']
# fields = ['id', 'title', 'price', 'price_with_tax', 'collection_number',
# 'collection_title', 'collection_object', 'collection_link']
price = serializers.DecimalField(
max_digits=6, decimal_places=2, source='unit_price')
price_with_tax = serializers.SerializerMethodField(
method_name='calculate_tax')
# collection_number = serializers.PrimaryKeyRelatedField(
# queryset=Collection.objects.all(), source='collection'
# )
# collection_title = serializers.StringRelatedField(source='collection')
# collection_object = CollectionSerializer(source='collection')
# collection_link = serializers.HyperlinkedRelatedField(
# queryset=Collection.objects.all(),
# view_name='collection-detail',
# source='collection'
# )
def calculate_tax(self, product):
return product.unit_price * Decimal('1.1')
# def validate(self, data):
# if data['password'] != data['confirm_password']:
# return serializers.ValidationError('Passwords do not match')
# return data
# def create(self, validated_data):
# product = Product(**validated_data)
# product.other = 1
# product.save()
# return product
# def update(self, instance, validated_data):
# instance.other = 1
# instance.save()
# return instance
# class ProductSerializer(serializers.Serializer):
# id = serializers.IntegerField()
# title = serializers.CharField(max_length=255)
# price = serializers.DecimalField(
# max_digits=6, decimal_places=2, source='unit_price')
# price_with_tax = serializers.SerializerMethodField(
# method_name='calculate_tax')
# collection_number = serializers.PrimaryKeyRelatedField(
# queryset=Collection.objects.all(), source='collection'
# )
# collection_title = serializers.StringRelatedField(source='collection')
# collection_object = CollectionSerializer(source='collection')
# collection_link = serializers.HyperlinkedRelatedField(
# queryset=Collection.objects.all(),
# view_name='collection-detail',
# source='collection'
# )
# def calculate_tax(self, product):
# return product.unit_price * Decimal(1.1)
class SimpleProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = ['title', 'unit_price']
class CartItemSerializer(serializers.ModelSerializer):
class Meta:
model = CartItem
fields = ['id', 'quantity', 'product', 'total_price']
total_price = serializers.SerializerMethodField('get_total_price')
def get_total_price(self, cartitem):
return Decimal(cartitem.quantity) * cartitem.product.unit_price
product = SimpleProductSerializer()
class AddCartItemSerializer(serializers.ModelSerializer):
class Meta:
model = CartItem
fields = ['id', 'product_id', 'quantity']
def validate_product_id(self, value):
if not Product.objects.filter(pk=value).exists():
raise serializers.ValidationError(
"No Product with the given id was found")
return value
def save(self, **kwargs):
cart_id = self.context['cart_id']
product_id = self.validated_data['product_id']
quantity = self.validated_data['quantity']
try:
cart_item = CartItem.objects.get(
cart_id=cart_id, product_id=product_id)
cart_item.quantity += quantity
cart_item.save()
self.instance = cart_item
except CartItem.DoesNotExist:
self.instance = CartItem.objects.create(
cart_id=cart_id, **self.validated_data)
return self.instance
product_id = serializers.IntegerField()
class CartSerializer(serializers.ModelSerializer):
class Meta:
model = Cart
fields = ['id', 'cartitem_set', 'total_price']
id = serializers.UUIDField(read_only=True)
cartitem_set = CartItemSerializer(many=True, read_only=True)
# created_at = serializers.DateTimeField(read_only=True)
total_price = serializers.SerializerMethodField('get_total_price')
def get_total_price(self, cart):
return sum([item.quantity * item.product.unit_price for item in cart.cartitem_set.all()])
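# Illustrative sketch (hypothetical ids, commented out): how the upsert in
# AddCartItemSerializer.save() behaves when driven from a view.
#
# serializer = AddCartItemSerializer(
#     data={'product_id': 1, 'quantity': 2},
#     context={'cart_id': cart_pk},  # cart_pk: cart UUID from the URL kwargs
# )
# serializer.is_valid(raise_exception=True)
# item = serializer.save()  # adds to quantity if the (cart, product) row exists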
ptr.rs | use proc_macro2::TokenStream;
use quote::quote;
use crate::input::Input;
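/// Generates the `*Ptr`/`*PtrMut` analog types for a struct-of-arrays derive
/// input, together with pointer-style methods mirroring `*const T`/`*mut T`.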
pub fn derive(input: &Input) -> TokenStream {
let name = &input.name;
let visibility = &input.visibility;
let other_derive = &input.derive_with_exceptions();
let vec_name = &input.vec_name();
let ptr_name = &input.ptr_name();
let ptr_mut_name = &input.ptr_mut_name();
let ref_name = &input.ref_name();
let ref_mut_name = &input.ref_mut_name();
let doc_url = format!("[`{0}`](struct.{0}.html)", name);
let ptr_doc_url = format!("[`{0}`](struct.{0}.html)", ptr_name);
let ptr_mut_doc_url = format!("[`{0}`](struct.{0}.html)", ptr_mut_name);
let ref_doc_url = format!("[`{0}`](struct.{0}.html)", ref_name);
let ref_mut_doc_url = format!("[`{0}`](struct.{0}.html)", ref_mut_name);
let fields_names = input.fields.iter()
.map(|field| field.ident.clone().unwrap())
.collect::<Vec<_>>();
let fields_names_1 = &fields_names;
let fields_names_2 = &fields_names;
let fields_types = &input.fields.iter()
.map(|field| &field.ty)
.collect::<Vec<_>>();
let fields_doc = fields_names.iter()
.map(|field| format!("A pointer to a `{0}` from a [`{1}`](struct.{1}.html)", field, vec_name))
.collect::<Vec<_>>();
let fields_mut_doc = fields_names.iter()
.map(|field| format!("A mutable pointer to a `{0}` from a [`{1}`](struct.{1}.html)", field, vec_name))
.collect::<Vec<_>>();
quote! {
/// An analog of a pointer to
#[doc = #doc_url]
/// with struct of array layout.
#other_derive
#[derive(Copy, Clone)]
#visibility struct #ptr_name {
#(
#[doc = #fields_doc]
pub #fields_names_1: *const #fields_types,
)*
}
/// An analog of a mutable pointer to
#[doc = #doc_url]
/// with struct of array layout.
#other_derive
#[derive(Copy, Clone)]
#visibility struct #ptr_mut_name {
#(
#[doc = #fields_mut_doc]
pub #fields_names_1: *mut #fields_types,
)*
}
#[allow(dead_code)]
impl #ptr_name {
/// Convert a
#[doc = #ptr_doc_url]
/// to a
#[doc = #ptr_mut_doc_url]
/// ; *i.e.* do a `*const T as *mut T` transformation.
#visibility fn as_mut_ptr(&self) -> #ptr_mut_name {
#ptr_mut_name {
#(#fields_names_1: self.#fields_names_2 as *mut _, )*
}
}
/// Similar to [`*const T::is_null()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.is_null).
pub fn is_null(self) -> bool {
false #( || self.#fields_names_1.is_null())*
}
/// Similar to [`*const T::as_ref()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.as_ref),
/// with the same safety caveats.
pub unsafe fn as_ref<'a>(self) -> Option<#ref_name<'a>> {
if self.is_null() {
None
} else {
Some(#ref_name {
#(#fields_names_1: self.#fields_names_2.as_ref().expect("should not be null"), )*
})
}
}
/// Similar to [`*const T::offset()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.offset),
/// with the same safety caveats.
pub unsafe fn offset(self, count: isize) -> #ptr_name {
#ptr_name {
#(#fields_names_1: self.#fields_names_2.offset(count), )*
}
}
/// Similar to [`*const T::wrapping_offset()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_offset).
pub fn wrapping_offset(self, count: isize) -> #ptr_name {
#ptr_name {
#(#fields_names_1: self.#fields_names_2.wrapping_offset(count), )*
}
}
/// Similar to [`*const T::add()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.add),
/// with the same safety caveats.
pub unsafe fn add(self, count: usize) -> #ptr_name {
#ptr_name {
#(#fields_names_1: self.#fields_names_2.add(count), )*
}
}
/// Similar to [`*const T::sub()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.sub),
/// with the same safety caveats.
pub unsafe fn sub(self, count: usize) -> #ptr_name {
#ptr_name {
#(#fields_names_1: self.#fields_names_2.sub(count), )*
}
}
/// Similar to [`*const T::wrapping_add()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_add).
pub fn wrapping_add(self, count: usize) -> #ptr_name {
#ptr_name {
#(#fields_names_1: self.#fields_names_2.wrapping_add(count), )*
}
}
/// Similar to [`*const T::wrapping_sub()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_sub).
pub fn wrapping_sub(self, count: usize) -> #ptr_name {
#ptr_name {
#(#fields_names_1: self.#fields_names_2.wrapping_sub(count), )*
}
}
/// Similar to [`*const T::read()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.read),
/// with the same safety caveats.
pub unsafe fn read(self) -> #name {
#name {
#(#fields_names_1: self.#fields_names_2.read(), )*
}
}
/// Similar to [`*const T::read_volatile()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.read_volatile),
/// with the same safety caveats.
pub unsafe fn read_volatile(self) -> #name {
#name {
#(#fields_names_1: self.#fields_names_2.read_volatile(), )*
}
}
/// Similar to [`*const T::read_unaligned()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.read_unaligned),
/// with the same safety caveats.
pub unsafe fn read_unaligned(self) -> #name {
#name {
#(#fields_names_1: self.#fields_names_2.read_unaligned(), )*
}
}
}
#[allow(dead_code)]
impl #ptr_mut_name {
/// Convert a
#[doc = #ptr_mut_doc_url]
/// to a
#[doc = #ptr_doc_url]
/// ; *i.e.* do a `*mut T as *const T` transformation
#visibility fn as_ptr(&self) -> #ptr_name {
#ptr_name {
#(#fields_names_1: self.#fields_names_2, )*
}
}
/// Similar to [`*mut T::is_null()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.is_null).
pub fn is_null(self) -> bool {
false #( || self.#fields_names_1.is_null())*
}
/// Similar to [`*mut T::as_ref()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.as_ref),
/// with the same safety caveats.
pub unsafe fn as_ref<'a>(self) -> Option<#ref_name<'a>> {
if self.is_null() {
None
} else {
Some(#ref_name {
#(#fields_names_1: self.#fields_names_2.as_ref().expect("should not be null"), )*
})
}
}
/// Similar to [`*mut T::as_mut()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.as_mut),
/// with the same safety caveats.
pub unsafe fn as_mut<'a>(self) -> Option<#ref_mut_name<'a>> {
if self.is_null() {
None
} else {
Some(#ref_mut_name {
#(#fields_names_1: self.#fields_names_2.as_mut().expect("should not be null"), )*
})
}
}
/// Similar to [`*mut T::offset()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.offset),
/// with the same safety caveats.
pub unsafe fn offset(self, count: isize) -> #ptr_mut_name {
#ptr_mut_name {
#(#fields_names_1: self.#fields_names_2.offset(count), )*
}
}
/// Similar to [`*mut T::wrapping_offset()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_offset)
pub fn wrapping_offset(self, count: isize) -> #ptr_mut_name {
#ptr_mut_name {
#(#fields_names_1: self.#fields_names_2.wrapping_offset(count), )*
}
}
/// Similar to [`*mut T::add()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.add),
/// with the same safety caveats.
pub unsafe fn add(self, count: usize) -> #ptr_mut_name {
#ptr_mut_name {
#(#fields_names_1: self.#fields_names_2.add(count), )*
}
}
/// Similar to [`*mut T::sub()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.sub),
/// with the same safety caveats.
pub unsafe fn sub(self, count: usize) -> #ptr_mut_name {
#ptr_mut_name {
#(#fields_names_1: self.#fields_names_2.sub(count), )*
}
}
/// Similar to [`*mut T::wrapping_add()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_add),
/// with the same safety caveats.
pub fn wrapping_add(self, count: usize) -> #ptr_mut_name {
#ptr_mut_name {
#(#fields_names_1: self.#fields_names_2.wrapping_add(count), )*
}
}
/// Similar to [`*mut T::wrapping_sub()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_sub),
/// with the same safety caveats.
pub fn wrapping_sub(self, count: usize) -> #ptr_mut_name {
#ptr_mut_name {
#(#fields_names_1: self.#fields_names_2.wrapping_sub(count), )*
}
}
/// Similar to [`*mut T::read()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.read),
/// with the same safety caveats.
pub unsafe fn read(self) -> #name {
#name {
#(#fields_names_1: self.#fields_names_2.read(), )*
}
}
/// Similar to [`*mut T::read_volatile()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.read_volatile),
/// with the same safety caveats.
pub unsafe fn read_volatile(self) -> #name {
#name {
#(#fields_names_1: self.#fields_names_2.read_volatile(), )*
}
}
/// Similar to [`*mut T::read_unaligned()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.read_unaligned),
/// with the same safety caveats.
pub unsafe fn read_unaligned(self) -> #name {
#name {
#(#fields_names_1: self.#fields_names_2.read_unaligned(), )*
}
}
/// Similar to [`*mut T::write()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.write),
/// with the same safety caveats.
pub unsafe fn write(self, val: #name) {
#(self.#fields_names_1.write(val.#fields_names_2); )*
}
/// Similar to [`*mut T::write_volatile()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.write_volatile),
/// with the same safety caveats.
pub unsafe fn write_volatile(self, val: #name) {
#(self.#fields_names_1.write_volatile(val.#fields_names_2); )*
}
/// Similar to [`*mut T::write_unaligned()`](https://doc.rust-lang.org/std/primitive.pointer.html#method.write_unaligned),
/// with the same safety caveats.
pub unsafe fn write_unaligned(self, val: #name) {
#(self.#fields_names_1.write_unaligned(val.#fields_names_2); )*
}
}
#[allow(dead_code)]
impl<'a> #ref_name<'a> {
/// Convert a
#[doc = #ref_doc_url]
/// to a
#[doc = #ptr_doc_url]
/// ; *i.e.* do a `&T as *const T` transformation
#visibility fn as_ptr(&self) -> #ptr_name {
#ptr_name {
#(#fields_names_1: self.#fields_names_2, )*
}
}
}
#[allow(dead_code)]
impl<'a> #ref_mut_name<'a> {
/// Convert a
#[doc = #ref_mut_doc_url]
/// to a
#[doc = #ptr_doc_url]
/// ; *i.e.* do a `&mut T as *const T` transformation
#visibility fn as_ptr(&self) -> #ptr_name {
#ptr_name {
#(#fields_names_1: self.#fields_names_2, )*
}
}
/// Convert a
#[doc = #ref_mut_doc_url]
/// to a
#[doc = #ptr_mut_doc_url]
/// ; *i.e.* do a `&mut T as *mut T` transformation
#visibility fn as_mut_ptr(&mut self) -> #ptr_mut_name {
#ptr_mut_name {
#(#fields_names_1: self.#fields_names_2, )*
}
}
}
}
}
|
pog.py | m = 'ola mundo'
print(m)
dash_ps.py | # -*- coding: utf-8 -*-
import asyncio
import copy
import random
import time
import threading
from collections import deque
from uuid import uuid4
from . import util
from .dash_msg import PRIVATESEND_ENTRY_MAX_SIZE
from .dash_ps_net import PSMixSession, PRIVATESEND_SESSION_MSG_TIMEOUT
from .dash_ps_wallet import (PSDataMixin, PSKeystoreMixin, KeyPairsMixin,
KPStates, NotFoundInKeypairs, AddPSDataError,
SignWithKeypairsFailed)
from .dash_ps_util import (PSOptsMixin, PSUtilsMixin, PSGUILogHandler,
PSManLogAdapter, PSCoinRounds, PSStates,
PS_DENOMS_DICT, COLLATERAL_VAL, MIN_DENOM_VAL,
CREATE_COLLATERAL_VAL, CREATE_COLLATERAL_VALS,
PSTxWorkflow, PSDenominateWorkflow, calc_tx_fee)
from .dash_tx import PSTxTypes, SPEC_TX_NAMES, CTxIn
from .logging import Logger
from .transaction import Transaction, PartialTxOutput, PartialTransaction
from .util import (NoDynamicFeeEstimates, log_exceptions, SilentTaskGroup,
NotEnoughFunds, bfh, is_android)
from .i18n import _
PS_DENOM_REVERSE_DICT = {int(v): k for k, v in PS_DENOMS_DICT.items()}
class TooManyUtxos(Exception):
"""Thrown when creating new denoms/collateral txs from coins"""
class TooLargeUtxoVal(Exception):
"""Thrown when creating new collateral txs from coins"""
class PSManager(Logger, PSKeystoreMixin, PSDataMixin, PSOptsMixin,
PSUtilsMixin, KeyPairsMixin):
'''Class representing wallet PrivateSend manager'''
LOGGING_SHORTCUT = 'A'
ADD_PS_DATA_ERR_MSG = _('Error on adding PrivateSend transaction data.')
SPEND_TO_PS_ADDRS_MSG = _('For privacy reasons blocked attempt to'
' transfer coins to PrivateSend address.')
WATCHING_ONLY_MSG = _('This is a watching-only wallet.'
' Mixing can not be run.')
ALL_MIXED_MSG = _('PrivateSend mixing is done')
CLEAR_PS_DATA_MSG = _('Are you sure to clear all wallet PrivateSend data?'
' This is not recommended if there is'
' no particular need.')
NO_NETWORK_MSG = _('Can not start mixing. Network is not available')
NO_DASH_NET_MSG = _('Can not start mixing. DashNet is not available')
LLMQ_DATA_NOT_READY = _('LLMQ quorums data is not fully loaded.')
MNS_DATA_NOT_READY = _('Masternodes data is not fully loaded.')
NOT_ENABLED_MSG = _('PrivateSend mixing is not enabled')
INITIALIZING_MSG = _('PrivateSend mixing is initializing.'
' Please try again soon')
MIXING_ALREADY_RUNNING_MSG = _('PrivateSend mixing is already running.')
MIXING_NOT_RUNNING_MSG = _('PrivateSend mixing is not running.')
FIND_UNTRACKED_RUN_MSG = _('PrivateSend mixing can not start. Process of'
' finding untracked PS transactions'
' is currently run')
ERRORED_MSG = _('PrivateSend mixing can not start.'
' Please check errors in PS Log tab')
UNKNOWN_STATE_MSG = _('PrivateSend mixing can not start.'
' Unknown state: {}')
WAIT_MIXING_STOP_MSG = _('Mixing is not stopped. If mixing sessions end'
' prematurely, additional pay collateral may be'
' paid. Do you really want to close wallet?')
NO_NETWORK_STOP_MSG = _('Network is not available')
OTHER_COINS_ARRIVED_MSG1 = _('Some unknown coins arrived on addresses'
' reserved for PrivateSend use, txid: {}.')
OTHER_COINS_ARRIVED_MSG2 = _('WARNING: it is not recommended to spend'
' these coins in regular transactions!')
OTHER_COINS_ARRIVED_MSG3 = _('You can use these coins in PrivateSend'
' mixing process by manually selecting UTXO'
' and creating new denoms or new collateral,'
' depending on UTXO value.')
OTHER_COINS_ARRIVED_Q = _('Do you want to use other coins now?')
if is_android():
NO_DYNAMIC_FEE_MSG = _('{}\n\nYou can switch fee estimation method'
' on send screen')
OTHER_COINS_ARRIVED_MSG4 = _('You can view and use these coins from'
' Coins popup from PrivateSend options.')
else:
NO_DYNAMIC_FEE_MSG = _('{}\n\nYou can switch to static fee estimation'
' on Fees Preferences tab')
OTHER_COINS_ARRIVED_MSG4 = _('You can view and use these coins from'
' Coins tab.')
def __init__(self, wallet):
Logger.__init__(self)
PSDataMixin.__init__(self, wallet)
PSKeystoreMixin.__init__(self, wallet)
KeyPairsMixin.__init__(self, wallet)
PSOptsMixin.__init__(self, wallet)
PSUtilsMixin.__init__(self, wallet)
self.log_handler = PSGUILogHandler(self)
self.logger = PSManLogAdapter(self.logger, {'psman_id': id(self)})
self.state_lock = threading.Lock()
self.states = s = PSStates
self.mixing_running_states = [s.StartMixing, s.Mixing, s.StopMixing]
self.no_clean_history_states = [s.Initializing, s.Errored,
s.StartMixing, s.Mixing, s.StopMixing,
s.FindingUntracked]
self.config = wallet.config
self._state = PSStates.Unsupported
self.wallet_types_supported = ['standard']
self.keystore_types_supported = ['bip32', 'hardware']
keystore = wallet.db.get('keystore')
if keystore:
self.w_ks_type = keystore.get('type', 'unknown')
else:
self.w_ks_type = 'unknown'
self.w_type = wallet.wallet_type
if (self.w_type in self.wallet_types_supported
and self.w_ks_type in self.keystore_types_supported):
if wallet.db.get_ps_data('ps_enabled', False):
self.state = PSStates.Initializing
else:
self.state = PSStates.Disabled
if self.unsupported:
supported_w = ', '.join(self.wallet_types_supported)
supported_ks = ', '.join(self.keystore_types_supported)
this_type = self.w_type
this_ks_type = self.w_ks_type
self.unsupported_msg = _(f'PrivateSend is currently supported on'
f' next wallet types: "{supported_w}"'
f' and keystore types: "{supported_ks}".'
f'\n\nThis wallet has type "{this_type}"'
f' and keystore type "{this_ks_type}".')
else:
self.unsupported_msg = ''
if self.is_hw_ks:
self.enable_ps_keystore()
self.network = None
self.dash_net = None
self.loop = None
self._loop_thread = None
self.main_taskgroup = None
self.mix_sessions_lock = asyncio.Lock()
self.mix_sessions = {} # dict peer -> PSMixSession
self.recent_mixes_mns = deque([], 10) # added from mixing sessions
self.denoms_lock = threading.Lock()
self.collateral_lock = threading.Lock()
self.others_lock = threading.Lock()
self.new_denoms_wfl_lock = threading.Lock()
self.new_collateral_wfl_lock = threading.Lock()
self.pay_collateral_wfl_lock = threading.Lock()
self.denominate_wfl_lock = threading.Lock()
self._not_enough_funds = False
# electrum network disconnect time
self.disconnect_time = 0
@property
def unsupported(self):
return self.state == PSStates.Unsupported
@property
def enabled(self):
return self.state not in [PSStates.Unsupported, PSStates.Disabled]
@property
def is_hw_ks(self):
return self.w_ks_type == 'hardware'
def enable_ps(self):
if (self.w_type == 'standard' and self.is_hw_ks
and 'ps_keystore' not in self.wallet.db.data):
self.logger.info('ps_keystore for hw wallets must be created')
return
if not self.enabled:
self.wallet.db.set_ps_data('ps_enabled', True)
coro = self._enable_ps()
asyncio.run_coroutine_threadsafe(coro, self.loop)
async def _enable_ps(self):
if self.enabled:
return
self.state = PSStates.Initializing
util.trigger_callback('ps-state-changes', self.wallet, None, None)
_load_and_cleanup = self.load_and_cleanup
await self.loop.run_in_executor(None, _load_and_cleanup)
await self.find_untracked_ps_txs()
self.wallet.save_db()
def can_find_untracked(self):
w = self.wallet
network = self.network
if network is None:
return False
server_height = network.get_server_height()
if server_height == 0:
return False
local_height = network.get_local_height()
if local_height < server_height:
return False
with w.lock:
unverified_no_islock = []
for txid in w.unverified_tx:
if txid not in w.db.islocks:
unverified_no_islock.append(txid)
if (unverified_no_islock
or not w.is_up_to_date()
or not w.synchronizer.is_up_to_date()):
return False
return True
@property
def state(self):
return self._state
@property
def is_waiting(self):
if self.state not in self.mixing_running_states:
return False
if self.keypairs_state in [KPStates.NeedCache, KPStates.Caching]:
return False
active_wfls_cnt = 0
active_wfls_cnt += len(self.denominate_wfl_list)
if self.new_denoms_wfl:
active_wfls_cnt += 1
if self.new_collateral_wfl:
active_wfls_cnt += 1
return (active_wfls_cnt == 0)
@state.setter
def state(self, state):
self._state = state
def on_network_start(self, network):
self.network = network
util.register_callback(self.on_wallet_updated, ['wallet_updated'])
util.register_callback(self.on_network_status, ['status'])
self.dash_net = network.dash_net
self.loop = network.asyncio_loop
self._loop_thread = network._loop_thread
asyncio.ensure_future(self.clean_keypairs_on_timeout())
asyncio.ensure_future(self.cleanup_staled_denominate_wfls())
asyncio.ensure_future(self.trigger_postponed_notifications())
asyncio.ensure_future(self.broadcast_new_denoms_new_collateral_wfls())
def on_stop_threads(self):
if self.state == PSStates.Mixing:
self.stop_mixing()
util.unregister_callback(self.on_wallet_updated)
util.unregister_callback(self.on_network_status)
def on_network_status(self, event, *args):
connected = self.network.is_connected()
if connected:
self.disconnect_time = 0
else:
now = time.time()
if self.disconnect_time == 0:
self.disconnect_time = now
if now - self.disconnect_time > 30: # disconnected for 30 seconds
if self.state == PSStates.Mixing:
self.stop_mixing(self.NO_NETWORK_STOP_MSG)
async def on_wallet_updated(self, event, *args):
if not self.enabled:
return
w = args[0]
if w != self.wallet:
return
if w.is_up_to_date():
self._not_enough_funds = False
if self.state in [PSStates.Initializing, PSStates.Ready]:
await self.find_untracked_ps_txs()
# Methods related to mixing process
def start_mixing(self, password, nowait=True):
w = self.wallet
msg = None
if w.is_watching_only():
msg = self.WATCHING_ONLY_MSG, 'err'
elif self.all_mixed:
msg = self.ALL_MIXED_MSG, 'inf'
elif not self.network or not self.network.is_connected():
msg = self.NO_NETWORK_MSG, 'err'
elif not self.dash_net.run_dash_net:
msg = self.NO_DASH_NET_MSG, 'err'
if msg:
msg, inf = msg
self.logger.info(f'Can not start PrivateSend Mixing: {msg}')
util.trigger_callback('ps-state-changes', w, msg, inf)
return
coro = self.find_untracked_ps_txs()
asyncio.run_coroutine_threadsafe(coro, self.loop).result()
with self.state_lock:
if self.state == PSStates.Ready:
self.state = PSStates.StartMixing
elif self.state in [PSStates.Unsupported, PSStates.Disabled]:
msg = self.NOT_ENABLED_MSG
elif self.state == PSStates.Initializing:
msg = self.INITIALIZING_MSG
elif self.state in self.mixing_running_states:
msg = self.MIXING_ALREADY_RUNNING_MSG
elif self.state == PSStates.FindingUntracked:
msg = self.FIND_UNTRACKED_RUN_MSG
elif self.state == PSStates.Errored:
msg = self.ERRORED_MSG
else:
msg = self.UNKNOWN_STATE_MSG.format(self.state)
if msg:
util.trigger_callback('ps-state-changes', w, msg, None)
self.logger.info(f'Can not start PrivateSend Mixing: {msg}')
return
else:
util.trigger_callback('ps-state-changes', w, None, None)
fut = asyncio.run_coroutine_threadsafe(self._start_mixing(password),
self.loop)
if nowait:
return
try:
fut.result(timeout=2)
except (asyncio.TimeoutError, asyncio.CancelledError):
pass
async def _start_mixing(self, password):
if not self.enabled or not self.network:
return
assert not self.main_taskgroup
self._not_enough_funds = False
self.main_taskgroup = main_taskgroup = SilentTaskGroup()
self.logger.info('Starting PrivateSend Mixing')
async def main():
try:
async with main_taskgroup as group:
if (self.w_type == 'standard'
and self.is_hw_ks):
await group.spawn(self._prepare_funds_from_hw_wallet())
await group.spawn(self._make_keypairs_cache(password))
await group.spawn(self._check_not_enough_funds())
await group.spawn(self._check_all_mixed())
await group.spawn(self._maintain_pay_collateral_tx())
await group.spawn(self._maintain_collateral_amount())
await group.spawn(self._maintain_denoms())
await group.spawn(self._mix_denoms())
except Exception as e:
self.logger.info(f'error starting mixing: {str(e)}')
raise e
asyncio.run_coroutine_threadsafe(main(), self.loop)
with self.state_lock:
self.state = PSStates.Mixing
self.last_mix_start_time = time.time()
self.logger.info('Started PrivateSend Mixing')
w = self.wallet
util.trigger_callback('ps-state-changes', w, None, None)
async def stop_mixing_from_async_thread(self, msg, msg_type=None):
await self.loop.run_in_executor(None, self.stop_mixing, msg, msg_type)
def stop_mixing(self, msg=None, msg_type=None, nowait=True):
w = self.wallet
with self.state_lock:
if self.state == PSStates.Mixing:
self.state = PSStates.StopMixing
elif self.state == PSStates.StopMixing:
return
else:
msg = self.MIXING_NOT_RUNNING_MSG
util.trigger_callback('ps-state-changes', w, msg, 'inf')
self.logger.info(f'Can not stop PrivateSend Mixing: {msg}')
return
if msg:
self.logger.info(f'Stopping PrivateSend Mixing: {msg}')
if not msg_type or not msg_type.startswith('inf'):
stopped_prefix = _('PrivateSend mixing is stopping!')
msg = f'{stopped_prefix}\n\n{msg}'
util.trigger_callback('ps-state-changes', w, msg, msg_type)
else:
self.logger.info('Stopping PrivateSend Mixing')
util.trigger_callback('ps-state-changes', w, None, None)
self.last_mix_stop_time = time.time()  # write early in case a later update is lost
fut = asyncio.run_coroutine_threadsafe(self._stop_mixing(), self.loop)
if nowait:
return
try:
fut.result(timeout=PRIVATESEND_SESSION_MSG_TIMEOUT+5)
except (asyncio.TimeoutError, asyncio.CancelledError):
pass
@log_exceptions
async def _stop_mixing(self):
if self.keypairs_state == KPStates.Caching:
self.logger.info('Waiting for keypairs caching to finish')
while self.keypairs_state == KPStates.Caching:
await asyncio.sleep(0.5)
if self.main_taskgroup:
sess_cnt = len(self.mix_sessions)
if sess_cnt > 0:
self.logger.info(f'Waiting for {sess_cnt}'
f' mixing sessions to finish')
while sess_cnt > 0:
await asyncio.sleep(0.5)
sess_cnt = len(self.mix_sessions)
try:
await asyncio.wait_for(self.main_taskgroup.cancel_remaining(),
timeout=2)
except (asyncio.TimeoutError, asyncio.CancelledError) as e:
self.logger.debug(f'Exception during main_taskgroup'
f' cancellation: {repr(e)}')
self.main_taskgroup = None
with self.keypairs_state_lock:
if self.keypairs_state == KPStates.Ready:
self.logger.info('Mark keypairs as unused')
self.keypairs_state = KPStates.Unused
self.logger.info('Stopped PrivateSend Mixing')
self.last_mix_stop_time = time.time()
with self.state_lock:
self.state = PSStates.Ready
w = self.wallet
util.trigger_callback('ps-state-changes', w, None, None)
async def _check_all_mixed(self):
while not self.main_taskgroup.closed():
await asyncio.sleep(10)
if self.all_mixed:
await self.stop_mixing_from_async_thread(self.ALL_MIXED_MSG,
'inf')
async def _check_not_enough_funds(self):
while not self.main_taskgroup.closed():
if self._not_enough_funds:
await asyncio.sleep(30)
self._not_enough_funds = False
await asyncio.sleep(5)
async def _maintain_pay_collateral_tx(self):
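# Keypairs caching is awaited only for password-protected wallets; otherwise
# kp_wait_state stays None and the workflow proceeds without the cache.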
kp_wait_state = KPStates.Ready if self.need_password() else None
while not self.main_taskgroup.closed():
wfl = self.pay_collateral_wfl
if wfl:
if not wfl.completed or not wfl.tx_order:
await self.cleanup_pay_collateral_wfl()
elif self.ps_collateral_cnt > 0:
if kp_wait_state and self.keypairs_state != kp_wait_state:
self.logger.info('Pay collateral workflow waiting'
' for keypairs generation')
await asyncio.sleep(5)
continue
if not self.get_confirmed_ps_collateral_data():
await asyncio.sleep(5)
continue
await self.prepare_pay_collateral_wfl()
await asyncio.sleep(0.25)
async def broadcast_new_denoms_new_collateral_wfls(self):
w = self.wallet
while True:
if self.enabled:
wfl = self.new_denoms_wfl
if wfl and wfl.completed and wfl.next_to_send(w):
await self.broadcast_new_denoms_wfl()
await asyncio.sleep(0.25)
wfl = self.new_collateral_wfl
if wfl and wfl.completed and wfl.next_to_send(w):
await self.broadcast_new_collateral_wfl()
await asyncio.sleep(0.25)
else:
await asyncio.sleep(1)
async def _maintain_collateral_amount(self):
kp_wait_state = KPStates.Ready if self.need_password() else None
while not self.main_taskgroup.closed():
wfl = self.new_collateral_wfl
if wfl:
if not wfl.completed or not wfl.tx_order:
await self.cleanup_new_collateral_wfl()
elif (not self._not_enough_funds
and not self.ps_collateral_cnt
and not self.calc_need_denoms_amounts(use_cache=True)):
coins = await self.get_next_coins_for_mixing(for_denoms=False)
if not coins:
await asyncio.sleep(5)
continue
if not self.check_llmq_ready():
self.logger.info(_('New collateral workflow: {}')
.format(self.LLMQ_DATA_NOT_READY))
await asyncio.sleep(5)
continue
elif kp_wait_state and self.keypairs_state != kp_wait_state:
self.logger.info('New collateral workflow waiting'
' for keypairs generation')
await asyncio.sleep(5)
continue
await self.create_new_collateral_wfl()
await asyncio.sleep(0.25)
async def _maintain_denoms(self):
kp_wait_state = KPStates.Ready if self.need_password() else None
while not self.main_taskgroup.closed():
wfl = self.new_denoms_wfl
if wfl:
if not wfl.completed or not wfl.tx_order:
await self.cleanup_new_denoms_wfl()
elif (not self._not_enough_funds
and self.calc_need_denoms_amounts(use_cache=True)):
coins = await self.get_next_coins_for_mixing()
if not coins:
await asyncio.sleep(5)
continue
if not self.check_llmq_ready():
self.logger.info(_('New denoms workflow: {}')
.format(self.LLMQ_DATA_NOT_READY))
await asyncio.sleep(5)
continue
elif kp_wait_state and self.keypairs_state != kp_wait_state:
self.logger.info('New denoms workflow waiting'
' for keypairs generation')
await asyncio.sleep(5)
continue
await self.create_new_denoms_wfl()
await asyncio.sleep(0.25)
async def _mix_denoms(self):
kp_wait_state = KPStates.Ready if self.need_password() else None
def _cleanup():
for uuid in self.denominate_wfl_list:
wfl = self.get_denominate_wfl(uuid)
if wfl and not wfl.completed:
self._cleanup_denominate_wfl(wfl)
await self.loop.run_in_executor(None, _cleanup)
main_taskgroup = self.main_taskgroup
while not main_taskgroup.closed():
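# Spawn a new denominate workflow only when there are denoms to mix,
# a pay collateral tx is prepared and a mixing session slot is free.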
if (self._denoms_to_mix_cache
and self.pay_collateral_wfl
and self.active_denominate_wfl_cnt < self.max_sessions):
if not self.check_llmq_ready():
self.logger.info(_('Denominate workflow: {}')
.format(self.LLMQ_DATA_NOT_READY))
await asyncio.sleep(5)
continue
elif not self.check_protx_info_completeness():
self.logger.info(_('Denominate workflow: {}')
.format(self.MNS_DATA_NOT_READY))
await asyncio.sleep(5)
continue
elif kp_wait_state and self.keypairs_state != kp_wait_state:
self.logger.info('Denominate workflow waiting'
' for keypairs generation')
await asyncio.sleep(5)
continue
if self.state == PSStates.Mixing:
await main_taskgroup.spawn(self.start_denominate_wfl())
await asyncio.sleep(0.25)
async def start_mix_session(self, denom_value, dsq, wfl_lid):
n_denom = PS_DENOMS_DICT[denom_value]
sess = PSMixSession(self, denom_value, n_denom, dsq, wfl_lid)
peer_str = sess.peer_str
async with self.mix_sessions_lock:
if peer_str in self.mix_sessions:
raise Exception(f'Session with {peer_str} already exists')
await sess.run_peer()
self.mix_sessions[peer_str] = sess
return sess
async def stop_mix_session(self, peer_str):
async with self.mix_sessions_lock:
sess = self.mix_sessions.pop(peer_str, None)
if not sess:
self.logger.debug(f'Peer {peer_str} not found in mix_sessions')
return
sess.close_peer()
return sess
# Workflow methods for pay collateral transaction
def get_confirmed_ps_collateral_data(self):
w = self.wallet
for outpoint, ps_collateral in w.db.get_ps_collaterals().items():
addr, value = ps_collateral
utxos = w.get_utxos([addr], min_rounds=PSCoinRounds.COLLATERAL,
confirmed_only=True, consider_islocks=True)
utxos = self.filter_out_hw_ks_coins(utxos)
inputs = []
for utxo in utxos:
if utxo.prevout.to_str() != outpoint:
continue
w.add_input_info(utxo)
inputs.append(utxo)
if inputs:
return outpoint, value, inputs
else:
self.logger.wfl_err(f'ps_collateral outpoint {outpoint}'
f' is not confirmed')
async def prepare_pay_collateral_wfl(self):
try:
_prepare = self._prepare_pay_collateral_tx
res = await self.loop.run_in_executor(None, _prepare)
if res:
txid, wfl = res
self.logger.wfl_ok(f'Completed pay collateral workflow with'
f' tx: {txid}, workflow: {wfl.lid}')
self.wallet.save_db()
except Exception as e:
wfl = self.pay_collateral_wfl
if wfl:
self.logger.wfl_err(f'Error creating pay collateral tx:'
f' {str(e)}, workflow: {wfl.lid}')
await self.cleanup_pay_collateral_wfl(force=True)
else:
self.logger.wfl_err(f'Error during creation of pay collateral'
f' workflow: {str(e)}')
type_e = type(e)
msg = None
if type_e == NoDynamicFeeEstimates:
msg = self.NO_DYNAMIC_FEE_MSG.format(str(e))
elif type_e == NotFoundInKeypairs:
msg = self.NOT_FOUND_KEYS_MSG
elif type_e == SignWithKeypairsFailed:
msg = self.SIGN_WIHT_KP_FAILED_MSG
if msg:
await self.stop_mixing_from_async_thread(msg)
def _prepare_pay_collateral_tx(self):
with self.pay_collateral_wfl_lock:
if self.pay_collateral_wfl:
return
uuid = str(uuid4())
wfl = PSTxWorkflow(uuid=uuid)
self.set_pay_collateral_wfl(wfl)
self.logger.info(f'Started up pay collateral workflow: {wfl.lid}')
res = self.get_confirmed_ps_collateral_data()
if not res:
raise Exception('No confirmed ps_collateral found')
outpoint, value, inputs = res
# check that input addresses are in the keypairs cache, if available
if self._keypairs_cache:
input_addrs = [utxo.address for utxo in inputs]
not_found_addrs = self._find_addrs_not_in_keypairs(input_addrs)
if not_found_addrs:
not_found_addrs = ', '.join(list(not_found_addrs))
                raise NotFoundInKeypairs(f'Input addresses are not found'
                                         f' in the keypairs cache:'
                                         f' {not_found_addrs}')
self.add_ps_spending_collateral(outpoint, wfl.uuid)
if value >= COLLATERAL_VAL*2:
ovalue = value - COLLATERAL_VAL
output_addr = None
for addr, data in self.wallet.db.get_ps_reserved().items():
if data == outpoint:
output_addr = addr
break
if not output_addr:
reserved = self.reserve_addresses(1, for_change=True,
data=outpoint)
output_addr = reserved[0]
outputs = [PartialTxOutput.from_address_and_value(output_addr, ovalue)]
else:
            # OP_RETURN as output script
outputs = [PartialTxOutput(scriptpubkey=bfh('6a'), value=0)]
tx = PartialTransaction.from_io(inputs[:], outputs[:], locktime=0)
tx.inputs()[0].nsequence = 0xffffffff
tx = self.sign_transaction(tx, None)
txid = tx.txid()
raw_tx = tx.serialize_to_network()
tx_type = PSTxTypes.PAY_COLLATERAL
wfl.add_tx(txid=txid, raw_tx=raw_tx, tx_type=tx_type)
wfl.completed = True
with self.pay_collateral_wfl_lock:
saved = self.pay_collateral_wfl
if not saved:
raise Exception('pay_collateral_wfl not found')
if saved.uuid != wfl.uuid:
raise Exception('pay_collateral_wfl differs from original')
self.set_pay_collateral_wfl(wfl)
return txid, wfl
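    # Cleanup helpers: remove workflow txs from the wallet history and
    # release the ps_spending_collateral entries reserved by this workflow.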
async def cleanup_pay_collateral_wfl(self, force=False):
_cleanup = self._cleanup_pay_collateral_wfl
changed = await self.loop.run_in_executor(None, _cleanup, force)
if changed:
self.wallet.save_db()
def _cleanup_pay_collateral_wfl(self, force=False):
with self.pay_collateral_wfl_lock:
wfl = self.pay_collateral_wfl
if not wfl or wfl.completed and wfl.tx_order and not force:
return
w = self.wallet
if wfl.tx_order:
for txid in wfl.tx_order[::-1]: # use reversed tx_order
if w.db.get_transaction(txid):
w.remove_transaction(txid)
else:
self._cleanup_pay_collateral_wfl_tx_data(txid)
else:
self._cleanup_pay_collateral_wfl_tx_data()
return True
def _cleanup_pay_collateral_wfl_tx_data(self, txid=None):
with self.pay_collateral_wfl_lock:
wfl = self.pay_collateral_wfl
if not wfl:
return
if txid:
tx_data = wfl.pop_tx(txid)
if tx_data:
self.set_pay_collateral_wfl(wfl)
self.logger.info(f'Cleaned up pay collateral tx:'
f' {txid}, workflow: {wfl.lid}')
if wfl.tx_order:
return
w = self.wallet
for outpoint, uuid in list(w.db.get_ps_spending_collaterals().items()):
if uuid != wfl.uuid:
continue
with self.collateral_lock:
self.pop_ps_spending_collateral(outpoint)
with self.pay_collateral_wfl_lock:
saved = self.pay_collateral_wfl
if saved and saved.uuid == wfl.uuid:
self.clear_pay_collateral_wfl()
self.logger.info(f'Cleaned up pay collateral workflow: {wfl.lid}')
def _search_pay_collateral_wfl(self, txid, tx):
err = self._check_pay_collateral_tx_err(txid, tx, full_check=False)
if not err:
wfl = self.pay_collateral_wfl
if wfl and wfl.tx_order and txid in wfl.tx_order:
return wfl
def _check_on_pay_collateral_wfl(self, txid, tx):
wfl = self._search_pay_collateral_wfl(txid, tx)
err = self._check_pay_collateral_tx_err(txid, tx)
if not err:
return True
if wfl:
raise AddPSDataError(f'{err}')
else:
return False
def _process_by_pay_collateral_wfl(self, txid, tx):
wfl = self._search_pay_collateral_wfl(txid, tx)
if not wfl:
return
with self.pay_collateral_wfl_lock:
saved = self.pay_collateral_wfl
if not saved or saved.uuid != wfl.uuid:
return
tx_data = wfl.pop_tx(txid)
if tx_data:
self.set_pay_collateral_wfl(wfl)
self.logger.wfl_done(f'Processed tx: {txid} from pay'
f' collateral workflow: {wfl.lid}')
if wfl.tx_order:
return
w = self.wallet
for outpoint, uuid in list(w.db.get_ps_spending_collaterals().items()):
if uuid != wfl.uuid:
continue
with self.collateral_lock:
self.pop_ps_spending_collateral(outpoint)
with self.pay_collateral_wfl_lock:
saved = self.pay_collateral_wfl
if saved and saved.uuid == wfl.uuid:
self.clear_pay_collateral_wfl()
self.logger.wfl_done(f'Finished processing of pay collateral'
f' workflow: {wfl.lid}')
def get_pay_collateral_tx(self):
wfl = self.pay_collateral_wfl
if not wfl or not wfl.tx_order:
return
txid = wfl.tx_order[0]
tx_data = wfl.tx_data.get(txid)
if not tx_data:
return
return tx_data.raw_tx
# Workflow methods for new collateral transaction
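    # Return a human-readable summary of the new collateral tx that would
    # be created from the given coins, or None if they do not qualify.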
def new_collateral_from_coins_info(self, coins):
if not coins or len(coins) > 1:
return
coins_val = sum([c.value_sats() for c in coins])
if (coins_val >= self.min_new_denoms_from_coins_val
or coins_val < self.min_new_collateral_from_coins_val):
return
fee_per_kb = self.config.fee_per_kb()
for collateral_val in CREATE_COLLATERAL_VALS[::-1]:
new_collateral_fee = calc_tx_fee(1, 1, fee_per_kb, max_size=True)
if coins_val - new_collateral_fee >= collateral_val:
tx_type = SPEC_TX_NAMES[PSTxTypes.NEW_COLLATERAL]
info = _('Transactions type: {}').format(tx_type)
info += '\n'
info += _('Count of transactions: {}').format(1)
info += '\n'
info += _('Total sent amount: {}').format(coins_val)
info += '\n'
info += _('Total output amount: {}').format(collateral_val)
info += '\n'
info += _('Total fee: {}').format(coins_val - collateral_val)
return info
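    # GUI entry point: synchronously create, sign and register a new
    # collateral tx, returning (wfl, None) on success or (None, error).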
def create_new_collateral_wfl_from_gui(self, coins, password):
if self.state in self.mixing_running_states:
            return None, ('Can not create new collateral as mixing'
                          ' process is currently running.')
if len(coins) > 1:
return None, ('Can not create new collateral amount,'
' too many coins selected')
wfl = self._start_new_collateral_wfl()
if not wfl:
return None, ('Can not create new collateral as other new'
' collateral creation process is in progress')
try:
w = self.wallet
txid, tx = self._make_new_collateral_tx(wfl, coins, password)
if not w.add_transaction(tx):
raise Exception(f'Transaction with txid: {txid}'
f' conflicts with current history')
            if w.db.get_ps_tx(txid)[0] != PSTxTypes.NEW_COLLATERAL:
self._add_ps_data(txid, tx, PSTxTypes.NEW_COLLATERAL)
with self.new_collateral_wfl_lock:
saved = self.new_collateral_wfl
if not saved:
raise Exception('new_collateral_wfl not found')
if saved.uuid != wfl.uuid:
raise Exception('new_collateral_wfl differs from original')
wfl.completed = True
self.set_new_collateral_wfl(wfl)
self.logger.wfl_ok(f'Completed new collateral workflow'
f' with tx: {txid},'
f' workflow: {wfl.lid}')
return wfl, None
except Exception as e:
err = str(e)
self.logger.wfl_err(f'Error creating new collateral tx:'
f' {err}, workflow: {wfl.lid}')
self._cleanup_new_collateral_wfl(force=True)
self.logger.info(f'Cleaned up new collateral workflow:'
f' {wfl.lid}')
return None, err
async def create_new_collateral_wfl(self):
coins_data = await self.get_next_coins_for_mixing(for_denoms=False)
coins = coins_data['coins']
_start = self._start_new_collateral_wfl
wfl = await self.loop.run_in_executor(None, _start)
if not wfl:
return
try:
_make_tx = self._make_new_collateral_tx
txid, tx = await self.loop.run_in_executor(None, _make_tx,
wfl, coins)
w = self.wallet
            # add_transaction needs to run in the network thread
if not w.add_transaction(tx):
raise Exception(f'Transaction with txid: {txid}'
f' conflicts with current history')
def _after_create_tx():
with self.new_collateral_wfl_lock:
saved = self.new_collateral_wfl
if not saved:
raise Exception('new_collateral_wfl not found')
if saved.uuid != wfl.uuid:
raise Exception('new_collateral_wfl differs'
' from original')
wfl.completed = True
self.set_new_collateral_wfl(wfl)
self.logger.wfl_ok(f'Completed new collateral workflow'
f' with tx: {txid},'
f' workflow: {wfl.lid}')
await self.loop.run_in_executor(None, _after_create_tx)
w.save_db()
except Exception as e:
self.logger.wfl_err(f'Error creating new collateral tx:'
f' {str(e)}, workflow: {wfl.lid}')
await self.cleanup_new_collateral_wfl(force=True)
type_e = type(e)
msg = None
if type_e == NoDynamicFeeEstimates:
msg = self.NO_DYNAMIC_FEE_MSG.format(str(e))
elif type_e == AddPSDataError:
msg = self.ADD_PS_DATA_ERR_MSG
type_name = SPEC_TX_NAMES[PSTxTypes.NEW_COLLATERAL]
msg = f'{msg} {type_name} {txid}:\n{str(e)}'
elif type_e == NotFoundInKeypairs:
msg = self.NOT_FOUND_KEYS_MSG
elif type_e == SignWithKeypairsFailed:
msg = self.SIGN_WIHT_KP_FAILED_MSG
elif type_e == NotEnoughFunds:
self._not_enough_funds = True
if msg:
await self.stop_mixing_from_async_thread(msg)
def _start_new_collateral_wfl(self):
with self.new_collateral_wfl_lock:
if self.new_collateral_wfl:
return
uuid = str(uuid4())
wfl = PSTxWorkflow(uuid=uuid)
self.set_new_collateral_wfl(wfl)
self.logger.info(f'Started up new collateral workflow: {wfl.lid}')
return self.new_collateral_wfl
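    # Build the new collateral tx: select coins (a minimal denom utxo by
    # default), pick a collateral value, sign, and record the tx in the workflow.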
def _make_new_collateral_tx(self, wfl, coins=None, password=None):
with self.new_collateral_wfl_lock:
saved = self.new_collateral_wfl
if not saved:
raise Exception('new_collateral_wfl not found')
if saved.uuid != wfl.uuid:
raise Exception('new_collateral_wfl differs from original')
w = self.wallet
fee_per_kb = self.config.fee_per_kb()
uuid = wfl.uuid
oaddr = self.reserve_addresses(1, data=uuid)[0]
if not coins:
            # try to select a minimal denom utxo with minimal rounds
coins = w.get_utxos(None, mature_only=True, confirmed_only=True,
consider_islocks=True, min_rounds=0)
coins = [c for c in coins if c.value_sats() == MIN_DENOM_VAL]
coins = self.filter_out_hw_ks_coins(coins)
if not coins:
raise NotEnoughFunds()
coins = sorted(coins, key=lambda x: x.ps_rounds)
coins = coins[0:1]
no_change = False
outputs = None
coins_val = sum([c.value_sats() for c in coins])
if (len(coins) == 1 # Minimal denom or PS other selected, no change
and coins[0].ps_rounds is not None
and coins[0].ps_rounds != PSCoinRounds.MIX_ORIGIN):
if coins_val >= self.min_new_denoms_from_coins_val:
                raise TooLargeUtxoVal('Too large utxo selected')
no_change = True
if no_change:
for val in CREATE_COLLATERAL_VALS[::-1]:
new_collateral_fee = calc_tx_fee(1, 1, fee_per_kb,
max_size=True)
if coins_val - new_collateral_fee < val:
continue
outputs = [PartialTxOutput.from_address_and_value(oaddr, val)]
break
if outputs is None:
raise NotEnoughFunds()
else:
val = CREATE_COLLATERAL_VAL
outputs = [PartialTxOutput.from_address_and_value(oaddr, val)]
tx = w.make_unsigned_transaction(coins=coins, outputs=outputs)
inputs = tx.inputs()
        # check input addresses are in keypairs if the keypairs cache is available
if self._keypairs_cache:
input_addrs = [utxo.address for utxo in inputs]
not_found_addrs = self._find_addrs_not_in_keypairs(input_addrs)
if not_found_addrs:
not_found_addrs = ', '.join(list(not_found_addrs))
                raise NotFoundInKeypairs(f'Input addresses are not found'
                                         f' in the keypairs cache:'
                                         f' {not_found_addrs}')
if no_change:
tx = PartialTransaction.from_io(inputs[:], outputs[:], locktime=0)
for txin in tx.inputs():
txin.nsequence = 0xffffffff
        else:  # use the first input address as change, spend the selected inputs
change_addr = inputs[0].address
tx = w.make_unsigned_transaction(coins=inputs, outputs=outputs,
change_addr=change_addr)
tx = self.sign_transaction(tx, password)
estimated_fee = calc_tx_fee(len(tx.inputs()), len(tx.outputs()),
fee_per_kb, max_size=True)
overfee = tx.get_fee() - estimated_fee
assert overfee < self.min_new_collateral_from_coins_val, 'too high fee'
txid = tx.txid()
raw_tx = tx.serialize_to_network()
tx_type = PSTxTypes.NEW_COLLATERAL
wfl.add_tx(txid=txid, raw_tx=raw_tx, tx_type=tx_type)
with self.new_collateral_wfl_lock:
saved = self.new_collateral_wfl
if not saved:
raise Exception('new_collateral_wfl not found')
if saved.uuid != wfl.uuid:
raise Exception('new_collateral_wfl differs from original')
self.set_new_collateral_wfl(wfl)
return txid, tx
async def cleanup_new_collateral_wfl(self, force=False):
_cleanup = self._cleanup_new_collateral_wfl
changed = await self.loop.run_in_executor(None, _cleanup, force)
if changed:
self.wallet.save_db()
def _cleanup_new_collateral_wfl(self, force=False):
with self.new_collateral_wfl_lock:
wfl = self.new_collateral_wfl
if not wfl or wfl.completed and wfl.tx_order and not force:
return
w = self.wallet
if wfl.tx_order:
for txid in wfl.tx_order[::-1]: # use reversed tx_order
if w.db.get_transaction(txid):
w.remove_transaction(txid)
else:
self._cleanup_new_collateral_wfl_tx_data(txid)
else:
self._cleanup_new_collateral_wfl_tx_data()
return True
def _cleanup_new_collateral_wfl_tx_data(self, txid=None):
with self.new_collateral_wfl_lock:
wfl = self.new_collateral_wfl
if not wfl:
return
if txid:
tx_data = wfl.pop_tx(txid)
if tx_data:
self.set_new_collateral_wfl(wfl)
self.logger.info(f'Cleaned up new collateral tx:'
f' {txid}, workflow: {wfl.lid}')
if wfl.tx_order:
return
w = self.wallet
for addr in w.db.select_ps_reserved(data=wfl.uuid):
self.pop_ps_reserved(addr)
with self.new_collateral_wfl_lock:
saved = self.new_collateral_wfl
if saved and saved.uuid == wfl.uuid:
self.clear_new_collateral_wfl()
self.logger.info(f'Cleaned up new collateral workflow: {wfl.lid}')
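    # Broadcast the next pending tx of the completed new collateral
    # workflow; on success process it via _process_by_new_collateral_wfl.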
async def broadcast_new_collateral_wfl(self):
def _check_wfl():
with self.new_collateral_wfl_lock:
wfl = self.new_collateral_wfl
if not wfl:
return
if not wfl.completed:
return
return wfl
wfl = await self.loop.run_in_executor(None, _check_wfl)
if not wfl:
return
w = self.wallet
tx_data = wfl.next_to_send(w)
if not tx_data:
return
txid = tx_data.txid
sent, err = await tx_data.send(self)
if err:
def _on_fail():
with self.new_collateral_wfl_lock:
saved = self.new_collateral_wfl
if not saved:
raise Exception('new_collateral_wfl not found')
if saved.uuid != wfl.uuid:
raise Exception('new_collateral_wfl differs'
' from original')
self.set_new_collateral_wfl(wfl)
self.logger.wfl_err(f'Failed broadcast of new collateral tx'
f' {txid}: {err}, workflow {wfl.lid}')
await self.loop.run_in_executor(None, _on_fail)
if sent:
def _on_success():
with self.new_collateral_wfl_lock:
saved = self.new_collateral_wfl
if not saved:
raise Exception('new_collateral_wfl not found')
if saved.uuid != wfl.uuid:
raise Exception('new_collateral_wfl differs'
' from original')
self.set_new_collateral_wfl(wfl)
self.logger.wfl_done(f'Broadcasted transaction {txid} from new'
f' collateral workflow: {wfl.lid}')
tx = Transaction(wfl.tx_data[txid].raw_tx)
self._process_by_new_collateral_wfl(txid, tx)
if not wfl.next_to_send(w):
self.logger.wfl_done(f'Broadcast completed for new'
f' collateral workflow: {wfl.lid}')
await self.loop.run_in_executor(None, _on_success)
def _search_new_collateral_wfl(self, txid, tx):
err = self._check_new_collateral_tx_err(txid, tx, full_check=False)
if not err:
wfl = self.new_collateral_wfl
if wfl and wfl.tx_order and txid in wfl.tx_order:
return wfl
def _check_on_new_collateral_wfl(self, txid, tx):
wfl = self._search_new_collateral_wfl(txid, tx)
err = self._check_new_collateral_tx_err(txid, tx)
if not err:
return True
if wfl:
raise AddPSDataError(f'{err}')
else:
return False
def _process_by_new_collateral_wfl(self, txid, tx):
wfl = self._search_new_collateral_wfl(txid, tx)
if not wfl:
return
with self.new_collateral_wfl_lock:
saved = self.new_collateral_wfl
if not saved or saved.uuid != wfl.uuid:
return
tx_data = wfl.pop_tx(txid)
if tx_data:
self.set_new_collateral_wfl(wfl)
self.logger.wfl_done(f'Processed tx: {txid} from new'
f' collateral workflow: {wfl.lid}')
if wfl.tx_order:
return
w = self.wallet
for addr in w.db.select_ps_reserved(data=wfl.uuid):
self.pop_ps_reserved(addr)
with self.new_collateral_wfl_lock:
saved = self.new_collateral_wfl
if saved and saved.uuid == wfl.uuid:
self.clear_new_collateral_wfl()
self.logger.wfl_done(f'Finished processing of new collateral'
f' workflow: {wfl.lid}')
# Workflow methods for new denoms transaction
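    # Like new_collateral_from_coins_info, but summarizes the chain of new
    # denoms txs calculated by _calc_denoms_amounts_from_coins.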
def new_denoms_from_coins_info(self, coins):
if not coins or len(coins) > 1:
return
coins_val = sum([c.value_sats() for c in coins])
if coins_val < self.min_new_denoms_from_coins_val:
return
fee_per_kb = self.config.fee_per_kb()
denoms_amounts = self._calc_denoms_amounts_from_coins(coins,
fee_per_kb)
if denoms_amounts:
tx_cnt = len(denoms_amounts)
outputs_val = sum([sum(amounts) for amounts in denoms_amounts])
tx_type = SPEC_TX_NAMES[PSTxTypes.NEW_DENOMS]
info = _('Transactions type: {}').format(tx_type)
info += '\n'
info += _('Count of transactions: {}').format(tx_cnt)
info += '\n'
info += _('Total sent amount: {}').format(coins_val)
info += '\n'
info += _('Total output amount: {}').format(outputs_val)
info += '\n'
info += _('Total fee: {}').format(coins_val - outputs_val)
return info
def create_new_denoms_wfl_from_gui(self, coins, password):
if self.state in self.mixing_running_states:
            return None, ('Can not create new denoms as mixing process'
                          ' is currently running.')
if len(coins) > 1:
return None, ('Can not create new denoms,'
' too many coins selected')
wfl, outputs_amounts = self._start_new_denoms_wfl(coins,
use_all_coins=True)
if not outputs_amounts:
return None, ('Can not create new denoms,'
' not enough coins selected')
if not wfl:
return None, ('Can not create new denoms as other new'
' denoms creation process is in progress')
last_tx_idx = len(outputs_amounts) - 1
for i, tx_amounts in enumerate(outputs_amounts):
try:
w = self.wallet
txid, tx = self._make_new_denoms_tx(wfl, tx_amounts,
last_tx_idx, i,
coins, password,
use_all_coins=True)
if not w.add_transaction(tx):
raise Exception(f'Transaction with txid: {txid}'
f' conflicts with current history')
                if w.db.get_ps_tx(txid)[0] != PSTxTypes.NEW_DENOMS:
self._add_ps_data(txid, tx, PSTxTypes.NEW_DENOMS)
self.logger.info(f'Created new denoms tx: {txid},'
f' workflow: {wfl.lid}')
if i == last_tx_idx:
with self.new_denoms_wfl_lock:
saved = self.new_denoms_wfl
if not saved:
raise Exception('new_denoms_wfl not found')
if saved.uuid != wfl.uuid:
raise Exception('new_denoms_wfl differs'
' from original')
wfl.completed = True
self.set_new_denoms_wfl(wfl)
self.logger.wfl_ok(f'Completed new denoms'
f' workflow: {wfl.lid}')
return wfl, None
else:
txin0 = copy.deepcopy(tx.inputs()[0])
txin0_addr = w.get_txin_address(txin0)
utxos = w.get_utxos([txin0_addr],
min_rounds=PSCoinRounds.OTHER)
change_outpoint = None
for change_idx, o in enumerate(tx.outputs()):
if o.address == txin0_addr:
change_outpoint = f'{txid}:{change_idx}'
break
coins = []
for utxo in utxos:
if utxo.prevout.to_str() != change_outpoint:
continue
coins.append(utxo)
except Exception as e:
err = str(e)
self.logger.wfl_err(f'Error creating new denoms tx:'
f' {err}, workflow: {wfl.lid}')
self._cleanup_new_denoms_wfl(force=True)
self.logger.info(f'Cleaned up new denoms workflow:'
f' {wfl.lid}')
return None, err
async def create_new_denoms_wfl(self):
coins_data = await self.get_next_coins_for_mixing()
coins = coins_data['coins']
if not coins:
return
_start = self._start_new_denoms_wfl
wfl, outputs_amounts = await self.loop.run_in_executor(None, _start,
coins)
if not wfl:
return
last_tx_idx = len(outputs_amounts) - 1
for i, tx_amounts in enumerate(outputs_amounts):
try:
w = self.wallet
_make_tx = self._make_new_denoms_tx
txid, tx = await self.loop.run_in_executor(None, _make_tx,
wfl, tx_amounts,
last_tx_idx, i,
coins)
                # add_transaction needs to run in the network thread
if not w.add_transaction(tx):
raise Exception(f'Transaction with txid: {txid}'
f' conflicts with current history')
def _after_create_tx():
with self.new_denoms_wfl_lock:
self.logger.info(f'Created new denoms tx: {txid},'
f' workflow: {wfl.lid}')
if i == last_tx_idx:
saved = self.new_denoms_wfl
if not saved:
raise Exception('new_denoms_wfl not found')
if saved.uuid != wfl.uuid:
raise Exception('new_denoms_wfl differs'
' from original')
wfl.completed = True
self.set_new_denoms_wfl(wfl)
self.logger.wfl_ok(f'Completed new denoms'
f' workflow: {wfl.lid}')
coins_data = self._get_next_coins_for_mixing()
coins = coins_data['coins']
txin0 = copy.deepcopy(tx.inputs()[0])
txin0_addr = w.get_txin_address(txin0)
if i != last_tx_idx:
utxos = w.get_utxos([txin0_addr])
change_outpoint = None
for change_idx, o in enumerate(tx.outputs()):
if o.address == txin0_addr:
change_outpoint = f'{txid}:{change_idx}'
break
for utxo in utxos:
if utxo.prevout.to_str() != change_outpoint:
continue
coins.append(utxo)
if self.group_origin_coins_by_addr:
coins = [c for c in coins if c.address == txin0_addr]
return coins
coins = await self.loop.run_in_executor(None, _after_create_tx)
w.save_db()
except Exception as e:
self.logger.wfl_err(f'Error creating new denoms tx:'
f' {str(e)}, workflow: {wfl.lid}')
await self.cleanup_new_denoms_wfl(force=True)
type_e = type(e)
msg = None
if type_e == NoDynamicFeeEstimates:
msg = self.NO_DYNAMIC_FEE_MSG.format(str(e))
elif type_e == AddPSDataError:
msg = self.ADD_PS_DATA_ERR_MSG
type_name = SPEC_TX_NAMES[PSTxTypes.NEW_DENOMS]
msg = f'{msg} {type_name} {txid}:\n{str(e)}'
elif type_e == NotFoundInKeypairs:
msg = self.NOT_FOUND_KEYS_MSG
elif type_e == SignWithKeypairsFailed:
msg = self.SIGN_WIHT_KP_FAILED_MSG
elif type_e == NotEnoughFunds:
self._not_enough_funds = True
if msg:
await self.stop_mixing_from_async_thread(msg)
break
def _start_new_denoms_wfl(self, coins, use_all_coins=False):
outputs_amounts = \
self.calc_need_denoms_amounts(coins=coins,
use_all_coins=use_all_coins)
if not outputs_amounts:
return None, None
with self.new_denoms_wfl_lock, \
self.pay_collateral_wfl_lock, \
self.new_collateral_wfl_lock:
if self.new_denoms_wfl:
return None, None
uuid = str(uuid4())
wfl = PSTxWorkflow(uuid=uuid)
self.set_new_denoms_wfl(wfl)
self.logger.info(f'Started up new denoms workflow: {wfl.lid}')
return wfl, outputs_amounts
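    # Build one tx of the new denoms chain: reserve output addresses,
    # optionally spend all coins without change on the last tx, and sign.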
def _make_new_denoms_tx(self, wfl, tx_amounts, last_tx_idx, i,
coins, password=None, use_all_coins=False):
w = self.wallet
        # first, try to create the new denoms tx with a change output
addrs_cnt = len(tx_amounts)
oaddrs = self.reserve_addresses(addrs_cnt, data=wfl.uuid)
outputs = [PartialTxOutput.from_address_and_value(addr, a)
for addr, a in zip(oaddrs, tx_amounts)]
tx = w.make_unsigned_transaction(coins=coins, outputs=outputs)
inputs = tx.inputs()
        # check input addresses are in keypairs if the keypairs cache is available
if self._keypairs_cache:
input_addrs = [utxo.address for utxo in inputs]
not_found_addrs = self._find_addrs_not_in_keypairs(input_addrs)
if not_found_addrs:
not_found_addrs = ', '.join(list(not_found_addrs))
                raise NotFoundInKeypairs(f'Input addresses are not found'
                                         f' in the keypairs cache:'
                                         f' {not_found_addrs}')
no_change = False
fee_per_kb = self.config.fee_per_kb()
if i == last_tx_idx:
if use_all_coins:
no_change = True
if no_change:
tx = PartialTransaction.from_io(inputs[:], outputs[:], locktime=0)
for txin in tx.inputs():
txin.nsequence = 0xffffffff
else:
            # use the first input address as change, spend the selected inputs
in0 = inputs[0].address
tx = w.make_unsigned_transaction(coins=inputs, outputs=outputs,
change_addr=in0)
tx = self.sign_transaction(tx, password)
estimated_fee = calc_tx_fee(len(tx.inputs()), len(tx.outputs()),
fee_per_kb, max_size=True)
overfee = tx.get_fee() - estimated_fee
assert overfee < self.min_new_collateral_from_coins_val, 'too high fee'
txid = tx.txid()
raw_tx = tx.serialize_to_network()
tx_type = PSTxTypes.NEW_DENOMS
wfl.add_tx(txid=txid, raw_tx=raw_tx, tx_type=tx_type)
with self.new_denoms_wfl_lock:
saved = self.new_denoms_wfl
if not saved:
raise Exception('new_denoms_wfl not found')
if saved.uuid != wfl.uuid:
raise Exception('new_denoms_wfl differs from original')
self.set_new_denoms_wfl(wfl)
return txid, tx
async def cleanup_new_denoms_wfl(self, force=False):
_cleanup = self._cleanup_new_denoms_wfl
changed = await self.loop.run_in_executor(None, _cleanup, force)
if changed:
self.wallet.save_db()
def _cleanup_new_denoms_wfl(self, force=False):
with self.new_denoms_wfl_lock:
wfl = self.new_denoms_wfl
if not wfl or wfl.completed and wfl.tx_order and not force:
return
w = self.wallet
if wfl.tx_order:
for txid in wfl.tx_order[::-1]: # use reversed tx_order
if w.db.get_transaction(txid):
w.remove_transaction(txid)
else:
self._cleanup_new_denoms_wfl_tx_data(txid)
else:
self._cleanup_new_denoms_wfl_tx_data()
return True
def _cleanup_new_denoms_wfl_tx_data(self, txid=None):
with self.new_denoms_wfl_lock:
wfl = self.new_denoms_wfl
if not wfl:
return
if txid:
tx_data = wfl.pop_tx(txid)
if tx_data:
self.set_new_denoms_wfl(wfl)
self.logger.info(f'Cleaned up new denoms tx:'
f' {txid}, workflow: {wfl.lid}')
if wfl.tx_order:
return
w = self.wallet
for addr in w.db.select_ps_reserved(data=wfl.uuid):
self.pop_ps_reserved(addr)
with self.new_denoms_wfl_lock:
saved = self.new_denoms_wfl
if saved and saved.uuid == wfl.uuid:
self.clear_new_denoms_wfl()
self.logger.info(f'Cleaned up new denoms workflow: {wfl.lid}')
async def broadcast_new_denoms_wfl(self):
def _check_wfl():
with self.new_denoms_wfl_lock:
wfl = self.new_denoms_wfl
if not wfl:
return
if not wfl.completed:
return
return wfl
wfl = await self.loop.run_in_executor(None, _check_wfl)
if not wfl:
return
w = self.wallet
tx_data = wfl.next_to_send(w)
if not tx_data:
return
txid = tx_data.txid
sent, err = await tx_data.send(self)
if err:
def _on_fail():
with self.new_denoms_wfl_lock:
saved = self.new_denoms_wfl
if not saved:
raise Exception('new_denoms_wfl not found')
if saved.uuid != wfl.uuid:
raise Exception('new_denoms_wfl differs from original')
self.set_new_denoms_wfl(wfl)
self.logger.wfl_err(f'Failed broadcast of new denoms tx'
f' {txid}: {err}, workflow {wfl.lid}')
await self.loop.run_in_executor(None, _on_fail)
if sent:
def _on_success():
with self.new_denoms_wfl_lock:
saved = self.new_denoms_wfl
if not saved:
raise Exception('new_denoms_wfl not found')
if saved.uuid != wfl.uuid:
raise Exception('new_denoms_wfl differs from original')
self.set_new_denoms_wfl(wfl)
self.logger.wfl_done(f'Broadcasted transaction {txid} from new'
f' denoms workflow: {wfl.lid}')
self.last_denoms_tx_time = time.time()
tx = Transaction(wfl.tx_data[txid].raw_tx)
self._process_by_new_denoms_wfl(txid, tx)
if not wfl.next_to_send(w):
self.logger.wfl_done(f'Broadcast completed for new denoms'
f' workflow: {wfl.lid}')
await self.loop.run_in_executor(None, _on_success)
def _search_new_denoms_wfl(self, txid, tx):
err = self._check_new_denoms_tx_err(txid, tx, full_check=False)
if not err:
wfl = self.new_denoms_wfl
if wfl and wfl.tx_order and txid in wfl.tx_order:
return wfl
def _check_on_new_denoms_wfl(self, txid, tx):
wfl = self._search_new_denoms_wfl(txid, tx)
err = self._check_new_denoms_tx_err(txid, tx)
if not err:
return True
if wfl:
raise AddPSDataError(f'{err}')
else:
return False
def _process_by_new_denoms_wfl(self, txid, tx):
wfl = self._search_new_denoms_wfl(txid, tx)
if not wfl:
return
with self.new_denoms_wfl_lock:
saved = self.new_denoms_wfl
if not saved or saved.uuid != wfl.uuid:
return
tx_data = wfl.pop_tx(txid)
if tx_data:
self.set_new_denoms_wfl(wfl)
self.logger.wfl_done(f'Processed tx: {txid} from new denoms'
f' workflow: {wfl.lid}')
if wfl.tx_order:
return
w = self.wallet
for addr in w.db.select_ps_reserved(data=wfl.uuid):
self.pop_ps_reserved(addr)
with self.new_denoms_wfl_lock:
saved = self.new_denoms_wfl
if saved and saved.uuid == wfl.uuid:
self.clear_new_denoms_wfl()
self.logger.wfl_done(f'Finished processing of new denoms'
f' workflow: {wfl.lid}')
# Workflow methods for denominate transaction
async def cleanup_staled_denominate_wfls(self):
def _cleanup_staled():
changed = False
for uuid in self.denominate_wfl_list:
wfl = self.get_denominate_wfl(uuid)
if not wfl or not wfl.completed:
continue
now = time.time()
if now - wfl.completed > self.wait_for_mn_txs_time:
self.logger.info(f'Cleaning staled denominate'
f' workflow: {wfl.lid}')
self._cleanup_denominate_wfl(wfl)
changed = True
return changed
while True:
if self.enabled:
done = await self.loop.run_in_executor(None, _cleanup_staled)
if done:
self.wallet.save_db()
await asyncio.sleep(self.wait_for_mn_txs_time/12)
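    # Main denominate workflow: join a recent dsq queue or create a new one
    # on a random masternode, then drive the dsa/dsi/dss message exchange.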
async def start_denominate_wfl(self):
wfl = None
try:
_start = self._start_denominate_wfl
dsq = None
session = None
if random.random() > 0.33:
self.logger.debug('try to get masternode from recent dsq')
recent_mns = self.recent_mixes_mns
while self.state == PSStates.Mixing:
dsq = self.dash_net.get_recent_dsq(recent_mns)
if dsq is not None:
                        self.logger.debug(f'got dsq from recent dsq queue:'
                                          f' {dsq.masternodeOutPoint}')
dval = PS_DENOM_REVERSE_DICT[dsq.nDenom]
wfl = await self.loop.run_in_executor(None,
_start, dval)
break
await asyncio.sleep(0.5)
else:
self.logger.debug('try to create new queue'
' on random masternode')
wfl = await self.loop.run_in_executor(None, _start)
if not wfl:
return
if self.state != PSStates.Mixing:
raise Exception('Mixing is finished')
else:
session = await self.start_mix_session(wfl.denom, dsq, wfl.lid)
pay_collateral_tx = self.get_pay_collateral_tx()
if not pay_collateral_tx:
raise Exception('Absent suitable pay collateral tx')
await session.send_dsa(pay_collateral_tx)
while True:
cmd, res = await session.read_next_msg(wfl)
if cmd == 'dssu':
continue
elif cmd == 'dsq' and session.fReady:
break
else:
                    raise Exception(f'Unsolicited cmd: {cmd} after dsa sent')
pay_collateral_tx = self.get_pay_collateral_tx()
if not pay_collateral_tx:
raise Exception('Absent suitable pay collateral tx')
final_tx = None
await session.send_dsi(wfl.inputs, pay_collateral_tx, wfl.outputs)
while True:
cmd, res = await session.read_next_msg(wfl)
if cmd == 'dssu':
continue
elif cmd == 'dsf':
final_tx = PartialTransaction.from_tx(res)
break
else:
                    raise Exception(f'Unsolicited cmd: {cmd} after dsi sent')
signed_inputs = self._sign_inputs(final_tx, wfl.inputs)
await session.send_dss(signed_inputs)
while True:
cmd, res = await session.read_next_msg(wfl)
if cmd == 'dssu':
continue
elif cmd == 'dsc':
def _on_dsc():
with self.denominate_wfl_lock:
saved = self.get_denominate_wfl(wfl.uuid)
if saved:
saved.completed = time.time()
self.set_denominate_wfl(saved)
return saved
else: # already processed from _add_ps_data
self.logger.debug(f'denominate workflow:'
f' {wfl.lid} not found')
saved = await self.loop.run_in_executor(None, _on_dsc)
if saved:
wfl = saved
self.wallet.save_db()
break
else:
                    raise Exception(f'Unsolicited cmd: {cmd} after dss sent')
self.logger.wfl_ok(f'Completed denominate workflow: {wfl.lid}')
except Exception as e:
type_e = type(e)
if type_e != asyncio.CancelledError:
if wfl:
                    self.logger.wfl_err(f'Error in denominate workflow:'
                                        f' {str(e)}, workflow: {wfl.lid}')
                else:
                    self.logger.wfl_err(f'Error during creation of denominate'
                                        f' workflow: {str(e)}')
msg = None
if type_e == NoDynamicFeeEstimates:
msg = self.NO_DYNAMIC_FEE_MSG.format(str(e))
elif type_e == NotFoundInKeypairs:
msg = self.NOT_FOUND_KEYS_MSG
elif type_e == SignWithKeypairsFailed:
msg = self.SIGN_WIHT_KP_FAILED_MSG
if msg:
await self.stop_mixing_from_async_thread(msg)
finally:
if session:
await self.stop_mix_session(session.peer_str)
if wfl:
await self.cleanup_denominate_wfl(wfl)
def _select_denoms_to_mix(self, denom_value=None):
if not self._denoms_to_mix_cache:
self.logger.debug('No suitable denoms to mix,'
' _denoms_to_mix_cache is empty')
return None, None
if denom_value is not None:
denoms = self.denoms_to_mix(denom_value=denom_value)
else:
denoms = self.denoms_to_mix()
outpoints = list(denoms.keys())
w = self.wallet
icnt = 0
txids = []
inputs = []
while icnt < random.randint(1, PRIVATESEND_ENTRY_MAX_SIZE):
if not outpoints:
break
outpoint = outpoints.pop(random.randint(0, len(outpoints)-1))
if not w.db.get_ps_denom(outpoint): # already spent
continue
if w.db.get_ps_spending_denom(outpoint): # reserved to spend
continue
txid = outpoint.split(':')[0]
if txid in txids: # skip outputs from same tx
continue
height = w.get_tx_height(txid).height
islock = w.db.get_islock(txid)
if not islock and height <= 0: # skip not islocked/confirmed
continue
denom = denoms.pop(outpoint)
if denom[2] >= self.mix_rounds:
continue
if not self.is_ps_ks(denom[0]) and self.is_hw_ks:
continue # skip denoms on hw keystore
if denom_value is None:
denom_value = denom[1]
elif denom[1] != denom_value: # skip other denom values
continue
inputs.append(outpoint)
txids.append(txid)
icnt += 1
if not inputs:
self.logger.debug(f'No suitable denoms to mix:'
f' denom_value={denom_value}')
return None, None
else:
return inputs, denom_value
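    # Reserve the selected denoms and matching output addresses under a
    # new PSDenominateWorkflow, respecting the max_sessions limit.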
def _start_denominate_wfl(self, denom_value=None):
if self.active_denominate_wfl_cnt >= self.max_sessions:
return
selected_inputs, denom_value = self._select_denoms_to_mix(denom_value)
if not selected_inputs:
return
with self.denominate_wfl_lock, self.denoms_lock:
if self.active_denominate_wfl_cnt >= self.max_sessions:
return
icnt = 0
inputs = []
input_addrs = []
w = self.wallet
for outpoint in selected_inputs:
denom = w.db.get_ps_denom(outpoint)
if not denom:
continue # already spent
if w.db.get_ps_spending_denom(outpoint):
continue # already used by other wfl
if self.is_hw_ks and not self.is_ps_ks(denom[0]):
continue # skip denoms from hardware keystore
inputs.append(outpoint)
input_addrs.append(denom[0])
icnt += 1
if icnt < 1:
self.logger.debug(f'No suitable denoms to mix after'
f' denoms_lock: denom_value={denom_value}')
return
uuid = str(uuid4())
wfl = PSDenominateWorkflow(uuid=uuid)
wfl.inputs = inputs
wfl.denom = denom_value
self.set_denominate_wfl(wfl)
for outpoint in inputs:
self.add_ps_spending_denom(outpoint, wfl.uuid)
            # check input addresses are in keypairs if the keypairs cache is available
if self._keypairs_cache:
not_found_addrs = self._find_addrs_not_in_keypairs(input_addrs)
if not_found_addrs:
not_found_addrs = ', '.join(list(not_found_addrs))
                    raise NotFoundInKeypairs(f'Input addresses are not found'
                                             f' in the keypairs cache:'
                                             f' {not_found_addrs}')
output_addrs = []
found_outpoints = []
for addr, data in w.db.get_ps_reserved().items():
if data in inputs:
output_addrs.append(addr)
found_outpoints.append(data)
for outpoint in inputs:
if outpoint not in found_outpoints:
force_main_ks = False
if self.is_hw_ks:
denom = w.db.get_ps_denom(outpoint)
if denom[2] == self.mix_rounds - 1:
force_main_ks = True
reserved = self.reserve_addresses(1, data=outpoint,
force_main_ks=force_main_ks)
output_addrs.append(reserved[0])
with self.denominate_wfl_lock:
saved = self.get_denominate_wfl(wfl.uuid)
if not saved:
raise Exception(f'denominate_wfl {wfl.lid} not found')
wfl = saved
wfl.outputs = output_addrs
self.set_denominate_wfl(saved)
self.logger.info(f'Created denominate workflow: {wfl.lid}, with inputs'
f' value {wfl.denom}, count {len(wfl.inputs)}')
return wfl
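    # Sign our inputs of the final mixing tx and return them as CTxIn
    # entries ready to be sent in the dss message.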
def _sign_inputs(self, tx, inputs):
signed_inputs = []
tx = self._sign_denominate_tx(tx)
for i in tx.inputs():
if i.prevout.to_str() not in inputs:
continue
signed_inputs.append(CTxIn(i.prevout.txid[::-1], i.prevout.out_idx,
i.script_sig, i.nsequence))
return signed_inputs
def _sign_denominate_tx(self, tx):
mine_txins_cnt = 0
for txin in tx.inputs():
self.wallet.add_input_info(txin)
if txin.address is None:
continue
mine_txins_cnt += 1
self.sign_transaction(tx, None, mine_txins_cnt)
return tx
async def cleanup_denominate_wfl(self, wfl):
_cleanup = self._cleanup_denominate_wfl
changed = await self.loop.run_in_executor(None, _cleanup, wfl)
if changed:
self.wallet.save_db()
def _cleanup_denominate_wfl(self, wfl):
with self.denominate_wfl_lock:
saved = self.get_denominate_wfl(wfl.uuid)
if not saved: # already processed from _add_ps_data
return
else:
wfl = saved
completed = wfl.completed
if completed:
now = time.time()
if now - wfl.completed <= self.wait_for_mn_txs_time:
return
w = self.wallet
for outpoint, uuid in list(w.db.get_ps_spending_denoms().items()):
if uuid != wfl.uuid:
continue
with self.denoms_lock:
self.pop_ps_spending_denom(outpoint)
with self.denominate_wfl_lock:
self.clear_denominate_wfl(wfl.uuid)
self.logger.info(f'Cleaned up denominate workflow: {wfl.lid}')
return True
def _search_denominate_wfl(self, txid, tx):
err = self._check_denominate_tx_err(txid, tx, full_check=False)
if not err:
for uuid in self.denominate_wfl_list:
wfl = self.get_denominate_wfl(uuid)
if not wfl or not wfl.completed:
continue
if self._check_denominate_tx_io_on_wfl(txid, tx, wfl):
return wfl
def _check_on_denominate_wfl(self, txid, tx):
wfl = self._search_denominate_wfl(txid, tx)
err = self._check_denominate_tx_err(txid, tx)
if not err:
return True
if wfl:
raise AddPSDataError(f'{err}')
else:
return False
def _process_by_denominate_wfl(self, txid, tx):
wfl = self._search_denominate_wfl(txid, tx)
if not wfl:
return
w = self.wallet
for outpoint, uuid in list(w.db.get_ps_spending_denoms().items()):
if uuid != wfl.uuid:
continue
with self.denoms_lock:
self.pop_ps_spending_denom(outpoint)
with self.denominate_wfl_lock:
self.clear_denominate_wfl(wfl.uuid)
self.logger.wfl_done(f'Finished processing of denominate'
f' workflow: {wfl.lid} with tx: {txid}')
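    # Summarize a tx workflow: tx type, tx count, total amount and total fee.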
def get_workflow_tx_info(self, wfl):
w = self.wallet
tx_cnt = len(wfl.tx_order)
tx_type = None if not tx_cnt else wfl.tx_data[wfl.tx_order[0]].tx_type
total = 0
total_fee = 0
for txid in wfl.tx_order:
tx = Transaction(wfl.tx_data[txid].raw_tx)
tx_info = w.get_tx_info(tx)
total += tx_info.amount
total_fee += tx_info.fee
        return tx_type, tx_cnt, total, total_fee
zlib_decompressor.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use gio_sys;
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use glib::StaticType;
use glib::Value;
use glib_sys;
use gobject_sys;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
use Converter;
use FileInfo;
use ZlibCompressorFormat;
glib_wrapper! {
pub struct ZlibDecompressor(Object<gio_sys::GZlibDecompressor, gio_sys::GZlibDecompressorClass, ZlibDecompressorClass>) @implements Converter;
match fn {
get_type => || gio_sys::g_zlib_decompressor_get_type(),
}
}
impl ZlibDecompressor {
pub fn new(format: ZlibCompressorFormat) -> ZlibDecompressor {
unsafe { from_glib_full(gio_sys::g_zlib_decompressor_new(format.to_glib())) }
}
}
pub const NONE_ZLIB_DECOMPRESSOR: Option<&ZlibDecompressor> = None;
pub trait ZlibDecompressorExt: 'static {
fn get_file_info(&self) -> Option<FileInfo>;
fn get_property_format(&self) -> ZlibCompressorFormat;
fn connect_property_file_info_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<ZlibDecompressor>> ZlibDecompressorExt for O {
fn get_file_info(&self) -> Option<FileInfo> {
unsafe {
from_glib_none(gio_sys::g_zlib_decompressor_get_file_info(
self.as_ref().to_glib_none().0,
))
}
}
fn get_property_format(&self) -> ZlibCompressorFormat {
unsafe {
let mut value = Value::from_type(<ZlibCompressorFormat as StaticType>::static_type());
gobject_sys::g_object_get_property(
self.to_glib_none().0 as *mut gobject_sys::GObject,
b"format\0".as_ptr() as *const _,
value.to_glib_none_mut().0,
);
value
.get()
.expect("Return Value for property `format` getter")
.unwrap()
}
}
fn connect_property_file_info_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_file_info_trampoline<P, F: Fn(&P) + 'static>(
this: *mut gio_sys::GZlibDecompressor,
_param_spec: glib_sys::gpointer,
f: glib_sys::gpointer,
) where
P: IsA<ZlibDecompressor>,
{
let f: &F = &*(f as *const F);
f(&ZlibDecompressor::from_glib_borrow(this).unsafe_cast())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::file-info\0".as_ptr() as *const _,
Some(transmute(notify_file_info_trampoline::<Self, F> as usize)),
Box_::into_raw(f),
)
}
}
}
impl fmt::Display for ZlibDecompressor {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "ZlibDecompressor")
    }
}