consts.rs
pub use crate::arch::consts::*;
pub use crate::memory::PAGE_SIZE;
pub use crate::percpu::PER_CPU_SIZE;

pub const HV_BASE: usize = 0xffff_ff00_0000_0000;

pub const TEMP_MAPPING_BASE: usize = 0x0000_0080_0000_0000;
pub const NUM_TEMP_PAGES: usize = 16;
pub const LOCAL_PER_CPU_BASE: usize = TEMP_MAPPING_BASE + NUM_TEMP_PAGES * PAGE_SIZE;
netns.go
// Package netns allows ultra-simple network namespace handling. NsHandles
// can be retrieved and set. Note that the current namespace is thread
// local so actions that set and reset namespaces should use LockOSThread
// to make sure the namespace doesn't change due to a goroutine switch.
// It is best to close NsHandles when you are done with them. This can be
// accomplished via a `defer ns.Close()` on the handle. Changing namespaces
// requires elevated privileges, so in most cases this code needs to be run
// as root.
package netns

import (
	"fmt"
	"syscall"
)

// NsHandle is a handle to a network namespace. It can be cast directly
// to an int and used as a file descriptor.
type NsHandle int

// Equal determines if two network handles refer to the same network
// namespace. This is done by comparing the device and inode that the
// file descriptors point to.
func (ns NsHandle) Equal(other NsHandle) bool {
	if ns == other {
		return true
	}
	var s1, s2 syscall.Stat_t
	if err := syscall.Fstat(int(ns), &s1); err != nil {
		return false
	}
	if err := syscall.Fstat(int(other), &s2); err != nil {
		return false
	}
	return (s1.Dev == s2.Dev) && (s1.Ino == s2.Ino)
}

// String shows the file descriptor number and its dev and inode.
func (ns NsHandle) String() string {
	var s syscall.Stat_t
	if ns == -1 {
		return "NS(None)"
	}
	if err := syscall.Fstat(int(ns), &s); err != nil {
		return fmt.Sprintf("NS(%d: unknown)", ns)
	}
	return fmt.Sprintf("NS(%d: %d, %d)", ns, s.Dev, s.Ino)
}

// UniqueId returns a string which uniquely identifies the namespace
// associated with the network handle.
func (ns NsHandle) UniqueId() string {
	var s syscall.Stat_t
	if ns == -1 {
		return "NS(none)"
	}
	if err := syscall.Fstat(int(ns), &s); err != nil {
		return "NS(unknown)"
	}
	return fmt.Sprintf("NS(%d:%d)", s.Dev, s.Ino)
}

// IsOpen returns true if Close() has not been called.
func (ns NsHandle) IsOpen() bool {
	return ns != -1
}

// Close closes the NsHandle and resets its file descriptor to -1.
// It is not safe to use an NsHandle after Close() is called.
func (ns *NsHandle) Close() error {
	if err := syscall.Close(int(*ns)); err != nil {
		return err
	}
	(*ns) = -1
	return nil
}

// None gets an empty (closed) NsHandle.
func None() NsHandle {
	return NsHandle(-1)
}
callbacks.py
""" Contains custom callbacks. """ from constants import minimum_scores, maximum_scores import constants import datetime import json from keras.callbacks import Callback, ModelCheckpoint import numpy as np import os from sklearn.metrics import cohen_kappa_score from util import process_data, create_folder class QWKScore(Callback): def __init__(self, essays, save_to_file=True, print_to_screen=True): super() self.essays = essays self.save_to_file = save_to_file self.print_to_screen = print_to_screen def on_epoch_end(self, epoch, logs={}): # for each essay set calculate the QWK scores
number_essays = [] if self.print_to_screen: print("\nQWK Scores") for essay_set in range(1, 9): essays_in_set = self.essays[self.essays['essay_set'] == essay_set] X, y = process_data(essays_in_set) y_true = essays_in_set['domain1_score'].values normalised_prediction = self.model.predict(X) normalised_prediction = np.array(normalised_prediction) y_pred = np.around((normalised_prediction * (maximum_scores[essay_set] - minimum_scores[essay_set])) + minimum_scores[essay_set]) qwk_score = cohen_kappa_score(y_true, y_pred, weights='quadratic') qwk_scores.append(qwk_score) number_essays.append(len(essays_in_set)) if self.print_to_screen: print("Set {}: {:.2f}".format(essay_set, qwk_score), end=' ') qwk_scores = np.array(qwk_scores) number_essays = np.array(number_essays) weighted_qwk_score = np.sum(qwk_scores * number_essays) / np.sum(number_essays) if self.print_to_screen: print('\nWeighted QWK score: {:.2f}'.format(weighted_qwk_score)) if self.save_to_file: summary = "Epoch " + str(epoch + 1) log_values = "\n" for key, value in logs.items(): log_values += "{}: {:.4f} ".format(key, value) individual_qwk_scores = "\n" for essay_set in range(8): individual_qwk_scores += "Set {}: {:.2f} ".format(essay_set + 1, qwk_scores[essay_set]) summary = summary + log_values + individual_qwk_scores summary += '\nWeighted QWK score: {:.2f}'.format(weighted_qwk_score) summary += '\n\n' with open(os.path.join(constants.SAVE_DIR, "scores.txt"), "a") as f: f.write(summary) class SaveModel(ModelCheckpoint): """ Wrapper of Model Checkpoint class. """ def __init__(self, directory, filename, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1): # make folder with the current time as name now = datetime.datetime.now() current_time = "{}_{}_{}_{}_{}_{}".format(now.day, now.month, now.year, now.hour, now.minute, now.second) constants.SAVE_DIR = os.path.join(directory, current_time) create_folder(constants.SAVE_DIR) ModelCheckpoint.__init__(self, os.path.join(constants.SAVE_DIR, filename), monitor=monitor, save_best_only=save_best_only, save_weights_only=save_weights_only, mode=mode, period=period) def on_train_begin(self, logs=None): # save model architecture. parsed = json.loads(self.model.to_json()) with open(os.path.join(constants.SAVE_DIR, 'model.txt'), 'w') as file: file.write(json.dumps(parsed, indent=4))
qwk_scores = []
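A sketch of how these callbacks could be wired into training (not from the source: model, X_train, y_train, and the train_essays DataFrame are assumptions for illustration):

# Illustrative wiring; `model`, `X_train`, `y_train` and `train_essays`
# (a DataFrame with 'essay_set' and 'domain1_score' columns) are assumed.
callbacks = [
    SaveModel(directory="checkpoints", filename="weights.{epoch:02d}.hdf5"),
    QWKScore(train_essays),
]
model.fit(X_train, y_train, epochs=10, callbacks=callbacks)

SaveModel must be constructed before training so constants.SAVE_DIR points at the timestamped run folder that QWKScore appends its scores to.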
state.rs
/*
 * Copyright 2018 Bitwise IO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * -----------------------------------------------------------------------------
 */

use crypto::digest::Digest;
use crypto::sha2::Sha512;
use handler::game::Game;
use sawtooth_sdk::processor::handler::ApplyError;
use sawtooth_sdk::processor::handler::TransactionContext;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::str::from_utf8;

pub fn get_xo_prefix() -> String {
    let mut sha = Sha512::new();
    sha.input_str("xo");
    sha.result_str()[..6].to_string()
}

pub struct XoState<'a> {
    context: &'a mut TransactionContext,
    address_map: HashMap<String, Option<String>>,
}

impl<'a> XoState<'a> {
    pub fn new(context: &'a mut TransactionContext) -> XoState {
        XoState {
            context,
            address_map: HashMap::new(),
        }
    }

    fn calculate_address(name: &str) -> String {
        let mut sha = Sha512::new();
        sha.input_str(name);
        get_xo_prefix() + &sha.result_str()[..64].to_string()
    }

    pub fn delete_game(&mut self, game_name: &str) -> Result<(), ApplyError> {
        let mut games = self._load_games(game_name)?;
        games.remove(game_name);
        if games.is_empty() {
            self._delete_game(game_name)?;
        } else {
            self._store_game(game_name, games)?;
        }
        Ok(())
    }

    pub fn set_game(&mut self, game_name: &str, g: Game) -> Result<(), ApplyError> {
        let mut games = self._load_games(game_name)?;
        games.insert(game_name.to_string(), g);
        self._store_game(game_name, games)?;
        Ok(())
    }

    pub fn get_game(&mut self, game_name: &str) -> Result<Option<Game>, ApplyError> {
        let games = self._load_games(game_name)?;
        if games.contains_key(game_name) {
            Ok(Some(games[game_name].clone()))
        } else {
            Ok(None)
        }
    }

    fn _store_game(
        &mut self,
        game_name: &str,
        games: HashMap<String, Game>,
    ) -> Result<(), ApplyError> {
        let address = XoState::calculate_address(game_name);
        let state_string = Game::serialize_games(games);
        self.address_map
            .insert(address.clone(), Some(state_string.clone()));
        let mut sets = HashMap::new();
        sets.insert(address, state_string.into_bytes());
        self.context.set_state(sets)?;
        Ok(())
    }

    fn _delete_game(&mut self, game_name: &str) -> Result<(), ApplyError> {
        let address = XoState::calculate_address(game_name);
        if self.address_map.contains_key(&address) {
            self.address_map.insert(address.clone(), None);
        }
        self.context.delete_state(vec![address])?;
        Ok(())
    }

    fn _load_games(&mut self, game_name: &str) -> Result<HashMap<String, Game>, ApplyError> {
        let address = XoState::calculate_address(game_name);
        Ok(match self.address_map.entry(address.clone()) {
            Entry::Occupied(entry) => match entry.get() {
                Some(addr) => Game::deserialize_games(addr).ok_or_else(|| {
                    ApplyError::InvalidTransaction("Invalid serialization of game state".into())
                })?,
                None => HashMap::new(),
            },
            Entry::Vacant(entry) => match self.context.get_state(vec![address])? {
                Some(state_bytes) => {
                    let state_string = from_utf8(&state_bytes).map_err(|e| {
                        ApplyError::InvalidTransaction(format!(
                            "Invalid serialization of game state: {}",
                            e
                        ))
                    })?;
                    entry.insert(Some(state_string.to_string()));
                    Game::deserialize_games(state_string).ok_or_else(|| {
                        ApplyError::InvalidTransaction("Invalid serialization of game state".into())
                    })?
                }
                None => {
                    entry.insert(None);
                    HashMap::new()
                }
            },
        })
    }
}
phone-type-formatter.bb.js
!function(){var aa=this;function h(a,c){var b=a.split("."),d=aa;b[0]in d||!d.execScript||d.execScript("var "+b[0]);for(var e;b.length&&(e=b.shift());)b.length||void 0===c?d[e]?d=d[e]:d=d[e]={}:d[e]=c}function l(a,c){function b(){}b.prototype=c.prototype;a.M=c.prototype;a.prototype=new b;a.prototype.constructor=a;a.N=function(a,b,f){for(var g=Array(arguments.length-2),k=2;k<arguments.length;k++)g[k-2]=arguments[k];return c.prototype[b].apply(a,g)}};function n(a,c){null!=a&&this.a.apply(this,arguments)}n.prototype.b="";n.prototype.set=function(a){this.b=""+a};n.prototype.a=function(a,c,b){this.b+=String(a);if(null!=c)for(var d=1;d<arguments.length;d++)this.b+=arguments[d];return this};function p(a){a.b=""}n.prototype.toString=function(){return this.b};function ba(a,c){a.sort(c||ca)}function ca(a,c){return a>c?1:a<c?-1:0};function da(a){var c=[],b=0,d;for(d in a)c[b++]=a[d];return c};function ea(a,c){this.b=a;this.a={};for(var b=0;b<c.length;b++){var d=c[b];this.a[d.b]=d}}function fa(a){a=da(a.a);ba(a,function(a,b){return a.b-b.b});return a};function ga(a,c){this.b=a;this.g=!!c.G;this.a=c.c;this.j=c.type;this.h=!1;switch(this.a){case ha:case ia:case ja:case ka:case la:case ma:case na:this.h=!0}this.f=c.defaultValue}var na=1,ma=2,ha=3,ia=4,ja=6,ka=16,la=18;function q(){this.a={};this.f=this.i().a;this.b=this.g=null}q.prototype.set=function(a,c){r(this,a.b,c)};function t(a,c){for(var b=fa(a.i()),d=0;d<b.length;d++){var e=b[d],f=e.b;if(null!=c.a[f]){a.b&&delete a.b[e.b];var g=11==e.a||10==e.a;if(e.g)for(var e=v(c,f)||[],k=0;k<e.length;k++){var m=a,u=f,sa=g?e[k].clone():e[k];m.a[u]||(m.a[u]=[]);m.a[u].push(sa);m.b&&delete m.b[u]}else e=v(c,f),g?(g=v(a,f))?t(g,e):r(a,f,e.clone()):r(a,f,e)}}} q.prototype.clone=function(){var a=new this.constructor;a!=this&&(a.a={},a.b&&(a.b={}),t(a,this));return a};function v(a,c){var b=a.a[c];if(null==b)return null;if(a.g){if(!(c in a.b)){var d=a.g,e=a.f[c];if(null!=b)if(e.g){for(var f=[],g=0;g<b.length;g++)f[g]=d.b(e,b[g]);b=f}else b=d.b(e,b);return a.b[c]=b}return a.b[c]}return b}function w(a,c,b){var d=v(a,c);return a.f[c].g?d[b||0]:d} function x(a,c){var b;if(null!=a.a[c])b=w(a,c,void 0);else a:{b=a.f[c];if(void 0===b.f){var d=b.j;if(d===Boolean)b.f=!1;else if(d===Number)b.f=0;else if(d===String)b.f=b.h?"0":"";else{b=new d;break a}}b=b.f}return b}function y(a,c){return a.f[c].g?null!=a.a[c]?a.a[c].length:0:null!=a.a[c]?1:0}function r(a,c,b){a.a[c]=b;a.b&&(a.b[c]=b)}function z(a,c){var b=[],d;for(d in c)0!=d&&b.push(new ga(d,c[d]));return new ea(a,b)};/* Protocol Buffer 2 Copyright 2008 Google Inc. All other code copyright its respective owners. Copyright (C) 2010 The Libphonenumber Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License. */ function A(){q.call(this)}var B;l(A,q);function C(){q.call(this)}var D;l(C,q);function E(){q.call(this)}var F;l(E,q);
A.prototype.i=function(){B||(B=z(A,{0:{name:"NumberFormat",I:"i18n.phonenumbers.NumberFormat"},1:{name:"pattern",required:!0,c:9,type:String},2:{name:"format",required:!0,c:9,type:String},3:{name:"leading_digits_pattern",G:!0,c:9,type:String},4:{name:"national_prefix_formatting_rule",c:9,type:String},6:{name:"national_prefix_optional_when_formatting",c:8,type:Boolean},5:{name:"domestic_carrier_code_formatting_rule",c:9,type:String}}));return B};A.ctor=A;A.ctor.i=A.prototype.i; C.prototype.i=function(){D||(D=z(C,{0:{name:"PhoneNumberDesc",I:"i18n.phonenumbers.PhoneNumberDesc"},2:{name:"national_number_pattern",c:9,type:String},3:{name:"possible_number_pattern",c:9,type:String},6:{name:"example_number",c:9,type:String},7:{name:"national_number_matcher_data",c:12,type:String},8:{name:"possible_number_matcher_data",c:12,type:String}}));return D};C.ctor=C;C.ctor.i=C.prototype.i; E.prototype.i=function(){F||(F=z(E,{0:{name:"PhoneMetadata",I:"i18n.phonenumbers.PhoneMetadata"},1:{name:"general_desc",c:11,type:C},2:{name:"fixed_line",c:11,type:C},3:{name:"mobile",c:11,type:C},4:{name:"toll_free",c:11,type:C},5:{name:"premium_rate",c:11,type:C},6:{name:"shared_cost",c:11,type:C},7:{name:"personal_number",c:11,type:C},8:{name:"voip",c:11,type:C},21:{name:"pager",c:11,type:C},25:{name:"uan",c:11,type:C},27:{name:"emergency",c:11,type:C},28:{name:"voicemail",c:11,type:C},24:{name:"no_international_dialling", c:11,type:C},9:{name:"id",required:!0,c:9,type:String},10:{name:"country_code",c:5,type:Number},11:{name:"international_prefix",c:9,type:String},17:{name:"preferred_international_prefix",c:9,type:String},12:{name:"national_prefix",c:9,type:String},13:{name:"preferred_extn_prefix",c:9,type:String},15:{name:"national_prefix_for_parsing",c:9,type:String},16:{name:"national_prefix_transform_rule",c:9,type:String},18:{name:"same_mobile_and_fixed_line_pattern",c:8,defaultValue:!1,type:Boolean},19:{name:"number_format", G:!0,c:11,type:A},20:{name:"intl_number_format",G:!0,c:11,type:A},22:{name:"main_country_for_code",c:8,defaultValue:!1,type:Boolean},23:{name:"leading_digits",c:9,type:String},26:{name:"leading_zero_possible",c:8,defaultValue:!1,type:Boolean}}));return F};E.ctor=E;E.ctor.i=E.prototype.i;function G(){}G.prototype.a=function(a){new a.b;throw Error("Unimplemented");};G.prototype.b=function(a,c){if(11==a.a||10==a.a)return c instanceof q?c:this.a(a.j.prototype.i(),c);if(14==a.a){if("string"==typeof c&&H.test(c)){var b=Number(c);if(0<b)return b}return c}if(!a.h)return c;b=a.j;if(b===String){if("number"==typeof c)return String(c)}else if(b===Number&&"string"==typeof c&&("Infinity"===c||"-Infinity"===c||"NaN"===c||H.test(c)))return Number(c);return c};var H=/^-?[0-9]+$/;function I(){}l(I,G);I.prototype.a=function(a,c){var b=new a.b;b.g=this;b.a=c;b.b={};return b};function J(){}l(J,I);J.prototype.b=function(a,c){return 8==a.a?!!c:G.prototype.b.apply(this,arguments)};J.prototype.a=function(a,c){return J.M.a.call(this,a,c)};/* Copyright (C) 2010 The Libphonenumber Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ var K={1:"US AG AI AS BB BM BS CA DM DO GD GU JM KN KY LC MP MS PR SX TC TT VC VG VI".split(" ")},L={BB:[null,[null,null,"[2589]\\d{9}","\\d{7}(?:\\d{3})?"],[null,null,"246(?:2(?:2[78]|7[0-4])|4(?:1[024-6]|2\\d|3[2-9])|5(?:20|[34]\\d|54|7[1-3])|6(?:2\\d|38)|7(?:37|57)|9(?:1[89]|63))\\d{4}","\\d{7}(?:\\d{3})?",null,null,"2464123456"],[null,null,"246(?:2(?:[356]\\d|4[0-57-9]|8[0-79])|45\\d|8(?:[2-5]\\d|83))\\d{4}","\\d{10}",null,null,"2462501234"],[null,null,"8(?:00|44|55|66|77|88)[2-9]\\d{6}","\\d{10}", null,null,"8002123456"],[null,null,"900\\d{7}|246976\\d{4}","\\d{10}",null,null,"9002123456"],[null,null,"NA","NA"],[null,null,"5(?:00|33|44|66|77|88)[2-9]\\d{6}","\\d{10}",null,null,"5002345678"],[null,null,"24631\\d{5}","\\d{10}",null,null,"2463101234"],"BB",1,"011","1",null,null,"1",null,null,null,null,null,[null,null,"NA","NA"],null,"246",[null,null,"NA","NA"],[null,null,"246(?:292|41[7-9]|43[01])\\d{4}","\\d{10}",null,null,"2464301234"],null,null,[null,null,"NA","NA"]]};/* Copyright (C) 2010 The Libphonenumber Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ function M(){this.a={}}M.b=function(){return M.a?M.a:M.a=new M}; var oa={0:"0",1:"1",2:"2",3:"3",4:"4",5:"5",6:"6",7:"7",8:"8",9:"9","\uff10":"0","\uff11":"1","\uff12":"2","\uff13":"3","\uff14":"4","\uff15":"5","\uff16":"6","\uff17":"7","\uff18":"8","\uff19":"9","\u0660":"0","\u0661":"1","\u0662":"2","\u0663":"3","\u0664":"4","\u0665":"5","\u0666":"6","\u0667":"7","\u0668":"8","\u0669":"9","\u06f0":"0","\u06f1":"1","\u06f2":"2","\u06f3":"3","\u06f4":"4","\u06f5":"5","\u06f6":"6","\u06f7":"7","\u06f8":"8","\u06f9":"9"},pa=RegExp("[+\uff0b]+"),qa=RegExp("([0-9\uff10-\uff19\u0660-\u0669\u06f0-\u06f9])"), ra=/^\(?\$1\)?$/;function N(a,c){if(null==c)return null;c=c.toUpperCase();var b=a.a[c];if(null==b){b=L[c];if(null==b)return null;b=(new J).a(E.i(),b);a.a[c]=b}return b}function O(a){a=K[a];return null==a?"ZZ":a[0]};function P(a){this.H=RegExp("\u2008");this.B="";this.m=new n;this.v="";this.h=new n;this.u=new n;this.j=!0;this.w=this.o=this.D=!1;this.F=M.b();this.s=0;this.b=new n;this.A=!1;this.l="";this.a=new n;this.f=[];this.C=a;this.J=this.g=Q(this,this.C)}var R=new E;r(R,11,"NA"); var ta=/\[([^\[\]])*\]/g,ua=/\d(?=[^,}][^,}])/g,va=RegExp("^[-x\u2010-\u2015\u2212\u30fc\uff0d-\uff0f \u00a0\u00ad\u200b\u2060\u3000()\uff08\uff09\uff3b\uff3d.\\[\\]/~\u2053\u223c\uff5e]*(\\$\\d[-x\u2010-\u2015\u2212\u30fc\uff0d-\uff0f \u00a0\u00ad\u200b\u2060\u3000()\uff08\uff09\uff3b\uff3d.\\[\\]/~\u2053\u223c\uff5e]*)+$"),S=/[- ]/; function Q(a,c){var b;if(null!=c&&isNaN(c)&&c.toUpperCase()in L){b=N(a.F,c);if(null==b)throw"Invalid region code: "+c;b=x(b,10)}else b=0;b=N(a.F,O(b));return null!=b?b:R} function T(a){for(var c=a.f.length,b=0;b<c;++b){var d=a.f[b],e=x(d,1);if(a.v==e)return!1;var f;f=a;var g=d,k=x(g,1);if(-1!=k.indexOf("|"))f=!1;else{k=k.replace(ta,"\\d");k=k.replace(ua,"\\d");p(f.m);var m;m=f;var g=x(g,2),u="999999999999999".match(k)[0];u.length<m.a.b.length?m="":(m=u.replace(new 
RegExp(k,"g"),g),m=m.replace(RegExp("9","g"),"\u2008"));0<m.length?(f.m.a(m),f=!0):f=!1}if(f)return a.v=e,a.A=S.test(w(d,4)),a.s=0,!0}return a.j=!1} function U(a,c){for(var b=[],d=c.length-3,e=a.f.length,f=0;f<e;++f){var g=a.f[f];0==y(g,3)?b.push(a.f[f]):(g=w(g,3,Math.min(d,y(g,3)-1)),0==c.search(g)&&b.push(a.f[f]))}a.f=b}P.prototype.K=function(){this.B="";p(this.h);p(this.u);p(this.m);this.s=0;this.v="";p(this.b);this.l="";p(this.a);this.j=!0;this.w=this.o=this.D=!1;this.f=[];this.A=!1;this.g!=this.J&&(this.g=Q(this,this.C))};P.prototype.L=function(a){return this.B=wa(this,a)}; function wa(a,c){a.h.a(c);var b=c;if(qa.test(b)||1==a.h.b.length&&pa.test(b)){var b=c,d;"+"==b?(d=b,a.u.a(b)):(d=oa[b],a.u.a(d),a.a.a(d));c=d}else a.j=!1,a.D=!0;if(!a.j){if(!a.D)if(V(a)){if(W(a))return X(a)}else if(0<a.l.length&&(b=a.a.toString(),p(a.a),a.a.a(a.l),a.a.a(b),b=a.b.toString(),d=b.lastIndexOf(a.l),p(a.b),a.b.a(b.substring(0,d))),a.l!=xa(a))return a.b.a(" "),X(a);return a.h.toString()}switch(a.u.b.length){case 0:case 1:case 2:return a.h.toString();case 3:if(V(a))a.w=!0;else return a.l= xa(a),Y(a);default:if(a.w)return W(a)&&(a.w=!1),a.b.toString()+a.a.toString();if(0<a.f.length){b=ya(a,c);d=za(a);if(0<d.length)return d;U(a,a.a.toString());return T(a)?Aa(a):a.j?Z(a,b):a.h.toString()}return Y(a)}}function X(a){a.j=!0;a.w=!1;a.f=[];a.s=0;p(a.m);a.v="";return Y(a)}function za(a){for(var c=a.a.toString(),b=a.f.length,d=0;d<b;++d){var e=a.f[d],f=x(e,1);if((new RegExp("^(?:"+f+")$")).test(c))return a.A=S.test(w(e,4)),c=c.replace(new RegExp(f,"g"),w(e,2)),Z(a,c)}return""} function Z(a,c){var b=a.b.b.length;return a.A&&0<b&&" "!=a.b.toString().charAt(b-1)?a.b+" "+c:a.b+c}function Y(a){var c=a.a.toString();if(3<=c.length){for(var b=a.o&&0<y(a.g,20)?v(a.g,20)||[]:v(a.g,19)||[],d=b.length,e=0;e<d;++e){var f=b[e],g;(g=null==a.g.a[12]||a.o||w(f,6))||(g=x(f,4),g=0==g.length||ra.test(g));g&&va.test(x(f,2))&&a.f.push(f)}U(a,c);c=za(a);return 0<c.length?c:T(a)?Aa(a):a.h.toString()}return Z(a,c)} function Aa(a){var c=a.a.toString(),b=c.length;if(0<b){for(var d="",e=0;e<b;e++)d=ya(a,c.charAt(e));return a.j?Z(a,d):a.h.toString()}return a.b.toString()} function xa(a){var c=a.a.toString(),b=0,d;1!=w(a.g,10)?d=!1:(d=a.a.toString(),d="1"==d.charAt(0)&&"0"!=d.charAt(1)&&"1"!=d.charAt(1));d?(b=1,a.b.a("1").a(" "),a.o=!0):null!=a.g.a[15]&&(d=new RegExp("^(?:"+w(a.g,15)+")"),d=c.match(d),null!=d&&null!=d[0]&&0<d[0].length&&(a.o=!0,b=d[0].length,a.b.a(c.substring(0,b))));p(a.a);a.a.a(c.substring(b));return c.substring(0,b)} function V(a){var c=a.u.toString(),b=new RegExp("^(?:\\+|"+w(a.g,11)+")"),b=c.match(b);return null!=b&&null!=b[0]&&0<b[0].length?(a.o=!0,b=b[0].length,p(a.a),a.a.a(c.substring(b)),p(a.b),a.b.a(c.substring(0,b)),"+"!=c.charAt(0)&&a.b.a(" "),!0):!1} function W(a){if(0==a.a.b.length)return!1;var c=new n,b;a:{b=a.a.toString();if(0!=b.length&&"0"!=b.charAt(0))for(var d,e=b.length,f=1;3>=f&&f<=e;++f)if(d=parseInt(b.substring(0,f),10),d in K){c.a(b.substring(f));b=d;break a}b=0}if(0==b)return!1;p(a.a);a.a.a(c.toString());c=O(b);"001"==c?a.g=N(a.F,""+b):c!=a.C&&(a.g=Q(a,c));a.b.a(""+b).a(" ");a.l="";return!0} function ya(a,c){var b=a.m.toString();if(0<=b.substring(a.s).search(a.H)){var d=b.search(a.H),b=b.replace(a.H,c);p(a.m);a.m.a(b);a.s=d;return b.substring(0,a.s+1)}1==a.f.length&&(a.j=!1);a.v="";return a.h.toString()};h("Cleave.AsYouTypeFormatter",P);h("Cleave.AsYouTypeFormatter.prototype.inputDigit",P.prototype.L);h("Cleave.AsYouTypeFormatter.prototype.clear",P.prototype.K);}.call((typeof 
global==="object"&&global)?global:window);
rodeo_resolver.rs
//! Implementations of [`Resolver`] for [`RodeoResolver`]

use crate::{Key, Resolver, RodeoResolver};

impl<K> Resolver<K> for RodeoResolver<K>
where
    K: Key,
{
    #[cfg_attr(feature = "inline-more", inline)]
    fn resolve<'a>(&'a self, key: &K) -> &'a str {
        self.resolve(key)
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn try_resolve<'a>(&'a self, key: &K) -> Option<&'a str> {
        self.try_resolve(key)
    }

    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn resolve_unchecked<'a>(&'a self, key: &K) -> &'a str {
        self.resolve_unchecked(key)
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn contains_key(&self, key: &K) -> bool {
        self.contains_key(key)
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn len(&self) -> usize {
        self.len()
    }
}
imgproc.go
package main

import (
	"github.com/tomowarkar/biome"
)

func main() {
	filename := "assets/examples/imgproc.jpg"
	imgSrc := biome.ReadImage(filename)

	gray := biome.Gray(imgSrc)
	biome.ToPng("assets/tmp/imgproc_gray", gray)

	sepia := biome.Sepia(imgSrc)
	biome.ToPng("assets/tmp/imgproc_sepia", sepia)

	mosaic := biome.Shade(21, imgSrc)
	biome.ToPng("assets/tmp/imgproc_mosaic", mosaic)
}
conf.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # aiortc documentation build configuration file, created by # sphinx-quickstart on Thu Feb 8 17:22:14 2018. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) # Mock out binding class MockLib: ssrc_undefined = 0 ssrc_specific = 1 ssrc_any_inbound = 2 ssrc_any_outbound = 3 def srtp_init(self): pass class MockBinding:
    ffi = None
    lib = MockLib()
class MockOpus: ffi = None lib = None class MockVpx: ffi = None lib = None sys.modules.update({'pylibsrtp._binding': MockBinding()}) sys.modules.update({'aiortc.codecs._opus': MockOpus()}) sys.modules.update({'aiortc.codecs._vpx': MockVpx()}) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.asyncio'] autodoc_member_order = 'bysource' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'aiortc' copyright = u'2018, Jeremy Lainé' author = u'Jeremy Lainé' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '' # The full version, including alpha/beta/rc tags. release = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { 'description': 'A library for building WebRTC and ORTC applications in Python.', 'github_button': True, 'github_user': 'jlaine', 'github_repo': 'aiortc', } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars html_sidebars = { '**': [ 'about.html', 'navigation.html', 'relations.html', 'searchbox.html', ] } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'aiortcdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 
# # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'aiortc.tex', 'aiortc Documentation', u'Jeremy Lainé', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'aiortc', 'aiortc Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'aiortc', 'aiortc Documentation', author, 'aiortc', 'One line description of project.', 'Miscellaneous'), ]
hvd_utils.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections

import torch
import horovod.torch as hvd


def broadcast_optimizer_state(optimizer, root_rank):
    """
    This function is copied from the newest horovod version,
    because the newest version has to be compiled with gcc7.
    """
    if isinstance(optimizer, torch.optim.LBFGS):
        # TODO(travis): L-BFGS cannot be easily supported without serializing
        # the entire state_dict, as its structure is deeply nested and contains
        # None type parameter values
        raise ValueError('cannot broadcast torch.optim.LBFGS state')

    state_dict = optimizer.state_dict()

    # Newly created optimizers will not have their state initialized, so
    # do that initialization here
    if len(state_dict['state']) == 0:
        for group in optimizer.param_groups:
            for p in group['params']:
                p.grad = torch.autograd.Variable(
                    p.data.new(p.size()).zero_())
        optimizer.step()
        state_dict = optimizer.state_dict()

    params = []
    callbacks = {}
    occurrences = collections.defaultdict(int)

    # Some optimizer parameters may be represented as scalars instead of
    # tensors. In such cases, we need to wrap the scalar in a tensor, then
    # broadcast, then update the appropriate value in the state_dict with the
    # new unwrapped scalar value via a callback.
    def _create_callback(pid, name, t, p):
        def _from_tensor():
            state_dict['state'][pid][name] = t(p.numpy()[0])
        return _from_tensor

    # Groups are unordered, but their params will be distinct
    for group in state_dict['param_groups']:
        # The params list here is ordered by the layers in the model
        for pid in group['params']:
            if pid not in state_dict['state']:
                continue
            param_state = state_dict['state'][pid]
            for name, p in param_state.items():
                # Some parameter names may appear more than once, in which
                # case we ensure they have a unique identifier defined by
                # their order
                occurrences[name] += 1
                key = '%s.%d' % (str(name), occurrences[name])

                if not torch.is_tensor(p):
                    # Wrap the scalar in a FloatTensor, and remember its type
                    # so we can cast it back after unwrapping
                    t = type(p)
                    p = torch.Tensor([p])
                    callbacks[key] = _create_callback(pid, name, t, p)

                params.append((key, p))

    # Synchronized broadcast of all parameters
    hvd.broadcast_parameters(params, root_rank)

    # Post-broadcast cleanup for non-tensor parameters
    for key, p in params:
        if key in callbacks:
            callbacks[key]()
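A sketch of where this helper fits in a Horovod training script (the model and optimizer below are illustrative assumptions, not from the source):

# Illustrative usage; model and dataset setup omitted.
import torch
import horovod.torch as hvd

hvd.init()
model = torch.nn.Linear(10, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Make every rank start from rank 0's weights and optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
broadcast_optimizer_state(optimizer, root_rank=0)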
container.go
// Copyright 2010 The Walk Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build windows package walk import ( "unsafe" ) import ( "github.com/lxn/win" ) var ( inProgressEventsByForm = make(map[Form][]*Event) scheduledLayoutsByForm = make(map[Form][]Layout) performingScheduledLayouts bool formResizeScheduled bool ) func scheduleLayout(layout Layout) bool { events := inProgressEventsByForm[appSingleton.activeForm] if len(events) == 0 { return false } layouts := scheduledLayoutsByForm[appSingleton.activeForm] for _, l := range layouts { if l == layout { return true } } layouts = append(layouts, layout) scheduledLayoutsByForm[appSingleton.activeForm] = layouts return true } type Margins struct { HNear, VNear, HFar, VFar int } func (m Margins) isZero() bool { return m.HNear == 0 && m.HFar == 0 && m.VNear == 0 && m.VFar == 0 } type Layout interface { Container() Container SetContainer(value Container) Margins() Margins SetMargins(value Margins) error Spacing() int SetSpacing(value int) error LayoutFlags() LayoutFlags MinSize() Size Update(reset bool) error } func shouldLayoutWidget(widget Widget) bool
{ if widget == nil { return false } _, isSpacer := widget.(*Spacer) return isSpacer || widget.AsWindowBase().visible || widget.AlwaysConsumeSpace() }
func DescendantByName(container Container, name string) Widget { var widget Widget walkDescendants(container.AsContainerBase(), func(w Window) bool { if w.Name() == name { widget = w.(Widget) return false } return true }) if widget == nil { return nil } return widget } type Container interface { Window AsContainerBase() *ContainerBase Children() *WidgetList Layout() Layout SetLayout(value Layout) error DataBinder() *DataBinder SetDataBinder(dbm *DataBinder) } type ContainerBase struct { WidgetBase layout Layout children *WidgetList dataBinder *DataBinder persistent bool } func (cb *ContainerBase) AsWidgetBase() *WidgetBase { return &cb.WidgetBase } func (cb *ContainerBase) AsContainerBase() *ContainerBase { return cb } func (cb *ContainerBase) LayoutFlags() LayoutFlags { if cb.layout == nil { return 0 } return cb.layout.LayoutFlags() } func (cb *ContainerBase) MinSizeHint() Size { if cb.layout == nil { return Size{} } return cb.layout.MinSize() } func (cb *ContainerBase) applyEnabled(enabled bool) { cb.WidgetBase.applyEnabled(enabled) applyEnabledToDescendants(cb.window.(Widget), enabled) } func (cb *ContainerBase) applyFont(font *Font) { cb.WidgetBase.applyFont(font) applyFontToDescendants(cb.window.(Widget), font) } func (cb *ContainerBase) Children() *WidgetList { return cb.children } func (cb *ContainerBase) Layout() Layout { return cb.layout } func (cb *ContainerBase) SetLayout(value Layout) error { if cb.layout != value { if cb.layout != nil { cb.layout.SetContainer(nil) } cb.layout = value if value != nil && value.Container() != Container(cb) { value.SetContainer(cb) } } return nil } func (cb *ContainerBase) DataBinder() *DataBinder { return cb.dataBinder } func (cb *ContainerBase) SetDataBinder(db *DataBinder) { if db == cb.dataBinder { return } if cb.dataBinder != nil { cb.dataBinder.SetBoundWidgets(nil) } cb.dataBinder = db if db != nil { var boundWidgets []Widget walkDescendants(cb.window, func(w Window) bool { if w.Handle() == cb.hWnd { return true } if c, ok := w.(Container); ok && c.DataBinder() != nil { return false } for _, prop := range w.AsWindowBase().name2Property { if _, ok := prop.Source().(string); ok { boundWidgets = append(boundWidgets, w.(Widget)) break } } return true }) db.SetBoundWidgets(boundWidgets) } } func (cb *ContainerBase) forEachPersistableChild(f func(p Persistable) error) error { if cb.children == nil { return nil } for _, child := range cb.children.items { if persistable, ok := child.(Persistable); ok && persistable.Persistent() { if err := f(persistable); err != nil { return err } } } return nil } func (cb *ContainerBase) Persistent() bool { return cb.persistent } func (cb *ContainerBase) SetPersistent(value bool) { cb.persistent = value } func (cb *ContainerBase) SaveState() error { return cb.forEachPersistableChild(func(p Persistable) error { return p.SaveState() }) } func (cb *ContainerBase) RestoreState() error { return cb.forEachPersistableChild(func(p Persistable) error { return p.RestoreState() }) } func (cb *ContainerBase) SetSuspended(suspend bool) { wasSuspended := cb.Suspended() cb.WidgetBase.SetSuspended(suspend) if !suspend && wasSuspended && cb.layout != nil { cb.layout.Update(false) } } func (cb *ContainerBase) WndProc(hwnd win.HWND, msg uint32, wParam, lParam uintptr) uintptr { switch msg { case win.WM_CTLCOLORSTATIC: if hBrush := cb.handleWMCTLCOLORSTATIC(wParam, lParam); hBrush != 0 { return hBrush } case win.WM_COMMAND: if lParam == 0 { switch win.HIWORD(uint32(wParam)) { case 0: cmdId := win.LOWORD(uint32(wParam)) switch cmdId { 
case win.IDOK, win.IDCANCEL: form := ancestor(cb) if form == nil { break } dlg, ok := form.(dialogish) if !ok { break } var button *PushButton if cmdId == win.IDOK { button = dlg.DefaultButton() } else { button = dlg.CancelButton() } if button != nil && button.Visible() && button.Enabled() { button.raiseClicked() } break } // Menu actionId := uint16(win.LOWORD(uint32(wParam))) if action, ok := actionsById[actionId]; ok { action.raiseTriggered() return 0 } case 1: // Accelerator } } else { // The window that sent the notification shall handle it itself. hWnd := win.HWND(lParam) if window := windowFromHandle(hWnd); window != nil { window.WndProc(hwnd, msg, wParam, lParam) return 0 } } case win.WM_NOTIFY: nmh := (*win.NMHDR)(unsafe.Pointer(lParam)) if window := windowFromHandle(nmh.HwndFrom); window != nil { // The window that sent the notification shall handle it itself. return window.WndProc(hwnd, msg, wParam, lParam) } case win.WM_HSCROLL, win.WM_VSCROLL: if window := windowFromHandle(win.HWND(lParam)); window != nil { // The window that sent the notification shall handle it itself. return window.WndProc(hwnd, msg, wParam, lParam) } case win.WM_SIZE, win.WM_SIZING: if cb.layout != nil { cb.layout.Update(false) } } return cb.WidgetBase.WndProc(hwnd, msg, wParam, lParam) } func (cb *ContainerBase) onInsertingWidget(index int, widget Widget) (err error) { return nil } func (cb *ContainerBase) onInsertedWidget(index int, widget Widget) (err error) { if parent := widget.Parent(); parent == nil || parent.Handle() != cb.hWnd { if err = widget.SetParent(cb.window.(Container)); err != nil { return } } if cb.layout != nil { cb.layout.Update(true) } widget.(applyFonter).applyFont(cb.Font()) return } func (cb *ContainerBase) onRemovingWidget(index int, widget Widget) (err error) { if widget.Parent() == nil { return } if widget.Parent().Handle() == cb.hWnd { err = widget.SetParent(nil) } return } func (cb *ContainerBase) onRemovedWidget(index int, widget Widget) (err error) { if cb.layout != nil { cb.layout.Update(true) } return } func (cb *ContainerBase) onClearingWidgets() (err error) { for _, widget := range cb.children.items { if widget.Parent().Handle() == cb.hWnd { if err = widget.SetParent(nil); err != nil { return } } } return } func (cb *ContainerBase) onClearedWidgets() (err error) { if cb.layout != nil { cb.layout.Update(true) } return }
rpyc_server.py
#!/usr/bin/env python3
# Foundations of Python Network Programming, Third Edition
# https://github.com/brandon-rhodes/fopnp/blob/m/py3/chapter18/rpyc_server.py
# RPyC server

import rpyc

def main():
    from rpyc.utils.server import ThreadedServer
    t = ThreadedServer(MyService, port=18861)
    t.start()

class MyService(rpyc.Service):
    def exposed_line_counter(self, fileobj, function):
        print('Client has invoked exposed_line_counter()')
        for linenum, line in enumerate(fileobj.readlines()):
            function(line)
        return linenum + 1

if __name__ == '__main__':
    main()
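For context, a matching client might look like this (a sketch; the file name and testfile.txt are assumptions, not part of the source). RPyC exposes exposed_line_counter to clients as conn.root.line_counter, and both the file object and the callback travel to the server as network references:

# rpyc_client.py -- hypothetical companion to the server above.
import rpyc

def noisy(line):
    print('Noticed a line of %d characters' % len(line))

conn = rpyc.connect('localhost', 18861)
with open('testfile.txt') as f:  # assumed local file
    count = conn.root.line_counter(f, noisy)
print('The file contained', count, 'lines')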
feature.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, """Extract feature of iter vars There are two types of feature 1) Itervar feature This feature is extracted based on loop variables. Different loop structures will result in different shapes of feature 2) Curve sample feature (relation feature) This feature is extracted by sampling relation curve. This feature is invariant of loop structure. """ import struct import numpy as np import tvm._ffi from tvm.target import Target from tvm.driver import build_module def ana_lower(sch, args, binds=None, simple_mode=True): """Do lower while keeping all axes in IR i.e. Do not eliminate loop with extent of 1, do not vectorize, unroll or inject virtual threads """ sch = sch.normalize() # Phase 0 context = tvm.transform.PassContext(config={"tir.debug_keep_trivial_loop": True}) with context: mod = build_module.schedule_to_module(sch, args, binds=binds) mod = tvm.tir.transform.StorageFlatten(64)(mod._move()) mod = tvm.tir.transform.Simplify()(mod._move()) assert simple_mode return mod["main"].body try: _get_buffer_curve_sample_flatten = tvm._ffi.get_global_func( "autotvm.feature.GetCurveSampleFeatureFlatten" ) _get_itervar_feature = tvm._ffi.get_global_func("autotvm.feature.GetItervarFeature") _get_itervar_feature_flatten = tvm._ffi.get_global_func( "autotvm.feature.GetItervarFeatureFlatten" ) except ValueError as e: def raise_error(*args, **kwargs): # pylint: disable=unused-argument raise RuntimeError("Cannot load autotvm c++ API") _get_buffer_curve_sample_flatten = ( _get_itervar_feature ) = _get_itervar_feature_flatten = raise_error def get_itervar_feature(sch, args, take_log=False): """get features of iter vars Parameters ---------- sch: tvm.te.schedule.Schedule args: Array of te.tensor.Tensor the buffer args for lower take_log: bool whether take log of numerical statics Returns ------- features of every axis in the IR, see doc/features.md for detail """ stmt = ana_lower(sch, args, simple_mode=True) feas = _get_itervar_feature(stmt, take_log) # convert tvm node to python type ret = [] for row in feas: tmp = [] tmp.append([row[0][0].value, row[0][1]]) for item in row[1:]: tmp.append([item[0].value] + [x.value for x in item[1:]]) ret.append(tmp) return ret def flatten_itervar_feature(fea): """flatten features into one-dimensional feature vectors Parameters ---------- fea: list return value of get_itervar_feature Returns ------- flatten_feature: np.ndarray one-dimensional vector """ flatten = [] for axis in fea: for pair in axis[1:]: flatten.append(pair[1:]) return np.concatenate(flatten) def get_itervar_feature_flatten(sch, args, take_log=True): """get flatten features of iter vars this is equivalent to get_itervar_feature + flatten_itervar_feature, but much faster. 
Parameters ---------- sch: tvm.te.schedule.Schedule args: Array of te.tensor.Tensor the buffer args for lower take_log: bool whether take log of numerical statics Returns ------- flatten_feature: np.ndarray one-dimensional vector """ stmt = ana_lower(sch, args, simple_mode=True) feas = _get_itervar_feature_flatten(stmt, take_log) feas = struct.unpack("%df" % (len(feas) // 4), feas) return feas def get_flatten_name(fea): """Get names of feature after flatten. Parameters ---------- fea: list or str return value of get_itervar_feature or a line of logfile Returns ------- feature_names: Array of str """ feature_name = { "_attr_": ["length", "nest_level", "topdown", "bottomup"] + ["ann_%d" % i for i in range(20)], "_arith_": ["add", "mul", "div"], "buf_touch": ["stride", "mod", "count", "reuse", "T_count", "T_reuse"], } if isinstance(fea, str): # pylint: disable=import-outside-toplevel from .record import decode # flatten line to feature line = fea ret = decode(line) if ret is None: raise ValueError("Unsupported AutoTVM log format") inp, _ = ret target = Target(inp.target) with target: s, args = inp.template.instantiate(inp.config) fea = get_itervar_feature(s, args) names = [] ct = 0 for row in fea: var_name = str(row[0][1]) for pair in row[1:]: key = pair[0] if key in feature_name: name_list = feature_name[key] else: name_list = feature_name["buf_touch"] for i in range(len((pair[1:]))): names.append(".".join(["f%d" % ct, var_name, key, name_list[i]])) ct += 1 return names def get_buffer_curve_sample_flatten(sch, args, sample_n=30): """ Get flatten curve sample feature (relation feature) Parameters ---------- sch: tvm.te.schedule.Schedule args: Array of te.tensor.Tensor the buffer args for lower sample_n: int number of sample points along one dimension Returns -------
flatten_feature: np.ndarray one-dimensional vector """ stmt = ana_lower(sch, args, simple_mode=True) feas = _get_buffer_curve_sample_flatten(stmt, sample_n, False) feas = struct.unpack("%df" % (len(feas) // 4), feas) return feas
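The docstrings above describe the extractors; a usage sketch follows (assumptions, not from the source: a small matmul schedule, a TVM build where the autotvm feature global functions are registered, and that this module is importable as tvm.autotvm.feature):

# Minimal sketch: itervar features for a 512x512 matmul schedule.
from tvm import te
from tvm.autotvm import feature  # this module, assumed import path

n = 512
A = te.placeholder((n, n), name="A")
B = te.placeholder((n, n), name="B")
k = te.reduce_axis((0, n), name="k")
C = te.compute((n, n), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C")
s = te.create_schedule(C.op)

fea = feature.get_itervar_feature(s, [A, B, C])  # nested per-axis features
flat = feature.flatten_itervar_feature(fea)      # one-dimensional numpy vector
names = feature.get_flatten_name(fea)            # human-readable feature names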
unsubscribeFromResource.go
package service

import (
	"fmt"
	"net/http"
)

func (rh *RequestHandler) UnsubscribeFromResource(w http.ResponseWriter, r *http.Request) {
	statusCode, err := rh.unsubscribe(w, r)
	if err != nil {
		logAndWriteErrorResponse(fmt.Errorf("cannot unsubscribe from resource: %w", err), statusCode, w)
	}
}
Parms.py
class Curve_Parms():
    def Curve_Parms_Paths(self):
        return [str(self.a), str(self.b), str(self.c), str(self.NFrames)]

    def Curve_Parms_Path(self):
        return "/".join(self.Curve_Parms_Paths())

    def Curve_Parms_FileName(self, cname, fname, ext="svg"):
        fnames = self.Curve_Parms_Paths()
        n = fnames.pop()
        paths = [self.BasePath, self.Name]
        fnames = [fname] + fnames + [n + "." + ext]
        fname = "-".join(fnames)
        paths.append("-".join(fnames))
        return "/".join(paths)
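Curve_Parms defines none of the attributes it reads (a, b, c, NFrames, BasePath, Name); they must come from the class that mixes it in. A hypothetical host class as a sketch:

# Hypothetical host class for illustration only.
class Curve(Curve_Parms):
    def __init__(self):
        self.a, self.b, self.c = 3, 4, 5
        self.NFrames = 120
        self.BasePath = "out"
        self.Name = "trefoil"

c = Curve()
print(c.Curve_Parms_Path())                  # 3/4/5/120
print(c.Curve_Parms_FileName("c", "frame"))  # out/trefoil/frame-3-4-5-120.svg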
sizedbuffers.go
package sizedbufferpool

import (
	"sync"
)

// SizedBufferPool is the struct that keeps the []byte sync.Pool buckets
type SizedBufferPool struct {
	pools     []sync.Pool
	base      uint // Smallest bucket size
	powerBase uint // In base 2
	n         uint // Number of pools
}

type SizedBuffer struct {
	B []byte
}

// New returns a SizedBufferPool. It allows to split []byte in
// buckets according to its size
// minSize: the smallest buffer size, for example 4096
// buckets: number of pools for different sizes, each one is twice the size of the previous one
// Actual example: sizedbufferpool.New(4096, 8)
func New(minSize uint, buckets uint) (pool *SizedBufferPool) {
	pool = &SizedBufferPool{}
	if minSize < 2 {
		minSize = 2
	}
	if buckets < 1 {
		buckets = 1
	}
	pool.base = minSize
	for minSize > 1 {
		minSize = minSize >> 1
		pool.powerBase++
	}
	// Round up when the requested minimum is not an exact power of two
	if 1<<pool.powerBase < pool.base {
		pool.powerBase++
	}
	pool.n = buckets
	pool.pools = make([]sync.Pool, pool.n)
	return
}

// Get returns a []byte of the specified size
func (p *SizedBufferPool) Get(s int) *SizedBuffer {
	i := p.index(uint(s))
	v := p.pools[i].Get()
	if v == nil {
		newCap := p.cap(i)
		if s > newCap {
			newCap = s
		}
		return &SizedBuffer{
			B: make([]byte, s, newCap),
		}
	}
	b := v.(*SizedBuffer)
	if cap(b.B) >= s {
		b.B = b.B[:s]
		return b
	}
	// The size is smaller, return it to the pool and create another one
	p.Put(b) // Put it back into the right pool
	newCap := p.cap(i)
	if s > newCap {
		newCap = s
	}
	return &SizedBuffer{
		B: make([]byte, s, newCap),
	}
}

// Put stores []bytes in its corresponding bucket
func (p *SizedBufferPool) Put(b *SizedBuffer) {
	if cap(b.B) == 0 {
		return
	}
	p.pools[p.index(uint(cap(b.B)))].Put(b)
}

func (p *SizedBufferPool) index(n uint) uint {
	n--
	n >>= p.powerBase
	idx := uint(0)
	for n > 0 {
		n >>= 1
		idx++
	}
	if idx >= p.n {
		return p.n - 1
	}
	return idx
}

func (p *SizedBufferPool) cap(i uint) int {
	return 1 << (p.powerBase + i)
}
sw-template.js
/* Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/ module.exports = `/** * Welcome to your Workbox-powered service worker!
* * You'll need to register this file in your web app and you should * disable HTTP caching for this file too. * See https://goo.gl/nhQhGp * * The rest of the code is auto-generated. Please don't update this file * directly; instead, make changes to your Workbox build configuration * and re-run your build process. * See https://goo.gl/YYPcyY */ <% if (importScripts) { %> importScripts( <%= importScripts.map(JSON.stringify).join(',\\n ') %> ); <% } %> <% if (modulePathPrefix) { %>workbox.setConfig({modulePathPrefix: <%= JSON.stringify(modulePathPrefix) %>});<% } %> <% if (cacheId) { %>workbox.core.setCacheNameDetails({prefix: <%= JSON.stringify(cacheId) %>});<% } %> <% if (skipWaiting) { %>workbox.skipWaiting();<% } %> <% if (clientsClaim) { %>workbox.clientsClaim();<% } %> <% if (Array.isArray(manifestEntries)) {%> /** * The workboxSW.precacheAndRoute() method efficiently caches and responds to * requests for URLs in the manifest. * See https://goo.gl/S9QRab */ self.__precacheManifest = <%= JSON.stringify(manifestEntries, null, 2) %>.concat(self.__precacheManifest || []); workbox.precaching.suppressWarnings(); workbox.precaching.precacheAndRoute(self.__precacheManifest, <%= precacheOptionsString %>); <% } else { %> if (Array.isArray(self.__precacheManifest)) { workbox.precaching.suppressWarnings(); workbox.precaching.precacheAndRoute(self.__precacheManifest, <%= precacheOptionsString %>); } <% } %> <% if (navigateFallback) { %>workbox.routing.registerNavigationRoute(<%= JSON.stringify(navigateFallback) %><% if (navigateFallbackWhitelist || navigateFallbackBlacklist) { %>, { <% if (navigateFallbackWhitelist) { %>whitelist: [<%= navigateFallbackWhitelist %>],<% } %> <% if (navigateFallbackBlacklist) { %>blacklist: [<%= navigateFallbackBlacklist %>],<% } %> }<% } %>);<% } %> <% if (runtimeCaching) { runtimeCaching.forEach(runtimeCachingString => {%><%= runtimeCachingString %><% });} %>`;
hc128.rs
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use buffer::{BufferResult, RefReadBuffer, RefWriteBuffer}; use cryptoutil::{read_u32_le, symm_enc_or_dec, write_u32_le}; use symmetriccipher::{Decryptor, Encryptor, SymmetricCipherError, SynchronousStreamCipher}; use std::ptr; #[derive(Copy)] pub struct Hc128 { p: [u32; 512], q: [u32; 512], cnt: usize, output: [u8; 4], output_index: usize, } impl Clone for Hc128 { fn clone(&self) -> Hc128 { *self } } impl Hc128 { pub fn
new
(key: &[u8], nonce: &[u8]) -> Hc128 { assert!(key.len() == 16); assert!(nonce.len() == 16); let mut hc128 = Hc128 { p: [0; 512], q: [0; 512], cnt: 0, output: [0; 4], output_index: 0, }; hc128.init(&key, &nonce); hc128 } fn init(&mut self, key: &[u8], nonce: &[u8]) { self.cnt = 0; let mut w: [u32; 1280] = [0; 1280]; for i in 0..16 { w[i >> 2] |= (key[i] as u32) << (8 * (i & 0x3)); } unsafe { ptr::copy_nonoverlapping(w.as_ptr(), w.as_mut_ptr().offset(4), 4); } for i in 0..nonce.len() & 16 { w[(i >> 2) + 8] |= (nonce[i] as u32) << (8 * (i & 0x3)); } unsafe { ptr::copy_nonoverlapping(w.as_ptr().offset(8), w.as_mut_ptr().offset(12), 4); } for i in 16..1280 { w[i] = f2(w[i - 2]) .wrapping_add(w[i - 7]) .wrapping_add(f1(w[i - 15])) .wrapping_add(w[i - 16]) .wrapping_add(i as u32); } // Copy contents of w into p and q unsafe { ptr::copy_nonoverlapping(w.as_ptr().offset(256), self.p.as_mut_ptr(), 512); ptr::copy_nonoverlapping(w.as_ptr().offset(768), self.q.as_mut_ptr(), 512); } for i in 0..512 { self.p[i] = self.step(); } for i in 0..512 { self.q[i] = self.step(); } self.cnt = 0; } fn step(&mut self) -> u32 { let j: usize = self.cnt & 0x1FF; // Precompute resources let dim_j3: usize = (j.wrapping_sub(3)) & 0x1FF; let dim_j10: usize = (j.wrapping_sub(10)) & 0x1FF; let dim_j511: usize = (j.wrapping_sub(511)) & 0x1FF; let dim_j12: usize = (j.wrapping_sub(12)) & 0x1FF; let ret: u32; if self.cnt < 512 { self.p[j] = self.p[j] .wrapping_add(self.p[dim_j3].rotate_right(10) ^ self.p[dim_j511].rotate_right(23)) .wrapping_add(self.p[dim_j10].rotate_right(8)); ret = (self.q[(self.p[dim_j12] & 0xFF) as usize] .wrapping_add(self.q[(((self.p[dim_j12] >> 16) & 0xFF) + 256) as usize])) ^ self.p[j]; } else { self.q[j] = self.q[j] .wrapping_add(self.q[dim_j3].rotate_left(10) ^ self.q[dim_j511].rotate_left(23)) .wrapping_add(self.q[dim_j10].rotate_left(8)); ret = (self.p[(self.q[dim_j12] & 0xFF) as usize] .wrapping_add(self.p[(((self.q[dim_j12] >> 16) & 0xFF) + 256) as usize])) ^ self.q[j]; } self.cnt = (self.cnt + 1) & 0x3FF; ret } fn next(&mut self) -> u8 { if self.output_index == 0 { let step = self.step(); write_u32_le(&mut self.output, step); } let ret = self.output[self.output_index]; self.output_index = (self.output_index + 1) & 0x3; ret } } fn f1(x: u32) -> u32 { let ret: u32 = x.rotate_right(7) ^ x.rotate_right(18) ^ (x >> 3); ret } fn f2(x: u32) -> u32 { let ret: u32 = x.rotate_right(17) ^ x.rotate_right(19) ^ (x >> 10); ret } impl SynchronousStreamCipher for Hc128 { fn process(&mut self, input: &[u8], output: &mut [u8]) { assert!(input.len() == output.len()); if input.len() <= 4 { // Process data bytewise for (inb, outb) in input.iter().zip(output.iter_mut()) { *outb = *inb ^ self.next(); } } else { let mut data_index = 0; let data_index_end = data_index + input.len(); /* Process any unused keystream (self.buffer) * remaining from previous operations */ while self.output_index > 0 && data_index < data_index_end { output[data_index] = input[data_index] ^ self.next(); data_index += 1; } /* Process input data blockwise until depleted, * or remaining length less than block size * (size of the keystream buffer, self.buffer : 4 bytes) */ while data_index + 4 <= data_index_end { let data_index_inc = data_index + 4; // Read input as le-u32 let input_u32 = read_u32_le(&input[data_index..data_index_inc]); // XOR with keystream u32 let xored = input_u32 ^ self.step(); // Write output as le-u32 write_u32_le(&mut output[data_index..data_index_inc], xored); data_index = data_index_inc; } /* Process remaining data, if 
any * (e.g. input length not divisible by 4) */ while data_index < data_index_end { output[data_index] = input[data_index] ^ self.next(); data_index += 1; } } } } impl Encryptor for Hc128 { fn encrypt( &mut self, input: &mut RefReadBuffer, output: &mut RefWriteBuffer, _: bool, ) -> Result<BufferResult, SymmetricCipherError> { symm_enc_or_dec(self, input, output) } } impl Decryptor for Hc128 { fn decrypt( &mut self, input: &mut RefReadBuffer, output: &mut RefWriteBuffer, _: bool, ) -> Result<BufferResult, SymmetricCipherError> { symm_enc_or_dec(self, input, output) } } #[cfg(test)] mod test { use hc128::Hc128; use symmetriccipher::SynchronousStreamCipher; // Vectors from http://www.ecrypt.eu.org/stream/svn/viewcvs.cgi/ecrypt/trunk/submissions/hc-256/hc-128/verified.test-vectors?rev=210&view=markup #[test] fn test_hc128_ecrypt_set_2_vector_0() { let key = hex::decode("00000000000000000000000000000000").unwrap(); let nonce = hex::decode("00000000000000000000000000000000").unwrap(); let input = [0u8; 64]; let expected_output_hex = "82001573A003FD3B7FD72FFB0EAF63AAC62F12DEB629DCA72785A66268EC758B1EDB36900560898178E0AD009ABF1F491330DC1C246E3D6CB264F6900271D59C"; let expected_output = hex::decode(expected_output_hex).unwrap(); let mut output = [0u8; 64]; let mut hc128 = Hc128::new(key.as_ref(), nonce.as_ref()); hc128.process(&input, &mut output); let result: &[u8] = output.as_ref(); let expected: &[u8] = expected_output.as_ref(); assert!(result == expected); } #[test] fn test_hc128_ecrypt_set_6_vector_1() { let key = hex::decode("0558ABFE51A4F74A9DF04396E93C8FE2").unwrap(); let nonce = hex::decode("167DE44BB21980E74EB51C83EA51B81F").unwrap(); let input = [0u8; 64]; let expected_output_hex = "4F864BF3C96D0363B1903F0739189138F6ED2BC0AF583FEEA0CEA66BA7E06E63FB28BF8B3CA0031D24ABB511C57DD17BFC2861C32400072CB680DF2E58A5CECC"; let expected_output = hex::decode(expected_output_hex).unwrap(); let mut output = [0u8; 64]; let mut hc128 = Hc128::new(key.as_ref(), nonce.as_ref()); hc128.process(&input, &mut output); let result: &[u8] = output.as_ref(); let expected: &[u8] = expected_output.as_ref(); assert!(result == expected); } #[test] fn test_hc128_ecrypt_set_6_vector_2() { let key = hex::decode("0A5DB00356A9FC4FA2F5489BEE4194E7").unwrap(); let nonce = hex::decode("1F86ED54BB2289F057BE258CF35AC128").unwrap(); let input = [0u8; 64]; let expected_output_hex = "82168AB0023B79AAF1E6B4D823855E14A7084378036A951B1CFEF35173875ED86CB66AB8410491A08582BE40080C3102193BA567F9E95D096C3CC60927DD7901"; let expected_output = hex::decode(expected_output_hex).unwrap(); let mut output = [0u8; 64]; let mut hc128 = Hc128::new(key.as_ref(), nonce.as_ref()); hc128.process(&input, &mut output); let result: &[u8] = output.as_ref(); let expected: &[u8] = expected_output.as_ref(); assert!(result == expected); } #[test] fn test_hc128_ecrypt_set_6_vector_3() { let key = hex::decode("0F62B5085BAE0154A7FA4DA0F34699EC").unwrap(); let nonce = hex::decode("288FF65DC42B92F960C72E95FC63CA31").unwrap(); let input = [0u8; 64]; let expected_output_hex = "1CD8AEDDFE52E217E835D0B7E84E2922D04B1ADBCA53C4522B1AA604C42856A90AF83E2614BCE65C0AECABDD8975B55700D6A26D52FFF0888DA38F1DE20B77B7"; let expected_output = hex::decode(expected_output_hex).unwrap(); let mut output = [0u8; 64]; let mut hc128 = Hc128::new(&key, &nonce); hc128.process(&input, &mut output); assert!(&output[..] 
== &expected_output[..]); } } #[cfg(all(test, feature = "with-bench"))] mod bench { use hc128::Hc128; use symmetriccipher::SynchronousStreamCipher; use test::Bencher; #[bench] pub fn hc128_10(bh: &mut Bencher) { let mut hc128 = Hc128::new(&[0; 16], &[0; 16]); let input = [1u8; 10]; let mut output = [0u8; 10]; bh.iter(|| { hc128.process(&input, &mut output); }); bh.bytes = input.len() as u64; } #[bench] pub fn hc128_1k(bh: &mut Bencher) { let mut hc128 = Hc128::new(&[0; 16], &[0; 16]); let input = [1u8; 1024]; let mut output = [0u8; 1024]; bh.iter(|| { hc128.process(&input, &mut output); }); bh.bytes = input.len() as u64; } #[bench] pub fn hc128_64k(bh: &mut Bencher) { let mut hc128 = Hc128::new(&[0; 16], &[0; 16]); let input = [1u8; 65536]; let mut output = [0u8; 65536]; bh.iter(|| { hc128.process(&input, &mut output); }); bh.bytes = input.len() as u64; } }
new
4Sum.py
# -*- coding:utf-8 -*- """ @author: Alden @email: [email protected] @date: 2018/4/5 @version: 1.0.0.0 """ class Solution(object): def
(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        nums = sorted(nums)
        sum_dict = dict()
        res = list()

        # Phase 1: index every pair sum -> list of index pairs [i, j] with i < j.
        for i in range(len(nums)):
            for j in range(i + 1, len(nums)):
                sum_dict.setdefault(nums[i] + nums[j], list()).append([i, j])

        # Phase 2: for each leading pair (i, j), look up the complementary pair
        # sum; duplicate leading values are skipped to avoid repeated quadruplets.
        for i in range(len(nums) - 3):
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            for j in range(i + 1, len(nums) - 2):
                if j > i + 1 and nums[j] == nums[j - 1]:
                    continue
                if target - nums[i] - nums[j] in sum_dict:
                    tmp_array = sum_dict[target - nums[i] - nums[j]]
                    first_flag = True
                    for t_index in range(len(tmp_array)):
                        # Only accept pairs whose first index comes after j; after the
                        # first match, also require a new third value for deduplication.
                        if (first_flag and tmp_array[t_index][0] > j) or (tmp_array[t_index][0] > j and nums[tmp_array[t_index][0]] != res[-1][2]):
                            t = [nums[i], nums[j]]
                            t.extend([nums[tmp_array[t_index][0]], nums[tmp_array[t_index][1]]])
                            res.append(t)
                            first_flag = False
        return res


if __name__ == "__main__":
    s = Solution()
    print s.fourSum([1, 0, -1, 0, -2, 2], 0)
    print s.fourSum([-3, -2, -1, 0, 0, 1, 2, 3], 0)
    print s.fourSum([-2, -1, 0, 0, 1, 2], 0)
fourSum
templates-search.component.ts
import { Component, EventEmitter, Output } from '@angular/core'; import { FormBuilder, FormGroup } from '@angular/forms'; import { ActivatedRoute, Router } from '@angular/router'; import { debounceTime, map, take } from 'rxjs/operators'; import { removeEmptyProperties } from '../../../../shared/utils/remove-empty-properties'; @Component({ selector: 'fb-templates-search', templateUrl: 'templates-search.component.html', }) export class TemplatesSearchComponent { @Output() valueChanges: EventEmitter<string> = new EventEmitter();
form: FormGroup = this.fb.group({ searchQuery: '', }); constructor(private route: ActivatedRoute, private router: Router, private fb: FormBuilder) { this.form.valueChanges.pipe(debounceTime(600), map(removeEmptyProperties)).subscribe((v) => { this.router.navigate([location.pathname], { queryParams: v }); this.valueChanges.emit(v.searchQuery); }); this.route.queryParams.pipe(take(1)).subscribe((v) => this.form.patchValue(v)); } }
con_graph.py
import tvm import tvm._ffi import numpy as np from functools import reduce from tvm.tensor_graph.core.utils import to_int, to_tuple, flatten_tir_graph, op_feature def make_tir_graph(fwd_graph, loss=None, optimizer=None, inference=True, need_output=True, need_grad=True): if inference: finputs, foutputs, fweights = fwd_graph() inputs = [x.tvm_tensor for x in finputs] weights = [x.tvm_tensor for x in fweights] outputs = [x.tvm_tensor for x in foutputs] labels = [] loss = None gradients = [] lr = None updates = [] tir_graph = tvm.tg.make_tir_graph_inference(inputs, outputs, weights) else: assert loss is not None and optimizer is not None bwd_graph = fwd_graph.make_backward(loss, optimizer) inputs = [x.tvm_tensor for x in bwd_graph.inputs] weights = [x.tvm_tensor for x in bwd_graph.weights] outputs = [x.tvm_tensor for x in bwd_graph.outputs] if need_output else [] labels = [x.tvm_tensor for x in bwd_graph.labels] loss = bwd_graph.loss.tvm_tensor gradients = [x.tvm_tensor for x in bwd_graph.gradients] if need_grad else [] lr = optimizer.lr_tensor updates = [x.tvm_tensor for x in bwd_graph.updates] tir_graph = tvm.tg.make_tir_graph_training(inputs, labels, outputs, weights, loss, gradients, lr, updates) return tir_graph @tvm._ffi.register_func("tg.graph.partition_policy") def partition_policy(graph, pre, post, number): pre_stat = graph.operation_stat_dict[pre] post_stat = graph.operation_stat_dict[post] # root op must be separated if pre_stat.must_compute_root: return True if pre_stat.num_consumers > 1: # do not fuse multi-output return True # if pre_stat.injective: # return False # if number > 10: # return True if pre_stat.reductive and post_stat.reductive: # do not fuse reductive nodes return True if pre_stat.injective and post_stat.injective: return False if pre_stat.injective and post_stat.reductive: return False if pre_stat.reductive and post_stat.injective: return True # if pre_stat.injective and post_stat.injective: # return ((not pre_stat.merge_backward) and post_stat.merge_backward) # if pre_stat.injective and post_stat.reductive: # return not pre_stat.merge_backward # if pre_stat.reductive and post_stat.injective: # return post_stat.merge_backward return True def set_partition_policy(policy): tvm._ffi.register_func("tg.graph.partition_policy", policy, True) """ Below are deprecated Python implementations They'll be removed in the future """ def is_injective(op): is_compute = isinstance(op, tvm.te.tensor.ComputeOp) has_reduce = hasattr(op, "reduce_axis") and op.reduce_axis return is_compute and (not has_reduce) def is_reductive(op): has_reduce = hasattr(op, "reduce_axis") and op.reduce_axis return has_reduce def remain_shape(op): is_compute = isinstance(op, tvm.te.tensor.ComputeOp) if not is_compute: return False ret = True output_shape = to_tuple(op.output(0).shape) for t in op.input_tensors: if to_tuple(t.shape) != output_shape: ret = False break return ret def able_inline(op, down_graph): is_compute = isinstance(op, tvm.te.tensor.ComputeOp) has_reduce = hasattr(op, "reduce_axis") and op.reduce_axis is_output = False for i in range(op.num_outputs): if op.output(i) not in down_graph: is_output = True break return is_compute and (not has_reduce) and (not is_output) class PyOpState(object): def __init__(self): self.injective = False self.elementwise = False self.reductive = False self.num_inputs = 0 self.num_consumers = 0 self.head = True # self.tail = False self.reductions = [] self.output_shape = [] self.num_add = 0 self.num_mul = 0 self.num_div = 0 self.num_branch = 0 self.num_logic = 
0 self.num_special = 0 self.gflop = 0 self.input_occur_count = [] # is output self.must_root = False def set_states(self, op, down_graph, root_ops): assert isinstance(op, tvm.te.tensor.ComputeOp) self.injective = is_injective(op) # the output shapes of multi-output op are the same self.output_shape = list(to_tuple(op.output(0).shape)) self.reductive = is_reductive(op) self.elementwise = self.injective and remain_shape(op) self.num_inputs = len(op.input_tensors) for i in range(op.num_outputs): if op.output(i) in down_graph: self.num_consumers += len(down_graph[op.output(i)]) if self.reductive: for iv in op.reduce_axis: self.reductions.append(to_int(iv.dom.extent)) operation_count = tvm.tg.count_operation(op) for (k, v) in operation_count.items(): setattr(self, k.value, v.value) input_occur = tvm.tg.count_input_occur(op.input_tensors, op) self.input_occur_count = [x.value for x in input_occur] if op in root_ops: self.must_root = True self.gflop = reduce(lambda x, y: x * y, self.reductions, 1) * \ reduce(lambda x, y: x * y, self.output_shape, 1) * \ (self.num_add + self.num_mul + self.num_div) / 1e9 class PyTIRSubGraph(object): def __init__(self): self.inputs = {} self.outputs = {} self.labels = {} self.weights = {} self.loss = {} self.gradients = {} self.lr = {} self.updates = {} self.index = {} self.connected_sets = {} self.op_stat_dict = {} self.op_list = [] self.ops = [] self.tensors = [] self.down_graph = {} self.c_list = [] def __repr__(self): ret = "PyTIRSubGraph\n" ret += "inputs=" + str(self.inputs) + "\n" ret += "outputs=" + str(self.outputs) + "\n" ret += "labels=" + str(self.labels) + "\n" ret += "weights=" + str(self.weights) + "\n" ret += "loss=" + str(self.loss) + "\n" ret += "gradients=" + str(self.gradients) + "\n" ret += "lr=" + str(self.lr) + "\n" ret += "updates=" + str(self.updates) + "\n" return ret def __str__(self): return self.__repr__() class PyTIRGraph(object): """PyTIRGraph inputs : (list of) tvm Tensor graph inputs outputs : (list of) tvm Tensor graph outputs wire : """ def __init__(self, inputs, labels, outputs, weights, loss, gradients, lr, updates, wire=None): if not isinstance(inputs, (list, tuple)): inputs = [inputs] if not isinstance(labels, (list, tuple)): labels = [labels] if not isinstance(outputs, (list, tuple)): outputs = [outputs] if not isinstance(weights, (list, tuple)): weights = [weights] if not isinstance(gradients, (list, tuple)): gradients = [gradients] if not isinstance(updates, (list, tuple)): updates = [updates] self.inputs = inputs self.labels = labels self.outputs = outputs self.weights = weights self.loss = loss self.gradients = gradients self.lr = lr self.updates = updates if self.loss is None: self.root_ops = [x.op for x in outputs + gradients + updates] else: self.root_ops = [x.op for x in outputs + [loss] + gradients + updates] if len(updates) > 0: assert len(weights) == len(updates) op_list, down_graph = flatten_tir_graph(self.root_ops) # a list of compute op after topological sorting self.op_list = op_list self.num_ops = len(op_list) self.op_feature_dict = {} # this graph is tensor to op list self.down_graph = down_graph # these are runtime properties self.ctx = None self.tvm_array_dict = {} # these are properties that can be modified by user self.np_array_dict = {} # these are properties that can be modified by scheduler self.op_stat_dict = {} self.subgraphs = {} self.subgraph_features = {} self.op_map = {} self.call_order = [] self.schedules = {} self.scheduled_subgraphs = set() self.bufs = {} self.functions = {} 
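        # functions built for a subgraph are also cached by its feature string
        # (see build_for below), so structurally identical subgraphs share one build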
self.shared_functions = {} # initialize some of them for op in op_list: self.op_stat_dict[op] = PyOpState() # get the states of each op self._analyze() def _analyze(self): look_up = set(self.root_ops) def func(op): self.op_stat_dict[op].set_states(op, self.down_graph, look_up) feature = op_feature(op) self.op_feature_dict[op] = feature return None _ = list(map(func, self.op_list)) def partition_graph(self): partition = PyTIRSubGraphPartition() (subgraphs, op_map), order = partition.partion_graph(self) self.subgraphs = subgraphs self.op_map = op_map self.call_order = order def func(kv): mark, subgraph = kv tensors = list(set(list(subgraph.outputs.keys()) + list(subgraph.loss.keys()) + list(subgraph.gradients.keys()) + list(subgraph.updates.keys()))) subgraph.tensors = tensors ops = [x.op for x in tensors] op_list, down_graph = flatten_tir_graph(ops, output_first=True) op_stat_dict = {} for op in op_list: v = self.op_map[op] if v in self.op_stat_dict: op_stat_dict[op] = self.op_stat_dict[v] subgraph.op_stat_dict = op_stat_dict subgraph.ops = ops subgraph.op_list = op_list subgraph.down_graph = down_graph self.subgraph_features[mark] = ";".join(map(lambda x: self.op_feature_dict[self.op_map[x]], op_list)) return None _ = list(map(func, subgraphs.items())) def set_inputs(self, inputs): for tvm_tensor, np_array in inputs.items(): self.np_array_dict[tvm_tensor] = np_array def set_lr(self, lr): if self.lr is None: raise RuntimeError("TIR Graph has no learning rate.") self.np_array_dict[self.lr] = lr def set_labels(self, labels): for tvm_tensor, np_array in labels.items(): self.np_array_dict[tvm_tensor] = np_array def set_weights(self, weights): for tvm_tensor, np_array in weights.items(): self.np_array_dict[tvm_tensor] = np_array def get_tvm_array(self, tvm_tensor): return self.tvm_array_dict[tvm_tensor] def get_outputs(self): return [self.tvm_array_dict[x] for x in self.outputs] def get_loss(self, tvm_tensor): assert self.loss is not None return self.tvm_array_dict[self.loss] def get_gradients(self): return [self.tvm_array_dict[x] for x in self.gradients] def get_updates(self): return [self.tvm_array_dict[x] for x in self.updates] def clear_schedule(self): self.op_stat_dict = {} self.subgraphs = {} self.subgraph_features = {} self.op_map = {} self.call_order = [] self.schedules = {} self.scheduled_subgraphs = set() self.bufs = {} self.functions = {} self.shared_functions = {} # initialize some of them for op in self.op_list: self.op_stat_dict[op] = PyOpState() # get the states of each op self._analyze() def clear_runtime(self): self.ctx = None self.tvm_array_dict = {} def create_schedule_for(self, mark=0, force=False): subgraphs = self.subgraphs feature = self.subgraph_features[mark] if force: self.scheduled_subgraphs.remove(feature) elif feature in self.scheduled_subgraphs: return False subgraph = subgraphs[mark] inputs = list(subgraph.inputs.keys()) outputs = list(subgraph.outputs.keys()) weights = list(subgraph.weights.keys()) labels = list(subgraph.labels.keys()) loss = list(subgraph.loss.keys()) gradients = list(subgraph.gradients.keys()) lr = list(subgraph.lr.keys()) updates = list(subgraph.updates.keys()) sub_bufs = list(set(inputs + labels + outputs + weights + loss + gradients + lr + updates)) self.bufs[mark] = sub_bufs ops = [x.op for x in outputs + loss + gradients + updates] s = tvm.te.create_schedule(ops) self.schedules[mark] = s self.scheduled_subgraphs.add(feature) return True def create_schedule(self, force=False): subgraphs = self.subgraphs if force: self.scheduled_subgraphs = 
set() for mark, subgraph in subgraphs.items(): feature = self.subgraph_features[mark] if feature in self.scheduled_subgraphs: continue inputs = list(subgraph.inputs.keys()) outputs = list(subgraph.outputs.keys()) weights = list(subgraph.weights.keys()) labels = list(subgraph.labels.keys()) loss = list(subgraph.loss.keys())
lr = list(subgraph.lr.keys()) updates = list(subgraph.updates.keys()) sub_bufs = list(set(inputs + labels + outputs + weights + loss + gradients + lr + updates)) self.bufs[mark] = sub_bufs ops = [x.op for x in outputs + loss + gradients + updates] s = tvm.te.create_schedule(ops) self.schedules[mark] = s self.scheduled_subgraphs.add(feature) def build_for(self, target, mark=0, force=False): feature = self.subgraph_features[mark] if force: self.shared_functions.pop(feature) elif feature in self.shared_functions: self.functions[mark] = self.shared_functions[feature] return True bufs = self.bufs[mark] sch = self.schedules[mark] try: func = tvm.build(sch, bufs, target=target) self.functions[mark] = func self.shared_functions[feature] = func # print("build success for subgraph", mark) return True except Exception as e: print("build error in subgraph", mark) print(e) # print(bufs) # print(tvm.lower(sch, bufs, simple_mode=True)) return False def build(self, target, force=False): fail = 0 if force: self.shared_functions = {} for mark, sch in self.schedules.items(): feature = self.subgraph_features[mark] if feature in self.shared_functions: self.functions[mark] = self.shared_functions[feature] continue bufs = self.bufs[mark] try: func = tvm.build(sch, bufs, target=target) self.functions[mark] = func self.shared_functions[feature] = func # print("build success for subgraph", mark) except Exception as e: fail += 1 print("build error in subgraph", mark) print(e) print(bufs) print(tvm.lower(sch, bufs, simple_mode=True)) return fail == 0 def allocate_buffer(self, target, dev, force=False): if not force and self.ctx is not None: return self.ctx = tvm.context(target, dev) # inputs for inp in self.inputs: if inp in self.np_array_dict: np_array = self.np_array_dict[inp].astype(inp.dtype) else: raise RuntimeError("Should provide input tensor for %s" % (str(inp))) self.tvm_array_dict[inp] = tvm.nd.array(np_array, self.ctx) # outputs for out in self.outputs: self.tvm_array_dict[out] = tvm.nd.empty(to_tuple(out.shape), out.dtype, ctx=self.ctx) # labels for label in self.labels: if label in self.np_array_dict: np_array = self.np_array_dict[label].astype(label.dtype) else: raise RuntimeError("Should provide input tensor for %s" % (str(label))) self.tvm_array_dict[label] = tvm.nd.array(np_array, self.ctx) # loss if self.loss is not None: self.tvm_array_dict[self.loss] = tvm.nd.empty(to_tuple(self.loss.shape), self.loss.dtype, ctx=self.ctx) # weights for weight in self.weights: if weight in self.np_array_dict: np_array = self.np_array_dict[weight].astype(weight.dtype) else: # TODO: add initializer np_array = np.random.uniform(-1, 1, to_tuple(weight.shape)).astype(weight.dtype) self.tvm_array_dict[weight] = tvm.nd.array(np_array, self.ctx) # gradients for grad in self.gradients: self.tvm_array_dict[grad] = tvm.nd.empty(to_tuple(grad.shape), grad.dtype, ctx=self.ctx) # lr if self.lr is not None: if self.lr in self.np_array_dict: np_array = self.np_array_dict[self.lr].astype(self.lr.dtype) else: raise RuntimeError("Should provide learning rate.") self.tvm_array_dict[self.lr] = tvm.nd.array(np_array, self.ctx) # updates for i, update in enumerate(self.updates): self.tvm_array_dict[update] = self.tvm_array_dict[self.weights[i]] # intermediate buffer for subgraph in self.subgraphs.values(): for out, old_tensor in subgraph.outputs.items(): if old_tensor not in self.outputs: # it's new output self.tvm_array_dict[old_tensor] = tvm.nd.empty(to_tuple(old_tensor.shape), old_tensor.dtype, ctx=self.ctx) def run(self, scheduler, 
target, dev): """ This is not enabled """ raise NotImplementedError() # generate specific space # scheduler has a cache, so multiple calls has the same effect scheduler.add_task(self, target) config = scheduler.propose(self, target) scheduler.apply_config(self, target, config) # apply config # 1. modify op stat list -> head, tail # 2. make subgraphs # 3. create schedule # 4. modify schedule self.build(target) # allocate buffer # only the first call has effect self.allocate_buffer(target, dev) for mark in self.call_order: func = self.functions[mark] bufs = self.bufs[mark] real_bufs = [self.tvm_array_dict[self.subgraphs[mark].index[x]] for x in bufs] func(*real_bufs) class PyTIRSubGraphPartition(object): def __init__(self): pass def __call__(self, graph): """ graph: PyTIRGraph """ pass def is_boundary(self, pre, post, graph): pre_stat = graph.op_stat_dict[pre] post_stat = graph.op_stat_dict[post] # root op must be separated if pre_stat.must_root: return True if pre_stat.num_consumers > 1: # do not fuse multi-output return True if pre_stat.reductive and post_stat.reductive: # do not fuse reductive nodes return True if pre_stat.injective and post_stat.injective: return ((not pre_stat.head) and post_stat.head) if pre_stat.injective and post_stat.reductive: return not pre_stat.head if pre_stat.reductive and post_stat.injective: return post_stat.head return True def partion_graph(self, graph): """ graph: PyTIRGraph returns: list of list of tvm ComputeOp dict from tvm ComputeOp to list of DataPort """ # -1 for not visited graph_mark = {x: -1 for x in graph.op_list} # setup initial nodes, all compute ops are included # this guarantees no node is left visit_stack = list(reversed(graph.op_list)) visited = set() global_mark = -1 while len(visit_stack) > 0: cur = visit_stack.pop() if cur in visited: continue if graph_mark[cur] < 0: # not marked # new subgraph global_mark += 1 graph_mark[cur] = global_mark graph_mark[cur] = global_mark # all the outputs for i in range(cur.num_outputs): t = cur.output(i) if t in graph.down_graph: for op in graph.down_graph[t]: if not self.is_boundary(cur, op, graph): if graph_mark[op] < 0: # mark it as the same subgraph graph_mark[op] = global_mark # only add node within the same subgraph visit_stack.append(op) # all the inputs for t in cur.input_tensors: if isinstance(t.op, tvm.te.tensor.ComputeOp): if not self.is_boundary(t.op, cur, graph): if graph_mark[t.op] < 0: # mark it as the same subgraph graph_mark[t.op] = global_mark # only add node within the same subgraph visit_stack.append(t.op) # add visit visited.add(cur) order = self.validate_partition(graph_mark) return self.subgraph_rewrite(graph_mark, graph), order def subgraph_rewrite(self, graph_mark, tgraph): ret = tvm.tg.subgraph_partition(graph_mark, tgraph.root_ops) op_map = {} inputs_set = set(tgraph.inputs) outputs_set = set(tgraph.outputs) labels_set = set(tgraph.labels) weights_set = set(tgraph.weights) gradients_set = set(tgraph.gradients) updates_set = set(tgraph.updates) subgraphs = {} for (old_op, mark) in graph_mark.items(): new_op = ret[old_op] op_map[new_op] = old_op if mark not in subgraphs: subgraphs[mark] = PyTIRSubGraph() for i, t in enumerate(old_op.input_tensors): if t in inputs_set: # new -> old subgraphs[mark].inputs[new_op.input_tensors[i]] = t if t in labels_set: subgraphs[mark].labels[new_op.input_tensors[i]] = t if t == tgraph.lr: subgraphs[mark].lr[new_op.input_tensors[i]] = t if t in weights_set: subgraphs[mark].weights[new_op.input_tensors[i]] = t # this is special # ret contains the 
new placeholder op because # this indicates an intermediate input if new_op.input_tensors[i].op in ret: subgraphs[mark].inputs[new_op.input_tensors[i]] = \ ret[new_op.input_tensors[i].op].output(t.value_index) another_mark = graph_mark[ret[new_op.input_tensors[i].op]] if another_mark not in subgraphs: subgraphs[another_mark] = PyTIRSubGraph() subgraphs[another_mark].outputs[ret[ret[new_op.input_tensors[i].op]].output(t.value_index)] = \ ret[new_op.input_tensors[i].op].output(t.value_index) for i in range(old_op.num_outputs): t = old_op.output(i) if t in outputs_set: subgraphs[mark].outputs[new_op.output(i)] = t if t in gradients_set: subgraphs[mark].gradients[new_op.output(i)] = t if t in updates_set: subgraphs[mark].updates[new_op.output(i)] = t if t == tgraph.loss: subgraphs[mark].loss[new_op.output(i)] = t for mark, subgraph in subgraphs.items(): subgraph.index = { **subgraph.inputs, **subgraph.outputs, **subgraph.labels, **subgraph.loss, \ **subgraph.weights, **subgraph.gradients, **subgraph.lr, **subgraph.updates} return subgraphs, op_map def validate_partition(self, graph_mark): # dst -> src order = [] ref = {} max_mark = 0 for (op, mark) in graph_mark.items(): max_mark = max(mark, max_mark) for inp in op.input_tensors: if inp.op in graph_mark: src_mark = graph_mark[inp.op] if src_mark != mark: if mark not in ref: ref[mark] = set() ref[mark].add(src_mark) visited = set() visiting = set() def func(val): if val in visited: return if val in visiting: raise RuntimeError( "The subgraph relation has a circular reference.") visiting.add(val) if val not in ref: order.append(val) visiting.remove(val) visited.add(val) return for inp in ref[val]: func(inp) order.append(val) visiting.remove(val) visited.add(val) return for mark in range(max_mark+1): func(mark) return order
gradients = list(subgraph.gradients.keys())
index.js
import { connect } from 'react-redux'; import Journal from '../../components/Journal'; /** * @function filterData * @param {array} entries Entries * @param {object} filter Filter object * @param {number} pageLimit Page limit * @param {number} pageNumber Page number * @return {array} Entries */ function
(entries, filter, pageLimit, pageNumber) { switch (filter.viewType) { case 'entry': return entries.filter((entry) => { return entry.link === filter.value; }); case 'tag': return entries.filter((entry) => { return entry.tags.map((tag) => { return tag.txt; }).indexOf(filter.value) > -1; }); case 'page': default: { const start = (pageNumber - 1) * pageLimit; const end = start + pageLimit; return entries.slice(start, end); } } } const mapStateToProps = (state, ownProps) => { const { entries, pageLimit, links, tags, isDisplayed } = state.journal; const { viewType, value } = ownProps.match.params; const pageNumber = viewType === 'page' ? Number(value) : 1; const filter = { viewType: viewType || 'page', value: value || 'date' }; const filteredEntries = filterData(entries, filter, pageLimit, pageNumber); const totalEntries = entries.length; const totalFilteredEntries = filter.viewType === 'tag' ? filteredEntries.length : totalEntries; return { entries: filteredEntries, totalEntries: totalFilteredEntries, links, tags, pageLimit, filter, isDisplayed, pageNumber }; }; export default connect(mapStateToProps)(Journal);
filterData
impl_cast.rs
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.

use std::borrow::Cow;
use std::convert::TryFrom;
use std::convert::TryInto;
use std::num::IntErrorKind;

use num_traits::identities::Zero;
use tidb_query_codegen::rpn_fn;
use tidb_query_datatype::*;
use tipb::{Expr, FieldType};

use crate::types::RpnExpressionBuilder;
use crate::{RpnExpressionNode, RpnFnCallExtra, RpnFnMeta};
use tidb_query_common::Result;
use tidb_query_datatype::codec::convert::*;
use tidb_query_datatype::codec::data_type::*;
use tidb_query_datatype::codec::error::{ERR_DATA_OUT_OF_RANGE, ERR_TRUNCATE_WRONG_VALUE};
use tidb_query_datatype::codec::mysql::time::{MAX_YEAR, MIN_YEAR};
use tidb_query_datatype::codec::mysql::{binary_literal, Time};
use tidb_query_datatype::codec::Error;
use tidb_query_datatype::expr::EvalContext;

fn get_cast_fn_rpn_meta(
    is_from_constant: bool,
    from_field_type: &FieldType,
    to_field_type: &FieldType,
) -> Result<RpnFnMeta> {
    let from = box_try!(EvalType::try_from(from_field_type.as_accessor().tp()));
    let to = box_try!(EvalType::try_from(to_field_type.as_accessor().tp()));
    let func_meta = match (from, to) {
        // any as int
        (EvalType::Int, EvalType::Int) => {
            if !from_field_type.is_unsigned() && to_field_type.is_unsigned() {
                cast_signed_int_as_unsigned_int_fn_meta()
            } else {
                cast_int_as_int_others_fn_meta()
            }
        }
        (EvalType::Real, EvalType::Int) => {
            if to_field_type.is_unsigned() {
                cast_real_as_uint_fn_meta()
            } else {
                cast_any_as_any_fn_meta::<Real, Int>()
            }
        }
        (EvalType::Bytes, EvalType::Int) => {
            if is_from_constant && from_field_type.is_binary_string_like() {
                cast_binary_string_as_int_fn_meta()
            } else {
                cast_string_as_int_fn_meta()
            }
        }
        (EvalType::Decimal, EvalType::Int) => {
            if to_field_type.is_unsigned() {
                cast_decimal_as_uint_fn_meta()
            } else {
                cast_any_as_any_fn_meta::<Decimal, Int>()
            }
        }
        (EvalType::DateTime, EvalType::Int) => cast_any_as_any_fn_meta::<DateTime, Int>(),
        (EvalType::Duration, EvalType::Int) => cast_any_as_any_fn_meta::<Duration, Int>(),
        (EvalType::Json, EvalType::Int) => {
            if to_field_type.is_unsigned() {
                cast_json_as_uint_fn_meta()
            } else {
                cast_json_as_any_fn_meta::<Int>()
            }
        }

        // any as real
        (EvalType::Int, EvalType::Real) => {
            let fu = from_field_type.is_unsigned();
            let ru = to_field_type.is_unsigned();
            match (fu, ru) {
                (true, _) => cast_unsigned_int_as_signed_or_unsigned_real_fn_meta(),
                (false, false) => cast_signed_int_as_signed_real_fn_meta(),
                (false, true) => cast_signed_int_as_unsigned_real_fn_meta(),
            }
        }
        (EvalType::Real, EvalType::Real) => {
            if to_field_type.is_unsigned() {
                cast_real_as_unsigned_real_fn_meta()
            } else {
                cast_real_as_signed_real_fn_meta()
            }
        }
        (EvalType::Bytes, EvalType::Real) => {
            match (
                is_from_constant && from_field_type.is_binary_string_like(),
                to_field_type.is_unsigned(),
            ) {
                (true, true) => cast_binary_string_as_unsigned_real_fn_meta(),
                (true, false) => cast_binary_string_as_signed_real_fn_meta(),
                (false, true) => cast_string_as_unsigned_real_fn_meta(),
                (false, false) => cast_string_as_signed_real_fn_meta(),
            }
        }
        (EvalType::Decimal, EvalType::Real) => {
            if to_field_type.is_unsigned() {
                cast_decimal_as_unsigned_real_fn_meta()
            } else {
                cast_any_as_any_fn_meta::<Decimal, Real>()
            }
        }
        (EvalType::DateTime, EvalType::Real) => cast_any_as_any_fn_meta::<DateTime, Real>(),
        (EvalType::Duration, EvalType::Real) => cast_any_as_any_fn_meta::<Duration, Real>(),
        (EvalType::Json, EvalType::Real) => cast_json_as_any_fn_meta::<Real>(),

        // any as string
        (EvalType::Int, EvalType::Bytes) => {
            if FieldTypeAccessor::tp(from_field_type) == FieldTypeTp::Year {
cast_year_as_string_fn_meta() } else if from_field_type.is_unsigned() { cast_uint_as_string_fn_meta() } else { cast_any_as_string_fn_meta::<Int>() } } (EvalType::Real, EvalType::Bytes) => { if FieldTypeAccessor::tp(from_field_type) == FieldTypeTp::Float { cast_float_real_as_string_fn_meta() } else { cast_any_as_string_fn_meta::<Real>() } } (EvalType::Bytes, EvalType::Bytes) => cast_string_as_string_fn_meta(), (EvalType::Decimal, EvalType::Bytes) => cast_any_as_string_fn_meta::<Decimal>(), (EvalType::DateTime, EvalType::Bytes) => cast_any_as_string_fn_meta::<DateTime>(), (EvalType::Duration, EvalType::Bytes) => cast_any_as_string_fn_meta::<Duration>(), (EvalType::Json, EvalType::Bytes) => cast_json_as_bytes_fn_meta(), // any as decimal (EvalType::Int, EvalType::Decimal) => { let fu = from_field_type.is_unsigned(); let ru = to_field_type.is_unsigned(); match (fu, ru) { (true, _) => cast_unsigned_int_as_signed_or_unsigned_decimal_fn_meta(), (false, true) => cast_signed_int_as_unsigned_decimal_fn_meta(), (false, false) => cast_any_as_decimal_fn_meta::<Int>(), } } (EvalType::Real, EvalType::Decimal) => cast_real_as_decimal_fn_meta(), (EvalType::Bytes, EvalType::Decimal) => { if to_field_type.is_unsigned() { cast_string_as_unsigned_decimal_fn_meta() } else { cast_bytes_as_decimal_fn_meta() } } (EvalType::Decimal, EvalType::Decimal) => { if to_field_type.is_unsigned() { cast_decimal_as_unsigned_decimal_fn_meta() } else { cast_decimal_as_signed_decimal_fn_meta() } } (EvalType::DateTime, EvalType::Decimal) => cast_any_as_decimal_fn_meta::<DateTime>(), (EvalType::Duration, EvalType::Decimal) => cast_any_as_decimal_fn_meta::<Duration>(), (EvalType::Json, EvalType::Decimal) => cast_json_as_decimal_fn_meta(), // any as duration (EvalType::Int, EvalType::Duration) => cast_int_as_duration_fn_meta(), (EvalType::Real, EvalType::Duration) => cast_real_as_duration_fn_meta(), (EvalType::Bytes, EvalType::Duration) => cast_bytes_as_duration_fn_meta(), (EvalType::Decimal, EvalType::Duration) => cast_decimal_as_duration_fn_meta(), (EvalType::DateTime, EvalType::Duration) => cast_time_as_duration_fn_meta(), (EvalType::Duration, EvalType::Duration) => cast_duration_as_duration_fn_meta(), (EvalType::Json, EvalType::Duration) => cast_json_as_duration_fn_meta(), (EvalType::Int, EvalType::DateTime) => { if FieldTypeAccessor::tp(from_field_type) == FieldTypeTp::Year { cast_year_as_time_fn_meta() } else { cast_int_as_time_fn_meta() } } (EvalType::Real, EvalType::DateTime) => cast_real_as_time_fn_meta(), (EvalType::Bytes, EvalType::DateTime) => cast_string_as_time_fn_meta(), (EvalType::Decimal, EvalType::DateTime) => cast_decimal_as_time_fn_meta(), (EvalType::DateTime, EvalType::DateTime) => cast_time_as_time_fn_meta(), (EvalType::Duration, EvalType::DateTime) => cast_duration_as_time_fn_meta(), // any as json (EvalType::Int, EvalType::Json) => { if from_field_type.is_bool() { cast_bool_as_json_fn_meta() } else if from_field_type.is_unsigned() { cast_uint_as_json_fn_meta() } else { cast_any_as_json_fn_meta::<Int>() } } (EvalType::Real, EvalType::Json) => cast_any_as_json_fn_meta::<Real>(), (EvalType::Bytes, EvalType::Json) => cast_string_as_json_fn_meta(), (EvalType::Decimal, EvalType::Json) => cast_any_as_json_fn_meta::<Decimal>(), (EvalType::DateTime, EvalType::Json) => cast_any_as_json_fn_meta::<DateTime>(), (EvalType::Duration, EvalType::Json) => cast_any_as_json_fn_meta::<Duration>(), (EvalType::Json, EvalType::Json) => cast_json_as_json_fn_meta(), _ => return Err(other_err!("Unsupported cast from {} to {}", from, 
to)),
    };

    Ok(func_meta)
}

/// Gets the cast function between specified data types.
///
/// TODO: This function supports some internal casts performed by TiKV. However it would be better
/// to be done in TiDB.
pub fn get_cast_fn_rpn_node(
    is_from_constant: bool,
    from_field_type: &FieldType,
    to_field_type: FieldType,
) -> Result<RpnExpressionNode> {
    let func_meta = get_cast_fn_rpn_meta(is_from_constant, from_field_type, &to_field_type)?;
    // This cast function is inserted by `Coprocessor` automatically,
    // so the `inUnion` flag is always false in this situation. Ideally,
    // the cast function should be inserted by TiDB and pushed down
    // with all implicit arguments.
    Ok(RpnExpressionNode::FnCall {
        func_meta,
        args_len: 1,
        field_type: to_field_type,
        metadata: Box::new(tipb::InUnionMetadata::default()),
    })
}

/// Gets the RPN function meta
pub fn map_cast_func(expr: &Expr) -> Result<RpnFnMeta> {
    let children = expr.get_children();
    if children.len() != 1 {
        return Err(other_err!(
            "Unexpected arguments: sig {:?} with {} args",
            expr.get_sig(),
            children.len()
        ));
    }
    get_cast_fn_rpn_meta(
        RpnExpressionBuilder::is_expr_eval_to_scalar(&children[0])?,
        children[0].get_field_type(),
        expr.get_field_type(),
    )
}

// cast any as int/uint, some cast functions reuse `cast_any_as_any`
//
// - cast_real_as_int -> cast_any_as_any<Real, Int>
// - cast_decimal_as_int -> cast_any_as_any<Decimal, Int>
// - cast_time_as_int_or_uint -> cast_any_as_any<Time, Int>
// - cast_duration_as_int_or_uint -> cast_any_as_any<Duration, Int>
// - cast_json_as_int -> cast_any_as_any<Json, Int>

#[rpn_fn(nullable, capture = [metadata], metadata_type = tipb::InUnionMetadata)]
#[inline]
fn cast_signed_int_as_unsigned_int(
    metadata: &tipb::InUnionMetadata,
    val: Option<&Int>,
) -> Result<Option<Int>> {
    match val {
        None => Ok(None),
        Some(val) => {
            let val = *val;
            if metadata.get_in_union() && val < 0i64 {
                Ok(Some(0))
            } else {
                Ok(Some(val))
            }
        }
    }
}

#[rpn_fn(nullable)]
#[inline]
fn cast_int_as_int_others(val: Option<&Int>) -> Result<Option<Int>> {
    match val {
        None => Ok(None),
        Some(val) => Ok(Some(*val)),
    }
}

#[rpn_fn(nullable, capture = [ctx, metadata], metadata_type = tipb::InUnionMetadata)]
#[inline]
fn cast_real_as_uint(
    ctx: &mut EvalContext,
    metadata: &tipb::InUnionMetadata,
    val: Option<&Real>,
) -> Result<Option<Int>> {
    match val {
        None => Ok(None),
        Some(val) => {
            let val = val.into_inner();
            if metadata.get_in_union() && val < 0f64 {
                Ok(Some(0))
            } else {
                // FIXME: MySQL's double-to-unsigned conversion is very special:
                // it **seems** that if the float is bigger than i64::MAX, then
                // i64::MAX is always returned. This may be a bug in MySQL,
                // so our behavior is left unchanged here.
                let val: u64 = val.convert(ctx)?;
                Ok(Some(val as i64))
            }
        }
    }
}

#[rpn_fn(nullable, capture = [ctx, extra, metadata], metadata_type = tipb::InUnionMetadata)]
#[inline]
fn cast_string_as_int(
    ctx: &mut EvalContext,
    extra: &RpnFnCallExtra,
    metadata: &tipb::InUnionMetadata,
    val: Option<BytesRef>,
) -> Result<Option<Int>> {
    match val {
        None => Ok(None),
        Some(val) => {
            // TODO: in TiDB, if `b.args[0].GetType().Hybrid()` || `IsBinaryLiteral(b.args[0])`,
            // then it will return res from EvalInt() directly.
            let is_unsigned = extra.ret_field_type.is_unsigned();
            let val = get_valid_utf8_prefix(ctx, val)?;
            let val = val.trim();
            let is_str_neg = val.starts_with('-');
            if metadata.get_in_union() && is_unsigned && is_str_neg {
                Ok(Some(0))
            } else {
                // FIXME: if the error returned by get_valid_int_prefix is an overflow
                // error, it should be ERR_TRUNCATE_WRONG_VALUE rather than anything else.
                let valid_int_prefix = get_valid_int_prefix(ctx, val)?;
                let parse_res = if !is_str_neg {
                    valid_int_prefix.parse::<u64>().map(|x| x as i64)
                } else {
                    valid_int_prefix.parse::<i64>()
                };
                // `OverflowAsWarning` is true only in a `SELECT` statement context, e.g:
                // 1. SELECT * FROM t  => OverflowAsWarning = true
                // 2. INSERT INTO t VALUE (...)  => OverflowAsWarning = false
                // 3. INSERT INTO t SELECT * FROM t2  => OverflowAsWarning = false
                // (according to https://github.com/pingcap/tidb/blob/e173c7f5c1041b3c7e67507889d50a7bdbcdfc01/executor/executor.go#L1452)
                //
                // NOTE: if the setting of this flag (OverflowAsWarning) ever changes,
                // the behavior here should be updated to stay consistent with TiDB.
                match parse_res {
                    Ok(x) => {
                        if !is_str_neg {
                            if !is_unsigned && x as u64 > std::i64::MAX as u64 {
                                ctx.warnings
                                    .append_warning(Error::cast_as_signed_overflow())
                            }
                        } else if is_unsigned {
                            ctx.warnings
                                .append_warning(Error::cast_neg_int_as_unsigned());
                        }
                        Ok(Some(x as i64))
                    }
                    Err(err) => match *err.kind() {
                        IntErrorKind::PosOverflow | IntErrorKind::NegOverflow => {
                            let err = if is_str_neg {
                                Error::overflow("BIGINT UNSIGNED", valid_int_prefix)
                            } else {
                                Error::overflow("BIGINT", valid_int_prefix)
                            };
                            let warn_err = Error::truncated_wrong_val("INTEGER", val);
                            ctx.handle_overflow_err(warn_err).map_err(|_| err)?;
                            let val = if is_str_neg {
                                std::i64::MIN
                            } else {
                                std::u64::MAX as i64
                            };
                            Ok(Some(val))
                        }
                        _ => Err(other_err!("parse string to int failed: {}", err)),
                    },
                }
            }
        }
    }
}

#[rpn_fn(nullable, capture = [ctx])]
fn cast_binary_string_as_int(ctx: &mut EvalContext, val: Option<BytesRef>) -> Result<Option<Int>> {
    match val {
        None => Ok(None),
        Some(val) => {
            let r = binary_literal::to_uint(ctx, val)? as i64;
            Ok(Some(r))
        }
    }
}

/// # TODO
///
/// This function is added to prove `rpn_fn` supports `enum`/`set` correctly. We will add enum/set
/// related copr functions into `get_cast_fn_rpn_meta` after Enum/Set decoding is implemented.
#[rpn_fn] #[inline] fn cast_enum_as_int(val: EnumRef) -> Result<Option<Int>> { Ok(Some(val.value() as Int)) } #[rpn_fn] #[inline] fn cast_set_as_int(val: SetRef) -> Result<Option<Int>> { Ok(Some(val.value() as Int)) } #[rpn_fn(nullable, capture = [ctx, metadata], metadata_type = tipb::InUnionMetadata)] #[inline] fn cast_decimal_as_uint( ctx: &mut EvalContext, metadata: &tipb::InUnionMetadata, val: Option<&Decimal>, ) -> Result<Option<Int>> { match val { None => Ok(None), Some(val) => { // TODO: here TiDB round before call `val.is_negative()` if metadata.get_in_union() && val.is_negative() { Ok(Some(0)) } else { let r: u64 = val.convert(ctx)?; Ok(Some(r as i64)) } } } } #[rpn_fn(nullable, capture = [ctx])] #[inline] fn cast_json_as_uint(ctx: &mut EvalContext, val: Option<JsonRef>) -> Result<Option<Int>> { match val { None => Ok(None), Some(j) => { let r: u64 = j.convert(ctx)?; Ok(Some(r as i64)) } } } // cast any as real, some cast functions reuse `cast_any_as_any` // // cast_decimal_as_signed_real -> cast_any_as_any<Decimal, Real> // cast_time_as_real -> cast_any_as_any<Time, Real> // cast_duration_as_real -> cast_any_as_any<Duration, Real> // cast_json_as_real -> by cast_any_as_any<Json, Real> #[rpn_fn(nullable)] #[inline] fn cast_signed_int_as_signed_real(val: Option<&Int>) -> Result<Option<Real>> { match val { None => Ok(None), Some(val) => Ok(Real::new(*val as f64).ok()), } } #[rpn_fn(nullable, capture = [metadata], metadata_type = tipb::InUnionMetadata)] #[inline] fn cast_signed_int_as_unsigned_real( metadata: &tipb::InUnionMetadata, val: Option<&Int>, ) -> Result<Option<Real>> { match val { None => Ok(None), Some(val) => { if metadata.get_in_union() && *val < 0 { Ok(Some(Real::zero())) } else { // FIXME: negative number to unsigned real's logic may be wrong here. Ok(Real::new(*val as u64 as f64).ok()) } } } } // because we needn't to consider if uint overflow upper boundary of signed real, // so we can merge uint to signed/unsigned real in one function #[rpn_fn(nullable)] #[inline] fn cast_unsigned_int_as_signed_or_unsigned_real(val: Option<&Int>) -> Result<Option<Real>> { match val { None => Ok(None), Some(val) => Ok(Real::new(*val as u64 as f64).ok()), } } #[rpn_fn(nullable)] #[inline] fn cast_real_as_signed_real(val: Option<&Real>) -> Result<Option<Real>> { Ok(val.cloned()) } #[rpn_fn(nullable, capture = [metadata], metadata_type = tipb::InUnionMetadata)] #[inline] fn cast_real_as_unsigned_real( metadata: &tipb::InUnionMetadata, val: Option<&Real>, ) -> Result<Option<Real>> { match val { None => Ok(None), Some(val) => { if metadata.get_in_union() && val.into_inner() < 0f64 { Ok(Some(Real::zero())) } else { // FIXME: negative number to unsigned real's logic may be wrong here. Ok(Some(*val)) } } } } #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] fn cast_string_as_signed_real( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<BytesRef>, ) -> Result<Option<Real>> { match val { None => Ok(None), Some(val) => { let r: f64 = val.convert(ctx)?; let r = produce_float_with_specified_tp(ctx, extra.ret_field_type, r)?; Ok(Real::new(r).ok()) } } } #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] fn cast_binary_string_as_signed_real( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<BytesRef>, ) -> Result<Option<Real>> { match val { None => Ok(None), Some(val) => { let r = binary_literal::to_uint(ctx, val)? 
as i64 as f64; let r = produce_float_with_specified_tp(ctx, extra.ret_field_type, r)?; Ok(Real::new(r).ok()) } } } #[rpn_fn(nullable, capture = [ctx, extra, metadata], metadata_type = tipb::InUnionMetadata)] #[inline] fn cast_string_as_unsigned_real( ctx: &mut EvalContext, extra: &RpnFnCallExtra, metadata: &tipb::InUnionMetadata, val: Option<BytesRef>, ) -> Result<Option<Real>> { match val { None => Ok(None), Some(val) => { let mut r: f64 = val.convert(ctx)?; if metadata.get_in_union() && r < 0f64 { r = 0f64; } let r = produce_float_with_specified_tp(ctx, extra.ret_field_type, r)?; Ok(Real::new(r).ok()) } } } #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] fn cast_binary_string_as_unsigned_real( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<BytesRef>, ) -> Result<Option<Real>> { match val { None => Ok(None), Some(val) => { let r = binary_literal::to_uint(ctx, val)? as f64; let r = produce_float_with_specified_tp(ctx, extra.ret_field_type, r)?; Ok(Real::new(r).ok()) } } } #[rpn_fn(nullable, capture = [ctx, metadata], metadata_type = tipb::InUnionMetadata)] #[inline] fn cast_decimal_as_unsigned_real( ctx: &mut EvalContext, metadata: &tipb::InUnionMetadata, val: Option<&Decimal>, ) -> Result<Option<Real>> { match val { None => Ok(None), Some(val) => { if metadata.get_in_union() && val.is_negative() { Ok(Some(Real::zero())) } else { // FIXME: negative number to unsigned real's logic may be wrong here. Ok(Some(val.convert(ctx)?)) } } } } // cast any as string, some cast functions reuse `cast_any_as_any` // // cast_int_as_string -> cast_any_as_string_fn_meta::<Int> // cast_real_as_string -> cast_any_as_string_fn_meta::<Real> // cast_decimal_as_string -> cast_any_as_string_fn_meta::<Decimal> // cast_datetime_as_string -> cast_any_as_string_fn_meta::<DateTime> // cast_duration_as_string -> cast_any_as_string_fn_meta::<Duration> // cast_json_as_string -> by cast_any_as_any<Json, String> #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] fn cast_any_as_string<T: ConvertTo<Bytes> + Evaluable + EvaluableRet>( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<&T>, ) -> Result<Option<Bytes>> { match val { None => Ok(None), Some(val) => { let val: Bytes = val.convert(ctx)?; cast_as_string_helper(ctx, extra, val) } } } #[rpn_fn(capture = [ctx, extra])] #[inline] fn cast_year_as_string( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: &Int, ) -> Result<Option<Bytes>> { let cast = if *val == 0 { b"0000".to_vec() } else { val.to_string().into_bytes() }; cast_as_string_helper(ctx, extra, cast) } #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] fn cast_uint_as_string( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<&Int>, ) -> Result<Option<Bytes>> { match val { None => Ok(None), Some(val) => { let val = (*val as u64).to_string().into_bytes(); cast_as_string_helper(ctx, extra, val) } } } #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] fn cast_float_real_as_string( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<&Real>, ) -> Result<Option<Bytes>> { match val { None => Ok(None), Some(val) => { let val = val.into_inner() as f32; let val = val.to_string().into_bytes(); cast_as_string_helper(ctx, extra, val) } } } // FIXME: We cannot use specialization in current Rust version, so impl ConvertTo<Bytes> for Bytes cannot // pass compile because of we have impl Convert<Bytes> for T where T: ToString + Evaluable // Refactor this part after https://github.com/rust-lang/rust/issues/31844 closed #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] fn 
cast_string_as_string( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<BytesRef>, ) -> Result<Option<Bytes>> { match val { None => Ok(None), Some(val) => { let val = val.to_vec(); cast_as_string_helper(ctx, extra, val) } } } #[inline] fn cast_as_string_helper( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Vec<u8>, ) -> Result<Option<Bytes>> { let res = produce_str_with_specified_tp( ctx, Cow::Borrowed(val.as_slice()), extra.ret_field_type, false, )?; let mut res = match res { Cow::Borrowed(_) => val, Cow::Owned(x) => x.to_vec(), }; pad_zero_for_binary_type(&mut res, extra.ret_field_type); Ok(Some(res)) } // cast any as decimal, some cast functions reuse `cast_any_as_decimal` // // - cast_signed_int_as_signed_decimal -> cast_any_as_decimal<Int> // - cast_string_as_signed_decimal -> cast_any_as_decimal<Bytes> // - cast_time_as_decimal -> cast_any_as_decimal<Time> // - cast_duration_as_decimal -> cast_any_as_decimal<Duration> // - cast_json_as_decimal -> cast_any_as_decimal<Json> #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] fn
( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<&i64>, ) -> Result<Option<Decimal>> { match val { None => Ok(None), Some(val) => { // because uint's upper bound is smaller than signed decimal's upper bound // so we can merge cast uint as signed/unsigned decimal in this function let dec = Decimal::from(*val as u64); Ok(Some(produce_dec_with_specified_tp( ctx, dec, extra.ret_field_type, )?)) } } } #[rpn_fn(nullable, capture = [ctx, extra, metadata], metadata_type = tipb::InUnionMetadata)] #[inline] fn cast_signed_int_as_unsigned_decimal( ctx: &mut EvalContext, extra: &RpnFnCallExtra, metadata: &tipb::InUnionMetadata, val: Option<&i64>, ) -> Result<Option<Decimal>> { match val { None => Ok(None), Some(val) => { let dec = if metadata.get_in_union() && *val < 0 { Decimal::zero() } else { Decimal::from(*val as u64) }; Ok(Some(produce_dec_with_specified_tp( ctx, dec, extra.ret_field_type, )?)) } } } #[rpn_fn(nullable, capture = [ctx, extra, metadata], metadata_type = tipb::InUnionMetadata)] #[inline] fn cast_real_as_decimal( ctx: &mut EvalContext, extra: &RpnFnCallExtra, metadata: &tipb::InUnionMetadata, val: Option<&Real>, ) -> Result<Option<Decimal>> { match val { None => Ok(None), Some(val) => { let val = val.into_inner(); let res = if metadata.get_in_union() && val < 0f64 { Decimal::zero() } else { Decimal::from_f64(val)? }; Ok(Some(produce_dec_with_specified_tp( ctx, res, extra.ret_field_type, )?)) } } } #[rpn_fn(nullable, capture = [ctx, extra, metadata], metadata_type = tipb::InUnionMetadata)] #[inline] fn cast_string_as_unsigned_decimal( ctx: &mut EvalContext, extra: &RpnFnCallExtra, metadata: &tipb::InUnionMetadata, val: Option<BytesRef>, ) -> Result<Option<Decimal>> { match val { None => Ok(None), Some(val) => { // FIXME: in TiDB, if the param IsBinaryLiteral, then return the result of `evalDecimal` directly let d: Decimal = val.convert(ctx)?; let d = if metadata.get_in_union() && d.is_negative() { Decimal::zero() } else { d }; Ok(Some(produce_dec_with_specified_tp( ctx, d, extra.ret_field_type, )?)) } } } #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] fn cast_decimal_as_signed_decimal( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<&Decimal>, ) -> Result<Option<Decimal>> { match val { None => Ok(None), Some(val) => Ok(Some(produce_dec_with_specified_tp( ctx, *val, extra.ret_field_type, )?)), } } #[rpn_fn(nullable, capture = [ctx, extra, metadata], metadata_type = tipb::InUnionMetadata)] #[inline] fn cast_decimal_as_unsigned_decimal( ctx: &mut EvalContext, extra: &RpnFnCallExtra, metadata: &tipb::InUnionMetadata, val: Option<&Decimal>, ) -> Result<Option<Decimal>> { match val { None => Ok(None), Some(val) => { let res = if metadata.get_in_union() && val.is_negative() { Decimal::zero() } else { *val }; Ok(Some(produce_dec_with_specified_tp( ctx, res, extra.ret_field_type, )?)) } } } #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] fn cast_any_as_decimal<From: Evaluable + EvaluableRet + ConvertTo<Decimal>>( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<&From>, ) -> Result<Option<Decimal>> { match val { None => Ok(None), Some(val) => { let dec: Decimal = val.convert(ctx)?; Ok(Some(produce_dec_with_specified_tp( ctx, dec, extra.ret_field_type, )?)) } } } #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] fn cast_json_as_decimal( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<JsonRef>, ) -> Result<Option<Decimal>> { match val { None => Ok(None), Some(val) => { let dec: Decimal = val.convert(ctx)?; 
Ok(Some(produce_dec_with_specified_tp( ctx, dec, extra.ret_field_type, )?)) } } } #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] fn cast_bytes_as_decimal( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<BytesRef>, ) -> Result<Option<Decimal>> { match val { None => Ok(None), Some(val) => { let dec: Decimal = val.convert(ctx)?; Ok(Some(produce_dec_with_specified_tp( ctx, dec, extra.ret_field_type, )?)) } } } // cast any as duration, no cast functions reuse `cast_any_as_any` #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] fn cast_int_as_duration( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<&Int>, ) -> Result<Option<Duration>> { match val { None => Ok(None), Some(val) => { let fsp = extra.ret_field_type.get_decimal() as i8; Duration::from_i64(ctx, *val, fsp).map(Some).or_else(|err| { if err.is_overflow() { ctx.handle_overflow_err(err)?; Ok(None) } else if err.is_truncated() { ctx.handle_truncate_err(err)?; Ok(None) } else { Err(err.into()) } }) } } } #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] fn cast_time_as_duration( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<&DateTime>, ) -> Result<Option<Duration>> { match val { None => Ok(None), Some(val) => { let dur: Duration = val.convert(ctx)?; Ok(Some(dur.round_frac(extra.ret_field_type.decimal() as i8)?)) } } } #[rpn_fn(nullable, capture = [extra])] #[inline] fn cast_duration_as_duration( extra: &RpnFnCallExtra, val: Option<&Duration>, ) -> Result<Option<Duration>> { match val { None => Ok(None), Some(val) => Ok(Some(val.round_frac(extra.ret_field_type.decimal() as i8)?)), } } // TODO: use this macro to simplify all other place macro_rules! skip_none { ($val:expr) => { match $val { None => return Ok(None), Some(v) => v, } }; } #[inline] fn cast_bytes_like_as_duration( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: &[u8], ) -> Result<Option<Duration>> { let val = std::str::from_utf8(val).map_err(Error::Encoding)?; let result = Duration::parse(ctx, val, extra.ret_field_type.get_decimal() as i8); match result { Ok(dur) => Ok(Some(dur)), Err(e) => match e.code() { ERR_DATA_OUT_OF_RANGE => { ctx.handle_overflow_err(e)?; Ok(None) } ERR_TRUNCATE_WRONG_VALUE => { ctx.handle_truncate_err(e)?; Ok(None) } _ => Err(e.into()), }, } } #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] pub fn cast_real_as_duration( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<&Real>, ) -> Result<Option<Duration>> { let v = skip_none!(val).into_inner().to_string(); cast_bytes_like_as_duration(ctx, extra, v.as_bytes()) } #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] pub fn cast_bytes_as_duration( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<BytesRef>, ) -> Result<Option<Duration>> { let v = skip_none!(val); cast_bytes_like_as_duration(ctx, extra, v) } #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] pub fn cast_decimal_as_duration( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<&Decimal>, ) -> Result<Option<Duration>> { let v = skip_none!(val).to_string(); cast_bytes_like_as_duration(ctx, extra, v.as_bytes()) } #[rpn_fn(nullable, capture = [ctx, extra])] #[inline] pub fn cast_json_as_duration( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<JsonRef>, ) -> Result<Option<Duration>> { let v = skip_none!(val).unquote()?; cast_bytes_like_as_duration(ctx, extra, v.as_bytes()) } #[rpn_fn(nullable, capture = [ctx, extra])] fn cast_int_as_time( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<&Int>, ) -> Result<Option<Time>> { if let Some(val) = 
val {
        // Parse `val` as an `i64` into the target time type
        Time::parse_from_i64(
            ctx,
            *val,
            extra.ret_field_type.as_accessor().tp().try_into()?,
            extra.ret_field_type.get_decimal() as i8,
        )
        .map(Some)
        .or_else(|_| {
            Ok(ctx
                .handle_invalid_time_error(Error::incorrect_datetime_value(val))
                .map(|_| None)?)
        })
    } else {
        Ok(None)
    }
}

#[rpn_fn(capture = [ctx, extra])]
fn cast_year_as_time(
    ctx: &mut EvalContext,
    extra: &RpnFnCallExtra,
    year: &Int,
) -> Result<Option<Time>> {
    let year = *year;
    if year != 0 && (year < MIN_YEAR.into() || year > MAX_YEAR.into()) {
        ctx.handle_truncate_err(Error::truncated_wrong_val("YEAR", year))?;
        return Ok(None);
    }
    let time_type = FieldTypeAccessor::tp(extra.ret_field_type).try_into()?;
    let fsp = extra.ret_field_type.decimal() as i8;
    let time = Time::from_year(ctx, year as u32, fsp, time_type)?;

    Ok(Some(time))
}

// NOTE: in MySQL, casting `Real` to `Time` should cast `Real` to `Int` first.
// However, TiDB casts `Real` to `String` and then parses it into a `Time`.
#[rpn_fn(nullable, capture = [ctx, extra])]
fn cast_real_as_time(
    ctx: &mut EvalContext,
    extra: &RpnFnCallExtra,
    val: Option<&Real>,
) -> Result<Option<Time>> {
    if let Some(val) = val {
        if val.is_zero() {
            Time::zero(
                ctx,
                extra.ret_field_type.get_decimal() as i8,
                extra.ret_field_type.as_accessor().tp().try_into()?,
            )
        } else {
            // Convert `val` to a string first and then parse it as a float string.
            Time::parse(
                ctx,
                &val.to_string(),
                extra.ret_field_type.as_accessor().tp().try_into()?,
                extra.ret_field_type.get_decimal() as i8,
                // Enable round
                true,
            )
        }
        .map(Some)
        .or_else(|e| Ok(ctx.handle_invalid_time_error(e).map(|_| None)?))
    } else {
        Ok(None)
    }
}

#[rpn_fn(nullable, capture = [ctx, extra])]
fn cast_string_as_time(
    ctx: &mut EvalContext,
    extra: &RpnFnCallExtra,
    val: Option<BytesRef>,
) -> Result<Option<Time>> {
    if let Some(val) = val {
        // Parse the bytes as a time string directly.
        Time::parse(
            ctx,
            unsafe { std::str::from_utf8_unchecked(val) },
            extra.ret_field_type.as_accessor().tp().try_into()?,
            extra.ret_field_type.get_decimal() as i8,
            // Enable round
            true,
        )
        .map(Some)
        .or_else(|e| Ok(ctx.handle_invalid_time_error(e).map(|_| None)?))
    } else {
        Ok(None)
    }
}

#[rpn_fn(nullable, capture = [ctx, extra])]
fn cast_decimal_as_time(
    ctx: &mut EvalContext,
    extra: &RpnFnCallExtra,
    val: Option<&Decimal>,
) -> Result<Option<Time>> {
    if let Some(val) = val {
        // Parse the decimal value into a time directly.
Time::parse_from_decimal( ctx, val, extra.ret_field_type.as_accessor().tp().try_into()?, extra.ret_field_type.get_decimal() as i8, // Enable round true, ) .map(Some) .or_else(|e| Ok(ctx.handle_invalid_time_error(e).map(|_| None)?)) } else { Ok(None) } } #[rpn_fn(nullable, capture = [ctx, extra])] fn cast_time_as_time( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<&Time>, ) -> Result<Option<Time>> { if let Some(val) = val { let mut val = *val; val.set_time_type(extra.ret_field_type.as_accessor().tp().try_into()?)?; val.round_frac(ctx, extra.ret_field_type.get_decimal() as i8) .map(Some) .or_else(|e| Ok(ctx.handle_invalid_time_error(e).map(|_| None)?)) } else { Ok(None) } } #[rpn_fn(nullable, capture = [ctx, extra])] fn cast_duration_as_time( ctx: &mut EvalContext, extra: &RpnFnCallExtra, val: Option<&Duration>, ) -> Result<Option<Time>> { if let Some(val) = val { Time::from_duration( ctx, *val, extra.ret_field_type.as_accessor().tp().try_into()?, ) .and_then(|now| now.round_frac(ctx, extra.ret_field_type.get_decimal() as i8)) .map(Some) .or_else(|e| Ok(ctx.handle_invalid_time_error(e).map(|_| None)?)) } else { Ok(None) } } // cast any as json, some cast functions reuse `cast_any_as_any` // // - cast_int_as_json -> cast_any_as_any<Int, Json> // - cast_real_as_json -> cast_any_as_any<Real, Json> // - cast_decimal_as_json -> cast_any_as_any<Decimal, Json> // - cast_time_as_json -> cast_any_as_any<Time, Json> // - cast_duration_as_json -> cast_any_as_any<Duration, Json> #[rpn_fn(nullable)] #[inline] fn cast_bool_as_json(val: Option<&Int>) -> Result<Option<Json>> { match val { None => Ok(None), Some(val) => Ok(Some(Json::from_bool(*val != 0)?)), } } #[rpn_fn(nullable)] #[inline] fn cast_uint_as_json(val: Option<&Int>) -> Result<Option<Json>> { match val { None => Ok(None), Some(val) => Ok(Some(Json::from_u64(*val as u64)?)), } } #[rpn_fn(nullable, capture = [extra])] #[inline] fn cast_string_as_json(extra: &RpnFnCallExtra<'_>, val: Option<BytesRef>) -> Result<Option<Json>> { match val { None => Ok(None), Some(val) => { if extra .ret_field_type .as_accessor() .flag() .contains(FieldTypeFlag::PARSE_TO_JSON) { // if failed, is it because of bug? 
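                // The bytes come from a string column and should already be valid
                // UTF-8, so a failure below would point at an upstream decoding bug.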
let s: String = box_try!(String::from_utf8(val.to_owned())); let val: Json = s.parse()?; Ok(Some(val)) } else { // FIXME: port `JSONBinary` from TiDB to adapt if the bytes is not a valid utf8 string let val = unsafe { String::from_utf8_unchecked(val.to_owned()) }; Ok(Some(Json::from_string(val)?)) } } } } #[rpn_fn(nullable)] #[inline] fn cast_json_as_json(val: Option<JsonRef>) -> Result<Option<Json>> { match val { None => Ok(None), Some(val) => Ok(Some(val.to_owned())), } } #[rpn_fn(nullable, capture = [ctx])] #[inline] fn cast_any_as_any<From: ConvertTo<To> + Evaluable + EvaluableRet, To: Evaluable + EvaluableRet>( ctx: &mut EvalContext, val: Option<&From>, ) -> Result<Option<To>> { match val { None => Ok(None), Some(val) => { let val = val.convert(ctx)?; Ok(Some(val)) } } } #[rpn_fn(nullable, capture = [ctx])] #[inline] fn cast_json_as_any<To: Evaluable + EvaluableRet + ConvertFrom<Json>>( ctx: &mut EvalContext, val: Option<JsonRef>, ) -> Result<Option<To>> { match val { None => Ok(None), Some(val) => { let val = To::convert_from(ctx, val.to_owned())?; Ok(Some(val)) } } } #[rpn_fn(nullable, capture = [ctx])] #[inline] fn cast_any_as_json<From: ConvertTo<Json> + Evaluable + EvaluableRet>( ctx: &mut EvalContext, val: Option<&From>, ) -> Result<Option<Json>> { match val { None => Ok(None), Some(val) => { let val = val.convert(ctx)?; Ok(Some(val)) } } } #[rpn_fn(nullable, capture = [ctx])] #[inline] fn cast_any_as_bytes<From: ConvertTo<Bytes> + Evaluable + EvaluableRet>( ctx: &mut EvalContext, val: Option<&From>, ) -> Result<Option<Bytes>> { match val { None => Ok(None), Some(val) => { let val = val.convert(ctx)?; Ok(Some(val)) } } } #[rpn_fn(nullable, capture = [ctx])] #[inline] fn cast_json_as_bytes(ctx: &mut EvalContext, val: Option<JsonRef>) -> Result<Option<Bytes>> { match val { None => Ok(None), Some(val) => { let val = val.convert(ctx)?; Ok(Some(val)) } } } #[cfg(test)] mod tests { use super::Result; use crate::impl_cast::*; use crate::types::test_util::RpnFnScalarEvaluator; use crate::RpnFnCallExtra; use std::collections::BTreeMap; use std::fmt::{Debug, Display}; use std::sync::Arc; use std::{f32, f64, i64, u64}; use tidb_query_datatype::builder::FieldTypeBuilder; use tidb_query_datatype::codec::convert::produce_dec_with_specified_tp; use tidb_query_datatype::codec::data_type::{Bytes, Int, Real}; use tidb_query_datatype::codec::error::{ ERR_DATA_OUT_OF_RANGE, ERR_DATA_TOO_LONG, ERR_TRUNCATE_WRONG_VALUE, ERR_UNKNOWN, WARN_DATA_TRUNCATED, }; use tidb_query_datatype::codec::mysql::charset::*; use tidb_query_datatype::codec::mysql::decimal::{max_decimal, max_or_min_dec}; use tidb_query_datatype::codec::mysql::{ Decimal, Duration, Json, RoundMode, Time, TimeType, MAX_FSP, MIN_FSP, }; use tidb_query_datatype::expr::Flag; use tidb_query_datatype::expr::{EvalConfig, EvalContext}; use tidb_query_datatype::{Collation, FieldTypeFlag, FieldTypeTp, UNSPECIFIED_LENGTH}; use tikv_util::buffer_vec::BufferVec; use tipb::ScalarFuncSig; fn test_none_with_ctx_and_extra<F, Input, Ret>(func: F) where F: Fn(&mut EvalContext, &RpnFnCallExtra, Option<Input>) -> Result<Option<Ret>>, { let mut ctx = EvalContext::default(); let ret_field_type: FieldType = FieldType::default(); let extra = RpnFnCallExtra { ret_field_type: &ret_field_type, }; let r = func(&mut ctx, &extra, None).unwrap(); assert!(r.is_none()); } fn test_none_with_ctx<F, Input, Ret>(func: F) where F: Fn(&mut EvalContext, Option<Input>) -> Result<Option<Ret>>, { let mut ctx = EvalContext::default(); let r = func(&mut ctx, None).unwrap(); 
assert!(r.is_none()); } fn test_none_with_extra<F, Input, Ret>(func: F) where F: Fn(&RpnFnCallExtra, Option<Input>) -> Result<Option<Ret>>, { let ret_field_type: FieldType = FieldType::default(); let extra = RpnFnCallExtra { ret_field_type: &ret_field_type, }; let r = func(&extra, None).unwrap(); assert!(r.is_none()); } fn test_none_with_metadata<F, Input, Ret>(func: F) where F: Fn(&tipb::InUnionMetadata, Option<Input>) -> Result<Option<Ret>>, { let metadata = make_metadata(true); let r = func(&metadata, None).unwrap(); assert!(r.is_none()); } fn test_none_with_ctx_and_metadata<F, Input, Ret>(func: F) where F: Fn(&mut EvalContext, &tipb::InUnionMetadata, Option<Input>) -> Result<Option<Ret>>, { let mut ctx = EvalContext::default(); let metadata = make_metadata(true); let r = func(&mut ctx, &metadata, None).unwrap(); assert!(r.is_none()); } fn test_none_with_ctx_and_extra_and_metadata<F, Input, Ret>(func: F) where F: Fn( &mut EvalContext, &RpnFnCallExtra, &tipb::InUnionMetadata, Option<Input>, ) -> Result<Option<Ret>>, { let mut ctx = EvalContext::default(); let ret_field_type: FieldType = FieldType::default(); let extra = RpnFnCallExtra { ret_field_type: &ret_field_type, }; let metadata = make_metadata(true); let r = func(&mut ctx, &extra, &metadata, None).unwrap(); assert!(r.is_none()); } fn test_none_with_nothing<F, Input, Ret>(func: F) where F: Fn(Option<Input>) -> Result<Option<Ret>>, { let r = func(None).unwrap(); assert!(r.is_none()); } struct CtxConfig { overflow_as_warning: bool, truncate_as_warning: bool, should_clip_to_zero: bool, in_insert_stmt: bool, in_update_or_delete_stmt: bool, } impl Default for CtxConfig { fn default() -> Self { CtxConfig { overflow_as_warning: false, truncate_as_warning: false, should_clip_to_zero: false, in_insert_stmt: false, in_update_or_delete_stmt: false, } } } impl From<CtxConfig> for EvalContext { fn from(config: CtxConfig) -> Self { let mut flag: Flag = Flag::empty(); if config.overflow_as_warning { flag |= Flag::OVERFLOW_AS_WARNING; } if config.truncate_as_warning { flag |= Flag::TRUNCATE_AS_WARNING; } if config.should_clip_to_zero { flag |= Flag::IN_INSERT_STMT; } if config.in_insert_stmt { flag |= Flag::IN_INSERT_STMT; } if config.in_update_or_delete_stmt { flag |= Flag::IN_UPDATE_OR_DELETE_STMT; } let cfg = Arc::new(EvalConfig::from_flag(flag)); EvalContext::new(cfg) } } fn make_metadata(in_union: bool) -> tipb::InUnionMetadata { let mut metadata = tipb::InUnionMetadata::default(); metadata.set_in_union(in_union); metadata } struct FieldTypeConfig { unsigned: bool, flen: isize, decimal: isize, charset: Option<&'static str>, tp: Option<FieldTypeTp>, collation: Option<Collation>, } impl Default for FieldTypeConfig { fn default() -> Self { FieldTypeConfig { unsigned: false, flen: UNSPECIFIED_LENGTH, decimal: UNSPECIFIED_LENGTH, charset: None, tp: None, collation: None, } } } impl From<FieldTypeConfig> for FieldType { fn from(config: FieldTypeConfig) -> Self { let mut ft = FieldType::default(); if let Some(c) = config.charset { ft.set_charset(String::from(c)); } let fta = ft.as_mut_accessor(); if config.unsigned { fta.set_flag(FieldTypeFlag::UNSIGNED); } fta.set_flen(config.flen); fta.set_decimal(config.decimal); if let Some(tp) = config.tp { fta.set_tp(tp); } if let Some(c) = config.collation { fta.set_collation(c); } ft } } fn make_extra(ret_field_type: &FieldType) -> RpnFnCallExtra { RpnFnCallExtra { ret_field_type } } fn make_log<P: Display, R: Display + Debug>( input: &P, expect: &R, result: &Result<Option<R>>, ) -> String { format!( "input: 
{}, expect: {:?}, output: {:?}", input, expect, result ) } fn check_overflow(ctx: &EvalContext, overflow: bool, log: &str) { if overflow { check_warning(ctx, Some(ERR_DATA_OUT_OF_RANGE), log) } } fn check_truncate(ctx: &EvalContext, truncate: bool, log: &str) { if truncate { check_warning(ctx, Some(ERR_TRUNCATE_WRONG_VALUE), log) } } fn check_warning(ctx: &EvalContext, err_code: Option<i32>, log: &str) { if let Some(x) = err_code { assert_eq!( ctx.warnings.warning_cnt, 1, "log: {}, warnings: {:?}", log, ctx.warnings.warnings ); assert_eq!(ctx.warnings.warnings[0].get_code(), x, "{}", log); } } fn check_result<R: Debug + PartialEq>(expect: Option<&R>, res: &Result<Option<R>>, log: &str) { assert!(res.is_ok(), "{}", log); let res = res.as_ref().unwrap(); if res.is_none() { assert!(expect.is_none(), "{}", log); } else { let res = res.as_ref().unwrap(); assert_eq!(res, expect.unwrap(), "{}", log); } } // comment for all test below: // if there should not be any overflow/truncate, // then should not set ctx with overflow_as_warning/truncated_as_warning flag, // and then if there is unexpected overflow/truncate, // then we will find them in `unwrap` #[test] fn test_int_as_int_others() { test_none_with_nothing(cast_int_as_int_others); let cs = vec![ (i64::MAX, i64::MAX), (i64::MIN, i64::MIN), (u64::MAX as i64, u64::MAX as i64), ]; for (input, expect) in cs { let r = cast_int_as_int_others(Some(&input)); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); } } #[test] fn test_signed_int_as_unsigned_int() { test_none_with_metadata(cast_signed_int_as_unsigned_int); let cs = vec![ // (origin, result, in_union) // in union (-10, 0u64, true), (10, 10u64, true), (i64::MIN, 0u64, true), (i64::MAX, i64::MAX as u64, true), // not in union (-10, (-10i64) as u64, false), (10, 10u64, false), (i64::MIN, i64::MIN as u64, false), (i64::MAX, i64::MAX as u64, false), ]; for (input, expect, in_union) in cs { let metadata = make_metadata(in_union); let r = cast_signed_int_as_unsigned_int(&metadata, Some(&input)); let r = r.map(|x| x.map(|x| x as u64)); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); } } #[test] fn test_real_as_int() { test_none_with_ctx(cast_any_as_any::<Real, Int>); let cs = vec![ // (origin, result, overflow) (-10.4, -10i64, false), (-10.5, -11, false), (10.4, 10, false), (10.5, 11, false), (i64::MAX as f64, i64::MAX, false), ((1u64 << 63) as f64, i64::MAX, false), (i64::MIN as f64, i64::MIN, false), ((1u64 << 63) as f64 + (1u64 << 62) as f64, i64::MAX, true), ((i64::MIN as f64) * 2f64, i64::MIN, true), ]; for (input, result, overflow) in cs { let mut ctx = CtxConfig { overflow_as_warning: true, ..CtxConfig::default() } .into(); let r = cast_any_as_any::<Real, Int>(&mut ctx, Real::new(input).as_ref().ok()); let log = make_log(&input, &result, &r); check_result(Some(&result), &r, log.as_str()); check_overflow(&ctx, overflow, log.as_str()); } } #[test] fn test_enum_as_int() { // TODO: we need to test None case here. let mut buf = BufferVec::new(); buf.push("我好强啊"); buf.push("我太强啦"); let cs = vec![ // (input, expect) (EnumRef::new(&buf, 0), 0), (EnumRef::new(&buf, 1), 1), ]; for (input, expect) in cs { let r = cast_enum_as_int(input); let r = r.map(|x| x.map(|x| x as u64)); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); } } #[test] fn test_set_as_int() { // TODO: we need to test None case here. 
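        // A SET value is encoded as a bit mask over its element list, so 0b01
        // selects only the first element and 0b11 selects both.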
let mut buf = BufferVec::new(); buf.push("我好强啊"); buf.push("我太强啦"); let cs = vec![ // (input, expect) (SetRef::new(&buf, 0b01), 1), (SetRef::new(&buf, 0b11), 3), ]; for (input, expect) in cs { let r = cast_set_as_int(input); let r = r.map(|x| x.map(|x| x as u64)); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); } } #[test] fn test_real_as_uint() { test_none_with_ctx_and_metadata(cast_real_as_uint); // in_union let cs = vec![ // (input, expect) (-10.0, 0u64), (i64::MIN as f64, 0), (10.0, 10u64), (i64::MAX as f64, (1u64 << 63)), ]; for (input, expect) in cs { let mut ctx = EvalContext::default(); let metadata = make_metadata(true); let r = cast_real_as_uint( &mut ctx, &metadata, Some(Real::new(input).as_ref().unwrap()), ); let r = r.map(|x| x.map(|x| x as u64)); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); } // no clip to zero let cs = vec![ // (origin, expect, overflow) (10.5, 11u64, false), (10.4, 10u64, false), ( ((1u64 << 63) + (1u64 << 62)) as f64, ((1u64 << 63) + (1u64 << 62)), false, ), (u64::MAX as f64, u64::MAX, false), ((u64::MAX as f64) * 2f64, u64::MAX, true), (-1f64, -1f64 as i64 as u64, true), ]; for (input, expect, overflow) in cs { let mut ctx = CtxConfig { overflow_as_warning: true, ..CtxConfig::default() } .into(); let metadata = make_metadata(false); let r = cast_real_as_uint(&mut ctx, &metadata, Real::new(input).as_ref().ok()); let r = r.map(|x| x.map(|x| x as u64)); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); check_overflow(&ctx, overflow, log.as_str()) } // should clip to zero let cs: Vec<(f64, u64, bool)> = vec![ // (origin, expect, overflow) (-1f64, 0, true), (i64::MIN as f64, 0, true), ]; for (input, expect, overflow) in cs { let mut ctx = CtxConfig { overflow_as_warning: true, should_clip_to_zero: true, ..CtxConfig::default() } .into(); let metadata = make_metadata(false); let r = cast_real_as_uint( &mut ctx, &metadata, Some(Real::new(input).as_ref().unwrap()), ); let r = r.map(|x| x.map(|x| x as u64)); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); check_overflow(&ctx, overflow, log.as_str()) } } #[test] fn test_cast_string_as_int() { // None { let output: Option<Int> = RpnFnScalarEvaluator::new() .push_param(ScalarValue::Bytes(None)) .evaluate(ScalarFuncSig::CastStringAsInt) .unwrap(); assert_eq!(output, None); } #[derive(Debug)] enum Cond { None, Unsigned, InUnionAndUnsigned, } impl Cond { fn in_union(&self) -> bool { matches!(self, Cond::InUnionAndUnsigned) } fn is_unsigned(&self) -> bool { matches!(self, Cond::InUnionAndUnsigned | Cond::Unsigned) } } let cs: Vec<(&str, i64, Vec<i32>, Cond)> = vec![ // (origin, expect, err_code, condition) // has no prefix `-` ( " 9223372036854775807 ", 9223372036854775807i64, vec![], Cond::None, ), ( "9223372036854775807", 9223372036854775807i64, vec![], Cond::None, ), ( "9223372036854775808", 9223372036854775808u64 as i64, vec![ERR_UNKNOWN], Cond::None, ), ( "9223372036854775808", 9223372036854775808u64 as i64, vec![], Cond::Unsigned, ), ( " 9223372036854775807abc ", 9223372036854775807i64, vec![ERR_TRUNCATE_WRONG_VALUE], Cond::None, ), ( "9223372036854775807abc", 9223372036854775807i64, vec![ERR_TRUNCATE_WRONG_VALUE], Cond::None, ), ( "9223372036854775808abc", 9223372036854775808u64 as i64, vec![ERR_TRUNCATE_WRONG_VALUE, ERR_UNKNOWN], Cond::None, ), ( "9223372036854775808abc", 9223372036854775808u64 as i64, vec![ERR_TRUNCATE_WRONG_VALUE], Cond::Unsigned, ), // 
TODO: there are some cases that have not been covered yet.
            // FIXME: in MySQL, this case will return 18446744073709551615
            // and `show warnings` will show
            // `| Warning | 1292 | Truncated incorrect INTEGER value: '18446744073709551616'`
            // fix `cast_string_as_int` here after TiDB's version is fixed
            // ("18446744073709551616", 18446744073709551615 as i64, Some(ERR_TRUNCATE_WRONG_VALUE) , Cond::Unsigned)
            // FIXME: our `cast_string_as_int`'s error handling is not exactly the same as TiDB's
            // ("18446744073709551616", 18446744073709551615u64 as i64, Some(ERR_TRUNCATE_WRONG_VALUE), Cond::InSelectStmt),

            // has prefix `-`, and is in_union and unsigned
            ("-10", 0, vec![], Cond::InUnionAndUnsigned),
            ("-9223372036854775808", 0, vec![], Cond::InUnionAndUnsigned),
            // has prefix `-`, and is not in_union or not unsigned
            ("-10", -10i64, vec![], Cond::None),
            (
                "-9223372036854775808",
                -9223372036854775808i64,
                vec![],
                Cond::None,
            ),
            // FIXME: our `cast_string_as_int`'s error handling is not exactly the same as TiDB's
            (
                "-9223372036854775809",
                -9223372036854775808i64,
                vec![ERR_TRUNCATE_WRONG_VALUE],
                Cond::None,
            ),
            ("-10", -10i64, vec![ERR_UNKNOWN], Cond::Unsigned),
            (
                "-9223372036854775808",
                -9223372036854775808i64,
                vec![ERR_UNKNOWN],
                Cond::Unsigned,
            ),
            (
                "-9223372036854775809",
                -9223372036854775808i64,
                vec![ERR_TRUNCATE_WRONG_VALUE],
                Cond::Unsigned,
            ),
        ];

        for (input, expected, mut err_code, cond) in cs {
            let (result, ctx) = RpnFnScalarEvaluator::new()
                .context(CtxConfig {
                    overflow_as_warning: true,
                    truncate_as_warning: true,
                    ..CtxConfig::default()
                })
                .metadata(Box::new(make_metadata(cond.in_union())))
                .push_param(ScalarValue::Bytes(Some(input.as_bytes().to_owned())))
                .evaluate_raw(
                    FieldTypeConfig {
                        tp: Some(FieldTypeTp::LongLong),
                        unsigned: cond.is_unsigned(),
                        ..FieldTypeConfig::default()
                    },
                    ScalarFuncSig::CastStringAsInt,
                );
            let output: Option<Int> = result.unwrap().into();
            assert_eq!(
                output.unwrap(),
                expected,
                "input:{:?}, expected:{:?}, cond:{:?}",
                input, expected, cond,
            );
            let mut got_warnings = ctx
                .warnings
                .warnings
                .iter()
                .map(|w| w.get_code())
                .collect::<Vec<i32>>();
            got_warnings.sort_unstable();
            err_code.sort_unstable();
            assert_eq!(
                ctx.warnings.warning_cnt,
                err_code.len(),
                "input:{:?}, expected:{:?}, warnings:{:?}",
                input, expected, got_warnings,
            );
            assert_eq!(got_warnings, err_code);
        }

        // binary literal
        let cases = vec![
            (vec![0x01, 0x02, 0x03], Some(0x010203_i64)),
            (vec![0x01, 0x02, 0x03, 0x4], Some(0x01020304_i64)),
            (
                vec![0x01, 0x02, 0x03, 0x4, 0x05, 0x06, 0x06, 0x06, 0x06],
                None,
            ),
        ];
        for (input, expected) in cases {
            let output: Result<Option<Int>> = RpnFnScalarEvaluator::new()
                .return_field_type(FieldTypeConfig {
                    tp: Some(FieldTypeTp::LongLong),
                    ..FieldTypeConfig::default()
                })
                .push_param_with_field_type(
                    input.clone(),
                    FieldTypeConfig {
                        tp: Some(FieldTypeTp::VarString),
                        collation: Some(Collation::Binary),
                        ..FieldTypeConfig::default()
                    },
                )
                .evaluate(ScalarFuncSig::CastStringAsInt);
            if let Some(exp) = expected {
                assert!(output.is_ok(), "input: {:?}", input);
                assert_eq!(output.unwrap().unwrap(), exp, "input={:?}", input);
            } else {
                assert!(output.is_err());
            }
        }
    }

    #[test]
    fn test_decimal_as_int() {
        test_none_with_ctx(cast_any_as_any::<Decimal, Int>);

        let cs: Vec<(Decimal, i64, Option<i32>)> = vec![
            // (origin, expect, overflow)
            (
                Decimal::from_bytes(b"9223372036854775807")
                    .unwrap()
                    .unwrap(),
                9223372036854775807,
                None,
            ),
            (
                Decimal::from_bytes(b"-9223372036854775808")
                    .unwrap()
                    .unwrap(),
                -9223372036854775808,
                None,
            ),
            (
                Decimal::from_bytes(b"9223372036854775808")
                    .unwrap()
                    .unwrap(),
                9223372036854775807,
                Some(ERR_TRUNCATE_WRONG_VALUE),
            ),
( Decimal::from_bytes(b"-9223372036854775809") .unwrap() .unwrap(), -9223372036854775808, Some(ERR_TRUNCATE_WRONG_VALUE), ), ]; for (input, expect, err_code) in cs { let mut ctx = CtxConfig { overflow_as_warning: true, ..CtxConfig::default() } .into(); let r = cast_any_as_any::<Decimal, Int>(&mut ctx, Some(&input)); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); check_warning(&ctx, err_code, log.as_str()); } } #[test] fn test_decimal_as_uint() { test_none_with_ctx_and_metadata(cast_decimal_as_uint); // in_union let cs: Vec<(Decimal, u64)> = vec![ ( Decimal::from_bytes(b"-9223372036854775808") .unwrap() .unwrap(), 0, ), ( Decimal::from_bytes(b"-9223372036854775809") .unwrap() .unwrap(), 0, ), ( Decimal::from_bytes(b"9223372036854775808") .unwrap() .unwrap(), 9223372036854775808, ), ( Decimal::from_bytes(b"18446744073709551615") .unwrap() .unwrap(), 18446744073709551615, ), ]; for (input, expect) in cs { let mut ctx = EvalContext::default(); let metadata = make_metadata(true); let r = cast_decimal_as_uint(&mut ctx, &metadata, Some(&input)); let r = r.map(|x| x.map(|x| x as u64)); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); } let cs: Vec<(Decimal, u64, Option<i32>)> = vec![ // (input, expect, err_code) (Decimal::from_bytes(b"10").unwrap().unwrap(), 10, None), ( Decimal::from_bytes(b"1844674407370955161") .unwrap() .unwrap(), 1844674407370955161, None, ), ( Decimal::from_bytes(b"-10").unwrap().unwrap(), 0, Some(ERR_TRUNCATE_WRONG_VALUE), ), ( Decimal::from_bytes(b"18446744073709551616") .unwrap() .unwrap(), u64::MAX, Some(ERR_TRUNCATE_WRONG_VALUE), ), ]; for (input, expect, err_code) in cs { let mut ctx = CtxConfig { overflow_as_warning: true, ..CtxConfig::default() } .into(); let metadata = make_metadata(false); let r = cast_decimal_as_uint(&mut ctx, &metadata, Some(&input)); let r = r.map(|x| x.map(|x| x as u64)); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); check_warning(&ctx, err_code, log.as_str()); } } #[test] fn test_time_as_int_and_uint() { let mut ctx = EvalContext::default(); // TODO: add more test case // TODO: add test that make cast_any_as_any::<Time, Int> returning truncated error let cs: Vec<(Time, i64)> = vec![ ( Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14", 0, true).unwrap(), 20000101121314, ), ( Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14.6666", 0, true).unwrap(), 20000101121315, ), // FiXME // Time::parse_utc_datetime("2000-01-01T12:13:14.6666", 4).unwrap().round_frac(DEFAULT_FSP) // will get 2000-01-01T12:13:14, this is a bug // ( // Time::parse_utc_datetime("2000-01-01T12:13:14.6666", 4).unwrap(), // 20000101121315, // ), ]; for (input, expect) in cs { let r = cast_any_as_any::<Time, Int>(&mut ctx, Some(&input)); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); } } #[test] fn test_cast_int_as_time() { let should_pass = vec![ ("0000-00-00 00:00:00", 0), ("2000-01-01 00:00:00", 101), ("2045-00-00 00:00:00", 450_000), ("2059-12-31 00:00:00", 591_231), ("1970-01-01 00:00:00", 700_101), ("1999-12-31 00:00:00", 991_231), ("1000-01-00 00:00:00", 10_000_100), ("2000-01-01 00:00:00", 101_000_000), ("2069-12-31 23:59:59", 691_231_235_959), ("1970-01-01 00:00:00", 700_101_000_000), ("1999-12-31 23:59:59", 991_231_235_959), ("0100-00-00 00:00:00", 1_000_000_000_000), ("1000-01-01 00:00:00", 10_000_101_000_000), ("1999-01-01 00:00:00", 19_990_101_000_000), ]; for (expected, input) in should_pass { let 
actual: Time = RpnFnScalarEvaluator::new() .push_param(input) .return_field_type(FieldTypeBuilder::new().tp(FieldTypeTp::DateTime).build()) .evaluate(ScalarFuncSig::CastIntAsTime) // `Result<Option<_>>` .unwrap() .unwrap(); assert_eq!(actual.to_string(), expected); } let should_fail = vec![ -11111, 1, 100, 700_100, 100_000_000, 100_000_101_000_000, 73, ]; for case in should_fail { let actual = RpnFnScalarEvaluator::new() .push_param(case) .return_field_type(FieldTypeBuilder::new().tp(FieldTypeTp::Date).build()) .evaluate::<Time>(ScalarFuncSig::CastIntAsTime) .unwrap(); assert!(actual.is_none()); } } #[test] fn test_cast_year_as_time() { let normal_cases = vec![ ("2020-00-00 00:00:00", 2020), ("2000-00-00 00:00:00", 2000), ("1999-00-00 00:00:00", 1999), ("2077-00-00 00:00:00", 2077), ("1901-00-00 00:00:00", 1901), ("2155-00-00 00:00:00", 2155), ("0000-00-00 00:00:00", 0), ]; for (expected, input) in normal_cases { let actual = RpnFnScalarEvaluator::new() .push_param_with_field_type(input, FieldTypeTp::Year) .return_field_type(FieldTypeBuilder::new().tp(FieldTypeTp::DateTime).build()) .evaluate::<Time>(ScalarFuncSig::CastIntAsTime) .unwrap() .unwrap(); assert_eq!(actual.to_string(), expected); } let null_cases = vec![ None, Some(10086), Some(1900), Some(2156), Some(i64::MAX), Some(i64::MIN), ]; for input in null_cases { let actual = RpnFnScalarEvaluator::new() .push_param_with_field_type(input, FieldTypeTp::Year) .return_field_type(FieldTypeBuilder::new().tp(FieldTypeTp::DateTime).build()) .context(EvalContext::new(Arc::new(EvalConfig::from_flag( Flag::TRUNCATE_AS_WARNING, )))) .evaluate::<Time>(ScalarFuncSig::CastIntAsTime) .unwrap(); assert!(actual.is_none()); } } #[test] #[allow(clippy::excessive_precision)] fn test_cast_real_time() { let cases = vec![ ("2019-09-16 10:11:12", 190916101112.111, 0), ("2019-09-16 10:11:12", 20190916101112.111, 0), ("2019-09-16 10:11:12", 20190916101112.123, 0), ("2019-09-16 10:11:13", 20190916101112.999, 0), ("0000-00-00 00:00:00", 0.0, 0), ]; for (expected, input, fsp) in cases { let actual: Time = RpnFnScalarEvaluator::new() .push_param(input) .return_field_type( FieldTypeBuilder::new() .tp(FieldTypeTp::DateTime) .decimal(fsp) .build(), ) .evaluate::<Time>(ScalarFuncSig::CastRealAsTime) // `Result<Option<_>>` .unwrap() .unwrap(); assert_eq!(actual.to_string(), expected); } } #[test] fn test_cast_string_as_time() { let cases = vec![ ("2019-09-16 10:11:12", "20190916101112", 0), ("2019-09-16 10:11:12", "190916101112", 0), ("2019-09-16 10:11:01", "19091610111", 0), ("2019-09-16 10:11:00", "1909161011", 0), ("2019-09-16 10:01:00", "190916101", 0), ("1909-12-10 00:00:00", "19091210", 0), ("2020-02-29 10:00:00", "20200229100000", 0), ("2019-09-16 01:00:00", "1909161", 0), ("2019-09-16 00:00:00", "190916", 0), ("2019-09-01 00:00:00", "19091", 0), ("2019-09-16 10:11:12.111", "190916101112.111", 3), ("2019-09-16 10:11:12.111", "20190916101112.111", 3), ("2019-09-16 10:11:12.67", "20190916101112.666", 2), ("2019-09-16 10:11:13.0", "20190916101112.999", 1), ("2019-09-16 00:00:00", "2019-09-16", 0), ("2019-09-16 10:11:12", "2019-09-16 10:11:12", 0), ("2019-09-16 10:11:12", "2019-09-16T10:11:12", 0), ("2019-09-16 10:11:12.7", "2019-09-16T10:11:12.66", 1), ("2019-09-16 10:11:13.0", "2019-09-16T10:11:12.99", 1), ("2020-01-01 00:00:00.0", "2019-12-31 23:59:59.99", 1), ]; for (expected, input, fsp) in cases { let actual: Time = RpnFnScalarEvaluator::new() .push_param(input.as_bytes().to_vec()) .return_field_type( FieldTypeBuilder::new() .tp(FieldTypeTp::DateTime) 
.decimal(fsp) .build(), ) .evaluate::<Time>(ScalarFuncSig::CastStringAsTime) // `Result<Option<_>>` .unwrap() .unwrap(); assert_eq!(actual.to_string(), expected); } } #[test] fn test_time_as_time() { let cases = vec![ // (Timestamp, DateTime) ("2020-02-29 10:00:00.999", "2020-02-29 10:00:01.0", 1), ("2019-09-16 01:00:00.999", "2019-09-16 01:00:01.00", 2), ("2019-09-16 00:00:00.9999", "2019-09-16 00:00:01.0", 1), ]; for (input, expected, fsp) in cases { let mut ctx = EvalContext::default(); let time = Time::parse_timestamp(&mut ctx, input, MAX_FSP, /* Enable round*/ true).unwrap(); let actual: Time = RpnFnScalarEvaluator::new() .push_param(time) .return_field_type( FieldTypeBuilder::new() .tp(FieldTypeTp::DateTime) .decimal(fsp) .build(), ) .evaluate::<Time>(ScalarFuncSig::CastTimeAsTime) // `Result<Option<_>>` .unwrap() .unwrap(); assert_eq!(actual.to_string(), expected); } } #[test] fn test_cast_duration_as_time() { use chrono::Datelike; let cases = vec!["11:30:45.123456", "-35:30:46"]; for case in cases { let mut ctx = EvalContext::default(); let duration = Duration::parse(&mut ctx, case, MAX_FSP).unwrap(); let now = RpnFnScalarEvaluator::new() .push_param(duration) .return_field_type( FieldTypeBuilder::new() .tp(FieldTypeTp::DateTime) .decimal(MAX_FSP as isize) .build(), ) .evaluate::<Time>(ScalarFuncSig::CastDurationAsTime) .unwrap() .unwrap(); let chrono_today = chrono::Utc::now(); let today = now.checked_sub(&mut ctx, duration).unwrap(); assert_eq!(today.year(), chrono_today.year() as u32); assert_eq!(today.month(), chrono_today.month()); assert_eq!(today.day(), chrono_today.day()); assert_eq!(today.hour(), 0); assert_eq!(today.minute(), 0); assert_eq!(today.second(), 0); assert_eq!(today.micro(), 0); } } #[test] fn test_cast_decimal_as_time() { let cases = vec![ ("2019-09-16 10:11:12", "20190916101112", 0), ("2019-09-16 10:11:12", "190916101112", 0), ("1909-12-10 00:00:00", "19091210", 0), ("2020-02-29 10:00:00", "20200229100000", 0), ("2019-09-16 00:00:00", "190916", 0), ("2019-09-16 10:11:12.111", "190916101112.111", 3), ("2019-09-16 10:11:12.111", "20190916101112.111", 3), ("2019-09-16 10:11:12.67", "20190916101112.666", 2), ("2019-09-16 10:11:13.0", "20190916101112.999", 1), ("2001-11-11 00:00:00.0000", "11111.1111", 4), ("0102-11-21 14:11:05.4324", "1021121141105.4324", 4), ("2002-11-21 14:11:05.101", "21121141105.101", 3), ("2000-11-21 14:11:05.799055", "1121141105.799055", 6), ("2000-01-21 14:11:05.123", "121141105.123", 3), ("0114-11-05 00:00:00", "1141105", 0), ("2004-11-05 00:00:00.00", "41105.11", 2), ("2000-11-05 00:00:00.0", "1105.3", 1), ("2000-01-05 00:00:00", "105", 0), ]; for (expected, decimal, fsp) in cases { let decimal: Decimal = decimal.parse().unwrap(); let actual: Time = RpnFnScalarEvaluator::new() .push_param(decimal) .return_field_type( FieldTypeBuilder::new() .tp(FieldTypeTp::DateTime) .decimal(fsp) .build(), ) .evaluate(ScalarFuncSig::CastDecimalAsTime) // `Result<Option<_>>` .unwrap() .unwrap(); assert_eq!(actual.to_string(), expected); } let should_fail = vec![ "19091610111", "1909161011", "190916101", "1909161", "19091", "201705051315111.22", "2011110859.1111", "2011110859.1111", "191203081.1111", "43128.121105", ]; for case in should_fail { let case: Decimal = case.parse().unwrap(); let actual = RpnFnScalarEvaluator::new() .push_param(case) .return_field_type(FieldTypeBuilder::new().tp(FieldTypeTp::DateTime).build()) .evaluate::<Time>(ScalarFuncSig::CastDecimalAsTime) .unwrap(); assert!(actual.is_none()); } } #[test] fn test_duration_as_int() { let 
mut ctx = EvalContext::default(); // TODO: add more test case let cs: Vec<(Duration, i64)> = vec![ (Duration::parse(&mut ctx, "17:51:04.78", 2).unwrap(), 175105), ( Duration::parse(&mut ctx, "-17:51:04.78", 2).unwrap(), -175105, ), (Duration::parse(&mut ctx, "17:51:04.78", 0).unwrap(), 175105), ( Duration::parse(&mut ctx, "-17:51:04.78", 0).unwrap(), -175105, ), ]; for (input, expect) in cs { let mut ctx = CtxConfig { overflow_as_warning: true, ..CtxConfig::default() } .into(); let r = cast_any_as_any::<Duration, Int>(&mut ctx, Some(&input)); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); } } #[test] fn test_json_as_int() { test_none_with_ctx(cast_json_as_any::<Int>); // no overflow let cs = vec![ // (origin, expect, overflow, truncate) ( Json::from_object(BTreeMap::default()).unwrap(), 0, false, true, ), (Json::from_array(vec![]).unwrap(), 0, false, true), (Json::from_i64(10).unwrap(), 10i64, false, false), (Json::from_i64(i64::MAX).unwrap(), i64::MAX, false, false), (Json::from_i64(i64::MIN).unwrap(), i64::MIN, false, false), (Json::from_u64(0).unwrap(), 0, false, false), ( Json::from_u64(u64::MAX).unwrap(), u64::MAX as i64, false, false, ), ( Json::from_f64(i64::MIN as u64 as f64).unwrap(), i64::MAX, false, false, ), ( Json::from_f64(i64::MAX as u64 as f64).unwrap(), i64::MAX, false, false, ), ( Json::from_f64(i64::MIN as u64 as f64).unwrap(), i64::MAX, false, false, ), ( Json::from_f64(i64::MIN as f64).unwrap(), i64::MIN, false, false, ), (Json::from_f64(10.5).unwrap(), 11, false, false), (Json::from_f64(10.4).unwrap(), 10, false, false), (Json::from_f64(-10.4).unwrap(), -10, false, false), (Json::from_f64(-10.5).unwrap(), -11, false, false), ( Json::from_string(String::from("10.0")).unwrap(), 10, false, false, ), (Json::from_bool(true).unwrap(), 1, false, false), (Json::from_bool(false).unwrap(), 0, false, false), (Json::none().unwrap(), 0, false, false), ( Json::from_f64(((1u64 << 63) + (1u64 << 62)) as u64 as f64).unwrap(), i64::MAX, true, false, ), ( Json::from_f64(-((1u64 << 63) as f64 + (1u64 << 62) as f64)).unwrap(), i64::MIN, true, false, ), ]; for (input, expect, overflow, truncate) in cs { let mut ctx = CtxConfig { overflow_as_warning: true, truncate_as_warning: true, ..CtxConfig::default() } .into(); let r = cast_json_as_any::<Int>(&mut ctx, Some(input.as_ref())); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); check_overflow(&ctx, overflow, log.as_str()); check_truncate(&ctx, truncate, log.as_str()) } } #[test] fn test_json_as_uint() { test_none_with_ctx(cast_json_as_uint); // no clip to zero let cs: Vec<(Json, u64, Option<i32>)> = vec![ // (origin, expect, error_code) (Json::from_f64(-1.0).unwrap(), -1.0f64 as i64 as u64, None), (Json::from_string(String::from("10")).unwrap(), 10, None), ( Json::from_string(String::from("+10abc")).unwrap(), 10, Some(ERR_TRUNCATE_WRONG_VALUE), ), ( Json::from_string(String::from("9999999999999999999999999")).unwrap(), u64::MAX, Some(ERR_DATA_OUT_OF_RANGE), ), ( Json::from_f64(2f64 * (u64::MAX as f64)).unwrap(), u64::MAX, Some(ERR_DATA_OUT_OF_RANGE), ), ]; for (input, expect, error_code) in cs { let mut ctx = CtxConfig { overflow_as_warning: true, truncate_as_warning: true, ..CtxConfig::default() } .into(); let r = cast_json_as_uint(&mut ctx, Some(input.as_ref())); let r = r.map(|x| x.map(|x| x as u64)); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); check_warning(&ctx, error_code, log.as_str()); } // should clip to zero 
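        // (`should_clip_to_zero` sets `Flag::IN_INSERT_STMT`, see the
        // `From<CtxConfig>` impl above; under that flag negative inputs are
        // clipped to zero instead of being reinterpreted as unsigned)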
let cs: Vec<(Json, u64, Option<i32>)> = vec![ // (origin, expect, err_code) (Json::from_f64(-1.0).unwrap(), 0, None), ( Json::from_string(String::from("-10")).unwrap(), 0, Some(ERR_DATA_OUT_OF_RANGE), ), (Json::from_string(String::from("10")).unwrap(), 10, None), ( Json::from_string(String::from("+10abc")).unwrap(), 10, Some(ERR_TRUNCATE_WRONG_VALUE), ), ( Json::from_string(String::from("9999999999999999999999999")).unwrap(), u64::MAX, Some(ERR_DATA_OUT_OF_RANGE), ), ( Json::from_f64(2f64 * (u64::MAX as f64)).unwrap(), u64::MAX, Some(ERR_DATA_OUT_OF_RANGE), ), ]; for (input, expect, err_code) in cs { let mut ctx = CtxConfig { overflow_as_warning: true, truncate_as_warning: true, should_clip_to_zero: true, ..CtxConfig::default() } .into(); let r = cast_json_as_uint(&mut ctx, Some(input.as_ref())); let r = r.map(|x| x.map(|x| x as u64)); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); check_warning(&ctx, err_code, log.as_str()); } } #[test] fn test_signed_int_as_signed_real() { test_none_with_nothing(cast_signed_int_as_signed_real); let cs: Vec<(i64, f64)> = vec![ // (input, expect) (i64::MIN, i64::MIN as f64), (0, 0f64), (i64::MAX, i64::MAX as f64), ]; for (input, expect) in cs { let r = cast_signed_int_as_signed_real(Some(&input)); let r = r.map(|x| x.map(|x| x.into_inner())); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); } } #[test] fn test_signed_int_as_unsigned_real() { test_none_with_metadata(cast_signed_int_as_unsigned_real); let cs: Vec<(i64, f64, bool)> = vec![ // (input, expect, in_union) // TODO: add test case of negative int to unsigned real without in_union // (i64::MIN, i64::MIN as u64 as f64, false), // not in union (i64::MAX, i64::MAX as f64, false), (0, 0f64, false), // in union (i64::MIN, 0f64, true), (-1, 0f64, true), (i64::MAX, i64::MAX as f64, true), (0, 0f64, true), ]; for (input, expect, in_union) in cs { let metadata = make_metadata(in_union); let r = cast_signed_int_as_unsigned_real(&metadata, Some(&input)); let r = r.map(|x| x.map(|x| x.into_inner())); let log = format!( "input: {}, expect: {}, in_union: {}", input, expect, in_union ); check_result(Some(&expect), &r, log.as_str()); } } #[test] fn test_unsigned_int_as_signed_or_unsigned_real() { test_none_with_nothing(cast_unsigned_int_as_signed_or_unsigned_real); let cs = vec![ // (input, expect) (0, 0f64), (u64::MAX, u64::MAX as f64), (i64::MAX as u64, i64::MAX as u64 as f64), ]; for (input, expect) in cs { let r = cast_unsigned_int_as_signed_or_unsigned_real(Some(&(input as i64))); let r = r.map(|x| x.map(|x| x.into_inner())); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); } } #[test] fn test_real_as_signed_real() { test_none_with_nothing(cast_real_as_signed_real); let cs = vec![ // (input, expect) (f64::from(f32::MIN), f64::from(f32::MIN)), (f64::from(f32::MAX), f64::from(f32::MAX)), (f64::MIN, f64::MIN), (0f64, 0f64), (f64::MAX, f64::MAX), (i64::MIN as f64, i64::MIN as f64), (i64::MAX as f64, i64::MAX as f64), (u64::MAX as f64, u64::MAX as f64), ]; for (input, expect) in cs { let r = cast_real_as_signed_real(Some(Real::new(input).as_ref().unwrap())); let r = r.map(|x| x.map(|x| x.into_inner())); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); } } #[test] fn test_real_as_unsigned_real() { let cs = vec![ // (input, expect, in_union) // not in union // TODO: add test case of negative real to unsigned real // (-1.0, -1.0, false), // (i64::MIN as f64, 
i64::MIN as f64, false), // (f64::MIN, f64::MIN, false), (u64::MIN as f64, u64::MIN as f64, false), (1.0, 1.0, false), (i64::MAX as f64, i64::MAX as f64, false), (u64::MAX as f64, u64::MAX as f64, false), (f64::MAX, f64::MAX, false), // in union (-1.0, 0.0, true), (i64::MIN as f64, 0.0, true), (u64::MIN as f64, 0.0, true), (f64::MIN, 0.0, true), (1.0, 1.0, true), (i64::MAX as f64, i64::MAX as f64, true), (u64::MAX as f64, u64::MAX as f64, true), (f64::MAX, f64::MAX, true), ]; for (input, expect, in_union) in cs { let metadata = make_metadata(in_union); let r = cast_real_as_unsigned_real(&metadata, Some(Real::new(input).as_ref().unwrap())); let r = r.map(|x| x.map(|x| x.into_inner())); let log = format!( "input: {}, expect: {}, in_union: {}", input, expect, in_union ); check_result(Some(&expect), &r, log.as_str()); } } #[test] fn test_cast_string_as_real() { // None { let output: Option<Real> = RpnFnScalarEvaluator::new() .push_param(ScalarValue::Bytes(None)) .evaluate(ScalarFuncSig::CastStringAsReal) .unwrap(); assert_eq!(output, None); } // signed let ul = UNSPECIFIED_LENGTH; let cs: Vec<(String, f64, isize, isize, bool, bool)> = vec![ // (input, expect, flen, decimal, truncated, overflow) // no special flen and decimal (String::from("99999999"), 99999999f64, ul, ul, false, false), (String::from("1234abc"), 1234f64, ul, ul, true, false), (String::from("-1234abc"), -1234f64, ul, ul, true, false), ( (0..400).map(|_| '9').collect::<String>(), f64::MAX, ul, ul, true, false, ), ( (0..401) .map(|x| if x == 0 { '-' } else { '9' }) .collect::<String>(), f64::MIN, ul, ul, true, false, ), // with special flen and decimal (String::from("99999999"), 99999999f64, 8, 0, false, false), (String::from("99999999"), 99999999f64, 9, 0, false, false), (String::from("99999999"), 9999999f64, 7, 0, false, true), (String::from("99999999"), 999999.99, 8, 2, false, true), (String::from("1234abc"), 0.9f64, 1, 1, true, true), (String::from("-1234abc"), -0.9f64, 1, 1, true, true), ]; for (input, expected, flen, decimal, truncated, overflow) in cs { let (result, ctx) = RpnFnScalarEvaluator::new() .context(CtxConfig { overflow_as_warning: true, truncate_as_warning: true, ..CtxConfig::default() }) .push_param(input.clone().into_bytes()) .evaluate_raw( FieldTypeConfig { unsigned: false, flen, decimal, tp: Some(FieldTypeTp::Double), ..FieldTypeConfig::default() }, ScalarFuncSig::CastStringAsReal, ); let output: Option<Real> = result.unwrap().into(); assert!( (output.unwrap().into_inner() - expected).abs() < std::f64::EPSILON, "input={:?}", input ); let (warning_cnt, warnings) = match (truncated, overflow) { (true, true) => (2, vec![ERR_TRUNCATE_WRONG_VALUE, ERR_DATA_OUT_OF_RANGE]), (true, false) => (1, vec![ERR_TRUNCATE_WRONG_VALUE]), (false, true) => (1, vec![ERR_DATA_OUT_OF_RANGE]), _ => (0, vec![]), }; assert_eq!(ctx.warnings.warning_cnt, warning_cnt); let mut got_warnings = ctx .warnings .warnings .iter() .map(|w| w.get_code()) .collect::<Vec<i32>>(); got_warnings.sort_unstable(); assert_eq!(got_warnings, warnings); } // unsigned let cs: Vec<(String, f64, isize, isize, bool, bool, bool)> = vec![ // (input, expect, flen, decimal, truncated, overflow, in_union) // not in union ( String::from("99999999"), 99999999f64, ul, ul, false, false, false, ), (String::from("1234abc"), 1234f64, ul, ul, true, false, false), ( (0..400).map(|_| '9').collect::<String>(), f64::MAX, ul, ul, true, false, false, ), ( String::from("99999999"), 99999999f64, 8, 0, false, false, false, ), ( String::from("99999999"), 9999999.9, 8, 1, false, 
true, false, ), ( String::from("99999999"), 999999.99, 8, 2, false, true, false, ), (String::from("99999999"), 999999.9, 7, 1, false, true, false), (String::from("1234abc"), 1234.0, 4, 0, true, false, false), (String::from("1234abc"), 999.9, 4, 1, true, true, false), (String::from("1234abc"), 99.99, 4, 2, true, true, false), (String::from("1234abc"), 99.9, 3, 1, true, true, false), (String::from("1234abc"), 9.999, 4, 3, true, true, false), ( String::from("99999999"), 99999999f64, 8, 0, false, false, false, ), ( String::from("99999999"), 9999999.9, 8, 1, false, true, false, ), ( String::from("99999999"), 999999.99, 8, 2, false, true, false, ), (String::from("99999999"), 999999.9, 7, 1, false, true, false), (String::from("1234abc"), 1234.0, 4, 0, true, false, false), (String::from("1234abc"), 999.9, 4, 1, true, true, false), (String::from("1234abc"), 99.99, 4, 2, true, true, false), (String::from("1234abc"), 99.9, 3, 1, true, true, false), (String::from("1234abc"), 9.999, 4, 3, true, true, false), ( (0..400).map(|_| '9').collect::<String>(), f64::MAX, ul, ul, true, false, false, ), ( (0..400).map(|_| '9').collect::<String>(), 9999999999.0, 10, 0, true, true, false, ), ( (0..400).map(|_| '9').collect::<String>(), 999999999.9, 10, 1, true, true, false, ), // TODO // ( // (0..401) // .map(|x| if x == 0 { '-' } else { '9' }) // .collect::<String>(), // 0f64, ul, ul, true, true, false, // ), // ( // String::from("-1234abc"), 0f64, ul, ul, // true, true, false, // ), // (String::from("-1234abc"), 0.0, 4, 0, true, true, false), // (String::from("-1234abc"), 0.0, 4, 1, true, true, false), // (String::from("-1234abc"), 0.0, 4, 2, true, true, false), // (String::from("-1234abc"), 0.0, 3, 1, true, true, false), // (String::from("-1234abc"), 0.0, 4, 3, true, true, false), // in union // in union and neg (String::from("-190"), 0f64, ul, ul, false, false, true), (String::from("-10abc"), 0f64, ul, ul, true, false, true), (String::from("-1234abc"), 0.0, ul, ul, true, false, true), ]; for (input, expected, flen, decimal, truncated, overflow, in_union) in cs { let (result, ctx) = RpnFnScalarEvaluator::new() .context(CtxConfig { overflow_as_warning: true, truncate_as_warning: true, ..CtxConfig::default() }) .metadata(Box::new(make_metadata(in_union))) .push_param(input.clone().into_bytes()) .evaluate_raw( FieldTypeConfig { unsigned: true, flen, decimal, tp: Some(FieldTypeTp::Double), ..FieldTypeConfig::default() }, ScalarFuncSig::CastStringAsReal, ); let output: Option<Real> = result.unwrap().into(); assert!( (output.unwrap().into_inner() - expected).abs() < std::f64::EPSILON, "input:{:?}, expected:{:?}, flen:{:?}, decimal:{:?}, truncated:{:?}, overflow:{:?}, in_union:{:?}", input, expected, flen, decimal, truncated, overflow, in_union ); let (warning_cnt, warnings) = match (truncated, overflow) { (true, true) => (2, vec![ERR_TRUNCATE_WRONG_VALUE, ERR_DATA_OUT_OF_RANGE]), (true, false) => (1, vec![ERR_TRUNCATE_WRONG_VALUE]), (false, true) => (1, vec![ERR_DATA_OUT_OF_RANGE]), _ => (0, vec![]), }; let mut got_warnings = ctx .warnings .warnings .iter() .map(|w| w.get_code()) .collect::<Vec<i32>>(); got_warnings.sort_unstable(); assert_eq!( ctx.warnings.warning_cnt, warning_cnt, "input:{:?}, expected:{:?}, flen:{:?}, decimal:{:?}, truncated:{:?}, overflow:{:?}, in_union:{:?}, warnings:{:?}", input, expected, flen, decimal, truncated, overflow, in_union,got_warnings, ); assert_eq!(got_warnings, warnings); } // not in union, neg let cs: Vec<(String, f64, isize, isize, Vec<i32>)> = vec![ ( (0..401) .map(|x| if x == 0 
{ '-' } else { '9' })
                    .collect::<String>(),
                0f64,
                ul,
                ul,
                vec![ERR_TRUNCATE_WRONG_VALUE, ERR_DATA_OUT_OF_RANGE],
            ),
            (
                String::from("-1234abc"),
                0f64,
                ul,
                ul,
                vec![ERR_TRUNCATE_WRONG_VALUE, ERR_DATA_OUT_OF_RANGE],
            ),
            (
                String::from("-1234abc"),
                0.0,
                4,
                0,
                vec![ERR_TRUNCATE_WRONG_VALUE, ERR_DATA_OUT_OF_RANGE],
            ),
            // the cases below produce 3 warnings each:
            // 1. from getValidFloatPrefix, because of `-1234abc`'s `abc` (ERR_TRUNCATE_WRONG_VALUE)
            // 2. from ProduceFloatWithSpecifiedTp, because of TruncateFloat (ERR_DATA_OUT_OF_RANGE)
            // 3. from ProduceFloatWithSpecifiedTp, because the value is negative but the type is unsigned (ERR_DATA_OUT_OF_RANGE)
            (
                String::from("-1234abc"),
                0.0,
                4,
                1,
                vec![
                    ERR_TRUNCATE_WRONG_VALUE,
                    ERR_DATA_OUT_OF_RANGE,
                    ERR_DATA_OUT_OF_RANGE,
                ],
            ),
            (
                String::from("-1234abc"),
                0.0,
                4,
                2,
                vec![
                    ERR_TRUNCATE_WRONG_VALUE,
                    ERR_DATA_OUT_OF_RANGE,
                    ERR_DATA_OUT_OF_RANGE,
                ],
            ),
            (
                String::from("-1234abc"),
                0.0,
                3,
                1,
                vec![
                    ERR_TRUNCATE_WRONG_VALUE,
                    ERR_DATA_OUT_OF_RANGE,
                    ERR_DATA_OUT_OF_RANGE,
                ],
            ),
            (
                String::from("-1234abc"),
                0.0,
                4,
                3,
                vec![
                    ERR_TRUNCATE_WRONG_VALUE,
                    ERR_DATA_OUT_OF_RANGE,
                    ERR_DATA_OUT_OF_RANGE,
                ],
            ),
        ];

        for (input, expected, flen, decimal, err_codes) in cs {
            let (result, ctx) = RpnFnScalarEvaluator::new()
                .context(CtxConfig {
                    overflow_as_warning: true,
                    truncate_as_warning: true,
                    ..CtxConfig::default()
                })
                .metadata(Box::new(make_metadata(false)))
                .push_param(input.clone().into_bytes())
                .evaluate_raw(
                    FieldTypeConfig {
                        unsigned: true,
                        flen,
                        decimal,
                        tp: Some(FieldTypeTp::Double),
                        ..FieldTypeConfig::default()
                    },
                    ScalarFuncSig::CastStringAsReal,
                );
            let output: Option<Real> = result.unwrap().into();
            assert!(
                (output.unwrap().into_inner() - expected).abs() < std::f64::EPSILON,
                "input={:?}",
                input
            );
            assert_eq!(ctx.warnings.warning_cnt, err_codes.len());
            for (idx, err) in err_codes.iter().enumerate() {
                assert_eq!(
                    ctx.warnings.warnings[idx].get_code(),
                    *err,
                    "input: {:?}",
                    input
                );
            }
        }

        // binary literal
        let cases = vec![
            (vec![0x01, 0x02, 0x03], Some(f64::from(0x010203))),
            (vec![0x01, 0x02, 0x03, 0x4], Some(f64::from(0x01020304))),
            (
                vec![0x01, 0x02, 0x03, 0x4, 0x05, 0x06, 0x06, 0x06, 0x06],
                None,
            ),
        ];
        for (input, expected) in cases {
            let output: Result<Option<Real>> = RpnFnScalarEvaluator::new()
                .metadata(Box::new(make_metadata(false)))
                .return_field_type(FieldTypeConfig {
                    flen: tidb_query_datatype::UNSPECIFIED_LENGTH,
                    decimal: tidb_query_datatype::UNSPECIFIED_LENGTH,
                    tp: Some(FieldTypeTp::Double),
                    ..FieldTypeConfig::default()
                })
                .push_param_with_field_type(
                    input.clone(),
                    FieldTypeConfig {
                        tp: Some(FieldTypeTp::VarString),
                        collation: Some(Collation::Binary),
                        ..FieldTypeConfig::default()
                    },
                )
                .evaluate(ScalarFuncSig::CastStringAsReal);
            if let Some(exp) = expected {
                assert!(output.is_ok(), "input: {:?}", input);
                assert!(
                    (output.unwrap().unwrap().into_inner() - exp).abs() < std::f64::EPSILON,
                    "input={:?}",
                    input
                );
            } else {
                assert!(output.is_err());
            }
        }
    }

    #[test]
    fn test_decimal_as_signed_real() {
        test_none_with_ctx(cast_any_as_any::<Decimal, Real>);

        // Any decimal can be represented by a signed real,
        // so there is no need to check for truncation errors here.
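        // (a MySQL decimal holds at most 65 digits, far below f64::MAX, so the
        // conversion may lose precision but can never overflow)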
let cs = vec![ // (input, expect) (Decimal::from_f64(-10.0).unwrap(), -10.0), (Decimal::from_f64(i64::MIN as f64).unwrap(), i64::MIN as f64), (Decimal::from_f64(i64::MAX as f64).unwrap(), i64::MAX as f64), (Decimal::from_f64(u64::MAX as f64).unwrap(), u64::MAX as f64), ]; for (input, expect) in cs { let mut ctx = EvalContext::default(); let r = cast_any_as_any::<Decimal, Real>(&mut ctx, Some(&input)); let r = r.map(|x| x.map(|x| x.into_inner())); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); } } #[test] fn test_decimal_as_unsigned_real() { test_none_with_ctx_and_metadata(cast_decimal_as_unsigned_real); let cs: Vec<(Decimal, f64, bool, bool)> = vec![ // (origin, expect, in_union, overflow) // not in union (Decimal::from(0), 0.0, false, false), ( Decimal::from(9223372036854775807u64), 9223372036854775807.0, false, false, ), ( Decimal::from_bytes(b"9223372036854775809") .unwrap() .unwrap(), 9223372036854775809.0, false, false, ), // TODO: add test case for negative decimal to unsigned real // in union (Decimal::from(-1023), 0f64, true, false), (Decimal::from(-10), 0f64, true, false), (Decimal::from(i64::MIN), 0f64, true, false), (Decimal::from(1023), 1023.0, true, false), (Decimal::from(10), 10.0, true, false), (Decimal::from(i64::MAX), i64::MAX as f64, true, false), (Decimal::from(u64::MAX), u64::MAX as f64, true, false), ( Decimal::from(1844674407370955161u64), 1844674407370955161u64 as f64, true, false, ), ( Decimal::from_bytes(b"18446744073709551616") .unwrap() .unwrap(), // 18446744073709551616 - u64::MAX==1, // but u64::MAX as f64 == 18446744073709551616 u64::MAX as f64, true, false, ), ]; for (input, expect, in_union, overflow) in cs { let mut ctx = CtxConfig { overflow_as_warning: true, ..CtxConfig::default() } .into(); let metadata = make_metadata(in_union); let r = cast_decimal_as_unsigned_real(&mut ctx, &metadata, Some(&input)); let r = r.map(|x| x.map(|x| x.into_inner())); let log = format!( "input: {}, expect: {}, in_union: {}, expect_overflow: {}, result: {:?}", input, expect, in_union, overflow, r ); check_result(Some(&expect), &r, log.as_str()); check_overflow(&ctx, overflow, log.as_str()); } } #[test] #[allow(clippy::excessive_precision)] fn test_time_as_real() { let mut ctx = EvalContext::default(); test_none_with_ctx(cast_any_as_any::<Time, Real>); // TODO: add more test case let cs = vec![ ( Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14.6666", 6, true).unwrap(), 20000101121314.666600, ), ( Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14.6666", 0, true).unwrap(), 20000101121315.0, ), ( Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14.6666", 3, true).unwrap(), 20000101121314.667, ), ( Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14.6666", 4, true).unwrap(), 20000101121314.6666, ), ]; for (input, expect) in cs { let mut ctx = EvalContext::default(); let r = cast_any_as_any::<Time, Real>(&mut ctx, Some(&input)); let r = r.map(|x| x.map(|x| x.into_inner())); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); } } #[test] fn test_duration_as_real() { let mut ctx = EvalContext::default(); // TODO: add more test case let cs = vec![ // (input, expect) ( Duration::parse(&mut ctx, "17:51:04.78", 2).unwrap(), 175104.78, ), ( Duration::parse(&mut ctx, "-17:51:04.78", 2).unwrap(), -175104.78, ), ( Duration::parse(&mut ctx, "17:51:04.78", 0).unwrap(), 175105.0, ), ( Duration::parse(&mut ctx, "-17:51:04.78", 0).unwrap(), -175105.0, ), ]; for (input, expect) in cs { let mut ctx = 
EvalContext::default();
            let r = cast_any_as_any::<Duration, Real>(&mut ctx, Some(&input));
            let r = r.map(|x| x.map(|x| x.into_inner()));
            let log = make_log(&input, &expect, &r);
            check_result(Some(&expect), &r, log.as_str());
        }
    }

    #[test]
    fn test_json_as_real() {
        let cs: Vec<(Json, f64, Option<i32>)> = vec![
            // (input, expect, err_code)
            (Json::from_object(BTreeMap::default()).unwrap(), 0f64, None),
            (Json::from_array(vec![]).unwrap(), 0f64, None),
            (Json::from_i64(10).unwrap(), 10f64, None),
            (Json::from_i64(i64::MAX).unwrap(), i64::MAX as f64, None),
            (Json::from_i64(i64::MIN).unwrap(), i64::MIN as f64, None),
            (Json::from_u64(0).unwrap(), 0f64, None),
            (Json::from_u64(u64::MAX).unwrap(), u64::MAX as f64, None),
            (Json::from_f64(f64::MAX).unwrap(), f64::MAX, None),
            (Json::from_f64(f64::MIN).unwrap(), f64::MIN, None),
            (Json::from_string(String::from("10.0")).unwrap(), 10.0, None),
            (
                Json::from_string(String::from("-10.0")).unwrap(),
                -10.0,
                None,
            ),
            (Json::from_bool(true).unwrap(), 1f64, None),
            (Json::from_bool(false).unwrap(), 0f64, None),
            (Json::none().unwrap(), 0f64, None),
            (
                Json::from_string((0..500).map(|_| '9').collect::<String>()).unwrap(),
                f64::MAX,
                Some(ERR_TRUNCATE_WRONG_VALUE),
            ),
            (
                Json::from_string(
                    (0..500)
                        .map(|x| if x == 0 { '-' } else { '9' })
                        .collect::<String>(),
                )
                .unwrap(),
                f64::MIN,
                Some(ERR_TRUNCATE_WRONG_VALUE),
            ),
        ];
        for (input, expect, err_code) in cs {
            let mut ctx = CtxConfig {
                truncate_as_warning: true,
                ..CtxConfig::default()
            }
            .into();
            let r = cast_json_as_any::<Real>(&mut ctx, Some(input.as_ref()));
            let r = r.map(|x| x.map(|x| x.into_inner()));
            let log = make_log(&input, &expect, &r);
            check_result(Some(&expect), &r, log.as_str());
            check_warning(&ctx, err_code, log.as_str());
        }
    }

    /// base_cs:
    /// a vector of (T, the bytes T converts to before any further handling done
    /// inside `test_as_string_helper`, and a string form of T for debug output);
    /// the `base_cs` vector itself should not be empty.
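    ///
    /// For example, a case for `Int` can be built as
    /// `(&i64::MAX, i64::MAX.to_string().into_bytes(), i64::MAX.to_string())`,
    /// which is how `test_int_as_string` below constructs its cases.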
#[allow(clippy::type_complexity)] fn test_as_string_helper<T: Clone, FnCast>( base_cs: Vec<(T, Vec<u8>, String)>, cast_func: FnCast, func_name: &str, ) where FnCast: Fn(&mut EvalContext, &RpnFnCallExtra, Option<T>) -> Result<Option<Bytes>>, { #[derive(Clone, Copy)] enum FlenType { Eq, LessOne, ExtraOne, Unspecified, } let cs: Vec<(FlenType, bool, &str, FieldTypeTp, Collation, Option<i32>)> = vec![ // (flen_type, pad_zero, charset, tp, collation, err_code) // normal, flen==str.len ( FlenType::Eq, false, CHARSET_BIN, FieldTypeTp::String, Collation::Binary, None, ), ( FlenType::Eq, false, CHARSET_UTF8, FieldTypeTp::String, Collation::Binary, None, ), ( FlenType::Eq, false, CHARSET_UTF8MB4, FieldTypeTp::String, Collation::Binary, None, ), ( FlenType::Eq, false, CHARSET_ASCII, FieldTypeTp::String, Collation::Binary, None, ), ( FlenType::Eq, false, CHARSET_LATIN1, FieldTypeTp::String, Collation::Binary, None, ), // normal, flen==UNSPECIFIED_LENGTH ( FlenType::Unspecified, false, CHARSET_BIN, FieldTypeTp::String, Collation::Binary, None, ), ( FlenType::Unspecified, false, CHARSET_UTF8, FieldTypeTp::String, Collation::Binary, None, ), ( FlenType::Unspecified, false, CHARSET_UTF8MB4, FieldTypeTp::String, Collation::Binary, None, ), ( FlenType::Unspecified, false, CHARSET_ASCII, FieldTypeTp::String, Collation::Binary, None, ), ( FlenType::Unspecified, false, CHARSET_LATIN1, FieldTypeTp::String, Collation::Binary, None, ), // branch 1 of ProduceStrWithSpecifiedTp // not bin_str, so no pad_zero ( FlenType::LessOne, false, CHARSET_UTF8, FieldTypeTp::String, Collation::Utf8Mb4BinNoPadding, Some(ERR_DATA_TOO_LONG), ), ( FlenType::LessOne, false, CHARSET_UTF8MB4, FieldTypeTp::String, Collation::Utf8Mb4BinNoPadding, Some(ERR_DATA_TOO_LONG), ), ( FlenType::Eq, false, CHARSET_UTF8, FieldTypeTp::String, Collation::Utf8Mb4BinNoPadding, None, ), ( FlenType::Eq, false, CHARSET_UTF8MB4, FieldTypeTp::String, Collation::Utf8Mb4BinNoPadding, None, ), ( FlenType::ExtraOne, false, CHARSET_UTF8, FieldTypeTp::String, Collation::Utf8Mb4BinNoPadding, None, ), ( FlenType::ExtraOne, false, CHARSET_UTF8MB4, FieldTypeTp::String, Collation::Utf8Mb4BinNoPadding, None, ), ( FlenType::ExtraOne, false, CHARSET_UTF8, FieldTypeTp::String, Collation::Utf8Mb4BinNoPadding, None, ), ( FlenType::ExtraOne, false, CHARSET_UTF8MB4, FieldTypeTp::String, Collation::Utf8Mb4BinNoPadding, None, ), // bin_str, so need pad_zero ( FlenType::ExtraOne, true, CHARSET_UTF8, FieldTypeTp::String, Collation::Binary, None, ), ( FlenType::ExtraOne, true, CHARSET_UTF8MB4, FieldTypeTp::String, Collation::Binary, None, ), // branch 2 of ProduceStrWithSpecifiedTp // branch 2 need s.len>flen, so never need pad_zero ( FlenType::LessOne, false, CHARSET_ASCII, FieldTypeTp::String, Collation::Utf8Mb4BinNoPadding, Some(ERR_DATA_TOO_LONG), ), ( FlenType::LessOne, false, CHARSET_LATIN1, FieldTypeTp::String, Collation::Utf8Mb4BinNoPadding, Some(ERR_DATA_TOO_LONG), ), ( FlenType::LessOne, false, CHARSET_BIN, FieldTypeTp::String, Collation::Utf8Mb4BinNoPadding, Some(ERR_DATA_TOO_LONG), ), // branch 3 of ProduceStrWithSpecifiedTp , // will never be reached, // because padZero param is always false ]; for (input, bytes, debug_str) in base_cs { for (flen_type, pad_zero, charset, tp, collation, err_code) in cs.iter() { let mut ctx = CtxConfig { truncate_as_warning: true, ..CtxConfig::default() } .into(); let res_len = bytes.len(); let flen = match flen_type { FlenType::Eq => res_len as isize, FlenType::LessOne => { if res_len == 0 { continue; } else { (res_len - 1) as isize } 
} FlenType::ExtraOne => (res_len + 1) as isize, FlenType::Unspecified => UNSPECIFIED_LENGTH, }; let rft = FieldTypeConfig { flen, charset: Some(charset), tp: Some(*tp), collation: Some(*collation), ..FieldTypeConfig::default() } .into(); let extra = make_extra(&rft); let r = cast_func(&mut ctx, &extra, Some(input.clone())); let mut expect = bytes.clone(); if *pad_zero && flen > expect.len() as isize { expect.extend((expect.len()..flen as usize).map(|_| 0u8)); } else if flen != UNSPECIFIED_LENGTH { expect.truncate(flen as usize); } let log = format!( "func: {:?}, input: {}, expect: {:?}, flen: {}, \ charset: {}, field_type: {}, collation: {}, output: {:?}", func_name, debug_str, &expect, flen, charset, tp, collation, &r ); check_result(Some(&expect), &r, log.as_str()); check_warning(&ctx, *err_code, log.as_str()); } } } #[test] fn test_int_as_string() { test_none_with_ctx_and_extra(cast_any_as_string::<Int>); let cs: Vec<(&i64, Vec<u8>, String)> = vec![ ( &i64::MAX, i64::MAX.to_string().into_bytes(), i64::MAX.to_string(), ), ( &i64::MIN, i64::MIN.to_string().into_bytes(), i64::MIN.to_string(), ), ]; test_as_string_helper(cs, cast_any_as_string::<Int>, "cast_any_as_string::<Int>"); } fn helper_get_cs_ref<U, V: Clone, W: Clone>(cs: &[(U, V, W)]) -> Vec<(&U, V, W)> { cs.iter() .map(|(u, v, w)| (u, v.clone(), w.clone())) .collect() } #[test] fn test_uint_as_string() { test_none_with_ctx_and_extra(cast_uint_as_string); let cs: Vec<(u64, Vec<u8>, String)> = vec![ ( i64::MAX as u64, (i64::MAX as u64).to_string().into_bytes(), (i64::MAX as u64).to_string(), ), ( i64::MIN as u64, (i64::MIN as u64).to_string().into_bytes(), (i64::MIN as u64).to_string(), ), ( u64::MAX, u64::MAX.to_string().into_bytes(), u64::MAX.to_string(), ), (0u64, 0u64.to_string().into_bytes(), 0u64.to_string()), ]; let ref_cs = helper_get_cs_ref(&cs); test_as_string_helper( ref_cs, |ctx, extra, val| { let val = val.map(|x| *x as i64); cast_uint_as_string(ctx, extra, val.as_ref()) }, "cast_uint_as_string", ); } #[test] fn test_year_as_string() { let cs: Vec<(i64, Vec<u8>, String)> = vec![ (0, b"0000".to_vec(), "0000".to_string()), (2000, b"2000".to_vec(), "2000".to_string()), ]; let ref_cs = helper_get_cs_ref(&cs); test_as_string_helper( ref_cs, |ctx, extra, val| { let val = val.map(|x| *x as i64); cast_year_as_string(ctx, extra, &val.unwrap()) }, "cast_year_as_string", ); } #[test] fn test_float_real_as_string() { test_none_with_ctx_and_extra(cast_float_real_as_string); let cs: Vec<(f32, Vec<u8>, String)> = vec![ ( f32::MAX, f32::MAX.to_string().into_bytes(), f32::MAX.to_string(), ), (1.0f32, 1.0f32.to_string().into_bytes(), 1.0f32.to_string()), ( 1.1113f32, 1.1113f32.to_string().into_bytes(), 1.1113f32.to_string(), ), (0.1f32, 0.1f32.to_string().into_bytes(), 0.1f32.to_string()), ]; let ref_cs = helper_get_cs_ref(&cs); test_as_string_helper( ref_cs, |ctx, extra, val| { cast_float_real_as_string( ctx, extra, val.map(|x| Real::new(f64::from(*x)).unwrap()).as_ref(), ) }, "cast_float_real_as_string", ); } #[test] fn test_double_real_as_string() { test_none_with_ctx_and_extra(cast_any_as_string::<Real>); let cs: Vec<(f64, Vec<u8>, String)> = vec![ ( f64::from(f32::MAX), (f64::from(f32::MAX)).to_string().into_bytes(), f64::from(f32::MAX).to_string(), ), ( f64::from(f32::MIN), (f64::from(f32::MIN)).to_string().into_bytes(), f64::from(f32::MIN).to_string(), ), ( f64::MIN, f64::MIN.to_string().into_bytes(), f64::MIN.to_string(), ), ( f64::MAX, f64::MAX.to_string().into_bytes(), f64::MAX.to_string(), ), (1.0f64, 
1.0f64.to_string().into_bytes(), 1.0f64.to_string()), ( 1.1113f64, 1.1113f64.to_string().into_bytes(), 1.1113f64.to_string(), ), (0.1f64, 0.1f64.to_string().into_bytes(), 0.1f64.to_string()), ]; let ref_cs = helper_get_cs_ref(&cs); test_as_string_helper( ref_cs, |ctx, extra, val| { cast_any_as_string::<Real>(ctx, extra, val.map(|x| Real::new(*x).unwrap()).as_ref()) }, "cast_any_as_string::<Real>", ); } #[test] fn test_string_as_string() { test_none_with_ctx_and_extra(cast_string_as_string); let test_vec_1 = Vec::from(b"".as_ref()); let test_vec_2 = (0..1024).map(|_| b'0').collect::<Vec<u8>>(); let cs: Vec<(BytesRef, Vec<u8>, String)> = vec![ ( test_vec_1.as_slice(), Vec::from(b"".as_ref()), String::from("<empty-str>"), ), ( test_vec_2.as_slice(), (0..1024).map(|_| b'0').collect::<Vec<u8>>(), String::from("1024 zeros('0')"), ), ]; test_as_string_helper(cs, cast_string_as_string, "cast_string_as_string"); } #[test] fn test_decimal_as_string() { test_none_with_ctx_and_extra(cast_any_as_string::<Decimal>); let cs: Vec<(Decimal, Vec<u8>, String)> = vec![ ( Decimal::from(i64::MAX), i64::MAX.to_string().into_bytes(), i64::MAX.to_string(), ), ( Decimal::from(i64::MIN), i64::MIN.to_string().into_bytes(), i64::MIN.to_string(), ), ( Decimal::from(u64::MAX), u64::MAX.to_string().into_bytes(), u64::MAX.to_string(), ), ( Decimal::from_f64(0.0).unwrap(), 0.0.to_string().into_bytes(), 0.0.to_string(), ), ( Decimal::from_f64(i64::MAX as f64).unwrap(), (i64::MAX as f64).to_string().into_bytes(), (i64::MAX as f64).to_string(), ), ( Decimal::from_f64(i64::MIN as f64).unwrap(), (i64::MIN as f64).to_string().into_bytes(), (i64::MIN as f64).to_string(), ), ( Decimal::from_f64(u64::MAX as f64).unwrap(), (u64::MAX as f64).to_string().into_bytes(), (u64::MAX as f64).to_string(), ), ( Decimal::from_bytes(b"999999999999999999999999") .unwrap() .unwrap(), Vec::from(b"999999999999999999999999".as_ref()), String::from("999999999999999999999999"), ), ]; let ref_cs = helper_get_cs_ref(&cs); test_as_string_helper( ref_cs, cast_any_as_string::<Decimal>, "cast_any_as_string::<Decimal>", ); } #[test] fn test_time_as_string() { test_none_with_ctx_and_extra(cast_any_as_string::<Time>); let mut ctx = EvalContext::default(); // TODO: add more test case let cs: Vec<(Time, Vec<u8>, String)> = vec![ ( Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14", 0, true).unwrap(), "2000-01-01 12:13:14".to_string().into_bytes(), "2000-01-01 12:13:14".to_string(), ), ( Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14.6666", 0, true).unwrap(), "2000-01-01 12:13:15".to_string().into_bytes(), "2000-01-01 12:13:15".to_string(), ), ( Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14.6666", 3, true).unwrap(), "2000-01-01 12:13:14.667".to_string().into_bytes(), "2000-01-01 12:13:14.667".to_string(), ), ( Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14.6666", 4, true).unwrap(), "2000-01-01 12:13:14.6666".to_string().into_bytes(), "2000-01-01 12:13:14.6666".to_string(), ), ( Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14.6666", 6, true).unwrap(), "2000-01-01 12:13:14.666600".to_string().into_bytes(), "2000-01-01 12:13:14.666600".to_string(), ), ]; let ref_cs = helper_get_cs_ref(&cs); test_as_string_helper( ref_cs, cast_any_as_string::<Time>, "cast_any_as_string::<Time>", ); } #[test] fn test_duration_as_string() { test_none_with_ctx_and_extra(cast_any_as_string::<Duration>); let mut ctx = EvalContext::default(); let cs = vec![ ( Duration::parse(&mut ctx, "17:51:04.78", 2).unwrap(), "17:51:04.78".to_string().into_bytes(), 
"17:51:04.78".to_string(), ), ( Duration::parse(&mut ctx, "-17:51:04.78", 2).unwrap(), "-17:51:04.78".to_string().into_bytes(), "-17:51:04.78".to_string(), ), ( Duration::parse(&mut ctx, "17:51:04.78", 0).unwrap(), "17:51:05".to_string().into_bytes(), "17:51:05".to_string(), ), ( Duration::parse(&mut ctx, "-17:51:04.78", 0).unwrap(), "-17:51:05".to_string().into_bytes(), "-17:51:05".to_string(), ), ]; let ref_cs = helper_get_cs_ref(&cs); test_as_string_helper( ref_cs, cast_any_as_string::<Duration>, "cast_any_as_string::<Duration>", ); } #[test] fn test_json_as_string() { test_none_with_ctx(cast_json_as_bytes); // FIXME: this case is not exactly same as TiDB's, // such as(left is TiKV, right is TiDB) // f64::MIN => "1.7976931348623157e308", "1.7976931348623157e+308", // f64::MAX => "-1.7976931348623157e308", "-1.7976931348623157e+308", // f32::MIN as f64 => "3.4028234663852886e38", "3.4028234663852886e+38", // f32::MAX as f64 => "-3.4028234663852886e38", "-3.4028234663852886e+38", // i64::MIN as f64 => "-9.223372036854776e18", "-9223372036854776000", // i64::MAX as f64 => "9.223372036854776e18", "9223372036854776000", // u64::MAX as f64 => "1.8446744073709552e19", "18446744073709552000", let cs = vec![ ( Json::from_object(BTreeMap::default()).unwrap(), "{}".to_string(), ), (Json::from_array(vec![]).unwrap(), "[]".to_string()), (Json::from_i64(10).unwrap(), "10".to_string()), (Json::from_i64(i64::MAX).unwrap(), i64::MAX.to_string()), (Json::from_i64(i64::MIN).unwrap(), i64::MIN.to_string()), (Json::from_u64(0).unwrap(), "0".to_string()), (Json::from_u64(u64::MAX).unwrap(), u64::MAX.to_string()), (Json::from_f64(f64::MIN).unwrap(), format!("{:e}", f64::MIN)), (Json::from_f64(f64::MAX).unwrap(), format!("{:e}", f64::MAX)), ( Json::from_f64(f64::from(f32::MIN)).unwrap(), format!("{:e}", f64::from(f32::MIN)), ), ( Json::from_f64(f64::from(f32::MAX)).unwrap(), format!("{:e}", f64::from(f32::MAX)), ), ( Json::from_f64(i64::MIN as f64).unwrap(), format!("{:e}", i64::MIN as f64), ), ( Json::from_f64(i64::MAX as f64).unwrap(), format!("{:e}", i64::MAX as f64), ), ( Json::from_f64(u64::MAX as f64).unwrap(), format!("{:e}", u64::MAX as f64), ), (Json::from_f64(10.5).unwrap(), "10.5".to_string()), (Json::from_f64(10.4).unwrap(), "10.4".to_string()), (Json::from_f64(-10.4).unwrap(), "-10.4".to_string()), (Json::from_f64(-10.5).unwrap(), "-10.5".to_string()), ( Json::from_string(String::from("10.0")).unwrap(), r#""10.0""#.to_string(), ), (Json::from_bool(true).unwrap(), "true".to_string()), (Json::from_bool(false).unwrap(), "false".to_string()), (Json::none().unwrap(), "null".to_string()), ]; for (input, expect) in cs { let mut ctx = EvalContext::default(); let r = cast_json_as_bytes(&mut ctx, Some(input.as_ref())); let r = r.map(|x| x.map(|x| unsafe { String::from_utf8_unchecked(x) })); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); } } macro_rules! 
cast_closure_with_metadata {
    ($cast_fn:expr) => {
        |ctx, extra, _, val| $cast_fn(ctx, extra, val)
    };
}

/// base_cs:
/// - (cast_func_input, in_union, is_res_unsigned, base_result)
/// - base_result is the result that **should** be produced by
///   the logic of the cast func before `produce_dec_with_specified_tp`
fn test_as_decimal_helper<T: Clone, FnCast, FnToStr>(
    base_cs: Vec<(T, bool, bool, Decimal)>,
    cast_func: FnCast,
    input_as_debug_str_func: FnToStr,
    func_name: &str,
) where
    FnCast: Fn(
        &mut EvalContext,
        &RpnFnCallExtra,
        &tipb::InUnionMetadata,
        Option<&T>,
    ) -> Result<Option<Decimal>>,
    FnToStr: Fn(&T) -> String,
{
    #[derive(Clone, Copy, Debug)]
    #[allow(clippy::enum_variant_names)]
    enum Cond {
        TargetIntPartLenLessThanOriginIntPartLen,
        TargetDecimalBiggerThanOriginDecimal,
        TargetDecimalLessThanOriginDecimal,
    }

    #[derive(Clone, Copy, Debug)]
    enum Sign {
        Positive,
        Negative,
    }

    #[derive(Clone, Copy, Debug)]
    enum ResType {
        Zero,
        Same,
        TruncateToMax,
        TruncateToMin,
        Round,
    }

    let cs = vec![
        // (
        //   cond, sign, is_unsigned, res_type, warning_err_code,
        //   in_dml (InInsertStmt || InUpdateStmt || InDeleteStmt),
        //   overflow_as_warning, truncate_as_warning
        // )
        //
        // origin_flen and origin_decimal are derived from base_res below,
        // so the reader always knows the flen and decimal of the input decimal;
        // res_flen and res_decimal are never UNSPECIFIED_LENGTH.
        //
        // origin is not zero, but res's int part len < origin's int part len
        (Cond::TargetIntPartLenLessThanOriginIntPartLen, Sign::Positive, false, ResType::TruncateToMax, Some(ERR_DATA_OUT_OF_RANGE), false, true, false),
        (Cond::TargetIntPartLenLessThanOriginIntPartLen, Sign::Negative, false, ResType::TruncateToMin, Some(ERR_DATA_OUT_OF_RANGE), false, true, false),
        // origin_decimal < res_decimal
        (Cond::TargetDecimalBiggerThanOriginDecimal, Sign::Positive, false, ResType::Same, None, false, false, false),
        (Cond::TargetDecimalBiggerThanOriginDecimal, Sign::Positive, false, ResType::Same, None, true, false, false),
        (Cond::TargetDecimalBiggerThanOriginDecimal, Sign::Negative, false, ResType::Same, None, false, false, false),
        (Cond::TargetDecimalBiggerThanOriginDecimal, Sign::Positive, false, ResType::Same, None, true, false, false),
        (Cond::TargetDecimalBiggerThanOriginDecimal, Sign::Positive, true, ResType::Same, None, false, false, false),
        (Cond::TargetDecimalBiggerThanOriginDecimal, Sign::Positive, true, ResType::Same, None, true, false, false),
        (Cond::TargetDecimalBiggerThanOriginDecimal, Sign::Negative, true, ResType::Zero, None, false, false, false),
        (Cond::TargetDecimalBiggerThanOriginDecimal, Sign::Negative, true, ResType::Zero, None, true, false, false),
        // origin_decimal > res_decimal
        (Cond::TargetDecimalLessThanOriginDecimal, Sign::Positive, false, ResType::Round, Some(WARN_DATA_TRUNCATED), false, false, true),
        (Cond::TargetDecimalLessThanOriginDecimal, Sign::Positive, false, ResType::Round, Some(WARN_DATA_TRUNCATED), true, false, false),
        (Cond::TargetDecimalLessThanOriginDecimal, Sign::Negative, false, ResType::Round, Some(WARN_DATA_TRUNCATED), false, false, true),
        (Cond::TargetDecimalLessThanOriginDecimal, Sign::Negative, false, ResType::Round, Some(WARN_DATA_TRUNCATED), true, false, true),
        (Cond::TargetDecimalLessThanOriginDecimal, Sign::Positive, true, ResType::Round, Some(WARN_DATA_TRUNCATED), false, false, true),
        (Cond::TargetDecimalLessThanOriginDecimal, Sign::Positive, true, ResType::Round, Some(WARN_DATA_TRUNCATED), true, false, false),
        (Cond::TargetDecimalLessThanOriginDecimal, Sign::Negative, true, ResType::Zero, Some(WARN_DATA_TRUNCATED), false, false, true),
        (Cond::TargetDecimalLessThanOriginDecimal, Sign::Negative, true, ResType::Zero, Some(WARN_DATA_TRUNCATED), true, false, false),
        // TODO: add a test case for Decimal::round failure
    ];

    for (input, in_union, is_res_unsigned, base_res) in base_cs {
        for (
            cond,
            sign,
            is_unsigned,
            res_type,
            mut warning_err_code,
            in_dml,
            mut overflow_as_warning,
            mut truncate_as_warning,
        ) in cs.clone()
        {
            let (origin_flen, origin_decimal) = base_res.prec_and_frac();

            // Some test cases in `cs` only apply to an unsigned result or only to
            // a signed result, and some only to a negative or positive base_res.
            //
            // Every base case above has both a negative and a positive variant,
            // so if the signedness or sign doesn't match base_res, we can skip it.
            if is_res_unsigned != is_unsigned {
                continue;
            }
            let base_res = match sign {
                Sign::Positive => {
                    if base_res.is_negative() {
                        continue;
                    } else {
                        base_res
                    }
                }
                Sign::Negative => {
                    if base_res.is_negative() {
                        base_res
                    } else {
                        continue;
                    }
                }
            };

            let (res_flen, res_decimal) = match cond {
                Cond::TargetIntPartLenLessThanOriginIntPartLen => {
                    if origin_flen - origin_decimal == 0 || origin_flen <= 1 {
                        continue;
                    }
                    (origin_flen - 1, origin_decimal)
                }
                Cond::TargetDecimalBiggerThanOriginDecimal => {
                    (origin_flen + 1, origin_decimal + 1)
                }
                Cond::TargetDecimalLessThanOriginDecimal => {
                    if origin_decimal == 0 || origin_flen <= 1 {
                        continue;
                    }

                    // TODO: if a test case for Decimal::round failure is added,
                    // check whether this setting is still right.
                    let res = base_res
                        .clone()
                        .round((origin_decimal - 1) as i8, RoundMode::HalfEven);
                    if res.is_zero() {
                        truncate_as_warning = false;
                        overflow_as_warning = false;
                        warning_err_code = None;
                    }

                    (origin_flen - 1, origin_decimal - 1)
                }
            };
            let expect = match res_type {
                ResType::Zero => Decimal::zero(),
                ResType::Same => base_res,
                ResType::TruncateToMax => max_decimal(res_flen as u8, res_decimal as u8),
                ResType::TruncateToMin => {
                    max_or_min_dec(true, res_flen as u8, res_decimal as u8)
                }
                ResType::Round => {
                    let r = base_res
                        .clone()
                        .round(res_decimal as i8, RoundMode::HalfEven)
                        .unwrap();
                    if r == base_res {
                        overflow_as_warning = false;
                        truncate_as_warning = false;
                        warning_err_code = None;
                    }
                    r
                }
            };

            let ctx_in_dml_flag = vec![Flag::IN_INSERT_STMT, Flag::IN_UPDATE_OR_DELETE_STMT];
            for in_dml_flag in ctx_in_dml_flag {
                let (res_flen, res_decimal) = (res_flen as isize, res_decimal as isize);
                let rft = FieldTypeConfig {
                    unsigned: is_unsigned,
                    flen: res_flen,
                    decimal: res_decimal,
                    ..FieldTypeConfig::default()
                }
                .into();
                let metadata = make_metadata(in_union);
                let extra = make_extra(&rft);

                let mut ctx = CtxConfig {
                    overflow_as_warning,
                    truncate_as_warning,
                    in_insert_stmt: in_dml_flag == Flag::IN_INSERT_STMT,
                    in_update_or_delete_stmt: in_dml_flag == Flag::IN_UPDATE_OR_DELETE_STMT,
                    ..CtxConfig::default()
                }
                .into();
                let cast_func_res = cast_func(&mut ctx, &extra, &metadata, Some(&input.clone()));

                let mut ctx = CtxConfig {
                    overflow_as_warning,
                    truncate_as_warning,
                    in_insert_stmt: in_dml_flag == Flag::IN_INSERT_STMT,
                    in_update_or_delete_stmt: in_dml_flag == Flag::IN_UPDATE_OR_DELETE_STMT,
                    ..CtxConfig::default()
                }
                .into();
                let pd_res = produce_dec_with_specified_tp(&mut ctx, base_res, &rft);

                // make log
                let cast_func_res_log = cast_func_res
                    .as_ref()
                    .map(|x| x.as_ref().map(|x| x.to_string()));
                let pd_res_log = pd_res.as_ref().map(|x| x.to_string());
                let log = format!(
                    "test_func_name: {}, \
                     input: {}, base_res: {}, \
                     origin_flen: {}, origin_decimal: {}, \
                     res_flen: {}, res_decimal: {}, \
                     in_union: {}, is_unsigned: {}, in_dml: {}, in_dml_flag: {:?}, \
                     cond: {:?}, sign: {:?}, res_type: {:?}, \
                     overflow_as_warning: {}, truncate_as_warning: {}, expect_warning_err_code: {:?} \
                     expect: {}, expect_from_produce_dec_with_specified_tp(this is just for debug): {:?}, result: {:?}",
                    func_name,
                    input_as_debug_str_func(&input),
                    base_res,
                    origin_flen,
                    origin_decimal,
                    res_flen,
                    res_decimal,
                    in_union,
                    is_unsigned,
                    in_dml,
                    in_dml_flag,
                    cond,
                    sign,
                    res_type,
                    overflow_as_warning,
                    truncate_as_warning,
                    warning_err_code,
                    expect.to_string(),
                    pd_res_log,
                    cast_func_res_log
                );

                check_result(Some(&expect), &cast_func_res, log.as_str());
                check_warning(&ctx, warning_err_code, log.as_str());
            }
        }
    }
}

// These tests depend on the correctness of
// Decimal::from(u64), Decimal::from(i64), Decimal::from_f64(), Decimal::from_bytes(),
// Decimal::zero(), Decimal::round, max_or_min_dec and max_decimal.
#[test]
fn test_unsigned_int_as_signed_or_unsigned_decimal() {
    test_none_with_ctx_and_extra(cast_unsigned_int_as_signed_or_unsigned_decimal);

    let cs = vec![
        (10u64 as i64, false, true, Decimal::from(10)),
        (u64::MAX as i64, false, true, Decimal::from(u64::MAX)),
        (i64::MAX as u64 as i64, false, true, Decimal::from(i64::MAX)),
    ];
    test_as_decimal_helper(
        cs,
        cast_closure_with_metadata!(cast_unsigned_int_as_signed_or_unsigned_decimal),
        |x| x.to_string(),
        "cast_unsigned_int_as_signed_or_unsigned_decimal",
    );
}

#[test]
fn test_signed_int_as_unsigned_decimal() {
    test_none_with_ctx_and_extra_and_metadata(cast_signed_int_as_unsigned_decimal);

    let cs = vec![
        // (input, in_union, is_res_unsigned, base_result)
        // negative, in_union
        (-1, true, true, Decimal::zero()),
        (-10, true, true, Decimal::zero()),
        (i64::MIN, true, true, Decimal::zero()),
        // not negative, in_union
        (1, true, true, Decimal::from(1)),
        (10, true, true, Decimal::from(10)),
        (i64::MAX, true, true, Decimal::from(i64::MAX)),
        // negative, not in_union
        // FIXME: fix these cases (negative to unsigned decimal, without in_union)
        // after fixing the bug in this situation (negative to unsigned decimal, without in_union)
        (-1, false, true, Decimal::from(-1i64 as u64)),
        (-10, false, true, Decimal::from(-10i64 as u64)),
        (
            i64::MIN + 1,
            false,
            true,
            Decimal::from((i64::MIN + 1) as u64),
        ),
        // not negative, not in_union
        (1, false, true, Decimal::from(1)),
        (10, false, true, Decimal::from(10)),
        (i64::MAX, false, true, Decimal::from(i64::MAX)),
    ];
    test_as_decimal_helper(
        cs,
        cast_signed_int_as_unsigned_decimal,
        |x| x.to_string(),
        "cast_signed_int_as_unsigned_decimal",
    );
}

#[test]
fn test_signed_int_as_signed_decimal() {
    test_none_with_ctx_and_extra(cast_any_as_decimal::<Int>);

    let cs: Vec<(i64, bool, bool, Decimal)> = vec![
        // (input, in_union, is_res_unsigned, base_result)
        (-1, false, false, Decimal::from(-1)),
        (-10, false, false, Decimal::from(-10)),
        (i64::MIN, false, false, Decimal::from(i64::MIN)),
        (1, false, false, Decimal::from(1)),
        (10, false, false, Decimal::from(10)),
        (i64::MAX, false, false, Decimal::from(i64::MAX)),
    ];
    test_as_decimal_helper(
        cs,
        cast_closure_with_metadata!(cast_any_as_decimal::<Int>),
        |x| x.to_string(),
        "cast_signed_int_as_signed_decimal",
    );
}

#[test]
fn test_real_as_decimal() {
    test_none_with_ctx_and_extra_and_metadata(cast_real_as_decimal);

    // TODO: add a test case that makes Decimal::from_f64 return an err
    let cs = vec![
        // (input, in_union, is_res_unsigned, base_result)
        // neg and in_union
        (-10.0, true, false, Decimal::zero()),
        (i64::MIN as f64, true, false, Decimal::zero()),
        (-1.0, true, false, Decimal::zero()),
        (-0.0001, true, false, Decimal::zero()),
        // not neg and in_union
(10.0, true, false, Decimal::from_f64(10.0).unwrap()), ( i64::MAX as f64, true, false, Decimal::from_f64(i64::MAX as f64).unwrap(), ), (1.0, true, false, Decimal::from_f64(1.0).unwrap()), (0.0001, true, false, Decimal::from_f64(0.0001).unwrap()), // neg and not in_union (-10.0, false, false, Decimal::from_f64(-10.0).unwrap()), ( i64::MIN as f64, false, false, Decimal::from_f64(i64::MIN as f64).unwrap(), ), (-1.0, false, false, Decimal::from_f64(-1.0).unwrap()), (-0.0001, false, false, Decimal::from_f64(-0.0001).unwrap()), // not neg and not in_union (10.0, false, false, Decimal::from_f64(10.0).unwrap()), ( i64::MAX as f64, false, false, Decimal::from_f64(i64::MAX as f64).unwrap(), ), (1.0, false, false, Decimal::from_f64(1.0).unwrap()), (0.0001, false, false, Decimal::from_f64(0.0001).unwrap()), ]; test_as_decimal_helper( cs, |ctx, extra, metadata, val| { let val = val.map(|x| Real::new(*x).unwrap()); cast_real_as_decimal(ctx, extra, metadata, val.as_ref()) }, |x| x.to_string(), "cast_real_as_decimal", ); } #[test] fn test_string_as_signed_decimal() { test_none_with_ctx_and_extra(cast_bytes_as_decimal); // TODO: add test case that make Decimal::from_bytes return err. let cs = vec![ // (input, in_union, is_res_unsigned, base_result) // neg and in_union ("-10", true, false, Decimal::from(-10)), ("-1", true, false, Decimal::from(-1)), ( "-0.001", true, false, Decimal::from_bytes(b"-0.001").unwrap().unwrap(), ), ( "-9223372036854775807", true, false, Decimal::from(-9223372036854775807i64), ), ( "-9223372036854775808", true, false, Decimal::from(-9223372036854775808i64), ), ( "-9223372036854775808.001", true, false, Decimal::from_bytes(b"-9223372036854775808.001") .unwrap() .unwrap(), ), ( "-9223372036854775808.002", true, false, Decimal::from_bytes(b"-9223372036854775808.002") .unwrap() .unwrap(), ), ( "-18446744073709551615", true, false, Decimal::from_bytes(b"-18446744073709551615") .unwrap() .unwrap(), ), ( "-18446744073709551615.001", true, false, Decimal::from_bytes(b"-18446744073709551615.001") .unwrap() .unwrap(), ), ( "-18446744073709551615.11", true, false, Decimal::from_bytes(b"-18446744073709551615.11") .unwrap() .unwrap(), ), // not neg and in_union ("10", true, false, Decimal::from(10)), ("1", true, false, Decimal::from(1)), ("0.001", true, false, Decimal::from_f64(0.001).unwrap()), ( "9223372036854775807", true, false, Decimal::from(9223372036854775807u64), ), ( "9223372036854775808", true, false, Decimal::from(9223372036854775808u64), ), ( "9223372036854775808.001", true, false, Decimal::from_bytes(b"9223372036854775808.001") .unwrap() .unwrap(), ), ( "9223372036854775808.002", true, false, Decimal::from_bytes(b"9223372036854775808.002") .unwrap() .unwrap(), ), ( "18446744073709551615", true, false, Decimal::from(18446744073709551615u64), ), ( "18446744073709551615.001", true, false, Decimal::from_bytes(b"18446744073709551615.001") .unwrap() .unwrap(), ), ( "18446744073709551615.11", true, false, Decimal::from_bytes(b"18446744073709551615.11") .unwrap() .unwrap(), ), // neg and not in_union ("-10", false, false, Decimal::from(-10)), ("-1", false, false, Decimal::from(-1)), ("-0.001", false, false, Decimal::from_f64(-0.001).unwrap()), ( "-9223372036854775807", false, true, Decimal::from(-9223372036854775807i64), ), ( "-9223372036854775808", false, true, Decimal::from(-9223372036854775808i64), ), ( "-9223372036854775808.001", false, true, Decimal::from_bytes(b"-9223372036854775808.001") .unwrap() .unwrap(), ), ( "-9223372036854775808.002", false, true, 
Decimal::from_bytes(b"-9223372036854775808.002") .unwrap() .unwrap(), ), ( "-18446744073709551615", false, true, Decimal::from_bytes(b"-18446744073709551615") .unwrap() .unwrap(), ), ( "-18446744073709551615.001", false, true, Decimal::from_bytes(b"-18446744073709551615.001") .unwrap() .unwrap(), ), ( "-18446744073709551615.11", false, true, Decimal::from_bytes(b"-18446744073709551615.11") .unwrap() .unwrap(), ), // not neg and not in_union ("10", false, false, Decimal::from(10)), ("1", false, false, Decimal::from(1)), ("0.001", false, false, Decimal::from_f64(0.001).unwrap()), ( "9223372036854775807", false, true, Decimal::from(9223372036854775807u64), ), ( "9223372036854775808", false, true, Decimal::from(9223372036854775808u64), ), ( "9223372036854775808.001", false, true, Decimal::from_bytes(b"9223372036854775808.001") .unwrap() .unwrap(), ), ( "9223372036854775808.002", false, true, Decimal::from_bytes(b"9223372036854775808.002") .unwrap() .unwrap(), ), ( "18446744073709551615", false, true, Decimal::from(18446744073709551615u64), ), ( "18446744073709551615.001", false, true, Decimal::from_bytes(b"18446744073709551615.001") .unwrap() .unwrap(), ), ( "18446744073709551615.11", false, true, Decimal::from_bytes(b"18446744073709551615.11") .unwrap() .unwrap(), ), // can not convert to decimal ("abcde", false, false, Decimal::zero()), ("", false, false, Decimal::zero()), ("s", false, false, Decimal::zero()), ("abcde", true, false, Decimal::zero()), ("", true, false, Decimal::zero()), ("s", true, false, Decimal::zero()), ("abcde", false, true, Decimal::zero()), ("", false, true, Decimal::zero()), ("s", false, true, Decimal::zero()), ("abcde", true, true, Decimal::zero()), ("", true, true, Decimal::zero()), ("s", true, true, Decimal::zero()), ]; test_as_decimal_helper( cs, |ctx, extra, _, val| { let val = val.map(|x| x.as_bytes()); cast_bytes_as_decimal(ctx, extra, val) }, |x| (*x).to_string(), "cast_string_as_signed_decimal", ) } #[test] fn test_string_as_unsigned_decimal() { test_none_with_ctx_and_extra_and_metadata(cast_string_as_unsigned_decimal); let cs = vec![ // (input, in_union, is_res_unsigned, base_result) // neg and in_union ("-10", true, true, Decimal::zero()), ("-1", true, true, Decimal::zero()), ("-0.001", true, true, Decimal::zero()), ("-9223372036854775807", true, true, Decimal::zero()), ("-9223372036854775808", true, true, Decimal::zero()), ("-9223372036854775808.001", true, true, Decimal::zero()), ("-9223372036854775808.002", true, true, Decimal::zero()), ("-18446744073709551615", true, true, Decimal::zero()), ("-18446744073709551615.001", true, true, Decimal::zero()), ("-18446744073709551615.11", true, true, Decimal::zero()), // not neg and in_union ("10", true, true, Decimal::from(10)), ("1", true, true, Decimal::from(1)), ("0.001", true, true, Decimal::from_f64(0.001).unwrap()), ( "9223372036854775807", true, true, Decimal::from(9223372036854775807u64), ), ( "9223372036854775808", true, true, Decimal::from(9223372036854775808u64), ), ( "9223372036854775808.001", true, true, Decimal::from_bytes(b"9223372036854775808.001") .unwrap() .unwrap(), ), ( "9223372036854775808.002", true, true, Decimal::from_bytes(b"9223372036854775808.002") .unwrap() .unwrap(), ), ( "18446744073709551615", true, true, Decimal::from(18446744073709551615u64), ), ( "18446744073709551615.001", true, true, Decimal::from_bytes(b"18446744073709551615.001") .unwrap() .unwrap(), ), ( "18446744073709551615.11", true, true, Decimal::from_bytes(b"18446744073709551615.11") .unwrap() .unwrap(), ), // neg and 
not in_union ("-10", false, true, Decimal::from(-10)), ("-1", false, true, Decimal::from(-1)), ("-0.001", false, true, Decimal::from_f64(-0.001).unwrap()), ( "-9223372036854775807", false, true, Decimal::from(-9223372036854775807i64), ), ( "-9223372036854775808", false, true, Decimal::from(-9223372036854775808i64), ), ( "-9223372036854775808.001", false, true, Decimal::from_bytes(b"-9223372036854775808.001") .unwrap() .unwrap(), ), ( "-9223372036854775808.002", false, true, Decimal::from_bytes(b"-9223372036854775808.002") .unwrap() .unwrap(), ), ( "-18446744073709551615", false, true, Decimal::from_bytes(b"-18446744073709551615") .unwrap() .unwrap(), ), ( "-18446744073709551615.001", false, true, Decimal::from_bytes(b"-18446744073709551615.001") .unwrap() .unwrap(), ), ( "-18446744073709551615.11", false, true, Decimal::from_bytes(b"-18446744073709551615.11") .unwrap() .unwrap(), ), // not neg and not in_union ("10", false, true, Decimal::from(10)), ("1", false, true, Decimal::from(1)), ("0.001", false, true, Decimal::from_f64(0.001).unwrap()), ( "9223372036854775807", false, true, Decimal::from(9223372036854775807u64), ), ( "9223372036854775808", false, true, Decimal::from(9223372036854775808u64), ), ( "9223372036854775808.001", false, true, Decimal::from_bytes(b"9223372036854775808.001") .unwrap() .unwrap(), ), ( "9223372036854775808.002", false, true, Decimal::from_bytes(b"9223372036854775808.002") .unwrap() .unwrap(), ), ( "18446744073709551615", false, true, Decimal::from(18446744073709551615u64), ), ( "18446744073709551615.001", false, true, Decimal::from_bytes(b"18446744073709551615.001") .unwrap() .unwrap(), ), ( "18446744073709551615.11", false, true, Decimal::from_bytes(b"18446744073709551615.11") .unwrap() .unwrap(), ), // can not convert to decimal ("abcde", false, false, Decimal::zero()), ("", false, false, Decimal::zero()), ("s", false, false, Decimal::zero()), ("abcde", true, false, Decimal::zero()), ("", true, false, Decimal::zero()), ("s", true, false, Decimal::zero()), ("abcde", false, true, Decimal::zero()), ("", false, true, Decimal::zero()), ("s", false, true, Decimal::zero()), ("abcde", true, true, Decimal::zero()), ("", true, true, Decimal::zero()), ("s", true, true, Decimal::zero()), ]; test_as_decimal_helper( cs, |ctx, extra, metadata, val| { let val = val.map(|x| x.as_bytes()); cast_string_as_unsigned_decimal(ctx, extra, metadata, val) }, |x| (*x).to_string(), "cast_string_as_unsigned_decimal", ); } #[test] fn test_decimal_as_signed_decimal() { test_none_with_ctx_and_extra(cast_decimal_as_signed_decimal); // in_union and result is unsigned let cs = vec![ // (input, in_union, is_res_unsigned, base_result) // in_union (Decimal::zero(), true, false, Decimal::zero()), ( Decimal::from_f64(-10f64).unwrap(), true, false, Decimal::from_f64(-10f64).unwrap(), ), ( Decimal::from(i64::MIN), true, false, Decimal::from(i64::MIN), ), ( Decimal::from(i64::MAX), true, false, Decimal::from(i64::MAX), ), ( Decimal::from(u64::MAX), true, false, Decimal::from(u64::MAX), ), // not in_union (Decimal::zero(), false, false, Decimal::zero()), ( Decimal::from_f64(-10f64).unwrap(), false, false, Decimal::from_f64(-10f64).unwrap(), ), ( Decimal::from(i64::MIN), false, false, Decimal::from(i64::MIN), ), ( Decimal::from(i64::MAX), false, false, Decimal::from(i64::MAX), ), ( Decimal::from(u64::MAX), false, false, Decimal::from(u64::MAX), ), ]; test_as_decimal_helper( cs, cast_closure_with_metadata!(cast_decimal_as_signed_decimal), |x| x.to_string(), "cast_decimal_as_signed_decimal", ); } #[test] 
fn test_decimal_as_unsigned_decimal() {
    test_none_with_ctx_and_extra_and_metadata(cast_decimal_as_unsigned_decimal);

    // in_union and result is unsigned
    let cs = vec![
        // (input, in_union, is_res_unsigned, base_result)
        // neg and in_union
        (Decimal::from_f64(-10f64).unwrap(), true, true, Decimal::zero()),
        (Decimal::from(i64::MIN), true, true, Decimal::zero()),
        // not neg and in_union
        (Decimal::zero(), true, true, Decimal::zero()),
        (Decimal::from_f64(10f64).unwrap(), true, true, Decimal::from_f64(10f64).unwrap()),
        (Decimal::from(i64::MAX), true, true, Decimal::from(i64::MAX)),
        (Decimal::from(u64::MAX), true, true, Decimal::from(u64::MAX)),
        // neg and not in_union
        (Decimal::from_f64(-10f64).unwrap(), false, true, Decimal::from_f64(-10f64).unwrap()),
        (Decimal::from(i64::MIN), false, true, Decimal::from(i64::MIN)),
        // not neg and not in_union
        (Decimal::zero(), true, true, Decimal::zero()),
        (Decimal::from_f64(10f64).unwrap(), true, true, Decimal::from_f64(10f64).unwrap()),
        (Decimal::from(i64::MAX), true, true, Decimal::from(i64::MAX)),
        (Decimal::from(u64::MAX), true, true, Decimal::from(u64::MAX)),
    ];
    test_as_decimal_helper(
        cs,
        cast_decimal_as_unsigned_decimal,
        |x| x.to_string(),
        "cast_decimal_as_unsigned_decimal",
    );
}

#[test]
fn test_time_as_decimal() {
    test_none_with_ctx_and_extra(cast_any_as_decimal::<Time>);
    let mut ctx = EvalContext::default();

    // TODO: add more test cases
    let cs: Vec<(Time, bool, bool, Decimal)> = vec![
        // (cast_func_input, in_union, is_res_unsigned, base_result)
        (
            Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14", 0, false).unwrap(),
            false,
            false,
            Decimal::from_bytes(b"20000101121314").unwrap().unwrap(),
        ),
        (
            Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14.6666", 0, true).unwrap(),
            false,
            false,
            Decimal::from_bytes(b"20000101121315").unwrap().unwrap(),
        ),
    ];
    test_as_decimal_helper(
        cs,
        cast_closure_with_metadata!(cast_any_as_decimal::<Time>),
        |x| x.to_string(),
        "cast_time_as_decimal",
    )
}

#[test]
fn test_duration_as_decimal() {
    test_none_with_ctx_and_extra(cast_any_as_decimal::<Duration>);
    let mut ctx = EvalContext::default();

    // TODO: add more test cases
    let cs: Vec<(Duration, bool, bool, Decimal)> = vec![
        // (input, in_union, is_res_unsigned, base_result)
        (Duration::parse(&mut ctx, "17:51:04.78", 2).unwrap(), false, false, Decimal::from_f64(175104.78).unwrap()),
        (Duration::parse(&mut ctx, "-17:51:04.78", 2).unwrap(), false, false, Decimal::from_f64(-175104.78).unwrap()),
        (Duration::parse(&mut ctx, "17:51:04.78", 0).unwrap(), false, false, Decimal::from(175105)),
        (Duration::parse(&mut ctx, "-17:51:04.78", 0).unwrap(), false, false, Decimal::from(-175105)),
    ];
    test_as_decimal_helper(
        cs,
        cast_closure_with_metadata!(cast_any_as_decimal::<Duration>),
        |x| x.to_string(),
        "cast_duration_as_decimal",
    )
}

#[test]
fn test_json_as_decimal() {
    test_none_with_ctx_and_extra(cast_json_as_decimal);

    // TODO: add a test case that makes Decimal::from_str fail
    let cs: Vec<(Json, bool, bool, Decimal)> = vec![
        (Json::from_i64(10).unwrap(), false, false, Decimal::from_f64(10f64).unwrap()),
        (Json::from_i64(i64::MAX).unwrap(), false, false, Decimal::from_f64(i64::MAX as f64).unwrap()),
        (Json::from_i64(i64::MIN).unwrap(), false, false, Decimal::from_f64(i64::MIN as f64).unwrap()),
        (Json::from_u64(0).unwrap(), false, false, Decimal::zero()),
        (Json::from_u64(i64::MAX as u64).unwrap(), false, false, Decimal::from_f64(i64::MAX as f64).unwrap()),
        (Json::from_u64(u64::MAX).unwrap(), false, false, Decimal::from_f64(u64::MAX as f64).unwrap()),
        (
Json::from_f64(i64::MAX as f64).unwrap(), false, false, Decimal::from_f64(i64::MAX as f64).unwrap(), ), ( Json::from_f64(i64::MIN as f64).unwrap(), false, false, Decimal::from_f64(i64::MIN as f64).unwrap(), ), ( Json::from_f64(u64::MAX as f64).unwrap(), false, false, Decimal::from_f64(u64::MAX as f64).unwrap(), ), ( Json::from_string("10.0".to_string()).unwrap(), false, false, Decimal::from_bytes(b"10.0").unwrap().unwrap(), ), ( Json::from_string("-10.0".to_string()).unwrap(), false, false, Decimal::from_bytes(b"-10.0").unwrap().unwrap(), ), ( Json::from_string("9999999999999999999".to_string()).unwrap(), false, false, Decimal::from_bytes(b"9999999999999999999") .unwrap() .unwrap(), ), ( Json::from_string("-9999999999999999999".to_string()).unwrap(), false, false, Decimal::from_bytes(b"-9999999999999999999") .unwrap() .unwrap(), ), ( Json::from_bool(true).unwrap(), false, false, Decimal::from_f64(1f64).unwrap(), ), ( Json::from_bool(false).unwrap(), false, false, Decimal::zero(), ), (Json::none().unwrap(), false, false, Decimal::zero()), ]; test_as_decimal_helper( cs, |ctx, extra, _, val| cast_json_as_decimal(ctx, extra, val.map(|x| x.as_ref())), |x| x.to_string(), "cast_json_as_decimal", ); } #[test] fn test_truncate_when_cast_json_object_or_array_as_decimal() { test_none_with_ctx(cast_any_as_any::<Real, Int>); let cs = vec![ // (origin, result, errcode) ( Json::from_object(BTreeMap::default()).unwrap(), Decimal::zero(), ERR_TRUNCATE_WRONG_VALUE, ), ( Json::from_array(vec![]).unwrap(), Decimal::zero(), ERR_TRUNCATE_WRONG_VALUE, ), ]; for (input, result, errcode) in cs { let mut ctx = CtxConfig { truncate_as_warning: true, ..CtxConfig::default() } .into(); let rft = FieldTypeConfig::default().into(); let extra = make_extra(&rft); let r = cast_json_as_decimal(&mut ctx, &extra, Some(input.as_ref())); let log = make_log(&input, &result, &r); check_result(Some(&result), &r, log.as_str()); check_warning(&ctx, Some(errcode), log.as_str()); } } #[test] fn test_int_as_duration() { // None { let output: Option<Real> = RpnFnScalarEvaluator::new() .push_param(ScalarValue::Bytes(None)) .evaluate(ScalarFuncSig::CastIntAsDuration) .unwrap(); assert_eq!(output, None); } let mut ctx = EvalContext::default(); struct TestCase( i64, isize, tidb_query_datatype::codec::Result<Option<Duration>>, bool, bool, ); // This case copy from Duration.rs::tests::test_from_i64 let cs: Vec<TestCase> = vec![ // (input, fsp, expect, overflow, truncated) TestCase( 101010, 0, Ok(Some(Duration::parse(&mut ctx, "10:10:10", 0).unwrap())), false, false, ), TestCase( 101010, 5, Ok(Some(Duration::parse(&mut ctx, "10:10:10", 5).unwrap())), false, false, ), TestCase( 8385959, 0, Ok(Some(Duration::parse(&mut ctx, "838:59:59", 0).unwrap())), false, false, ), TestCase( 8385959, 6, Ok(Some(Duration::parse(&mut ctx, "838:59:59", 6).unwrap())), false, false, ), TestCase( -101010, 0, Ok(Some(Duration::parse(&mut ctx, "-10:10:10", 0).unwrap())), false, false, ), TestCase( -101010, 5, Ok(Some(Duration::parse(&mut ctx, "-10:10:10", 5).unwrap())), false, false, ), TestCase( -8385959, 0, Ok(Some(Duration::parse(&mut ctx, "-838:59:59", 0).unwrap())), false, false, ), TestCase( -8385959, 6, Ok(Some(Duration::parse(&mut ctx, "-838:59:59", 6).unwrap())), false, false, ), // overflow as warning TestCase(8385960, 0, Ok(None), true, false), TestCase(-8385960, 0, Ok(None), true, false), // will truncated TestCase(8376049, 0, Ok(None), false, true), TestCase(8375960, 0, Ok(None), false, true), TestCase(-8376049, 0, Ok(None), false, true), 
TestCase(2002073, 0, Ok(None), false, true),
        TestCase(2007320, 0, Ok(None), false, true),
        TestCase(-2002073, 0, Ok(None), false, true),
        TestCase(-2007320, 0, Ok(None), false, true),
        TestCase(
            10000000000,
            0,
            Ok(Some(Duration::parse(&mut ctx, "0:0:0", 0).unwrap())),
            false,
            false,
        ),
        TestCase(
            10000235959,
            0,
            Ok(Some(Duration::parse(&mut ctx, "23:59:59", 0).unwrap())),
            false,
            false,
        ),
        TestCase(-10000235959, 0, Ok(None), true, false),
    ];

    for TestCase(input, fsp, expected, overflow, truncated) in cs {
        let (result, ctx) = RpnFnScalarEvaluator::new()
            .context(CtxConfig {
                overflow_as_warning: true,
                truncate_as_warning: true,
                ..CtxConfig::default()
            })
            .push_param(input)
            .evaluate_raw(
                FieldTypeConfig {
                    tp: Some(FieldTypeTp::Duration),
                    decimal: fsp,
                    ..FieldTypeConfig::default()
                },
                ScalarFuncSig::CastIntAsDuration,
            );
        match expected {
            Ok(expected) => {
                let result: Option<Duration> = result.unwrap().into();
                assert_eq!(
                    result, expected,
                    "input:{:?}, expected:{:?}, got:{:?}",
                    input, expected, result,
                );
            }
            Err(_) => {
                assert!(
                    result.is_err(),
                    "input:{:?}, expected err:{:?}, got:{:?}",
                    input, expected, result
                );
            }
        }
        if overflow {
            assert_eq!(ctx.warnings.warning_cnt, 1);
            assert_eq!(ctx.warnings.warnings[0].get_code(), ERR_DATA_OUT_OF_RANGE);
        }
        if truncated {
            assert_eq!(ctx.warnings.warning_cnt, 1);
            assert_eq!(
                ctx.warnings.warnings[0].get_code(),
                ERR_TRUNCATE_WRONG_VALUE
            );
        }
    }
}

fn test_as_duration_helper<T: Clone, FnCast>(
    base_cs: Vec<T>,
    func_to_cast_str: impl Fn(T) -> String,
    func_to_debug_str: impl Fn(T) -> String,
    func_cast: FnCast,
    func_name: &str,
) where
    FnCast: Fn(&mut EvalContext, &RpnFnCallExtra, Option<T>) -> Result<Option<Duration>>,
{
    // cast_real_as_duration calls `Duration::parse` directly,
    // and `Duration::parse` is tested in duration.rs.
    // Our test here only needs to make sure the result is the same as calling
    // `Duration::parse`, no matter whether the cast function calls it directly.
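    // For each base value, sweep every fsp in MIN_FSP..=MAX_FSP and check that
    // the cast's value and warnings agree with what `Duration::parse` produces
    // for the stringified input.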
for val in base_cs { for fsp in MIN_FSP..=MAX_FSP { let mut ctx = CtxConfig { overflow_as_warning: true, truncate_as_warning: true, ..CtxConfig::default() } .into(); let rft = FieldTypeConfig { decimal: fsp as isize, ..FieldTypeConfig::default() } .into(); let extra = make_extra(&rft); let result = func_cast(&mut ctx, &extra, Some(val.clone())); let val_str = func_to_cast_str(val.clone()); let base_expect = Duration::parse(&mut ctx, &val_str, fsp); // make log let result_str = result.as_ref().map(|x| x.map(|x| x.to_string())); match base_expect { Err(e) => match e.code() { ERR_DATA_OUT_OF_RANGE => { let log = format!( "func_name:{}, input: {}, fsp: {}, output: {:?}, expect: {}, expect_warn: {}", func_name, func_to_debug_str(val.clone()), fsp, result_str, Duration::zero(), ERR_DATA_OUT_OF_RANGE ); check_overflow(&ctx, true, log.as_str()); check_result(None, &result, log.as_str()); } ERR_TRUNCATE_WRONG_VALUE => { let log = format!( "func_name:{}, input: {}, fsp: {}, output: {:?}, output_warn: {:?}, expect: {}, expect_warn: {}", func_name, func_to_debug_str(val.clone()), fsp, result_str, ctx.warnings.warnings, Duration::zero(), WARN_DATA_TRUNCATED ); check_warning(&ctx, Some(ERR_TRUNCATE_WRONG_VALUE), log.as_str()); check_result(None, &result, log.as_str()); } _ => { let expect_err: tidb_query_common::error::Error = e.into(); let log = format!( "func_name:{}, input: {}, fsp: {}, output: {:?}, output_warn: {:?}, expect: {:?}", func_name, func_to_debug_str(val.clone()), fsp, result_str, ctx.warnings.warnings, expect_err ); assert!(result.is_err(), "log: {}", log) } }, Ok(v) => { let log = format!( "func_name:{}, input: {}, fsp: {}, output: {:?}, output_warn: {:?}, expect: {:?}", func_name, func_to_debug_str(val.clone()), fsp, result_str, ctx.warnings.warnings, v ); check_result(Some(&v), &result, log.as_str()) } } } } } #[test] fn test_real_as_duration() { test_none_with_ctx_and_extra(cast_real_as_duration); let cs: Vec<f64> = vec![ 101112.0, 101112.123456, 1112.0, 12.0, -0.123, 12345.0, -123.0, -23.0, ]; test_as_duration_helper( cs, |x| x.to_string(), |x| x.to_string(), |ctx, extra, val| { let val = val.map(|x| Real::new(x).unwrap()); cast_real_as_duration(ctx, extra, val.as_ref()) }, "cast_real_as_duration", ) } #[test] fn test_bytes_as_duration() { test_none_with_ctx_and_extra(cast_bytes_as_duration); let cs: Vec<BytesRef> = vec![ b"17:51:04.78", b"-17:51:04.78", b"17:51:04.78", b"-17:51:04.78", ]; test_as_duration_helper( cs, |x| String::from_utf8_lossy(x).to_string(), |x| String::from_utf8_lossy(x).to_string(), cast_bytes_as_duration, "cast_bytes_as_duration", ); } #[test] fn test_decimal_as_duration() { test_none_with_ctx_and_extra(cast_decimal_as_duration); let cs = vec![ Decimal::from(i64::MIN), Decimal::from(i64::MAX), Decimal::from(u64::MAX), Decimal::zero(), Decimal::from_bytes(b"-9223372036854775808") .unwrap() .unwrap(), Decimal::from_bytes(b"9223372036854775808") .unwrap() .unwrap(), Decimal::from_bytes(b"-9223372036854775809") .unwrap() .unwrap(), Decimal::from_bytes(b"9223372036854775809") .unwrap() .unwrap(), Decimal::from_bytes(b"-18446744073709551615") .unwrap() .unwrap(), Decimal::from_bytes(b"18446744073709551615") .unwrap() .unwrap(), Decimal::from_bytes(b"-18446744073709551616") .unwrap() .unwrap(), Decimal::from_bytes(b"18446744073709551616") .unwrap() .unwrap(), Decimal::from_bytes(b"-184467440737095516160") .unwrap() .unwrap(), Decimal::from_bytes(b"184467440737095516160") .unwrap() .unwrap(), Decimal::from_bytes(b"-99999999999999999999999999999999") .unwrap() 
.unwrap(), Decimal::from_bytes(b"99999999999999999999999999999999") .unwrap() .unwrap(), ]; let cs_ref: Vec<&Decimal> = cs.iter().collect(); test_as_duration_helper( cs_ref, |x| x.to_string(), |x| x.to_string(), cast_decimal_as_duration, "cast_decimal_as_duration", ); } #[test] fn test_time_as_duration() { test_none_with_ctx_and_extra(cast_time_as_duration); // copy from test_convert_to_duration let cs = vec![ // (input, input's fsp, output's fsp, output) ("2012-12-31 11:30:45.123456", 4, 0, "11:30:45"), ("2012-12-31 11:30:45.123456", 4, 1, "11:30:45.1"), ("2012-12-31 11:30:45.123456", 4, 2, "11:30:45.12"), ("2012-12-31 11:30:45.123456", 4, 3, "11:30:45.124"), ("2012-12-31 11:30:45.123456", 4, 4, "11:30:45.1235"), ("2012-12-31 11:30:45.123456", 4, 5, "11:30:45.12350"), ("2012-12-31 11:30:45.123456", 4, 6, "11:30:45.123500"), ("2012-12-31 11:30:45.123456", 6, 0, "11:30:45"), ("2012-12-31 11:30:45.123456", 6, 1, "11:30:45.1"), ("2012-12-31 11:30:45.123456", 6, 2, "11:30:45.12"), ("2012-12-31 11:30:45.123456", 6, 3, "11:30:45.123"), ("2012-12-31 11:30:45.123456", 6, 4, "11:30:45.1235"), ("2012-12-31 11:30:45.123456", 6, 5, "11:30:45.12346"), ("2012-12-31 11:30:45.123456", 6, 6, "11:30:45.123456"), ("2012-12-31 11:30:45.123456", 0, 0, "11:30:45"), ("2012-12-31 11:30:45.123456", 0, 1, "11:30:45.0"), ("2012-12-31 11:30:45.123456", 0, 2, "11:30:45.00"), ("2012-12-31 11:30:45.123456", 0, 3, "11:30:45.000"), ("2012-12-31 11:30:45.123456", 0, 4, "11:30:45.0000"), ("2012-12-31 11:30:45.123456", 0, 5, "11:30:45.00000"), ("2012-12-31 11:30:45.123456", 0, 6, "11:30:45.000000"), ("0000-00-00 00:00:00", 6, 0, "00:00:00"), ("0000-00-00 00:00:00", 6, 1, "00:00:00.0"), ("0000-00-00 00:00:00", 6, 2, "00:00:00.00"), ("0000-00-00 00:00:00", 6, 3, "00:00:00.000"), ("0000-00-00 00:00:00", 6, 4, "00:00:00.0000"), ("0000-00-00 00:00:00", 6, 5, "00:00:00.00000"), ("0000-00-00 00:00:00", 6, 6, "00:00:00.000000"), ]; for (s, fsp, expect_fsp, expect) in cs { let mut ctx = EvalContext::default(); let rft = FieldTypeConfig { decimal: expect_fsp, ..FieldTypeConfig::default() } .into(); let extra = make_extra(&rft); let input_time = Time::parse_datetime(&mut ctx, s, fsp, true).unwrap(); let expect_time = Duration::parse(&mut ctx, expect, expect_fsp as i8).unwrap(); let result = cast_time_as_duration(&mut ctx, &extra, Some(&input_time)); let result_str = result.as_ref().map(|x| x.as_ref().map(|x| x.to_string())); let log = format!( "input: {}, fsp: {}, expect_fsp: {}, expect: {}, output: {:?}", s, fsp, expect_fsp, expect, result_str, ); check_result(Some(&expect_time), &result, log.as_str()); } } #[test] fn test_duration_as_duration() { test_none_with_extra(cast_duration_as_duration); let cs = vec![ ("11:30:45.123456", 6, 0, "11:30:45"), ("11:30:45.123456", 6, 1, "11:30:45.1"), ("11:30:45.123456", 6, 2, "11:30:45.12"), ("11:30:45.123456", 6, 3, "11:30:45.123"), ("11:30:45.123456", 6, 4, "11:30:45.1235"), ("11:30:45.123456", 6, 5, "11:30:45.12346"), ("11:30:45.123456", 6, 6, "11:30:45.123456"), ]; for (input, input_fsp, output_fsp, expect) in cs { let rft = FieldTypeConfig { decimal: output_fsp as isize, ..FieldTypeConfig::default() } .into(); let extra = make_extra(&rft); let mut ctx = EvalContext::default(); let dur = Duration::parse(&mut ctx, input, input_fsp).unwrap(); let expect = Duration::parse(&mut ctx, expect, output_fsp).unwrap(); let r = cast_duration_as_duration(&extra, Some(&dur)); let result_str = r.as_ref().map(|x| x.map(|x| x.to_string())); let log = format!( "input: {}, input_fsp: {}, output_fsp: {}, expect: 
{}, output: {:?}", input, input_fsp, output_fsp, expect, result_str ); check_result(Some(&expect), &r, log.as_str()); } } #[test] fn test_json_as_duration() { test_none_with_ctx_and_extra(cast_json_as_duration); // the case that Json::unquote failed had be tested by test_json_unquote let cs = vec![ Json::from_object(BTreeMap::default()).unwrap(), Json::from_array(vec![]).unwrap(), Json::from_i64(10).unwrap(), Json::from_i64(i64::MAX).unwrap(), Json::from_i64(i64::MIN).unwrap(), Json::from_u64(0).unwrap(), Json::from_u64(u64::MAX).unwrap(), Json::from_f64(10.5).unwrap(), Json::from_f64(10.4).unwrap(), Json::from_f64(-10.4).unwrap(), Json::from_f64(-10.5).unwrap(), Json::from_f64(i64::MIN as u64 as f64).unwrap(), Json::from_f64(i64::MAX as u64 as f64).unwrap(), Json::from_f64(i64::MIN as u64 as f64).unwrap(), Json::from_f64(i64::MIN as f64).unwrap(), Json::from_f64(((1u64 << 63) + (1u64 << 62)) as u64 as f64).unwrap(), Json::from_f64(-((1u64 << 63) as f64 + (1u64 << 62) as f64)).unwrap(), Json::from_f64(f64::from(f32::MIN)).unwrap(), Json::from_f64(f64::from(f32::MAX)).unwrap(), Json::from_f64(f64::MAX).unwrap(), Json::from_f64(f64::MAX).unwrap(), Json::from_string(String::from("10.0")).unwrap(), Json::from_string(String::from( "999999999999999999999999999999999999999999999999", )) .unwrap(), Json::from_string(String::from( "-999999999999999999999999999999999999999999999999", )) .unwrap(), Json::from_string(String::from( "99999999999999999999999999999999999999999999999aabcde9", )) .unwrap(), Json::from_string(String::from( "-99999999999999999999999999999999999999999999999aabcde9", )) .unwrap(), Json::from_bool(true).unwrap(), Json::from_bool(false).unwrap(), Json::none().unwrap(), ]; let cs_ref: Vec<JsonRef> = cs.iter().map(|x| x.as_ref()).collect(); test_as_duration_helper( cs_ref, |x| x.unquote().unwrap(), |x| format!("{:?}", x), cast_json_as_duration, "cast_json_as_duration", ); } #[test] fn test_int_as_json() { test_none_with_ctx(cast_any_as_json::<Int>); let cs = vec![ (i64::MIN, Json::from_i64(i64::MIN).unwrap()), (0, Json::from_i64(0).unwrap()), (i64::MAX, Json::from_i64(i64::MAX).unwrap()), ]; for (input, expect) in cs { let mut ctx = EvalContext::default(); let r = cast_any_as_json::<Int>(&mut ctx, Some(&input)); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); } } #[test] fn test_uint_as_json() { test_none_with_nothing(cast_uint_as_json); let cs = vec![ (u64::MAX, Json::from_u64(u64::MAX).unwrap()), (0, Json::from_u64(0).unwrap()), (i64::MAX as u64, Json::from_u64(i64::MAX as u64).unwrap()), ]; for (input, expect) in cs { let r = cast_uint_as_json(Some(&(input as i64))); let log = make_log(&input, &expect, &r); check_result(Some(&expect), &r, log.as_str()); } } #[test] fn test_bool_as_json() { test_none_with_nothing(cast_bool_as_json); let cs = vec![ (0, Json::from_bool(false).unwrap()), (i64::MIN, Json::from_bool(true).unwrap()), (i64::MAX, Json::from_bool(true).unwrap()), ]; for (input, expect) in cs { let result = cast_bool_as_json(Some(&input)); let log = make_log(&input, &expect, &result); check_result(Some(&expect), &result, log.as_str()); } } #[test] fn test_real_as_json() { test_none_with_ctx(cast_any_as_json::<Real>); let cs = vec![ ( f64::from(f32::MAX), Json::from_f64(f64::from(f32::MAX)).unwrap(), ), ( f64::from(f32::MIN), Json::from_f64(f64::from(f32::MIN)).unwrap(), ), (f64::MAX, Json::from_f64(f64::MAX).unwrap()), (f64::MIN, Json::from_f64(f64::MIN).unwrap()), ]; for (input, expect) in cs { let mut ctx = EvalContext::default(); 
        let r = cast_any_as_json::<Real>(&mut ctx, Real::new(input).as_ref().ok());
        let log = make_log(&input, &expect, &r);
        check_result(Some(&expect), &r, log.as_str());
    }
}

#[test]
fn test_string_as_json() {
    test_none_with_extra(cast_string_as_json);

    let mut jo1: BTreeMap<String, Json> = BTreeMap::new();
    jo1.insert(
        String::from("a"),
        Json::from_string(String::from("b")).unwrap(),
    );
    // HasParseToJSONFlag
    let cs = vec![
        ("{\"a\": \"b\"}".to_string(), Json::from_object(jo1).unwrap(), true),
        ("{}".to_string(), Json::from_object(BTreeMap::new()).unwrap(), true),
        (
            "[1, 2, 3]".to_string(),
            Json::from_array(vec![
                Json::from_i64(1).unwrap(),
                Json::from_i64(2).unwrap(),
                Json::from_i64(3).unwrap(),
            ])
            .unwrap(),
            true,
        ),
        ("[]".to_string(), Json::from_array(Vec::new()).unwrap(), true),
        (
            "9223372036854775807".to_string(),
            Json::from_i64(9223372036854775807).unwrap(),
            true,
        ),
        (
            "-9223372036854775808".to_string(),
            Json::from_i64(-9223372036854775808).unwrap(),
            true,
        ),
        (
            "18446744073709551615".to_string(),
            Json::from_f64(18446744073709552000.0).unwrap(),
            true,
        ),
        // FIXME: f64::MAX.to_string() to json should succeed
        // (f64::MAX.to_string(), Json::from_f64(f64::MAX), true),
        ("0.0".to_string(), Json::from_f64(0.0).unwrap(), true),
        (
            "\"abcde\"".to_string(),
            Json::from_string("abcde".to_string()).unwrap(),
            true,
        ),
        ("\"\"".to_string(), Json::from_string("".to_string()).unwrap(), true),
        ("true".to_string(), Json::from_bool(true).unwrap(), true),
        ("false".to_string(), Json::from_bool(false).unwrap(), true),
    ];
    for (input, expect, parse_to_json) in cs {
        let mut rft = FieldType::default();
        if parse_to_json {
            let fta = rft.as_mut_accessor();
            fta.set_flag(FieldTypeFlag::PARSE_TO_JSON);
        }
        let extra = make_extra(&rft);
        let result = cast_string_as_json(&extra, Some(&input.clone().into_bytes()));
        let result_str = result.as_ref().map(|x| x.as_ref().map(|x| x.to_string()));
        let log = format!(
            "input: {}, parse_to_json: {}, expect: {:?}, result: {:?}",
            input, parse_to_json, expect, result_str
        );
        check_result(Some(&expect), &result, log.as_str());
    }
}

#[test]
fn test_decimal_as_json() {
    test_none_with_ctx(cast_any_as_json::<Decimal>);
    let cs = vec![
        (
            Decimal::from_f64(i64::MIN as f64).unwrap(),
            Json::from_f64(i64::MIN as f64).unwrap(),
        ),
        (
            Decimal::from_f64(i64::MAX as f64).unwrap(),
            Json::from_f64(i64::MAX as f64).unwrap(),
        ),
        (
            Decimal::from_bytes(b"184467440737095516160").unwrap().unwrap(),
            Json::from_f64(184467440737095516160.0).unwrap(),
        ),
        (
            Decimal::from_bytes(b"-184467440737095516160").unwrap().unwrap(),
            Json::from_f64(-184467440737095516160.0).unwrap(),
        ),
    ];

    for (input, expect) in cs {
        let mut ctx = EvalContext::default();
        let r = cast_any_as_json::<Decimal>(&mut ctx, Some(&input));
        let log = make_log(&input, &expect, &r);
        check_result(Some(&expect), &r, log.as_str());
    }
}

#[test]
fn test_time_as_json() {
    test_none_with_ctx(cast_any_as_json::<Time>);
    let mut ctx = EvalContext::default();

    // TODO: add more cases for the other TimeTypes
    let cs = vec![
        // The time_type field is included so that maintainers can see at a
        // glance what the type of each time value is.
( Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14", 0, true).unwrap(), TimeType::DateTime, Json::from_string("2000-01-01 12:13:14.000000".to_string()).unwrap(), ), ( Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14.6666", 0, true).unwrap(), TimeType::DateTime, Json::from_string("2000-01-01 12:13:15.000000".to_string()).unwrap(), ), ( Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14", 6, true).unwrap(), TimeType::DateTime, Json::from_string("2000-01-01 12:13:14.000000".to_string()).unwrap(), ), ( Time::parse_datetime(&mut ctx, "2000-01-01T12:13:14.6666", 6, true).unwrap(), TimeType::DateTime, Json::from_string("2000-01-01 12:13:14.666600".to_string()).unwrap(), ), ( Time::parse_datetime(&mut ctx, "2019-09-01", 0, true).unwrap(), TimeType::DateTime, Json::from_string("2019-09-01 00:00:00.000000".to_string()).unwrap(), ), ( Time::parse_datetime(&mut ctx, "2019-09-01", 6, true).unwrap(), TimeType::DateTime, Json::from_string("2019-09-01 00:00:00.000000".to_string()).unwrap(), ), ]; for (input, time_type, expect) in cs { let mut ctx = EvalContext::default(); let result = cast_any_as_json::<Time>(&mut ctx, Some(&input)); let result_str = result.as_ref().map(|x| x.as_ref().map(|x| x.to_string())); let log = format!( "input: {}, expect_time_type: {:?}, real_time_type: {:?}, expect: {}, result: {:?}", &input, time_type, input.get_time_type(), &expect, result_str ); assert_eq!(input.get_time_type(), time_type, "{}", log); check_result(Some(&expect), &result, log.as_str()); } } #[test] fn test_duration_as_json() { test_none_with_ctx(cast_any_as_json::<Duration>); // TODO: add more case let cs = vec![ ( Duration::zero(), Json::from_string("00:00:00.000000".to_string()).unwrap(), ), ( Duration::parse(&mut EvalContext::default(), "10:10:10", 0).unwrap(), Json::from_string("10:10:10.000000".to_string()).unwrap(), ), ]; for (input, expect) in cs { let mut ctx = EvalContext::default(); let result = cast_any_as_json::<Duration>(&mut ctx, Some(&input)); let log = make_log(&input, &expect, &result); check_result(Some(&expect), &result, log.as_str()); } } #[test] fn test_json_as_json() { test_none_with_nothing(cast_json_as_json); let mut jo1: BTreeMap<String, Json> = BTreeMap::new(); jo1.insert("a".to_string(), Json::from_string("b".to_string()).unwrap()); let cs = vec![ Json::from_object(jo1).unwrap(), Json::from_array(vec![ Json::from_i64(1).unwrap(), Json::from_i64(3).unwrap(), Json::from_i64(4).unwrap(), ]) .unwrap(), Json::from_i64(i64::MIN).unwrap(), Json::from_i64(i64::MAX).unwrap(), Json::from_u64(0u64).unwrap(), Json::from_u64(u64::MAX).unwrap(), Json::from_f64(f64::MIN).unwrap(), Json::from_f64(f64::MAX).unwrap(), Json::from_string("abcde".to_string()).unwrap(), Json::from_bool(true).unwrap(), Json::from_bool(false).unwrap(), Json::none().unwrap(), ]; for input in cs { let expect = input.clone(); let result = cast_json_as_json(Some(input.as_ref())); let log = make_log(&input, &expect, &result); check_result(Some(&expect), &result, log.as_str()); } } }
cast_unsigned_int_as_signed_or_unsigned_decimal
convertValues_test.go
/* * Cadence - The resource-oriented smart contract programming language * * Copyright 2019-2020 Dapper Labs, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package runtime import ( "fmt" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/onflow/cadence" "github.com/onflow/cadence/runtime/interpreter" "github.com/onflow/cadence/runtime/sema" "github.com/onflow/cadence/runtime/tests/utils" ) type exportTest struct { label string value interpreter.Value expected cadence.Value skipReverse bool } var exportTests = []exportTest{ { label: "Void", value: interpreter.VoidValue{}, expected: cadence.NewVoid(), }, { label: "Nil", value: interpreter.NilValue{}, expected: cadence.NewOptional(nil), skipReverse: true, }, { label: "SomeValue", value: interpreter.NewSomeValueOwningNonCopying(interpreter.NewIntValueFromInt64(42)), expected: cadence.NewOptional(cadence.NewInt(42)), }, { label: "Bool true", value: interpreter.BoolValue(true), expected: cadence.NewBool(true), }, { label: "Bool false", value: interpreter.BoolValue(false), expected: cadence.NewBool(false), }, { label: "String empty", value: interpreter.NewStringValue(""), expected: cadence.NewString(""), }, { label: "String non-empty", value: interpreter.NewStringValue("foo"), expected: cadence.NewString("foo"), }, { label: "Array empty", value: interpreter.NewArrayValueUnownedNonCopying([]interpreter.Value{}...), expected: cadence.NewArray([]cadence.Value{}), }, { label: "Array non-empty", value: interpreter.NewArrayValueUnownedNonCopying( []interpreter.Value{ interpreter.NewIntValueFromInt64(42), interpreter.NewStringValue("foo"), }..., ), expected: cadence.NewArray([]cadence.Value{ cadence.NewInt(42), cadence.NewString("foo"), }), }, { label: "Int", value: interpreter.NewIntValueFromInt64(42), expected: cadence.NewInt(42), }, { label: "Int8", value: interpreter.Int8Value(42), expected: cadence.NewInt8(42), }, { label: "Int16", value: interpreter.Int16Value(42), expected: cadence.NewInt16(42), }, { label: "Int32", value: interpreter.Int32Value(42), expected: cadence.NewInt32(42), }, { label: "Int64", value: interpreter.Int64Value(42), expected: cadence.NewInt64(42), }, { label: "Int128", value: interpreter.NewInt128ValueFromInt64(42), expected: cadence.NewInt128(42), }, { label: "Int256", value: interpreter.NewInt256ValueFromInt64(42), expected: cadence.NewInt256(42), }, { label: "UInt", value: interpreter.NewUIntValueFromUint64(42), expected: cadence.NewUInt(42), }, { label: "UInt8", value: interpreter.UInt8Value(42), expected: cadence.NewUInt8(42), }, { label: "UInt16", value: interpreter.UInt16Value(42), expected: cadence.NewUInt16(42), }, { label: "UInt32", value: interpreter.UInt32Value(42), expected: cadence.NewUInt32(42), }, { label: "UInt64", value: interpreter.UInt64Value(42), expected: cadence.NewUInt64(42), }, { label: "UInt128", value: interpreter.NewUInt128ValueFromUint64(42), expected: cadence.NewUInt128(42), }, { label: "UInt256", value: interpreter.NewUInt256ValueFromUint64(42), expected: 
cadence.NewUInt256(42), }, { label: "Word8", value: interpreter.Word8Value(42), expected: cadence.NewWord8(42), }, { label: "Word16", value: interpreter.Word16Value(42), expected: cadence.NewWord16(42), }, { label: "Word32", value: interpreter.Word32Value(42), expected: cadence.NewWord32(42), }, { label: "Word64", value: interpreter.Word64Value(42), expected: cadence.NewWord64(42), }, { label: "Fix64", value: interpreter.Fix64Value(-123000000), expected: cadence.Fix64(-123000000), }, { label: "UFix64", value: interpreter.UFix64Value(123000000), expected: cadence.UFix64(123000000), }, } func TestExportValue(t *testing.T) { t.Parallel() test := func(tt exportTest) { t.Run(tt.label, func(t *testing.T) { t.Parallel() actual := exportValueWithInterpreter(tt.value, nil) assert.Equal(t, tt.expected, actual) if !tt.skipReverse { original := importValue(actual) assert.Equal(t, tt.value, original) } }) } for _, tt := range exportTests { test(tt) } } func TestExportIntegerValuesFromScript(t *testing.T) { t.Parallel() test := func(integerType sema.Type) { t.Run(integerType.String(), func(t *testing.T) { t.Parallel() script := fmt.Sprintf( ` pub fun main(): %s { return 42 } `, integerType, ) assert.NotPanics(t, func() { exportValueFromScript(t, script) }) }) } for _, integerType := range sema.AllIntegerTypes { test(integerType) } } func TestExportFixedPointValuesFromScript(t *testing.T) { t.Parallel() test := func(fixedPointType sema.Type) { t.Run(fixedPointType.String(), func(t *testing.T) { t.Parallel() script := fmt.Sprintf( ` pub fun main(): %s { return 1.23 } `, fixedPointType, ) assert.NotPanics(t, func() { exportValueFromScript(t, script) }) }) } for _, fixedPointType := range sema.AllFixedPointTypes { test(fixedPointType) } } func TestExportDictionaryValue(t *testing.T) { t.Parallel() t.Run("Empty", func(t *testing.T) { t.Parallel() script := ` access(all) fun main(): {String: Int} { return {} } ` actual := exportValueFromScript(t, script) expected := cadence.NewDictionary([]cadence.KeyValuePair{}) assert.Equal(t, expected, actual) }) t.Run("Non-empty", func(t *testing.T) { t.Parallel() script := ` access(all) fun main(): {String: Int} { return { "a": 1, "b": 2 } } ` actual := exportValueFromScript(t, script) expected := cadence.NewDictionary([]cadence.KeyValuePair{ { Key: cadence.NewString("a"), Value: cadence.NewInt(1), }, { Key: cadence.NewString("b"), Value: cadence.NewInt(2), }, }) assert.Equal(t, expected, actual) }) } func TestExportAddressValue(t *testing.T) { t.Parallel() script := ` access(all) fun main(): Address { return 0x42 } ` actual := exportValueFromScript(t, script) expected := cadence.BytesToAddress( []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x42}, ) assert.Equal(t, expected, actual) } func TestExportStructValue(t *testing.T) { t.Parallel() script := ` access(all) struct Foo { access(all) let bar: Int init(bar: Int) { self.bar = bar } } access(all) fun main(): Foo { return Foo(bar: 42) } ` actual := exportValueFromScript(t, script) expected := cadence.NewStruct([]cadence.Value{cadence.NewInt(42)}).WithType(fooStructType) assert.Equal(t, expected, actual) } func TestExportResourceValue(t *testing.T) { t.Parallel() script := ` access(all) resource Foo { access(all) let bar: Int init(bar: Int) { self.bar = bar } } access(all) fun main(): @Foo { return <- create Foo(bar: 42) } ` actual := exportValueFromScript(t, script) expected := cadence.NewResource([]cadence.Value{ cadence.NewUInt64(0), cadence.NewInt(42), }).WithType(fooResourceType)
assert.Equal(t, expected, actual) } func TestExportResourceArrayValue(t *testing.T) { t.Parallel() script := ` access(all) resource Foo { access(all) let bar: Int init(bar: Int) { self.bar = bar } } access(all) fun main(): @[Foo] { return <- [<- create Foo(bar: 1), <- create Foo(bar: 2)] } ` actual := exportValueFromScript(t, script) expected := cadence.NewArray([]cadence.Value{ cadence.NewResource([]cadence.Value{ cadence.NewUInt64(0), cadence.NewInt(1), }).WithType(fooResourceType), cadence.NewResource([]cadence.Value{ cadence.NewUInt64(0), cadence.NewInt(2), }).WithType(fooResourceType), }) assert.Equal(t, expected, actual) } func TestExportResourceDictionaryValue(t *testing.T) { t.Parallel() script := ` access(all) resource Foo { access(all) let bar: Int init(bar: Int) { self.bar = bar } } access(all) fun main(): @{String: Foo} { return <- { "a": <- create Foo(bar: 1), "b": <- create Foo(bar: 2) } } ` actual := exportValueFromScript(t, script) expected := cadence.NewDictionary([]cadence.KeyValuePair{ { Key: cadence.NewString("a"), Value: cadence.NewResource([]cadence.Value{ cadence.NewUInt64(0), cadence.NewInt(1), }).WithType(fooResourceType), }, { Key: cadence.NewString("b"), Value: cadence.NewResource([]cadence.Value{ cadence.NewUInt64(0), cadence.NewInt(2), }).WithType(fooResourceType), }, }) assert.Equal(t, expected, actual) } func TestExportNestedResourceValueFromScript(t *testing.T) { t.Parallel() barResourceType := &cadence.ResourceType{ TypeID: "S.test.Bar", Identifier: "Bar", Fields: []cadence.Field{ { Identifier: "uuid", Type: cadence.UInt64Type{}, }, { Identifier: "x", Type: cadence.IntType{}, }, }, } fooResourceType := &cadence.ResourceType{ TypeID: "S.test.Foo", Identifier: "Foo", Fields: []cadence.Field{ { Identifier: "uuid", Type: cadence.UInt64Type{}, }, { Identifier: "bar", Type: barResourceType, }, }, } script := ` access(all) resource Bar { access(all) let x: Int init(x: Int) { self.x = x } } access(all) resource Foo { access(all) let bar: @Bar init(bar: @Bar) { self.bar <- bar } destroy() { destroy self.bar } } access(all) fun main(): @Foo { return <- create Foo(bar: <- create Bar(x: 42)) } ` actual := exportValueFromScript(t, script) expected := cadence.NewResource([]cadence.Value{ cadence.NewUInt64(0), cadence.NewResource([]cadence.Value{ cadence.NewUInt64(0), cadence.NewInt(42), }).WithType(barResourceType), }).WithType(fooResourceType) assert.Equal(t, expected, actual) } func TestExportEventValue(t *testing.T) { t.Parallel() script := ` access(all) event Foo(bar: Int) access(all) fun main() { emit Foo(bar: 42) } ` actual := exportEventFromScript(t, script) expected := cadence.NewEvent([]cadence.Value{cadence.NewInt(42)}).WithType(fooEventType) assert.Equal(t, expected, actual) } // mock runtime.Interface to capture events type eventCapturingInterface struct { EmptyRuntimeInterface events []cadence.Event } func (t *eventCapturingInterface) EmitEvent(event cadence.Event) { t.events = append(t.events, event) } func exportEventFromScript(t *testing.T, script string) cadence.Event { rt := NewInterpreterRuntime() inter := &eventCapturingInterface{} _, err := rt.ExecuteScript( []byte(script), nil, inter, utils.TestLocation, ) require.NoError(t, err) require.Len(t, inter.events, 1) event := inter.events[0] return event } func exportValueFromScript(t *testing.T, script string) cadence.Value { rt := NewInterpreterRuntime() value, err := rt.ExecuteScript( []byte(script), nil, &EmptyRuntimeInterface{}, utils.TestLocation, ) require.NoError(t, err) return value } const fooID 
= "Foo" var fooTypeID = fmt.Sprintf("S.%s.%s", utils.TestLocation, fooID) var fooFields = []cadence.Field{ { Identifier: "bar", Type: cadence.IntType{}, }, } var fooResourceFields = []cadence.Field{ { Identifier: "uuid", Type: cadence.UInt64Type{}, }, { Identifier: "bar", Type: cadence.IntType{}, }, } var fooStructType = &cadence.StructType{ TypeID: fooTypeID, Identifier: fooID, Fields: fooFields, } var fooResourceType = &cadence.ResourceType{ TypeID: fooTypeID, Identifier: fooID, Fields: fooResourceFields, } var fooEventType = &cadence.EventType{ TypeID: fooTypeID, Identifier: fooID, Fields: fooFields, }
services_monitor.js
import { __ } from 'embark-i18n'; const async = require('../utils/async_extend.js'); const deepEqual = require('deep-equal'); class
{ constructor(options) { const self = this; this.events = options.events; this.logger = options.logger; this.plugins = options.plugins; this.checkList = {}; this.checkTimers = {}; this.checkState = {}; this.working = false; self.events.setCommandHandler("services:register", (checkName, checkFn, time, initialStatus) => { self.addCheck(checkName, checkFn, time, initialStatus); }); } } ServicesMonitor.prototype.initCheck = function (checkName) { let self = this; let check = this.checkList[checkName]; if (!check) { return false; } self.events.on('check:' + checkName, function (obj) { if (check && check.status === 'off' && obj.status === 'on') { self.events.emit('check:backOnline:' + checkName); } if (check && check.status === 'on' && obj.status === 'off') { self.events.emit('check:wentOffline:' + checkName); } check.status = obj.status; const newState = {name: obj.name, status: obj.status, serviceName: checkName}; if (!deepEqual(newState, self.checkState[checkName])) { self.checkState[checkName] = {name: obj.name, status: obj.status, serviceName: checkName}; self.events.emit("servicesState", self.checkState); } }); if (check.interval !== 0) { self.checkTimers[checkName] = setInterval(function () { check.fn.call(check.fn, function (obj) { self.events.emit('check:' + checkName, obj); }); }, check.interval); } check.fn.call(check.fn, function (obj) { self.events.emit('check:' + checkName, obj); }); }; ServicesMonitor.prototype.addCheck = function (checkName, checkFn, time, initialState) { this.logger.trace('add check: ' + checkName); this.checkList[checkName] = {fn: checkFn, interval: time || 5000, status: initialState}; if (this.working) { this.initCheck(checkName); } }; ServicesMonitor.prototype.stopCheck = function (name) { clearInterval(this.checkTimers[name]); delete this.checkTimers[name]; delete this.checkList[name]; delete this.checkState[name]; }; ServicesMonitor.prototype.startMonitor = function () { let self = this; this.working = true; this.logger.trace('startMonitor'); let servicePlugins = this.plugins.getPluginsProperty('serviceChecks', 'serviceChecks'); servicePlugins.forEach(function (pluginCheck) { self.addCheck(pluginCheck.checkName, pluginCheck.checkFn, pluginCheck.time); }); async.eachObject(this.checkList, function (checkName, check, callback) { self.initCheck(checkName); callback(); }, function (err) { if (err) { self.logger.error(__("error running service check")); self.logger.error(err.message); } }); }; module.exports = ServicesMonitor;
ServicesMonitor
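// A hypothetical registration against the monitor above: the
// "services:register" command name and the {name, status} callback shape
// come from the constructor and initCheck; `events.request` and the
// Ethereum naming are illustrative assumptions, not part of this file.
events.request("services:register", "Ethereum", (cb) => {
  cb({name: "Ethereum node", status: "on"}); // re-checked every 5 seconds
}, 5000, "off");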
packager.ts
import { ngPackagr } from 'ng-packagr'; import { root } from './helpers'; ngPackagr()
.forProject(root(`./packages/@ngx-meta/${process.argv[2]}/ng-package.json`)) .withTsConfig(root('./tools/build/tsconfig.package.json')) .build() .catch(() => (process.exitCode = 1));
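// The target package comes from process.argv[2]; a hedged invocation
// sketch (the script path is an assumption based on the root() calls):
//   npx ts-node ./tools/packager.ts core
// would build packages/@ngx-meta/core with the shared tsconfig above.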
ActionButton.js
import { __assign, __decorate, __extends } from "tslib"; import * as React from 'react'; import { BaseButton } from '../BaseButton'; import { customizable, nullRender } from '../../../Utilities'; import { getStyles } from './ActionButton.styles'; /** * {@docCategory Button} */ var ActionButton = /** @class */ (function (_super) { __extends(ActionButton, _super); function ActionButton() { return _super !== null && _super.apply(this, arguments) || this; } ActionButton.prototype.render = function () { var _a = this.props, styles = _a.styles, theme = _a.theme; return (React.createElement(BaseButton, __assign({}, this.props, { variantClassName: "ms-Button--action ms-Button--command", styles: getStyles(theme, styles), onRenderDescription: nullRender }))); }; ActionButton = __decorate([
customizable('ActionButton', ['theme', 'styles'], true) ], ActionButton); return ActionButton; }(React.Component)); export { ActionButton }; //# sourceMappingURL=ActionButton.js.map
inherited.rs
use super::callee::DeferredCallResolution; use super::MaybeInProgressTables; use rustc_data_structures::fx::FxHashSet; use rustc_hir as hir; use rustc_hir::def_id::{DefIdMap, LocalDefId}; use rustc_hir::HirIdMap; use rustc_infer::infer; use rustc_infer::infer::{InferCtxt, InferOk, TyCtxtInferExt}; use rustc_middle::ty::fold::TypeFoldable; use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_span::{self, Span}; use rustc_trait_selection::infer::InferCtxtExt as _; use rustc_trait_selection::traits::{self, ObligationCause, TraitEngine, TraitEngineExt}; use std::cell::RefCell; use std::ops::Deref; /// Closures defined within the function. For example: /// /// fn foo() { /// bar(move|| { ... }) /// } /// /// Here, the function `foo()` and the closure passed to /// `bar()` will each have their own `FnCtxt`, but they will /// share the inherited fields. pub struct Inherited<'a, 'tcx> { pub(super) infcx: InferCtxt<'a, 'tcx>, pub(super) typeck_results: super::MaybeInProgressTables<'a, 'tcx>, pub(super) locals: RefCell<HirIdMap<super::LocalTy<'tcx>>>, pub(super) fulfillment_cx: RefCell<Box<dyn TraitEngine<'tcx>>>, // Some additional `Sized` obligations badly affect type inference. // These obligations are added in a later stage of typeck. pub(super) deferred_sized_obligations: RefCell<Vec<(Ty<'tcx>, Span, traits::ObligationCauseCode<'tcx>)>>, // When we process a call like `c()` where `c` is a closure type, // we may not have decided yet whether `c` is a `Fn`, `FnMut`, or // `FnOnce` closure. In that case, we defer full resolution of the // call until upvar inference can kick in and make the // decision. We keep these deferred resolutions grouped by the // def-id of the closure, so that once we decide, we can easily go // back and process them. pub(super) deferred_call_resolutions: RefCell<DefIdMap<Vec<DeferredCallResolution<'tcx>>>>, pub(super) deferred_cast_checks: RefCell<Vec<super::cast::CastCheck<'tcx>>>, pub(super) deferred_generator_interiors: RefCell<Vec<(hir::BodyId, Ty<'tcx>, hir::GeneratorKind)>>, /// Reports whether this is in a const context. pub(super) constness: hir::Constness, pub(super) body_id: Option<hir::BodyId>, /// Whenever we introduce an adjustment from `!` into a type variable, /// we record that type variable here. This is later used to inform /// fallback. See the `fallback` module for details. pub(super) diverging_type_vars: RefCell<FxHashSet<Ty<'tcx>>>, } impl<'a, 'tcx> Deref for Inherited<'a, 'tcx> { type Target = InferCtxt<'a, 'tcx>; fn deref(&self) -> &Self::Target { &self.infcx } } /// Helper type of a temporary returned by `Inherited::build(...)`. /// Necessary because we can't write the following bound: /// `F: for<'b, 'tcx> where 'tcx FnOnce(Inherited<'b, 'tcx>)`. pub struct InheritedBuilder<'tcx> { infcx: infer::InferCtxtBuilder<'tcx>, def_id: LocalDefId, } impl Inherited<'_, 'tcx> { pub fn build(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> InheritedBuilder<'tcx> { let hir_owner = tcx.hir().local_def_id_to_hir_id(def_id).owner; InheritedBuilder { infcx: tcx.infer_ctxt().with_fresh_in_progress_typeck_results(hir_owner), def_id, } } } impl<'tcx> InheritedBuilder<'tcx> { pub fn enter<F, R>(&mut self, f: F) -> R where F: for<'a> FnOnce(Inherited<'a, 'tcx>) -> R,
} impl Inherited<'a, 'tcx> { pub(super) fn new(infcx: InferCtxt<'a, 'tcx>, def_id: LocalDefId) -> Self { let tcx = infcx.tcx; let item_id = tcx.hir().local_def_id_to_hir_id(def_id); Self::with_constness(infcx, def_id, tcx.hir().get(item_id).constness_for_typeck()) } pub(super) fn with_constness( infcx: InferCtxt<'a, 'tcx>, def_id: LocalDefId, constness: hir::Constness, ) -> Self { let tcx = infcx.tcx; let item_id = tcx.hir().local_def_id_to_hir_id(def_id); let body_id = tcx.hir().maybe_body_owned_by(item_id); Inherited { typeck_results: MaybeInProgressTables { maybe_typeck_results: infcx.in_progress_typeck_results, }, infcx, fulfillment_cx: RefCell::new(<dyn TraitEngine<'_>>::new(tcx)), locals: RefCell::new(Default::default()), deferred_sized_obligations: RefCell::new(Vec::new()), deferred_call_resolutions: RefCell::new(Default::default()), deferred_cast_checks: RefCell::new(Vec::new()), deferred_generator_interiors: RefCell::new(Vec::new()), diverging_type_vars: RefCell::new(Default::default()), constness, body_id, } } pub(super) fn register_predicate(&self, obligation: traits::PredicateObligation<'tcx>) { debug!("register_predicate({:?})", obligation); if obligation.has_escaping_bound_vars() { span_bug!(obligation.cause.span, "escaping bound vars in predicate {:?}", obligation); } self.fulfillment_cx.borrow_mut().register_predicate_obligation(self, obligation); } pub(super) fn register_predicates<I>(&self, obligations: I) where I: IntoIterator<Item = traits::PredicateObligation<'tcx>>, { for obligation in obligations { self.register_predicate(obligation); } } pub(super) fn register_infer_ok_obligations<T>(&self, infer_ok: InferOk<'tcx, T>) -> T { self.register_predicates(infer_ok.obligations); infer_ok.value } pub(super) fn normalize_associated_types_in<T>( &self, span: Span, body_id: hir::HirId, param_env: ty::ParamEnv<'tcx>, value: T, ) -> T where T: TypeFoldable<'tcx>, { self.normalize_associated_types_in_with_cause( ObligationCause::misc(span, body_id), param_env, value, ) } pub(super) fn normalize_associated_types_in_with_cause<T>( &self, cause: ObligationCause<'tcx>, param_env: ty::ParamEnv<'tcx>, value: T, ) -> T where T: TypeFoldable<'tcx>, { let ok = self.partially_normalize_associated_types_in(cause, param_env, value); debug!(?ok); self.register_infer_ok_obligations(ok) } }
{ let def_id = self.def_id; self.infcx.enter(|infcx| f(Inherited::new(infcx, def_id))) }
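// Sketch of the intended call pattern for the builder above; the closure
// body is illustrative. `enter` creates the InferCtxt and hands out an
// `Inherited` that borrows from it, which is why the bound on `F` must be
// higher-ranked over the inference context's lifetime:
//
//     Inherited::build(tcx, def_id).enter(|inh| {
//         // all type checking against `inh` happens inside this closure
//     })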
untested.spec.ts
import * as embed from "../src/embed";
import * as sorting from "../src/sorting";
import * as index from "../src";
main.go
package main import ( "errors" "fmt" "io/ioutil" "os" "github.com/GoogleContainerTools/kpt-functions-catalog/contrib/functions/go/blueprint-docs/docs" "github.com/GoogleContainerTools/kpt-functions-catalog/contrib/functions/go/blueprint-docs/generated" "sigs.k8s.io/kustomize/kyaml/fn/framework" "sigs.k8s.io/kustomize/kyaml/fn/framework/command" "sigs.k8s.io/kustomize/kyaml/yaml" ) const defaultReadmePath = "/tmp/README.md" const defaultRepoPath = "https://github.com/GoogleCloudPlatform/blueprints.git/catalog/" //nolint func main() { rp := ReadmeProcessor{} cmd := command.Build(&rp, command.StandaloneEnabled, false) cmd.Short = generated.GenerateKptPkgDocsShort cmd.Long = generated.GenerateKptPkgDocsLong cmd.Example = generated.GenerateKptPkgDocsExamples if err := cmd.Execute(); err != nil { os.Exit(1) } } type ReadmeProcessor struct{} func (rp *ReadmeProcessor) Process(resourceList *framework.ResourceList) error { readmePath, repoPath, pkgName := parseFnCfg(resourceList.FunctionConfig) currentDoc, err := ioutil.ReadFile(readmePath) if err != nil { if errors.Is(err, os.ErrNotExist) { resourceList.Results = getResults(fmt.Sprintf("Skipping readme generation: %s", err), framework.Warning) return nil
} err = generateReadme(repoPath, readmePath, pkgName, string(currentDoc), resourceList) if err != nil { resourceList.Results = getResults(err.Error(), framework.Error) return err } return nil } func generateReadme(repoPath, readmePath, pkgName, currentDoc string, resourceList *framework.ResourceList) error { title, generatedDoc, err := docs.GenerateBlueprintReadme(resourceList.Items, repoPath, pkgName) if err != nil { return err } readme, err := docs.InsertIntoReadme(title, currentDoc, generatedDoc) if err != nil { return err } err = ioutil.WriteFile(readmePath, []byte(readme), os.ModePerm) if err != nil { return err } return nil } func parseFnCfg(r *yaml.RNode) (string, string, string) { cm := r.GetDataMap() readme, exists := cm["readme-path"] if !exists { readme = defaultReadmePath } repoPath, exists := cm["repo-path"] if !exists { repoPath = defaultRepoPath } pkgName := cm["pkg-name"] return readme, repoPath, pkgName } // getResults wraps the given message and severity in a single-item result list func getResults(msg string, severity framework.Severity) []*framework.Result { return []*framework.Result{ { Message: fmt.Sprintf("failed to generate doc: %s", msg), Severity: severity, }, } }
} else { resourceList.Results = getResults(err.Error(), framework.Error) }
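/* A sketch of the ConfigMap-style functionConfig that parseFnCfg reads; the keys come from the code above, the metadata name and pkg-name value are illustrative, and omitted keys fall back to the defaults:

apiVersion: v1
kind: ConfigMap
metadata:
  name: generate-kpt-pkg-docs-config
data:
  readme-path: /tmp/README.md
  repo-path: https://github.com/GoogleCloudPlatform/blueprints.git/catalog/
  pkg-name: my-blueprint
*/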
connector.go
package pubsub import ( "github.com/kubemq-hub/builder/connector/common" ) func Connector() *common.Connector
{ return common.NewConnector(). SetKind("gcp.pubsub"). SetDescription("GCP PubSub source properties"). SetName("PubSub"). SetProvider("GCP"). SetCategory("Messaging"). SetTags("streaming", "cloud", "managed"). AddProperty( common.NewProperty(). SetKind("string"). SetName("project_id"). SetTitle("Project ID"). SetDescription("Set Project Id"). SetMust(true), ). AddProperty( common.NewProperty(). SetKind("string"). SetName("subscriber_id"). SetTitle("Subscriber ID"). SetDescription("Set Subscriber Id"). SetMust(true). SetDefault(""), ). AddProperty( common.NewProperty(). SetKind("multilines"). SetName("credentials"). SetTitle("Json Credentials"). SetDescription("Set GCP Credentials"). SetMust(true). SetDefault(""), ) }
Mλ.py
# This code is a part of XMM: Generate and Analyse (XGA), a module designed for the XMM Cluster Survey (XCS).
# Last modified by David J Turner ([email protected]) 11/12/2020, 16:41. Copyright (c) David J Turner
small.py
def pth(N,K,P): # too slow for the huge inputs A=set(map(int,raw_input().split())) try: return [x for x in range(1,N+1) if x not in A][P-1] except IndexError: return -1 def ppth(N,K,P):
if __name__ == "__main__": for tc in xrange(int(raw_input())): nkp = map(int,raw_input().split()) print ppth(nkp[0],nkp[1],nkp[2])
x=P e = map(int,raw_input().split()) for i in xrange(K): if(e[i]<=x): x+=1 if(P<=N-K): return x else: return -1
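# How the counting trick above works: x starts at P, and every excluded
# value <= x bumps x by one, so x lands on the P-th number in 1..N that is
# not excluded (this assumes the K excluded values arrive sorted).
# Worked trace, illustrative input:
#   N=7, K=3, P=2, excluded line "2 3 6"
#   x=2; e[0]=2 <= 2 -> x=3; e[1]=3 <= 3 -> x=4; e[2]=6 > 4 -> stop
#   non-excluded values are 1, 4, 5, 7, so ppth returns the 2nd: 4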
game.rs
use crate::repr::{Card, CardSet, Suit}; use crate::constant::SUIT; #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct GameState { player: u8, // 1 turn: u8, // 1 points: [u8; 2], // 2 discard: CardSet, // 4 played: [Card; 4], // 4 hands: [CardSet; 4], //16 trump: Option<Suit> // 1 } // total 29 bytes #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct LegalCard(Card); impl LegalCard { pub fn
(self) -> Card { self.0 } } #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct LegalCardSet(CardSet); impl LegalCardSet { pub fn as_set(self) -> CardSet { self.0 } } struct LegalCardIterator { set: LegalCardSet } impl Iterator for LegalCardIterator { type Item = LegalCard; fn next(&mut self) -> Option<LegalCard> { if self.set.as_set().is_empty() { return None; } let set : u32 = self.set.as_set().0; let card = LegalCard( Card((((set - 1) & set) ^ set).trailing_zeros() as u8 )); self.set = LegalCardSet(CardSet((set - 1) & set)); Some(card) } } impl GameState { pub fn new(hands: [CardSet; 4]) -> Self { GameState { player: 0, turn: 0, points: [0; 2], discard: CardSet(0), played: [Card(31); 4], hands, trump: None } } pub fn check(self, card: Card) -> Option<LegalCard> { match self.legal().as_set().contains(card) { true => Some(LegalCard(card)), false => None } } pub fn legal(self) -> LegalCardSet { let hand = self.hands[self.player as usize]; if self.turn % 4 == 0 { return LegalCardSet(hand); } let first = self.played[0]; let suit = first.suit(); let trump = self.trump.unwrap(); let same_suit = hand & SUIT[<usize as From<Suit>>::from(suit)]; if !same_suit.is_empty() { return LegalCardSet(same_suit); } let trump_suit = hand & SUIT[<usize as From<Suit>>::from(trump)]; if !trump_suit.is_empty() { LegalCardSet(trump_suit) } else { LegalCardSet(hand) } } pub fn play(mut self, legal_card: LegalCard) -> GameState { let card = legal_card.0; let hand = self.hands[self.player as usize]; self.hands[self.player as usize] = hand ^ card.to_set(); self.played[(self.turn % 4) as usize] = card; self.player = (self.player + 1) % 4; if self.turn == 0 { self.trump = Some(card.suit()); } self.turn += 1; self } fn take(mut self) -> GameState { let mut player = self.player; let trump = self.trump.unwrap(); let mut best_card = self.played[0]; let mut best_player = player; player = (player + 1) % 4; let mut points = 0u8; let mut call = 0; for &card in &self.played[1..4] { points += card.points(); if card.suit() == best_card.suit() && card.rank() > best_card.rank() { best_card = card; best_player = player; } else if card.suit() == trump { best_card = card; best_player = player; } player = (player + 1) % 4; } self.points[(best_player%2) as usize] += points; self } }
as_card
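// The iterator above peels one card per step with the standard
// lowest-set-bit trick; a worked example of the bit arithmetic:
//   set                 = 0b0110
//   (set - 1) & set     = 0b0100   (lowest set bit cleared)
//   ((set-1)&set) ^ set = 0b0010   (that lowest bit, isolated)
//   trailing_zeros()    = 1        -> yields Card(1), then iterates on 0b0100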
tabs_spaces.py
__author__ = 'Alex' import re def main(line):
sub = re.sub(r"(\t)", r" ", line) return sub
bestmodel.py
import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable class BestNet(torch.nn.Module): def __init__(self, embedding_dim): super(BestNet, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = 256 self.embedding_dropout=0.6 self.desc_rnn_size = 100 self.rnn = nn.GRU( input_size=self.embedding_dim, hidden_size=self.hidden_dim, num_layers=1, batch_first=True, bidirectional=True ) self.rnn_desc = nn.GRU( input_size=self.embedding_dim, hidden_size=self.desc_rnn_size, num_layers=1, batch_first=True, bidirectional=True ) self.emb_drop = nn.Dropout(self.embedding_dropout) self.M = nn.Parameter(torch.FloatTensor(2*self.hidden_dim, 2*self.hidden_dim)) self.b = nn.Parameter(torch.FloatTensor([0])) self.Wc = nn.Parameter(torch.FloatTensor(2*self.hidden_dim, self.embedding_dim)) self.We = nn.Parameter(torch.FloatTensor(self.embedding_dim, self.embedding_dim)) self.attn = nn.Linear(2*self.hidden_dim, 2*self.hidden_dim) self.init_params_() self.tech_w = 0.0 def init_params_(self): #Initializing parameters nn.init.xavier_normal_(self.M) # Set forget gate bias to 2 size = self.rnn.bias_hh_l0.size(0) self.rnn.bias_hh_l0.data[size//4:size//2] = 2 size = self.rnn.bias_ih_l0.size(0) self.rnn.bias_ih_l0.data[size//4:size//2] = 2 size = self.rnn_desc.bias_hh_l0.size(0) self.rnn_desc.bias_hh_l0.data[size//4:size//2] = 2 size = self.rnn_desc.bias_ih_l0.size(0) self.rnn_desc.bias_ih_l0.data[size//4:size//2] = 2 # def forward(self, context, options): # logits = [] # for i, option in enumerate(options.transpose(1, 0)): # gits = [] # for context in context.transpose(1,0): # git = self.forward_one_option(context, option) # gits.append(logit) # logit = torch.stack(gits).mean(0) # logits = torch.stack(logits, 1) # return logits.squeeze() # def forward(self, context, options): # logits = [] # for i, option in enumerate(options.transpose(1, 0)): # logit = self.forward_one_option(context, option) # logits.append(logit) # logits = torch.stack(logits, 1) # return logits.squeeze() def
(self, context, options): logits = [] for i, option in enumerate(options.transpose(1, 0)): logit_ = [] for utter in context.transpose(1,0): logit = self.forward_one_option(utter, option) # 10,1,1 logit_.append(logit) logits.append(torch.stack(logit_,1).mean(1)) logits = torch.stack(logits, 1) return logits.squeeze() def forward_one_option(self, context, option): context, c_h, option, o_h = self.forward_crosspath(context, option) context_attn = self.forward_attn(context, o_h) option_attn = self.forward_attn(option, c_h) final = self.forward_fc(context_attn, option_attn) return final def forward_crosspath(self, context, option): context, c_h = self.rnn(self.emb_drop(context)) c_h = torch.cat([i for i in c_h], dim=-1) option, o_h = self.rnn(self.emb_drop(option)) o_h = torch.cat([i for i in o_h], dim=-1) return context, c_h.squeeze(), option, o_h.squeeze() def forward_attn(self, output, hidden): max_len = output.size(1) b_size = output.size(0) hidden = hidden.squeeze(0).unsqueeze(2) attn = self.attn(output.contiguous().view(b_size*max_len, -1)) attn = attn.view(b_size, max_len, -1) attn_energies = (attn.bmm(hidden).transpose(1,2)) alpha = F.softmax(attn_energies.squeeze(1), dim=-1) alpha = alpha.unsqueeze(1) weighted_attn = alpha.bmm(output) return weighted_attn.squeeze() def forward_fc(self, context, option): out = torch.mm(context, self.M).unsqueeze(1) out = torch.bmm(out, option.unsqueeze(2)) out = out + self.b return out def save(self, filepath): torch.save(self.state_dict(), filepath)
forward
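# A hedged note on the tensor shapes forward() expects, inferred from the
# transposes and the inline "# 10,1,1" comment; dimension names are
# assumptions:
#   context: (batch, n_utterances, seq_len, embedding_dim)
#   options: (batch, n_options,    seq_len, embedding_dim)
# forward_one_option -> (batch, 1, 1); stacking over utterances and
# averaging, then stacking over options and squeezing, gives
#   logits:  (batch, n_options)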
useLogViewerSelection.tsx
/* * Copyright 2021 The Backstage Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { errorApiRef, useApi } from '@backstage/core-plugin-api'; import { useEffect, useState } from 'react'; import useCopyToClipboard from 'react-use/lib/useCopyToClipboard'; import { AnsiLine } from './AnsiProcessor'; export function
(lines: AnsiLine[]) { const errorApi = useApi(errorApiRef); const [sel, setSelection] = useState<{ start: number; end: number }>(); const start = sel ? Math.min(sel.start, sel.end) : undefined; const end = sel ? Math.max(sel.start, sel.end) : undefined; const [{ error }, copyToClipboard] = useCopyToClipboard(); useEffect(() => { if (error) { errorApi.post(error); } }, [error, errorApi]); return { shouldShowButton(line: number) { return start === line || end === line; }, isSelected(line: number) { if (!sel) { return false; } return start! <= line && line <= end!; }, setSelection(line: number, add: boolean) { if (add) { setSelection(s => s ? { start: s.start, end: line } : { start: line, end: line }, ); } else { setSelection(s => s?.start === line && s?.end === line ? undefined : { start: line, end: line }, ); } }, copySelection() { if (sel) { const copyText = lines .slice(Math.min(sel.start, sel.end) - 1, Math.max(sel.start, sel.end)) .map(l => l.chunks.map(c => c.text).join('')) .join('\n'); copyToClipboard(copyText); setSelection(undefined); } }, }; }
useLogViewerSelection
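// A hypothetical consumer of the hook above; the component and markup are
// invented for illustration (line numbers are 1-based, per copySelection).
const LogPane = ({ lines }: { lines: AnsiLine[] }) => {
  const selection = useLogViewerSelection(lines);
  return (
    <div>
      {lines.map((_, i) => (
        <div key={i} onClick={e => selection.setSelection(i + 1, e.shiftKey)}>
          {selection.shouldShowButton(i + 1) && (
            <button onClick={() => selection.copySelection()}>Copy</button>
          )}
        </div>
      ))}
    </div>
  );
};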
getIntersection.js
/** * Find intersection of points between two different masks * @memberof Image * @instance * @param {Image} mask2 - a mask (1 bit image) * @return {object} - object containing number of white pixels for mask1, for mask 2 and for them both */ export default function
(mask2) { let mask1 = this; let closestParent = mask1.getClosestCommonParent(mask2); let startPos1 = mask1.getRelativePosition(closestParent, { defaultFurther: true }); let allRelPos1 = getRelativePositionForAllPixels(mask1, startPos1); let startPos2 = mask2.getRelativePosition(closestParent, { defaultFurther: true }); let allRelPos2 = getRelativePositionForAllPixels(mask2, startPos2); let commonSurface = getCommonSurface(allRelPos1, allRelPos2); let intersection = { whitePixelsMask1: [], whitePixelsMask2: [], commonWhitePixels: [] }; for (let i = 0; i < commonSurface.length; i++) { let currentRelativePos = commonSurface[i]; let realPos1 = [currentRelativePos[0] - startPos1[0], currentRelativePos[1] - startPos1[1]]; let realPos2 = [currentRelativePos[0] - startPos2[0], currentRelativePos[1] - startPos2[1]]; let valueBitMask1 = mask1.getBitXY(realPos1[0], realPos1[1]); let valueBitMask2 = mask2.getBitXY(realPos2[0], realPos2[1]); if (valueBitMask1 === 1 && valueBitMask2 === 1) { intersection.commonWhitePixels.push(currentRelativePos); } } for (let i = 0; i < allRelPos1.length; i++) { let posX = 0; let posY = 0; if (i !== 0) { posX = Math.floor(i / (mask1.width - 1)); posY = i % (mask1.width - 1); } if (mask1.getBitXY(posX, posY) === 1) { intersection.whitePixelsMask1.push(allRelPos1[i]); } } for (let i = 0; i < allRelPos2.length; i++) { let posX = 0; let posY = 0; if (i !== 0) { posX = Math.floor(i / (mask2.width - 1)); posY = i % (mask2.width - 1); } if (mask2.getBitXY(posX, posY) === 1) { intersection.whitePixelsMask2.push(allRelPos2[i]); } } return intersection; } /** * Get relative position array for all pixels in masks * @param {Image} mask - a mask (1 bit image) * @param {Array<number>} startPosition - start position of mask relative to parent * @return {Array} - relative position of all pixels * @private */ function getRelativePositionForAllPixels(mask, startPosition) { let relativePositions = []; for (let i = 0; i < mask.height; i++) { for (let j = 0; j < mask.width; j++) { let originalPos = [i, j]; relativePositions.push([originalPos[0] + startPosition[0], originalPos[1] + startPosition[1]]); } } return relativePositions; } /** * Finds common surface for two arrays containing the positions of the pixels relative to parent image * @param {Array<number>} positionArray1 - positions of pixels relative to parent * @param {Array<number>} positionArray2 - positions of pixels relative to parent * @return {Array<number>} - positions of common pixels for both arrays * @private */ function getCommonSurface(positionArray1, positionArray2) { let i = 0; let j = 0; let commonSurface = []; while (i < positionArray1.length && j < positionArray2.length) { if (positionArray1[i][0] === positionArray2[j][0] && positionArray1[i][1] === positionArray2[j][1]) { commonSurface.push(positionArray1[i]); i++; j++; } else if (positionArray1[i][0] < positionArray2[j][0] || (positionArray1[i][0] === positionArray2[j][0] && positionArray1[i][1] < positionArray2[j][1])) { i++; } else { j++; } } return commonSurface; }
getIntersection
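// For reference, the conventional row-major mapping implied by
// getRelativePositionForAllPixels (i runs over height, j over width) is
// sketched below; the loops above divide by width - 1 instead, so treat
// this as an alternative reading, not this file's method:
//   const posX = Math.floor(i / mask.width); // row
//   const posY = i % mask.width;             // column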
driver.spec.ts
import 'mocha' import sinon from 'sinon' import { expect } from 'chai' import { silence } from './log' import { botUser, mockUser, apiUser } from '../utils/config' import * as api from './api' import * as utils from '../utils/testing' import * as driver from './driver' import * as methodCache from './methodCache' const delay = (ms) => new Promise((resolve, reject) => setTimeout(resolve, ms)) let clock let tId let pId const tName = utils.testChannelName const pName = utils.testPrivateName silence() // suppress log during tests (disable this while developing tests) describe('driver', () => { before(async () => { const testChannel = await utils.channelInfo({ roomName: tName }) tId = testChannel.channel._id const testPrivate = await utils.privateInfo({ roomName: pName }) pId = testPrivate.group._id }) after(async () => { await api.logout() await driver.logout() await driver.disconnect() }) describe('.connect', () => { context('with localhost connection', () => { it('without args, returns a promise', () => { const promise = driver.connect() expect(promise.then).to.be.a('function') promise.catch((err) => console.error(err)) return promise }) it('accepts an error-first callback, providing asteroid', (done) => { driver.connect({}, (err, asteroid) => { expect(err).to.equal(null) expect(asteroid).to.be.an('object') done() }) }) it('without url takes localhost as default', (done) => { driver.connect({}, (err, asteroid) => { expect(err).to.eql(null) // const connectionHost = asteroid.endpoint const connectionHost = asteroid._host expect(connectionHost).to.contain('localhost:3000') done() }) }) it('promise resolves with asteroid in successful state', () => { return driver.connect({}).then((asteroid) => { const isActive = (asteroid.ddp.readyState === 1) // const isActive = asteroid.ddp.status === 'connected' expect(isActive).to.equal(true) }) }) it('provides the asteroid instance to method cache', () => { return driver.connect().then((asteroid) => { expect(methodCache.instance).to.eql(asteroid) }) }) }) context('with timeout, on expiry', () => { before(() => clock = sinon.useFakeTimers(0)) after(() => clock.restore()) it('with url, attempts connection at URL', (done) => { driver.connect({ host: 'localhost:9999', timeout: 100 }, (err, asteroid) => { expect(err).to.be.an('error') const connectionHost = asteroid.endpoint || asteroid._host expect(connectionHost).to.contain('localhost:9999') done() }) clock.tick(200) }) it('returns error', (done) => { let opts = { host: 'localhost:9999', timeout: 100 } driver.connect(opts, (err, asteroid) => { const isActive = (asteroid.ddp.readyState === 1) expect(err).to.be.an('error') expect(isActive).to.eql(false) done() }) clock.tick(200) }) it('without callback, triggers promise catch', () => { const promise = driver.connect({ host: 'localhost:9999', timeout: 100 }) .catch((err) => expect(err).to.be.an('error')) clock.tick(200) return promise }) it('with callback, provides error to callback', (done) => { driver.connect({ host: 'localhost:9999', timeout: 100 }, (err) => { expect(err).to.be.an('error') done() }) clock.tick(200) }) }) }) // describe('disconnect', () => { // Disabled for now, as only Asteroid v2 has a disconnect method // it('disconnects from asteroid', async () => { // await driver.connect() // const asteroid = await driver.connect() // await driver.disconnect() // const isActive = asteroid.ddp.readyState === 1 // // const isActive = asteroid.ddp.status === 'connected' // expect(isActive).to.equal(false) // }) // }) describe('.login', () => { it('sets the 
bot user status to online', async () => { await driver.connect() await driver.login() await utils const result = await utils.userInfo(botUser.username) expect(result.user.status).to.equal('online') }) }) describe('.subscribeToMessages', () => { it('resolves with subscription object', async () => { await driver.connect() await driver.login() const subscription = await driver.subscribeToMessages() expect(subscription).to.have.property('ready') // expect(subscription.ready).to.have.property('state', 'fulfilled') ???? }) }) describe('.reactToMessages', () => { afterEach(() => delay(500)) // avoid rate limit it('calls callback on every subscription update', async () => { await driver.connect() await driver.login() await driver.subscribeToMessages() const callback = sinon.spy() driver.reactToMessages(callback) await utils.sendFromUser({ text: 'SDK test `reactToMessages` 1' }) await delay(500) await utils.sendFromUser({ text: 'SDK test `reactToMessages` 2' }) expect(callback.callCount).to.equal(2) }) it('calls callback with sent message object', async () => { await driver.connect() await driver.login() await driver.subscribeToMessages() const callback = sinon.spy() driver.reactToMessages(callback) await utils.sendFromUser({ text: 'SDK test `reactToMessages` 3' }) const messageArgs = callback.getCall(0).args[1] expect(messageArgs.msg).to.equal('SDK test `reactToMessages` 3') }) }) describe('.sendMessage', () => { before(async () => { await driver.connect() await driver.login() }) it('sends a custom message', async () => { const message = driver.prepareMessage({ rid: tId, msg: ':point_down:', emoji: ':point_right:', reactions: { ':thumbsup:': { usernames: [botUser.username] } }, groupable: false }) await driver.sendMessage(message) const last = (await utils.lastMessages(tId))[0] expect(last).to.have.deep.property('reactions', message.reactions) expect(last).to.have.property('emoji', ':point_right:') expect(last).to.have.property('msg', ':point_down:') }) it('sends a message with actions', async () => { const attachments = [{ actions: [ { type: 'button', text: 'Action 1', msg: 'Testing Action 1', msg_in_chat_window: true }, { type: 'button', text: 'Action 2', msg: 'Testing Action 2', msg_in_chat_window: true } ] }] await driver.sendMessage({ rid: tId, msg: 'SDK test `prepareMessage` actions', attachments }) const last = (await utils.lastMessages(tId))[0] delete last.attachments[0].ts; expect(last.attachments).to.eql(attachments) }) }) describe('.editMessage', () => { before(async () => { await driver.connect() await driver.login() }) it('edits the last sent message', async () => { const original = driver.prepareMessage({ msg: ':point_down:', emoji: ':point_right:', groupable: false, rid: tId }) await driver.sendMessage(original) const sent = (await utils.lastMessages(tId))[0] const update = Object.assign({}, original, { _id: sent._id, msg: ':point_up:' }) await driver.editMessage(update) const last = (await utils.lastMessages(tId))[0] expect(last).to.have.property('msg', ':point_up:') expect(last).to.have.deep.property('editedBy', { _id: driver.userId, username: botUser.username }) }) }) describe('.setReaction', () => { before(async () => { await driver.connect() await driver.login() }) it('adds emoji reaction to message', async () => { let sent = await driver.sendToRoomId('test reactions', tId) if (Array.isArray(sent)) sent = sent[0] // see todo on `sendToRoomId` await driver.setReaction(':thumbsup:', sent._id) const last = (await utils.lastMessages(tId))[0] 
expect(last.reactions).to.have.deep.property(':thumbsup:', { usernames: [ botUser.username ] }) }) it('removes if used when emoji reaction exists', async () => { const sent = await driver.sendMessage(driver.prepareMessage({ msg: 'test reactions -', reactions: { ':thumbsup:': { usernames: [botUser.username] } }, rid: tId })) await driver.setReaction(':thumbsup:', sent._id) const last = (await utils.lastMessages(tId))[0] expect(last).to.not.have.property('reactions') }) }) describe('.sendToRoomId', () => { it('sends string to the given room id', async () => { const result = await driver.sendToRoomId('SDK test `sendToRoomId`', tId) expect(result).to.include.all.keys(['msg', 'rid', '_id']) }) it('sends array of strings to the given room id', async () => { const result = await driver.sendToRoomId([ 'SDK test `sendToRoomId` A', 'SDK test `sendToRoomId` B' ], tId) expect(result).to.be.an('array') expect(result[0]).to.include.all.keys(['msg', 'rid', '_id']) expect(result[1]).to.include.all.keys(['msg', 'rid', '_id']) }) }) describe('.sendToRoom', () => { it('sends string to the given room name', async () => { await driver.connect() await driver.login() await driver.subscribeToMessages() const result = await driver.sendToRoom('SDK test `sendToRoom`', tName) expect(result).to.include.all.keys(['msg', 'rid', '_id']) }) it('sends array of strings to the given room name', async () => { await driver.connect() await driver.login() await driver.subscribeToMessages() const result = await driver.sendToRoom([ 'SDK test `sendToRoom` A', 'SDK test `sendToRoom` B' ], tName) expect(result).to.be.an('array') expect(result[0]).to.include.all.keys(['msg', 'rid', '_id']) expect(result[1]).to.include.all.keys(['msg', 'rid', '_id']) }) }) describe('.sendDirectToUser', () => { before(async () => { await driver.connect() await driver.login() }) it('sends string to the given room name', async () => { await driver.connect() await driver.login() const result = await driver.sendDirectToUser('SDK test `sendDirectToUser`', mockUser.username) expect(result).to.include.all.keys(['msg', 'rid', '_id']) }) it('sends array of strings to the given room name', async () => { const result = await driver.sendDirectToUser([ 'SDK test `sendDirectToUser` A', 'SDK test `sendDirectToUser` B' ], mockUser.username) expect(result).to.be.an('array') expect(result[0]).to.include.all.keys(['msg', 'rid', '_id']) expect(result[1]).to.include.all.keys(['msg', 'rid', '_id']) }) }) describe('.respondToMessages', () => { beforeEach(async () => { await driver.connect() await driver.login() await driver.subscribeToMessages() }) it('joins rooms if not already joined', async () => { expect(driver.joinedIds).to.have.lengthOf(0) await driver.respondToMessages(() => null, { rooms: ['general', tName] }) expect(driver.joinedIds).to.have.lengthOf(2) }) it('ignores messages sent from bot', async () => { const callback = sinon.spy() driver.respondToMessages(callback) await driver.sendToRoomId('SDK test `respondToMessages`', tId) sinon.assert.notCalled(callback) }) it('fires callback on messages in joined rooms', async () => { const callback = sinon.spy() driver.respondToMessages(callback, { rooms: [tName] }) await utils.sendFromUser({ text: 'SDK test `respondToMessages` 1' }) sinon.assert.calledOnce(callback) }) it('by default ignores edited messages', async () => { const callback = sinon.spy() const sentMessage = await utils.sendFromUser({ text: 'SDK test `respondToMessages` sent' }) driver.respondToMessages(callback, { rooms: [tName] }) await 
utils.updateFromUser({ roomId: tId, msgId: sentMessage.message._id, text: 'SDK test `respondToMessages` edited' }) sinon.assert.notCalled(callback) }) it('ignores edited messages, after receiving original', async () => { const callback = sinon.spy() driver.respondToMessages(callback, { rooms: [tName] }) const sentMessage = await utils.sendFromUser({ text: 'SDK test `respondToMessages` sent' }) await utils.updateFromUser({ roomId: tId, msgId: sentMessage.message._id, text: 'SDK test `respondToMessages` edited' }) sinon.assert.calledOnce(callback) }) it('fires callback on edited message if configured', async () => { const callback = sinon.spy() const sentMessage = await utils.sendFromUser({ text: 'SDK test `respondToMessages` sent' }) driver.respondToMessages(callback, { edited: true, rooms: [tName] }) await utils.updateFromUser({ roomId: tId, msgId: sentMessage.message._id, text: 'SDK test `respondToMessages` edited' }) await delay(500); sinon.assert.called(callback) }) it('by default ignores DMs', async () => { const dmResult = await utils.setupDirectFromUser() const callback = sinon.spy() driver.respondToMessages(callback, { rooms: [tName] }) await utils.sendFromUser({ text: 'SDK test `respondToMessages` DM', roomId: dmResult.room._id }) sinon.assert.notCalled(callback) }) it('fires callback on DMs if configured', async () => { const dmResult = await utils.setupDirectFromUser() const callback = sinon.spy() driver.respondToMessages(callback, { dm: true, rooms: [tName] }) await utils.sendFromUser({ text: 'SDK test `respondToMessages` DM', roomId: dmResult.room._id }) sinon.assert.calledOnce(callback) }) it('fires callback on ul (user leave) message types', async () => { const callback = sinon.spy() driver.respondToMessages(callback, { rooms: [tName] }) await utils.leaveUser() await delay(500) sinon.assert.calledWithMatch(callback, null, sinon.match({ t: 'ul' })) await utils.inviteUser() }) it('fires callback on au (user added) message types', async () => { await utils.leaveUser() const callback = sinon.spy() driver.respondToMessages(callback, { rooms: [tName] }) await utils.inviteUser() await delay(500) sinon.assert.calledWithMatch(callback, null, sinon.match({ t: 'au' })) }) // it('appends room name to event meta in channels', async () => { // const callback = sinon.spy() // driver.respondToMessages(callback, { dm: true, rooms: [tName] }) // await utils.sendFromUser({ text: 'SDK test `respondToMessages` DM' }) // expect(callback.firstCall.args[2].roomName).to.equal(tName) // }) // it('room name is undefined in direct messages', async () => { // const dmResult = await utils.setupDirectFromUser() // const callback = sinon.spy() // driver.respondToMessages(callback, { dm: true, rooms: [tName] }) // await utils.sendFromUser({ // text: 'SDK test `respondToMessages` DM', // roomId: dmResult.room._id // }) // expect(callback.firstCall.args[2].roomName).to.equal(undefined) // }) })
describe('.getRoomId', () => { beforeEach(async () => { await driver.connect() await driver.login() }) it('returns the ID for a channel by ID', async () => { const room = await driver.getRoomId(tName) expect(room).to.equal(tId) }) it('returns the ID for a private room name', async () => { const room = await driver.getRoomId(pName) expect(room).to.equal(pId) }) }) describe('.getRoomName', () => { beforeEach(async () => { await driver.connect() await driver.login() }) it('returns the name for a channel by ID', async () => { const room = await driver.getRoomName(tId) expect(room).to.equal(tName) }) it('returns the name for a private group by ID', async () => { const room = await driver.getRoomName(pId) expect(room).to.equal(pName) }) it('returns undefined for a DM room', async () => { const dmResult = await utils.setupDirectFromUser() const room = await driver.getRoomName(dmResult.room._id) expect(room).to.equal(undefined) }) }) describe('.joinRooms', () => { it('joins all the rooms in array, keeping IDs', async () => { driver.joinedIds.splice(0, driver.joinedIds.length) // clear const array await driver.connect() await driver.login() await driver.joinRooms(['general', tName]) expect(driver.joinedIds).to.have.members(['GENERAL', tId]) }) }) describe('execSlashCommand', () => { it('execute slash command', async () => { await driver.connect() await driver.login() const result = await driver.execSlashCommand({ cmd: 'shrug', params: '', msg: { rid: tId, msg: '' } }); expect(result).to.equal(undefined) }) }) })
etcd.go
// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package etcd import ( "context" "fmt" "time" druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1" hvpav1alpha1 "github.com/gardener/hvpa-controller/api/v1alpha1" appsv1 "k8s.io/api/apps/v1" autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/intstr" autoscalingv1beta2 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" "github.com/gardener/gardener/pkg/client/kubernetes" "github.com/gardener/gardener/pkg/operation/botanist/component" "github.com/gardener/gardener/pkg/utils" kutil "github.com/gardener/gardener/pkg/utils/kubernetes" "sigs.k8s.io/controller-runtime/pkg/client" ) // Class is a string type alias for etcd classes. type Class string const ( // ClassNormal is a constant for a normal etcd (without extensive metrics or higher resource settings, etc.) ClassNormal Class = "normal" // ClassImportant is a constant for an important etcd (with extensive metrics or higher resource settings, etc.). // Such etcds are also unsafe to evict (from the PoV of the cluster-autoscaler when trying to scale down). ClassImportant Class = "important" // SecretNameCA is the name of the secret containing the CA certificate and key for the etcd. SecretNameCA = v1beta1constants.SecretNameCAETCD // SecretNameServer is the name of the secret containing the server certificate and key for the etcd. SecretNameServer = "etcd-server-cert" // SecretNameClient is the name of the secret containing the client certificate and key for the etcd. SecretNameClient = "etcd-client-tls" // LabelAppValue is the value of a label whose key is 'app'. LabelAppValue = "etcd-statefulset" // NetworkPolicyName is the name of a network policy that allows ingress traffic to etcd from certain sources. NetworkPolicyName = "allow-etcd" portNameClient = "client" portNameBackupRestore = "backuprestore" statefulSetNamePrefix = "etcd" containerNameEtcd = "etcd" containerNameBackupRestore = "backup-restore" ) var ( // TimeNow is a function returning the current time exposed for testing. TimeNow = time.Now // PortEtcdServer is the port exposed by etcd for server-to-server communication. PortEtcdServer = 2380 // PortEtcdClient is the port exposed by etcd for client communication. 
PortEtcdClient = 2379 // PortBackupRestore is the client port exposed by the backup-restore sidecar container. PortBackupRestore = 8080 ) // Name returns the name of the Etcd object for the given role. func Name(role string) string { return "etcd-" + role } // ServiceName returns the service name for an etcd for the given role. func
(role string) string { return fmt.Sprintf("etcd-%s-client", role) } // Etcd contains functions for a etcd deployer. type Etcd interface { component.DeployWaiter component.MonitoringComponent // ServiceDNSNames returns the service DNS names for the etcd. ServiceDNSNames() []string // Snapshot triggers the backup-restore sidecar to perform a full snapshot in case backup configuration is provided. Snapshot(context.Context, kubernetes.PodExecutor) error // SetSecrets sets the secrets. SetSecrets(Secrets) // SetBackupConfig sets the backup configuration. SetBackupConfig(config *BackupConfig) // SetHVPAConfig sets the HVPA configuration. SetHVPAConfig(config *HVPAConfig) } // New creates a new instance of DeployWaiter for the Etcd. func New( client client.Client, namespace string, role string, class Class, retainReplicas bool, storageCapacity string, defragmentationSchedule *string, ) Etcd { return &etcd{ client: client, namespace: namespace, role: role, class: class, retainReplicas: retainReplicas, storageCapacity: storageCapacity, defragmentationSchedule: defragmentationSchedule, } } type etcd struct { client client.Client namespace string role string class Class retainReplicas bool storageCapacity string defragmentationSchedule *string secrets Secrets backupConfig *BackupConfig hvpaConfig *HVPAConfig } func (e *etcd) Deploy(ctx context.Context) error { if e.secrets.CA.Name == "" || e.secrets.CA.Checksum == "" { return fmt.Errorf("missing CA secret information") } if e.secrets.Server.Name == "" || e.secrets.Server.Checksum == "" { return fmt.Errorf("missing server secret information") } if e.secrets.Client.Name == "" || e.secrets.Client.Checksum == "" { return fmt.Errorf("missing client secret information") } var ( networkPolicy = e.emptyNetworkPolicy() etcd = e.emptyEtcd() hvpa = e.emptyHVPA() ) existingEtcd, foundEtcd, err := e.getExistingEtcd(ctx, Name(e.role)) if err != nil { return err } stsName := Name(e.role) if foundEtcd && existingEtcd.Status.Etcd.Name != "" { stsName = existingEtcd.Status.Etcd.Name } existingSts, foundSts, err := e.getExistingStatefulSet(ctx, stsName) if err != nil { return err } var ( replicas = e.computeReplicas(foundEtcd, existingEtcd) protocolTCP = corev1.ProtocolTCP intStrPortEtcdClient = intstr.FromInt(PortEtcdClient) intStrPortBackupRestore = intstr.FromInt(PortBackupRestore) resourcesEtcd, resourcesBackupRestore = e.computeContainerResources(foundSts, existingSts) quota = resource.MustParse("8Gi") storageCapacity = resource.MustParse(e.storageCapacity) garbageCollectionPolicy = druidv1alpha1.GarbageCollectionPolicy(druidv1alpha1.GarbageCollectionPolicyExponential) garbageCollectionPeriod = metav1.Duration{Duration: 12 * time.Hour} annotations = map[string]string{ "checksum/secret-etcd-ca": e.secrets.CA.Checksum, "checksum/secret-etcd-server-cert": e.secrets.Server.Checksum, "checksum/secret-etcd-client-tls": e.secrets.Client.Checksum, } metrics = druidv1alpha1.Basic volumeClaimTemplate = Name(e.role) minAllowed = corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("50m"), corev1.ResourceMemory: resource.MustParse("200M"), } ) if e.class == ClassImportant { annotations["cluster-autoscaler.kubernetes.io/safe-to-evict"] = "false" metrics = druidv1alpha1.Extensive volumeClaimTemplate = e.role + "-etcd" minAllowed = corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("200m"), corev1.ResourceMemory: resource.MustParse("700M"), } } if _, err := controllerutil.CreateOrUpdate(ctx, e.client, networkPolicy, func() error { networkPolicy.Annotations = 
map[string]string{ v1beta1constants.GardenerDescription: "Allows Ingress to etcd pods from the Shoot's Kubernetes API Server.", } networkPolicy.Labels = map[string]string{ v1beta1constants.GardenRole: v1beta1constants.GardenRoleControlPlane, } networkPolicy.Spec.PodSelector = metav1.LabelSelector{ MatchLabels: map[string]string{ v1beta1constants.DeprecatedGardenRole: v1beta1constants.GardenRoleControlPlane, v1beta1constants.LabelApp: LabelAppValue, }, } networkPolicy.Spec.Ingress = []networkingv1.NetworkPolicyIngressRule{ { From: []networkingv1.NetworkPolicyPeer{ { PodSelector: &metav1.LabelSelector{ // TODO: Replace below map with a function call to the to-be-introduced kubeapiserver package. MatchLabels: map[string]string{ v1beta1constants.DeprecatedGardenRole: v1beta1constants.GardenRoleControlPlane, v1beta1constants.LabelApp: v1beta1constants.LabelKubernetes, v1beta1constants.LabelRole: v1beta1constants.LabelAPIServer, }, }, }, { PodSelector: &metav1.LabelSelector{ // TODO: Replace below map with a function call to the to-be-introduced prometheus package. MatchLabels: map[string]string{ v1beta1constants.DeprecatedGardenRole: "monitoring", v1beta1constants.LabelApp: "prometheus", v1beta1constants.LabelRole: "monitoring", }, }, }, }, Ports: []networkingv1.NetworkPolicyPort{ { Protocol: &protocolTCP, Port: &intStrPortEtcdClient, }, { Protocol: &protocolTCP, Port: &intStrPortBackupRestore, }, }, }, } networkPolicy.Spec.Egress = nil networkPolicy.Spec.PolicyTypes = []networkingv1.PolicyType{networkingv1.PolicyTypeIngress} return nil }); err != nil { return err } if _, err := controllerutil.CreateOrUpdate(ctx, e.client, etcd, func() error { etcd.Annotations = map[string]string{ v1beta1constants.GardenerOperation: v1beta1constants.GardenerOperationReconcile, v1beta1constants.GardenerTimestamp: TimeNow().UTC().String(), } etcd.Labels = map[string]string{ v1beta1constants.LabelRole: e.role, v1beta1constants.GardenRole: v1beta1constants.GardenRoleControlPlane, } etcd.Spec.Replicas = replicas etcd.Spec.PriorityClassName = pointer.StringPtr(v1beta1constants.PriorityClassNameShootControlPlane) etcd.Spec.Annotations = annotations etcd.Spec.Labels = utils.MergeStringMaps(e.getLabels(), map[string]string{ v1beta1constants.LabelApp: LabelAppValue, v1beta1constants.LabelNetworkPolicyToDNS: v1beta1constants.LabelNetworkPolicyAllowed, v1beta1constants.LabelNetworkPolicyToPublicNetworks: v1beta1constants.LabelNetworkPolicyAllowed, v1beta1constants.LabelNetworkPolicyToPrivateNetworks: v1beta1constants.LabelNetworkPolicyAllowed, }) etcd.Spec.Selector = &metav1.LabelSelector{ MatchLabels: utils.MergeStringMaps(e.getLabels(), map[string]string{ v1beta1constants.LabelApp: LabelAppValue, }), } etcd.Spec.Etcd = druidv1alpha1.EtcdConfig{ Resources: resourcesEtcd, TLS: &druidv1alpha1.TLSConfig{ TLSCASecretRef: corev1.SecretReference{ Name: e.secrets.CA.Name, Namespace: e.namespace, }, ServerTLSSecretRef: corev1.SecretReference{ Name: e.secrets.Server.Name, Namespace: e.namespace, }, ClientTLSSecretRef: corev1.SecretReference{ Name: e.secrets.Client.Name, Namespace: e.namespace, }, }, ServerPort: &PortEtcdServer, ClientPort: &PortEtcdClient, Metrics: metrics, DefragmentationSchedule: e.computeDefragmentationSchedule(foundEtcd, existingEtcd), Quota: &quota, } etcd.Spec.Backup = druidv1alpha1.BackupSpec{ Port: &PortBackupRestore, Resources: resourcesBackupRestore, GarbageCollectionPolicy: &garbageCollectionPolicy, GarbageCollectionPeriod: &garbageCollectionPeriod, } if e.backupConfig != nil { var ( provider = 
druidv1alpha1.StorageProvider(e.backupConfig.Provider) deltaSnapshotPeriod = metav1.Duration{Duration: 5 * time.Minute} deltaSnapshotMemoryLimit = resource.MustParse("100Mi") ) etcd.Spec.Backup.Store = &druidv1alpha1.StoreSpec{ SecretRef: &corev1.SecretReference{Name: e.backupConfig.SecretRefName}, Container: &e.backupConfig.Container, Provider: &provider, Prefix: fmt.Sprintf("%s/etcd-%s", e.backupConfig.Prefix, e.role), } etcd.Spec.Backup.FullSnapshotSchedule = e.computeFullSnapshotSchedule(foundEtcd, existingEtcd) etcd.Spec.Backup.DeltaSnapshotPeriod = &deltaSnapshotPeriod etcd.Spec.Backup.DeltaSnapshotMemoryLimit = &deltaSnapshotMemoryLimit } etcd.Spec.StorageCapacity = &storageCapacity etcd.Spec.VolumeClaimTemplate = &volumeClaimTemplate return nil }); err != nil { return err } if e.hvpaConfig != nil && e.hvpaConfig.Enabled { var ( hpaLabels = map[string]string{v1beta1constants.LabelRole: "etcd-hpa-" + e.role} vpaLabels = map[string]string{v1beta1constants.LabelRole: "etcd-vpa-" + e.role} updateModeAuto = hvpav1alpha1.UpdateModeAuto updateModeMaintenanceWindow = hvpav1alpha1.UpdateModeMaintenanceWindow containerPolicyOff = autoscalingv1beta2.ContainerScalingModeOff ) if _, err := controllerutil.CreateOrUpdate(ctx, e.client, hvpa, func() error { hvpa.Labels = utils.MergeStringMaps(e.getLabels(), map[string]string{ v1beta1constants.LabelApp: LabelAppValue, }) hvpa.Spec.Replicas = pointer.Int32Ptr(1) hvpa.Spec.MaintenanceTimeWindow = &hvpav1alpha1.MaintenanceTimeWindow{ Begin: e.hvpaConfig.MaintenanceTimeWindow.Begin, End: e.hvpaConfig.MaintenanceTimeWindow.End, } hvpa.Spec.Hpa = hvpav1alpha1.HpaSpec{ Selector: &metav1.LabelSelector{MatchLabels: hpaLabels}, Deploy: false, Template: hvpav1alpha1.HpaTemplate{ ObjectMeta: metav1.ObjectMeta{ Labels: hpaLabels, }, Spec: hvpav1alpha1.HpaTemplateSpec{ MinReplicas: pointer.Int32Ptr(int32(replicas)), MaxReplicas: int32(replicas), Metrics: []autoscalingv2beta1.MetricSpec{ { Type: autoscalingv2beta1.ResourceMetricSourceType, Resource: &autoscalingv2beta1.ResourceMetricSource{ Name: corev1.ResourceCPU, TargetAverageUtilization: pointer.Int32Ptr(80), }, }, { Type: autoscalingv2beta1.ResourceMetricSourceType, Resource: &autoscalingv2beta1.ResourceMetricSource{ Name: corev1.ResourceMemory, TargetAverageUtilization: pointer.Int32Ptr(80), }, }, }, }, }, } hvpa.Spec.Vpa = hvpav1alpha1.VpaSpec{ Selector: &metav1.LabelSelector{MatchLabels: vpaLabels}, Deploy: true, ScaleUp: hvpav1alpha1.ScaleType{ UpdatePolicy: hvpav1alpha1.UpdatePolicy{ UpdateMode: &updateModeAuto, }, StabilizationDuration: pointer.StringPtr("5m"), MinChange: hvpav1alpha1.ScaleParams{ CPU: hvpav1alpha1.ChangeParams{ Value: pointer.StringPtr("1"), Percentage: pointer.Int32Ptr(80), }, Memory: hvpav1alpha1.ChangeParams{ Value: pointer.StringPtr("2G"), Percentage: pointer.Int32Ptr(80), }, }, }, ScaleDown: hvpav1alpha1.ScaleType{ UpdatePolicy: hvpav1alpha1.UpdatePolicy{ UpdateMode: &updateModeMaintenanceWindow, }, StabilizationDuration: pointer.StringPtr("15m"), MinChange: hvpav1alpha1.ScaleParams{ CPU: hvpav1alpha1.ChangeParams{ Value: pointer.StringPtr("1"), Percentage: pointer.Int32Ptr(80), }, Memory: hvpav1alpha1.ChangeParams{ Value: pointer.StringPtr("2G"), Percentage: pointer.Int32Ptr(80), }, }, }, LimitsRequestsGapScaleParams: hvpav1alpha1.ScaleParams{ CPU: hvpav1alpha1.ChangeParams{ Value: pointer.StringPtr("1"), Percentage: pointer.Int32Ptr(40), }, Memory: hvpav1alpha1.ChangeParams{ Value: pointer.StringPtr("1G"), Percentage: pointer.Int32Ptr(40), }, }, Template: 
hvpav1alpha1.VpaTemplate{ ObjectMeta: metav1.ObjectMeta{ Labels: vpaLabels, }, Spec: hvpav1alpha1.VpaTemplateSpec{ ResourcePolicy: &autoscalingv1beta2.PodResourcePolicy{ ContainerPolicies: []autoscalingv1beta2.ContainerResourcePolicy{ { ContainerName: containerNameEtcd, MinAllowed: minAllowed, MaxAllowed: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("4"), corev1.ResourceMemory: resource.MustParse("30G"), }, }, { ContainerName: containerNameBackupRestore, Mode: &containerPolicyOff, }, }, }, }, }, } hvpa.Spec.WeightBasedScalingIntervals = []hvpav1alpha1.WeightBasedScalingInterval{ { VpaWeight: hvpav1alpha1.VpaOnly, StartReplicaCount: int32(replicas), LastReplicaCount: int32(replicas), }, } hvpa.Spec.TargetRef = &autoscalingv2beta1.CrossVersionObjectReference{ APIVersion: appsv1.SchemeGroupVersion.String(), Kind: "StatefulSet", Name: stsName, } return nil }); err != nil { return err } } else { if err := kutil.DeleteObjects(ctx, e.client, e.emptyHVPA()); err != nil { return err } } return nil } func (e *etcd) Destroy(ctx context.Context) error { return kutil.DeleteObjects( ctx, e.client, e.emptyHVPA(), e.emptyEtcd(), e.emptyNetworkPolicy(), ) } func (e *etcd) getLabels() map[string]string { return map[string]string{ v1beta1constants.DeprecatedGardenRole: v1beta1constants.GardenRoleControlPlane, v1beta1constants.LabelRole: e.role, } } func (e *etcd) getExistingEtcd(ctx context.Context, name string) (*druidv1alpha1.Etcd, bool, error) { obj, found, err := e.getExistingResource(ctx, name, &druidv1alpha1.Etcd{}) if obj != nil { return obj.(*druidv1alpha1.Etcd), found, err } return nil, found, err } func (e *etcd) getExistingStatefulSet(ctx context.Context, name string) (*appsv1.StatefulSet, bool, error) { obj, found, err := e.getExistingResource(ctx, name, &appsv1.StatefulSet{}) if obj != nil { return obj.(*appsv1.StatefulSet), found, err } return nil, found, err } func (e *etcd) getExistingResource(ctx context.Context, name string, obj client.Object) (client.Object, bool, error) { if err := e.client.Get(ctx, kutil.Key(e.namespace, name), obj); err != nil { if !apierrors.IsNotFound(err) { return nil, false, err } return nil, false, nil } return obj, true, nil } func (e *etcd) emptyNetworkPolicy() *networkingv1.NetworkPolicy { return &networkingv1.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: NetworkPolicyName, Namespace: e.namespace}} } func (e *etcd) emptyEtcd() *druidv1alpha1.Etcd { return &druidv1alpha1.Etcd{ObjectMeta: metav1.ObjectMeta{Name: Name(e.role), Namespace: e.namespace}} } func (e *etcd) emptyHVPA() *hvpav1alpha1.Hvpa { return &hvpav1alpha1.Hvpa{ObjectMeta: metav1.ObjectMeta{Name: Name(e.role), Namespace: e.namespace}} } func (e *etcd) Snapshot(ctx context.Context, podExecutor kubernetes.PodExecutor) error { if e.backupConfig == nil { return fmt.Errorf("no backup is configured for this etcd, cannot make a snapshot") } etcdMainSelector := e.podLabelSelector() podsList := &corev1.PodList{} if err := e.client.List(ctx, podsList, client.InNamespace(e.namespace), client.MatchingLabelsSelector{Selector: etcdMainSelector}); err != nil { return err } if len(podsList.Items) == 0 { return fmt.Errorf("didn't find any pods for selector: %v", etcdMainSelector) } if len(podsList.Items) > 1 { return fmt.Errorf("multiple ETCD Pods found. 
Pod list found: %v", podsList.Items) } _, err := podExecutor.Execute( e.namespace, podsList.Items[0].GetName(), containerNameBackupRestore, "/bin/sh", fmt.Sprintf("curl -k https://etcd-%s-local:%d/snapshot/full", e.role, PortBackupRestore), ) return err } func (e *etcd) ServiceDNSNames() []string { return append( []string{fmt.Sprintf("etcd-%s-local", e.role)}, kutil.DNSNamesForService(fmt.Sprintf("etcd-%s-client", e.role), e.namespace)..., ) } func (e *etcd) SetSecrets(secrets Secrets) { e.secrets = secrets } func (e *etcd) SetBackupConfig(backupConfig *BackupConfig) { e.backupConfig = backupConfig } func (e *etcd) SetHVPAConfig(hvpaConfig *HVPAConfig) { e.hvpaConfig = hvpaConfig } func (e *etcd) podLabelSelector() labels.Selector { app, _ := labels.NewRequirement(v1beta1constants.LabelApp, selection.Equals, []string{LabelAppValue}) role, _ := labels.NewRequirement(v1beta1constants.LabelRole, selection.Equals, []string{e.role}) return labels.NewSelector().Add(*role, *app) } func (e *etcd) computeContainerResources(foundSts bool, existingSts *appsv1.StatefulSet) (*corev1.ResourceRequirements, *corev1.ResourceRequirements) { var ( resourcesEtcd = &corev1.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("300m"), corev1.ResourceMemory: resource.MustParse("1G"), }, Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("900m"), corev1.ResourceMemory: resource.MustParse("3G"), }, } resourcesBackupRestore = &corev1.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("23m"), corev1.ResourceMemory: resource.MustParse("128Mi"), }, Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("10G"), }, } ) if foundSts && e.hvpaConfig != nil && e.hvpaConfig.Enabled { for k := range existingSts.Spec.Template.Spec.Containers { v := existingSts.Spec.Template.Spec.Containers[k] switch v.Name { case containerNameEtcd: resourcesEtcd = v.Resources.DeepCopy() case containerNameBackupRestore: resourcesBackupRestore = v.Resources.DeepCopy() } } } return resourcesEtcd, resourcesBackupRestore } func (e *etcd) computeReplicas(foundEtcd bool, existingEtcd *druidv1alpha1.Etcd) int { if !e.retainReplicas { return 1 } if foundEtcd { return existingEtcd.Spec.Replicas } return 0 } func (e *etcd) computeDefragmentationSchedule(foundEtcd bool, existingEtcd *druidv1alpha1.Etcd) *string { defragmentationSchedule := e.defragmentationSchedule if foundEtcd && existingEtcd.Spec.Etcd.DefragmentationSchedule != nil { defragmentationSchedule = existingEtcd.Spec.Etcd.DefragmentationSchedule } return defragmentationSchedule } func (e *etcd) computeFullSnapshotSchedule(foundEtcd bool, existingEtcd *druidv1alpha1.Etcd) *string { fullSnapshotSchedule := &e.backupConfig.FullSnapshotSchedule if foundEtcd && existingEtcd.Spec.Backup.FullSnapshotSchedule != nil { fullSnapshotSchedule = existingEtcd.Spec.Backup.FullSnapshotSchedule } return fullSnapshotSchedule } // Secrets is collection of secrets for the etcd. type Secrets struct { // CA is a secret containing the CA certificate and key. CA component.Secret // Server is a secret containing the server certificate and key. Server component.Secret // Client is a secret containing the client certificate and key. Client component.Secret } // BackupConfig contains information for configuring the backup-restore sidecar so that it takes regularly backups of // the etcd's data directory. 
type BackupConfig struct {
	// Provider is the name of the infrastructure provider for the blob storage bucket.
	Provider string
	// Container is the name of the blob storage bucket.
	Container string
	// SecretRefName is the name of a Secret object containing the credentials of the selected infrastructure provider.
	SecretRefName string
	// Prefix is a prefix that shall be used for the filename of the backups of this etcd.
	Prefix string
	// FullSnapshotSchedule is a cron schedule that declares how frequently full snapshots shall be taken.
	FullSnapshotSchedule string
}

// HVPAConfig contains information for configuring the HVPA object for the etcd.
type HVPAConfig struct {
	// Enabled states whether an HVPA object shall be deployed.
	Enabled bool
	// MaintenanceTimeWindow contains the begin and end of a time window that allows down-scaling the etcd in case its
	// resource requests/limits are unnecessarily high.
	MaintenanceTimeWindow gardencorev1beta1.MaintenanceTimeWindow
}
ServiceName
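Taken together, Secrets, BackupConfig, and HVPAConfig above form the entire configuration surface of this etcd component. The sketch below shows how a caller might wire them up before deploying; it is a hedged illustration, not the component's real call site: the constructor is outside this excerpt, so the snippet accepts the component through a small assumed interface (the Deploy name mirrors the Destroy method shown above), and all secret names, bucket names, and schedules are made-up example values.

// Sketch only (same package as the component above). The inline interface is
// an assumption standing in for the real constructor, which is not shown in
// this excerpt; only the Set* methods and config types are taken from it.
func deployEtcd(ctx context.Context, e interface {
	SetSecrets(Secrets)
	SetBackupConfig(*BackupConfig)
	SetHVPAConfig(*HVPAConfig)
	Deploy(context.Context) error // assumed counterpart to the Destroy method shown above
}) error {
	e.SetSecrets(Secrets{ // illustrative secret names
		CA:     component.Secret{Name: "ca-etcd"},
		Server: component.Secret{Name: "etcd-server-cert"},
		Client: component.Secret{Name: "etcd-client-tls"},
	})
	e.SetBackupConfig(&BackupConfig{ // a non-nil config populates etcd.Spec.Backup.Store
		Provider:             "aws",
		Container:            "example-backup-bucket",
		SecretRefName:        "etcd-backup",
		Prefix:               "shoot--foo--bar",
		FullSnapshotSchedule: "0 0 * * *", // cron: one full snapshot per day
	})
	e.SetHVPAConfig(&HVPAConfig{ // when disabled, any existing HVPA is deleted instead
		Enabled: true,
		MaintenanceTimeWindow: gardencorev1beta1.MaintenanceTimeWindow{
			Begin: "220000+0000", // assumed HHMMSS+ZONE maintenance-window format
			End:   "230000+0000",
		},
	})
	return e.Deploy(ctx)
}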
hello.twirp.go
// Code generated by protoc-gen-twirp v8.1.0, DO NOT EDIT.
// source: proto/hello/v1/hello.proto

package hellopb

import context "context"
import fmt "fmt"
import http "net/http"
import ioutil "io/ioutil"
import json "encoding/json"
import strconv "strconv"
import strings "strings"

import protojson "google.golang.org/protobuf/encoding/protojson"
import proto "google.golang.org/protobuf/proto"
import twirp "github.com/twitchtv/twirp"
import ctxsetters "github.com/twitchtv/twirp/ctxsetters"

import bytes "bytes"
import errors "errors"
import io "io"
import path "path"
import url "net/url"

// Version compatibility assertion.
// If the constant is not defined in the package, that likely means
// the package needs to be updated to work with this generated code.
// See https://twitchtv.github.io/twirp/docs/version_matrix.html
const _ = twirp.TwirpPackageMinVersion_8_1_0

// ======================
// HelloService Interface
// ======================

type HelloService interface {
	SayHello(context.Context, *SayHelloRequest) (*SayHelloResponse, error)
}

// ============================
// HelloService Protobuf Client
// ============================

type helloServiceProtobufClient struct {
	client      HTTPClient
	urls        [1]string
	interceptor twirp.Interceptor
	opts        twirp.ClientOptions
}

// NewHelloServiceProtobufClient creates a Protobuf client that implements the HelloService interface.
// It communicates using Protobuf and can be configured with a custom HTTPClient.
func NewHelloServiceProtobufClient(baseURL string, client HTTPClient, opts ...twirp.ClientOption) HelloService {
	if c, ok := client.(*http.Client); ok {
		client = withoutRedirects(c)
	}

	clientOpts := twirp.ClientOptions{}
	for _, o := range opts {
		o(&clientOpts)
	}

	// Using ReadOpt allows backwards and forwards compatibility with new options in the future
	literalURLs := false
	_ = clientOpts.ReadOpt("literalURLs", &literalURLs)
	var pathPrefix string
	if ok := clientOpts.ReadOpt("pathPrefix", &pathPrefix); !ok {
		pathPrefix = "/twirp" // default prefix
	}

	// Build method URLs: <baseURL>[<prefix>]/<package>.<Service>/<Method>
	serviceURL := sanitizeBaseURL(baseURL)
	serviceURL += baseServicePath(pathPrefix, "proto.hello.v1", "HelloService")
	urls := [1]string{
		serviceURL + "SayHello",
	}

	return &helloServiceProtobufClient{
		client:      client,
		urls:        urls,
		interceptor: twirp.ChainInterceptors(clientOpts.Interceptors...),
		opts:        clientOpts,
	}
}

func (c *helloServiceProtobufClient) SayHello(ctx context.Context, in *SayHelloRequest) (*SayHelloResponse, error) {
	ctx = ctxsetters.WithPackageName(ctx, "proto.hello.v1")
	ctx = ctxsetters.WithServiceName(ctx, "HelloService")
	ctx = ctxsetters.WithMethodName(ctx, "SayHello")
	caller := c.callSayHello
	if c.interceptor != nil {
		caller = func(ctx context.Context, req *SayHelloRequest) (*SayHelloResponse, error) {
			resp, err := c.interceptor(
				func(ctx context.Context, req interface{}) (interface{}, error) {
					typedReq, ok := req.(*SayHelloRequest)
					if !ok {
						return nil, twirp.InternalError("failed type assertion req.(*SayHelloRequest) when calling interceptor")
					}
					return c.callSayHello(ctx, typedReq)
				},
			)(ctx, req)
			if resp != nil {
				typedResp, ok := resp.(*SayHelloResponse)
				if !ok {
					return nil, twirp.InternalError("failed type assertion resp.(*SayHelloResponse) when calling interceptor")
				}
				return typedResp, err
			}
			return nil, err
		}
	}
	return caller(ctx, in)
}

func (c *helloServiceProtobufClient) callSayHello(ctx context.Context, in *SayHelloRequest) (*SayHelloResponse, error) {
	out := new(SayHelloResponse)
	ctx, err := doProtobufRequest(ctx, c.client, c.opts.Hooks, c.urls[0], in, out)
	if err != nil {
		twerr, ok := err.(twirp.Error)
		if !ok {
			twerr = twirp.InternalErrorWith(err)
		}
		callClientError(ctx, c.opts.Hooks, twerr)
		return nil, err
	}
	callClientResponseReceived(ctx, c.opts.Hooks)
	return out, nil
}

// ========================
// HelloService JSON Client
// ========================

type helloServiceJSONClient struct {
	client      HTTPClient
	urls        [1]string
	interceptor twirp.Interceptor
	opts        twirp.ClientOptions
}

// NewHelloServiceJSONClient creates a JSON client that implements the HelloService interface.
// It communicates using JSON and can be configured with a custom HTTPClient.
func NewHelloServiceJSONClient(baseURL string, client HTTPClient, opts ...twirp.ClientOption) HelloService {
	if c, ok := client.(*http.Client); ok {
		client = withoutRedirects(c)
	}

	clientOpts := twirp.ClientOptions{}
	for _, o := range opts {
		o(&clientOpts)
	}

	// Using ReadOpt allows backwards and forwards compatibility with new options in the future
	literalURLs := false
	_ = clientOpts.ReadOpt("literalURLs", &literalURLs)
	var pathPrefix string
	if ok := clientOpts.ReadOpt("pathPrefix", &pathPrefix); !ok {
		pathPrefix = "/twirp" // default prefix
	}

	// Build method URLs: <baseURL>[<prefix>]/<package>.<Service>/<Method>
	serviceURL := sanitizeBaseURL(baseURL)
	serviceURL += baseServicePath(pathPrefix, "proto.hello.v1", "HelloService")
	urls := [1]string{
		serviceURL + "SayHello",
	}

	return &helloServiceJSONClient{
		client:      client,
		urls:        urls,
		interceptor: twirp.ChainInterceptors(clientOpts.Interceptors...),
		opts:        clientOpts,
	}
}

func (c *helloServiceJSONClient) SayHello(ctx context.Context, in *SayHelloRequest) (*SayHelloResponse, error) {
	ctx = ctxsetters.WithPackageName(ctx, "proto.hello.v1")
	ctx = ctxsetters.WithServiceName(ctx, "HelloService")
	ctx = ctxsetters.WithMethodName(ctx, "SayHello")
	caller := c.callSayHello
	if c.interceptor != nil {
		caller = func(ctx context.Context, req *SayHelloRequest) (*SayHelloResponse, error) {
			resp, err := c.interceptor(
				func(ctx context.Context, req interface{}) (interface{}, error) {
					typedReq, ok := req.(*SayHelloRequest)
					if !ok {
						return nil, twirp.InternalError("failed type assertion req.(*SayHelloRequest) when calling interceptor")
					}
					return c.callSayHello(ctx, typedReq)
				},
			)(ctx, req)
			if resp != nil {
				typedResp, ok := resp.(*SayHelloResponse)
				if !ok {
					return nil, twirp.InternalError("failed type assertion resp.(*SayHelloResponse) when calling interceptor")
				}
				return typedResp, err
			}
			return nil, err
		}
	}
	return caller(ctx, in)
}

func (c *helloServiceJSONClient) callSayHello(ctx context.Context, in *SayHelloRequest) (*SayHelloResponse, error) {
	out := new(SayHelloResponse)
	ctx, err := doJSONRequest(ctx, c.client, c.opts.Hooks, c.urls[0], in, out)
	if err != nil {
		twerr, ok := err.(twirp.Error)
		if !ok {
			twerr = twirp.InternalErrorWith(err)
		}
		callClientError(ctx, c.opts.Hooks, twerr)
		return nil, err
	}
	callClientResponseReceived(ctx, c.opts.Hooks)
	return out, nil
}

// ===========================
// HelloService Server Handler
// ===========================

type helloServiceServer struct {
	HelloService
	interceptor      twirp.Interceptor
	hooks            *twirp.ServerHooks
	pathPrefix       string // prefix for routing
	jsonSkipDefaults bool   // do not include unpopulated fields (default values) in the response
	jsonCamelCase    bool   // JSON fields are serialized as lowerCamelCase rather than keeping the original proto names
}

// NewHelloServiceServer builds a TwirpServer that can be used as an http.Handler to handle
// HTTP requests that are routed to the right method in the provided svc implementation.
// The opts are twirp.ServerOption modifiers, for example twirp.WithServerHooks(hooks).
func NewHelloServiceServer(svc HelloService, opts ...interface{}) TwirpServer {
	serverOpts := newServerOpts(opts)

	// Using ReadOpt allows backwards and forwards compatibility with new options in the future
	jsonSkipDefaults := false
	_ = serverOpts.ReadOpt("jsonSkipDefaults", &jsonSkipDefaults)
	jsonCamelCase := false
	_ = serverOpts.ReadOpt("jsonCamelCase", &jsonCamelCase)
	var pathPrefix string
	if ok := serverOpts.ReadOpt("pathPrefix", &pathPrefix); !ok {
		pathPrefix = "/twirp" // default prefix
	}

	return &helloServiceServer{
		HelloService:     svc,
		hooks:            serverOpts.Hooks,
		interceptor:      twirp.ChainInterceptors(serverOpts.Interceptors...),
		pathPrefix:       pathPrefix,
		jsonSkipDefaults: jsonSkipDefaults,
		jsonCamelCase:    jsonCamelCase,
	}
}

// writeError writes an HTTP response with a valid Twirp error format, and triggers hooks.
// If err is not a twirp.Error, it will get wrapped with twirp.InternalErrorWith(err)
func (s *helloServiceServer) writeError(ctx context.Context, resp http.ResponseWriter, err error) {
	writeError(ctx, resp, err, s.hooks)
}

// handleRequestBodyError is used to handle errors when the twirp server cannot read the request
func (s *helloServiceServer) handleRequestBodyError(ctx context.Context, resp http.ResponseWriter, msg string, err error) {
	if context.Canceled == ctx.Err() {
		s.writeError(ctx, resp, twirp.NewError(twirp.Canceled, "failed to read request: context canceled"))
		return
	}
	if context.DeadlineExceeded == ctx.Err() {
		s.writeError(ctx, resp, twirp.NewError(twirp.DeadlineExceeded, "failed to read request: deadline exceeded"))
		return
	}
	s.writeError(ctx, resp, twirp.WrapError(malformedRequestError(msg), err))
}

// HelloServicePathPrefix is a convenience constant that may identify URL paths.
// Should be used with caution, it only matches routes generated by Twirp Go clients,
// with the default "/twirp" prefix and default CamelCase service and method names.
// More info: https://twitchtv.github.io/twirp/docs/routing.html const HelloServicePathPrefix = "/twirp/proto.hello.v1.HelloService/" func (s *helloServiceServer) ServeHTTP(resp http.ResponseWriter, req *http.Request) { ctx := req.Context() ctx = ctxsetters.WithPackageName(ctx, "proto.hello.v1") ctx = ctxsetters.WithServiceName(ctx, "HelloService") ctx = ctxsetters.WithResponseWriter(ctx, resp) var err error ctx, err = callRequestReceived(ctx, s.hooks) if err != nil { s.writeError(ctx, resp, err) return } if req.Method != "POST" { msg := fmt.Sprintf("unsupported method %q (only POST is allowed)", req.Method) s.writeError(ctx, resp, badRouteError(msg, req.Method, req.URL.Path)) return } // Verify path format: [<prefix>]/<package>.<Service>/<Method> prefix, pkgService, method := parseTwirpPath(req.URL.Path) if pkgService != "proto.hello.v1.HelloService" { msg := fmt.Sprintf("no handler for path %q", req.URL.Path) s.writeError(ctx, resp, badRouteError(msg, req.Method, req.URL.Path)) return } if prefix != s.pathPrefix { msg := fmt.Sprintf("invalid path prefix %q, expected %q, on path %q", prefix, s.pathPrefix, req.URL.Path) s.writeError(ctx, resp, badRouteError(msg, req.Method, req.URL.Path)) return } switch method { case "SayHello": s.serveSayHello(ctx, resp, req) return default: msg := fmt.Sprintf("no handler for path %q", req.URL.Path) s.writeError(ctx, resp, badRouteError(msg, req.Method, req.URL.Path)) return } } func (s *helloServiceServer) serveSayHello(ctx context.Context, resp http.ResponseWriter, req *http.Request) { header := req.Header.Get("Content-Type") i := strings.Index(header, ";") if i == -1 { i = len(header) } switch strings.TrimSpace(strings.ToLower(header[:i])) { case "application/json": s.serveSayHelloJSON(ctx, resp, req) case "application/protobuf": s.serveSayHelloProtobuf(ctx, resp, req) default: msg := fmt.Sprintf("unexpected Content-Type: %q", req.Header.Get("Content-Type")) twerr := badRouteError(msg, req.Method, req.URL.Path) s.writeError(ctx, resp, twerr) } } func (s *helloServiceServer) serveSayHelloJSON(ctx context.Context, resp http.ResponseWriter, req *http.Request) { var err error ctx = ctxsetters.WithMethodName(ctx, "SayHello") ctx, err = callRequestRouted(ctx, s.hooks) if err != nil { s.writeError(ctx, resp, err) return } d := json.NewDecoder(req.Body) rawReqBody := json.RawMessage{} if err := d.Decode(&rawReqBody); err != nil { s.handleRequestBodyError(ctx, resp, "the json request could not be decoded", err) return } reqContent := new(SayHelloRequest) unmarshaler := protojson.UnmarshalOptions{DiscardUnknown: true} if err = unmarshaler.Unmarshal(rawReqBody, reqContent); err != nil { s.handleRequestBodyError(ctx, resp, "the json request could not be decoded", err) return } handler := s.HelloService.SayHello if s.interceptor != nil { handler = func(ctx context.Context, req *SayHelloRequest) (*SayHelloResponse, error) { resp, err := s.interceptor( func(ctx context.Context, req interface{}) (interface{}, error) { typedReq, ok := req.(*SayHelloRequest) if !ok { return nil, twirp.InternalError("failed type assertion req.(*SayHelloRequest) when calling interceptor") } return s.HelloService.SayHello(ctx, typedReq) }, )(ctx, req) if resp != nil { typedResp, ok := resp.(*SayHelloResponse) if !ok { return nil, twirp.InternalError("failed type assertion resp.(*SayHelloResponse) when calling interceptor") } return typedResp, err } return nil, err } } // Call service method var respContent *SayHelloResponse func() { defer ensurePanicResponses(ctx, resp, s.hooks) 
respContent, err = handler(ctx, reqContent) }() if err != nil { s.writeError(ctx, resp, err) return } if respContent == nil { s.writeError(ctx, resp, twirp.InternalError("received a nil *SayHelloResponse and nil error while calling SayHello. nil responses are not supported")) return } ctx = callResponsePrepared(ctx, s.hooks) marshaler := &protojson.MarshalOptions{UseProtoNames: !s.jsonCamelCase, EmitUnpopulated: !s.jsonSkipDefaults} respBytes, err := marshaler.Marshal(respContent) if err != nil { s.writeError(ctx, resp, wrapInternal(err, "failed to marshal json response")) return } ctx = ctxsetters.WithStatusCode(ctx, http.StatusOK) resp.Header().Set("Content-Type", "application/json") resp.Header().Set("Content-Length", strconv.Itoa(len(respBytes))) resp.WriteHeader(http.StatusOK) if n, err := resp.Write(respBytes); err != nil { msg := fmt.Sprintf("failed to write response, %d of %d bytes written: %s", n, len(respBytes), err.Error()) twerr := twirp.NewError(twirp.Unknown, msg) ctx = callError(ctx, s.hooks, twerr) } callResponseSent(ctx, s.hooks) } func (s *helloServiceServer) serveSayHelloProtobuf(ctx context.Context, resp http.ResponseWriter, req *http.Request) { var err error ctx = ctxsetters.WithMethodName(ctx, "SayHello") ctx, err = callRequestRouted(ctx, s.hooks) if err != nil { s.writeError(ctx, resp, err) return } buf, err := ioutil.ReadAll(req.Body) if err != nil { s.handleRequestBodyError(ctx, resp, "failed to read request body", err) return } reqContent := new(SayHelloRequest) if err = proto.Unmarshal(buf, reqContent); err != nil { s.writeError(ctx, resp, malformedRequestError("the protobuf request could not be decoded")) return } handler := s.HelloService.SayHello if s.interceptor != nil { handler = func(ctx context.Context, req *SayHelloRequest) (*SayHelloResponse, error) { resp, err := s.interceptor( func(ctx context.Context, req interface{}) (interface{}, error) { typedReq, ok := req.(*SayHelloRequest) if !ok { return nil, twirp.InternalError("failed type assertion req.(*SayHelloRequest) when calling interceptor") } return s.HelloService.SayHello(ctx, typedReq) }, )(ctx, req) if resp != nil { typedResp, ok := resp.(*SayHelloResponse) if !ok { return nil, twirp.InternalError("failed type assertion resp.(*SayHelloResponse) when calling interceptor") } return typedResp, err } return nil, err } } // Call service method var respContent *SayHelloResponse func() { defer ensurePanicResponses(ctx, resp, s.hooks) respContent, err = handler(ctx, reqContent) }() if err != nil { s.writeError(ctx, resp, err) return } if respContent == nil { s.writeError(ctx, resp, twirp.InternalError("received a nil *SayHelloResponse and nil error while calling SayHello. 
nil responses are not supported")) return } ctx = callResponsePrepared(ctx, s.hooks) respBytes, err := proto.Marshal(respContent) if err != nil { s.writeError(ctx, resp, wrapInternal(err, "failed to marshal proto response")) return } ctx = ctxsetters.WithStatusCode(ctx, http.StatusOK) resp.Header().Set("Content-Type", "application/protobuf") resp.Header().Set("Content-Length", strconv.Itoa(len(respBytes))) resp.WriteHeader(http.StatusOK) if n, err := resp.Write(respBytes); err != nil { msg := fmt.Sprintf("failed to write response, %d of %d bytes written: %s", n, len(respBytes), err.Error()) twerr := twirp.NewError(twirp.Unknown, msg) ctx = callError(ctx, s.hooks, twerr) } callResponseSent(ctx, s.hooks) } func (s *helloServiceServer) ServiceDescriptor() ([]byte, int) { return twirpFileDescriptor0, 0 } func (s *helloServiceServer) ProtocGenTwirpVersion() string { return "v8.1.0" } // PathPrefix returns the base service path, in the form: "/<prefix>/<package>.<Service>/" // that is everything in a Twirp route except for the <Method>. This can be used for routing, // for example to identify the requests that are targeted to this service in a mux. func (s *helloServiceServer) PathPrefix() string { return baseServicePath(s.pathPrefix, "proto.hello.v1", "HelloService") } // ===== // Utils // ===== // HTTPClient is the interface used by generated clients to send HTTP requests. // It is fulfilled by *(net/http).Client, which is sufficient for most users. // Users can provide their own implementation for special retry policies. // // HTTPClient implementations should not follow redirects. Redirects are // automatically disabled if *(net/http).Client is passed to client // constructors. See the withoutRedirects function in this file for more // details. type HTTPClient interface { Do(req *http.Request) (*http.Response, error) } // TwirpServer is the interface generated server structs will support: they're // HTTP handlers with additional methods for accessing metadata about the // service. Those accessors are a low-level API for building reflection tools. // Most people can think of TwirpServers as just http.Handlers. type TwirpServer interface { http.Handler // ServiceDescriptor returns gzipped bytes describing the .proto file that // this service was generated from. Once unzipped, the bytes can be // unmarshalled as a // google.golang.org/protobuf/types/descriptorpb.FileDescriptorProto. // // The returned integer is the index of this particular service within that // FileDescriptorProto's 'Service' slice of ServiceDescriptorProtos. This is a // low-level field, expected to be used for reflection. ServiceDescriptor() ([]byte, int) // ProtocGenTwirpVersion is the semantic version string of the version of // twirp used to generate this file. ProtocGenTwirpVersion() string // PathPrefix returns the HTTP URL path prefix for all methods handled by this // service. This can be used with an HTTP mux to route Twirp requests. // The path prefix is in the form: "/<prefix>/<package>.<Service>/" // that is, everything in a Twirp route except for the <Method> at the end. 
	PathPrefix() string
}

func newServerOpts(opts []interface{}) *twirp.ServerOptions {
	serverOpts := &twirp.ServerOptions{}
	for _, opt := range opts {
		switch o := opt.(type) {
		case twirp.ServerOption:
			o(serverOpts)
		case *twirp.ServerHooks: // backwards compatibility, allow to specify hooks as an argument
			twirp.WithServerHooks(o)(serverOpts)
		case nil: // backwards compatibility, allow nil value for the argument
			continue
		default:
			panic(fmt.Sprintf("Invalid option type %T, please use a twirp.ServerOption", o))
		}
	}
	return serverOpts
}

// WriteError writes an HTTP response with a valid Twirp error format (code, msg, meta).
// Useful outside of the Twirp server (e.g. http middleware), but does not trigger hooks.
// If err is not a twirp.Error, it will get wrapped with twirp.InternalErrorWith(err)
func WriteError(resp http.ResponseWriter, err error) {
	writeError(context.Background(), resp, err, nil)
}

// writeError writes Twirp errors in the response and triggers hooks.
func writeError(ctx context.Context, resp http.ResponseWriter, err error, hooks *twirp.ServerHooks) {
	// Convert to a twirp.Error. Non-twirp errors are converted to internal errors.
	var twerr twirp.Error
	if !errors.As(err, &twerr) {
		twerr = twirp.InternalErrorWith(err)
	}

	statusCode := twirp.ServerHTTPStatusFromErrorCode(twerr.Code())
	ctx = ctxsetters.WithStatusCode(ctx, statusCode)
	ctx = callError(ctx, hooks, twerr)

	respBody := marshalErrorToJSON(twerr)

	resp.Header().Set("Content-Type", "application/json") // Error responses are always JSON
	resp.Header().Set("Content-Length", strconv.Itoa(len(respBody)))
	resp.WriteHeader(statusCode) // set HTTP status code and send response

	_, writeErr := resp.Write(respBody)
	if writeErr != nil {
		// We have three options here. We could log the error, call the Error
		// hook, or just silently ignore the error.
		//
		// Logging is unacceptable because we don't have a user-controlled
		// logger; writing out to stderr without permission is too rude.
		//
		// Calling the Error hook would confuse users: it would mean the Error
		// hook got called twice for one request, which is likely to lead to
		// duplicated log messages and metrics, no matter how well we document
		// the behavior.
		//
		// Silently ignoring the error is our least-bad option. It's highly
		// likely that the connection is broken and the original 'err' says
		// so anyway.
		_ = writeErr
	}

	callResponseSent(ctx, hooks)
}

// sanitizeBaseURL parses the baseURL, and adds the "http" scheme if needed.
// If the URL is unparsable, the baseURL is returned unchanged.
func sanitizeBaseURL(baseURL string) string {
	u, err := url.Parse(baseURL)
	if err != nil {
		return baseURL // invalid URL will fail later when making requests
	}
	if u.Scheme == "" {
		u.Scheme = "http"
	}
	return u.String()
}

// baseServicePath composes the path prefix for the service (without <Method>).
// e.g.: baseServicePath("/twirp", "my.pkg", "MyService")
//       returns => "/twirp/my.pkg.MyService/"
// e.g.: baseServicePath("", "", "MyService")
//       returns => "/MyService/"
func baseServicePath(prefix, pkg, service string) string {
	fullServiceName := service
	if pkg != "" {
		fullServiceName = pkg + "." + service
	}
	return path.Join("/", prefix, fullServiceName) + "/"
}

// parseTwirpPath extracts path components from a valid Twirp route.
// Expected format: "[<prefix>]/<package>.<Service>/<Method>" // e.g.: prefix, pkgService, method := parseTwirpPath("/twirp/pkg.Svc/MakeHat") func parseTwirpPath(path string) (string, string, string) { parts := strings.Split(path, "/") if len(parts) < 2 { return "", "", "" } method := parts[len(parts)-1] pkgService := parts[len(parts)-2] prefix := strings.Join(parts[0:len(parts)-2], "/") return prefix, pkgService, method } // getCustomHTTPReqHeaders retrieves a copy of any headers that are set in // a context through the twirp.WithHTTPRequestHeaders function. // If there are no headers set, or if they have the wrong type, nil is returned. func getCustomHTTPReqHeaders(ctx context.Context) http.Header { header, ok := twirp.HTTPRequestHeaders(ctx) if !ok || header == nil { return nil } copied := make(http.Header) for k, vv := range header { if vv == nil { copied[k] = nil continue } copied[k] = make([]string, len(vv)) copy(copied[k], vv) } return copied } // newRequest makes an http.Request from a client, adding common headers. func newRequest(ctx context.Context, url string, reqBody io.Reader, contentType string) (*http.Request, error) { req, err := http.NewRequest("POST", url, reqBody) if err != nil { return nil, err } req = req.WithContext(ctx) if customHeader := getCustomHTTPReqHeaders(ctx); customHeader != nil { req.Header = customHeader } req.Header.Set("Accept", contentType) req.Header.Set("Content-Type", contentType) req.Header.Set("Twirp-Version", "v8.1.0") return req, nil } // JSON serialization for errors type twerrJSON struct { Code string `json:"code"` Msg string `json:"msg"` Meta map[string]string `json:"meta,omitempty"` } // marshalErrorToJSON returns JSON from a twirp.Error, that can be used as HTTP error response body. // If serialization fails, it will use a descriptive Internal error instead. func marshalErrorToJSON(twerr twirp.Error) []byte { // make sure that msg is not too large msg := twerr.Msg() if len(msg) > 1e6 { msg = msg[:1e6] } tj := twerrJSON{ Code: string(twerr.Code()), Msg: msg, Meta: twerr.MetaMap(), } buf, err := json.Marshal(&tj) if err != nil { buf = []byte("{\"type\": \"" + twirp.Internal + "\", \"msg\": \"There was an error but it could not be serialized into JSON\"}") // fallback } return buf } // errorFromResponse builds a twirp.Error from a non-200 HTTP response. // If the response has a valid serialized Twirp error, then it's returned. // If not, the response status code is used to generate a similar twirp // error. See twirpErrorFromIntermediary for more info on intermediary errors. func errorFromResponse(resp *http.Response) twirp.Error { statusCode := resp.StatusCode statusText := http.StatusText(statusCode) if isHTTPRedirect(statusCode) { // Unexpected redirect: it must be an error from an intermediary. // Twirp clients don't follow redirects automatically, Twirp only handles // POST requests, redirects should only happen on GET and HEAD requests. location := resp.Header.Get("Location") msg := fmt.Sprintf("unexpected HTTP status code %d %q received, Location=%q", statusCode, statusText, location) return twirpErrorFromIntermediary(statusCode, msg, location) } respBodyBytes, err := ioutil.ReadAll(resp.Body) if err != nil { return wrapInternal(err, "failed to read server error response body") } var tj twerrJSON dec := json.NewDecoder(bytes.NewReader(respBodyBytes)) dec.DisallowUnknownFields() if err := dec.Decode(&tj); err != nil || tj.Code == "" { // Invalid JSON response; it must be an error from an intermediary. 
msg := fmt.Sprintf("Error from intermediary with HTTP status code %d %q", statusCode, statusText) return twirpErrorFromIntermediary(statusCode, msg, string(respBodyBytes)) } errorCode := twirp.ErrorCode(tj.Code) if !twirp.IsValidErrorCode(errorCode) { msg := "invalid type returned from server error response: " + tj.Code return twirp.InternalError(msg).WithMeta("body", string(respBodyBytes)) } twerr := twirp.NewError(errorCode, tj.Msg) for k, v := range tj.Meta { twerr = twerr.WithMeta(k, v) } return twerr } // twirpErrorFromIntermediary maps HTTP errors from non-twirp sources to twirp errors. // The mapping is similar to gRPC: https://github.com/grpc/grpc/blob/master/doc/http-grpc-status-mapping.md. // Returned twirp Errors have some additional metadata for inspection. func twirpErrorFromIntermediary(status int, msg string, bodyOrLocation string) twirp.Error { var code twirp.ErrorCode if isHTTPRedirect(status) { // 3xx code = twirp.Internal } else { switch status { case 400: // Bad Request code = twirp.Internal case 401: // Unauthorized code = twirp.Unauthenticated case 403: // Forbidden code = twirp.PermissionDenied case 404: // Not Found code = twirp.BadRoute case 429: // Too Many Requests code = twirp.ResourceExhausted case 502, 503, 504: // Bad Gateway, Service Unavailable, Gateway Timeout code = twirp.Unavailable default: // All other codes code = twirp.Unknown } } twerr := twirp.NewError(code, msg) twerr = twerr.WithMeta("http_error_from_intermediary", "true") // to easily know if this error was from intermediary twerr = twerr.WithMeta("status_code", strconv.Itoa(status)) if isHTTPRedirect(status) { twerr = twerr.WithMeta("location", bodyOrLocation) } else { twerr = twerr.WithMeta("body", bodyOrLocation) } return twerr } func isHTTPRedirect(status int) bool { return status >= 300 && status <= 399 } // wrapInternal wraps an error with a prefix as an Internal error. // The original error cause is accessible by github.com/pkg/errors.Cause. func wrapInternal(err error, prefix string) twirp.Error { return twirp.InternalErrorWith(&wrappedError{prefix: prefix, cause: err}) } type wrappedError struct { prefix string cause error } func (e *wrappedError) Error() string { return e.prefix + ": " + e.cause.Error() } func (e *wrappedError) Unwrap() error { return e.cause } // for go1.13 + errors.Is/As func (e *wrappedError) Cause() error { return e.cause } // for github.com/pkg/errors // ensurePanicResponses makes sure that rpc methods causing a panic still result in a Twirp Internal // error response (status 500), and error hooks are properly called with the panic wrapped as an error. // The panic is re-raised so it can be handled normally with middleware. func ensurePanicResponses(ctx context.Context, resp http.ResponseWriter, hooks *twirp.ServerHooks) { if r := recover(); r != nil { // Wrap the panic as an error so it can be passed to error hooks. // The original error is accessible from error hooks, but not visible in the response. err := errFromPanic(r) twerr := &internalWithCause{msg: "Internal service panic", cause: err} // Actually write the error writeError(ctx, resp, twerr, hooks) // If possible, flush the error to the wire. f, ok := resp.(http.Flusher) if ok { f.Flush() } panic(r) } } // errFromPanic returns the typed error if the recovered panic is an error, otherwise formats as error. 
func errFromPanic(p interface{}) error { if err, ok := p.(error); ok { return err } return fmt.Errorf("panic: %v", p) } // internalWithCause is a Twirp Internal error wrapping an original error cause, // but the original error message is not exposed on Msg(). The original error // can be checked with go1.13+ errors.Is/As, and also by (github.com/pkg/errors).Unwrap type internalWithCause struct { msg string cause error } func (e *internalWithCause) Unwrap() error { return e.cause } // for go1.13 + errors.Is/As func (e *internalWithCause) Cause() error { return e.cause } // for github.com/pkg/errors func (e *internalWithCause) Error() string { return e.msg + ": " + e.cause.Error() } func (e *internalWithCause) Code() twirp.ErrorCode { return twirp.Internal } func (e *internalWithCause) Msg() string { return e.msg } func (e *internalWithCause) Meta(key string) string { return "" } func (e *internalWithCause) MetaMap() map[string]string { return nil } func (e *internalWithCause) WithMeta(key string, val string) twirp.Error { return e } // malformedRequestError is used when the twirp server cannot unmarshal a request func malformedRequestError(msg string) twirp.Error { return twirp.NewError(twirp.Malformed, msg) } // badRouteError is used when the twirp server cannot route a request func badRouteError(msg string, method, url string) twirp.Error { err := twirp.NewError(twirp.BadRoute, msg) err = err.WithMeta("twirp_invalid_route", method+" "+url) return err } // withoutRedirects makes sure that the POST request can not be redirected. // The standard library will, by default, redirect requests (including POSTs) if it gets a 302 or // 303 response, and also 301s in go1.8. It redirects by making a second request, changing the // method to GET and removing the body. This produces very confusing error messages, so instead we // set a redirect policy that always errors. This stops Go from executing the redirect. // // We have to be a little careful in case the user-provided http.Client has its own CheckRedirect // policy - if so, we'll run through that policy first. // // Because this requires modifying the http.Client, we make a new copy of the client and return it. func withoutRedirects(in *http.Client) *http.Client
// doProtobufRequest makes a Protobuf request to the remote Twirp service. func doProtobufRequest(ctx context.Context, client HTTPClient, hooks *twirp.ClientHooks, url string, in, out proto.Message) (_ context.Context, err error) { reqBodyBytes, err := proto.Marshal(in) if err != nil { return ctx, wrapInternal(err, "failed to marshal proto request") } reqBody := bytes.NewBuffer(reqBodyBytes) if err = ctx.Err(); err != nil { return ctx, wrapInternal(err, "aborted because context was done") } req, err := newRequest(ctx, url, reqBody, "application/protobuf") if err != nil { return ctx, wrapInternal(err, "could not build request") } ctx, err = callClientRequestPrepared(ctx, hooks, req) if err != nil { return ctx, err } req = req.WithContext(ctx) resp, err := client.Do(req) if err != nil { return ctx, wrapInternal(err, "failed to do request") } defer func() { cerr := resp.Body.Close() if err == nil && cerr != nil { err = wrapInternal(cerr, "failed to close response body") } }() if err = ctx.Err(); err != nil { return ctx, wrapInternal(err, "aborted because context was done") } if resp.StatusCode != 200 { return ctx, errorFromResponse(resp) } respBodyBytes, err := ioutil.ReadAll(resp.Body) if err != nil { return ctx, wrapInternal(err, "failed to read response body") } if err = ctx.Err(); err != nil { return ctx, wrapInternal(err, "aborted because context was done") } if err = proto.Unmarshal(respBodyBytes, out); err != nil { return ctx, wrapInternal(err, "failed to unmarshal proto response") } return ctx, nil } // doJSONRequest makes a JSON request to the remote Twirp service. func doJSONRequest(ctx context.Context, client HTTPClient, hooks *twirp.ClientHooks, url string, in, out proto.Message) (_ context.Context, err error) { marshaler := &protojson.MarshalOptions{UseProtoNames: true} reqBytes, err := marshaler.Marshal(in) if err != nil { return ctx, wrapInternal(err, "failed to marshal json request") } if err = ctx.Err(); err != nil { return ctx, wrapInternal(err, "aborted because context was done") } req, err := newRequest(ctx, url, bytes.NewReader(reqBytes), "application/json") if err != nil { return ctx, wrapInternal(err, "could not build request") } ctx, err = callClientRequestPrepared(ctx, hooks, req) if err != nil { return ctx, err } req = req.WithContext(ctx) resp, err := client.Do(req) if err != nil { return ctx, wrapInternal(err, "failed to do request") } defer func() { cerr := resp.Body.Close() if err == nil && cerr != nil { err = wrapInternal(cerr, "failed to close response body") } }() if err = ctx.Err(); err != nil { return ctx, wrapInternal(err, "aborted because context was done") } if resp.StatusCode != 200 { return ctx, errorFromResponse(resp) } d := json.NewDecoder(resp.Body) rawRespBody := json.RawMessage{} if err := d.Decode(&rawRespBody); err != nil { return ctx, wrapInternal(err, "failed to unmarshal json response") } unmarshaler := protojson.UnmarshalOptions{DiscardUnknown: true} if err = unmarshaler.Unmarshal(rawRespBody, out); err != nil { return ctx, wrapInternal(err, "failed to unmarshal json response") } if err = ctx.Err(); err != nil { return ctx, wrapInternal(err, "aborted because context was done") } return ctx, nil } // Call twirp.ServerHooks.RequestReceived if the hook is available func callRequestReceived(ctx context.Context, h *twirp.ServerHooks) (context.Context, error) { if h == nil || h.RequestReceived == nil { return ctx, nil } return h.RequestReceived(ctx) } // Call twirp.ServerHooks.RequestRouted if the hook is available func callRequestRouted(ctx 
context.Context, h *twirp.ServerHooks) (context.Context, error) { if h == nil || h.RequestRouted == nil { return ctx, nil } return h.RequestRouted(ctx) } // Call twirp.ServerHooks.ResponsePrepared if the hook is available func callResponsePrepared(ctx context.Context, h *twirp.ServerHooks) context.Context { if h == nil || h.ResponsePrepared == nil { return ctx } return h.ResponsePrepared(ctx) } // Call twirp.ServerHooks.ResponseSent if the hook is available func callResponseSent(ctx context.Context, h *twirp.ServerHooks) { if h == nil || h.ResponseSent == nil { return } h.ResponseSent(ctx) } // Call twirp.ServerHooks.Error if the hook is available func callError(ctx context.Context, h *twirp.ServerHooks, err twirp.Error) context.Context { if h == nil || h.Error == nil { return ctx } return h.Error(ctx, err) } func callClientResponseReceived(ctx context.Context, h *twirp.ClientHooks) { if h == nil || h.ResponseReceived == nil { return } h.ResponseReceived(ctx) } func callClientRequestPrepared(ctx context.Context, h *twirp.ClientHooks, req *http.Request) (context.Context, error) { if h == nil || h.RequestPrepared == nil { return ctx, nil } return h.RequestPrepared(ctx, req) } func callClientError(ctx context.Context, h *twirp.ClientHooks, err twirp.Error) { if h == nil || h.Error == nil { return } h.Error(ctx, err) } var twirpFileDescriptor0 = []byte{ // 164 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2a, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0xcf, 0x48, 0xcd, 0xc9, 0xc9, 0xd7, 0x2f, 0x33, 0x84, 0x30, 0xf4, 0xc0, 0x82, 0x42, 0x7c, 0x60, 0x4a, 0x0f, 0x22, 0x54, 0x66, 0xa8, 0xa4, 0xca, 0xc5, 0x1f, 0x9c, 0x58, 0xe9, 0x01, 0xe2, 0x06, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x4a, 0x3a, 0x5c, 0x02, 0x08, 0x65, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x42, 0x12, 0x5c, 0xec, 0xb9, 0xa9, 0xc5, 0xc5, 0x89, 0xe9, 0x30, 0xa5, 0x30, 0xae, 0x51, 0x3c, 0x17, 0x0f, 0x58, 0x69, 0x70, 0x6a, 0x51, 0x59, 0x66, 0x72, 0xaa, 0x90, 0x3f, 0x17, 0x07, 0x4c, 0xb7, 0x90, 0xbc, 0x1e, 0xaa, 0x0b, 0xf4, 0xd0, 0xac, 0x97, 0x52, 0xc0, 0xad, 0x00, 0x62, 0xb1, 0x12, 0x83, 0x13, 0x4f, 0x14, 0x97, 0x9e, 0xbe, 0x35, 0x58, 0x45, 0x41, 0x52, 0x12, 0x1b, 0x58, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xb6, 0xfe, 0xb9, 0x0f, 0xf8, 0x00, 0x00, 0x00, }
{ copy := *in copy.CheckRedirect = func(req *http.Request, via []*http.Request) error { if in.CheckRedirect != nil { // Run the input's redirect if it exists, in case it has side effects, but ignore any error it // returns, since we want to use ErrUseLastResponse. err := in.CheckRedirect(req, via) _ = err // Silly, but this makes sure generated code passes errcheck -blank, which some people use. } return http.ErrUseLastResponse } return &copy }
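With the record above complete, the generated pieces compose in the obvious way: NewHelloServiceServer wraps an implementation in a TwirpServer (a plain http.Handler), PathPrefix supplies the mux route, and the Protobuf or JSON client constructors call it over HTTP. The sketch below is a hedged illustration in the same package: the no-op HelloService implementation is assumed, and the fields of SayHelloRequest/SayHelloResponse live in the companion protobuf-generated file, so they are left zero-valued here.

// Sketch only: wiring the generated server and client together.
type helloServer struct{}

func (helloServer) SayHello(ctx context.Context, req *SayHelloRequest) (*SayHelloResponse, error) {
	return &SayHelloResponse{}, nil // message fields omitted; they are defined in the sibling .pb.go file
}

func exampleRoundTrip() {
	server := NewHelloServiceServer(helloServer{})
	mux := http.NewServeMux()
	mux.Handle(server.PathPrefix(), server) // routes /twirp/proto.hello.v1.HelloService/*

	go http.ListenAndServe(":8080", mux) // sketch: a real program would handle the error and wait for readiness

	client := NewHelloServiceProtobufClient("http://localhost:8080", &http.Client{})
	if _, err := client.SayHello(context.Background(), &SayHelloRequest{}); err != nil {
		fmt.Println("SayHello failed:", err)
	}
}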
private.rs
use super::request::*; use rust_decimal::Decimal; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, DefaultOnError}; use std::collections::HashMap; #[derive(Debug, Serialize, Deserialize)] pub struct BalanceResponse(HashMap<String, Decimal>); pub async fn balance(cred: &Credential) -> Result<BalanceResponse, Error> { let response = private_request(&cred, "/0/private/Balance", &[]).await?; return load_response(&response); } #[derive(Debug, Serialize, Deserialize)] pub struct BalanceEx { balance: Decimal, hold_trade: Decimal, } #[derive(Debug, Serialize, Deserialize)] pub struct BalanceExResponse(HashMap<String, BalanceEx>); pub async fn balance_ex(cred: &Credential) -> Result<BalanceExResponse, Error> { let response = private_request(&cred, "/0/private/BalanceEx", &[]).await?; return load_response(&response); } #[derive(Debug, Serialize, Deserialize)] pub struct TradeBalanceResponse { eb: Decimal, tb: Decimal, m: Decimal, n: Decimal, c: Decimal, v: Decimal, e: Decimal, mf: Decimal, ml: Option<Decimal>, } pub async fn trade_balance( cred: &Credential, asset: Option<&str>, ) -> Result<TradeBalanceResponse, Error> { let mut params: Vec<(&str, &str)> = vec![]; if let Some(val) = asset { params.push(("asset", &val)); } let response = private_request(&cred, "/0/private/TradeBalance", &params).await?; return load_response(&response); } #[derive(Debug, Serialize, Deserialize)] pub struct OrderDescr { pair: String, #[serde(rename = "type")] type_: String, // needs to be renamed ordertype: String, price: Decimal, price2: Decimal, leverage: String, order: String, close: String, } #[serde_as] #[derive(Debug, Serialize, Deserialize)] pub struct Order { refid: Option<String>, #[serde_as(deserialize_as = "DefaultOnError")] #[serde(default)] userref: Option<String>, status: String, opentm: f64, starttm: i64, expiretm: i64, descr: OrderDescr, vol: Decimal, vol_exec: Decimal, cost: Decimal, fee: Decimal, price: Decimal, stopprice: Decimal, limitprice: Decimal, misc: String, oflags: String, trades: Option<Vec<String>>, } #[derive(Debug, Serialize, Deserialize)] pub struct OpenOrdersResponse { open: HashMap<String, Order>, } pub async fn open_orders( cred: &Credential, trades: Option<bool>, userref: Option<u32>, ) -> Result<OpenOrdersResponse, Error> { let mut params: Vec<(&str, &str)> = vec![]; let trades_string; if let Some(val) = trades { trades_string = val.to_string(); params.push(("trades", &trades_string)); } let userref_string; if let Some(val) = userref { userref_string = val.to_string(); params.push(("userref", &userref_string)); } let response = private_request(&cred, "/0/private/OpenOrders", &params).await?; return load_response(&response); } #[derive(Debug, Serialize, Deserialize)] pub struct ClosedOrdersResponse { closed: HashMap<String, Order>, } pub async fn closed_orders( cred: &Credential, trades: Option<bool>, userref: Option<u32>, start: Option<i64>, end: Option<i64>, ofs: Option<i64>, closetime: Option<&str>, ) -> Result<ClosedOrdersResponse, Error> { let mut params: Vec<(&str, &str)> = vec![]; let trades_string; if let Some(val) = trades { trades_string = val.to_string(); params.push(("trades", &trades_string)); } let userref_string; if let Some(val) = userref { userref_string = val.to_string(); params.push(("userref", &userref_string)); } let start_string; if let Some(val) = start { start_string = val.to_string(); params.push(("start", &start_string)); } let end_string; if let Some(val) = end { end_string = val.to_string(); params.push(("end", &end_string)); } let 
ofs_string; if let Some(val) = ofs { ofs_string = val.to_string(); params.push(("ofs", &ofs_string)); } let closetime_string; if let Some(val) = closetime { closetime_string = val; params.push(("closetime", &closetime_string)); } let response = private_request(&cred, "/0/private/ClosedOrders", &params).await?; return load_response(&response); } #[derive(Debug, Serialize, Deserialize)] pub struct QueryOrdersResponse(HashMap<String, Order>); pub async fn query_orders( cred: &Credential, trades: Option<bool>, userref: Option<u32>, txid: &[&str], ) -> Result<QueryOrdersResponse, Error> { let mut params: Vec<(&str, &str)> = vec![]; let trades_string; if let Some(val) = trades { trades_string = val.to_string(); params.push(("trades", &trades_string)); } let userref_string; if let Some(val) = userref { userref_string = val.to_string(); params.push(("userref", &userref_string)); } let txid = txid.join(","); params.push(("txid", &txid)); let response = private_request(&cred, "/0/private/QueryOrders", &params).await?; return load_response(&response); } #[derive(Debug, Serialize, Deserialize)] pub struct Trade { ordertxid: String, postxid: Option<String>, pair: String, time: f64, #[serde(rename = "type")] type_: String, // needs to be renamed ordertype: String, price: Decimal, cost: Decimal, fee: Decimal, vol: Decimal, margin: Decimal, misc: String, ccost: Option<Decimal>, cfee: Option<Decimal>, cvol: Option<Decimal>, cmargin: Option<Decimal>, net: Option<Decimal>, trades: Option<Vec<String>>, } #[derive(Debug, Serialize, Deserialize)] pub struct TradesHistoryResponse { trades: HashMap<String, Trade>, count: u64, } pub async fn trades_history( cred: &Credential, type_: Option<&str>, trades: Option<bool>, start: Option<i64>, end: Option<i64>, ofs: Option<i64>, ) -> Result<TradesHistoryResponse, Error> { let mut params: Vec<(&str, &str)> = vec![]; if let Some(val) = type_ { params.push(("type", val)); } let trades_string; if let Some(val) = trades { trades_string = val.to_string(); params.push(("trades", &trades_string)); } let start_string; if let Some(val) = start { start_string = val.to_string(); params.push(("start", &start_string)); } let end_string; if let Some(val) = end { end_string = val.to_string(); params.push(("end", &end_string)); } let ofs_string; if let Some(val) = ofs { ofs_string = val.to_string(); params.push(("ofs", &ofs_string)); } let response = private_request(&cred, "/0/private/TradesHistory", &params).await?; return load_response(&response); } #[derive(Debug, Serialize, Deserialize)] pub struct
(HashMap<String, Trade>); pub async fn query_trades( cred: &Credential, txids: &[&str], trades: Option<bool>, ) -> Result<QueryTradesResponse, Error> { let mut params: Vec<(&str, &str)> = vec![]; let trades_string; if let Some(val) = trades { trades_string = val.to_string(); params.push(("trades", &trades_string)); } let txids = txids.join(","); if txids != "" { params.push(("txid", &txids)) } let response = private_request(&cred, "/0/private/QueryTrades", &params).await?; return load_response(&response); } #[derive(Debug, Serialize, Deserialize)] pub struct OpenPosition { ordertxid: String, posstatus: String, pair: String, time: f64, #[serde(rename = "type")] type_: String, // needs to be renamed ordertype: String, cost: Decimal, fee: Decimal, vol: Decimal, vol_closed: Decimal, margin: Decimal, value: Decimal, net: Decimal, terms: String, rollovertm: String, misc: String, oflags: String, } #[derive(Debug, Serialize, Deserialize)] pub struct OpenPositionsResponse(HashMap<String, OpenPosition>); pub async fn open_positions( cred: &Credential, txids: &[&str], docalcs: Option<bool>, consolidation: &str, ) -> Result<OpenPositionsResponse, Error> { let mut params: Vec<(&str, &str)> = vec![("consolidation", &consolidation)]; let txids = txids.join(","); params.push(("txid", &txids)); let docalcs_string; if let Some(val) = docalcs { docalcs_string = val.to_string(); params.push(("docalcs", &docalcs_string)); } let response = private_request(&cred, "/0/private/OpenPositions", &params).await?; return load_response(&response); } #[derive(Debug, Serialize, Deserialize)] pub struct Ledger { refid: String, time: f64, #[serde(rename = "type")] type_: String, subtype: String, aclass: String, asset: String, amount: Decimal, fee: Decimal, balance: Decimal, } #[derive(Debug, Serialize, Deserialize)] pub struct LedgersResponse { ledger: HashMap<String, Ledger>, count: u64, } pub async fn ledgers( cred: &Credential, asset: Option<&[&str]>, aclass: Option<&str>, type_: Option<&str>, start: Option<i64>, end: Option<i64>, ofs: Option<i64>, ) -> Result<LedgersResponse, Error> { let mut params: Vec<(&str, &str)> = vec![]; let asset_string; if let Some(val) = asset { asset_string = val.join(","); params.push(("asset", &asset_string)); } if let Some(val) = aclass { params.push(("aclass", &val)); } if let Some(val) = type_ { params.push(("type", &val)); } let start_string; if let Some(val) = start { start_string = val.to_string(); params.push(("start", &start_string)); } let end_string; if let Some(val) = end { end_string = val.to_string(); params.push(("end", &end_string)); } let ofs_string; if let Some(val) = ofs { ofs_string = val.to_string(); params.push(("ofs", &ofs_string)); } let response = private_request(&cred, "/0/private/Ledgers", &params).await?; return load_response(&response); } #[derive(Debug, Serialize, Deserialize)] pub struct QueryLedgersResponse(HashMap<String, Ledger>); pub async fn query_ledgers( cred: &Credential, id: &[&str], trades: Option<bool>, ) -> Result<QueryLedgersResponse, Error> { let mut params: Vec<(&str, &str)> = vec![]; let trades_string; if let Some(val) = trades { trades_string = val.to_string(); params.push(("trades", &trades_string)); } let ids = id.join(","); params.push(("id", &ids)); let response = private_request(&cred, "/0/private/QueryLedgers", &params).await?; return load_response(&response); } #[derive(Debug, Serialize, Deserialize)] pub struct Fee { fee: Decimal, minfee: Decimal, maxfee: Decimal, nextfee: Option<Decimal>, nextvolume: Option<Decimal>, tiervolume: 
Decimal, } #[derive(Debug, Serialize, Deserialize)] pub struct TradeVolumeResponse { currency: String, volume: Decimal, fees: Option<HashMap<String, Fee>>, fees_maker: Option<HashMap<String, Fee>>, } pub async fn trade_volume( cred: &Credential, pair: Option<&[&str]>, fee_info: Option<bool>, ) -> Result<TradeVolumeResponse, Error> { let mut params: Vec<(&str, &str)> = vec![]; let pair_string; if let Some(val) = pair { pair_string = val.join(","); params.push(("pair", &pair_string)); } let fee_info_string; if let Some(val) = fee_info { fee_info_string = val.to_string(); params.push(("fee-info", &fee_info_string)); } let response = private_request(&cred, "/0/private/TradeVolume", &params).await?; return load_response(&response); }
QueryTradesResponse
__init__.py
# Copyright 2018-2021 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This is the top level module from which all basic functions and classes of PennyLane can be directly imported. """ from importlib import reload import numpy as _np import pkg_resources from semantic_version import Spec, Version import pennylane.init import pennylane.fourier import pennylane.kernels import pennylane.math import pennylane.operation import pennylane.qaoa as qaoa import pennylane.qnn import pennylane.templates from pennylane._device import Device, DeviceError from pennylane._grad import grad, jacobian, finite_diff from pennylane._qubit_device import QubitDevice from pennylane._version import __version__ from pennylane.about import about from pennylane.circuit_graph import CircuitGraph from pennylane.configuration import Configuration from pennylane.io import * from pennylane.measure import density_matrix, expval, probs, sample, state, var from pennylane.ops import * from pennylane.optimize import * from pennylane.qnode import QNode, qnode from pennylane.templates import broadcast, layer, template from pennylane.transforms import ( adjoint, draw, ControlledOperation, ctrl, measurement_grouping, metric_tensor, specs, qfunc_transform, single_tape_transform, quantum_monte_carlo, ) from pennylane.utils import inv from pennylane.vqe import ExpvalCost, Hamiltonian, VQECost # QueuingContext and collections needs to be imported after all other pennylane imports from .collections import QNodeCollection, apply, dot, map, sum from .queuing import QueuingContext import pennylane.grouping # pylint:disable=wrong-import-order # Look for an existing configuration file default_config = Configuration("config.toml") class QuantumFunctionError(Exception): """Exception raised when an illegal operation is defined in a quantum function.""" def _get_device_entrypoints(): """Returns a dictionary mapping the device short name to the loadable entrypoint""" return {entry.name: entry for entry in pkg_resources.iter_entry_points("pennylane.plugins")} def refresh_devices(): """Scan installed PennyLane plugins to refresh the device list.""" # This function does not return anything; instead, it has a side effect # which is to update the global plugin_devices variable. # We wish to retain the behaviour of a global plugin_devices dictionary, # as re-importing pkg_resources can be a very slow operation on systems # with a large number of installed packages. global plugin_devices # pylint:disable=global-statement reload(pkg_resources) plugin_devices = _get_device_entrypoints() # get list of installed devices plugin_devices = _get_device_entrypoints() # get chemistry plugin class NestedAttrError: """This class mocks out the qchem module in case it is not installed. Any attempt to print an instance of this class, or to access an attribute of this class, results in an import error, directing the user to the installation instructions for PennyLane Qchem""" error_msg = ( "PennyLane-QChem not installed. 
        "PennyLane-QChem not installed. \n\nTo access the qchem "
        "module, you can install PennyLane-QChem via pip:"
        "\n\npip install pennylane-qchem"
        "\n\nFor more details, see the quantum chemistry documentation:"
        "\nhttps://pennylane.readthedocs.io/en/stable/introduction/chemistry.html"
    )

    def __str__(self):
        raise ImportError(self.error_msg) from None

    def __getattr__(self, name):
        raise ImportError(self.error_msg) from None

    __repr__ = __str__


qchem = NestedAttrError()

for entry in pkg_resources.iter_entry_points("pennylane.qchem"):
    if entry.name == "OpenFermion":
        qchem = entry.load()


def device(name, *args, **kwargs):
    r"""device(name, wires=1, *args, **kwargs)
    Load a :class:`~.Device` and return the instance.

    This function is used to load a particular quantum device,
    which can then be used to construct QNodes.

    PennyLane comes with support for the following devices:

    * :mod:`'default.qubit' <pennylane.devices.default_qubit>`: a simple
      state simulator of qubit-based quantum circuit architectures.

    * :mod:`'default.gaussian' <pennylane.devices.default_gaussian>`: a simple simulator
      of Gaussian states and operations on continuous-variable circuit architectures.

    * :mod:`'default.qubit.tf' <pennylane.devices.default_qubit_tf>`: a state simulator
      of qubit-based quantum circuit architectures written in TensorFlow, which allows
      automatic differentiation through the simulation.

    * :mod:`'default.qubit.autograd' <pennylane.devices.default_qubit_autograd>`: a state
      simulator of qubit-based quantum circuit architectures which allows automatic
      differentiation through the simulation via python's autograd library.

    Additional devices are supported through plugins; see
    the `available plugins <https://pennylane.ai/plugins.html>`_ for more details.

    All devices must be loaded by specifying their **short-name** as listed above,
    followed by the **wires** (subsystems) you wish to initialize. The *wires*
    argument can be an integer, in which case the wires of the device are addressed
    by consecutive integers:

    .. code-block:: python

        dev = qml.device('default.qubit', wires=5)

        def circuit():
            qml.Hadamard(wires=1)
            qml.Hadamard(wires=[0])
            qml.CNOT(wires=[3, 4])
            ...

    The *wires* argument can also be a sequence of unique numbers or strings, specifying
    custom wire labels that the user employs to address the wires:

    .. code-block:: python

        dev = qml.device('default.qubit', wires=['ancilla', 'q11', 'q12', -1, 1])

        def circuit():
            qml.Hadamard(wires='q11')
            qml.Hadamard(wires=['ancilla'])
            qml.CNOT(wires=['q12', -1])
            ...

    Most devices accept a ``shots`` argument which specifies how many circuit executions
    are used to estimate stochastic return values. In particular, ``qml.sample()``
    measurements will return as many samples as specified in the shots argument.
    The shots argument can be changed on a per-call basis using the built-in
    ``shots`` keyword argument.

    .. code-block:: python

        dev = qml.device('default.qubit', wires=1, shots=10)

        @qml.qnode(dev)
        def circuit(a):
            qml.RX(a, wires=0)
            return qml.sample(qml.PauliZ(wires=0))

    >>> circuit(0.8)  # 10 samples are returned
    [ 1  1  1 -1 -1  1  1  1  1  1]
    >>> circuit(0.8, shots=3)  # default is overwritten for this call
    [1 1 1]
    >>> circuit(0.8)  # back to default of 10 samples
    [ 1  1  1 -1 -1  1  1  1  1  1]

    Some devices may accept additional arguments. For instance,
    ``default.gaussian`` accepts the keyword argument ``hbar``, to set
    the convention used in the commutation relation :math:`[\x,\p]=i\hbar`
    (by default set to 2).
Please refer to the documentation for the individual devices to see any additional arguments that might be required or supported. Args: name (str): the name of the device to load wires (int): the number of wires (subsystems) to initialise the device with
Keyword Args: config (pennylane.Configuration): a PennyLane configuration object that contains global and/or device specific configurations. """ if name not in plugin_devices: # Device does not exist in the loaded device list. # Attempt to refresh the devices, in case the user # installed the plugin during the current Python session. refresh_devices() if name in plugin_devices: options = {} # load global configuration settings if available config = kwargs.get("config", default_config) if config: # combine configuration options with keyword arguments. # Keyword arguments take preference, followed by device options, # followed by plugin options, followed by global options. options.update(config["main"]) options.update(config[name.split(".")[0] + ".global"]) options.update(config[name]) kwargs.pop("config", None) options.update(kwargs) # loads the device class plugin_device_class = plugin_devices[name].load() if Version(version()) not in Spec(plugin_device_class.pennylane_requires): raise DeviceError( "The {} plugin requires PennyLane versions {}, however PennyLane " "version {} is installed.".format( name, plugin_device_class.pennylane_requires, __version__ ) ) # load device return plugin_device_class(*args, **options) raise DeviceError("Device does not exist. Make sure the required plugin is installed.") def version(): """Returns the PennyLane version number.""" return __version__
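A short end-to-end sketch of the loading flow the docstring above describes: construct a device, bind a QNode to it, and evaluate an expectation value. Analytic mode (``shots=None``) makes the result deterministic; the expected value follows from :math:`\langle Z_1 \rangle = \cos\theta` for this circuit.

import pennylane as qml
import numpy as np

dev = qml.device("default.qubit", wires=2, shots=None)  # shots=None -> exact expectation values

@qml.qnode(dev)
def circuit(theta):
    qml.RX(theta, wires=0)
    qml.CNOT(wires=[0, 1])
    return qml.expval(qml.PauliZ(1))

print(circuit(np.pi / 4))  # cos(pi/4), roughly 0.7071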
MultiSelect.tsx
import { Select } from 'antd'; import { LabeledValue, SelectValue } from 'antd/es/select'; import React, { useCallback, useMemo } from 'react';
import SelectFilter, { Props as SelectFilterProps } from './SelectFilter'; const { Option } = Select; const MultiSelect: React.FC<SelectFilterProps> = ({ itemName, onChange, value, ...props }) => { const allLabel = useMemo(() => { return itemName ? `All ${itemName}s` : 'All'; }, [ itemName ]); const values = useMemo(() => { if (!value) return []; return Array.isArray(value) ? value : [ value ]; }, [ value ]); const handleSelect = useCallback((selected: SelectValue, option) => { if (!onChange) return; if (selected === ALL_VALUE) { onChange([], option); if (document.activeElement) (document.activeElement as HTMLElement).blur(); } else { const newValue = clone(values); const selectedValue = isObject(selected) ? (selected as LabeledValue).value : selected; if (!newValue.includes(selectedValue)) newValue.push(selectedValue); onChange(newValue as SelectValue, option); } }, [ onChange, values ]); const handleDeselect = useCallback((selected: SelectValue, option) => { if (!onChange) return; const selectedValue = isObject(selected) ? (selected as LabeledValue).value : selected; const newValue = (clone(values) as SelectValue[]).filter(item => item !== selectedValue); onChange(newValue as SelectValue, option); }, [ onChange, values ]); return ( <SelectFilter disableTags dropdownMatchSelectWidth={true} itemName={itemName} mode="multiple" placeholder={allLabel} showArrow style={{ width: props.style?.width ?? 140 }} value={value} onDeselect={handleDeselect} onSelect={handleSelect} {...props}> <Option value={ALL_VALUE}>{allLabel}</Option> {props.children} </SelectFilter> ); }; export default MultiSelect;
import { clone, isObject } from 'shared/utils/data'; import { ALL_VALUE } from 'types';
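A hypothetical parent component wiring up MultiSelect; the option values and the state handling below are illustrative and not part of this file:

import { Select } from 'antd';
import { SelectValue } from 'antd/es/select';
import React, { useState } from 'react';

import MultiSelect from './MultiSelect';

const { Option } = Select;

const StateFilter: React.FC = () => {
  // Empty array means "all"; MultiSelect resets to [] when ALL_VALUE is picked.
  const [ states, setStates ] = useState<SelectValue>([]);
  return (
    <MultiSelect itemName="State" value={states} onChange={value => setStates(value)}>
      <Option value="active">Active</Option>
      <Option value="paused">Paused</Option>
    </MultiSelect>
  );
};

export default StateFilter;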
approtect.rs
#[doc = "Register `APPROTECT` reader"] pub struct R(crate::R<APPROTECT_SPEC>); impl core::ops::Deref for R { type Target = crate::R<APPROTECT_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<APPROTECT_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<APPROTECT_SPEC>) -> Self { R(reader) } } #[doc = "Register `APPROTECT` writer"] pub struct W(crate::W<APPROTECT_SPEC>); impl core::ops::Deref for W { type Target = crate::W<APPROTECT_SPEC>; #[inline(always)] fn
(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<APPROTECT_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<APPROTECT_SPEC>) -> Self { W(writer) } } #[doc = "Enable or disable access port protection.\n\nValue on reset: 255"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum PALL_A { #[doc = "255: Disable"] DISABLED = 255, #[doc = "0: Enable"] ENABLED = 0, } impl From<PALL_A> for u8 { #[inline(always)] fn from(variant: PALL_A) -> Self { variant as _ } } #[doc = "Field `PALL` reader - Enable or disable access port protection."] pub struct PALL_R(crate::FieldReader<u8, PALL_A>); impl PALL_R { pub(crate) fn new(bits: u8) -> Self { PALL_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<PALL_A> { match self.bits { 255 => Some(PALL_A::DISABLED), 0 => Some(PALL_A::ENABLED), _ => None, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { **self == PALL_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { **self == PALL_A::ENABLED } } impl core::ops::Deref for PALL_R { type Target = crate::FieldReader<u8, PALL_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PALL` writer - Enable or disable access port protection."] pub struct PALL_W<'a> { w: &'a mut W, } impl<'a> PALL_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: PALL_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "Disable"] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(PALL_A::DISABLED) } #[doc = "Enable"] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(PALL_A::ENABLED) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0xff) | (value as u32 & 0xff); self.w } } impl R { #[doc = "Bits 0:7 - Enable or disable access port protection."] #[inline(always)] pub fn pall(&self) -> PALL_R { PALL_R::new((self.bits & 0xff) as u8) } } impl W { #[doc = "Bits 0:7 - Enable or disable access port protection."] #[inline(always)] pub fn pall(&mut self) -> PALL_W { PALL_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Access port protection\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [approtect](index.html) module"] pub struct APPROTECT_SPEC; impl crate::RegisterSpec for APPROTECT_SPEC { type Ux = u32; } #[doc = "`read()` method returns [approtect::R](R) reader structure"] impl crate::Readable for APPROTECT_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [approtect::W](W) writer structure"] impl crate::Writable for APPROTECT_SPEC { type Writer = W; } #[doc = "`reset()` method sets APPROTECT to value 0xffff_ffff"] impl crate::Resettable for APPROTECT_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0xffff_ffff } }
deref
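A sketch of driving this register through the generated read/write API, assuming a PAC that exposes the register block as a `UICR` peripheral; the peripheral name is device-specific, and the NVMC write-enable sequence required before UICR writes on real hardware is omitted:

// Hypothetical helper; `UICR` and its `approtect` field are assumed.
fn lock_debug_access(uicr: &UICR) {
    // Write the enumerated value through the field writer...
    uicr.approtect.write(|w| w.pall().enabled());
    // ...and read it back through the field reader.
    assert!(uicr.approtect.read().pall().is_enabled());
}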
test_utils.rs
use crate::avm1::activation::{Activation, ActivationIdentifier}; use crate::avm1::error::Error; use crate::avm1::globals::system::SystemProperties; use crate::avm1::{Avm1, Object, Timers, UpdateContext}; use crate::avm2::Avm2; use crate::backend::audio::{AudioManager, NullAudioBackend}; use crate::backend::locale::NullLocaleBackend; use crate::backend::log::NullLogBackend; use crate::backend::navigator::NullNavigatorBackend; use crate::backend::render::NullRenderer; use crate::backend::storage::MemoryStorageBackend; use crate::backend::ui::NullUiBackend; use crate::backend::video::NullVideoBackend; use crate::context::ActionQueue; use crate::display_object::{MovieClip, Stage, TDisplayObject}; use crate::focus_tracker::FocusTracker; use crate::library::Library; use crate::loader::LoadManager; use crate::prelude::*; use crate::tag_utils::SwfMovie; use crate::vminterface::Instantiator; use gc_arena::{rootless_arena, MutationContext}; use instant::Instant; use rand::{rngs::SmallRng, SeedableRng}; use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; pub fn with_avm<F>(swf_version: u8, test: F) where F: for<'a, 'gc> FnOnce(&mut Activation<'_, 'gc, '_>, Object<'gc>) -> Result<(), Error<'gc>>, { fn in_the_arena<'a, 'gc: 'a, F>(swf_version: u8, test: F, gc_context: MutationContext<'gc, '_>) where F: FnOnce(&mut Activation<'_, 'gc, '_>, Object<'gc>) -> Result<(), Error<'gc>>, { let mut avm1 = Avm1::new(gc_context, swf_version); let mut avm2 = Avm2::new(gc_context); let swf = Arc::new(SwfMovie::empty(swf_version)); let root: DisplayObject<'gc> = MovieClip::new(swf.clone(), gc_context).into(); root.set_depth(gc_context, 0); let stage = Stage::empty(gc_context, 550, 400); let mut frame_rate = 12.0; let globals = avm1.global_object_cell(); let mut context = UpdateContext { gc_context, player_version: 32, swf: &swf, stage, rng: &mut SmallRng::from_seed([0u8; 32]), audio: &mut NullAudioBackend::new(), ui: &mut NullUiBackend::new(), action_queue: &mut ActionQueue::new(), library: &mut Library::empty(gc_context), navigator: &mut NullNavigatorBackend::new(), renderer: &mut NullRenderer::new(), locale: &mut NullLocaleBackend::new(), log: &mut NullLogBackend::new(), video: &mut NullVideoBackend::new(), mouse_over_object: None, mouse_down_object: None, input: &Default::default(), mouse_position: &(Twips::ZERO, Twips::ZERO), drag_object: &mut None, player: None, load_manager: &mut LoadManager::new(), system: &mut SystemProperties::default(), instance_counter: &mut 0, storage: &mut MemoryStorageBackend::default(), shared_objects: &mut HashMap::new(), unbound_text_fields: &mut Vec::new(), timers: &mut Timers::new(), current_context_menu: &mut None, needs_render: &mut false,
external_interface: &mut Default::default(), update_start: Instant::now(), max_execution_duration: Duration::from_secs(15), focus_tracker: FocusTracker::new(gc_context), times_get_time_called: 0, time_offset: &mut 0, audio_manager: &mut AudioManager::new(), frame_rate: &mut frame_rate, }; context.stage.replace_at_depth(&mut context, root, 0); root.post_instantiation(&mut context, None, Instantiator::Movie, false); root.set_name(context.gc_context, "".into()); fn run_test<'a, 'gc: 'a, F>( activation: &mut Activation<'_, 'gc, '_>, root: DisplayObject<'gc>, test: F, ) where F: FnOnce(&mut Activation<'_, 'gc, '_>, Object<'gc>) -> Result<(), Error<'gc>>, { let this = root.object().coerce_to_object(activation); let result = test(activation, this); if let Err(e) = result { panic!("Encountered exception during test: {}", e); } } let swf_version = context.swf.version(); let mut activation = Activation::from_nothing( context, ActivationIdentifier::root("[Test]"), swf_version, globals, root, ); run_test(&mut activation, root, test) } rootless_arena(|gc_context| in_the_arena(swf_version, test, gc_context)) } macro_rules! test_method { ( $test: ident, $name: expr, $object: expr, $($versions: expr => { $([$($arg: expr),*] => $out: expr),* }),* ) => { #[test] fn $test() { use $crate::avm1::test_utils::*; $( for version in &$versions { with_avm(*version, |activation, _root| -> Result<(), Error> { let name: $crate::avm1::AvmString<'_> = $name.into(); let object = $object(activation); $( let args: Vec<Value> = vec![$($arg.into()),*]; let ret = crate::avm1::object::TObject::call_method(&object, name, &args, activation)?; assert_eq!(ret, $out.into(), "{:?} => {:?} in swf {}", args, $out, version); )* Ok(()) }); } )* } }; }
avm1: &mut avm1, avm2: &mut avm2,
cpsr.rs
#[doc = "Reader of register CPSR"] pub type R = crate::R<u32, super::CPSR>; #[doc = "Writer for register CPSR"] pub type W = crate::W<u32, super::CPSR>; #[doc = "Register CPSR `reset()`'s with value 0"] impl crate::ResetValue for super::CPSR { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `Reserved32`"] pub type RESERVED32_R = crate::R<u16, u16>; #[doc = "Write proxy for field `Reserved32`"] pub struct RESERVED32_W<'a> { w: &'a mut W, } impl<'a> RESERVED32_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u16) -> &'a mut W { self.w.bits = (self.w.bits & !(0xffff << 16)) | (((value as u32) & 0xffff) << 16); self.w } } #[doc = "Reader of field `Reserved16`"] pub type RESERVED16_R = crate::R<u8, u8>; #[doc = "Write proxy for field `Reserved16`"] pub struct RESERVED16_W<'a> { w: &'a mut W, } impl<'a> RESERVED16_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0xff << 8)) | (((value as u32) & 0xff) << 8); self.w } } #[doc = "Reader of field `CPSDVSR`"] pub type CPSDVSR_R = crate::R<u8, u8>; #[doc = "Write proxy for field `CPSDVSR`"] pub struct CPSDVSR_W<'a> { w: &'a mut W, } impl<'a> CPSDVSR_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0xff) | ((value as u32) & 0xff); self.w } } impl R { #[doc = "Bits 16:31 - 31:16\\] Reserved"] #[inline(always)] pub fn reserved32(&self) -> RESERVED32_R { RESERVED32_R::new(((self.bits >> 16) & 0xffff) as u16) } #[doc = "Bits 8:15 - 15:8\\] Reserved, read unpredictable, should be written as 0."] #[inline(always)] pub fn reserved16(&self) -> RESERVED16_R { RESERVED16_R::new(((self.bits >> 8) & 0xff) as u8) } #[doc = "Bits 0:7 - 7:0\\] SSI clock prescale divisor (R/W) Reset value: 0x0 This value must be an even number from 2 to 254, depending on the frequency of SSICLK. The LSB always returns zero on reads."] #[inline(always)] pub fn cpsdvsr(&self) -> CPSDVSR_R { CPSDVSR_R::new((self.bits & 0xff) as u8) } } impl W { #[doc = "Bits 16:31 - 31:16\\] Reserved"] #[inline(always)] pub fn
(&mut self) -> RESERVED32_W { RESERVED32_W { w: self } } #[doc = "Bits 8:15 - 15:8\\] Reserved, read unpredictable, should be written as 0."] #[inline(always)] pub fn reserved16(&mut self) -> RESERVED16_W { RESERVED16_W { w: self } } #[doc = "Bits 0:7 - 7:0\\] SSI clock prescale divisor (R/W) Reset value: 0x0 This value must be an even number from 2 to 254, depending on the frequency of SSICLK. The LSB always returns zero on reads."] #[inline(always)] pub fn cpsdvsr(&mut self) -> CPSDVSR_W { CPSDVSR_W { w: self } } }
reserved32
ku.js
(function(e){const t=e["ku"]=e["ku"]||{};t.dictionary=Object.assign(t.dictionary||{},{"%0 of %1":"%0 لە %1","Align center":"بەهێڵکردنی ناورەڕاست","Align left":"بەهێڵکردنی چەپ","Align right":"بەهێڵکردنی ڕاست",Aquamarine:"شینی دەریایی",Big:"گەورە",Black:"ڕەش","Block quote":"وتەی وەرگیراو",Blue:"شین",Bold:"قەڵەو","Break text":"","Bulleted List":"لیستەی خاڵەیی","Bulleted list styles toolbar":"",Cancel:"هەڵوەشاندنەوە","Centered image":"ناوەڕاستکراوی وێنە","Change image text alternative":"گۆڕینی جێگروەی تێکیسی وێنە","Choose heading":"سەرنووسە هەڵبژێرە",Circle:"",Column:"ستوون",Decimal:"","Decimal with leading zero":"","Decrease indent":"کەمکردنەوەی بۆشایی",Default:"بنچینە","Delete column":"سڕینەوەی ستوون","Delete row":"سڕینەوەی ڕیز","Dim grey":"ڕەساسی تاریک",Disc:"","Document colors":"ڕەنگەکانی دۆکومێنت",Downloadable:"Downloadable","Dropdown toolbar":"تووڵامرازی لیستەیی","Edit block":"دەستکاری بلۆک","Edit link":"دەستکاری بەستەر","Editor toolbar":"تووڵامرازی دەسکاریکەر","Enter image caption":"سەردێڕی وێنە دابنێ","Font Background Color":"ڕەنگی پاشبنەمای فۆنت","Font Color":"ڕەنگی فۆنت","Font Family":"فۆنتی خێزانی","Font Size":"قەبارەی فۆنت","Full size image":"پڕ بەقەبارەی وێنە",Green:"سەوز",Grey:"ڕەساسی","Header column":"ستوونی دەسپێک","Header row":"ڕیزی دەسپێک",Heading:"سەرنووسە","Heading 1":"سەرنووسەی 1","Heading 2":"سەرنووسەی 2","Heading 3":"سەرنووسەی 3","Heading 4":"سەرنووسەی 4","Heading 5":"سەرنووسەی 5","Heading 6":"سەرنووسەی 6",Huge:"زۆر گەورە","Image toolbar":"تووڵامرازی وێنە","image widget":"وێدجیتی وێنە","In line":"","Increase indent":"زیادکردنی بۆشایی",Insert:"","Insert column left":"دانانی ستوون لە چەپ","Insert column right":"دانانی ستوون لە ڕاست","Insert image":"وێنە دابنێ","Insert image via URL":"","Insert media":"مێدیا دابنێ","Insert paragraph after block":"","Insert paragraph before block":"","Insert row above":"دانانی ڕیز لە سەرەوە","Insert row below":"دانانی ڕیز لە ژێرەوە","Insert table":"خشتە دابنێ",Italic:"لار",Justify:"هاوستوونی","Left aligned image":"ڕیزکردنی وێنە بۆ لای چەپ","Light blue":"شینی ڕووناک","Light green":"سەوزی ڕووناک","Light grey":"ڕەساسی ڕووناک",Link:"بەستەر","Link image":"","Link URL":"ناونیشانی بەستەر","Lower-latin":"","Lower–roman":"","Media URL":"بەستەری مێدیا","media widget":"ویدجێتتی مێدیا","Merge cell down":"تێکەڵکردنی خانەکان بەرەو ژێرەوە","Merge cell left":"تێکەڵکردنی خانەکان بەرەو چەپ","Merge cell right":"تێکەڵکردنی خانەکان بەرەو ڕاست","Merge cell up":"تێکەڵکردنی خانەکان بەرەو سەر","Merge cells":"تێکەڵکردنی خانەکان",Next:"دواتر","Numbered List":"لیستەی ژمارەیی","Numbered list styles toolbar":"","Open in a new tab":"کردنەوەی لە پەنجەرەیەکی نوێ","Open link in new tab":"کردنەوەی بەستەرەکە لە پەڕەیەکی نوێ",Orange:"پرتەقاڵی",Paragraph:"پەراگراف","Paste the media URL in the input.":"بەستەری مێدیاکە لە خانەکە بلکێنە.",Previous:"پێشتر",Purple:"مۆر",Red:"سور",Redo:"هەلگەڕاندنەوە","Remove color":"لابردنی ڕەنگ","Rich Text Editor":"سەرنوسەری دەقی بەپیت","Rich Text Editor, %0":"سەرنوسەری دەقی بەپیت, %0","Right aligned image":"ڕیزکردنی وێنە بۆ لای ڕاست",Row:"ڕیز",Save:"پاشکەوتکردن","Select column":"","Select row":"","Show more items":"بڕگەی زیاتر نیشانبدە","Side image":"لای وێنە",Small:"بچوک","Split cell horizontally":"بەشکردنی خانەکان بە ئاسۆیی","Split cell vertically":"بەشکردنی خانەکان بە ئەستوونی",Square:"","Table toolbar":"تووڵامرازی خشتە","Text alignment":"ڕیززکردنی تێکست","Text alignment toolbar":"تووڵامرازی ڕیززکردنی تێکست","Text alternative":"جێگرەوەی تێکست","The URL must not be empty.":"پێویستە بەستەر بەتاڵ نەبێت.","This link has no URL":"ئەم بەستەرە 
ناونیشانی نیە","This media URL is not supported.":"ئەم بەستەری مێدیایە پاڵپشتی ناکرێت.",Tiny:"گچکە","Tip: Paste the URL into the content to embed faster.":"Tip: Paste the URL into the content to embed faster.","To-do List":"لیستەی کردن","Toggle caption off":"","Toggle caption on":"","Toggle the circle list style":"","Toggle the decimal list style":"","Toggle the decimal with leading zero list style":"","Toggle the disc list style":"","Toggle the lower–latin list style":"","Toggle the lower–roman list style":"","Toggle the square list style":"","Toggle the upper–latin list style":"","Toggle the upper–roman list style":"",Turquoise:"شینی ئاسمانی",Underline:"ژێرهێڵ",Undo:"وەک خۆی لێ بکەوە",Unlink:"لابردنی بەستەر",Update:"","Update image URL":"","Upload failed":"بارکردنەکە سەرنەکەووت","Upload in progress":"بارکردنەکە لە جێبەجێکردن دایە","Upper-latin":"","Upper-roman":"",White:"سپی","Widget toolbar":"تووڵامرازی ویدجێت","Wrap text":"",Yellow:"زەرد"});t.getPluralForm=function(e){return e!=1}})(window.CKEDITOR_TRANSLATIONS||(window.CKEDITOR_TRANSLATIONS={}));
label.go
package service import ( "encoding/json" "fmt" "strconv" "github.com/kyma-incubator/compass/components/connectivity-adapter/internal/appregistry/model" "github.com/kyma-incubator/compass/components/connectivity-adapter/pkg/apperrors" "github.com/pkg/errors" "github.com/kyma-incubator/compass/components/director/pkg/graphql" ) const legacyServicesLabelKey = "compass/legacy-services" type LegacyServiceReference struct { ID string `json:"id"` APIDefID *string `json:"apiDefID"` EventDefID *string `json:"eventDefID"` } type labeler struct{} func NewAppLabeler() *labeler { return &labeler{} } func (l *labeler) WriteServiceReference(appDetails graphql.ApplicationExt, serviceReference LegacyServiceReference) (graphql.LabelInput, error) { services, err := l.readLabel(appDetails) if err != nil { return graphql.LabelInput{}, err } services[serviceReference.ID] = serviceReference return l.writeLabel(services) } func (l *labeler) ReadServiceReference(appDetails graphql.ApplicationExt, serviceID string) (LegacyServiceReference, error) { services, err := l.readLabel(appDetails) if err != nil { return LegacyServiceReference{}, err } service, exists := services[serviceID] if !exists { return LegacyServiceReference{}, apperrors.NotFound("service with ID '%s' not found", serviceID) } return service, nil } func (l *labeler) DeleteServiceReference(appDetails graphql.ApplicationExt, serviceID string) (graphql.LabelInput, error) { services, err := l.readLabel(appDetails) if err != nil { return graphql.LabelInput{}, err } delete(services, serviceID) return l.writeLabel(services) }
func (l *labeler) ReadService(appDetails graphql.ApplicationExt, serviceID string) (model.GraphQLServiceDetails, error) {
	serviceRef, err := l.ReadServiceReference(appDetails, serviceID)
	if err != nil {
		return model.GraphQLServiceDetails{}, err
	}

	var outputAPI *graphql.APIDefinitionExt
	if serviceRef.APIDefID != nil {
		for _, api := range appDetails.APIDefinitions.Data {
			if api != nil && api.ID == *serviceRef.APIDefID {
				outputAPI = api
				break
			}
		}
	}

	var outputEvent *graphql.EventAPIDefinitionExt
	if serviceRef.EventDefID != nil {
		for _, event := range appDetails.EventDefinitions.Data {
			if event != nil && event.ID == *serviceRef.EventDefID {
				outputEvent = event
				break
			}
		}
	}

	return model.GraphQLServiceDetails{
		ID:    serviceRef.ID,
		API:   outputAPI,
		Event: outputEvent,
	}, nil
}

func (l *labeler) ListServices(appDetails graphql.ApplicationExt) ([]model.GraphQLServiceDetails, error) {
	services, err := l.readLabel(appDetails)
	if err != nil {
		return nil, err
	}

	var serviceDetails []model.GraphQLServiceDetails
	for serviceIDKey := range services {
		value, err := l.ReadService(appDetails, serviceIDKey)
		if err != nil {
			return nil, err
		}
		serviceDetails = append(serviceDetails, value)
	}

	return serviceDetails, nil
}

func (l *labeler) readLabel(appDetails graphql.ApplicationExt) (map[string]LegacyServiceReference, error) {
	value := appDetails.Labels[legacyServicesLabelKey]
	if value == nil {
		value = "{}"
	}

	strValue, ok := value.(string)
	if !ok {
		return nil, fmt.Errorf("invalid type: expected: string; actual: %T", value)
	}

	var services map[string]LegacyServiceReference
	err := json.Unmarshal([]byte(strValue), &services)
	if err != nil {
		return nil, errors.Wrap(err, "while unmarshalling JSON value")
	}

	return services, nil
}

func (l *labeler) writeLabel(services map[string]LegacyServiceReference) (graphql.LabelInput, error) {
	marshalledServices, err := json.Marshal(services)
	if err != nil {
		return graphql.LabelInput{}, errors.Wrap(err, "while marshalling JSON value")
	}

	return graphql.LabelInput{
		Key:   legacyServicesLabelKey,
		Value: strconv.Quote(string(marshalledServices)),
	}, nil
}
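A round-trip sketch for the labeler above; the `graphql.ApplicationExt` fixture is assumed to come from Director, everything else uses functions defined in this file:

// exampleRoundTrip sketches how a caller might persist a legacy service
// reference; the app fixture is an assumption.
func exampleRoundTrip(app graphql.ApplicationExt) error {
	l := NewAppLabeler()

	apiID := "api-1"
	labelInput, err := l.WriteServiceReference(app, LegacyServiceReference{
		ID:       "svc-1",
		APIDefID: &apiID,
	})
	if err != nil {
		return err
	}

	// labelInput would normally be sent back to Director as a label mutation.
	fmt.Println("updated label:", labelInput.Key)
	return nil
}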
data.go
// Licensed to Elasticsearch B.V. under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. Elasticsearch B.V. licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package ml_job import (
"github.com/joeshaw/multierror" "github.com/pkg/errors" "github.com/elastic/beats/v7/metricbeat/helper/elastic" "github.com/elastic/elastic-agent-libs/mapstr" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/elasticsearch" ) var ( schema = s.Schema{ "id": c.Str("job_id"), "state": c.Str("state"), "data_counts": c.Dict("data_counts", s.Schema{ "processed_record_count": c.Int("processed_record_count"), "invalid_date_count": c.Int("invalid_date_count"), }), "model_size": c.Dict("model_size_stats", s.Schema{ "memory_status": c.Str("memory_status"), }), "forecasts_stats": c.Dict("forecasts_stats", s.Schema{ "total": c.Int("total"), }), } ) type jobsStruct struct { Jobs []map[string]interface{} `json:"jobs"` } func eventsMapping(r mb.ReporterV2, info elasticsearch.Info, content []byte, isXpack bool) error { jobsData := &jobsStruct{} err := json.Unmarshal(content, jobsData) if err != nil { return errors.Wrap(err, "failure parsing Elasticsearch ML Job Stats API response") } var errs multierror.Errors for _, job := range jobsData.Jobs { if err := elastic.FixTimestampField(job, "data_counts.earliest_record_timestamp"); err != nil { errs = append(errs, err) continue } if err := elastic.FixTimestampField(job, "data_counts.latest_record_timestamp"); err != nil { errs = append(errs, err) continue } event := mb.Event{} event.RootFields = mapstr.M{} event.RootFields.Put("service.name", elasticsearch.ModuleName) event.ModuleFields = mapstr.M{} event.ModuleFields.Put("cluster.name", info.ClusterName) event.ModuleFields.Put("cluster.id", info.ClusterID) if node, exists := job["node"]; exists { nodeHash := node.(map[string]interface{}) event.ModuleFields.Put("node.id", nodeHash["id"]) event.ModuleFields.Put("node.name", nodeHash["name"]) } event.MetricSetFields, _ = schema.Apply(job) // xpack.enabled in config using standalone metricbeat writes to `.monitoring` instead of `metricbeat-*` // When using Agent, the index name is overwritten anyways. if isXpack { index := elastic.MakeXPackMonitoringIndexName(elastic.Elasticsearch) event.Index = index } r.Event(event) } return errs.Err() }
"encoding/json"
setup.py
from setuptools import setup setup( name="myhello", version='0.1', py_modules=['colors'], include_package_data=True, install_requires=[ 'Click', 'colorama', ],
myhello=hello:cli ''', )
entry_points=''' [console_scripts]
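The `entry_points` stanza above maps a `myhello` console command to a `cli` callable in a `hello` module. That module is not shown in this record, so the sketch below is an assumption consistent with the declared Click and colorama dependencies:

# hello.py -- hypothetical module behind the `myhello=hello:cli` entry point.
import click
from colorama import init

init()  # enable ANSI colors on Windows terminals

@click.command()
@click.option("--name", default="world", help="Who to greet.")
def cli(name):
    """Print a colorful greeting."""
    click.secho("Hello, {}!".format(name), fg="green")

if __name__ == "__main__":
    cli()

After `pip install -e .`, the command would be invoked as `myhello --name you`.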
var_buf.rs
use std::io;
use std::io::BufRead;
use std::io::Read;

/// An "extension" of `std::io::BufRead`, for which `fill_*` can be forced to read.
///
/// `BufRead`'s `fill_buf` may return only 1 byte, and will not re-fill until you consume.
///
/// `fill_many` will try to fill the internal buffer to the `target` requested,
/// giving up only if the underlying reader hits end-of-file, or returns an error.
///
/// `fill_at_least` will only return success if the internal buffer contains at least
/// `target` bytes.
///
/// `consume` advances the internal pointer, same as with `BufRead`.
pub trait VarBufRead {
    /// Advance the internal pointer, so that `fill_*` and `read*` will no longer return
    /// the consumed bytes.
    fn consume(&mut self, amt: usize);

    /// Try hard to return a buffer of at least `target` bytes. If the end of file is
    /// reached, then the buffer will be shorter. If we already have sufficient bytes in
    /// memory, then no reads will be performed, and the larger buffer will be returned.
    ///
    /// Other errors (except interruption) are returned as-is.
    fn fill_many(&mut self, target: usize) -> io::Result<&[u8]>;

    /// Return a buffer of at least `target` bytes, by repeatedly reading from the
    /// underlying reader. If the underlying reader reaches end-of-file, an error will
    /// be returned.
    ///
    /// Other errors (except interruption) are returned as-is.
    fn fill_at_least(&mut self, target: usize) -> io::Result<&[u8]> {
        let buf = self.fill_many(target)?;
        if buf.len() < target {
            return Err(io::ErrorKind::UnexpectedEof.into());
        }
        Ok(buf)
    }

    /// Read the bytes before `delim`, consuming them and the delimiter, after filling
    /// the buffer up to `limit` bytes. Returns a `NotFound` error, without consuming
    /// anything, if the delimiter is not found.
    fn read_until_limit(&mut self, delim: u8, limit: usize) -> Result<Vec<u8>, io::Error> {
        let buf = self.fill_many(limit)?;
        if let Some(end) = memchr::memchr(delim, buf) {
            let ret = buf[..end].to_vec();
            self.consume(end + 1);
            return Ok(ret);
        }

        Err(io::ErrorKind::NotFound.into())
    }
}

pub struct VarBufReader<R> {
    inner: R,
    data: Vec<u8>,
}

impl<R: Read> VarBufReader<R> {
    pub fn new(inner: R) -> VarBufReader<R> {
        VarBufReader {
            inner,
            data: Vec::new(),
        }
    }
}

impl<R: Read> VarBufRead for VarBufReader<R> {
    fn consume(&mut self, amt: usize) {
        assert!(amt <= self.data.len());
        self.data.drain(..amt);
    }

    fn fill_many(&mut self, target: usize) -> Result<&[u8], io::Error> {
        while self.data.len() < target {
            let mut buf = [0u8; 8 * 1024];
            let read = self.inner.read(&mut buf)?;
            if 0 == read {
                break;
            }
            self.data.extend(&buf[..read]);
        }

        Ok(&self.data)
    }
}

impl<R: Read> BufRead for VarBufReader<R> {
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        self.fill_many(1)
    }

    fn consume(&mut self, amt: usize) {
        VarBufRead::consume(self, amt)
    }
}

impl<R: Read> Read for VarBufReader<R> {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
        let found = self.fill_many(buf.len())?;
        let valid = buf.len().min(found.len());
        buf[..valid].copy_from_slice(&found[..valid]);
        VarBufRead::consume(self, valid);
        Ok(valid)
    }
}

#[cfg(test)]
mod tests {
    use std::io;
    use std::io::Cursor;
    use std::io::Read;

    use byteorder::ReadBytesExt;

    use crate::ShortRead;

    use super::VarBufRead;
    use super::VarBufReader;

    #[test]
    fn fill_then_read() {
        let mut vb = VarBufReader::new(ShortRead::new(
            Cursor::new(b"hello"),
            vec![1, 1, 2, 1, 99].into_iter(),
        ));
        assert_eq!(b"hell", &vb.fill_many(4).unwrap()[..4]);
        assert_eq!(b'h', vb.read_u8().unwrap());
        let mut buf = [0u8; 4];
        assert_eq!(4, vb.read(&mut buf).unwrap());
        assert_eq!(b"ello", &buf);
    }

    #[test]
    fn read_then_fill() {
        let mut vb = VarBufReader::new(ShortRead::new(
            Cursor::new(b"hello world"),
            vec![1, 1, 2, 1, 99].into_iter(),
        ));
        assert_eq!(b'h', 
vb.read_u8().unwrap()); assert_eq!(b"ell", &vb.fill_many(5).unwrap()[..3]); assert_eq!(b'e', vb.read_u8().unwrap()); vb.consume("llo ".len()); assert_eq!(b"world", &vb.fill_many(7).unwrap()); } #[test] fn double_fill()
#[test] fn eof() { let mut vb = VarBufReader::new(ShortRead::new( Cursor::new(b"hello world"), vec![1, 1, 2, 1, 99].into_iter(), )); assert_eq!(b"hello world", &vb.fill_many(100).unwrap()); vb.consume("hello wor".len()); assert_eq!(b"ld", &vb.fill_many(100).unwrap()); assert_eq!(b"ld", &vb.fill_at_least(2).unwrap()); assert_eq!( io::ErrorKind::UnexpectedEof, vb.fill_at_least(3).unwrap_err().kind() ); vb.consume(1); assert_eq!(b"d", &vb.fill_at_least(1).unwrap()); assert_eq!( io::ErrorKind::UnexpectedEof, vb.fill_at_least(2).unwrap_err().kind() ); vb.consume(1); assert_eq!(b"", &vb.fill_many(1).unwrap()); assert_eq!( io::ErrorKind::UnexpectedEof, vb.fill_at_least(1).unwrap_err().kind() ); } #[test] fn read_short() { let mut vb = VarBufReader::new(ShortRead::new( Cursor::new(b"hello there world"), vec![1, 1, 2, 1, 99].into_iter(), )); assert_eq!( io::ErrorKind::NotFound, vb.read_until_limit(b' ', 3).unwrap_err().kind() ); assert_eq!( io::ErrorKind::NotFound, vb.read_until_limit(b' ', 4).unwrap_err().kind() ); assert_eq!( io::ErrorKind::NotFound, vb.read_until_limit(b' ', 5).unwrap_err().kind() ); assert_eq!(b"hello", vb.read_until_limit(b' ', 6).unwrap().as_slice()); assert_eq!(b"there", vb.read_until_limit(b' ', 6).unwrap().as_slice()); assert_eq!( io::ErrorKind::NotFound, vb.read_until_limit(b' ', 200).unwrap_err().kind() ); assert_eq!(b"world", &vb.fill_many(5).unwrap()[..5]); } }
{ let mut vb = VarBufReader::new(ShortRead::new( Cursor::new(b"hello world"), vec![1, 1, 2, 1, 99].into_iter(), )); assert_eq!(b"he", &vb.fill_many(2).unwrap()[..2]); assert_eq!(b"hell", &vb.fill_many(4).unwrap()[..4]); vb.consume(3); assert_eq!(b"lo", &vb.fill_many(2).unwrap()[..2]); assert_eq!(b'l', vb.read_u8().unwrap()); assert_eq!(b'o', vb.read_u8().unwrap()); }
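A small demonstration of the trait on an in-memory reader; this uses only the APIs defined in this file, so nothing here is assumed:

// Parse a `key=value` token stream with a bounded lookahead.
fn demo() -> std::io::Result<()> {
    use std::io::Cursor;

    let mut vb = VarBufReader::new(Cursor::new(&b"key=value\nrest"[..]));
    // Pull the token before '=' without over-reading past a sane limit.
    let key = vb.read_until_limit(b'=', 16)?;
    assert_eq!(b"key", key.as_slice());
    // Require at least five more bytes to be buffered before proceeding.
    let buf = vb.fill_at_least(5)?;
    assert_eq!(b"value", &buf[..5]);
    Ok(())
}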
disallow_typename_on_root_test.rs
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. * * @generated SignedSource<<2ea6403534fde8fe5781de95bd4059c7>> */ mod disallow_typename_on_root; use disallow_typename_on_root::transform_fixture; use fixture_tests::test_fixture; #[test] fn typename_on_mutation_invalid()
#[test] fn typename_on_query_invalid() { let input = include_str!("disallow_typename_on_root/fixtures/typename-on-query.invalid.graphql"); let expected = include_str!("disallow_typename_on_root/fixtures/typename-on-query.invalid.expected"); test_fixture(transform_fixture, "typename-on-query.invalid.graphql", "disallow_typename_on_root/fixtures/typename-on-query.invalid.expected", input, expected); } #[test] fn valid() { let input = include_str!("disallow_typename_on_root/fixtures/valid.graphql"); let expected = include_str!("disallow_typename_on_root/fixtures/valid.expected"); test_fixture(transform_fixture, "valid.graphql", "disallow_typename_on_root/fixtures/valid.expected", input, expected); }
{ let input = include_str!("disallow_typename_on_root/fixtures/typename-on-mutation.invalid.graphql"); let expected = include_str!("disallow_typename_on_root/fixtures/typename-on-mutation.invalid.expected"); test_fixture(transform_fixture, "typename-on-mutation.invalid.graphql", "disallow_typename_on_root/fixtures/typename-on-mutation.invalid.expected", input, expected); }
texteditor.py
from tkinter import *
import tkinter.filedialog as tkFileDialog

# Tk()'s first positional argument is the X11 screen name, not the window
# title, so the title must be set explicitly.
root = Tk()
root.title("Text Editor")
text = Text(root)
text.grid()

def
():
    global text
    t = text.get("1.0", "end-1c")
    savelocation = tkFileDialog.asksaveasfilename()
    # A context manager guarantees the file is closed even if the write fails.
    with open(savelocation, "w") as file1:
        file1.write(t)

button = Button(root, text="Save", command=saveas)
button.grid()

def FontHelvetica():
    global text
    text.config(font="Helvetica")

def FontCourier():
    global text
    text.config(font="Courier")

font = Menubutton(root, text="Font")
font.grid()
font.menu = Menu(font, tearoff=0)
font["menu"] = font.menu

Helvetica = IntVar()
Courier = IntVar()

font.menu.add_checkbutton(label="Courier", variable=Courier, command=FontCourier)
font.menu.add_checkbutton(label="Helvetica", variable=Helvetica, command=FontHelvetica)

root.mainloop()
saveas
script.js
// Assignment Code
var generateBtn = document.querySelector("#generate");

// Declare Global Data
// List of lower case letters
var lowerCase = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'];
// List of upper case letters
var upperCase = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'];
// List of numbers
var numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'];
// List of special characters
var special = ['@', '%', '+', '/', "'", '!', '#', '$', '^', '?', ':', ',', ')', '(', '}', '{', ']', '[', '~', '-', '_', '.'];

// Welcome message
window.alert("Hello! Please press Generate password to start");

// Ask the user about the password options and store the answers
function questions() {
  var isValid = false;
  do {
    // Prompt shown after clicking Generate Password; parsing to a number
    // makes non-numeric input fail validation instead of slipping through.
    var length = parseInt(prompt("Please choose password length between 8 and 128 characters"), 10);
    // Confirmation for lowercase letters.
    var askLowerCase = confirm("Would you like your password to include lower case letters?");
    // Confirmation for uppercase letters.
    var askUpperCase = confirm("Would you like your password to include upper case letters?");
    // Confirmation for numbers.
    var askNumbers = confirm("Would you like your password to include numbers?");
    // Confirmation for special characters.
    var askSpecial = confirm("Would you like your password to include special characters?");

    // Collect the answers in a single object.
    var responses = {
      length: length,
      askNumbers: askNumbers,
      askLowerCase: askLowerCase,
      askUpperCase: askUpperCase,
      askSpecial: askSpecial
    };

    // Validate: length must be a number between 8 and 128, and at least one
    // character type must be selected; otherwise loop and ask again.
    if (isNaN(length) || (length < 8) || (length > 128))
      alert("Choose number between 8 and 128");
    else if ((!askNumbers) && (!askLowerCase) && (!askUpperCase) && (!askSpecial))
      alert("Must choose at least one type.");
    else
      isValid = true;
  } while (!isValid);
  return responses;
}

// Joins the selected character sets into one pool, then draws random
// characters from the pool to build the final password.
function generatePassword() {
  var passwordOptions = questions();
  var combination = [];
  if (passwordOptions.askNumbers) {
    for (var i of numbers) combination.push(i);
  }
  if (passwordOptions.askLowerCase) {
    for (var i of lowerCase) combination.push(i);
  }
  if (passwordOptions.askUpperCase) {
    for (var i of upperCase) combination.push(i);
  }
  if (passwordOptions.askSpecial) {
    for (var i of special) combination.push(i);
  }

  // Loop to pick one random character from the pool for each position.
  for (var i = 0; i < passwordOptions.length; i++) {
    finalPassword += combination[Math.floor(Math.random() * combination.length)];
  }
  return finalPassword;
}

// Write password to the #password input
function writePassword() {
  var password = generatePassword();
  var passwordText = document.querySelector("#password");
  passwordText.value = password;
}

// Add event listener to generate button
generateBtn.addEventListener("click", writePassword);
  var finalPassword = ""; // accumulator combined with the (if) statements above to build the final password
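An optional hardening sketch: `Math.random()` is not cryptographically secure, so a stronger generator could draw indices from the Web Crypto API instead. This is an illustrative alternative, not part of the original assignment:

// Replacement for Math.floor(Math.random() * max) using Web Crypto.
function secureIndex(max) {
  var array = new Uint32Array(1);
  window.crypto.getRandomValues(array);
  return array[0] % max; // small modulo bias, acceptable for a demo
}

// Usage inside the loop: combination[secureIndex(combination.length)]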
apps.py
# -*- coding: utf-8 -*-
""" Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import unicode_literals from django.apps import AppConfig class PipelinePluginsConfig(AppConfig): name = 'pipeline_plugins'
test_coordinators.py
# -*- coding: utf-8 -*- # Copyright (c) 2020, Siddhant and Contributors # See license.txt from __future__ import unicode_literals # import frappe import unittest class
(unittest.TestCase): pass
TestCoordinators
operation.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. /// <p>Cancels the deletion of a KMS key. When this operation succeeds, the key /// state of the KMS key is <code>Disabled</code>. To enable the KMS key, use <a>EnableKey</a>. </p> /// <p>For more information about scheduling and canceling deletion of a KMS key, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html">Deleting KMS keys</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:CancelKeyDeletion</a> (key policy)</p> /// <p> /// <b>Related operations</b>: <a>ScheduleKeyDeletion</a> /// </p> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct CancelKeyDeletion { _private: (), } impl CancelKeyDeletion { /// Creates a new builder-style object to manufacture [`CancelKeyDeletionInput`](crate::input::CancelKeyDeletionInput) pub fn builder() -> crate::input::cancel_key_deletion_input::Builder { crate::input::cancel_key_deletion_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for CancelKeyDeletion { type Output = std::result::Result< crate::output::CancelKeyDeletionOutput, crate::error::CancelKeyDeletionError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_cancel_key_deletion_error(response) } else { crate::operation_deser::parse_cancel_key_deletion_response(response) } } } /// <p>Connects or reconnects a <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">custom key store</a> to its associated CloudHSM cluster.</p> /// <p>The custom key store must be connected before you can create KMS keys /// in the key store or use the KMS keys it contains. You can disconnect and reconnect a custom key /// store at any time.</p> /// <p>To connect a custom key store, its associated CloudHSM cluster must have at least one active /// HSM. To get the number of active HSMs in a cluster, use the <a href="https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html">DescribeClusters</a> operation. To add HSMs /// to the cluster, use the <a href="https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html">CreateHsm</a> operation. Also, the <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser"> /// <code>kmsuser</code> crypto /// user</a> (CU) must not be logged into the cluster. This prevents KMS from using this /// account to log in.</p> /// <p>The connection process can take an extended amount of time to complete; up to 20 minutes. /// This operation starts the connection process, but it does not wait for it to complete. When it /// succeeds, this operation quickly returns an HTTP 200 response and a JSON object with no /// properties. 
However, this response does not indicate that the custom key store is connected. /// To get the connection state of the custom key store, use the <a>DescribeCustomKeyStores</a> operation.</p> /// <p>During the connection process, KMS finds the CloudHSM cluster that is associated with the /// custom key store, creates the connection infrastructure, connects to the cluster, logs into /// the CloudHSM client as the <code>kmsuser</code> CU, and rotates its password.</p> /// <p>The <code>ConnectCustomKeyStore</code> operation might fail for various reasons. To find /// the reason, use the <a>DescribeCustomKeyStores</a> operation and see the /// <code>ConnectionErrorCode</code> in the response. For help interpreting the /// <code>ConnectionErrorCode</code>, see <a>CustomKeyStoresListEntry</a>.</p> /// <p>To fix the failure, use the <a>DisconnectCustomKeyStore</a> operation to /// disconnect the custom key store, correct the error, use the <a>UpdateCustomKeyStore</a> operation if necessary, and then use /// <code>ConnectCustomKeyStore</code> again.</p> /// <p>If you are having trouble connecting or disconnecting a custom key store, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html">Troubleshooting a Custom Key /// Store</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:ConnectCustomKeyStore</a> (IAM policy)</p> /// <p> /// <b>Related operations</b> /// </p> /// <ul> /// <li> /// <p> /// <a>CreateCustomKeyStore</a> /// </p> /// </li> /// <li> /// <p> /// <a>DeleteCustomKeyStore</a> /// </p> /// </li> /// <li> /// <p> /// <a>DescribeCustomKeyStores</a> /// </p> /// </li> /// <li> /// <p> /// <a>DisconnectCustomKeyStore</a> /// </p> /// </li> /// <li> /// <p> /// <a>UpdateCustomKeyStore</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct ConnectCustomKeyStore { _private: (), } impl ConnectCustomKeyStore { /// Creates a new builder-style object to manufacture [`ConnectCustomKeyStoreInput`](crate::input::ConnectCustomKeyStoreInput) pub fn builder() -> crate::input::connect_custom_key_store_input::Builder { crate::input::connect_custom_key_store_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for ConnectCustomKeyStore { type Output = std::result::Result< crate::output::ConnectCustomKeyStoreOutput, crate::error::ConnectCustomKeyStoreError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_connect_custom_key_store_error(response) } else { crate::operation_deser::parse_connect_custom_key_store_response(response) } } } /// <p>Creates a friendly name for a KMS key. </p> /// <note> /// <p>Adding, deleting, or updating an alias can allow or deny permission to the KMS key. 
For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/abac.html">Using ABAC in KMS</a> in the <i>Key Management Service Developer Guide</i>.</p> /// </note> /// <p>You can use an alias to identify a KMS key in the KMS console, in the <a>DescribeKey</a> operation and in <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations">cryptographic operations</a>, such as <a>Encrypt</a> and /// <a>GenerateDataKey</a>. You can also change the KMS key that's associated with the /// alias (<a>UpdateAlias</a>) or delete the alias (<a>DeleteAlias</a>) at /// any time. These operations don't affect the underlying KMS key. </p> /// <p>You can associate the alias with any customer managed key in the same Amazon Web Services Region. Each /// alias is associated with only one KMS key at a time, but a KMS key can have multiple aliases. A valid KMS key is required. You can't create an alias without a KMS key.</p> /// <p>The alias must be unique in the account and Region, but you can have aliases with the same /// name in different Regions. For detailed information about aliases, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html">Using aliases</a> in the /// <i>Key Management Service Developer Guide</i>.</p> /// <p>This operation does not return a response. To get the alias that you created, use the /// <a>ListAliases</a> operation.</p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: No. You cannot perform this operation on an alias in a different Amazon Web Services account.</p> /// <p> /// <b>Required permissions</b> /// </p> /// <ul> /// <li> /// <p> /// <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:CreateAlias</a> on the alias (IAM policy).</p> /// </li> /// <li> /// <p> /// <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:CreateAlias</a> on the KMS key (key policy).</p> /// </li> /// </ul> /// <p>For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access">Controlling access to aliases</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>DeleteAlias</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListAliases</a> /// </p> /// </li> /// <li> /// <p> /// <a>UpdateAlias</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct CreateAlias { _private: (), } impl CreateAlias { /// Creates a new builder-style object to manufacture [`CreateAliasInput`](crate::input::CreateAliasInput) pub fn builder() -> crate::input::create_alias_input::Builder { crate::input::create_alias_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for CreateAlias { type Output = std::result::Result<crate::output::CreateAliasOutput, crate::error::CreateAliasError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_create_alias_error(response) } else { 
crate::operation_deser::parse_create_alias_response(response) } } } /// <p>Creates a <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">custom key store</a> that is associated with an <a href="https://docs.aws.amazon.com/cloudhsm/latest/userguide/clusters.html">CloudHSM cluster</a> that you own and /// manage.</p> /// <p>This operation is part of the <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">Custom Key Store feature</a> feature in KMS, which /// combines the convenience and extensive integration of KMS with the isolation and control of a /// single-tenant key store.</p> /// <p>Before you create the custom key store, you must assemble /// the required elements, including an CloudHSM cluster that fulfills the requirements for a custom /// key store. For details about the required elements, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore">Assemble the Prerequisites</a> /// in the <i>Key Management Service Developer Guide</i>.</p> /// <p>When the operation completes successfully, it returns the ID of the new custom key store. /// Before you can use your new custom key store, you need to use the <a>ConnectCustomKeyStore</a> operation to connect the new key store to its CloudHSM /// cluster. Even if you are not going to use your custom key store immediately, you might want to /// connect it to verify that all settings are correct and then disconnect it until you are ready /// to use it.</p> /// <p>For help with failures, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html">Troubleshooting a Custom Key Store</a> in the /// <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:CreateCustomKeyStore</a> (IAM policy).</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>ConnectCustomKeyStore</a> /// </p> /// </li> /// <li> /// <p> /// <a>DeleteCustomKeyStore</a> /// </p> /// </li> /// <li> /// <p> /// <a>DescribeCustomKeyStores</a> /// </p> /// </li> /// <li> /// <p> /// <a>DisconnectCustomKeyStore</a> /// </p> /// </li> /// <li> /// <p> /// <a>UpdateCustomKeyStore</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct CreateCustomKeyStore { _private: (), } impl CreateCustomKeyStore { /// Creates a new builder-style object to manufacture [`CreateCustomKeyStoreInput`](crate::input::CreateCustomKeyStoreInput) pub fn builder() -> crate::input::create_custom_key_store_input::Builder { crate::input::create_custom_key_store_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for CreateCustomKeyStore { type Output = std::result::Result< crate::output::CreateCustomKeyStoreOutput, crate::error::CreateCustomKeyStoreError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_create_custom_key_store_error(response) } else { crate::operation_deser::parse_create_custom_key_store_response(response) } } } /// <p>Adds a grant to a KMS key. 
</p> /// <p>A <i>grant</i> is a policy instrument that allows Amazon Web Services principals to use KMS keys in cryptographic operations. It also can allow them to view a KMS key (<a>DescribeKey</a>) and create and manage grants. When authorizing access to a KMS key, grants are considered along with key policies and IAM policies. Grants are often used for /// temporary permissions because you can create one, use its permissions, and delete it without /// changing your key policies or IAM policies. </p> /// <p>For detailed information about grants, including grant terminology, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/grants.html">Using grants</a> in the /// <i> /// <i>Key Management Service Developer Guide</i> /// </i>. For examples of working with grants in several /// programming languages, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html">Programming grants</a>. </p> /// <p>The <code>CreateGrant</code> operation returns a <code>GrantToken</code> and a /// <code>GrantId</code>.</p> /// <ul> /// <li> /// <p>When you create, retire, or revoke a grant, there might be a brief delay, usually less than five minutes, until the grant is available throughout KMS. This state is known as <i>eventual consistency</i>. Once the grant has achieved eventual consistency, the grantee principal /// can use the permissions in the grant without identifying the grant. </p> /// <p>However, to use the permissions in the grant immediately, use the /// <code>GrantToken</code> that <code>CreateGrant</code> returns. For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token">Using a grant /// token</a> in the <i> /// <i>Key Management Service Developer Guide</i> /// </i>.</p> /// </li> /// <li> /// <p>The <code>CreateGrant</code> operation also returns a <code>GrantId</code>. You can use the /// <code>GrantId</code> and a key identifier to identify the grant in the <a>RetireGrant</a> and <a>RevokeGrant</a> operations. To find the grant /// ID, use the <a>ListGrants</a> or <a>ListRetirableGrants</a> /// operations.</p> /// </li> /// </ul> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: Yes. To perform this operation on a KMS key in a different Amazon Web Services account, specify the key /// ARN in the value of the <code>KeyId</code> parameter. 
</p>
/// <p>
/// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:CreateGrant</a> (key policy)</p>
/// <p>
/// <b>Related operations:</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <a>ListGrants</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>ListRetirableGrants</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>RetireGrant</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>RevokeGrant</a>
/// </p>
/// </li>
/// </ul>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct CreateGrant {
    _private: (),
}
impl CreateGrant {
    /// Creates a new builder-style object to manufacture [`CreateGrantInput`](crate::input::CreateGrantInput)
    pub fn builder() -> crate::input::create_grant_input::Builder {
        crate::input::create_grant_input::Builder::default()
    }
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl smithy_http::response::ParseStrictResponse for CreateGrant {
    type Output =
        std::result::Result<crate::output::CreateGrantOutput, crate::error::CreateGrantError>;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_create_grant_error(response)
        } else {
            crate::operation_deser::parse_create_grant_response(response)
        }
    }
}
/// <p>Creates a unique customer managed <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms-keys">KMS key</a> in your Amazon Web Services account and Region.</p>
/// <note>
/// <p>KMS is replacing the term <i>customer master key (CMK)</i> with <i>KMS key</i>. The concept has not changed. To prevent breaking changes, KMS is keeping some variations of this term.</p>
/// </note>
/// <p>You can use the <code>CreateKey</code> operation to create symmetric or asymmetric KMS keys.</p>
/// <ul>
/// <li>
/// <p>
/// <b>Symmetric KMS keys</b> contain a 256-bit symmetric key that
/// never leaves KMS unencrypted. To use the KMS key, you must call KMS. You can use a
/// symmetric KMS key to encrypt and decrypt small amounts of data, but they are typically used to
/// generate <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys">data
/// keys</a> and <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-key-pairs">data key pairs</a>. For details,
/// see <a>GenerateDataKey</a> and <a>GenerateDataKeyPair</a>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Asymmetric KMS keys</b> can contain an RSA key pair or an
/// Elliptic Curve (ECC) key pair. The private key in an asymmetric KMS key never leaves KMS
/// unencrypted. However, you can use the <a>GetPublicKey</a> operation to download
/// the public key so it can be used outside of KMS. KMS keys with RSA key pairs can be used to
/// encrypt or decrypt data or sign and verify messages (but not both). KMS keys with ECC key
/// pairs can be used only to sign and verify messages.</p>
/// </li>
/// </ul>
/// <p>For information about symmetric and asymmetric KMS keys, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using Symmetric and Asymmetric KMS keys</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>To create different types of KMS keys, use the following guidance:</p>
/// <dl>
/// <dt>Asymmetric KMS keys</dt>
/// <dd>
/// <p>To create an asymmetric KMS key, use the <code>KeySpec</code> parameter to specify
/// the type of key material in the KMS key. 
Then, use the <code>KeyUsage</code> parameter
/// to determine whether the KMS key will be used to encrypt and decrypt or sign and verify.
/// You can't change these properties after the KMS key is created.</p>
/// <p> </p>
/// </dd>
/// <dt>Symmetric KMS keys</dt>
/// <dd>
/// <p>When creating a symmetric KMS key, you don't need to specify the
/// <code>KeySpec</code> or <code>KeyUsage</code> parameters. The default value for
/// <code>KeySpec</code>, <code>SYMMETRIC_DEFAULT</code>, and the default value for
/// <code>KeyUsage</code>, <code>ENCRYPT_DECRYPT</code>, are the only valid values for
/// symmetric KMS keys. </p>
/// <p> </p>
/// </dd>
/// <dt>Multi-Region primary keys</dt>
/// <dd>
/// <p>To create a multi-Region <i>primary key</i> in the local Amazon Web Services Region,
/// use the <code>MultiRegion</code> parameter with a value of <code>True</code>. To create
/// a multi-Region <i>replica key</i>, that is, a KMS key with the same key ID and
/// key material as a primary key, but in a different Amazon Web Services Region, use the <a>ReplicateKey</a> operation. To change a replica key to a primary key, and its
/// primary key to a replica key, use the <a>UpdatePrimaryRegion</a>
/// operation.</p>
/// <p>This operation supports <i>multi-Region keys</i>, a KMS feature that lets you create multiple
/// interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key
/// material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt
/// it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html">Using multi-Region keys</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>You can create symmetric and asymmetric multi-Region keys and multi-Region keys with
/// imported key material. You cannot create multi-Region keys in a custom key store.</p>
/// <p> </p>
/// </dd>
/// <dt>Imported key material</dt>
/// <dd>
/// <p>To import your own key material, begin by creating a symmetric KMS key with no key
/// material. To do this, use the <code>Origin</code> parameter of <code>CreateKey</code>
/// with a value of <code>EXTERNAL</code>. Next, use the <a>GetParametersForImport</a> operation to get a public key and import token, and use the public key to encrypt
/// your key material. Then, use <a>ImportKeyMaterial</a> with your import token
/// to import the key material. For step-by-step instructions, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html">Importing Key Material</a> in the
/// <i>Key Management Service Developer Guide</i>. You
/// cannot import the key material into an asymmetric KMS key.</p>
/// <p>To create a multi-Region primary key with imported key material, use the
/// <code>Origin</code> parameter of <code>CreateKey</code> with a value of
/// <code>EXTERNAL</code> and the <code>MultiRegion</code> parameter with a value of
/// <code>True</code>. To create replicas of the multi-Region primary key, use the <a>ReplicateKey</a> operation. 
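/// <p>For example, a minimal, hypothetical sketch of building that request with this
/// operation's input builder (the crate name, the <code>origin</code> and
/// <code>multi_region</code> setters, and the <code>OriginType</code> model enum are assumed
/// from the KMS API shape, not taken from this file):</p>
///
/// ```no_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// // Assumed setter and enum names: a multi-Region primary key with no key
/// // material, ready for ImportKeyMaterial.
/// let input = aws_sdk_kms::input::create_key_input::Builder::default()
///     .origin(aws_sdk_kms::model::OriginType::External)
///     .multi_region(true)
///     .build()?;
/// # let _ = input;
/// # Ok(()) }
/// ```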
For more information about multi-Region keys, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html">Using multi-Region keys</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p> </p>
/// </dd>
/// <dt>Custom key store</dt>
/// <dd>
/// <p>To create a symmetric KMS key in a <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">custom key store</a>, use the
/// <code>CustomKeyStoreId</code> parameter to specify the custom key store. You must also
/// use the <code>Origin</code> parameter with a value of <code>AWS_CLOUDHSM</code>. The
/// CloudHSM cluster that is associated with the custom key store must have at least two active
/// HSMs in different Availability Zones in the Amazon Web Services Region. </p>
/// <p>You cannot create an asymmetric KMS key in a custom key store. For information about
/// custom key stores in KMS, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">Using Custom Key Stores</a> in
/// the <i>Key Management Service Developer Guide</i>.</p>
/// </dd>
/// </dl>
/// <p>
/// <b>Cross-account use</b>: No. You cannot use this operation to
/// create a KMS key in a different Amazon Web Services account.</p>
/// <p>
/// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:CreateKey</a> (IAM policy). To use the
/// <code>Tags</code> parameter, <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:TagResource</a> (IAM policy). For examples and information about related
/// permissions, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policy-example-create-key">Allow a user to create KMS keys</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>
/// <b>Related operations:</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <a>DescribeKey</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>ListKeys</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>ScheduleKeyDeletion</a>
/// </p>
/// </li>
/// </ul>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct CreateKey {
    _private: (),
}
impl CreateKey {
    /// Creates a new builder-style object to manufacture [`CreateKeyInput`](crate::input::CreateKeyInput)
    pub fn builder() -> crate::input::create_key_input::Builder {
        crate::input::create_key_input::Builder::default()
    }
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl smithy_http::response::ParseStrictResponse for CreateKey {
    type Output = std::result::Result<crate::output::CreateKeyOutput, crate::error::CreateKeyError>;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_create_key_error(response)
        } else {
            crate::operation_deser::parse_create_key_response(response)
        }
    }
}
/// <p>Decrypts ciphertext that was encrypted by a KMS key using any of
/// the following operations:</p>
/// <ul>
/// <li>
/// <p>
/// <a>Encrypt</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>GenerateDataKey</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>GenerateDataKeyPair</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>GenerateDataKeyWithoutPlaintext</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>GenerateDataKeyPairWithoutPlaintext</a>
/// </p>
/// </li>
/// </ul>
/// <p>You can use this operation to decrypt ciphertext that was 
encrypted under a symmetric or
/// asymmetric KMS key. When the KMS key is asymmetric, you must specify the KMS key and the encryption
/// algorithm that was used to encrypt the ciphertext. For information about symmetric and asymmetric KMS keys, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using Symmetric and Asymmetric KMS keys</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>The Decrypt operation also decrypts ciphertext that was encrypted outside of KMS by the
/// public key in an asymmetric KMS key. However, it cannot decrypt ciphertext produced by other
/// libraries, such as the <a href="https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/">Amazon Web Services Encryption
/// SDK</a> or <a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html">Amazon S3 client-side encryption</a>. These libraries return a ciphertext format that
/// is incompatible with KMS.</p>
/// <p>If the ciphertext was encrypted under a symmetric KMS key, the <code>KeyId</code> parameter is
/// optional. KMS can get this information from metadata that it adds to the symmetric
/// ciphertext blob. This feature adds durability to your implementation by ensuring that
/// authorized users can decrypt ciphertext decades after it was encrypted, even if they've lost
/// track of the key ID. However, specifying the KMS key is always recommended as a best practice.
/// When you use the <code>KeyId</code> parameter to specify a KMS key, KMS only uses the KMS key you
/// specify. If the ciphertext was encrypted under a different KMS key, the <code>Decrypt</code>
/// operation fails. This practice ensures that you use the KMS key that you intend.</p>
/// <p>Whenever possible, use key policies to give users permission to call the
/// <code>Decrypt</code> operation on a particular KMS key, instead of using IAM policies.
/// Otherwise, you might create an IAM user policy that gives the user <code>Decrypt</code>
/// permission on all KMS keys. This user could decrypt ciphertext that was encrypted by KMS keys in other
/// accounts if the key policy for the cross-account KMS key permits it. If you must use an IAM policy
/// for <code>Decrypt</code> permissions, limit the user to particular KMS keys or particular trusted
/// accounts. For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policies-best-practices">Best practices for IAM policies</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>Applications in Amazon Web Services Nitro Enclaves can call this operation by using the <a href="https://github.com/aws/aws-nitro-enclaves-sdk-c">Amazon Web Services Nitro Enclaves Development Kit</a>. For information about the supporting parameters, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html">How Amazon Web Services Nitro Enclaves use KMS</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>The KMS key that you use for this operation must be in a compatible key state. For
/// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>
/// <b>Cross-account use</b>: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify
/// the key ARN or alias ARN in the value of the <code>KeyId</code> parameter. 
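/// <p>A minimal, hypothetical sketch of building a <code>Decrypt</code> request with this
/// operation's input builder. The crate name, the <code>ciphertext_blob</code> and
/// <code>key_id</code> setters, and the <code>Blob</code> wrapper type are assumed from the
/// KMS API shape, not taken from this file.</p>
///
/// ```no_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// # let ciphertext: Vec<u8> = vec![];
/// // Naming the key follows the best practice described above, even though
/// // `KeyId` is optional for ciphertext produced under a symmetric KMS key.
/// let input = aws_sdk_kms::input::decrypt_input::Builder::default()
///     .ciphertext_blob(smithy_types::Blob::new(ciphertext))
///     .key_id("alias/example-key") // placeholder alias
///     .build()?;
/// # let _ = input;
/// # Ok(()) }
/// ```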
</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:Decrypt</a> (key policy)</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>Encrypt</a> /// </p> /// </li> /// <li> /// <p> /// <a>GenerateDataKey</a> /// </p> /// </li> /// <li> /// <p> /// <a>GenerateDataKeyPair</a> /// </p> /// </li> /// <li> /// <p> /// <a>ReEncrypt</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct Decrypt { _private: (), } impl Decrypt { /// Creates a new builder-style object to manufacture [`DecryptInput`](crate::input::DecryptInput) pub fn builder() -> crate::input::decrypt_input::Builder { crate::input::decrypt_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for Decrypt { type Output = std::result::Result<crate::output::DecryptOutput, crate::error::DecryptError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_decrypt_error(response) } else { crate::operation_deser::parse_decrypt_response(response) } } } /// <p>Deletes the specified alias. </p> /// <note> /// <p>Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/abac.html">Using ABAC in KMS</a> in the <i>Key Management Service Developer Guide</i>.</p> /// </note> /// <p>Because an alias is not a property of a KMS key, you can delete and change the aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from the <a>DescribeKey</a> operation. To get the aliases of all KMS keys, use the <a>ListAliases</a> operation. </p> /// <p>Each KMS key can have multiple aliases. To change the alias of a KMS key, use <a>DeleteAlias</a> to delete the current alias and <a>CreateAlias</a> to /// create a new alias. To associate an existing alias with a different KMS key, /// call <a>UpdateAlias</a>.</p> /// <p> /// <b>Cross-account use</b>: No. 
You cannot perform this operation on an alias in a different Amazon Web Services account.</p>
/// <p>
/// <b>Required permissions</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:DeleteAlias</a> on the alias (IAM policy).</p>
/// </li>
/// <li>
/// <p>
/// <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:DeleteAlias</a> on the KMS key (key policy).</p>
/// </li>
/// </ul>
/// <p>For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access">Controlling access to aliases</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>
/// <b>Related operations:</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <a>CreateAlias</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>ListAliases</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>UpdateAlias</a>
/// </p>
/// </li>
/// </ul>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DeleteAlias {
    _private: (),
}
impl DeleteAlias {
    /// Creates a new builder-style object to manufacture [`DeleteAliasInput`](crate::input::DeleteAliasInput)
    pub fn builder() -> crate::input::delete_alias_input::Builder {
        crate::input::delete_alias_input::Builder::default()
    }
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl smithy_http::response::ParseStrictResponse for DeleteAlias {
    type Output = std::result::Result<crate::output::DeleteAliasOutput, crate::error::DeleteAliasError>;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_delete_alias_error(response)
        } else {
            crate::operation_deser::parse_delete_alias_response(response)
        }
    }
}
/// <p>Deletes a <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">custom key store</a>. This operation does not delete the CloudHSM cluster that is
/// associated with the custom key store, or affect any users or keys in the cluster.</p>
/// <p>The custom key store that you delete cannot contain any <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys">KMS keys</a>. Before
/// deleting the key store, verify that you will never need to use any of the KMS keys in the key
/// store for any <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations">cryptographic operations</a>. Then, use <a>ScheduleKeyDeletion</a> to delete the
/// KMS keys from the key store. When the scheduled waiting period
/// expires, the <code>ScheduleKeyDeletion</code> operation deletes the KMS keys. Then it makes a best
/// effort to delete the key material from the associated cluster. However, you might need to
/// manually <a href="https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key">delete the orphaned key
/// material</a> from the cluster and its backups.</p>
/// <p>After all KMS keys are deleted from KMS, use <a>DisconnectCustomKeyStore</a> to
/// disconnect the key store from KMS. Then, you can delete the custom key store.</p>
/// <p>Instead of deleting the custom key store, consider using <a>DisconnectCustomKeyStore</a> to disconnect it from KMS. While the key store is
/// disconnected, you cannot create or use the KMS keys in the key store. 
But, you do not need to
/// delete KMS keys and you can reconnect a disconnected custom key store at any time.</p>
/// <p>If the operation succeeds, it returns a JSON object with no
/// properties.</p>
/// <p>This operation is part of the <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">Custom Key Store feature</a> in KMS, which
/// combines the convenience and extensive integration of KMS with the isolation and control of a
/// single-tenant key store.</p>
/// <p>
/// <b>Cross-account use</b>: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.</p>
/// <p>
/// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:DeleteCustomKeyStore</a> (IAM policy)</p>
/// <p>
/// <b>Related operations:</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <a>ConnectCustomKeyStore</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>CreateCustomKeyStore</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>DescribeCustomKeyStores</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>DisconnectCustomKeyStore</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>UpdateCustomKeyStore</a>
/// </p>
/// </li>
/// </ul>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DeleteCustomKeyStore {
    _private: (),
}
impl DeleteCustomKeyStore {
    /// Creates a new builder-style object to manufacture [`DeleteCustomKeyStoreInput`](crate::input::DeleteCustomKeyStoreInput)
    pub fn builder() -> crate::input::delete_custom_key_store_input::Builder {
        crate::input::delete_custom_key_store_input::Builder::default()
    }
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl smithy_http::response::ParseStrictResponse for DeleteCustomKeyStore {
    type Output = std::result::Result<
        crate::output::DeleteCustomKeyStoreOutput,
        crate::error::DeleteCustomKeyStoreError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_delete_custom_key_store_error(response)
        } else {
            crate::operation_deser::parse_delete_custom_key_store_response(response)
        }
    }
}
/// <p>Deletes key material that you previously imported. This operation makes the specified
/// KMS key unusable. For more information about importing key material into
/// KMS, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html">Importing Key
/// Material</a> in the <i>Key Management Service Developer Guide</i>. </p>
/// <p>When the specified KMS key is in the <code>PendingDeletion</code> state, this operation does
/// not change the KMS key's state. Otherwise, it changes the KMS key's state to
/// <code>PendingImport</code>.</p>
/// <p>After you delete key material, you can use <a>ImportKeyMaterial</a> to reimport
/// the same key material into the KMS key.</p>
/// <p>The KMS key that you use for this operation must be in a compatible key state. For
/// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p>
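/// <p>A minimal, hypothetical sketch of building this request (the crate name and the
/// <code>key_id</code> setter are assumed from the KMS API shape, not taken from this
/// file):</p>
///
/// ```no_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let input = aws_sdk_kms::input::delete_imported_key_material_input::Builder::default()
///     .key_id("1234abcd-12ab-34cd-56ef-1234567890ab") // placeholder key ID
///     .build()?;
/// # let _ = input;
/// # Ok(()) }
/// ```
/// <p>
/// <b>Cross-account use</b>: No. 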
You cannot perform this operation on a KMS key in a different Amazon Web Services account.</p>
/// <p>
/// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:DeleteImportedKeyMaterial</a> (key policy)</p>
/// <p>
/// <b>Related operations:</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <a>GetParametersForImport</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>ImportKeyMaterial</a>
/// </p>
/// </li>
/// </ul>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DeleteImportedKeyMaterial {
    _private: (),
}
impl DeleteImportedKeyMaterial {
    /// Creates a new builder-style object to manufacture [`DeleteImportedKeyMaterialInput`](crate::input::DeleteImportedKeyMaterialInput)
    pub fn builder() -> crate::input::delete_imported_key_material_input::Builder {
        crate::input::delete_imported_key_material_input::Builder::default()
    }
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl smithy_http::response::ParseStrictResponse for DeleteImportedKeyMaterial {
    type Output = std::result::Result<
        crate::output::DeleteImportedKeyMaterialOutput,
        crate::error::DeleteImportedKeyMaterialError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_delete_imported_key_material_error(response)
        } else {
            crate::operation_deser::parse_delete_imported_key_material_response(response)
        }
    }
}
/// <p>Gets information about <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">custom key stores</a> in the account and Region.</p>
/// <p>This operation is part of the <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">Custom Key Store feature</a> in KMS, which
/// combines the convenience and extensive integration of KMS with the isolation and control of a
/// single-tenant key store.</p>
/// <p>By default, this operation returns information about all custom key stores in the account and
/// Region. To get only information about a particular custom key store, use either the
/// <code>CustomKeyStoreName</code> or <code>CustomKeyStoreId</code> parameter (but not
/// both).</p>
/// <p>To determine whether the custom key store is connected to its CloudHSM cluster, use the
/// <code>ConnectionState</code> element in the response. If an attempt to connect the custom
/// key store failed, the <code>ConnectionState</code> value is <code>FAILED</code> and the
/// <code>ConnectionErrorCode</code> element in the response indicates the cause of the failure.
/// For help interpreting the <code>ConnectionErrorCode</code>, see <a>CustomKeyStoresListEntry</a>.</p>
/// <p>Custom key stores have a <code>DISCONNECTED</code> connection state if the key store has
/// never been connected or you use the <a>DisconnectCustomKeyStore</a> operation to
/// disconnect it. If your custom key store state is <code>CONNECTED</code> but you are having
/// trouble using it, make sure that its associated CloudHSM cluster is active and contains the
/// minimum number of HSMs required for the operation, if any.</p>
/// <p> For help repairing your custom key store, see the <a href="https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html">Troubleshooting Custom Key Stores</a> topic in the
/// <i>Key Management Service Developer Guide</i>.</p>
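/// <p>A minimal, hypothetical sketch of building this request (the crate name and setter
/// names are assumed from the KMS API shape; per the docs above, set a name or an ID, but
/// not both):</p>
///
/// ```no_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let input = aws_sdk_kms::input::describe_custom_key_stores_input::Builder::default()
///     .custom_key_store_name("ExampleKeyStore") // placeholder name
///     .build()?;
/// # let _ = input;
/// # Ok(()) }
/// ```
/// <p>
/// <b>Cross-account use</b>: No. 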
You cannot perform this operation on a custom key store in a different Amazon Web Services account.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:DescribeCustomKeyStores</a> (IAM policy)</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>ConnectCustomKeyStore</a> /// </p> /// </li> /// <li> /// <p> /// <a>CreateCustomKeyStore</a> /// </p> /// </li> /// <li> /// <p> /// <a>DeleteCustomKeyStore</a> /// </p> /// </li> /// <li> /// <p> /// <a>DisconnectCustomKeyStore</a> /// </p> /// </li> /// <li> /// <p> /// <a>UpdateCustomKeyStore</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct DescribeCustomKeyStores { _private: (), } impl DescribeCustomKeyStores { /// Creates a new builder-style object to manufacture [`DescribeCustomKeyStoresInput`](crate::input::DescribeCustomKeyStoresInput) pub fn builder() -> crate::input::describe_custom_key_stores_input::Builder { crate::input::describe_custom_key_stores_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for DescribeCustomKeyStores { type Output = std::result::Result< crate::output::DescribeCustomKeyStoresOutput, crate::error::DescribeCustomKeyStoresError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_describe_custom_key_stores_error(response) } else { crate::operation_deser::parse_describe_custom_key_stores_response(response) } } } /// <p>Provides detailed information about a KMS key. You can run /// <code>DescribeKey</code> on a <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk">customer managed key</a> or an <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk">Amazon Web Services managed key</a>.</p> /// <p>This detailed information includes the key ARN, creation date (and deletion date, if /// applicable), the key state, and the origin and expiration date (if any) of the key material. /// It includes fields, like <code>KeySpec</code>, that help you distinguish symmetric from /// asymmetric KMS keys. It also provides information that is particularly important to asymmetric /// keys, such as the key usage (encryption or signing) and the encryption algorithms or signing /// algorithms that the KMS key supports. For KMS keys in custom key stores, it includes information about /// the custom key store, such as the key store ID and the CloudHSM cluster ID. For multi-Region /// keys, it displays the primary key and all related replica keys. </p> /// <p> /// <code>DescribeKey</code> does not return the following information:</p> /// <ul> /// <li> /// <p>Aliases associated with the KMS key. To get this information, use <a>ListAliases</a>.</p> /// </li> /// <li> /// <p>Whether automatic key rotation is enabled on the KMS key. To get this information, use /// <a>GetKeyRotationStatus</a>. Also, some key states prevent a KMS key from being /// automatically rotated. For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-how-it-works">How Automatic Key Rotation /// Works</a> in <i>Key Management Service Developer Guide</i>.</p> /// </li> /// <li> /// <p>Tags on the KMS key. 
To get this information, use <a>ListResourceTags</a>.</p> /// </li> /// <li> /// <p>Key policies and grants on the KMS key. To get this information, use <a>GetKeyPolicy</a> and <a>ListGrants</a>.</p> /// </li> /// </ul> /// <p>If you call the <code>DescribeKey</code> operation on a <i>predefined Amazon Web Services alias</i>, that is, an Amazon Web Services alias with no key ID, KMS creates an <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk">Amazon Web Services managed key</a>. /// Then, it associates the alias with the new KMS key, and returns the <code>KeyId</code> and /// <code>Arn</code> of the new KMS key in the response.</p> /// <p> /// <b>Cross-account use</b>: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify /// the key ARN or alias ARN in the value of the <code>KeyId</code> parameter.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:DescribeKey</a> (key policy)</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>GetKeyPolicy</a> /// </p> /// </li> /// <li> /// <p> /// <a>GetKeyRotationStatus</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListAliases</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListGrants</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListKeys</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListResourceTags</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListRetirableGrants</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct DescribeKey { _private: (), } impl DescribeKey { /// Creates a new builder-style object to manufacture [`DescribeKeyInput`](crate::input::DescribeKeyInput) pub fn builder() -> crate::input::describe_key_input::Builder { crate::input::describe_key_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for DescribeKey { type Output = std::result::Result<crate::output::DescribeKeyOutput, crate::error::DescribeKeyError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_describe_key_error(response) } else { crate::operation_deser::parse_describe_key_response(response) } } } /// <p>Sets the state of a KMS key to disabled. This change temporarily /// prevents use of the KMS key for <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations">cryptographic operations</a>. </p> /// <p>For more information about how key state affects the use of a KMS key, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i> /// <i>Key Management Service Developer Guide</i> /// </i>.</p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: No. 
You cannot perform this operation on a KMS key in a different Amazon Web Services account.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:DisableKey</a> (key policy)</p> /// <p> /// <b>Related operations</b>: <a>EnableKey</a> /// </p> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct DisableKey { _private: (), } impl DisableKey { /// Creates a new builder-style object to manufacture [`DisableKeyInput`](crate::input::DisableKeyInput) pub fn builder() -> crate::input::disable_key_input::Builder { crate::input::disable_key_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for DisableKey { type Output = std::result::Result<crate::output::DisableKeyOutput, crate::error::DisableKeyError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_disable_key_error(response) } else { crate::operation_deser::parse_disable_key_response(response) } } } /// <p>Disables <a href="https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html">automatic /// rotation of the key material</a> for the specified symmetric KMS key.</p> /// <p> You cannot enable automatic rotation of <a href="https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks">asymmetric KMS keys</a>, KMS keys with <a href="https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html">imported key material</a>, or KMS keys in a <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">custom key store</a>. To enable or disable automatic rotation of a set of related <a href="https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html#mrk-replica-key">multi-Region keys</a>, set the property on the primary key. </p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: No. 
You cannot perform this operation on a KMS key in a different Amazon Web Services account.</p>
/// <p>
/// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:DisableKeyRotation</a> (key policy)</p>
/// <p>
/// <b>Related operations:</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <a>EnableKeyRotation</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>GetKeyRotationStatus</a>
/// </p>
/// </li>
/// </ul>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct DisableKeyRotation {
    _private: (),
}
impl DisableKeyRotation {
    /// Creates a new builder-style object to manufacture [`DisableKeyRotationInput`](crate::input::DisableKeyRotationInput)
    pub fn builder() -> crate::input::disable_key_rotation_input::Builder {
        crate::input::disable_key_rotation_input::Builder::default()
    }
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl smithy_http::response::ParseStrictResponse for DisableKeyRotation {
    type Output = std::result::Result<
        crate::output::DisableKeyRotationOutput,
        crate::error::DisableKeyRotationError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_disable_key_rotation_error(response)
        } else {
            crate::operation_deser::parse_disable_key_rotation_response(response)
        }
    }
}
/// <p>Disconnects the <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">custom key store</a> from its associated CloudHSM cluster. While a custom key
/// store is disconnected, you can manage the custom key store and its KMS keys, but you cannot create or use KMS keys in the custom key store. You can reconnect the
/// custom key store at any time.</p>
/// <note>
/// <p>While a custom key store is disconnected, all attempts to create KMS keys in the custom key store or to use existing KMS keys in <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations">cryptographic operations</a> will
/// fail. This action can prevent users from storing and accessing sensitive data.</p>
/// </note>
/// <p></p>
/// <p>To find the connection state of a custom key store, use the <a>DescribeCustomKeyStores</a> operation. To reconnect a custom key store, use the
/// <a>ConnectCustomKeyStore</a> operation.</p>
/// <p>If the operation succeeds, it returns a JSON object with no
/// properties.</p>
/// <p>This operation is part of the <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">Custom Key Store feature</a> in KMS, which
/// combines the convenience and extensive integration of KMS with the isolation and control of a
/// single-tenant key store.</p>
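/// <p>A minimal, hypothetical sketch of building this request (the crate name and the
/// <code>custom_key_store_id</code> setter are assumed from the KMS API shape, not taken
/// from this file):</p>
///
/// ```no_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let input = aws_sdk_kms::input::disconnect_custom_key_store_input::Builder::default()
///     .custom_key_store_id("cks-1234567890abcdef0") // placeholder store ID
///     .build()?;
/// # let _ = input;
/// # Ok(()) }
/// ```
/// <p>
/// <b>Cross-account use</b>: No. 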
You cannot perform this operation on a custom key store in a different Amazon Web Services account.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:DisconnectCustomKeyStore</a> (IAM policy)</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>ConnectCustomKeyStore</a> /// </p> /// </li> /// <li> /// <p> /// <a>CreateCustomKeyStore</a> /// </p> /// </li> /// <li> /// <p> /// <a>DeleteCustomKeyStore</a> /// </p> /// </li> /// <li> /// <p> /// <a>DescribeCustomKeyStores</a> /// </p> /// </li> /// <li> /// <p> /// <a>UpdateCustomKeyStore</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct DisconnectCustomKeyStore { _private: (), } impl DisconnectCustomKeyStore { /// Creates a new builder-style object to manufacture [`DisconnectCustomKeyStoreInput`](crate::input::DisconnectCustomKeyStoreInput) pub fn builder() -> crate::input::disconnect_custom_key_store_input::Builder { crate::input::disconnect_custom_key_store_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for DisconnectCustomKeyStore { type Output = std::result::Result< crate::output::DisconnectCustomKeyStoreOutput, crate::error::DisconnectCustomKeyStoreError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_disconnect_custom_key_store_error(response) } else { crate::operation_deser::parse_disconnect_custom_key_store_response(response) } } } /// <p>Sets the key state of a KMS key to enabled. This allows you to use the KMS key for <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations">cryptographic operations</a>. </p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: No. 
You cannot perform this operation on a KMS key in a different Amazon Web Services account.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:EnableKey</a> (key policy)</p> /// <p> /// <b>Related operations</b>: <a>DisableKey</a> /// </p> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct EnableKey { _private: (), } impl EnableKey { /// Creates a new builder-style object to manufacture [`EnableKeyInput`](crate::input::EnableKeyInput) pub fn builder() -> crate::input::enable_key_input::Builder { crate::input::enable_key_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for EnableKey { type Output = std::result::Result<crate::output::EnableKeyOutput, crate::error::EnableKeyError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_enable_key_error(response) } else { crate::operation_deser::parse_enable_key_response(response) } } } /// <p>Enables <a href="https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html">automatic rotation /// of the key material</a> for the specified symmetric KMS key.</p> /// <p>You cannot enable automatic rotation of <a href="https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks">asymmetric KMS keys</a>, KMS keys with <a href="https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html">imported key material</a>, or KMS keys in a <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">custom key store</a>. To enable or disable automatic rotation of a set of related <a href="https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html#mrk-replica-key">multi-Region keys</a>, set the property on the primary key.</p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: No. 
You cannot perform this operation on a KMS key in a different Amazon Web Services account.</p>
/// <p>
/// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:EnableKeyRotation</a> (key policy)</p>
/// <p>
/// <b>Related operations:</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <a>DisableKeyRotation</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>GetKeyRotationStatus</a>
/// </p>
/// </li>
/// </ul>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct EnableKeyRotation {
    _private: (),
}
impl EnableKeyRotation {
    /// Creates a new builder-style object to manufacture [`EnableKeyRotationInput`](crate::input::EnableKeyRotationInput)
    pub fn builder() -> crate::input::enable_key_rotation_input::Builder {
        crate::input::enable_key_rotation_input::Builder::default()
    }
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl smithy_http::response::ParseStrictResponse for EnableKeyRotation {
    type Output = std::result::Result<
        crate::output::EnableKeyRotationOutput,
        crate::error::EnableKeyRotationError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_enable_key_rotation_error(response)
        } else {
            crate::operation_deser::parse_enable_key_rotation_response(response)
        }
    }
}
/// <p>Encrypts plaintext into ciphertext by using a KMS key. The
/// <code>Encrypt</code> operation has two primary use cases:</p>
/// <ul>
/// <li>
/// <p>You can encrypt small amounts of arbitrary data, such as a personal identifier or
/// database password, or other sensitive information. </p>
/// </li>
/// <li>
/// <p>You can use the <code>Encrypt</code> operation to move encrypted data from one Amazon Web Services Region to another. For example, in Region A, generate a data key and use the plaintext key to encrypt
/// your data. Then, in Region A, use the <code>Encrypt</code> operation to encrypt the
/// plaintext data key under a KMS key in Region B. Now, you can move the encrypted data and the
/// encrypted data key to Region B. When necessary, you can decrypt the encrypted data key and
/// the encrypted data entirely within Region B.</p>
/// </li>
/// </ul>
/// <p>You don't need to use the <code>Encrypt</code> operation to encrypt a data key. The <a>GenerateDataKey</a> and <a>GenerateDataKeyPair</a> operations return a
/// plaintext data key and an encrypted copy of that data key.</p>
/// <p>When you encrypt data, you must specify a symmetric or asymmetric KMS key to use in the
/// encryption operation. The KMS key must have a <code>KeyUsage</code> value of
/// <code>ENCRYPT_DECRYPT</code>. To find the <code>KeyUsage</code> of a KMS key, use the <a>DescribeKey</a> operation. </p>
/// <p>If you use a symmetric KMS key, you can use an encryption context to add additional security
/// to your encryption operation. If you specify an <code>EncryptionContext</code> when encrypting
/// data, you must specify the same encryption context (a case-sensitive exact match) when
/// decrypting the data. Otherwise, the request to decrypt fails with an
/// <code>InvalidCiphertextException</code>. For more information, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context">Encryption
/// Context</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>If you specify an asymmetric KMS key, you must also specify the encryption algorithm. 
The
/// algorithm must be compatible with the KMS key type.</p>
/// <important>
/// <p>When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.</p>
/// <p>You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.</p>
/// </important>
/// <p>The maximum size of the data that you can encrypt varies with the type of KMS key and the
/// encryption algorithm that you choose.</p>
/// <ul>
/// <li>
/// <p>Symmetric KMS keys</p>
/// <ul>
/// <li>
/// <p>
/// <code>SYMMETRIC_DEFAULT</code>: 4096 bytes</p>
/// </li>
/// </ul>
/// </li>
/// <li>
/// <p>
/// <code>RSA_2048</code>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <code>RSAES_OAEP_SHA_1</code>: 214 bytes</p>
/// </li>
/// <li>
/// <p>
/// <code>RSAES_OAEP_SHA_256</code>: 190 bytes</p>
/// </li>
/// </ul>
/// </li>
/// <li>
/// <p>
/// <code>RSA_3072</code>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <code>RSAES_OAEP_SHA_1</code>: 342 bytes</p>
/// </li>
/// <li>
/// <p>
/// <code>RSAES_OAEP_SHA_256</code>: 318 bytes</p>
/// </li>
/// </ul>
/// </li>
/// <li>
/// <p>
/// <code>RSA_4096</code>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <code>RSAES_OAEP_SHA_1</code>: 470 bytes</p>
/// </li>
/// <li>
/// <p>
/// <code>RSAES_OAEP_SHA_256</code>: 446 bytes</p>
/// </li>
/// </ul>
/// </li>
/// </ul>
/// <p>The KMS key that you use for this operation must be in a compatible key state. For
/// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p>
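/// <p>A minimal, hypothetical sketch of building an <code>Encrypt</code> request (the crate
/// name, the <code>key_id</code> and <code>plaintext</code> setters, and the
/// <code>Blob</code> wrapper type are assumed from the KMS API shape, not taken from this
/// file):</p>
///
/// ```no_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let input = aws_sdk_kms::input::encrypt_input::Builder::default()
///     .key_id("alias/example-key") // placeholder alias
///     .plaintext(smithy_types::Blob::new(b"sensitive data".to_vec()))
///     .build()?;
/// # let _ = input;
/// # Ok(()) }
/// ```
/// <p>
/// <b>Cross-account use</b>: Yes. 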
To perform this operation with a KMS key in a different Amazon Web Services account, specify /// the key ARN or alias ARN in the value of the <code>KeyId</code> parameter.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:Encrypt</a> (key policy)</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>Decrypt</a> /// </p> /// </li> /// <li> /// <p> /// <a>GenerateDataKey</a> /// </p> /// </li> /// <li> /// <p> /// <a>GenerateDataKeyPair</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct Encrypt { _private: (), } impl Encrypt { /// Creates a new builder-style object to manufacture [`EncryptInput`](crate::input::EncryptInput) pub fn builder() -> crate::input::encrypt_input::Builder { crate::input::encrypt_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for Encrypt { type Output = std::result::Result<crate::output::EncryptOutput, crate::error::EncryptError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_encrypt_error(response) } else { crate::operation_deser::parse_encrypt_response(response) } } } /// <p>Generates a unique symmetric data key for client-side encryption. This operation returns a /// plaintext copy of the data key and a copy that is encrypted under a KMS key /// that you specify. You can use the plaintext key to encrypt your data outside of KMS and /// store the encrypted data key with the encrypted data.</p> /// <p> /// <code>GenerateDataKey</code> returns a unique data key for each request. The bytes in the /// plaintext key are not related to the caller or the KMS key.</p> /// <p>To generate a data key, specify the symmetric KMS key that will be used to encrypt the data /// key. You cannot use an asymmetric KMS key to generate data keys. To get the type of your KMS key, use /// the <a>DescribeKey</a> operation. You must also specify the length of the data key. /// Use either the <code>KeySpec</code> or <code>NumberOfBytes</code> parameters (but not both). /// For 128-bit and 256-bit data keys, use the <code>KeySpec</code> parameter. </p> /// <p>To get only an encrypted copy of the data key, use <a>GenerateDataKeyWithoutPlaintext</a>. To generate an asymmetric data key pair, use /// the <a>GenerateDataKeyPair</a> or <a>GenerateDataKeyPairWithoutPlaintext</a> operation. To get a cryptographically secure /// random byte string, use <a>GenerateRandom</a>.</p> /// <p>You can use the optional encryption context to add additional security to the encryption /// operation. If you specify an <code>EncryptionContext</code>, you must specify the same /// encryption context (a case-sensitive exact match) when decrypting the encrypted data key. /// Otherwise, the request to decrypt fails with an <code>InvalidCiphertextException</code>. For more information, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context">Encryption Context</a> in the /// <i>Key Management Service Developer Guide</i>.</p> /// <p>Applications in Amazon Web Services Nitro Enclaves can call this operation by using the <a href="https://github.com/aws/aws-nitro-enclaves-sdk-c">Amazon Web Services Nitro Enclaves Development Kit</a>. 
For information about the supporting parameters, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html">How Amazon Web Services Nitro Enclaves use KMS</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>The KMS key that you use for this operation must be in a compatible key state. For
/// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>
/// <b>How to use your data key</b>
/// </p>
/// <p>We recommend that you use the following pattern to encrypt data locally in your application.
/// You can write your own code or use a client-side encryption library, such as the <a href="https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/">Amazon Web Services Encryption SDK</a>, the <a href="https://docs.aws.amazon.com/dynamodb-encryption-client/latest/devguide/">Amazon DynamoDB Encryption Client</a>, or
/// <a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html">Amazon S3
/// client-side encryption</a> to do these tasks for you.</p>
/// <p>To encrypt data outside of KMS:</p>
/// <ol>
/// <li>
/// <p>Use the <code>GenerateDataKey</code> operation to get a data key.</p>
/// </li>
/// <li>
/// <p>Use the plaintext data key (in the <code>Plaintext</code> field of the response) to
/// encrypt your data outside of KMS. Then erase the plaintext data key from memory.</p>
/// </li>
/// <li>
/// <p>Store the encrypted data key (in the <code>CiphertextBlob</code> field of the
/// response) with the encrypted data.</p>
/// </li>
/// </ol>
/// <p>To decrypt data outside of KMS:</p>
/// <ol>
/// <li>
/// <p>Use the <a>Decrypt</a> operation to decrypt the encrypted data key. The
/// operation returns a plaintext copy of the data key.</p>
/// </li>
/// <li>
/// <p>Use the plaintext data key to decrypt data outside of KMS, then erase the plaintext
/// data key from memory.</p>
/// </li>
/// </ol>
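/// <p>A minimal, hypothetical sketch of step 1 of the pattern above (the crate name, the
/// <code>key_id</code> and <code>key_spec</code> setters, and the <code>DataKeySpec</code>
/// model enum are assumed from the KMS API shape, not taken from this file):</p>
///
/// ```no_run
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// // Step 1: request a 256-bit data key under the specified symmetric KMS key.
/// let input = aws_sdk_kms::input::generate_data_key_input::Builder::default()
///     .key_id("alias/example-key") // placeholder alias
///     .key_spec(aws_sdk_kms::model::DataKeySpec::Aes256)
///     .build()?;
/// // Steps 2 and 3: encrypt locally with the `Plaintext` field of the response,
/// // erase it from memory, and store the `CiphertextBlob` with the encrypted data.
/// # let _ = input;
/// # Ok(()) }
/// ```
/// <p>
/// <b>Cross-account use</b>: Yes. 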
To perform this operation with a KMS key in a different Amazon Web Services account, specify
/// the key ARN or alias ARN in the value of the <code>KeyId</code> parameter.</p>
/// <p>
/// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:GenerateDataKey</a> (key policy)</p>
/// <p>
/// <b>Related operations:</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <a>Decrypt</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>Encrypt</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>GenerateDataKeyPair</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>GenerateDataKeyPairWithoutPlaintext</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>GenerateDataKeyWithoutPlaintext</a>
/// </p>
/// </li>
/// </ul>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct GenerateDataKey {
    _private: (),
}
impl GenerateDataKey {
    /// Creates a new builder-style object to manufacture [`GenerateDataKeyInput`](crate::input::GenerateDataKeyInput)
    pub fn builder() -> crate::input::generate_data_key_input::Builder {
        crate::input::generate_data_key_input::Builder::default()
    }
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl smithy_http::response::ParseStrictResponse for GenerateDataKey {
    type Output = std::result::Result<
        crate::output::GenerateDataKeyOutput,
        crate::error::GenerateDataKeyError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_generate_data_key_error(response)
        } else {
            crate::operation_deser::parse_generate_data_key_response(response)
        }
    }
}
/// <p>Generates a unique asymmetric data key pair. The <code>GenerateDataKeyPair</code>
/// operation returns a plaintext public key, a plaintext private key, and a copy of the private
/// key that is encrypted under the symmetric KMS key you specify. You can use the data key pair to
/// perform asymmetric cryptography and implement digital signatures outside of KMS.</p>
/// <p>You can use the public key that <code>GenerateDataKeyPair</code> returns to encrypt data
/// or verify a signature outside of KMS. Then, store the encrypted private key with the data.
/// When you are ready to decrypt data or sign a message, you can use the <a>Decrypt</a> operation to decrypt the encrypted private key.</p>
/// <p>To generate a data key pair, you must specify a symmetric KMS key to
/// encrypt the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a
/// custom key store. To get the type and origin of your KMS key, use the <a>DescribeKey</a> operation. </p>
/// <p>Use the <code>KeyPairSpec</code> parameter to choose an RSA or Elliptic Curve (ECC) data
/// key pair. KMS recommends that you use ECC key pairs for signing, and use RSA key pairs
/// for either encryption or signing, but not both. However, KMS cannot enforce any restrictions
/// on the use of data key pairs outside of KMS.</p>
/// <p>If you are using the data key pair to encrypt data, or for any operation where you don't
/// immediately need a private key, consider using the <a>GenerateDataKeyPairWithoutPlaintext</a> operation.
/// <code>GenerateDataKeyPairWithoutPlaintext</code> returns a plaintext public key and an
/// encrypted private key, but omits the plaintext private key that you need only to decrypt
/// ciphertext or sign a message. 
Later, when you need to decrypt the data or sign a message, use /// the <a>Decrypt</a> operation to decrypt the encrypted private key in the data key /// pair.</p> /// <p> /// <code>GenerateDataKeyPair</code> returns a unique data key pair for each request. The /// bytes in the keys are not related to the caller or the KMS key that is used to encrypt the private /// key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in <a href="https://tools.ietf.org/html/rfc5280">RFC 5280</a>. The /// private key is a DER-encoded PKCS8 PrivateKeyInfo, as specified in <a href="https://tools.ietf.org/html/rfc5958">RFC /// 5958</a>.</p> /// <p>You can use the optional encryption context to add additional security to the encryption /// operation. If you specify an <code>EncryptionContext</code>, you must specify the same /// encryption context (a case-sensitive exact match) when decrypting the encrypted data key. /// Otherwise, the request to decrypt fails with an <code>InvalidCiphertextException</code>. For more information, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context">Encryption Context</a> in the /// <i>Key Management Service Developer Guide</i>.</p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify /// the key ARN or alias ARN in the value of the <code>KeyId</code> parameter.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:GenerateDataKeyPair</a> (key policy)</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>Decrypt</a> /// </p> /// </li> /// <li> /// <p> /// <a>Encrypt</a> /// </p> /// </li> /// <li> /// <p> /// <a>GenerateDataKey</a> /// </p> /// </li> /// <li> /// <p> /// <a>GenerateDataKeyPairWithoutPlaintext</a> /// </p> /// </li> /// <li> /// <p> /// <a>GenerateDataKeyWithoutPlaintext</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct GenerateDataKeyPair { _private: (), } impl GenerateDataKeyPair { /// Creates a new builder-style object to manufacture [`GenerateDataKeyPairInput`](crate::input::GenerateDataKeyPairInput) pub fn builder() -> crate::input::generate_data_key_pair_input::Builder { crate::input::generate_data_key_pair_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for GenerateDataKeyPair { type Output = std::result::Result< crate::output::GenerateDataKeyPairOutput, crate::error::GenerateDataKeyPairError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_generate_data_key_pair_error(response) } else { crate::operation_deser::parse_generate_data_key_pair_response(response) } } } /// <p>Generates a unique asymmetric data key pair. The /// <code>GenerateDataKeyPairWithoutPlaintext</code> operation returns a plaintext public key /// and a copy of the private key that is encrypted under the symmetric KMS key you specify. 
Unlike /// <a>GenerateDataKeyPair</a>, this operation does not return a plaintext private /// key. </p> /// <p>You can use the public key that <code>GenerateDataKeyPairWithoutPlaintext</code> returns /// to encrypt data or verify a signature outside of KMS. Then, store the encrypted private key /// with the data. When you are ready to decrypt data or sign a message, you can use the <a>Decrypt</a> operation to decrypt the encrypted private key.</p> /// <p>To generate a data key pair, you must specify a symmetric KMS key to /// encrypt the private key in a data key pair. You cannot use an asymmetric KMS key or a KMS key in a /// custom key store. To get the type and origin of your KMS key, use the <a>DescribeKey</a> operation. </p> /// <p>Use the <code>KeyPairSpec</code> parameter to choose an RSA or Elliptic Curve (ECC) data /// key pair. KMS recommends that you use ECC key pairs for signing, and use RSA key pairs /// for either encryption or signing, but not both. However, KMS cannot enforce any restrictions /// on the use of data key pairs outside of KMS.</p> /// <p> /// <code>GenerateDataKeyPairWithoutPlaintext</code> returns a unique data key pair for each /// request. The bytes in the key are not related to the caller or KMS key that is used to encrypt the /// private key. The public key is a DER-encoded X.509 SubjectPublicKeyInfo, as specified in /// <a href="https://tools.ietf.org/html/rfc5280">RFC 5280</a>.</p> /// <p>You can use the optional encryption context to add additional security to the encryption /// operation. If you specify an <code>EncryptionContext</code>, you must specify the same /// encryption context (a case-sensitive exact match) when decrypting the encrypted data key. /// Otherwise, the request to decrypt fails with an <code>InvalidCiphertextException</code>. For more information, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context">Encryption Context</a> in the /// <i>Key Management Service Developer Guide</i>.</p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: Yes. 
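/// </p>
/// <p>A minimal, illustrative sketch (client construction and identifiers are assumptions,
/// not generated documentation):</p>
/// <pre><code>
/// let client = kms::Client::from_env(); // assumed client construction
/// let pair = client
///     .generate_data_key_pair_without_plaintext()
///     .key_id("alias/example-key") // assumed alias of a symmetric KMS key
///     .key_pair_spec(kms::model::DataKeyPairSpec::Rsa2048)
///     .send()
///     .await?;
/// // Only the public key and the encrypted private key are returned; decrypt
/// // the private key later with the Decrypt operation when you need it.
/// </code></pre>
/// <p>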
To perform this operation with a KMS key in a different Amazon Web Services account, specify /// the key ARN or alias ARN in the value of the <code>KeyId</code> parameter.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:GenerateDataKeyPairWithoutPlaintext</a> (key policy)</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>Decrypt</a> /// </p> /// </li> /// <li> /// <p> /// <a>Encrypt</a> /// </p> /// </li> /// <li> /// <p> /// <a>GenerateDataKey</a> /// </p> /// </li> /// <li> /// <p> /// <a>GenerateDataKeyPair</a> /// </p> /// </li> /// <li> /// <p> /// <a>GenerateDataKeyWithoutPlaintext</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct GenerateDataKeyPairWithoutPlaintext { _private: (), } impl GenerateDataKeyPairWithoutPlaintext { /// Creates a new builder-style object to manufacture [`GenerateDataKeyPairWithoutPlaintextInput`](crate::input::GenerateDataKeyPairWithoutPlaintextInput) pub fn builder() -> crate::input::generate_data_key_pair_without_plaintext_input::Builder { crate::input::generate_data_key_pair_without_plaintext_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for GenerateDataKeyPairWithoutPlaintext { type Output = std::result::Result< crate::output::GenerateDataKeyPairWithoutPlaintextOutput, crate::error::GenerateDataKeyPairWithoutPlaintextError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_generate_data_key_pair_without_plaintext_error(response) } else { crate::operation_deser::parse_generate_data_key_pair_without_plaintext_response( response, ) } } } /// <p>Generates a unique symmetric data key. This operation returns a data key that is encrypted /// under a KMS key that you specify. To request an asymmetric data key pair, /// use the <a>GenerateDataKeyPair</a> or <a>GenerateDataKeyPairWithoutPlaintext</a> operations.</p> /// <p> /// <code>GenerateDataKeyWithoutPlaintext</code> is identical to the <a>GenerateDataKey</a> operation except that it returns only the encrypted copy of the /// data key. This operation is useful for systems that need to encrypt data at some point, but /// not immediately. When you need to encrypt the data, you call the <a>Decrypt</a> /// operation on the encrypted copy of the key. </p> /// <p>It's also useful in distributed systems with different levels of trust. For example, you /// might store encrypted data in containers. One component of your system creates new containers /// and stores an encrypted data key with each container. Then, a different component puts the /// data into the containers. That component first decrypts the data key, uses the plaintext data /// key to encrypt data, puts the encrypted data into the container, and then destroys the /// plaintext data key. In this system, the component that creates the containers never sees the /// plaintext data key.</p> /// <p> /// <code>GenerateDataKeyWithoutPlaintext</code> returns a unique data key for each request. /// The bytes in the keys are not related to the caller or KMS key that is used to encrypt the data /// key.</p> /// <p>To generate a data key, you must specify the symmetric KMS key that is /// used to encrypt the data key. 
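/// </p>
/// <p>A minimal, illustrative sketch (client construction and identifiers are assumptions,
/// not generated documentation):</p>
/// <pre><code>
/// let client = kms::Client::from_env(); // assumed client construction
/// let resp = client
///     .generate_data_key_without_plaintext()
///     .key_id("alias/example-key") // assumed alias of a symmetric KMS key
///     .key_spec(kms::model::DataKeySpec::Aes256)
///     .send()
///     .await?;
/// // The response contains only the encrypted data key (CiphertextBlob);
/// // call Decrypt on it when a component is ready to encrypt data.
/// </code></pre>
/// <p>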
You cannot use an asymmetric KMS key to generate a data key. To get /// the type of your KMS key, use the <a>DescribeKey</a> operation.</p> /// <p>If the operation succeeds, you will find the encrypted copy of the data key in the /// <code>CiphertextBlob</code> field.</p> /// <p>You can use the optional encryption context to add additional security to the encryption /// operation. If you specify an <code>EncryptionContext</code>, you must specify the same /// encryption context (a case-sensitive exact match) when decrypting the encrypted data key. /// Otherwise, the request to decrypt fails with an <code>InvalidCiphertextException</code>. For more information, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context">Encryption Context</a> in the /// <i>Key Management Service Developer Guide</i>.</p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify /// the key ARN or alias ARN in the value of the <code>KeyId</code> parameter.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:GenerateDataKeyWithoutPlaintext</a> (key policy)</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>Decrypt</a> /// </p> /// </li> /// <li> /// <p> /// <a>Encrypt</a> /// </p> /// </li> /// <li> /// <p> /// <a>GenerateDataKey</a> /// </p> /// </li> /// <li> /// <p> /// <a>GenerateDataKeyPair</a> /// </p> /// </li> /// <li> /// <p> /// <a>GenerateDataKeyPairWithoutPlaintext</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct GenerateDataKeyWithoutPlaintext { _private: (), } impl GenerateDataKeyWithoutPlaintext { /// Creates a new builder-style object to manufacture [`GenerateDataKeyWithoutPlaintextInput`](crate::input::GenerateDataKeyWithoutPlaintextInput) pub fn builder() -> crate::input::generate_data_key_without_plaintext_input::Builder { crate::input::generate_data_key_without_plaintext_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for GenerateDataKeyWithoutPlaintext { type Output = std::result::Result< crate::output::GenerateDataKeyWithoutPlaintextOutput, crate::error::GenerateDataKeyWithoutPlaintextError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_generate_data_key_without_plaintext_error(response) } else { crate::operation_deser::parse_generate_data_key_without_plaintext_response(response) } } } /// <p>Returns a random byte string that is cryptographically secure.</p> /// <p>By default, the random byte string is generated in KMS. 
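/// </p>
/// <p>A minimal, illustrative sketch (the client construction and the response field
/// named in the comment are assumptions, not generated documentation):</p>
/// <pre><code>
/// let client = kms::Client::from_env(); // assumed client construction
/// let resp = client.generate_random().number_of_bytes(32).send().await?;
/// // The Plaintext field of the response holds 32 cryptographically
/// // secure random bytes.
/// </code></pre>
/// <p>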
To generate the byte string in /// the CloudHSM cluster that is associated with a <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">custom key store</a>, specify the custom key store /// ID.</p> /// <p>Applications in Amazon Web Services Nitro Enclaves can call this operation by using the <a href="https://github.com/aws/aws-nitro-enclaves-sdk-c">Amazon Web Services Nitro Enclaves Development Kit</a>. For information about the supporting parameters, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html">How Amazon Web Services Nitro Enclaves use KMS</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p>For more information about entropy and random number generation, see <a href="https://docs.aws.amazon.com/kms/latest/cryptographic-details/">Key Management Service Cryptographic Details</a>.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:GenerateRandom</a> (IAM policy)</p> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct GenerateRandom { _private: (), } impl GenerateRandom { /// Creates a new builder-style object to manufacture [`GenerateRandomInput`](crate::input::GenerateRandomInput) pub fn builder() -> crate::input::generate_random_input::Builder { crate::input::generate_random_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for GenerateRandom { type Output = std::result::Result<crate::output::GenerateRandomOutput, crate::error::GenerateRandomError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_generate_random_error(response) } else { crate::operation_deser::parse_generate_random_response(response) } } } /// <p>Gets a key policy attached to the specified KMS key.</p> /// <p> /// <b>Cross-account use</b>: No. 
You cannot perform this operation on a KMS key in a different Amazon Web Services account.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:GetKeyPolicy</a> (key policy)</p> /// <p> /// <b>Related operations</b>: <a>PutKeyPolicy</a> /// </p> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct GetKeyPolicy { _private: (), } impl GetKeyPolicy { /// Creates a new builder-style object to manufacture [`GetKeyPolicyInput`](crate::input::GetKeyPolicyInput) pub fn builder() -> crate::input::get_key_policy_input::Builder { crate::input::get_key_policy_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for GetKeyPolicy { type Output = std::result::Result<crate::output::GetKeyPolicyOutput, crate::error::GetKeyPolicyError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_get_key_policy_error(response) } else { crate::operation_deser::parse_get_key_policy_response(response) } } } /// <p>Gets a Boolean value that indicates whether <a href="https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html">automatic rotation of the key material</a> is /// enabled for the specified KMS key.</p> /// <p>You cannot enable automatic rotation of <a href="https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks">asymmetric KMS keys</a>, KMS keys with <a href="https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html">imported key material</a>, or KMS keys in a <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">custom key store</a>. To enable or disable automatic rotation of a set of related <a href="https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html#mrk-replica-key">multi-Region keys</a>, set the property on the primary key. The key rotation status for these KMS keys is always <code>false</code>.</p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <ul> /// <li> /// <p>Disabled: The key rotation status does not change when you disable a KMS key. However, /// while the KMS key is disabled, KMS does not rotate the key material.</p> /// </li> /// <li> /// <p>Pending deletion: While a KMS key is pending deletion, its key rotation status is /// <code>false</code> and KMS does not rotate the key material. If you cancel the /// deletion, the original key rotation status is restored.</p> /// </li> /// </ul> /// <p> /// <b>Cross-account use</b>: Yes. 
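/// </p>
/// <p>A minimal, illustrative sketch (client construction and the example key ID are
/// assumptions, not generated documentation):</p>
/// <pre><code>
/// let client = kms::Client::from_env(); // assumed client construction
/// let resp = client
///     .get_key_rotation_status()
///     .key_id("1234abcd-12ab-34cd-56ef-1234567890ab") // assumed key ID
///     .send()
///     .await?;
/// // The KeyRotationEnabled field is true when annual rotation is enabled.
/// </code></pre>
/// <p>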
To perform this operation on a KMS key in a different Amazon Web Services account, specify the key /// ARN in the value of the <code>KeyId</code> parameter.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:GetKeyRotationStatus</a> (key policy)</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>DisableKeyRotation</a> /// </p> /// </li> /// <li> /// <p> /// <a>EnableKeyRotation</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct GetKeyRotationStatus { _private: (), } impl GetKeyRotationStatus { /// Creates a new builder-style object to manufacture [`GetKeyRotationStatusInput`](crate::input::GetKeyRotationStatusInput) pub fn builder() -> crate::input::get_key_rotation_status_input::Builder { crate::input::get_key_rotation_status_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for GetKeyRotationStatus { type Output = std::result::Result< crate::output::GetKeyRotationStatusOutput, crate::error::GetKeyRotationStatusError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_get_key_rotation_status_error(response) } else { crate::operation_deser::parse_get_key_rotation_status_response(response) } } } /// <p>Returns the items you need to import key material into a symmetric, customer managed /// KMS key. For more information about importing key material into KMS, see /// <a href="https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html">Importing Key /// Material</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p>This operation returns a public key and an import token. Use the public key to encrypt the /// symmetric key material. Store the import token to send with a subsequent <a>ImportKeyMaterial</a> request.</p> /// <p>You must specify the key ID of the symmetric KMS key into which you will import key material. /// This KMS key's <code>Origin</code> must be <code>EXTERNAL</code>. You must also specify the /// wrapping algorithm and type of wrapping key (public key) that you will use to encrypt the key /// material. You cannot perform this operation on an asymmetric KMS key or on any KMS key in a different Amazon Web Services account.</p> /// <p>To import key material, you must use the public key and import token from the same /// response. These items are valid for 24 hours. The expiration date and time appear in the /// <code>GetParametersForImport</code> response. You cannot use an expired token in an <a>ImportKeyMaterial</a> request. If your key and token expire, send another /// <code>GetParametersForImport</code> request.</p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: No. 
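/// </p>
/// <p>A minimal, illustrative sketch (client construction and the example key ID are
/// assumptions, not generated documentation):</p>
/// <pre><code>
/// let client = kms::Client::from_env(); // assumed client construction
/// let params = client
///     .get_parameters_for_import()
///     .key_id("1234abcd-12ab-34cd-56ef-1234567890ab") // Origin must be EXTERNAL
///     .wrapping_algorithm(kms::model::AlgorithmSpec::RsaesOaepSha256)
///     .wrapping_key_spec(kms::model::WrappingKeySpec::Rsa2048)
///     .send()
///     .await?;
/// // Wrap your key material with the returned public key, and send the
/// // returned import token to ImportKeyMaterial within 24 hours.
/// </code></pre>
/// <p>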
You cannot perform this operation on a KMS key in a different Amazon Web Services account.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:GetParametersForImport</a> (key policy)</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>ImportKeyMaterial</a> /// </p> /// </li> /// <li> /// <p> /// <a>DeleteImportedKeyMaterial</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct GetParametersForImport { _private: (), } impl GetParametersForImport { /// Creates a new builder-style object to manufacture [`GetParametersForImportInput`](crate::input::GetParametersForImportInput) pub fn builder() -> crate::input::get_parameters_for_import_input::Builder { crate::input::get_parameters_for_import_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for GetParametersForImport { type Output = std::result::Result< crate::output::GetParametersForImportOutput, crate::error::GetParametersForImportError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_get_parameters_for_import_error(response) } else { crate::operation_deser::parse_get_parameters_for_import_response(response) } } } /// <p>Returns the public key of an asymmetric KMS key. Unlike the private key of an asymmetric KMS key, /// which never leaves KMS unencrypted, callers with <code>kms:GetPublicKey</code> permission /// can download the public key of an asymmetric KMS key. You can share the public key to allow others /// to encrypt messages and verify signatures outside of KMS. For information about symmetric and asymmetric KMS keys, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using Symmetric and Asymmetric KMS keys</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p>You do not need to download the public key. Instead, you can use the public key within /// KMS by calling the <a>Encrypt</a>, <a>ReEncrypt</a>, or <a>Verify</a> operations with the identifier of an asymmetric KMS key. When you use the /// public key within KMS, you benefit from the authentication, authorization, and logging that /// are part of every KMS operation. You also reduce the risk of encrypting data that cannot be /// decrypted. These features are not effective outside of KMS. 
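/// </p>
/// <p>A minimal, illustrative sketch (client construction and the alias are assumptions,
/// not generated documentation):</p>
/// <pre><code>
/// let client = kms::Client::from_env(); // assumed client construction
/// let resp = client
///     .get_public_key()
///     .key_id("alias/example-asymmetric-key") // assumed alias
///     .send()
///     .await?;
/// // Record the KeySpec, KeyUsage, and algorithm lists from the response
/// // along with the DER-encoded public key before using it outside of KMS.
/// </code></pre>
/// <p>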
For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/download-public-key.html#download-public-key-considerations">Special /// Considerations for Downloading Public Keys</a>.</p> /// <p>To help you use the public key safely outside of KMS, <code>GetPublicKey</code> returns /// important information about the public key in the response, including:</p> /// <ul> /// <li> /// <p> /// <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeySpec">KeySpec</a>: The type of key material in the public key, such as /// <code>RSA_4096</code> or <code>ECC_NIST_P521</code>.</p> /// </li> /// <li> /// <p> /// <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeyUsage">KeyUsage</a>: Whether the key is used for encryption or signing.</p> /// </li> /// <li> /// <p> /// <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-EncryptionAlgorithms">EncryptionAlgorithms</a> or <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-SigningAlgorithms">SigningAlgorithms</a>: A list of the encryption algorithms or the signing /// algorithms for the key.</p> /// </li> /// </ul> /// <p>Although KMS cannot enforce these restrictions on external operations, it is crucial /// that you use this information to prevent the public key from being used improperly. For /// example, you can prevent a public signing key from being used to encrypt data, or prevent a /// public key from being used with an encryption algorithm that is not supported by KMS. You /// can also avoid errors, such as using the wrong signing algorithm in a verification /// operation.</p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify /// the key ARN or alias ARN in the value of the <code>KeyId</code> parameter.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:GetPublicKey</a> (key policy)</p> /// <p> /// <b>Related operations</b>: <a>CreateKey</a> /// </p> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct GetPublicKey { _private: (), } impl GetPublicKey { /// Creates a new builder-style object to manufacture [`GetPublicKeyInput`](crate::input::GetPublicKeyInput) pub fn builder() -> crate::input::get_public_key_input::Builder { crate::input::get_public_key_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for GetPublicKey { type Output = std::result::Result<crate::output::GetPublicKeyOutput, crate::error::GetPublicKeyError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_get_public_key_error(response) } else { crate::operation_deser::parse_get_public_key_response(response) } } } /// <p>Imports key material into an existing symmetric KMS key that was /// created without key material. 
After you successfully import key material into a KMS key, you can /// <a href="https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#reimport-key-material">reimport the same key material</a> into that KMS key, but you cannot import different key /// material. </p> /// <p>You cannot perform this operation on an asymmetric KMS key or on any KMS key in a different Amazon Web Services account. For more information about creating KMS keys with no key material and /// then importing key material, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html">Importing Key Material</a> in the /// <i>Key Management Service Developer Guide</i>.</p> /// <p>Before using this operation, call <a>GetParametersForImport</a>. Its response /// includes a public key and an import token. Use the public key to encrypt the key material. /// Then, submit the import token from the same <code>GetParametersForImport</code> /// response.</p> /// <p>When calling this operation, you must specify the following values:</p> /// <ul> /// <li> /// <p>The key ID or key ARN of a KMS key with no key material. Its <code>Origin</code> must be /// <code>EXTERNAL</code>.</p> /// <p>To create a KMS key with no key material, call <a>CreateKey</a> and set the /// value of its <code>Origin</code> parameter to <code>EXTERNAL</code>. To get the /// <code>Origin</code> of a KMS key, call <a>DescribeKey</a>.</p> /// </li> /// <li> /// <p>The encrypted key material. To get the public key to encrypt the key material, call /// <a>GetParametersForImport</a>.</p> /// </li> /// <li> /// <p>The import token that <a>GetParametersForImport</a> returned. You must use /// a public key and token from the same <code>GetParametersForImport</code> response.</p> /// </li> /// <li> /// <p>Whether the key material expires and if so, when. If you set an expiration date, KMS /// deletes the key material from the KMS key on the specified date, and the KMS key becomes unusable. /// To use the KMS key again, you must reimport the same key material. The only way to change an /// expiration date is by reimporting the same key material and specifying a new expiration /// date. </p> /// </li> /// </ul> /// <p>When this operation is successful, the key state of the KMS key changes from /// <code>PendingImport</code> to <code>Enabled</code>, and you can use the KMS key.</p> /// <p>If this operation fails, use the exception to help determine the problem. If the error is /// related to the key material, the import token, or wrapping key, use <a>GetParametersForImport</a> to get a new public key and import token for the KMS key and /// repeat the import procedure. For help, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#importing-keys-overview">How To Import Key /// Material</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: No. 
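/// </p>
/// <p>A minimal, illustrative sketch (client construction, the key ID, and the two
/// prepared byte buffers are assumptions, not generated documentation):</p>
/// <pre><code>
/// // `import_token` and `wrapped_key_material` are assumed to be byte buffers
/// // prepared from a prior GetParametersForImport response.
/// let client = kms::Client::from_env(); // assumed client construction
/// client
///     .import_key_material()
///     .key_id("1234abcd-12ab-34cd-56ef-1234567890ab") // Origin must be EXTERNAL
///     .import_token(smithy_types::Blob::new(import_token))
///     .encrypted_key_material(smithy_types::Blob::new(wrapped_key_material))
///     .expiration_model(kms::model::ExpirationModelType::KeyMaterialDoesNotExpire)
///     .send()
///     .await?;
/// </code></pre>
/// <p>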
You cannot perform this operation on a KMS key in a different Amazon Web Services account.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:ImportKeyMaterial</a> (key policy)</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>DeleteImportedKeyMaterial</a> /// </p> /// </li> /// <li> /// <p> /// <a>GetParametersForImport</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct ImportKeyMaterial { _private: (), } impl ImportKeyMaterial { /// Creates a new builder-style object to manufacture [`ImportKeyMaterialInput`](crate::input::ImportKeyMaterialInput) pub fn builder() -> crate::input::import_key_material_input::Builder { crate::input::import_key_material_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for ImportKeyMaterial { type Output = std::result::Result< crate::output::ImportKeyMaterialOutput, crate::error::ImportKeyMaterialError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_import_key_material_error(response) } else { crate::operation_deser::parse_import_key_material_response(response) } } } /// <p>Gets a list of aliases in the caller's Amazon Web Services account and region. For more information about /// aliases, see <a>CreateAlias</a>.</p> /// <p>By default, the <code>ListAliases</code> operation returns all aliases in the account and /// region. To get only the aliases associated with a particular KMS key, use /// the <code>KeyId</code> parameter.</p> /// <p>The <code>ListAliases</code> response can include aliases that you created and associated /// with your customer managed keys, and aliases that Amazon Web Services created and associated with Amazon Web Services managed keys in your account. You can recognize Amazon Web Services aliases because their names have the format /// <code>aws/<service-name></code>, such as <code>aws/dynamodb</code>.</p> /// <p>The response might also include aliases that have no <code>TargetKeyId</code> field. These /// are predefined aliases that Amazon Web Services has created but has not yet associated with a KMS key. Aliases /// that Amazon Web Services creates in your account, including predefined aliases, do not count against your /// <a href="https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#aliases-limit">KMS aliases /// quota</a>.</p> /// <p> /// <b>Cross-account use</b>: No. <code>ListAliases</code> does not /// return aliases in other Amazon Web Services accounts.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:ListAliases</a> (IAM policy)</p> /// <p>For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access">Controlling access to aliases</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Related operations:</b>
/// </p> /// <ul> /// <li> /// <p> /// <a>CreateAlias</a> /// </p> /// </li> /// <li> /// <p> /// <a>DeleteAlias</a> /// </p> /// </li> /// <li> /// <p> /// <a>UpdateAlias</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct ListAliases { _private: (), } impl ListAliases { /// Creates a new builder-style object to manufacture [`ListAliasesInput`](crate::input::ListAliasesInput) pub fn builder() -> crate::input::list_aliases_input::Builder { crate::input::list_aliases_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for ListAliases { type Output = std::result::Result<crate::output::ListAliasesOutput, crate::error::ListAliasesError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_list_aliases_error(response) } else { crate::operation_deser::parse_list_aliases_response(response) } } } /// <p>Gets a list of all grants for the specified KMS key. </p> /// <p>You must specify the KMS key in all requests. You can filter the grant list by grant ID /// or grantee principal.</p> /// <p>For detailed information about grants, including grant terminology, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/grants.html">Using grants</a> in the /// <i> /// <i>Key Management Service Developer Guide</i> /// </i>. For examples of working with grants in several /// programming languages, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html">Programming grants</a>. </p> /// <note> /// <p>The <code>GranteePrincipal</code> field in the <code>ListGrants</code> response usually contains the /// user or role designated as the grantee principal in the grant. However, when the grantee /// principal in the grant is an Amazon Web Services service, the <code>GranteePrincipal</code> field contains /// the <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services">service /// principal</a>, which might represent several different grantee principals.</p> /// </note> /// <p> /// <b>Cross-account use</b>: Yes. 
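/// </p>
/// <p>A minimal, illustrative sketch (client construction and the example key ARN are
/// assumptions, not generated documentation):</p>
/// <pre><code>
/// let client = kms::Client::from_env(); // assumed client construction
/// let resp = client
///     .list_grants()
///     .key_id("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab")
///     .limit(10)
///     .send()
///     .await?;
/// // Iterate the Grants field; pass NextMarker back via .marker() to page
/// // through additional results.
/// </code></pre>
/// <p>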
To perform this operation on a KMS key in a different Amazon Web Services account, specify the key /// ARN in the value of the <code>KeyId</code> parameter.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:ListGrants</a> (key policy)</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>CreateGrant</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListRetirableGrants</a> /// </p> /// </li> /// <li> /// <p> /// <a>RetireGrant</a> /// </p> /// </li> /// <li> /// <p> /// <a>RevokeGrant</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct ListGrants { _private: (), } impl ListGrants { /// Creates a new builder-style object to manufacture [`ListGrantsInput`](crate::input::ListGrantsInput) pub fn builder() -> crate::input::list_grants_input::Builder { crate::input::list_grants_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for ListGrants { type Output = std::result::Result<crate::output::ListGrantsOutput, crate::error::ListGrantsError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_list_grants_error(response) } else { crate::operation_deser::parse_list_grants_response(response) } } } /// <p>Gets the names of the key policies that are attached to a KMS key. This /// operation is designed to get policy names that you can use in a <a>GetKeyPolicy</a> /// operation. However, the only valid policy name is <code>default</code>. </p> /// <p> /// <b>Cross-account use</b>: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:ListKeyPolicies</a> (key policy)</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>GetKeyPolicy</a> /// </p> /// </li> /// <li> /// <p> /// <a>PutKeyPolicy</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct ListKeyPolicies { _private: (), } impl ListKeyPolicies { /// Creates a new builder-style object to manufacture [`ListKeyPoliciesInput`](crate::input::ListKeyPoliciesInput) pub fn builder() -> crate::input::list_key_policies_input::Builder { crate::input::list_key_policies_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for ListKeyPolicies { type Output = std::result::Result< crate::output::ListKeyPoliciesOutput, crate::error::ListKeyPoliciesError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_list_key_policies_error(response) } else { crate::operation_deser::parse_list_key_policies_response(response) } } } /// <p>Gets a list of all KMS keys in the caller's Amazon Web Services account and /// Region.</p> /// <p> /// <b>Cross-account use</b>: No. 
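/// </p>
/// <p>A minimal, illustrative pagination sketch (client construction and the response
/// field names are assumptions, not generated documentation):</p>
/// <pre><code>
/// let client = kms::Client::from_env(); // assumed client construction
/// let mut marker = None;
/// loop {
///     let mut req = client.list_keys().limit(100);
///     if let Some(m) = marker {
///         req = req.marker(m);
///     }
///     let resp = req.send().await?;
///     // ... process the Keys field of `resp` here ...
///     if resp.truncated {
///         marker = resp.next_marker; // assumed output field names
///     } else {
///         break;
///     }
/// }
/// </code></pre>
/// <p>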
You cannot perform this operation on a KMS key in a different Amazon Web Services account.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:ListKeys</a> (IAM policy)</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>CreateKey</a> /// </p> /// </li> /// <li> /// <p> /// <a>DescribeKey</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListAliases</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListResourceTags</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct ListKeys { _private: (), } impl ListKeys { /// Creates a new builder-style object to manufacture [`ListKeysInput`](crate::input::ListKeysInput) pub fn builder() -> crate::input::list_keys_input::Builder { crate::input::list_keys_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for ListKeys { type Output = std::result::Result<crate::output::ListKeysOutput, crate::error::ListKeysError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_list_keys_error(response) } else { crate::operation_deser::parse_list_keys_response(response) } } } /// <p>Returns all tags on the specified KMS key.</p> /// <p>For general information about tags, including the format and syntax, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services resources</a> in /// the <i>Amazon Web Services General Reference</i>. For information about using /// tags in KMS, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html">Tagging /// keys</a>.</p> /// <p> /// <b>Cross-account use</b>: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:ListResourceTags</a> (key policy)</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>CreateKey</a> /// </p> /// </li> /// <li> /// <p> /// <a>ReplicateKey</a> /// </p> /// </li> /// <li> /// <p> /// <a>TagResource</a> /// </p> /// </li> /// <li> /// <p> /// <a>UntagResource</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct ListResourceTags { _private: (), } impl ListResourceTags { /// Creates a new builder-style object to manufacture [`ListResourceTagsInput`](crate::input::ListResourceTagsInput) pub fn builder() -> crate::input::list_resource_tags_input::Builder { crate::input::list_resource_tags_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for ListResourceTags { type Output = std::result::Result< crate::output::ListResourceTagsOutput, crate::error::ListResourceTagsError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_list_resource_tags_error(response) } else { crate::operation_deser::parse_list_resource_tags_response(response) } } } /// <p>Returns information about all grants in the Amazon Web Services account and Region that have the specified /// retiring principal. 
</p> /// <p>You can specify any principal in your Amazon Web Services account. The grants that are returned include /// grants for KMS keys in your Amazon Web Services account and other Amazon Web Services accounts. You might use this operation to /// determine which grants you may retire. To retire a grant, use the <a>RetireGrant</a> operation.</p> /// <p>For detailed information about grants, including grant terminology, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/grants.html">Using grants</a> in the /// <i> /// <i>Key Management Service Developer Guide</i> /// </i>. For examples of working with grants in several /// programming languages, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html">Programming grants</a>. </p> /// <p> /// <b>Cross-account use</b>: You must specify a principal in your /// Amazon Web Services account. However, this operation can return grants in any Amazon Web Services account. You do not need /// <code>kms:ListRetirableGrants</code> permission (or any other additional permission) in any /// Amazon Web Services account other than your own.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:ListRetirableGrants</a> (IAM policy) in your Amazon Web Services account.</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>CreateGrant</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListGrants</a> /// </p> /// </li> /// <li> /// <p> /// <a>RetireGrant</a> /// </p> /// </li> /// <li> /// <p> /// <a>RevokeGrant</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct ListRetirableGrants { _private: (), } impl ListRetirableGrants { /// Creates a new builder-style object to manufacture [`ListRetirableGrantsInput`](crate::input::ListRetirableGrantsInput) pub fn builder() -> crate::input::list_retirable_grants_input::Builder { crate::input::list_retirable_grants_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for ListRetirableGrants { type Output = std::result::Result< crate::output::ListRetirableGrantsOutput, crate::error::ListRetirableGrantsError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_list_retirable_grants_error(response) } else { crate::operation_deser::parse_list_retirable_grants_response(response) } } } /// <p>Attaches a key policy to the specified KMS key. </p> /// <p>For more information about key policies, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html">Key Policies</a> in the <i>Key Management Service Developer Guide</i>. /// For help writing and formatting a JSON policy document, see the <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html">IAM JSON Policy Reference</a> in the <i> /// <i>Identity and Access Management User Guide</i> /// </i>. For examples of adding a key policy in multiple programming languages, /// see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/programming-key-policies.html#put-policy">Setting a key policy</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: No. 
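/// </p>
/// <p>A minimal, illustrative sketch (client construction, the key ID, and the account
/// number in the policy are assumptions, not generated documentation):</p>
/// <pre><code>
/// let policy = r#"{
///   "Version": "2012-10-17",
///   "Statement": [{
///     "Sid": "Enable IAM policies",
///     "Effect": "Allow",
///     "Principal": { "AWS": "arn:aws:iam::111122223333:root" },
///     "Action": "kms:*",
///     "Resource": "*"
///   }]
/// }"#;
/// let client = kms::Client::from_env(); // assumed client construction
/// client
///     .put_key_policy()
///     .key_id("1234abcd-12ab-34cd-56ef-1234567890ab") // assumed key ID
///     .policy_name("default") // the only valid policy name
///     .policy(policy)
///     .send()
///     .await?;
/// </code></pre>
/// <p>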
You cannot perform this operation on a KMS key in a different Amazon Web Services account.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:PutKeyPolicy</a> (key policy)</p> /// <p> /// <b>Related operations</b>: <a>GetKeyPolicy</a> /// </p> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct PutKeyPolicy { _private: (), } impl PutKeyPolicy { /// Creates a new builder-style object to manufacture [`PutKeyPolicyInput`](crate::input::PutKeyPolicyInput) pub fn builder() -> crate::input::put_key_policy_input::Builder { crate::input::put_key_policy_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for PutKeyPolicy { type Output = std::result::Result<crate::output::PutKeyPolicyOutput, crate::error::PutKeyPolicyError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_put_key_policy_error(response) } else { crate::operation_deser::parse_put_key_policy_response(response) } } } /// <p>Decrypts ciphertext and then reencrypts it entirely within KMS. You can use this /// operation to change the KMS key under which data is encrypted, such as when /// you <a href="https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-manually">manually rotate</a> a KMS key or change the KMS key that protects a ciphertext. You can also /// use it to reencrypt ciphertext under the same KMS key, such as to change the <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context">encryption /// context</a> of a ciphertext.</p> /// <p>The <code>ReEncrypt</code> operation can decrypt ciphertext that was encrypted by using a /// KMS key in a KMS operation, such as <a>Encrypt</a> or <a>GenerateDataKey</a>. It can also decrypt ciphertext that was encrypted by using the /// public key of an <a href="https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks">asymmetric KMS key</a> outside of KMS. However, it cannot decrypt ciphertext /// produced by other libraries, such as the <a href="https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/">Amazon Web Services Encryption SDK</a> or <a href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html">Amazon S3 client-side encryption</a>. /// These libraries return a ciphertext format that is incompatible with KMS.</p> /// <p>When you use the <code>ReEncrypt</code> operation, you need to provide information for the /// decrypt operation and the subsequent encrypt operation.</p> /// <ul> /// <li> /// <p>If your ciphertext was encrypted under an asymmetric KMS key, you must use the /// <code>SourceKeyId</code> parameter to identify the KMS key that encrypted the ciphertext. /// You must also supply the encryption algorithm that was used. This information is required /// to decrypt the data.</p> /// </li> /// <li> /// <p>If your ciphertext was encrypted under a symmetric KMS key, the <code>SourceKeyId</code> /// parameter is optional. KMS can get this information from metadata that it adds to the /// symmetric ciphertext blob. This feature adds durability to your implementation by ensuring /// that authorized users can decrypt ciphertext decades after it was encrypted, even if /// they've lost track of the key ID. 
However, specifying the source KMS key is always recommended /// as a best practice. When you use the <code>SourceKeyId</code> parameter to specify a KMS key, /// KMS uses only the KMS key you specify. If the ciphertext was encrypted under a different KMS key, the <code>ReEncrypt</code> operation fails. This practice ensures that you use the KMS key that you intend.</p> /// </li> /// <li> /// <p>To reencrypt the data, you must use the <code>DestinationKeyId</code> parameter to /// specify the KMS key that re-encrypts the data after it is decrypted. You can select a /// symmetric or asymmetric KMS key. If the destination KMS key is an asymmetric KMS key, you must also /// provide the encryption algorithm. The algorithm that you choose must be compatible with /// the KMS key.</p> /// <important> /// <p>When you use an asymmetric KMS key to encrypt or reencrypt data, be sure to record the KMS key and encryption algorithm that you choose. You will be required to provide the same KMS key and encryption algorithm when you decrypt the data. If the KMS key and algorithm do not match the values used to encrypt the data, the decrypt operation fails.</p> /// <p>You are not required to supply the key ID and encryption algorithm when you decrypt with symmetric KMS keys because KMS stores this information in the ciphertext blob. KMS cannot store metadata in ciphertext generated with asymmetric keys. The standard format for asymmetric key ciphertext does not include configurable fields.</p> /// </important> /// </li> /// </ul> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: Yes. The source KMS key and destination KMS key can be in different Amazon Web Services accounts. Either or both KMS keys can be in a different account than /// the caller. To specify a KMS key in a different account, you must use its key ARN or alias /// ARN.</p> /// <p> /// <b>Required permissions</b>:</p> /// <ul> /// <li> /// <p> /// <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:ReEncryptFrom</a> permission on the source KMS key (key policy)</p> /// </li> /// <li> /// <p> /// <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:ReEncryptTo</a> permission on the destination KMS key (key policy)</p> /// </li> /// </ul> /// <p>To permit reencryption from or to a KMS key, include the <code>"kms:ReEncrypt*"</code> /// permission in your <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html">key policy</a>. This permission is /// automatically included in the key policy when you use the console to create a KMS key. 
But you /// must include it manually when you create a KMS key programmatically or when you use the <a>PutKeyPolicy</a> operation to set a key policy.</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>Decrypt</a> /// </p> /// </li> /// <li> /// <p> /// <a>Encrypt</a> /// </p> /// </li> /// <li> /// <p> /// <a>GenerateDataKey</a> /// </p> /// </li> /// <li> /// <p> /// <a>GenerateDataKeyPair</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct ReEncrypt { _private: (), } impl ReEncrypt { /// Creates a new builder-style object to manufacture [`ReEncryptInput`](crate::input::ReEncryptInput) pub fn builder() -> crate::input::re_encrypt_input::Builder { crate::input::re_encrypt_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for ReEncrypt { type Output = std::result::Result<crate::output::ReEncryptOutput, crate::error::ReEncryptError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_re_encrypt_error(response) } else { crate::operation_deser::parse_re_encrypt_response(response) } } } /// <p>Replicates a multi-Region key into the specified Region. This operation creates a /// multi-Region replica key based on a multi-Region primary key in a different Region of the same /// Amazon Web Services partition. You can create multiple replicas of a primary key, but each must be in a /// different Region. To create a multi-Region primary key, use the <a>CreateKey</a> /// operation.</p> /// <p>This operation supports <i>multi-Region keys</i>, a KMS feature that lets you create multiple /// interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key /// material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt /// it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html">Using multi-Region keys</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p>A <i>replica key</i> is a fully-functional KMS key that can be used /// independently of its primary and peer replica keys. A primary key and its replica keys share /// properties that make them interoperable. They have the same <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id">key ID</a> and key material. They also /// have the same <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec">key /// spec</a>, <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage">key /// usage</a>, <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin">key /// material origin</a>, and <a href="https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html">automatic key rotation status</a>. KMS automatically synchronizes these shared /// properties among related multi-Region keys. 
All other properties of a replica key can differ, /// including its <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html">key /// policy</a>, <a href="https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html">tags</a>, <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html">aliases</a>, and <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">key /// state</a>. KMS pricing and quotas for KMS keys apply to each primary key and replica /// key.</p> /// <p>When this operation completes, the new replica key has a transient key state of /// <code>Creating</code>. This key state changes to <code>Enabled</code> (or /// <code>PendingImport</code>) after a few seconds when the process of creating the new replica /// key is complete. While the key state is <code>Creating</code>, you can manage the key, but you /// cannot yet use it in cryptographic operations. If you are creating and using the replica key /// programmatically, retry on <code>KMSInvalidStateException</code> or call /// <code>DescribeKey</code> to check its <code>KeyState</code> value before using it. For /// details about the <code>Creating</code> key state, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the /// <i>Key Management Service Developer Guide</i>.</p> /// <p>The CloudTrail log of a <code>ReplicateKey</code> operation records a /// <code>ReplicateKey</code> operation in the primary key's Region and a <a>CreateKey</a> operation in the replica key's Region.</p> /// <p>If you replicate a multi-Region primary key with imported key material, the replica key is /// created with no key material. You must import the same key material that you imported into the /// primary key. For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-import.html">Importing key material into multi-Region keys</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p>To convert a replica key to a primary key, use the <a>UpdatePrimaryRegion</a> /// operation.</p> /// <note> /// <p> /// <code>ReplicateKey</code> uses different default values for the <code>KeyPolicy</code> and /// <code>Tags</code> parameters than those used in the KMS console. For details, see the /// parameter descriptions.</p> /// </note> /// <p> /// <b>Cross-account use</b>: No. You cannot use this operation to /// create a replica key in a different Amazon Web Services account. </p> /// <p> /// <b>Required permissions</b>: </p> /// <ul> /// <li> /// <p> /// <code>kms:ReplicateKey</code> on the primary key (in the primary key's Region). 
/// permission in the primary key's key policy.</p>
/// </li>
/// <li>
/// <p>
/// <code>kms:CreateKey</code> in an IAM policy in the replica Region.</p>
/// </li>
/// <li>
/// <p>To use the <code>Tags</code> parameter, <code>kms:TagResource</code> in an IAM policy
/// in the replica Region.</p>
/// </li>
/// </ul>
/// <p>
/// <b>Related operations</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <a>CreateKey</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>UpdatePrimaryRegion</a>
/// </p>
/// </li>
/// </ul>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ReplicateKey {
    _private: (),
}
impl ReplicateKey {
    /// Creates a new builder-style object to manufacture [`ReplicateKeyInput`](crate::input::ReplicateKeyInput)
    pub fn builder() -> crate::input::replicate_key_input::Builder {
        crate::input::replicate_key_input::Builder::default()
    }
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl smithy_http::response::ParseStrictResponse for ReplicateKey {
    type Output =
        std::result::Result<crate::output::ReplicateKeyOutput, crate::error::ReplicateKeyError>;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_replicate_key_error(response)
        } else {
            crate::operation_deser::parse_replicate_key_response(response)
        }
    }
}
/// <p>Deletes a grant. Typically, you retire a grant when you no longer need its permissions. To
/// identify the grant to retire, use a <a href="https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token">grant token</a>, or both the grant ID and a
/// key identifier (key ID or key ARN) of the KMS key. The <a>CreateGrant</a> operation returns both values.</p>
/// <p>This operation can be called by the <i>retiring principal</i> for a grant,
/// by the <i>grantee principal</i> if the grant allows the <code>RetireGrant</code>
/// operation, and by the Amazon Web Services account (root user) in which the grant is created. It can also be
/// called by principals to whom permission for retiring a grant is delegated. For details, see
/// <a href="https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete">Retiring and
/// revoking grants</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>For detailed information about grants, including grant terminology, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/grants.html">Using grants</a> in the
/// <i>
/// <i>Key Management Service Developer Guide</i>
/// </i>. For examples of working with grants in several
/// programming languages, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html">Programming grants</a>. </p>
/// <p>
/// <b>Cross-account use</b>: Yes. You can retire a grant on a KMS key
/// in a different Amazon Web Services account.</p>
/// <p>
/// <b>Required permissions</b>: Permission to retire a grant is
/// determined primarily by the grant.
For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete">Retiring and revoking grants</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>CreateGrant</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListGrants</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListRetirableGrants</a> /// </p> /// </li> /// <li> /// <p> /// <a>RevokeGrant</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct RetireGrant { _private: (), } impl RetireGrant { /// Creates a new builder-style object to manufacture [`RetireGrantInput`](crate::input::RetireGrantInput) pub fn builder() -> crate::input::retire_grant_input::Builder { crate::input::retire_grant_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for RetireGrant { type Output = std::result::Result<crate::output::RetireGrantOutput, crate::error::RetireGrantError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_retire_grant_error(response) } else { crate::operation_deser::parse_retire_grant_response(response) } } } /// <p>Deletes the specified grant. You revoke a grant to terminate the permissions that the /// grant allows. For more /// information, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/managing-grants.html#grant-delete">Retiring and revoking grants</a> in /// the <i> /// <i>Key Management Service Developer Guide</i> /// </i>.</p> /// <p>When you create, retire, or revoke a grant, there might be a brief delay, usually less than five minutes, until the grant is available throughout KMS. This state is known as <i>eventual consistency</i>. For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency">Eventual consistency</a> in /// the <i> /// <i>Key Management Service Developer Guide</i> /// </i>. </p> /// <p>For detailed information about grants, including grant terminology, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/grants.html">Using grants</a> in the /// <i> /// <i>Key Management Service Developer Guide</i> /// </i>. For examples of working with grants in several /// programming languages, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html">Programming grants</a>. </p> /// <p> /// <b>Cross-account use</b>: Yes. 
To perform this operation on a KMS key in a different Amazon Web Services account, specify the key /// ARN in the value of the <code>KeyId</code> parameter.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:RevokeGrant</a> (key policy).</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>CreateGrant</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListGrants</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListRetirableGrants</a> /// </p> /// </li> /// <li> /// <p> /// <a>RetireGrant</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct RevokeGrant { _private: (), } impl RevokeGrant { /// Creates a new builder-style object to manufacture [`RevokeGrantInput`](crate::input::RevokeGrantInput) pub fn builder() -> crate::input::revoke_grant_input::Builder { crate::input::revoke_grant_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for RevokeGrant { type Output = std::result::Result<crate::output::RevokeGrantOutput, crate::error::RevokeGrantError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_revoke_grant_error(response) } else { crate::operation_deser::parse_revoke_grant_response(response) } } } /// <p>Schedules the deletion of a KMS key. By default, KMS applies a waiting /// period of 30 days, but you can specify a waiting period of 7-30 days. When this operation is /// successful, the key state of the KMS key changes to <code>PendingDeletion</code> and the key can't /// be used in any cryptographic operations. It remains in this state for the duration of the /// waiting period. Before the waiting period ends, you can use <a>CancelKeyDeletion</a> to cancel the deletion of the KMS key. After the waiting period ends, KMS deletes the KMS key, /// its key material, and all KMS data associated with it, including all aliases that refer to /// it.</p> /// <important> /// <p>Deleting a KMS key is a destructive and potentially dangerous operation. When a KMS key is /// deleted, all data that was encrypted under the KMS key is unrecoverable. (The only exception is /// a multi-Region replica key.) To prevent the use of a KMS key without deleting it, use <a>DisableKey</a>. </p> /// </important> /// <p>If you schedule deletion of a KMS key from a <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">custom key store</a>, when the waiting period /// expires, <code>ScheduleKeyDeletion</code> deletes the KMS key from KMS. Then KMS makes a best /// effort to delete the key material from the associated CloudHSM cluster. However, you might need /// to manually <a href="https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key">delete the orphaned key /// material</a> from the cluster and its backups.</p> /// <p>You can schedule the deletion of a multi-Region primary key and its replica keys at any /// time. However, KMS will not delete a multi-Region primary key with existing replica keys. If /// you schedule the deletion of a primary key with replicas, its key state changes to /// <code>PendingReplicaDeletion</code> and it cannot be replicated or used in cryptographic /// operations. This status can continue indefinitely. 
When the last of its replica keys is
/// deleted (not just scheduled), the key state of the primary key changes to
/// <code>PendingDeletion</code> and its waiting period (<code>PendingWindowInDays</code>)
/// begins. For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html">Deleting multi-Region keys</a> in the <i>Key Management Service Developer Guide</i>. </p>
/// <p>For more information about scheduling a KMS key for deletion, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html">Deleting KMS keys</a> in the
/// <i>Key Management Service Developer Guide</i>.</p>
/// <p>The KMS key that you use for this operation must be in a compatible key state. For
/// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>
/// <b>Cross-account use</b>: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.</p>
/// <p>
/// <b>Required permissions</b>: kms:ScheduleKeyDeletion (key policy)</p>
/// <p>
/// <b>Related operations</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <a>CancelKeyDeletion</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>DisableKey</a>
/// </p>
/// </li>
/// </ul>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct ScheduleKeyDeletion {
    _private: (),
}
impl ScheduleKeyDeletion {
    /// Creates a new builder-style object to manufacture [`ScheduleKeyDeletionInput`](crate::input::ScheduleKeyDeletionInput)
    pub fn builder() -> crate::input::schedule_key_deletion_input::Builder {
        crate::input::schedule_key_deletion_input::Builder::default()
    }
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl smithy_http::response::ParseStrictResponse for ScheduleKeyDeletion {
    type Output = std::result::Result<
        crate::output::ScheduleKeyDeletionOutput,
        crate::error::ScheduleKeyDeletionError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_schedule_key_deletion_error(response)
        } else {
            crate::operation_deser::parse_schedule_key_deletion_response(response)
        }
    }
}
/// <p>Creates a <a href="https://en.wikipedia.org/wiki/Digital_signature">digital
/// signature</a> for a message or message digest by using the private key in an asymmetric KMS key. To verify the signature, use the <a>Verify</a> operation, or use the public
/// key in the same asymmetric KMS key outside of KMS. For information about symmetric and asymmetric KMS keys, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using Symmetric and Asymmetric KMS keys</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>Digital signatures are generated and verified by using an asymmetric key pair, such as an RSA
/// or ECC pair that is represented by an asymmetric KMS key. The key owner (or
/// an authorized user) uses their private key to sign a message. Anyone with the public key can
/// verify that the message was signed with that particular private key and that the message
/// hasn't changed since it was signed. </p>
/// <p>To use the <code>Sign</code> operation, provide the following information:</p>
/// <ul>
/// <li>
/// <p>Use the <code>KeyId</code> parameter to identify an asymmetric KMS key with a
/// <code>KeyUsage</code> value of <code>SIGN_VERIFY</code>.
To get the /// <code>KeyUsage</code> value of a KMS key, use the <a>DescribeKey</a> operation. /// The caller must have <code>kms:Sign</code> permission on the KMS key.</p> /// </li> /// <li> /// <p>Use the <code>Message</code> parameter to specify the message or message digest to /// sign. You can submit messages of up to 4096 bytes. To sign a larger message, generate a /// hash digest of the message, and then provide the hash digest in the <code>Message</code> /// parameter. To indicate whether the message is a full message or a digest, use the /// <code>MessageType</code> parameter.</p> /// </li> /// <li> /// <p>Choose a signing algorithm that is compatible with the KMS key. </p> /// </li> /// </ul> /// <important> /// <p>When signing a message, be sure to record the KMS key and the signing algorithm. This /// information is required to verify the signature.</p> /// </important> /// <p>To verify the signature that this operation generates, use the <a>Verify</a> /// operation. Or use the <a>GetPublicKey</a> operation to download the public key and /// then use the public key to verify the signature outside of KMS. </p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify /// the key ARN or alias ARN in the value of the <code>KeyId</code> parameter.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:Sign</a> (key policy)</p> /// <p> /// <b>Related operations</b>: <a>Verify</a> /// </p> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct Sign { _private: (), } impl Sign { /// Creates a new builder-style object to manufacture [`SignInput`](crate::input::SignInput) pub fn builder() -> crate::input::sign_input::Builder { crate::input::sign_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for Sign { type Output = std::result::Result<crate::output::SignOutput, crate::error::SignError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_sign_error(response) } else { crate::operation_deser::parse_sign_response(response) } } } /// <p>Adds or edits tags on a <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk">customer managed key</a>.</p> /// <note> /// <p>Tagging or untagging a KMS key can allow or deny permission to the KMS key. For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/abac.html">Using ABAC in KMS</a> in the <i>Key Management Service Developer Guide</i>.</p> /// </note> /// <p>Each tag consists of a tag key and a tag value, both of which are case-sensitive strings. /// The tag value can be an empty (null) string. To add a tag, specify a new tag key and a tag /// value. 
To edit a tag, specify an existing tag key and a new tag value.</p> /// <p>You can use this operation to tag a <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk">customer managed key</a>, but you cannot /// tag an <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk">Amazon Web Services managed key</a>, an <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk">Amazon Web Services owned key</a>, a <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#keystore-concept">custom key store</a>, or /// an <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#alias-concept">alias</a>.</p> /// <p>You can also add tags to a KMS key while creating it (<a>CreateKey</a>) or replicating it (<a>ReplicateKey</a>).</p> /// <p>For information about using tags in KMS, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html">Tagging keys</a>. For general information about /// tags, including the format and syntax, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services resources</a> in the <i>Amazon /// Web Services General Reference</i>. </p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. </p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:TagResource</a> (key policy)</p> /// <p> /// <b>Related operations</b> /// </p> /// <ul> /// <li> /// <p> /// <a>CreateKey</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListResourceTags</a> /// </p> /// </li> /// <li> /// <p> /// <a>ReplicateKey</a> /// </p> /// </li> /// <li> /// <p> /// <a>UntagResource</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct TagResource { _private: (), } impl TagResource { /// Creates a new builder-style object to manufacture [`TagResourceInput`](crate::input::TagResourceInput) pub fn builder() -> crate::input::tag_resource_input::Builder { crate::input::tag_resource_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for TagResource { type Output = std::result::Result<crate::output::TagResourceOutput, crate::error::TagResourceError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_tag_resource_error(response) } else { crate::operation_deser::parse_tag_resource_response(response) } } } /// <p>Deletes tags from a <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk">customer managed key</a>. To delete a tag, /// specify the tag key and the KMS key.</p> /// <note> /// <p>Tagging or untagging a KMS key can allow or deny permission to the KMS key. 
For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/abac.html">Using ABAC in KMS</a> in the <i>Key Management Service Developer Guide</i>.</p> /// </note> /// <p>When it succeeds, the <code>UntagResource</code> operation doesn't return any output. /// Also, if the specified tag key isn't found on the KMS key, it doesn't throw an exception or return /// a response. To confirm that the operation worked, use the <a>ListResourceTags</a> operation.</p> /// <p>For information about using tags in KMS, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html">Tagging keys</a>. For general information about /// tags, including the format and syntax, see <a href="https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html">Tagging Amazon Web Services resources</a> in the <i>Amazon /// Web Services General Reference</i>. </p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account.</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:UntagResource</a> (key policy)</p> /// <p> /// <b>Related operations</b> /// </p> /// <ul> /// <li> /// <p> /// <a>CreateKey</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListResourceTags</a> /// </p> /// </li> /// <li> /// <p> /// <a>ReplicateKey</a> /// </p> /// </li> /// <li> /// <p> /// <a>TagResource</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct UntagResource { _private: (), } impl UntagResource { /// Creates a new builder-style object to manufacture [`UntagResourceInput`](crate::input::UntagResourceInput) pub fn builder() -> crate::input::untag_resource_input::Builder { crate::input::untag_resource_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for UntagResource { type Output = std::result::Result<crate::output::UntagResourceOutput, crate::error::UntagResourceError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_untag_resource_error(response) } else { crate::operation_deser::parse_untag_resource_response(response) } } } /// <p>Associates an existing KMS alias with a different KMS key. Each alias /// is associated with only one KMS key at a time, although a KMS key can have multiple aliases. The alias /// and the KMS key must be in the same Amazon Web Services account and Region.</p> /// <note> /// <p>Adding, deleting, or updating an alias can allow or deny permission to the KMS key. For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/abac.html">Using ABAC in KMS</a> in the <i>Key Management Service Developer Guide</i>.</p> /// </note> /// <p>The current and new KMS key must be the same type (both symmetric or both asymmetric), and /// they must have the same key usage (<code>ENCRYPT_DECRYPT</code> or <code>SIGN_VERIFY</code>). /// This restriction prevents errors in code that uses aliases. 
If you must assign an alias to a /// different type of KMS key, use <a>DeleteAlias</a> to delete the old alias and <a>CreateAlias</a> to create a new alias.</p> /// <p>You cannot use <code>UpdateAlias</code> to change an alias name. To change an alias name, /// use <a>DeleteAlias</a> to delete the old alias and <a>CreateAlias</a> to /// create a new alias.</p> /// <p>Because an alias is not a property of a KMS key, you can create, update, and delete the /// aliases of a KMS key without affecting the KMS key. Also, aliases do not appear in the response from /// the <a>DescribeKey</a> operation. To get the aliases of all KMS keys in the account, /// use the <a>ListAliases</a> operation. </p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. </p> /// <p> /// <b>Required permissions</b> /// </p> /// <ul> /// <li> /// <p> /// <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:UpdateAlias</a> on the alias (IAM policy).</p> /// </li> /// <li> /// <p> /// <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:UpdateAlias</a> on the current KMS key (key policy).</p> /// </li> /// <li> /// <p> /// <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:UpdateAlias</a> on the new KMS key (key policy).</p> /// </li> /// </ul> /// <p>For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access">Controlling access to aliases</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>CreateAlias</a> /// </p> /// </li> /// <li> /// <p> /// <a>DeleteAlias</a> /// </p> /// </li> /// <li> /// <p> /// <a>ListAliases</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct UpdateAlias { _private: (), } impl UpdateAlias { /// Creates a new builder-style object to manufacture [`UpdateAliasInput`](crate::input::UpdateAliasInput) pub fn builder() -> crate::input::update_alias_input::Builder { crate::input::update_alias_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for UpdateAlias { type Output = std::result::Result<crate::output::UpdateAliasOutput, crate::error::UpdateAliasError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_update_alias_error(response) } else { crate::operation_deser::parse_update_alias_response(response) } } } /// <p>Changes the properties of a custom key store. Use the <code>CustomKeyStoreId</code> /// parameter to identify the custom key store you want to edit. Use the remaining parameters to /// change the properties of the custom key store.</p> /// <p>You can only update a custom key store that is disconnected. To disconnect the custom key /// store, use <a>DisconnectCustomKeyStore</a>. To reconnect the custom key store after /// the update completes, use <a>ConnectCustomKeyStore</a>. 
To find the connection
/// state of a custom key store, use the <a>DescribeCustomKeyStores</a>
/// operation.</p>
/// <p>Use the parameters of <code>UpdateCustomKeyStore</code> to edit your keystore
/// settings.</p>
/// <ul>
/// <li>
/// <p>Use the <b>NewCustomKeyStoreName</b> parameter to change the
/// friendly name of the custom key store to the value that you specify.</p>
/// <p> </p>
/// </li>
/// <li>
/// <p>Use the <b>KeyStorePassword</b> parameter to tell KMS the
/// current password of the <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser">
/// <code>kmsuser</code> crypto
/// user (CU)</a> in the associated CloudHSM cluster. You can use this parameter to <a href="https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-password">fix
/// connection failures</a> that occur when KMS cannot log into the associated cluster
/// because the <code>kmsuser</code> password has changed. This value does not change the
/// password in the CloudHSM cluster.</p>
/// <p> </p>
/// </li>
/// <li>
/// <p>Use the <b>CloudHsmClusterId</b> parameter to associate the
/// custom key store with a different, but related, CloudHSM cluster. You can use this parameter
/// to repair a custom key store if its CloudHSM cluster becomes corrupted or is deleted, or when
/// you need to create or restore a cluster from a backup. </p>
/// </li>
/// </ul>
/// <p>If the operation succeeds, it returns a JSON object with no
/// properties.</p>
/// <p>This operation is part of the <a href="https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html">Custom Key Store feature</a> in KMS, which
/// combines the convenience and extensive integration of KMS with the isolation and control of a
/// single-tenant key store.</p>
/// <p>
/// <b>Cross-account use</b>: No. You cannot perform this operation on a custom key store in a different Amazon Web Services account.
</p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:UpdateCustomKeyStore</a> (IAM policy)</p> /// <p> /// <b>Related operations:</b> /// </p> /// <ul> /// <li> /// <p> /// <a>ConnectCustomKeyStore</a> /// </p> /// </li> /// <li> /// <p> /// <a>CreateCustomKeyStore</a> /// </p> /// </li> /// <li> /// <p> /// <a>DeleteCustomKeyStore</a> /// </p> /// </li> /// <li> /// <p> /// <a>DescribeCustomKeyStores</a> /// </p> /// </li> /// <li> /// <p> /// <a>DisconnectCustomKeyStore</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct UpdateCustomKeyStore { _private: (), } impl UpdateCustomKeyStore { /// Creates a new builder-style object to manufacture [`UpdateCustomKeyStoreInput`](crate::input::UpdateCustomKeyStoreInput) pub fn builder() -> crate::input::update_custom_key_store_input::Builder { crate::input::update_custom_key_store_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for UpdateCustomKeyStore { type Output = std::result::Result< crate::output::UpdateCustomKeyStoreOutput, crate::error::UpdateCustomKeyStoreError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_update_custom_key_store_error(response) } else { crate::operation_deser::parse_update_custom_key_store_response(response) } } } /// <p>Updates the description of a KMS key. To see the description of a KMS key, /// use <a>DescribeKey</a>. </p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: No. You cannot perform this operation on a KMS key in a different Amazon Web Services account. </p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:UpdateKeyDescription</a> (key policy)</p> /// <p> /// <b>Related operations</b> /// </p> /// <ul> /// <li> /// <p> /// <a>CreateKey</a> /// </p> /// </li> /// <li> /// <p> /// <a>DescribeKey</a> /// </p> /// </li> /// </ul> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct UpdateKeyDescription { _private: (), } impl UpdateKeyDescription { /// Creates a new builder-style object to manufacture [`UpdateKeyDescriptionInput`](crate::input::UpdateKeyDescriptionInput) pub fn builder() -> crate::input::update_key_description_input::Builder { crate::input::update_key_description_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for UpdateKeyDescription { type Output = std::result::Result< crate::output::UpdateKeyDescriptionOutput, crate::error::UpdateKeyDescriptionError, >; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_update_key_description_error(response) } else { crate::operation_deser::parse_update_key_description_response(response) } } } /// <p>Changes the primary key of a multi-Region key. 
</p>
/// <p>This operation changes the replica key in the specified Region to a primary key and
/// changes the former primary key to a replica key. For example, suppose you have a primary key
/// in <code>us-east-1</code> and a replica key in <code>eu-west-2</code>. If you run
/// <code>UpdatePrimaryRegion</code> with a <code>PrimaryRegion</code> value of
/// <code>eu-west-2</code>, the primary key is now the key in <code>eu-west-2</code>, and the
/// key in <code>us-east-1</code> becomes a replica key. For details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-update">Updating the primary Region</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>This operation supports <i>multi-Region keys</i>, a KMS feature that lets you create multiple
/// interoperable KMS keys in different Amazon Web Services Regions. Because these KMS keys have the same key ID, key
/// material, and other metadata, you can use them interchangeably to encrypt data in one Amazon Web Services Region and decrypt
/// it in a different Amazon Web Services Region without re-encrypting the data or making a cross-Region call. For more information about multi-Region keys, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html">Using multi-Region keys</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>The <i>primary key</i> of a multi-Region key is the source for properties
/// that are always shared by primary and replica keys, including the key material, <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id">key ID</a>, <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec">key spec</a>, <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage">key usage</a>, <a href="https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin">key material
/// origin</a>, and <a href="https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html">automatic
/// key rotation</a>. It's the only key that can be replicated. You cannot <a href="https://docs.aws.amazon.com/kms/latest/APIReference/API_ScheduleKeyDeletion.html">delete the primary
/// key</a> until all replica keys are deleted.</p>
/// <p>The key ID and primary Region that you specify uniquely identify the replica key that will
/// become the primary key. The primary Region must already have a replica key. This operation
/// does not create a KMS key in the specified Region. To find the replica keys, use the <a>DescribeKey</a> operation on the primary key or any replica key. To create a replica
/// key, use the <a>ReplicateKey</a> operation.</p>
/// <p>You can run this operation while using the affected multi-Region keys in cryptographic
/// operations. This operation should not delay, interrupt, or cause failures in cryptographic
/// operations. </p>
/// <p>Even after this operation completes, the process of updating the primary Region might
/// still be in progress for a few more seconds. Operations such as <code>DescribeKey</code> might
/// display both the old and new primary keys as replicas. The old and new primary keys have a
/// transient key state of <code>Updating</code>. The original key state is restored when the
/// update is complete. While the key state is <code>Updating</code>, you can use the keys in
/// cryptographic operations, but you cannot replicate the new primary key or perform certain
/// management operations, such as enabling or disabling these keys. For details about the
/// <code>Updating</code> key state, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state:
/// Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>This operation does not return any output. To verify that the primary key is changed, use the
/// <a>DescribeKey</a> operation.</p>
/// <p>
/// <b>Cross-account use</b>: No. You cannot use this operation in a
/// different Amazon Web Services account. </p>
/// <p>
/// <b>Required permissions</b>: </p>
/// <ul>
/// <li>
/// <p>
/// <code>kms:UpdatePrimaryRegion</code> on the current primary key (in the primary key's
/// Region). Include this permission in the primary key's key policy.</p>
/// </li>
/// <li>
/// <p>
/// <code>kms:UpdatePrimaryRegion</code> on the current replica key (in the replica key's
/// Region). Include this permission in the replica key's key policy.</p>
/// </li>
/// </ul>
/// <p>
/// <b>Related operations</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <a>CreateKey</a>
/// </p>
/// </li>
/// <li>
/// <p>
/// <a>ReplicateKey</a>
/// </p>
/// </li>
/// </ul>
#[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)]
pub struct UpdatePrimaryRegion {
    _private: (),
}
impl UpdatePrimaryRegion {
    /// Creates a new builder-style object to manufacture [`UpdatePrimaryRegionInput`](crate::input::UpdatePrimaryRegionInput)
    pub fn builder() -> crate::input::update_primary_region_input::Builder {
        crate::input::update_primary_region_input::Builder::default()
    }
    pub fn new() -> Self {
        Self { _private: () }
    }
}
impl smithy_http::response::ParseStrictResponse for UpdatePrimaryRegion {
    type Output = std::result::Result<
        crate::output::UpdatePrimaryRegionOutput,
        crate::error::UpdatePrimaryRegionError,
    >;
    fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output {
        if !response.status().is_success() && response.status().as_u16() != 200 {
            crate::operation_deser::parse_update_primary_region_error(response)
        } else {
            crate::operation_deser::parse_update_primary_region_response(response)
        }
    }
}
/// <p>Verifies a digital signature that was generated by the <a>Sign</a> operation. </p>
/// <p>Verification confirms that an authorized user signed the message with the specified KMS key
/// and signing algorithm, and the message hasn't changed since it was signed. If the signature is
/// verified, the value of the <code>SignatureValid</code> field in the response is
/// <code>True</code>. If the signature verification fails, the <code>Verify</code> operation
/// fails with a <code>KMSInvalidSignatureException</code> exception.</p>
/// <p>A digital signature is generated by using the private key in an asymmetric KMS key. The
/// signature is verified by using the public key in the same asymmetric KMS key.
/// For information about symmetric and asymmetric KMS keys, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using Symmetric and Asymmetric KMS keys</a> in the <i>Key Management Service Developer Guide</i>.</p>
/// <p>To verify a digital signature, you can use the <code>Verify</code> operation.
Specify the /// same asymmetric KMS key, message, and signing algorithm that were used to produce the /// signature.</p> /// <p>You can also verify the digital signature by using the public key of the KMS key outside of /// KMS. Use the <a>GetPublicKey</a> operation to download the public key in the /// asymmetric KMS key and then use the public key to verify the signature outside of KMS. The /// advantage of using the <code>Verify</code> operation is that it is performed within KMS. As /// a result, it's easy to call, the operation is performed within the FIPS boundary, it is logged /// in CloudTrail, and you can use key policy and IAM policy to determine who is authorized to use /// the KMS key to verify signatures.</p> /// <p>The KMS key that you use for this operation must be in a compatible key state. For /// details, see <a href="https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html">Key state: Effect on your KMS key</a> in the <i>Key Management Service Developer Guide</i>.</p> /// <p> /// <b>Cross-account use</b>: Yes. To perform this operation with a KMS key in a different Amazon Web Services account, specify /// the key ARN or alias ARN in the value of the <code>KeyId</code> parameter. </p> /// <p> /// <b>Required permissions</b>: <a href="https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html">kms:Verify</a> (key policy)</p> /// <p> /// <b>Related operations</b>: <a>Sign</a> /// </p> #[derive(std::default::Default, std::clone::Clone, std::fmt::Debug)] pub struct Verify { _private: (), } impl Verify { /// Creates a new builder-style object to manufacture [`VerifyInput`](crate::input::VerifyInput) pub fn builder() -> crate::input::verify_input::Builder { crate::input::verify_input::Builder::default() } pub fn new() -> Self { Self { _private: () } } } impl smithy_http::response::ParseStrictResponse for Verify { type Output = std::result::Result<crate::output::VerifyOutput, crate::error::VerifyError>; fn parse(&self, response: &http::Response<bytes::Bytes>) -> Self::Output { if !response.status().is_success() && response.status().as_u16() != 200 { crate::operation_deser::parse_verify_error(response) } else { crate::operation_deser::parse_verify_response(response) } } }
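kms_sign_verify_example.py
# Editor's sketch (hypothetical file, not part of the generated SDK above):
# a minimal illustration of the Sign -> Verify flow that the doc comments
# describe, written against the Python SDK (boto3) for brevity. The key ARN
# is a placeholder and must point to an asymmetric KMS key whose KeyUsage is
# SIGN_VERIFY.
import boto3

kms = boto3.client("kms")
key_id = "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"  # placeholder

# Sign a message of up to 4096 bytes; larger messages should be hashed first
# and passed with MessageType="DIGEST".
signed = kms.sign(
    KeyId=key_id,
    Message=b"important message",
    MessageType="RAW",
    SigningAlgorithm="RSASSA_PKCS1_V1_5_SHA_256",
)

# Verify with the same key, message, and signing algorithm.
result = kms.verify(
    KeyId=key_id,
    Message=b"important message",
    MessageType="RAW",
    Signature=signed["Signature"],
    SigningAlgorithm="RSASSA_PKCS1_V1_5_SHA_256",
)
assert result["SignatureValid"]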
selenium_egencia.py
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time


class Egencia(object):

    def __init__(self, credentials):
        self.driver = webdriver.Remote('http://localhost:4444/wd/hub', webdriver.DesiredCapabilities.CHROME)
        self.__login(credentials)
        self.standard_wait_time = 10

    def close(self):
        self.driver.quit()

    def __login(self, credentials):
        # Go to the home page to log in
        self.driver.get("https://www.egencia.com/public/uk/")
        self.driver.find_element_by_id("login").click()
        self.driver.find_element_by_id("userName").send_keys(credentials["email"])
        self.driver.find_element_by_id("password").send_keys(credentials["password"])
        self.driver.find_element_by_id("authLoginSubmit").click()

    def get_double_points_offers(self, checkin, checkout, brand, lat, lon, maxspend):
        double_points_offers = []
        self.driver.get("https://www.egencia.co.uk/hotels/search?" + \
                        "lon=" + lon + "&lat=" + lat + \
                        "&start_date=" + checkin + \
                        "&end_date=" + checkout + \
                        "&hotel_name=" + brand)
        time.sleep(self.standard_wait_time)

        # Dismiss the modal dialog if one appears
        try:
            self.driver.find_elements_by_class_name("modal-close")[0].click()
        except Exception:
            pass
        time.sleep(self.standard_wait_time)

        available_hotels = self.driver.find_elements_by_class_name("hotel-available")
        for hotel in available_hotels:
            # Find the hotel name and make an empty entry
            hotel_name = hotel.find_elements_by_class_name("hotel-name-for-coworker-bookings")[0].get_attribute('innerHTML')
            #print("Hotel: {}".format(hotel_name))
            hotels_dp_offers = {"name": hotel_name, "offers": []}

            # Click on the hotel
            hotel.click()
            time.sleep(self.standard_wait_time)

            # Show all of the rates
            self.driver.find_element_by_id("hotel-details-view-all-rates-toggle").click()
            time.sleep(self.standard_wait_time)

            # Look through all the rates for a double points offer within budget
            for rate in self.driver.find_elements_by_class_name("rate-tile"):
                dp_offers = rate.find_elements_by_xpath('.//span[contains(text(), "2 X Points")]')
                dp_rate = float(rate.find_elements_by_class_name('price-details')[0].get_attribute('innerHTML').replace('£', ''))
                if len(dp_offers) > 0 and dp_rate <= maxspend:
                    description = dp_offers[0].get_attribute('innerHTML')
                    hotels_dp_offers["offers"].append({"description": description, "rate": dp_rate})

            # If there are offers then add to the payload
            if len(hotels_dp_offers["offers"]) > 0:
                double_points_offers.append(hotels_dp_offers)

            # Return to view all the hotels
            self.driver.find_element_by_id("hotel-details-close-button").click()

        return double_points_offers


def make_pretty_message(offers):
    html = ""
    for hotel in offers:
        html += "*{}*\n".format(hotel['name'])
        for offer in hotel["offers"]:
            html += "_{}\n".format(offer["description"])
            html += "£{0:.2f}_\n".format(offer["rate"])
        html += "\n"
    return html
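# Editor's sketch (hypothetical usage, appended for illustration): drives the
# scraper above end to end. The credentials, dates, coordinates, and budget
# are placeholders, and a Selenium server must be listening on
# localhost:4444, as Egencia.__init__ assumes.
if __name__ == "__main__":
    credentials = {"email": "user@example.com", "password": "correct-horse"}  # placeholders
    bot = Egencia(credentials)
    try:
        offers = bot.get_double_points_offers(
            checkin="2021-06-01",
            checkout="2021-06-02",
            brand="Hilton",
            lat="51.5074",   # strings, because they are concatenated into the URL
            lon="-0.1278",
            maxspend=150.0,
        )
        print(make_pretty_message(offers))
    finally:
        bot.close()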
demo_generators.py
def custom_range(min, max):
    # Note: `min` and `max` shadow the built-ins; kept here as in the demo.
    index = min
    while index <= max:
        yield index
        index += 1


it = custom_range(1, 2)
print(next(it))
print(next(it))

# A generator expression creates a generator object without defining a function.
print((x for x in range(3)))

even = filter(lambda x: x % 2 == 0, range(10))
for x in even:
    print(x)
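# Editor's sketch: custom_range is lazy, so values are produced only on
# demand; feeding it into a generator expression stays lazy too, and list()
# drains whatever has not been consumed yet.
squares = (x * x for x in custom_range(1, 5))
print(next(squares))  # 1
print(list(squares))  # [4, 9, 16, 25]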
error.js
const c = require('colors');

// NOTE: `reconnect` references `Node` and `this.nodes`, which are not defined in
// this file; `Node` must come from whichever Lavalink client library this bot
// uses, and the node list belongs to that library's manager rather than to
// this handler class.

module.exports = class {
    constructor (client) {
        this.client = client;
    }

    async execute (node, error) {
        // Logs (in Portuguese): "An error occurred on Node <id>. Error: <message>"
        console.log(c.red(`❌ [Lavalink] - Ocorreu um erro no Node ${node.identifier}.\nErro: ${error.message}`));

        if (error.message.startsWith('Unable to connect after')) this.reconnect(node);
    }

    async reconnect (node) {
        // Drop the failed node and replace it with a freshly configured one.
        node.disconnect();
        this.nodes.splice(this.nodes.indexOf(node), 1);

        const newNode = new Node(this, {
            id: String(node.identifier),
            hostname: node.options.hostname,
            port: node.options.port,
            password: node.options.password,
            maxRetryAttempts: 10,
            retryAttemptsInterval: 3000,
            secure: false,
            region: node.options.region
        });

        this.nodes.push(newNode);
        newNode.connect();
    }
};
processor.go
package heartbeats

import (
	"strconv"

	"kafmesh-example/internal/definitions/heartbeats"
	"kafmesh-example/internal/definitions/models/kafmesh/deviceId"
)

var _ heartbeats.HeartbeatEnricher_Processor = &Processor{}

// Processor enriches device details with customer information
type Processor struct{}

// NewProcessor creates a new processor
func NewProcessor() *Processor {
	return &Processor{}
}

// HandleDeviceIDHeartbeat handles device heartbeat input
func (p *Processor) HandleDeviceIDHeartbeat(ctx heartbeats.HeartbeatEnricher_ProcessorContext, message *deviceId.Heartbeat) error {
	customer := ctx.Join_DeviceIDCustomer()
	if customer == nil {
		return nil
	}

	customerDetails := ctx.Lookup_CustomerIDDetails(strconv.Itoa(int(customer.Id)))
	if customerDetails == nil {
		return nil
	}

	ctx.Output_DeviceIDEnrichedHeartbeat(ctx.Key(), &deviceId.EnrichedHeartbeat{
		Time:         message.Time,
		IsHealthy:    message.IsHealthy,
		CustomerId:   customer.Id,
		CustomerName: customerDetails.Name,
	})

	return nil
}
server.py
from flask import Flask, escape, request
from flask import send_file

from Graph.plot import Plot

app = Flask(__name__)


@app.route('/', methods=["POST"])
def hello():
    print(request.method)
    req_data = request.get_json()
    print(req_data)
    name = request.args.get("name", "World")
    return f'Hello, {escape(name)}!'


@app.route('/get_image', methods=["POST"])
def get_image():
    req_data = request.get_json()
    plot = Plot()
    plot.labels_x = list(req_data["labels_x"])
    plot.labels_y = req_data["label_y"]
    plot.title = req_data["title"]
    plot.legend = list(req_data["legend"])
    plot.valueGroup1 = list(req_data["valueGroup"][0])
    plot.valueGroup2 = list(req_data["valueGroup"][1])
    plot.filename = req_data["filename"]
    if req_data["type"] == "1":
        plot.createGroupBarPlot()
    elif req_data["type"] == "2":
        plot.createPieChart()
    return send_file(req_data["filename"], mimetype='image/png')
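# Editor's sketch (hypothetical client, for illustration): posts a payload
# shaped the way the /get_image handler above reads it. The host/port and
# all values are placeholders.
import requests

payload = {
    "labels_x": ["Q1", "Q2"],
    "label_y": "Revenue",
    "title": "Quarterly revenue",
    "legend": ["2019", "2020"],
    "valueGroup": [[1, 2], [3, 4]],
    "filename": "chart.png",
    "type": "1",  # "1" = grouped bar plot, "2" = pie chart
}
resp = requests.post("http://localhost:5000/get_image", json=payload)
with open("chart.png", "wb") as fh:
    fh.write(resp.content)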
yt8m_input_test.py
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

from absl import logging
from absl.testing import parameterized
import numpy as np
import tensorflow as tf

from official.core import input_reader
from official.projects.yt8m.configs import yt8m as yt8m_configs
from official.projects.yt8m.dataloaders import utils
from official.projects.yt8m.dataloaders import yt8m_input
from official.vision.dataloaders import tfexample_utils


class Yt8mInputTest(parameterized.TestCase, tf.test.TestCase):

  def setUp(self):
    super().setUp()
    self._model_dir = os.path.join(self.get_temp_dir(), 'model_dir')
    tf.io.gfile.makedirs(self._model_dir)

    data_dir = os.path.join(self.get_temp_dir(), 'data')
    tf.io.gfile.makedirs(data_dir)
    self.data_path = os.path.join(data_dir, 'data.tfrecord')
    self.num_segment = 6
    examples = [utils.MakeYt8mExample(self.num_segment) for _ in range(8)]
    tfexample_utils.dump_to_tfrecord(self.data_path, tf_examples=examples)

  def create_input_reader(self, params):
    decoder = yt8m_input.Decoder(input_params=params)
    decoder_fn = decoder.decode
    parser = yt8m_input.Parser(input_params=params)
    parser_fn = parser.parse_fn(params.is_training)
    postprocess = yt8m_input.PostBatchProcessor(input_params=params)
    postprocess_fn = postprocess.post_fn
    transform_batch = yt8m_input.TransformBatcher(input_params=params)
    batch_fn = transform_batch.batch_fn

    return input_reader.InputReader(
        params,
        dataset_fn=tf.data.TFRecordDataset,
        decoder_fn=decoder_fn,
        parser_fn=parser_fn,
        postprocess_fn=postprocess_fn,
        transform_and_batch_fn=batch_fn)
@parameterized.parameters((True,), (False,)) def test_read_video_level_input(self, include_video_id): params = yt8m_configs.yt8m(is_training=False) params.global_batch_size = 4 params.segment_labels = False params.input_path = self.data_path params.include_video_id = include_video_id reader = self.create_input_reader(params) dataset = reader.read() iterator = iter(dataset) example = next(iterator) for k, v in example.items(): logging.info('DEBUG read example %r %r %r', k, v.shape, type(v)) if include_video_id: self.assertCountEqual( ['video_matrix', 'labels', 'num_frames', 'video_ids'], example.keys()) else: self.assertCountEqual(['video_matrix', 'labels', 'num_frames'], example.keys()) batch_size = params.global_batch_size self.assertEqual( example['video_matrix'].shape.as_list(), [batch_size, params.max_frames, sum(params.feature_sizes)]) self.assertEqual(example['labels'].shape.as_list(), [batch_size, params.num_classes]) self.assertEqual(example['num_frames'].shape.as_list(), [batch_size, 1]) if include_video_id: self.assertEqual(example['video_ids'].shape.as_list(), [batch_size, 1]) @parameterized.parameters((True,), (False,)) def test_read_segement_level_input(self, include_video_id): params = yt8m_configs.yt8m(is_training=False) params.global_batch_size = 4 params.segment_labels = True params.input_path = self.data_path params.include_video_id = include_video_id reader = self.create_input_reader(params) dataset = reader.read() iterator = iter(dataset) example = next(iterator) for k, v in example.items(): logging.info('DEBUG read example %r %r %r', k, v.shape, type(v)) if include_video_id: self.assertCountEqual([ 'video_matrix', 'labels', 'num_frames', 'label_weights', 'video_ids' ], example.keys()) else: self.assertCountEqual( ['video_matrix', 'labels', 'num_frames', 'label_weights'], example.keys()) batch_size = params.global_batch_size * self.num_segment self.assertEqual( example['video_matrix'].shape.as_list(), [batch_size, params.segment_size, sum(params.feature_sizes)]) self.assertEqual(example['labels'].shape.as_list(), [batch_size, params.num_classes]) self.assertEqual(example['num_frames'].shape.as_list(), [batch_size, 1]) self.assertEqual(example['label_weights'].shape.as_list(), [batch_size, params.num_classes]) if include_video_id: self.assertEqual(example['video_ids'].shape.as_list(), [batch_size]) @parameterized.parameters((True,), (False,)) def test_read_video_level_float_input(self, include_video_id): data_dir = os.path.join(self.get_temp_dir(), 'data2') tf.io.gfile.makedirs(data_dir) data_path = os.path.join(data_dir, 'data2.tfrecord') examples = [ utils.MakeExampleWithFloatFeatures(self.num_segment) for _ in range(8) ] tfexample_utils.dump_to_tfrecord(data_path, tf_examples=examples) params = yt8m_configs.yt8m(is_training=False) params.global_batch_size = 4 params.segment_labels = False params.input_path = data_path params.num_frames = 2 params.max_frames = 2 params.feature_names = ('VIDEO_EMBEDDING/context_feature/floats', 'FEATURE/feature/floats') params.feature_sources = ('context', 'feature') params.feature_dtypes = ('float32', 'float32') params.feature_sizes = (256, 2048) params.feature_from_bytes = (False, False) params.include_video_id = include_video_id reader = self.create_input_reader(params) dataset = reader.read() iterator = iter(dataset) example = next(iterator) for k, v in example.items(): logging.info('DEBUG read example %r %r %r', k, v.shape, type(v)) logging.info('DEBUG read example %r', example['video_matrix'][0, 0, :]) if include_video_id: 
self.assertCountEqual( ['video_matrix', 'labels', 'num_frames', 'video_ids'], example.keys()) else: self.assertCountEqual(['video_matrix', 'labels', 'num_frames'], example.keys()) # Check tensor values. expected_context = examples[0].context.feature[ 'VIDEO_EMBEDDING/context_feature/floats'].float_list.value expected_feature = examples[0].feature_lists.feature_list[ 'FEATURE/feature/floats'].feature[0].float_list.value expected_labels = examples[0].context.feature[ params.label_field].int64_list.value self.assertAllEqual( expected_feature, example['video_matrix'][0, 0, params.feature_sizes[0]:]) self.assertAllEqual( expected_context, example['video_matrix'][0, 0, :params.feature_sizes[0]]) self.assertAllEqual( np.nonzero(example['labels'][0, :].numpy())[0], expected_labels) # Check tensor shape. batch_size = params.global_batch_size self.assertEqual( example['video_matrix'].shape.as_list(), [batch_size, params.max_frames, sum(params.feature_sizes)]) self.assertEqual(example['labels'].shape.as_list(), [batch_size, params.num_classes]) self.assertEqual(example['num_frames'].shape.as_list(), [batch_size, 1]) if include_video_id: self.assertEqual(example['video_ids'].shape.as_list(), [batch_size, 1]) if __name__ == '__main__': tf.test.main()
specialization.py
from numba import jit, int32


@jit(int32(int32, int32))
def f(x, y):
    # A somewhat trivial example
    return x + y


print(f)
# print(f(123, 123**30))


@jit(nopython=True)
def f(x, y):
    return x + y
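# Editor's sketch: the lazily compiled f above specializes per argument type;
# each new type combination triggers a fresh compilation, which can be
# inspected via the dispatcher's .signatures attribute.
f(1, 2)      # compiles an (int64, int64) specialization
f(1.0, 2.0)  # compiles a (float64, float64) specialization
print(f.signatures)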
roomConfig.go
package common

import (
	pb "gameServer/src/grpc"
	"strconv"
	"strings"
	// "fmt"
)

// GetRoomConfig returns the value of one of the room's config entries
func GetRoomConfig(roomInfo *pb.RoomInfo, name string) string {
	/*config := Configer.GetGameConfig(roomInfo.GetGameType(), roomInfo.GetGameScene(), name)
	if config == nil {
		return ""
	}
	return config.GetValue()*/

	for _, oneConfig := range roomInfo.GetConfig() {
		if oneConfig.GetName() == name {
			return oneConfig.GetValue()
		}
	}

	return ""
}

// UpdateRoomConfig updates one of the room's config entries
func UpdateRoomConfig(roomInfo *pb.RoomInfo, name string, value string) {
	for _, oneConfig := range roomInfo.GetConfig() {
		if oneConfig.GetName() == name {
			oneConfig.Value = value
			return
		}
	}

	newConfig := &pb.GameConfig{}
	newConfig.Name = name
	newConfig.Value = value
	roomInfo.Config = append(roomInfo.GetConfig(), newConfig)
}

// CustomRoomConfig applies a custom room config entry; values must be validated before being synced
func CustomRoomConfig(roomInfo *pb.RoomInfo, name string, value string) *pb.ErrorMessage {
	switch name {
	case "Ante":
		anteInt, err := strconv.Atoi(value)
		if err != nil {
			LogError("CustomRoomConfig Ante Atoi has err", err)
			return GetGrpcErrorMessage(pb.ErrorCode_AnteConfigError, "")
		}
		if anteInt < 1000 || anteInt > 1000000 {
			LogError("CustomRoomConfig Ante anteInt out range")
			return GetGrpcErrorMessage(pb.ErrorCode_AnteConfigError, "")
		}
		UpdateRoomConfig(roomInfo, name, value)
	case "EnterGoldBean":
		enterGoldBeanInt, err := strconv.Atoi(value)
		if err != nil {
			LogError("CustomRoomConfig EnterGoldBean Atoi has err", err)
			return GetGrpcErrorMessage(pb.ErrorCode_EnterConfigError, "")
		}
		if enterGoldBeanInt < 1000 || enterGoldBeanInt > 100000000000 {
			LogError("CustomRoomConfig EnterGoldBean enterGoldBeanInt out range")
			return GetGrpcErrorMessage(pb.ErrorCode_EnterConfigError, "")
		}
		UpdateRoomConfig(roomInfo, name, value)
	case "MaxPlayer":
		maxPlayerInt, err := strconv.Atoi(value)
		if err != nil {
			LogError("CustomRoomConfig MaxPlayer Atoi has err", err)
			return GetGrpcErrorMessage(pb.ErrorCode_MaxPlayerConfigError, "")
		}
		if maxPlayerInt < 2 || maxPlayerInt > 9 {
			LogError("CustomRoomConfig MaxPlayer maxPlayerInt out range")
			return GetGrpcErrorMessage(pb.ErrorCode_MaxPlayerConfigError, "")
		}
		UpdateRoomConfig(roomInfo, name, value)
	case "PlayerStartNum": // minimum number of players needed before the game can start
		playerStartNumInt, err := strconv.Atoi(value)
		if err != nil {
			LogError("CustomRoomConfig PlayerStartNum Atoi has err", err)
			return GetGrpcErrorMessage(pb.ErrorCode_ServerError, "")
		}
		if playerStartNumInt < 2 || playerStartNumInt > 9 {
			LogError("CustomRoomConfig PlayerStartNum playerStartNumInt out range")
			return GetGrpcErrorMessage(pb.ErrorCode_ServerError, "")
		}
		UpdateRoomConfig(roomInfo, name, value)
	case "RoomAllPlayNum": // total number of rounds that can be played in the room
		gameName := roomInfo.GetGameType().String()
		roomPlayStr := Configer.GetGlobal(gameName + "RoomPlayNum").GetValue()
		masterPayStr := Configer.GetGlobal(gameName + "MasterPayNum").GetValue()
		aAPayStr := Configer.GetGlobal(gameName + "AAPayNum").GetValue()
		roomPlayArr := strings.Split(roomPlayStr, ",")
		masterPayArr := strings.Split(masterPayStr, ",")
		aAPayArr := strings.Split(aAPayStr, ",")
		if len(roomPlayArr) != len(masterPayArr) || len(masterPayArr) != len(aAPayArr) {
			LogError("CustomRoomConfig RoomAllPlayNum config err")
			return GetGrpcErrorMessage(pb.ErrorCode_ServerError, "")
		}
		wantIndex := IndexOf(roomPlayArr, value)
		if wantIndex == -1 {
			LogError("CustomRoomConfig RoomAllPlayNum invalid RoomAllPlayNum")
			return GetGrpcErrorMessage(pb.ErrorCode_RoomAllPlayNumConfigError, "")
		}
		roomPlayInt, err := strconv.Atoi(roomPlayArr[wantIndex])
		if err != nil {
			LogError("CustomRoomConfig RoomAllPlayNum Atoi roomPlayArr has err", err)
			return GetGrpcErrorMessage(pb.ErrorCode_ServerError, "")
		}
		masterPayInt, err :=
strconv.Atoi(masterPayArr[wantIndex]) if err != nil { LogError("CustomRoomConfig RoomAllPlayNum Atoi masterPayArr has err", err) return GetGrpcErrorMessage(pb.ErrorCode_ServerError, "") } aAPayInt, err := strconv.Atoi(aAPayArr[wantIndex]) if err != nil { LogError("CustomRoomConfig RoomAllPlayNum Atoi aAPayArr has err", err) return GetGrpcErrorMessage(pb.ErrorCode_ServerError, "") } roomInfo.RoomAllPlayNum = int32(roomPlayInt) roomInfo.MasterPayNum = int64(masterPayInt) roomInfo.AaPayNum = int64(aAPayInt) case "PayType": payTypeInt, err := strconv.Atoi(value) if err != nil { LogError("CustomRoomConfig PayType Atoi has err", err) return GetGrpcErrorMessage(pb.ErrorCode_PayTypeConfigError, "") } payType := pb.PayType(payTypeInt) if payType == pb.PayType_PayType_None { LogError("CustomRoomConfig PayType payType has err") return GetGrpcErrorMessage(pb.ErrorCode_PayTypeConfigError, "") } roomInfo.PayType = payType } return nil }
Info *pb.Room
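Read together with the middle cell above, the two helpers form a small upsert API over pb.RoomInfo: GetRoomConfig scans the config slice by name, and UpdateRoomConfig overwrites an existing pb.GameConfig entry in place or appends a new one. A minimal sketch of that behaviour, assuming it runs inside the same common package (the function name, the fmt import, and the literal values are illustrative, not part of the original row):

// Illustrative only; assumes the common package above (exampleRoomConfigUpsert
// and the "fmt" import are not part of the original row).
func exampleRoomConfigUpsert() {
	room := &pb.RoomInfo{}
	UpdateRoomConfig(room, "Ante", "1000")   // no "Ante" entry yet, so one is appended
	UpdateRoomConfig(room, "Ante", "2000")   // entry exists, so its Value is overwritten in place
	fmt.Println(GetRoomConfig(room, "Ante")) // prints "2000"
}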
Unet.py
# -*- coding: utf-8 -*-
# @Time    : 2021/4/8 15:52
# @Author  : aurorazeng
# @File    : Unet.py
# @license: (C) Copyright 2021-2026, aurorazeng; No reproduction without permission.

"""
import numpy as np import torch import torch.nn as nn from torch.distributions.uniform import Uniform class ConvBlock(nn.Module): """two convolution layers with batch norm and leaky relu""" def __init__(self, in_channels, out_channels, dropout_p): super(ConvBlock, self).__init__() self.conv_conv = nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), nn.BatchNorm2d(out_channels), # nn.LeakyReLU(), nn.ReLU(), nn.Dropout(dropout_p), nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), nn.BatchNorm2d(out_channels), # nn.LeakyReLU() nn.ReLU() ) def forward(self, x): return self.conv_conv(x) class DownBlock(nn.Module): """Downsampling followed by ConvBlock""" def __init__(self, in_channels, out_channels, dropout_p): super(DownBlock, self).__init__() self.maxpool_conv = nn.Sequential( nn.MaxPool2d(2), ConvBlock(in_channels, out_channels, dropout_p) ) def forward(self, x): return self.maxpool_conv(x) class UpBlock(nn.Module): """Upssampling followed by ConvBlock""" def __init__(self, in_channels1, in_channels2, out_channels, dropout_p, bilinear=True): super(UpBlock, self).__init__() self.bilinear = bilinear if bilinear: self.conv1x1 = nn.Conv2d(in_channels1, in_channels2, kernel_size=1) self.up = nn.Upsample( scale_factor=2, mode='bilinear', align_corners=True) else: self.up = nn.ConvTranspose2d( in_channels1, in_channels2, kernel_size=2, stride=2) self.conv = ConvBlock(in_channels2 * 2, out_channels, dropout_p) def forward(self, x1, x2): if self.bilinear: x1 = self.conv1x1(x1) x1 = self.up(x1) x = torch.cat([x2, x1], dim=1) return self.conv(x) class Encoder(nn.Module): def __init__(self, params): super(Encoder, self).__init__() self.params = params self.in_chns = self.params['in_chns'] self.ft_chns = self.params['feature_chns'] self.n_class = self.params['class_num'] self.bilinear = self.params['bilinear'] self.dropout = self.params['dropout'] assert (len(self.ft_chns) == 5) self.in_conv = ConvBlock( self.in_chns, self.ft_chns[0], self.dropout[0]) self.down1 = DownBlock( self.ft_chns[0], self.ft_chns[1], self.dropout[1]) self.down2 = DownBlock( self.ft_chns[1], self.ft_chns[2], self.dropout[2]) self.down3 = DownBlock( self.ft_chns[2], self.ft_chns[3], self.dropout[3]) self.down4 = DownBlock( self.ft_chns[3], self.ft_chns[4], self.dropout[4]) def forward(self, x): x0 = self.in_conv(x) x1 = self.down1(x0) x2 = self.down2(x1) x3 = self.down3(x2) x4 = self.down4(x3) return [x0, x1, x2, x3, x4] class Decoder(nn.Module): def __init__(self, params): super(Decoder, self).__init__() self.params = params self.in_chns = self.params['in_chns'] self.ft_chns = self.params['feature_chns'] self.n_class = self.params['class_num'] self.bilinear = self.params['bilinear'] assert (len(self.ft_chns) == 5) self.up1 = UpBlock( self.ft_chns[4], self.ft_chns[3], self.ft_chns[3], dropout_p=0.0) self.up2 = UpBlock( self.ft_chns[3], self.ft_chns[2], self.ft_chns[2], dropout_p=0.0) self.up3 = UpBlock( self.ft_chns[2], self.ft_chns[1], self.ft_chns[1], dropout_p=0.0) self.up4 = UpBlock( self.ft_chns[1], self.ft_chns[0], self.ft_chns[0], dropout_p=0.0) self.out_conv = nn.Conv2d(self.ft_chns[0], self.n_class, kernel_size=1, padding=0) def forward(self, feature): x0 = feature[0] x1 = feature[1] x2 = feature[2] x3 = feature[3] x4 = feature[4] x = self.up1(x4, x3) x = self.up2(x, x2) x = self.up3(x, x1) x = self.up4(x, x0) output = self.out_conv(x) return output class UNet(nn.Module): def __init__(self, in_chns, class_num): super(UNet, self).__init__() params = {'in_chns': in_chns, # 
'feature_chns': [16, 32, 64, 128, 256], 'feature_chns': [32, 64, 128, 256, 512], 'dropout': [0, 0, 0, 0, 0], 'class_num': class_num, 'bilinear': False, 'acti_func': 'relu'} self.encoder = Encoder(params) self.decoder = Decoder(params) def forward(self, x): feature = self.encoder(x) output = self.decoder(feature) return output class UNetWithDrop(nn.Module): def __init__(self, in_chns, class_num): super(UNetWithDrop, self).__init__() params = {'in_chns': in_chns, # 'feature_chns': [16, 32, 64, 128, 256], 'feature_chns': [32, 64, 128, 256, 512], 'dropout': [0.05, 0.1, 0.2, 0.3, 0.5], 'class_num': class_num, 'bilinear': False, 'acti_func': 'relu'} self.encoder = Encoder(params) self.decoder = Decoder(params) def forward(self, x): feature = self.encoder(x) output = self.decoder(feature) return output
The implementation is borrowed from: https://github.com/HiLab-git/PyMIC
"""
from __future__ import division, print_function
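The encoder halves the spatial resolution four times (one MaxPool2d(2) per DownBlock) and the decoder mirrors that, so inputs whose height and width are divisible by 16 come back at full resolution with class_num channels. A minimal smoke test of the row above (not part of the original file; assumes the UNet class is importable):

# Not part of the original file; quick shape check for the UNet defined above.
import torch

net = UNet(in_chns=3, class_num=2)
x = torch.randn(1, 3, 128, 128)  # (N, in_chns, H, W); H and W divisible by 16
with torch.no_grad():
    y = net(x)
print(y.shape)  # torch.Size([1, 2, 128, 128])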
Fullscreen.js
// All material copyright ESRI, All Rights Reserved, unless otherwise specified. // See https://js.arcgis.com/4.11/esri/copyright.txt for details. //>>built define("require exports ../core/tsSupport/declareExtendsHelper ../core/tsSupport/decorateHelper dojo/i18n!./Fullscreen/nls/Fullscreen ../core/accessorSupport/decorators ./Widget ./Fullscreen/FullscreenViewModel ./support/widget".split(" "),function(n,p,l,d,g,e,m,h,f){return function(k){function
(b){b=k.call(this)||this;b.element=null;b.view=null;b.viewModel=new h;return b}l(a,k);a.prototype.render=function(){var b,a,c=this.get("viewModel.state"),d=(b={},b["esri-disabled"]="disabled"===c||"feature-unsupported"=== c,b);b=(a={},a["esri-icon-zoom-out-fixed"]="ready"===c||"disabled"===c||"feature-unsupported"===c,a["esri-icon-zoom-in-fixed"]="active"===c,a);a="active"===c?g.exit:"ready"===c?g.enter:"";return f.tsx("div",{bind:this,class:this.classes("esri-fullscreen esri-widget--button esri-widget",d),role:"button",tabIndex:0,onclick:this._toggle,onkeydown:this._toggle,"aria-label":a,title:a},f.tsx("span",{class:this.classes("esri-icon",b),"aria-hidden":"true"}),f.tsx("span",{class:"esri-icon-font-fallback-text"}, a))};a.prototype._toggle=function(){this.viewModel.toggle()};d([e.aliasOf("viewModel.element")],a.prototype,"element",void 0);d([e.aliasOf("viewModel.view")],a.prototype,"view",void 0);d([e.property({type:h}),f.renderable("viewModel.state")],a.prototype,"viewModel",void 0);d([f.accessibleHandler()],a.prototype,"_toggle",null);return a=d([e.subclass("esri.widgets.Fullscreen")],a)}(e.declared(m))});
a
useranimationcard.py
""" User Animation Card =================== Copyright (c) 2019 Ivanov Yuri For suggestions and questions: <[email protected]> This file is distributed under the terms of the same license, as the Kivy framework. Example ------- from kivymd.app import MDApp from kivy.lang import Builder from kivy.factory import Factory from kivymd.toast import toast from kivymd.theming import ThemeManager from kivymd.uix.useranimationcard import MDUserAnimationCard from kivymd.uix.button import MDIconButton from kivymd.uix.list import ILeftBodyTouch # Your content for a contact card. Builder.load_string(''' #:import get_hex_from_color kivy.utils.get_hex_from_color <TestAnimationCard@BoxLayout> orientation: 'vertical' padding: dp(10) spacing: dp(10) size_hint_y: None height: self.minimum_height BoxLayout: size_hint_y: None height: self.minimum_height Widget: MDRoundFlatButton: text: "Free call" Widget: MDRoundFlatButton: text: "Free message" Widget: OneLineIconListItem: text: "Video call" IconLeftSampleWidget: icon: 'camera-front-variant' TwoLineIconListItem: text: "Call Viber Out" secondary_text: "[color=%s]Advantageous rates for calls[/color]" % get_hex_from_color(app.theme_cls.primary_color) IconLeftSampleWidget: icon: 'phone' TwoLineIconListItem: text: "Call over mobile network" secondary_text: "[color=%s]Operator's tariffs apply[/color]" % get_hex_from_color(app.theme_cls.primary_color) IconLeftSampleWidget: icon: 'remote' ''') class IconLeftSampleWidget(ILeftBodyTouch, MDIconButton): pass class Example(MDApp): title = "Example Animation Card" def __init__(self, **kwargs): super().__init__(**kwargs) self.user_animation_card = None def build(self): def main_back_callback(): toast('Close card') if not self.user_animation_card: self.user_animation_card = MDUserAnimationCard( user_name="Lion Lion", path_to_avatar="./assets/african-lion-951778_1280.jpg", callback=main_back_callback) self.user_animation_card.box_content.add_widget( Factory.TestAnimationCard()) self.user_animation_card.open() Example().run() """ from kivy.clock import Clock from kivy.animation import Animation from kivy.core.window import Window from kivy.metrics import dp, sp from kivy.properties import ObjectProperty, StringProperty, ListProperty from kivy.lang import Builder from kivy.uix.boxlayout import BoxLayout from kivy.uix.floatlayout import FloatLayout from kivy.uix.modalview import ModalView from kivymd.uix.behaviors import SpecificBackgroundColorBehavior from kivymd.uix.button import MDIconButton from kivymd.theming import ThemableBehavior Builder.load_string( """ #:import Window kivy.core.window.Window #:import StiffScrollEffect kivymd.stiffscroll.StiffScrollEffect <ModifiedToolbar> size_hint_y: None height: root.theme_cls.standard_increment padding: [root.theme_cls.horizontal_margins - dp(12), 0] BoxLayout: id: left_actions orientation: 'horizontal' size_hint_x: None padding: [0, (self.height - dp(48))/2] BoxLayout: padding: dp(12), 0 MDLabel: font_style: 'H6' opposite_colors: root.opposite_colors theme_text_color: 'Custom' text_color: root.specific_text_color text: root.title shorten: True shorten_from: 'right' BoxLayout: id: right_actions orientation: 'horizontal' size_hint_x: None padding: [0, (self.height - dp(48))/2] <UserAnimationCard> canvas: Color: rgba: root.theme_cls.bg_dark \ if root.theme_cls.theme_style == 'Dark' \ else root.theme_cls.bg_light Rectangle: size: self.size pos: self.pos FitImage: id: image
source: root.path_to_avatar size_hint: 1, None height: Window.height * 40 // 100 y: Window.height - self.height allow_stretch: True keep_ratio: False canvas.after: Color: rgba: root._primary_color Rectangle: size: self.size pos: self.pos MDLabel: id: user_name font_style: 'H4' theme_text_color: 'Custom' color: 1, 1, 1, 1 shorten: True shorten_from: 'right' text: root.user_name size_hint_y: None height: self.texture_size[1] ModifiedToolbar: id: toolbar md_bg_color: 0, 0, 0, 0 left_action_items: [['arrow-left', lambda x: root._callback_back()]] y: Window.height - self.height ScrollView: id: scroll y: -image.height effect_cls: StiffScrollEffect scroll_distance: 100 GridLayout: id: box_content size_hint_y: None height: self.minimum_height cols: 1 canvas: Color: rgba: root.theme_cls.bg_dark \ if root.theme_cls.theme_style == 'Dark' \ else root.theme_cls.bg_light Rectangle: size: self.size pos: self.pos """ ) class MDUserAnimationCard(ThemableBehavior, ModalView): user_name = StringProperty() path_to_avatar = StringProperty() box_content = ObjectProperty() callback = ObjectProperty() _anim_bottom = True def __init__(self, **kwargs): super().__init__(**kwargs) self._primary_color = self.theme_cls.primary_color self._primary_color[3] = 0 self.user_animation_card = UserAnimationCard( user_name=self.user_name, path_to_avatar=self.path_to_avatar, _callback_back=self._callback_back, _primary_color=self._primary_color, ) self.user_animation_card.ids.user_name.pos = ( dp(15), Window.height - self.user_animation_card.ids.image.height, ) self.box_content = self.user_animation_card.ids.box_content self.add_widget(self.user_animation_card) self._obj_avatar = self.user_animation_card.ids.image self._obj_user_name = self.user_animation_card.ids.user_name self._obj_toolbar = self.user_animation_card.ids.toolbar self._obj_scroll = self.user_animation_card.ids.scroll self._set_current_pos_objects() def _callback_back(self): self.dismiss() if self.callback: self.callback() def on_open(self): self._primary_color = self.theme_cls.primary_color self._primary_color[3] = 0 self.user_animation_card._primary_color = self._primary_color def _set_current_pos_objects(self): self._avatar_y = self._obj_avatar.y self._toolbar_y = self._obj_toolbar.y self._user_name_y = self._obj_user_name.y self._scroll_y = self._obj_scroll.y def on_touch_move(self, touch): if touch.ud["swipe_begin"] < touch.y: if self._anim_bottom: self._anim_bottom = False self.animation_to_top() else: if not self._anim_bottom: self._anim_bottom = True self.animation_to_bottom() def on_touch_down(self, touch): touch.ud["swipe_begin"] = touch.y return super().on_touch_down(touch) def on_touch_up(self, touch): touch.ud["swipe_begin"] = 0 def animation_to_bottom(self): Animation(y=self._scroll_y, d=0.4, t="in_out_cubic").start( self._obj_scroll ) Animation(y=self._user_name_y, d=0.5, x=dp(15), t="in_out_cubic").start( self._obj_user_name ) Animation(font_size=sp(36), d=0.3, t="in_out_cubic").start( self._obj_user_name ) Animation(_primary_color=[0, 0, 0, 0], d=0.3, t="in_out_cubic").start( self.user_animation_card ) Animation(y=self._avatar_y, d=0.4, t="in_out_cubic").start( self._obj_avatar ) def animation_to_top(self): user_name_y = ( Window.height - self._obj_toolbar.height + (self.theme_cls.standard_increment // 2 - dp(12)) ) user_name_x = self.theme_cls.horizontal_margins + dp(12) * 5 Animation(y=-self._obj_toolbar.height, d=0.4, t="in_out_cubic").start( self._obj_scroll ) Animation(y=user_name_y, d=0.3, x=user_name_x, t="in_out_cubic").start( 
self._obj_user_name ) Animation(font_size=sp(20), d=0.3, t="in_out_cubic").start( self._obj_user_name ) Animation( _primary_color=self.theme_cls.primary_color, d=0.3, t="in_out_cubic" ).start(self.user_animation_card) Animation(y=self._obj_avatar.y + 30, d=0.4, t="in_out_cubic").start( self._obj_avatar ) class UserAnimationCard(ThemableBehavior, FloatLayout): user_name = StringProperty() path_to_avatar = StringProperty() _callback_back = ObjectProperty() _primary_color = ListProperty() class ModifiedToolbar( ThemableBehavior, SpecificBackgroundColorBehavior, BoxLayout ): left_action_items = ListProperty() title = StringProperty() def __init__(self, **kwargs): super().__init__(**kwargs) self.bind(specific_text_color=self.update_action_bar_text_colors) Clock.schedule_once( lambda x: self.on_left_action_items(0, self.left_action_items) ) def on_left_action_items(self, instance, value): self.update_action_bar(self.ids["left_actions"], value) def update_action_bar(self, action_bar, action_bar_items): action_bar.clear_widgets() new_width = 0 for item in action_bar_items: new_width += dp(48) action_bar.add_widget( MDIconButton( icon=item[0], on_release=item[1], opposite_colors=True, text_color=self.specific_text_color, theme_text_color="Custom", ) ) action_bar.width = new_width def update_action_bar_text_colors(self, instance, value): for child in self.ids["left_actions"].children: child.text_color = self.specific_text_color
datamodel.rs
use crate::field::{Field, RelationField, ScalarField};
use crate::model::Model;
use crate::r#enum::Enum;
use crate::relation_info::RelationInfo;

/// Entities in the datamodel can be flagged as `is_commented_out`. This lets the renderer
/// know that introspection encountered unsupported names or features and these are supposed
/// to be rendered as comments. Since the parser will not set these flags when reading a schema
/// string, only introspection and the lowering of the datamodel to the AST care about these flags.
/// The `FieldType::Unsupported` variant behaves in the same way.
/// Both of these are never converted into the internal datamodel.
#[derive(Debug, PartialEq, Clone, Default)]
pub struct Datamodel {
    pub enums: Vec<Enum>,
    pub models: Vec<Model>,
}

impl Datamodel {
    pub fn new() -> Datamodel {
        Datamodel {
            ..Default::default()
        }
    }

    /// Checks if a datamodel contains neither enums nor models.
    pub fn is_empty(&self) -> bool {
        self.enums.is_empty() && self.models.is_empty()
    }

    /// Adds an enum to this datamodel.
    pub fn add_enum(&mut self, en: Enum) {
        self.enums.push(en);
    }

    /// Adds a model to this datamodel.
    pub fn add_model(&mut self, model: Model) {
        self.models.push(model);
    }

    /// Gets an iterator over all models.
    pub fn models(&self) -> std::slice::Iter<Model> {
        self.models.iter()
    }

    /// Gets an iterator over all enums.
    pub fn enums(&self) -> std::slice::Iter<Enum> {
        self.enums.iter()
    }

    /// Gets a mutable iterator over all models.
    pub fn models_mut(&mut self) -> std::slice::IterMut<Model> {
        self.models.iter_mut()
    }

    /// Gets a mutable iterator over all enums.
    pub fn enums_mut(&mut self) -> std::slice::IterMut<Enum> {
        self.enums.iter_mut()
    }

    /// Finds a model by name.
    pub fn find_model(&self, name: &str) -> Option<&Model> {
        self.models().find(|model| model.name == name)
    }
/// Finds a model by database name. This will only find models with a name /// remapped to the provided `db_name`. pub fn find_model_db_name(&self, db_name: &str) -> Option<&Model> { self.models() .find(|model| model.database_name.as_deref() == Some(db_name)) } /// Finds parent model for a field reference. pub fn find_model_by_relation_field_ref(&self, field: &RelationField) -> Option<&Model> { self.find_model(&self.find_related_field_bang(field).1.relation_info.to) } /// Finds a mutable field reference by a model and field name. pub fn find_field_mut(&mut self, model: &str, field: &str) -> &mut Field { self.find_model_mut(model).find_field_mut(field) } /// Finds a mutable scalar field reference by a model and field name. pub fn find_scalar_field_mut(&mut self, model: &str, field: &str) -> &mut ScalarField { // This uses the memory location of field for equality. self.find_model_mut(model).find_scalar_field_mut(field) } /// Finds a mutable relation field reference by a model and field name. pub fn find_relation_field_mut(&mut self, model: &str, field: &str) -> &mut RelationField { self.find_model_mut(model).find_relation_field_mut(field) } /// Finds an enum by name. pub fn find_enum(&self, name: &str) -> Option<&Enum> { self.enums().find(|m| m.name == *name) } /// Finds an enum by database name. pub fn find_enum_db_name(&self, db_name: &str) -> Option<&Enum> { self.enums().find(|e| e.database_name == Some(db_name.to_owned())) } /// Finds a model by name and returns a mutable reference. pub fn find_model_mut(&mut self, name: &str) -> &mut Model { self.models_mut() .find(|m| m.name == *name) .expect("We assume an internally valid datamodel before mutating.") } /// Finds an enum by name and returns a mutable reference. pub fn find_enum_mut(&mut self, name: &str) -> &mut Enum { self.enums_mut() .find(|m| m.name == *name) .expect("We assume an internally valid datamodel before mutating.") } /// Returns (model_name, field_name) for all fields using a specific enum. pub fn find_enum_fields(&self, enum_name: &str) -> Vec<(String, String)> { let mut fields = vec![]; for model in self.models() { for field in model.scalar_fields() { if field.field_type.is_enum(enum_name) { fields.push((model.name.clone(), field.name.clone())) } } } fields } /// Returns (model_name, field_name) for all relation fields pointing to a specific model. pub fn find_relation_fields_for_model(&mut self, model_name: &str) -> Vec<(String, String)> { let mut fields = vec![]; for model in self.models() { for field in model.relation_fields() { if field.relation_info.to == model_name { fields.push((model.name.clone(), field.name.clone())) } } } fields } /// Finds a relation field related to a relation info. Returns a tuple (index_of_relation_field_in_model, relation_field). pub fn find_related_field_for_info(&self, info: &RelationInfo, exclude: &str) -> Option<(usize, &RelationField)> { self.find_model(&info.to) .expect("The model referred to by a RelationInfo should always exist.") .fields .iter() .enumerate() .filter_map(|(idx, field)| field.as_relation_field().map(|f| (idx, f))) .find(|(_idx, f)| { f.relation_info.name == info.name && (f.relation_info.to != info.to || // This is to differentiate the opposite field from self in the self relation case. 
f.name != exclude) }) } /// This finds the related field for a relationfield if available pub fn find_related_field(&self, rf: &RelationField) -> Option<(usize, &RelationField)> { self.find_related_field_for_info(&rf.relation_info, &rf.name) } /// This is used once we assume the datamodel to be internally valid pub fn find_related_field_bang(&self, rf: &RelationField) -> (usize, &RelationField) { self.find_related_field(rf) .expect("Every RelationInfo should have a complementary RelationInfo on the opposite relation field.") } }
note-type.js
import { Factory, faker } from '@bigtest/mirage'; export default Factory.extend({ id: faker.random.uuid, name: faker.commerce.productName, metadata: { createdByUserId: faker.random.uuid,
createdByUsername: faker.name.firstName, createdDate: () => faker.date.past(2), updatedByUserId: faker.random.uuid, updatedDate: () => faker.date.past(1), }, usage: { noteTotal: 0, }, });
linear_svm.py
import numpy as np
from random import shuffle


def svm_loss_naive(W, X, y, reg):
def svm_loss_vectorized(W, X, y, reg):
    """
    Structured SVM loss function, vectorized implementation.

    Inputs and outputs are the same as svm_loss_naive.
    """
    loss = 0.0
    dW = np.zeros(W.shape)  # initialize the gradient as zero

    #############################################################################
    # Vectorized version of the structured SVM loss.                            #
    #############################################################################
    num_train = X.shape[0]
    scores = X.dot(W)  # N x C matrix of class scores
    # Score of the correct class for every example, as an N x 1 column vector.
    correct_class_scores = scores[np.arange(num_train), y][:, np.newaxis]
    # Hinge margins; replaces max(0, scores - scores[y] + 1) with delta = 1.
    margins = np.maximum(0, scores - correct_class_scores + 1)
    margins[np.arange(num_train), y] = 0  # don't count y_i

    # Average data loss plus L2 regularization.
    loss = np.sum(margins) / num_train
    loss += 0.5 * reg * np.sum(W * W)

    #############################################################################
    # Vectorized version of the gradient, reusing the margins computed above.   #
    #############################################################################
    # Each positive margin contributes +X[i] to column j and -X[i] to column y[i].
    binary = (margins > 0).astype(float)
    binary[np.arange(num_train), y] = -np.sum(binary, axis=1)
    dW = X.T.dot(binary) / num_train
    # Add regularization to the gradient.
    dW += reg * W

    return loss, dW
""" Structured SVM loss function, naive implementation (with loops). Inputs have dimension D, there are C classes, and we operate on minibatches of N examples. Inputs: - W: A numpy array of shape (D, C) containing weights. - X: A numpy array of shape (N, D) containing a minibatch of data. - y: A numpy array of shape (N,) containing training labels; y[i] = c means that X[i] has label c, where 0 <= c < C. - reg: (float) regularization strength Returns a tuple of: - loss as single float - gradient with respect to weights W; an array of same shape as W """ dW = np.zeros(W.shape) # initialize the gradient as zero # compute the loss and the gradient num_classes = W.shape[1] num_train = X.shape[0] loss = 0.0 for i in xrange(num_train): scores = X[i].dot(W) correct_class_score = scores[y[i]] for j in xrange(num_classes): if j == y[i]: continue margin = scores[j] - correct_class_score + 1 # note delta = 1 if margin > 0: dW[:, y[i]] += -X[i] dW[:, j] += X[i] # gradient update for incorrect rows loss += margin # Average gradients as well dW /= num_train # Add regularization to the gradient dW += reg * W # Right now the loss is a sum over all training examples, but we want it # to be an average instead so we divide by num_train. loss /= num_train # Add regularization to the loss. loss += 0.5 * reg * np.sum(W * W) ############################################################################# # TODO: # # Compute the gradient of the loss function and store it dW. # # Rather that first computing the loss and then computing the derivative, # # it may be simpler to compute the derivative at the same time that the # # loss is being computed. As a result you may need to modify some of the # # code above to compute the gradient. # ############################################################################# return loss, dW
constants.ts
export const DATABASE_VERSION = "11";
export const DB_INSTANCE_PROP_NAME = "dbInstance";
client.rs
use crate::net::data::Command; #[cfg(feature = "enable-serde")] use std::io::{Read, Result as IoResult, Write}; use std::net::{SocketAddr, TcpStream}; pub struct Client { stream: TcpStream, } impl Client { /// Creates a new client that connects to a specified server. /// # Panics /// If the server address is invalid. #[must_use] pub fn open(host: &str, port: u16) -> Client { let server_addr: SocketAddr = format!("{}:{}", host, port) .parse() .expect("Invalid socket address."); let stream = TcpStream::connect(server_addr).unwrap(); Client { stream } } /// Attempts to send a command to the server. /// # Errors /// IO error if the operation fails. pub fn send(&mut self, cmd: &Command) -> IoResult<()> { let serialized = serde_json::to_string(cmd)?; self.stream.write_all(serialized.as_bytes())?; Ok(()) } /// Attempts to receive a result from the server. /// # Errors /// IO error if the operation fails. pub fn receive(&mut self, buf: &mut [u8]) -> IoResult<usize> { let n = self.stream.read(buf)?; Ok(n) } } #[cfg(test)] mod tests { use super::Client; use crate::net::{Command, IPAService, Pool}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; const SERVER_HOST: &str = "127.0.0.1"; const SERVER_PORT: u16 = 7182; const ECHO_TEST: &str = "test"; const LOOP_COUNT: usize = 100; const CLIENT_THREAD_COUNT: usize = 10; fn start_server() { IPAService::new(SERVER_HOST, SERVER_PORT, 3).start(); } #[test] #[ignore] fn echo() { stderrlog::new().verbosity(5).init().unwrap(); start_server(); let mut pool = Pool::new(CLIENT_THREAD_COUNT); let counter = Arc::new(AtomicUsize::new(0)); for _ in 0..LOOP_COUNT { let c_counter = Arc::clone(&counter); pool.execute(move || { let mut client = Client::open(SERVER_HOST, SERVER_PORT); client .send(&Command::Echo(String::from(ECHO_TEST))) .unwrap(); let mut buf = [0; ECHO_TEST.len()]; client.receive(&mut buf).unwrap(); if buf == ECHO_TEST.as_bytes() {
} pool.shutdown().unwrap(); assert_eq!(counter.load(Ordering::Relaxed), LOOP_COUNT); } }
c_counter.fetch_add(1, Ordering::Relaxed); } }) .unwrap();
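Outside the threaded test, the client is driven the same way; a minimal sketch, assuming a server is already listening and that this runs inside the same crate (the address, port, payload, and buffer size are illustrative, not part of the original file):

// Illustrative only; assumes the crate above and a running server.
use crate::net::data::Command;

fn main() -> std::io::Result<()> {
    let mut client = Client::open("127.0.0.1", 7182);
    client.send(&Command::Echo(String::from("hello")))?;
    let mut buf = [0u8; 5];
    let n = client.receive(&mut buf)?;
    println!("{}", String::from_utf8_lossy(&buf[..n]));
    Ok(())
}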
fuse.go
package handler import ( "fgame/fgame/common/codec" uipb "fgame/fgame/common/codec/pb/ui" "fgame/fgame/common/dispatch" "fgame/fgame/common/lang" commonlog "fgame/fgame/common/log" "fgame/fgame/core/session" coreutils "fgame/fgame/core/utils" chatlogic "fgame/fgame/game/chat/logic" chattypes "fgame/fgame/game/chat/types" "fgame/fgame/game/common/common" funcopentypes "fgame/fgame/game/funcopen/types" "fgame/fgame/game/inventory/inventory" inventorylogic "fgame/fgame/game/inventory/logic" playerinventory "fgame/fgame/game/inventory/player" inventorytypes "fgame/fgame/game/inventory/types" "fgame/fgame/game/item/item" itemtypes "fgame/fgame/game/item/types" noticelogic "fgame/fgame/game/notice/logic" "fgame/fgame/game/player" playerlogic "fgame/fgame/game/player/logic" playertypes "fgame/fgame/game/player/types" "fgame/fgame/game/processor" propertylogic "fgame/fgame/game/property/logic" playerproperty "fgame/fgame/game/property/player" ringlogic "fgame/fgame/game/ring/logic" "fgame/fgame/game/ring/pbutil" playerring "fgame/fgame/game/ring/player" ringtemplate "fgame/fgame/game/ring/template" ringtypes "fgame/fgame/game/ring/types" gamesession "fgame/fgame/game/session" skilltemplate "fgame/fgame/game/skill/template" "fgame/fgame/pkg/mathutils" "fmt" log "github.com/Sirupsen/logrus" ) func init()
func handleRingFuse(s session.Session, msg interface{}) (err error) {
	log.Debug("ring: 开始处理特戒融合请求消息")
	gcs := gamesession.SessionInContext(s.Context())
	pl := gcs.Player()
	tpl := pl.(player.Player)
	csRingFuse := msg.(*uipb.CSRingFuse)
	isBag := csRingFuse.GetIsBag()
	typ := ringtypes.RingType(0)
	index := int32(0)
	if isBag {
		index = csRingFuse.GetIndex()
	} else {
		// assign to the outer typ; a `:=` here would shadow it and ringFuse
		// would always receive RingType(0) on the equipped-ring path
		typ = ringtypes.RingType(csRingFuse.GetType())
		if !typ.Valid() {
			log.WithFields(
				log.Fields{
					"playerId": tpl.GetId(),
					"type":     int32(typ),
				}).Warn("ring: 特戒类型不合法")
			return
		}
	}
	needIndex := csRingFuse.GetNeedIndex()
	err = ringFuse(tpl, typ, isBag, index, needIndex)
	if err != nil {
		log.WithFields(
			log.Fields{
				"playerId": tpl.GetId(),
				"err":      err,
			}).Error("ring: 处理特戒融合请求消息,错误")
		return
	}
	log.WithFields(
		log.Fields{
			"playerId": tpl.GetId(),
		}).Debug("ring: 处理特戒融合请求消息,成功")
	return
}

func ringFuse(pl player.Player, typ ringtypes.RingType, isBag bool, index int32, needIndex int32) (err error) {
	if !pl.IsFuncOpen(funcopentypes.FuncOpenTypeRingFuse) {
		log.WithFields(
			log.Fields{
				"playerId": pl.GetId(),
			}).Warn("ring: 功能未开启")
		playerlogic.SendSystemMessage(pl, lang.CommonFuncNoOpen)
		return
	}
	inventoryManager := pl.GetPlayerDataManager(playertypes.PlayerInventoryDataManagerType).(*playerinventory.PlayerInventoryDataManager)
	propertyManager := pl.GetPlayerDataManager(playertypes.PlayerPropertyDataManagerType).(*playerproperty.PlayerPropertyDataManager)
	ringManager := pl.GetPlayerDataManager(playertypes.PlayerRingDataManagerType).(*playerring.PlayerRingDataManager)
	itemId := int32(0)
	var itemType itemtypes.ItemType
	var propertyData inventorytypes.ItemPropertyData
	// check whether the ring in the first slot comes from the bag
	if isBag {
		it := inventoryManager.FindItemByIndex(inventorytypes.BagTypePrim, index)
		// the item does not exist
		if it == nil || it.IsEmpty() {
			log.WithFields(
				log.Fields{
					"playerId": pl.GetId(),
					"index":    index,
				}).Warn("ring: 使用特戒,物品不存在")
			playerlogic.SendSystemMessage(pl, lang.InventoryItemNoExist)
			return
		}
		itemId = it.ItemId
		propertyData = it.PropertyData
		// check that the item is actually a ring
		itemTemplate := item.GetItemService().GetItem(int(itemId))
		if !itemTemplate.IsTeRing() {
			log.WithFields(
				log.Fields{
					"playerId": pl.GetId(),
					"index":    index,
				}).Warn("ring: 使用特戒, 物品不是特戒")
			playerlogic.SendSystemMessage(pl, lang.RingIsNotRing)
			return
		}
		itemType = itemTemplate.GetItemType()
		fuseTemp := ringtemplate.GetRingTemplateService().GetRingFuseSynthesisTemplate(itemId)
		if fuseTemp == nil {
			log.WithFields(
				log.Fields{
					"playerId": pl.GetId(),
					"index":    index,
				}).Warn("ring: 使用特戒,物品不存在")
			playerlogic.SendSystemMessage(pl, lang.RingTempalteNotExist)
			return
		}
		createItemId := fuseTemp.ItemId
		createItemNum := fuseTemp.ItemCount
		// make sure the bag has enough free slots
		if !inventoryManager.HasEnoughSlot(createItemId, createItemNum) {
			log.WithFields(
				log.Fields{
					"playerId":      pl.GetId(),
					"createItemId":  createItemId,
					"createItemNum": createItemNum,
				}).Warn("ring: 背包不足")
			playerlogic.SendSystemMessage(pl, lang.InventorySlotNoEnough)
			return
		}
		ringTemp := ringtemplate.GetRingTemplateService().GetRingTemplate(itemId)
		if ringTemp == nil {
			log.WithFields(
				log.Fields{
					"playerId": pl.GetId(),
					"itemId":   itemId,
				}).Warn("ring:模板不存在")
			playerlogic.SendSystemMessage(pl, lang.RingTempalteNotExist)
			return
		}
		typ = ringTemp.GetRingType()
	} else {
		ringObj := ringManager.GetPlayerRingObject(typ)
		if ringObj == nil {
			log.WithFields(
				log.Fields{
					"playerId": pl.GetId(),
					"typ":      typ.String(),
				}).Warn("ring: 玩家未穿戴该特戒")
			playerlogic.SendSystemMessage(pl, lang.RingNotEquip)
			return
		}
		itemId = ringObj.GetItemId()
	}
	needIt := inventoryManager.FindItemByIndex(inventorytypes.BagTypePrim, needIndex)
	// the item in the second slot does not exist
	if needIt == nil || needIt.IsEmpty() {
		log.WithFields(
			log.Fields{
				"playerId": pl.GetId(),
				"index":    index,
			}).Warn("ring:使用特戒,物品不存在")
		playerlogic.SendSystemMessage(pl, lang.InventoryItemNoExist)
		return
	}
	needItemId := needIt.ItemId
	// check that the item in the second slot is a ring
	needItemTemplate := item.GetItemService().GetItem(int(needItemId))
	if !needItemTemplate.IsTeRing() {
		log.WithFields(
			log.Fields{
				"playerId":   pl.GetId(),
				"needIndex":  needIndex,
				"needItemId": needItemId,
			}).Warn("ring: 使用特戒, 物品不是特戒")
		playerlogic.SendSystemMessage(pl, lang.RingIsNotRing)
		return
	}
	// fuse template
	fuseTemp := ringtemplate.GetRingTemplateService().GetRingFuseSynthesisTemplate(itemId)
	if fuseTemp == nil {
		log.WithFields(
			log.Fields{
				"playerId": pl.GetId(),
				"index":    index,
				"itemId":   itemId,
			}).Warn("ring: 模板不存在")
		playerlogic.SendSystemMessage(pl, lang.RingTempalteNotExist)
		return
	}
	if needItemId != fuseTemp.NeedItemId2 {
		log.WithFields(
			log.Fields{
				"playerId":    pl.GetId(),
				"typ":         typ.String(),
				"needIndex":   needIndex,
				"needItemId":  needItemId,
				"NeedItemId2": fuseTemp.NeedItemId2,
			}).Warn("ring: 特戒融合需要物品与当前物品不符")
		playerlogic.SendSystemMessage(pl, lang.RingFuseItemNotSuit)
		return
	}
	// currency cost
	costGold := int64(fuseTemp.NeedGold)
	costSilver := int64(fuseTemp.NeedSilver)
	costBindGold := int64(fuseTemp.NeedBindGold)
	// enough silver?
	if costSilver != 0 {
		flag := propertyManager.HasEnoughSilver(costSilver)
		if !flag {
			log.WithFields(log.Fields{
				"playerId": pl.GetId(),
				"typ":      typ.String(),
			}).Warn("ring: 银两不足,无法融合")
			playerlogic.SendSystemMessage(pl, lang.PlayerSilverNoEnough)
			return
		}
	}
	// enough gold?
	if costGold != 0 {
		flag := propertyManager.HasEnoughGold(costGold, false)
		if !flag {
			log.WithFields(log.Fields{
				"playerId": pl.GetId(),
				"typ":      typ.String(),
			}).Warn("ring:元宝不足,无法融合")
			playerlogic.SendSystemMessage(pl, lang.PlayerGoldNoEnough)
			return
		}
	}
	// enough bound gold?
	needBindGold := costBindGold + costGold
	if needBindGold != 0 {
		flag := propertyManager.HasEnoughGold(needBindGold, true)
		if !flag {
			log.WithFields(log.Fields{
				"playerId": pl.GetId(),
				"typ":      typ.String(),
			}).Warn("ring:元宝不足,无法融合")
			playerlogic.SendSystemMessage(pl, lang.PlayerGoldNoEnough)
			return
		}
	}
	// make sure there are enough material items
	needItemNum := fuseTemp.NeedItemCount2
	curNum := inventoryManager.NumOfItems(needItemId)
	if curNum < needItemNum {
		log.WithFields(
			log.Fields{
				"playerId": pl.GetId(),
				"typ":      typ.String(),
			}).Warn("ring: 所需物品不足")
		playerlogic.SendSystemMessage(pl, lang.InventoryItemNoEnough)
		return
	}
	// spend the currency
	goldUseReason := commonlog.GoldLogReasonRingFuse
	goldUseReasonStr := fmt.Sprintf(goldUseReason.String(), typ.String())
	silverUseReason := commonlog.SilverLogReasonRingFuse
	silverUseReasonStr := fmt.Sprintf(silverUseReason.String(), typ.String())
	flag := propertyManager.Cost(costBindGold, costGold, goldUseReason, goldUseReasonStr, costSilver, silverUseReason, silverUseReasonStr)
	if !flag {
		panic(fmt.Errorf("ring: 特戒融合消耗钱应该成功"))
	}
	// sync currency changes
	if costGold != 0 || costSilver != 0 || costBindGold != 0 {
		propertylogic.SnapChangedProperty(pl)
	}
	if needItemNum > 0 {
		reason := commonlog.InventoryLogReasonRingAdvance
		reasonText := fmt.Sprintf(reason.String(), typ.String())
		flag, err = inventoryManager.RemoveIndex(inventorytypes.BagTypePrim, needIndex, needItemNum, reason, reasonText)
		if !flag {
			panic("ring: 消耗物品应该成功")
		}
		if err != nil {
			return
		}
	}
	success := mathutils.RandomHit(common.MAX_RATE, int(fuseTemp.SuccessRate))
	// on success, consume the item in the first slot
	if success && isBag {
		reason := commonlog.InventoryLogReasonRingAdvance
		reasonText := fmt.Sprintf(reason.String(), typ.String())
		flag, err = inventoryManager.RemoveIndex(inventorytypes.BagTypePrim, index, 1, reason, reasonText)
		if !flag {
			panic("ring: 消耗物品应该成功")
		}
		if err != nil {
			return
		}
	}
	createItemId := fuseTemp.ItemId
	createItemNum := fuseTemp.ItemCount
	if isBag {
		if success {
			createItemTemp := item.GetItemService().GetItem(int(createItemId))
			if createItemTemp == nil {
				log.WithFields(
					log.Fields{
						"playerId": pl.GetId(),
						"typ":      typ.String(),
					}).Warn("ring: 融合成功的物品模板不存在")
				playerlogic.SendSystemMessage(pl, lang.RingTempalteNotExist)
				return
			}
			reason := commonlog.InventoryLogReasonRingFuseGet
			reasonText := fmt.Sprintf(reason.String(), typ.String())
			flag = inventoryManager.AddItemLevelWithPropertyData(createItemId, createItemNum, createItemTemp.NeedLevel, createItemTemp.GetBindType(), propertyData, reason, reasonText)
			if !flag {
				panic(fmt.Errorf("ring: 添加物品应该成功"))
			}
		}
	} else {
		if success {
			ringManager.RingFuseSuccess(typ, createItemId)
		}
	}
	// push inventory changes
	inventorylogic.SnapInventoryChanged(pl)
	// push property changes
	ringlogic.RingPropertyChange(pl)
	propertylogic.SnapChangedProperty(pl)
	// broadcast announcement
	plName := coreutils.FormatColor(chattypes.ColorTypePlayerName, coreutils.FormatNoticeStr(pl.GetName()))
	fuseNum := int32(0)
	skillId := int32(0)
	rongheTemp := ringtemplate.GetRingTemplateService().GetRingTemplate(createItemId)
	if rongheTemp != nil {
		fuseNum = rongheTemp.Level
		skillId = rongheTemp.SkillId
	}
	itemTemp := item.GetItemService().GetItem(int(createItemId))
	if itemTemp == nil {
		log.Warningf("ring: 物品模板不存在,itemId:%d", createItemId)
		return
	}
	qualityType := itemtypes.ItemQualityType(itemTemp.Quality)
	itemName := coreutils.FormatColor(qualityType.GetColor(), fmt.Sprintf("[%s]", typ.String()))
	data, ok := propertyData.(*ringtypes.RingPropertyData)
	if !ok {
		// propertyData is never set on the equipped-ring path, so fall back to
		// default property data instead of dereferencing a nil pointer below
		base := inventorytypes.CreateDefaultItemPropertyDataBase()
		propertyData = inventory.CreatePropertyDataInterface(itemType, base)
		data, ok = propertyData.(*ringtypes.RingPropertyData)
	}
	strengthLevel, advance, jingLingLevel := int64(0), int64(0), int64(0)
	if ok {
		strengthLevel = int64(data.StrengthLevel)
		advance = int64(data.Advance)
		jingLingLevel = int64(data.JingLingLevel)
	}
	args := []int64{int64(chattypes.ChatLinkTypeItem), int64(createItemId), strengthLevel, advance, jingLingLevel}
	infoLink := coreutils.FormatLink(itemName, args)
	// compute the battle power added by this fuse level
	power := int64(0)
	if skillId != 0 {
		skillTemplate := skilltemplate.GetSkillTemplateService().GetSkillTemplate(skillId)
		power += int64(skillTemplate.AddPower)
		power += propertylogic.CulculateForce(rongheTemp.GetBattlePropertyMap())
	}
	content := fmt.Sprintf(lang.GetLangService().ReadLang(lang.RingFuseNotice), plName, infoLink, fuseNum, power)
	chatlogic.SystemBroadcast(chattypes.MsgTypeText, []byte(content))
	noticelogic.NoticeNumBroadcast([]byte(content), 0, 1)
	scRingFuse := pbutil.BuildSCRingFuse(success, isBag, int32(typ), index, needIndex, createItemId, createItemNum)
	pl.SendMsg(scRingFuse)
	return
}
{ processor.Register(codec.MessageType(uipb.MessageType_CS_RING_FUSE_TYPE), dispatch.HandlerFunc(handleRingFuse)) }
all_builtins.go
// Copyright 2017 The Cockroach Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. package builtins
import ( "sort" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sem/types" ) // AllBuiltinNames is an array containing all the built-in function // names, sorted in alphabetical order. This can be used for a // deterministic walk through the Builtins map. var AllBuiltinNames []string // AllAggregateBuiltinNames is an array containing the subset of // AllBuiltinNames that corresponds to aggregate functions. var AllAggregateBuiltinNames []string // AllWindowBuiltinNames is an array containing the subset of // AllBuiltinNames that corresponds to window functions. var AllWindowBuiltinNames []string func init() { initAggregateBuiltins() initWindowBuiltins() initGeneratorBuiltins() initPGBuiltins() AllBuiltinNames = make([]string, 0, len(builtins)) AllAggregateBuiltinNames = make([]string, 0, len(aggregates)) tree.FunDefs = make(map[string]*tree.FunctionDefinition) for name, def := range builtins { fDef := tree.NewFunctionDefinition(name, &def.props, def.overloads) tree.FunDefs[name] = fDef if fDef.Private { // Avoid listing help for private functions. continue } AllBuiltinNames = append(AllBuiltinNames, name) if def.props.Class == tree.AggregateClass { AllAggregateBuiltinNames = append(AllAggregateBuiltinNames, name) } else if def.props.Class == tree.WindowClass { AllWindowBuiltinNames = append(AllWindowBuiltinNames, name) } } // Generate missing categories. for _, name := range AllBuiltinNames { def := builtins[name] if def.props.Category == "" { def.props.Category = getCategory(def.overloads) builtins[name] = def } } sort.Strings(AllBuiltinNames) sort.Strings(AllAggregateBuiltinNames) sort.Strings(AllWindowBuiltinNames) } func getCategory(b []tree.Overload) string { // If single argument attempt to categorize by the type of the argument. for _, ovl := range b { switch typ := ovl.Types.(type) { case tree.ArgTypes: if len(typ) == 1 { return categorizeType(typ[0].Typ) } } // Fall back to categorizing by return type. if retType := ovl.FixedReturnType(); retType != nil { return categorizeType(retType) } } return "" } func collectOverloads( props tree.FunctionProperties, types []types.T, gens ...func(types.T) tree.Overload, ) builtinDefinition { r := make([]tree.Overload, 0, len(types)*len(gens)) for _, f := range gens { for _, t := range types { r = append(r, f(t)) } } return builtinDefinition{ props: props, overloads: r, } }
service_initfunc.go
/* * Tencent is pleased to support the open source community by making 蓝鲸 available. * Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved. * Licensed under the MIT License (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * http://opensource.org/licenses/MIT * Unless required by applicable law or agreed to in writing, software distributed under * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package service import ( "net/http" "configcenter/src/common/http/rest" "github.com/emicklei/go-restful" ) func (s *Service) initAssociation(web *restful.WebService) { utility := rest.NewRestUtility(rest.Config{ ErrorIf: s.Engine.CCErr, Language: s.Engine.Language, }) // mainline topo methods utility.AddHandler(rest.Action{Verb: http.MethodGet, Path: "/topo/model/{owner_id}/{cls_id}/{bk_obj_id}", Handler: s.SearchObjectByClassificationID}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/find/topo/tree/brief/biz/{bk_biz_id}", Handler: s.SearchBriefBizTopo}) utility.AddToRestfulWebService(web) } func (s *Service) initAuditLog(web *restful.WebService) { utility := rest.NewRestUtility(rest.Config{ ErrorIf: s.Engine.CCErr, Language: s.Engine.Language, }) utility.AddHandler(rest.Action{Verb: http.MethodGet, Path: "/find/audit_dict", Handler: s.SearchAuditDict}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/findmany/audit_list", Handler: s.SearchAuditList}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/find/audit", Handler: s.SearchAuditDetail}) utility.AddToRestfulWebService(web) } func (s *Service) initBusiness(web *restful.WebService) { utility := rest.NewRestUtility(rest.Config{ ErrorIf: s.Engine.CCErr, Language: s.Engine.Language, }) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/app/search/{owner_id}", Handler: s.SearchBusiness}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/app/{owner_id}", Handler: s.CreateBusiness}) utility.AddHandler(rest.Action{Verb: http.MethodPut, Path: "/app/{owner_id}/{app_id}", Handler: s.UpdateBusiness}) utility.AddHandler(rest.Action{Verb: http.MethodPut, Path: "/app/status/{flag}/{owner_id}/{app_id}", Handler: s.UpdateBusinessStatus}) // utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/app/search/{owner_id}", Handler: s.SearchBusiness}) utility.AddHandler(rest.Action{Verb: http.MethodGet, Path: "/app/{app_id}/basic_info", Handler: s.GetBusinessBasicInfo}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/app/default/{owner_id}/search", Handler: s.SearchOwnerResourcePoolBusiness}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/app/default/{owner_id}", Handler: s.CreateDefaultBusiness}) utility.AddHandler(rest.Action{Verb: http.MethodGet, Path: "/topo/internal/{owner_id}/{app_id}", Handler: s.GetInternalModule}) utility.AddHandler(rest.Action{Verb: http.MethodGet, Path: "/topo/internal/{owner_id}/{app_id}/with_statistics", Handler: s.GetInternalModuleWithStatistics}) // find reduced business list with only few fields for business itself. 
utility.AddHandler(rest.Action{Verb: http.MethodGet, Path: "/app/with_reduced", Handler: s.SearchReducedBusinessList}) utility.AddHandler(rest.Action{Verb: http.MethodGet, Path: "/app/simplify", Handler: s.ListAllBusinessSimplify}) utility.AddToRestfulWebService(web) } func (s *Service) initModule(web *restful.WebService) { utility := rest.NewRestUtility(rest.Config{ ErrorIf: s.Engine.CCErr, Language: s.Engine.Language, }) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/module/{app_id}/{set_id}", Handler: s.CreateModule}) utility.AddHandler(rest.Action{Verb: http.MethodDelete, Path: "/module/{app_id}/{set_id}/{module_id}", Handler: s.DeleteModule}) utility.AddHandler(rest.Action{Verb: http.MethodPut, Path: "/module/{app_id}/{set_id}/{module_id}", Handler: s.UpdateModule}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/module/search/{owner_id}/{bk_biz_id}/{bk_set_id}", Handler: s.SearchModule}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/findmany/module/biz/{bk_biz_id}", Handler: s.SearchModuleByCondition}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/findmany/module/bk_biz_id/{bk_biz_id}", Handler: s.SearchModuleBatch}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/findmany/module/with_relation/biz/{bk_biz_id}", Handler: s.SearchModuleWithRelation}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/module/bk_biz_id/{bk_biz_id}/service_template_id/{service_template_id}", Handler: s.ListModulesByServiceTemplateID}) utility.AddHandler(rest.Action{Verb: http.MethodPut, Path: "/module/host_apply_enable_status/bk_biz_id/{bk_biz_id}/bk_module_id/{bk_module_id}", Handler: s.UpdateModuleHostApplyEnableStatus}) utility.AddToRestfulWebService(web) } func (s *Service) initSet(web *restful.WebService) { utility := rest.NewRestUtility(rest.Config{ ErrorIf: s.Engine.CCErr, Language: s.Engine.Language, }) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/set/{app_id}", Handler: s.CreateSet}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/set/{app_id}/batch", Handler: s.BatchCreateSet}) utility.AddHandler(rest.Action{Verb: http.MethodDelete, Path: "/set/{app_id}/{set_id}", Handler: s.DeleteSet}) utility.AddHandler(rest.Action{Verb: http.MethodPut, Path: "/set/{app_id}/{set_id}", Handler: s.UpdateSet}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/set/search/{owner_id}/{app_id}", Handler: s.SearchSet}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/findmany/set/bk_biz_id/{bk_biz_id}", Handler: s.SearchSetBatch}) utility.AddToRestfulWebService(web) } func (s *Service) initInst(web *restful.WebService) { utility := rest.NewRestUtility(rest.Config{ ErrorIf: s.Engine.CCErr, Language: s.Engine.Language, }) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/inst/search/{owner_id}/{bk_obj_id}", Handler: s.SearchInsts}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/findmany/inst/association/object/{bk_obj_id}/inst_id/{id}/offset/{start}/limit/{limit}/web", Handler: s.SearchInstAssociationUI}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/findmany/inst/association/association_object/inst_base_info", Handler: s.SearchInstAssociationWithOtherObject}) utility.AddToRestfulWebService(web) } func (s *Service) initObjectAttribute(web *restful.WebService) { utility := rest.NewRestUtility(rest.Config{ ErrorIf: s.Engine.CCErr, Language: s.Engine.Language, }) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: 
"/update/objectattr/index/{bk_obj_id}/{id}", Handler: s.UpdateObjectAttributeIndex}) utility.AddToRestfulWebService(web) } func (s *Service) initObjectGroup(web *restful.WebService) { utility := rest.NewRestUtility(rest.Config{ ErrorIf: s.Engine.CCErr, Language: s.Engine.Language, }) utility.AddHandler(rest.Action{Verb: http.MethodDelete, Path: "/objectatt/group/owner/{owner_id}/object/{bk_object_id}/propertyids/{property_id}/groupids/{group_id}", Handler: s.DeleteObjectAttributeGroup}) utility.AddToRestfulWebService(web) } func (s *Service) initObject(web *restful.WebService) { utility := rest.NewRestUtility(rest.Config{ ErrorIf: s.Engine.CCErr,
utility.AddToRestfulWebService(web) } func (s *Service) initIdentifier(web *restful.WebService) { utility := rest.NewRestUtility(rest.Config{ ErrorIf: s.Engine.CCErr, Language: s.Engine.Language, }) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/identifier/{obj_type}/search", Handler: s.SearchIdentifier}) utility.AddToRestfulWebService(web) } // 全文索引 func (s *Service) initFullTextSearch(web *restful.WebService) { utility := rest.NewRestUtility(rest.Config{ ErrorIf: s.Engine.CCErr, Language: s.Engine.Language, }) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/find/full_text", Handler: s.FullTextFind}) utility.AddToRestfulWebService(web) } // 资源池目录 func (s *Service) initResourceDirectory(web *restful.WebService) { utility := rest.NewRestUtility(rest.Config{ ErrorIf: s.Engine.CCErr, Language: s.Engine.Language, }) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/create/resource/directory", Handler: s.CreateResourceDirectory}) utility.AddHandler(rest.Action{Verb: http.MethodPut, Path: "/update/resource/directory/{bk_module_id}", Handler: s.UpdateResourceDirectory}) utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/findmany/resource/directory", Handler: s.SearchResourceDirectory}) utility.AddHandler(rest.Action{Verb: http.MethodDelete, Path: "/delete/resource/directory/{bk_module_id}", Handler: s.DeleteResourceDirectory}) utility.AddToRestfulWebService(web) } func (s *Service) initService(web *restful.WebService) { s.initAssociation(web) s.initAuditLog(web) s.initBusiness(web) s.initInst(web) s.initModule(web) s.initSet(web) s.initObject(web) s.initObjectAttribute(web) s.initObjectGroup(web) s.initIdentifier(web) s.initBusinessObject(web) s.initBusinessClassification(web) s.initBusinessObjectAttribute(web) s.initBusinessObjectUnique(web) s.initBusinessObjectAttrGroup(web) s.initBusinessAssociation(web) s.initBusinessGraphics(web) s.initBusinessInst(web) s.initFullTextSearch(web) s.initSetTemplate(web) s.initInternalTask(web) s.initResourceDirectory(web) }
Language: s.Engine.Language, }) utility.AddHandler(rest.Action{Verb: http.MethodGet, Path: "/object/statistics", Handler: s.GetModelStatistics})
merge-header.directive.ts
import { Input, Output, EventEmitter, Directive, TemplateRef, ContentChild, ElementRef } from '@angular/core'; @Directive({ selector: 'ngx-datatable-merge-header' }) export class DatatableMergeHeaderDirective { @Input() start:number = 0;
}
@Input() colspan:number = 1; @Input() title:string; @Input() class:string;
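A hypothetical consumer sketch, not from the original repository: the selector and the start/colspan/title inputs come from the declaration above, while the host component and its markup are assumed.

// Hypothetical usage; the component and table markup are illustrative.
import { Component } from '@angular/core';

@Component({
  selector: 'app-demo-table',
  template: `
    <ngx-datatable-merge-header [start]="0" [colspan]="2" title="Totals">
    </ngx-datatable-merge-header>
  `
})
export class DemoTableComponent {}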
czqapi.js
import request2 from '@/utils/request2'
// import service from '../utils/request2'

export function fetchList(query) {
  return request2({
    url: '/downlog/getlist',
    method: 'get',
    params: query
  })
}

// fetch market info
export function fetchMarket(query) {
  return request2({
    url: '/market/getlist',
    method: 'get',
    params: query
  })
}

// save market info
export function saveMarket(query) {
  re
st2({ url: '/market/submit', method: 'get', params: query }) }
turn reque
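An illustrative call site, not part of the original file (the query fields are made up): each helper returns the promise produced by request2, so callers chain on it.

// Illustrative only; query fields are assumptions.
fetchMarket({ page: 1, limit: 20 }).then(res => {
  console.log(res)
})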
bpo.js
let _Op = (function(){ 'bpo disable'; return { add(a, b) { if((typeof(a)=='number' && typeof(b)=='number') || (typeof(a)=='bigint' && typeof(b)=='bigint')) return a+b; if(globalThis.pyodide?.isPyProxy){ if(pyodide.isPyProxy(a)){ return a.__add__(b); }else if(pyodide.isPyProxy(b)){ return b.__radd__(a); } } if(typeof(a)!='object' && typeof(b)=='object'){ a = new b.constructor(a); }else if(typeof(a)=='object' && typeof(b)=='object' && a.constructor!=b.constructor){ throw new Error('bpo: not the same class'); } if(typeof(a)=='object'&&a.operatorAdd) return a.operatorAdd(b); return a + b; }, sub(a, b) { if((typeof(a)=='number' && typeof(b)=='number') || (typeof(a)=='bigint' && typeof(b)=='bigint'))return a-b; if(globalThis.pyodide?.isPyProxy){ if(pyodide.isPyProxy(a)){ return a.__sub__(b); }else if(pyodide.isPyProxy(b)){ return b.__rsub__(a); } } if(typeof(a)!='object' && typeof(b)=='object'){ a = new b.constructor(a); }else if(typeof(a)=='object' && typeof(b)=='object' && a.constructor!=b.constructor){ throw new Error('bpo: not the same class'); } if(typeof(a)=='object'&&a.operatorSub) return a.operatorSub(b); return a - b; }, mul(a, b) { if((typeof(a)=='number' && typeof(b)=='number') || (typeof(a)=='bigint' && typeof(b)=='bigint'))return a*b; if(globalThis.pyodide?.isPyProxy){ if(pyodide.isPyProxy(a)){ return a.__mul__(b); }else if(pyodide.isPyProxy(b)){ return b.__rmul__(a); } } if(typeof(a)!='object' && typeof(b)=='object'){ a = new b.constructor(a); }else if(typeof(a)=='object' && typeof(b)=='object' && a.constructor!=b.constructor){ throw new Error('bpo: not the same class'); } if(typeof(a)=='object'&&a.operatorMul) return a.operatorMul(b); return a * b;
if((typeof(a)=='number' && typeof(b)=='number') || (typeof(a)=='bigint' && typeof(b)=='bigint')) return a/b;
      if(globalThis.pyodide?.isPyProxy){
        if(pyodide.isPyProxy(a)){
          return a.__truediv__(b);
        }else if(pyodide.isPyProxy(b)){
          return b.__rtruediv__(a);
        }
      }
      if(typeof(a)!='object' && typeof(b)=='object'){
        a = new b.constructor(a);
      }else if(typeof(a)=='object' && typeof(b)=='object' && a.constructor!=b.constructor){
        throw new Error('bpo: not the same class');
      }
      if(typeof(a)=='object'&&a.operatorDiv) return a.operatorDiv(b);
      return a / b;
    },
    pow(a, b) {
      if((typeof(a)=='number' && typeof(b)=='number') || (typeof(a)=='bigint' && typeof(b)=='bigint')) return a**b;
      if(globalThis.pyodide?.isPyProxy){
        if(pyodide.isPyProxy(a)){
          return a.__pow__(b);
        }else if(pyodide.isPyProxy(b)){
          return b.__rpow__(a);
        }
      }
      if(typeof(a)!='object' && typeof(b)=='object'){
        a = new b.constructor(a);
      }else if(typeof(a)=='object' && typeof(b)=='object' && a.constructor!=b.constructor){
        throw new Error('bpo: not the same class');
      }
      if(typeof(a)=='object'&&a.operatorPow) return a.operatorPow(b);
      return a ** b;
    },
    binaryAnd(a, b) {
      if((typeof(a)=='number' && typeof(b)=='number') || (typeof(a)=='bigint' && typeof(b)=='bigint')) return a&b;
      if(globalThis.pyodide?.isPyProxy){
        if(pyodide.isPyProxy(a)){
          return a.__and__(b);
        }else if(pyodide.isPyProxy(b)){
          return b.__rand__(a);
        }
      }
      if(typeof(a)!='object' && typeof(b)=='object'){
        a = new b.constructor(a);
      }else if(typeof(a)=='object' && typeof(b)=='object' && a.constructor!=b.constructor){
        throw new Error('bpo: not the same class');
      }
      if(typeof(a)=='object'&&a.operatorBinaryAnd) return a.operatorBinaryAnd(b);
      return a & b;
    },
    binaryOr(a, b) {
      if((typeof(a)=='number' && typeof(b)=='number') || (typeof(a)=='bigint' && typeof(b)=='bigint')) return a|b;
      if(globalThis.pyodide?.isPyProxy){
        if(pyodide.isPyProxy(a)){
          return a.__or__(b);
        }else if(pyodide.isPyProxy(b)){
          return b.__ror__(a);
        }
      }
      if(typeof(a)!='object' && typeof(b)=='object'){
        a = new b.constructor(a);
      }else if(typeof(a)=='object' && typeof(b)=='object' && a.constructor!=b.constructor){
        throw new Error('bpo: not the same class');
      }
      if(typeof(a)=='object'&&a.operatorBinaryOr) return a.operatorBinaryOr(b);
      return a | b;
    },
    binaryXor(a, b) {
      if((typeof(a)=='number' && typeof(b)=='number') || (typeof(a)=='bigint' && typeof(b)=='bigint')) return a^b;
      if(globalThis.pyodide?.isPyProxy){
        if(pyodide.isPyProxy(a)){
          // fixed dunder name: __xor__, not __xor_
          return a.__xor__(b);
        }else if(pyodide.isPyProxy(b)){
          return b.__rxor__(a);
        }
      }
      if(typeof(a)!='object' && typeof(b)=='object'){
        a = new b.constructor(a);
      }else if(typeof(a)=='object' && typeof(b)=='object' && a.constructor!=b.constructor){
        throw new Error('bpo: not the same class');
      }
      if(typeof(a)=='object'&&a.operatorBinaryXor) return a.operatorBinaryXor(b);
      return a ^ b;
    },
    binaryLShift(a, b) {
      if((typeof(a)=='number' && typeof(b)=='number') || (typeof(a)=='bigint' && typeof(b)=='bigint')) return a<<b;
      if(globalThis.pyodide?.isPyProxy){
        if(pyodide.isPyProxy(a)){
          return a.__lshift__(b);
        }else if(pyodide.isPyProxy(b)){
          return b.__rlshift__(a);
        }
      }
      if(typeof(a)!='object' && typeof(b)=='object'){
        a = new b.constructor(a);
      }else if(typeof(a)=='object' && typeof(b)=='object' && a.constructor!=b.constructor){
        throw new Error('bpo: not the same class');
      }
      if(typeof(a)=='object'&&a.operatorBinaryLShift) return a.operatorBinaryLShift(b);
      return a << b;
    },
    binaryRShift(a, b) {
      if((typeof(a)=='number' && typeof(b)=='number') || (typeof(a)=='bigint' && typeof(b)=='bigint')) return a>>b;
      if(globalThis.pyodide?.isPyProxy){
        if(pyodide.isPyProxy(a)){
return a.__rshift__(b); }else if(pyodide.isPyProxy(b)){ return b.__rrshift__(a); } } if(typeof(a)!='object' && typeof(b)=='object'){ a = new b.constructor(a); }else if(typeof(a)=='object' && typeof(b)=='object' && a.constructor!=b.constructor){ throw new Error('bpo: not the same class'); } if(typeof(a)=='object'&&a.operatorBinaryRShift) return a.operatorBinaryRShift(b); return a >> b; }, less(a, b) { if((typeof(a)=='number' && typeof(b)=='number') || (typeof(a)=='bigint' && typeof(b)=='bigint'))return a<b; if(globalThis.pyodide?.isPyProxy){ if(pyodide.isPyProxy(a)){ return a.__lt__(b); }else if(pyodide.isPyProxy(b)){ return b.__gt__(a); } } if(typeof(a)=='object'&&a.operatorLess) return a.operatorLess(b); else if(typeof(b)=='object'&&b.operatorGreater) return b.operatorGreater(a); else if(typeof(a)=='object'&&a.operatorGreaterEqual) return !a.operatorGreaterEqual(b); return a < b; }, greater(a, b) { if((typeof(a)=='number' && typeof(b)=='number') || (typeof(a)=='bigint' && typeof(b)=='bigint'))return a>b; if(globalThis.pyodide?.isPyProxy){ if(pyodide.isPyProxy(a)){ return a.__gt__(b); }else if(pyodide.isPyProxy(b)){ return b.__lt__(a); } } if(typeof(a)=='object'&&a.operatorGreater) return a.operatorGreater(b); else if(typeof(b)=='object'&&b.operatorLess) return b.operatorLess(a); else if(typeof(a)=='object'&&a.operatorLessEqual) return !a.operatorLessEqual(b); return a > b; }, lessEqual(a, b) { if((typeof(a)=='number' && typeof(b)=='number') || (typeof(a)=='bigint' && typeof(b)=='bigint'))return a<=b; if(globalThis.pyodide?.isPyProxy){ if(pyodide.isPyProxy(a)){ return a.__le__(b); }else if(pyodide.isPyProxy(b)){ return b.__ge__(a); } } if(typeof(a)=='object'&&a.operatorLessEqual) return a.operatorLessEqual(b); else if(typeof(b)=='object'&&b.operatorGreaterEqual) return b.operatorGreaterEqual(a); else if(typeof(a)=='object'&&a.operatorGreater) return !a.operatorGreater(b); return a <= b; }, greaterEqual(a, b) { if((typeof(a)=='number' && typeof(b)=='number') || (typeof(a)=='bigint' && typeof(b)=='bigint'))return a>=b; if(globalThis.pyodide?.isPyProxy){ if(pyodide.isPyProxy(a)){ return a.__ge__(b); }else if(pyodide.isPyProxy(b)){ return b.__le__(a); } } if(typeof(a)=='object'&&a.operatorGreaterEqual) return a.operatorGreaterEqual(b); else if(typeof(b)=='object'&&b.operatorLessEqual) return b.operatorLessEqual(a); else if(typeof(a)=='object'&&a.operatorLess) return !a.operatorLess(b); return a >= b; }, equal(a, b) { if((typeof(a)=='number' && typeof(b)=='number') || (typeof(a)=='bigint' && typeof(b)=='bigint'))return a==b; if(globalThis.pyodide?.isPyProxy){ if(pyodide.isPyProxy(a)){ return a.__eq__(b); }else if(pyodide.isPyProxy(b)){ return b.__eq__(a); } } if(typeof(a)=='object'&&a.operatorEqual) return a.operatorEqual(b); else if(typeof(a)=='object'&&a.operatorNotEqual) return !a.operatorNotEqual(b); else if(typeof(b)=='object'&&b.operatorEqual) return b.operatorEqual(a); else if(typeof(b)=='object'&&b.operatorNotEqual) return !b.operatorNotEqual(a); return a == b; }, notEqual(a, b) { if((typeof(a)=='number' && typeof(b)=='number') || (typeof(a)=='bigint' && typeof(b)=='bigint'))return a!=b; if(globalThis.pyodide?.isPyProxy){ if(pyodide.isPyProxy(a)){ return a.__ne__(b); }else if(pyodide.isPyProxy(b)){ return b.__ne__(a); } } if(typeof(a)=='object'&&a.operatorNotEqual) return a.operatorNotEqual(b); else if(typeof(a)=='object'&&a.operatorEqual) return !a.operatorEqual(b); else if(typeof(b)=='object'&&b.operatorNotEqual) return b.operatorNotEqual(a); else 
if(typeof(b)=='object'&&b.operatorEqual) return !b.operatorEqual(a); return a != b; }, }; })(); export {_Op}; export function visitor(babel) { var t = babel.types; /* var preCode = (function() { var _Op=window._Op; }).toString(); preCode = preCode.slice(preCode.indexOf('{') + 1, preCode.lastIndexOf('}')); var preCodeAST = babel.template(preCode)({});*/ function initStatus(path) { var firstBlockStatement = path.findParent(path => t.isBlockStatement(path.node) || t.isProgram(path.node)); if(firstBlockStatement) { for(let directiveID in firstBlockStatement.node.directives) { let directive = firstBlockStatement.node.directives[directiveID]; if(directive.value.value == 'bpo disable'){ path.node.BPO_HAVE_DEFAULT = true; path.node.BPO_STATUS = false; break; } else if(directive.value.value == 'bpo enable'){ path.node.BPO_HAVE_DEFAULT = true; path.node.BPO_STATUS = true; break; } } if(!path.node.BPO_HAVE_DEFAULT && firstBlockStatement.node.BPO_HAVE_DEFAULT) { path.node.BPO_HAVE_DEFAULT = true; path.node.BPO_STATUS = firstBlockStatement.node.BPO_STATUS; } } if(!path.node.BPO_HAVE_DEFAULT) { path.node.BPO_HAVE_DEFAULT = true; path.node.BPO_STATUS = false; } } return { visitor: { Program(path) { //path.unshiftContainer('body', preCodeAST); }, BlockStatement(path) { initStatus(path); }, BinaryExpression(path) { initStatus(path, true); if(!path.node.BPO_STATUS) return; var tab = { '+': 'add', '-': 'sub', '*': 'mul', '/': 'div', '**': 'pow', '&': 'binaryAnd', '|': 'binaryOr', '^': 'binaryXor', '<<': 'binaryLShift', '>>': 'binaryRShift', '<': 'less', '>': 'greater', '<=': 'lessEqual', '>=': 'greaterEqual', '==': 'equal', '!=': 'notEqual', }; if(!(path.node.operator in tab)) return; path.replaceWith( t.callExpression( t.MemberExpression(t.identifier('_Op'), t.identifier(tab[path.node.operator])), [path.node.left, path.node.right] ) ); }, }, }; };
}, div(a, b) {
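// Usage sketch (a hedged illustration, not part of the original file): with
// this Babel visitor registered, a block that opts in via the 'bpo enable'
// directive has its binary operators rewritten into _Op calls, e.g.
//   'bpo enable';
//   let c = a + b;      // becomes: let c = _Op.add(a, b);
//   let ok = x <= y;    // becomes: let ok = _Op.lessEqual(x, y);
// Each _Op method falls through to the native operator for number/bigint
// pairs, dispatches to Pyodide dunder methods for PyProxy operands, and
// otherwise looks for an operator* method on the object operand.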
default_root.py
import os
from pathlib import Path DEFAULT_ROOT_PATH = Path(os.path.expanduser(os.getenv("SHL_ROOT", "~/.shl/mainnet"))).resolve()
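# Usage sketch (illustrative values, not part of the original file): the root
# can be redirected per process through the SHL_ROOT environment variable,
# otherwise it resolves to ~/.shl/mainnet expanded to an absolute path, e.g.
#   import os
#   os.environ["SHL_ROOT"] = "/tmp/shl-testnet"   # must be set before import
#   from default_root import DEFAULT_ROOT_PATH
#   print(DEFAULT_ROOT_PATH)                      # /tmp/shl-testnet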
FizzBuzz.ts
export const fizzBuzz = (n: number) => [...Array(n)].map((v, i) => { const val = i + 1;
if (val % 3 === 0) return "Fizz"; if (val % 5 === 0) return "Buzz"; return val + ""; });
if (val % 3 === 0 && val % 5 === 0) return "FizzBuzz";
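// Usage sketch (not part of the original file): the filled-in middle line
// above must precede the plain Fizz/Buzz checks, since multiples of 15
// satisfy both. With it in place:
//   fizzBuzz(15);
//   // => ["1", "2", "Fizz", "4", "Buzz", "Fizz", "7", "8", "Fizz", "Buzz",
//   //     "11", "Fizz", "13", "14", "FizzBuzz"]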
test_hide_application.py
# -*- coding: utf-8 -*- from selenium_tests.UserDriverTest import UserDriverTest from selenium.webdriver.common.by import By class
(UserDriverTest): def test_hide_application(self): self.wait_until_application_list_loaded() self.type_text_in_element_located(By.ID, "search-input", "foobarheho") self.wait_until_text_inside_element_located(By.ID, "applistentries", "")
TestHideApplication
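# For reference, a minimal sketch of the explicit-wait pattern the helper
# methods above presumably wrap (standard Selenium APIs; the driver attribute
# and the 10-second timeout are assumptions):
#   from selenium.webdriver.support.ui import WebDriverWait
#   WebDriverWait(self.driver, 10).until(
#       lambda d: d.find_element(By.ID, "applistentries").text == "")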
synoptic_manager.py
# This file is part of the ISIS IBEX application. # Copyright (C) 2012-2016 Science & Technology Facilities Council. # All rights reserved. # # This program is distributed in the hope that it will be useful. # This program and the accompanying materials are made available under the # terms of the Eclipse Public License v1.0 which accompanies this distribution. # EXCEPT AS EXPRESSLY SET FORTH IN THE ECLIPSE PUBLIC LICENSE V1.0, THE PROGRAM # AND ACCOMPANYING MATERIALS ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND. See the Eclipse Public License v1.0 for more details. # # You should have received a copy of the Eclipse Public License v1.0 # along with this program; if not, you can obtain a copy from # https://www.eclipse.org/org/documents/epl-v10.php or # http://opensource.org/licenses/eclipse-1.0.php import os from typing import List, TYPE_CHECKING if TYPE_CHECKING: from block_server import BlockServer from BlockServer.core.active_config_holder import ActiveConfigHolder from BlockServer.core.config_list_manager import InvalidDeleteException from BlockServer.core.file_path_manager import FILEPATH_MANAGER from BlockServer.core.on_the_fly_pv_interface import OnTheFlyPvInterface from BlockServer.fileIO.schema_checker import ConfigurationSchemaChecker from lxml import etree from server_common.common_exceptions import MaxAttemptsExceededException from server_common.utilities import print_and_log, compress_and_hex, create_pv_name, \ convert_to_json, convert_from_json from BlockServer.synoptic.synoptic_file_io import SynopticFileIO # Synoptics PVs are of the form IN:DEMO:SYNOPTICS:XXXXX (no BLOCKSERVER in the name) # This is to allow longer synoptic names without exceeding the maximum allowed length for PVs SYNOPTIC_PRE = "SYNOPTICS:" SYNOPTIC_GET = ":GET" SYNOPTIC_SET = ":SET" SYNOPTIC_NAMES = "NAMES" SYNOPTIC_GET_DEFAULT = "GET_DEFAULT" SYNOPTIC_BLANK = "__BLANK__" SYNOPTIC_SET_DETAILS = "SET_DETAILS" SYNOPTIC_DELETE = "DELETE" SYNOPTIC_SCHEMA = "SCHEMA" SYNOPTIC_SCHEMA_FILE = "synoptic.xsd" class SynopticManager(OnTheFlyPvInterface): """Class for managing the PVs associated with synoptics""" def __init__(self, block_server: 'BlockServer', schema_folder: str, active_configholder: ActiveConfigHolder, file_io: SynopticFileIO = SynopticFileIO()): """Constructor.
Args: block_server: A reference to the BlockServer instance schema_folder: The filepath for the synoptic schema active_configholder: A reference to the active configuration file_io: Responsible for file IO """ super(SynopticManager, self).__init__() self.pvs_to_write.extend([SYNOPTIC_PRE + SYNOPTIC_DELETE, SYNOPTIC_PRE + SYNOPTIC_SET_DETAILS]) self._directory = FILEPATH_MANAGER.synoptic_dir self._schema_folder = schema_folder self._synoptic_pvs = dict() self._bs = block_server self._activech = active_configholder self._file_io = file_io self._default_syn_xml = b"" self._create_standard_pvs() self._load_initial() def handle_pv_write(self, pv: str, data: str): try: if pv == SYNOPTIC_PRE + SYNOPTIC_DELETE: self.delete(convert_from_json(data)) self.update_monitors() elif pv == SYNOPTIC_PRE + SYNOPTIC_SET_DETAILS: self.save_synoptic_xml(bytes(data, encoding="utf-8")) self.update_monitors() except IOError as err: print_and_log(f"Error accessing synoptic file: {err}", "MAJOR") except Exception as err: print_and_log(f"Error writing to PV {pv}: {err}", "MAJOR") def handle_pv_read(self, pv): # Nothing to do as it is all handled by monitors pass def update_monitors(self): with self._bs.monitor_lock: print_and_log("Updating synoptic monitors") self._bs.setParam(SYNOPTIC_PRE + SYNOPTIC_GET_DEFAULT, compress_and_hex(str(self.get_default_synoptic_xml(), encoding="utf-8"))) names = convert_to_json(self.get_synoptic_list()) self._bs.setParam(SYNOPTIC_PRE + SYNOPTIC_NAMES, compress_and_hex(names)) self._bs.updatePVs() print_and_log("Finished updating synoptic monitors") def on_config_change(self, full_init=False): # If the config has a default synoptic then set the PV to that default = self._activech.get_config_meta().synoptic self.set_default_synoptic(default) self.update_monitors() def _create_standard_pvs(self): self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_NAMES, 16000) self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_GET_DEFAULT, 16000) self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_BLANK + SYNOPTIC_GET, 16000) self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_SET_DETAILS, 16000) self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_DELETE, 16000) self._bs.add_string_pv_to_db(SYNOPTIC_PRE + SYNOPTIC_SCHEMA, 16000) # Set values for PVs that don't change self.update_pv_value(SYNOPTIC_PRE + SYNOPTIC_BLANK + SYNOPTIC_GET, compress_and_hex(self.get_blank_synoptic())) self.update_pv_value(SYNOPTIC_PRE + SYNOPTIC_SCHEMA, compress_and_hex(self.get_synoptic_schema())) def _load_initial(self): """Create the PVs for all the synoptics found in the synoptics directory.""" for f in self._file_io.get_list_synoptic_files(self._directory): # Load the data, checking the schema try: data = self._file_io.read_synoptic_file(self._directory, f) ConfigurationSchemaChecker.check_xml_matches_schema( os.path.join(self._schema_folder, SYNOPTIC_SCHEMA_FILE), data, "Synoptic") # Get the synoptic name self._create_pv(data) except MaxAttemptsExceededException: print_and_log(f"Could not open synoptic file {f}. Please check the file is " f"not in use by another process.", "MAJOR") except Exception as err: print_and_log(f"Error creating synoptic PV: {err}", "MAJOR") def _create_pv(self, data: bytes): """Creates a single PV based on a name and data. 
Adds this PV to the dictionary returned on get_synoptic_list Args: data (bytes): Starting data for the pv, the pv name is derived from the name tag of this """ name = self._get_synoptic_name_from_xml(data) if name not in self._synoptic_pvs: # Extra check: if a case-insensitive match exists, remove it for key in self._synoptic_pvs.keys(): if name.lower() == key.lower(): self._synoptic_pvs.pop(key) pv = create_pv_name(name, list(self._synoptic_pvs.values()), "SYNOPTIC") self._synoptic_pvs[name] = pv # Create the PV self._bs.add_string_pv_to_db(SYNOPTIC_PRE + self._synoptic_pvs[name] + SYNOPTIC_GET, 16000) # Update the value self.update_pv_value(SYNOPTIC_PRE + self._synoptic_pvs[name] + SYNOPTIC_GET, compress_and_hex(str(data, encoding="utf-8"))) def update_pv_value(self, name, data): """ Updates value of a PV holding synoptic information with new data Args: name (string): The name of the edited synoptic data (bytes): The new synoptic data """ self._bs.setParam(name, data) self._bs.updatePVs() def get_synoptic_list(self): """Gets the names and associated pvs of the synoptic files in the synoptics directory. Returns: list : Alphabetical list of synoptics files on the server, along with their associated pvs """ syn_list = [] default_is_none_synoptic = True for k, v in self._synoptic_pvs.items(): is_default = False if bytes(f"<name>{k}</name>", encoding="utf-8") in self._default_syn_xml: default_is_none_synoptic = False is_default = True syn_list.append({"name": k, "pv": v, "is_default": is_default}) ans = sorted(syn_list, key=lambda x: x['name'].lower()) # Insert the "blank" synoptic ans.insert(0, {"pv": "__BLANK__", "name": "-- NONE --", "is_default": default_is_none_synoptic}) return ans def set_default_synoptic(self, name): """Sets the default synoptic. Args: name (string): the name of the synoptic to load """ fullname = name + ".xml" f = self._file_io.get_list_synoptic_files(self._directory) if fullname in f: # Load the data try: data = self._file_io.read_synoptic_file(self._directory, fullname) self._default_syn_xml = data except MaxAttemptsExceededException: print_and_log(f"Could not open synoptic file {fullname}. Please check the file is not " f"in use by another process.", "MAJOR") self._default_syn_xml = b"" else: # No synoptic self._default_syn_xml = b"" def get_default_synoptic_xml(self) -> bytes: """Gets the XML for the default synoptic. Returns: bytes : The XML for the synoptic """ return self._default_syn_xml def _get_synoptic_name_from_xml(self, xml_data: bytes): name = None root = etree.fromstring(xml_data) for child in root: if child.tag.split('}', 1)[1] == "name": name = child.text if name is None: raise Exception("Synoptic contains no name tag") return name
Args: xml_data (bytes): The XML to be saved """ try: # Check against schema ConfigurationSchemaChecker.check_xml_matches_schema(os.path.join(self._schema_folder, SYNOPTIC_SCHEMA_FILE), xml_data, "Synoptic") # Update PVs self._create_pv(xml_data) except Exception as err: print_and_log(err) raise name = self._get_synoptic_name_from_xml(xml_data) save_path = FILEPATH_MANAGER.get_synoptic_path(name) try: self._file_io.write_synoptic_file(name, save_path, xml_data) except MaxAttemptsExceededException: raise IOError(f"Could not save to synoptic file at {save_path}. Please check the file is " f"not in use by another process.") print_and_log("Synoptic saved: " + name) def delete(self, delete_list: List[str]): """Takes a list of synoptics and removes them from the file system and any relevant PVs. Args: delete_list (list): The synoptics to delete """ print_and_log("Deleting: " + ', '.join(list(delete_list)), "INFO") delete_list = set(delete_list) if not delete_list.issubset(self._synoptic_pvs.keys()): raise InvalidDeleteException("Delete list contains unknown configurations") for synoptic in delete_list: self._delete_synoptic(synoptic) def _delete_synoptic(self, synoptic: str): fullname = synoptic + ".xml" try: self._file_io.delete_synoptic(self._directory, fullname) except MaxAttemptsExceededException: print_and_log(f"Could not delete synoptic file {fullname}. Please check the file is " f"not in use by another process.", "MINOR") return self._bs.delete_pv_from_db(SYNOPTIC_PRE + self._synoptic_pvs[synoptic] + SYNOPTIC_GET) del self._synoptic_pvs[synoptic] def update(self, xml_data: str): """Updates the synoptic list when modifications are made via the filesystem. Args: xml_data (str): The xml data to update the PV with """ # Convert to bytes bytes_xml_data = bytes(xml_data, encoding="utf-8") name = self._get_synoptic_name_from_xml(bytes_xml_data) names = self._synoptic_pvs.keys() if name in names: self.update_pv_value(SYNOPTIC_PRE + self._synoptic_pvs[name] + SYNOPTIC_GET, compress_and_hex(xml_data)) else: self._create_pv(bytes_xml_data) self.update_monitors() def get_synoptic_schema(self): """Gets the XSD data for the synoptic. Returns: string : The XML for the synoptic schema """ schema = "" with open(os.path.join(self._schema_folder, SYNOPTIC_SCHEMA_FILE), 'r') as schemafile: schema = schemafile.read() return schema def get_blank_synoptic(self): """Gets a blank synoptic. Returns: string : The XML for the blank synoptic """ return """<?xml version="1.0" ?><instrument xmlns="http://www.isis.stfc.ac.uk//instrument"> <name>-- NONE --</name><components/></instrument>"""
def save_synoptic_xml(self, xml_data: bytes): """Saves the xml under the filename taken from the xml name tag.
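# A brief sketch of the PV surface this class exposes (the "IN:DEMO:"
# instrument prefix is an illustrative assumption taken from the comment at
# the top of the file): clients read IN:DEMO:SYNOPTICS:NAMES for the
# compressed-and-hexed JSON list of synoptics and
# IN:DEMO:SYNOPTICS:<PVNAME>:GET for one synoptic's XML; writing synoptic XML
# to IN:DEMO:SYNOPTICS:SET_DETAILS or a JSON list of names to
# IN:DEMO:SYNOPTICS:DELETE is routed by handle_pv_write to save_synoptic_xml
# and delete respectively.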
convertValues.go
/* * Cadence - The resource-oriented smart contract programming language * * Copyright 2019-2020 Dapper Labs, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package runtime import ( "fmt" "github.com/onflow/cadence" "github.com/onflow/cadence/runtime/common" "github.com/onflow/cadence/runtime/interpreter" "github.com/onflow/cadence/runtime/sema" "github.com/onflow/cadence/runtime/stdlib" ) // exportValue converts a runtime value to its native Go representation. func exportValue(value exportableValue) (cadence.Value, error) { return exportValueWithInterpreter(value.Value, value.Interpreter(), seenReferences{}) } // ExportValue converts a runtime value to its native Go representation. func ExportValue(value interpreter.Value, inter *interpreter.Interpreter) (cadence.Value, error) { return exportValueWithInterpreter(value, inter, seenReferences{}) } // NOTE: Do not generalize to map[interpreter.Value], // as not all values are Go hashable, i.e. this might lead to run-time panics type seenReferences map[*interpreter.EphemeralReferenceValue]struct{} // exportValueWithInterpreter exports the given internal (interpreter) value to an external value. // // The export is recursive, the results parameter prevents cycles: // it is checked at the start of the recursively called function, // and pre-set before a recursive call. 
// func exportValueWithInterpreter( value interpreter.Value, inter *interpreter.Interpreter, seenReferences seenReferences, ) ( cadence.Value, error, ) { switch v := value.(type) { case interpreter.VoidValue: return cadence.NewVoid(), nil case interpreter.NilValue: return cadence.NewOptional(nil), nil case *interpreter.SomeValue: return exportSomeValue(v, inter, seenReferences) case interpreter.BoolValue: return cadence.NewBool(bool(v)), nil case *interpreter.StringValue: return cadence.NewString(v.Str) case *interpreter.ArrayValue: return exportArrayValue(v, inter, seenReferences) case interpreter.IntValue: return cadence.NewIntFromBig(v.ToBigInt()), nil case interpreter.Int8Value: return cadence.NewInt8(int8(v)), nil case interpreter.Int16Value: return cadence.NewInt16(int16(v)), nil case interpreter.Int32Value: return cadence.NewInt32(int32(v)), nil case interpreter.Int64Value: return cadence.NewInt64(int64(v)), nil case interpreter.Int128Value: return cadence.NewInt128FromBig(v.ToBigInt()) case interpreter.Int256Value: return cadence.NewInt256FromBig(v.ToBigInt()) case interpreter.UIntValue: return cadence.NewUIntFromBig(v.ToBigInt()) case interpreter.UInt8Value: return cadence.NewUInt8(uint8(v)), nil case interpreter.UInt16Value: return cadence.NewUInt16(uint16(v)), nil case interpreter.UInt32Value: return cadence.NewUInt32(uint32(v)), nil case interpreter.UInt64Value: return cadence.NewUInt64(uint64(v)), nil case interpreter.UInt128Value: return cadence.NewUInt128FromBig(v.ToBigInt()) case interpreter.UInt256Value: return cadence.NewUInt256FromBig(v.ToBigInt()) case interpreter.Word8Value: return cadence.NewWord8(uint8(v)), nil case interpreter.Word16Value: return cadence.NewWord16(uint16(v)), nil case interpreter.Word32Value: return cadence.NewWord32(uint32(v)), nil case interpreter.Word64Value: return cadence.NewWord64(uint64(v)), nil case interpreter.Fix64Value: return cadence.Fix64(v), nil case interpreter.UFix64Value: return cadence.UFix64(v), nil case *interpreter.CompositeValue: return exportCompositeValue(v, inter, seenReferences) case *interpreter.SimpleCompositeValue: return exportSimpleCompositeValue(v, inter, seenReferences) case *interpreter.DictionaryValue: return exportDictionaryValue(v, inter, seenReferences) case interpreter.AddressValue: return cadence.NewAddress(v), nil case interpreter.LinkValue: return exportLinkValue(v, inter), nil case interpreter.PathValue: return exportPathValue(v), nil case interpreter.TypeValue: return exportTypeValue(v, inter), nil case *interpreter.CapabilityValue: return exportCapabilityValue(v, inter), nil case *interpreter.EphemeralReferenceValue: // Break recursion through ephemeral references if _, ok := seenReferences[v]; ok { return nil, nil } defer delete(seenReferences, v) seenReferences[v] = struct{}{} return exportValueWithInterpreter(v.Value, inter, seenReferences) case *interpreter.StorageReferenceValue: referencedValue := v.ReferencedValue(inter) if referencedValue == nil { return nil, nil } return exportValueWithInterpreter(*referencedValue, inter, seenReferences) } return nil, fmt.Errorf("cannot export value of type %T", value) } func exportSomeValue( v *interpreter.SomeValue, inter *interpreter.Interpreter, seenReferences seenReferences, ) ( cadence.Optional, error, ) { if v.Value == nil { return cadence.NewOptional(nil), nil } value, err := exportValueWithInterpreter(v.Value, inter, seenReferences) if err != nil { return cadence.Optional{}, err } return cadence.NewOptional(value), nil } func exportArrayValue( v 
*interpreter.ArrayValue, inter *interpreter.Interpreter, seenReferences seenReferences, ) ( cadence.Array, error, ) { values := make([]cadence.Value, 0, v.Count()) var err error v.Iterate(func(value interpreter.Value) (resume bool) { var exportedValue cadence.Value exportedValue, err = exportValueWithInterpreter(value, inter, seenReferences) if err != nil { return false } values = append( values, exportedValue, ) return true }) if err != nil { return cadence.Array{}, err } return cadence.NewArray(values), nil } func exportCompositeValue( v *interpreter.CompositeValue, inter *interpreter.Interpreter, seenReferences seenReferences, ) ( cadence.Value, error, ) { dynamicType := v.DynamicType(inter, interpreter.SeenReferences{}).(interpreter.CompositeDynamicType) staticType := dynamicType.StaticType.(*sema.CompositeType) // TODO: consider making the results map "global", by moving it up to exportValueWithInterpreter t := exportCompositeType(staticType, map[sema.TypeID]cadence.Type{}) // NOTE: use the exported type's fields to ensure fields in type // and value are in sync fieldNames := t.CompositeFields() fields := make([]cadence.Value, len(fieldNames)) for i, field := range fieldNames { fieldName := field.Identifier // TODO: provide proper location range fieldValue := v.GetField(inter, interpreter.ReturnEmptyLocationRange, fieldName) if fieldValue == nil && v.ComputedFields != nil { if computedField, ok := v.ComputedFields[fieldName]; ok { // TODO: provide proper location range fieldValue = computedField(inter, interpreter.ReturnEmptyLocationRange) } } exportedFieldValue, err := exportValueWithInterpreter(fieldValue, inter, seenReferences) if err != nil { return nil, err } fields[i] = exportedFieldValue } // NOTE: when modifying the cases below, // also update the error message below! 
switch staticType.Kind { case common.CompositeKindStructure: return cadence.NewStruct(fields).WithType(t.(*cadence.StructType)), nil case common.CompositeKindResource: return cadence.NewResource(fields).WithType(t.(*cadence.ResourceType)), nil case common.CompositeKindEvent: return cadence.NewEvent(fields).WithType(t.(*cadence.EventType)), nil case common.CompositeKindContract: return cadence.NewContract(fields).WithType(t.(*cadence.ContractType)), nil case common.CompositeKindEnum: return cadence.NewEnum(fields).WithType(t.(*cadence.EnumType)), nil } return nil, fmt.Errorf( "invalid composite kind `%s`, must be %s", staticType.Kind, common.EnumerateWords( []string{ common.CompositeKindStructure.Name(), common.CompositeKindResource.Name(), common.CompositeKindEvent.Name(), common.CompositeKindContract.Name(), common.CompositeKindEnum.Name(), }, "or", ), ) } func exportSimpleCompositeValue( v *interpreter.SimpleCompositeValue, inter *interpreter.Interpreter, seenReferences seenReferences, ) ( cadence.Value, error, ) { dynamicType, ok := v.DynamicType(inter, interpreter.SeenReferences{}).(interpreter.CompositeDynamicType) if !ok { return nil, fmt.Errorf( "unexportable composite value: %s", dynamicType.StaticType, ) } staticType := dynamicType.StaticType.(*sema.CompositeType) // TODO: consider making the results map "global", by moving it up to exportValueWithInterpreter t := exportCompositeType(staticType, map[sema.TypeID]cadence.Type{}) // NOTE: use the exported type's fields to ensure fields in type // and value are in sync fieldNames := t.CompositeFields() fields := make([]cadence.Value, len(fieldNames)) for i, field := range fieldNames { fieldName := field.Identifier fieldValue := v.Fields[fieldName] if fieldValue == nil && v.ComputedFields != nil { if computedField, ok := v.ComputedFields[fieldName]; ok { // TODO: provide proper location range fieldValue = computedField(inter, interpreter.ReturnEmptyLocationRange) } } exportedFieldValue, err := exportValueWithInterpreter(fieldValue, inter, seenReferences) if err != nil { return nil, err } fields[i] = exportedFieldValue } // NOTE: when modifying the cases below, // also update the error message below! 
switch staticType.Kind { case common.CompositeKindStructure: return cadence.NewStruct(fields).WithType(t.(*cadence.StructType)), nil case common.CompositeKindResource: return cadence.NewResource(fields).WithType(t.(*cadence.ResourceType)), nil case common.CompositeKindEvent: return cadence.NewEvent(fields).WithType(t.(*cadence.EventType)), nil case common.CompositeKindContract: return cadence.NewContract(fields).WithType(t.(*cadence.ContractType)), nil case common.CompositeKindEnum: return cadence.NewEnum(fields).WithType(t.(*cadence.EnumType)), nil } return nil, fmt.Errorf( "invalid composite kind `%s`, must be %s", staticType.Kind, common.EnumerateWords( []string{ common.CompositeKindStructure.Name(), common.CompositeKindResource.Name(), common.CompositeKindEvent.Name(), common.CompositeKindContract.Name(), common.CompositeKindEnum.Name(), }, "or", ), ) } func exportDictionaryValue( v *interpreter.DictionaryValue, inter *interpreter.Interpreter, seenReferences seenReferences, ) ( cadence.Dictionary, error, ) { pairs := make([]cadence.KeyValuePair, 0, v.Count()) var err error v.Iterate(func(key, value interpreter.Value) (resume bool) { var convertedKey cadence.Value convertedKey, err = exportValueWithInterpreter(key, inter, seenReferences) if err != nil { return false } var convertedValue cadence.Value convertedValue, err = exportValueWithInterpreter(value, inter, seenReferences) if err != nil { return false } pairs = append( pairs, cadence.KeyValuePair{ Key: convertedKey, Value: convertedValue, }, ) return true }) if err != nil { return cadence.Dictionary{}, err } return cadence.NewDictionary(pairs), nil } func exportLinkValue(v interpreter.LinkValue, inter *interpreter.Interpreter) cadence.Link { path := exportPathValue(v.TargetPath) ty := string(inter.MustConvertStaticToSemaType(v.Type).ID()) return cadence.NewLink(path, ty) } func exportPathValue(v interpreter.PathValue) cadence.Path { return cadence.Path{ Domain: v.Domain.Identifier(), Identifier: v.Identifier, } } func
(v interpreter.TypeValue, inter *interpreter.Interpreter) cadence.TypeValue { var typ sema.Type if v.Type != nil { typ = inter.MustConvertStaticToSemaType(v.Type) } return cadence.TypeValue{ StaticType: ExportType(typ, map[sema.TypeID]cadence.Type{}), } } func exportCapabilityValue(v *interpreter.CapabilityValue, inter *interpreter.Interpreter) cadence.Capability { var borrowType sema.Type if v.BorrowType != nil { borrowType = inter.MustConvertStaticToSemaType(v.BorrowType) } return cadence.Capability{ Path: exportPathValue(v.Path), Address: cadence.NewAddress(v.Address), BorrowType: ExportType(borrowType, map[sema.TypeID]cadence.Type{}), } } // exportEvent converts a runtime event to its native Go representation. func exportEvent(event exportableEvent, seenReferences seenReferences) (cadence.Event, error) { fields := make([]cadence.Value, len(event.Fields)) for i, field := range event.Fields { value, err := exportValueWithInterpreter(field.Value, field.Interpreter(), seenReferences) if err != nil { return cadence.Event{}, err } fields[i] = value } eventType := ExportType(event.Type, map[sema.TypeID]cadence.Type{}).(*cadence.EventType) return cadence.NewEvent(fields).WithType(eventType), nil } // importValue converts a Cadence value to a runtime value. func importValue(inter *interpreter.Interpreter, value cadence.Value, expectedType sema.Type) (interpreter.Value, error) { switch v := value.(type) { case cadence.Void: return interpreter.VoidValue{}, nil case cadence.Optional: return importOptionalValue(inter, v, expectedType) case cadence.Bool: return interpreter.BoolValue(v), nil case cadence.String: return interpreter.NewStringValue(string(v)), nil case cadence.Bytes: return interpreter.ByteSliceToByteArrayValue(inter, v), nil case cadence.Address: return interpreter.NewAddressValue(common.Address(v)), nil case cadence.Int: return interpreter.NewIntValueFromBigInt(v.Value), nil case cadence.Int8: return interpreter.Int8Value(v), nil case cadence.Int16: return interpreter.Int16Value(v), nil case cadence.Int32: return interpreter.Int32Value(v), nil case cadence.Int64: return interpreter.Int64Value(v), nil case cadence.Int128: return interpreter.NewInt128ValueFromBigInt(v.Value), nil case cadence.Int256: return interpreter.NewInt256ValueFromBigInt(v.Value), nil case cadence.UInt: return interpreter.NewUIntValueFromBigInt(v.Value), nil case cadence.UInt8: return interpreter.UInt8Value(v), nil case cadence.UInt16: return interpreter.UInt16Value(v), nil case cadence.UInt32: return interpreter.UInt32Value(v), nil case cadence.UInt64: return interpreter.UInt64Value(v), nil case cadence.UInt128: return interpreter.NewUInt128ValueFromBigInt(v.Value), nil case cadence.UInt256: return interpreter.NewUInt256ValueFromBigInt(v.Value), nil case cadence.Word8: return interpreter.Word8Value(v), nil case cadence.Word16: return interpreter.Word16Value(v), nil case cadence.Word32: return interpreter.Word32Value(v), nil case cadence.Word64: return interpreter.Word64Value(v), nil case cadence.Fix64: return interpreter.Fix64Value(v), nil case cadence.UFix64: return interpreter.UFix64Value(v), nil case cadence.Path: return importPathValue(v), nil case cadence.Array: return importArrayValue(inter, v, expectedType) case cadence.Dictionary: return importDictionaryValue(inter, v, expectedType) case cadence.Struct: return importCompositeValue( inter, common.CompositeKindStructure, v.StructType.Location, v.StructType.QualifiedIdentifier, v.StructType.Fields, v.Fields, ) case cadence.Resource: return 
importCompositeValue( inter, common.CompositeKindResource, v.ResourceType.Location, v.ResourceType.QualifiedIdentifier, v.ResourceType.Fields, v.Fields, ) case cadence.Event: return importCompositeValue( inter, common.CompositeKindEvent, v.EventType.Location, v.EventType.QualifiedIdentifier, v.EventType.Fields, v.Fields, ) case cadence.Enum: return importCompositeValue( inter, common.CompositeKindEnum, v.EnumType.Location, v.EnumType.QualifiedIdentifier, v.EnumType.Fields, v.Fields, ) case cadence.TypeValue: return importTypeValue( inter, v.StaticType, ) case cadence.Capability: return importCapability( inter, v.Path, v.Address, v.BorrowType, ) } return nil, fmt.Errorf("cannot import value of type %T", value) } func importPathValue(v cadence.Path) interpreter.PathValue { return interpreter.PathValue{ Domain: common.PathDomainFromIdentifier(v.Domain), Identifier: v.Identifier, } } func importTypeValue( inter *interpreter.Interpreter, v cadence.Type, ) ( interpreter.TypeValue, error, ) { typ := ImportType(v) /* creating a static type performs no validation, so in order to be sure the type we have created is legal, we convert it to a sema type. If this fails, the import is invalid */ _, err := inter.ConvertStaticToSemaType(typ) if err != nil { return interpreter.TypeValue{}, err } return interpreter.TypeValue{ Type: typ, }, nil } func importCapability( _ *interpreter.Interpreter, path cadence.Path, address cadence.Address, borrowType cadence.Type, ) ( *interpreter.CapabilityValue, error, ) { _, ok := borrowType.(cadence.ReferenceType) if !ok { return nil, fmt.Errorf( "cannot import capability: expected reference, got '%s'", borrowType.ID(), ) } return &interpreter.CapabilityValue{ Path: importPathValue(path), Address: interpreter.NewAddressValueFromBytes(address.Bytes()), BorrowType: ImportType(borrowType), }, nil } func importOptionalValue( inter *interpreter.Interpreter, v cadence.Optional, expectedType sema.Type, ) ( interpreter.Value, error, ) { if v.Value == nil { return interpreter.NilValue{}, nil } var innerType sema.Type if optionalType, ok := expectedType.(*sema.OptionalType); ok { innerType = optionalType.Type } innerValue, err := importValue(inter, v.Value, innerType) if err != nil { return nil, err } return interpreter.NewSomeValueNonCopying(innerValue), nil } func importArrayValue( inter *interpreter.Interpreter, v cadence.Array, expectedType sema.Type, ) ( *interpreter.ArrayValue, error, ) { values := make([]interpreter.Value, len(v.Values)) var elementType sema.Type arrayType, ok := expectedType.(sema.ArrayType) if ok { elementType = arrayType.ElementType(false) } for i, element := range v.Values { value, err := importValue(inter, element, elementType) if err != nil { return nil, err } values[i] = value } var staticArrayType interpreter.ArrayStaticType if arrayType != nil { staticArrayType = interpreter.ConvertSemaArrayTypeToStaticArrayType(arrayType) } else { types := make([]sema.Type, len(v.Values)) for i, value := range values { typ, err := inter.ConvertStaticToSemaType(value.StaticType()) if err != nil { return nil, err } types[i] = typ } elementSuperType := sema.LeastCommonSuperType(types...) 
if elementSuperType == sema.InvalidType { return nil, fmt.Errorf("cannot import array: elements do not belong to the same type") } staticArrayType = interpreter.VariableSizedStaticType{ Type: interpreter.ConvertSemaToStaticType(elementSuperType), } } return interpreter.NewArrayValue( inter, staticArrayType, common.Address{}, values..., ), nil } func importDictionaryValue( inter *interpreter.Interpreter, v cadence.Dictionary, expectedType sema.Type, ) ( *interpreter.DictionaryValue, error, ) { keysAndValues := make([]interpreter.Value, len(v.Pairs)*2) var keyType sema.Type var valueType sema.Type dictionaryType, ok := expectedType.(*sema.DictionaryType) if ok { keyType = dictionaryType.KeyType valueType = dictionaryType.ValueType } for i, pair := range v.Pairs { key, err := importValue(inter, pair.Key, keyType) if err != nil { return nil, err } keysAndValues[i*2] = key value, err := importValue(inter, pair.Value, valueType) if err != nil { return nil, err } keysAndValues[i*2+1] = value } var dictionaryStaticType interpreter.DictionaryStaticType if dictionaryType != nil { dictionaryStaticType = interpreter.ConvertSemaDictionaryTypeToStaticDictionaryType(dictionaryType) } else { size := len(v.Pairs) keyTypes := make([]sema.Type, size) valueTypes := make([]sema.Type, size) for i := 0; i < size; i++ { keyType, err := inter.ConvertStaticToSemaType(keysAndValues[i*2].StaticType()) if err != nil { return nil, err } keyTypes[i] = keyType valueType, err := inter.ConvertStaticToSemaType(keysAndValues[i*2+1].StaticType()) if err != nil { return nil, err } valueTypes[i] = valueType } keySuperType := sema.LeastCommonSuperType(keyTypes...) valueSuperType := sema.LeastCommonSuperType(valueTypes...) if !sema.IsValidDictionaryKeyType(keySuperType) { return nil, fmt.Errorf( "cannot import dictionary: keys do not belong to the same type", ) } if valueSuperType == sema.InvalidType { return nil, fmt.Errorf("cannot import dictionary: values do not belong to the same type") } dictionaryStaticType = interpreter.DictionaryStaticType{ KeyType: interpreter.ConvertSemaToStaticType(keySuperType), ValueType: interpreter.ConvertSemaToStaticType(valueSuperType), } } return interpreter.NewDictionaryValue( inter, dictionaryStaticType, keysAndValues..., ), nil } func importCompositeValue( inter *interpreter.Interpreter, kind common.CompositeKind, location Location, qualifiedIdentifier string, fieldTypes []cadence.Field, fieldValues []cadence.Value, ) ( *interpreter.CompositeValue, error, ) { var fields []interpreter.CompositeField typeID := common.NewTypeIDFromQualifiedName(location, qualifiedIdentifier) compositeType, typeErr := inter.GetCompositeType(location, qualifiedIdentifier, typeID) if typeErr != nil { return nil, typeErr } for i := 0; i < len(fieldTypes) && i < len(fieldValues); i++ { fieldType := fieldTypes[i] fieldValue := fieldValues[i] var expectedFieldType sema.Type member, ok := compositeType.Members.Get(fieldType.Identifier) if ok { expectedFieldType = member.TypeAnnotation.Type } importedFieldValue, err := importValue(inter, fieldValue, expectedFieldType) if err != nil { return nil, err } fields = append(fields, interpreter.CompositeField{ Name: fieldType.Identifier, Value: importedFieldValue, }, ) } if location == nil { switch sema.NativeCompositeTypes[qualifiedIdentifier] { case sema.PublicKeyType: // PublicKey has a dedicated constructor // (e.g.
it has computed fields that must be initialized) return importPublicKey(inter, fields) case sema.HashAlgorithmType: // HashAlgorithmType has a dedicated constructor // (e.g. it has host functions) return importHashAlgorithm(inter, fields) case sema.SignatureAlgorithmType: // SignatureAlgorithmType has a dedicated constructor // (e.g. it has host functions) return importSignatureAlgorithm(inter, fields) default: return nil, fmt.Errorf( "cannot import value of type %s", qualifiedIdentifier, ) } } return interpreter.NewCompositeValue( inter, location, qualifiedIdentifier, kind, fields, common.Address{}, ), nil } func importPublicKey( inter *interpreter.Interpreter, fields []interpreter.CompositeField, ) ( *interpreter.CompositeValue, error, ) { var publicKeyValue *interpreter.ArrayValue var signAlgoValue *interpreter.CompositeValue ty := sema.PublicKeyType for _, field := range fields { switch field.Name { case sema.PublicKeyPublicKeyField: arrayValue, ok := field.Value.(*interpreter.ArrayValue) if !ok { return nil, fmt.Errorf( "cannot import value of type '%s'. invalid value for field '%s': %v", ty, field.Name, field.Value, ) } publicKeyValue = arrayValue case sema.PublicKeySignAlgoField: compositeValue, ok := field.Value.(*interpreter.CompositeValue) if !ok { return nil, fmt.Errorf( "cannot import value of type '%s'. invalid value for field '%s': %v", ty, field.Name, field.Value, ) } signAlgoValue = compositeValue case sema.PublicKeyIsValidField: // 'isValid' field set by the user must be ignored. // This is calculated when creating the public key. default: return nil, fmt.Errorf( "cannot import value of type '%s'. invalid field '%s'", ty, field.Name, ) } } if publicKeyValue == nil { return nil, fmt.Errorf( "cannot import value of type '%s'. missing field '%s'", ty, sema.PublicKeyPublicKeyField, ) } if signAlgoValue == nil { return nil, fmt.Errorf( "cannot import value of type '%s'. missing field '%s'", ty, sema.PublicKeySignAlgoField, ) } // TODO: provide proper location range return interpreter.NewPublicKeyValue( inter, interpreter.ReturnEmptyLocationRange, publicKeyValue, signAlgoValue, inter.PublicKeyValidationHandler, ), nil } func importHashAlgorithm( inter *interpreter.Interpreter, fields []interpreter.CompositeField, ) ( *interpreter.CompositeValue, error, ) { var foundRawValue bool var rawValue interpreter.UInt8Value ty := sema.HashAlgorithmType for _, field := range fields { switch field.Name { case sema.EnumRawValueFieldName: rawValue, foundRawValue = field.Value.(interpreter.UInt8Value) if !foundRawValue { return nil, fmt.Errorf( "cannot import value of type '%s'. invalid value for field '%s': %v", ty, field.Name, field.Value, ) } default: return nil, fmt.Errorf( "cannot import value of type '%s'. invalid field '%s'", ty, field.Name, ) } } if !foundRawValue { return nil, fmt.Errorf( "cannot import value of type '%s'. missing field '%s'", ty, sema.EnumRawValueFieldName, ) } return stdlib.NewHashAlgorithmCase(inter, uint8(rawValue)), nil } func importSignatureAlgorithm( inter *interpreter.Interpreter, fields []interpreter.CompositeField, ) ( *interpreter.CompositeValue, error, ) { var foundRawValue bool var rawValue interpreter.UInt8Value ty := sema.SignatureAlgorithmType for _, field := range fields { switch field.Name { case sema.EnumRawValueFieldName: rawValue, foundRawValue = field.Value.(interpreter.UInt8Value) if !foundRawValue { return nil, fmt.Errorf( "cannot import value of type '%s'. 
invalid value for field '%s': %v", ty, field.Name, field.Value, ) } default: return nil, fmt.Errorf( "cannot import value of type '%s'. invalid field '%s'", ty, field.Name, ) } } if !foundRawValue { return nil, fmt.Errorf( "cannot import value of type '%s'. missing field '%s'", ty, sema.EnumRawValueFieldName, ) } return stdlib.NewSignatureAlgorithmCase(inter, uint8(rawValue)), nil }
exportTypeValue
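// A minimal round-trip sketch using this package's own API (the concrete
// values are arbitrary illustrations): ExportValue maps interpreter values
// to their cadence counterparts, and importValue is the inverse, e.g.
//
//	v, _ := ExportValue(interpreter.UInt8Value(7), inter)
//	// v == cadence.NewUInt8(7)
//	w, _ := importValue(inter, cadence.NewUInt8(7), sema.UInt8Type)
//	// w == interpreter.UInt8Value(7)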
primitives_test.py
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for JAX primitive coverage.""" import unittest from absl.testing import absltest from absl.testing import parameterized from functools import partial import jax from jax import dtypes from jax import lax from jax import numpy as jnp from jax import test_util as jtu from jax.config import config from jax.experimental import jax2tf from jax.experimental.jax2tf.tests import tf_test_util from jax.interpreters import xla import numpy as np import tensorflow as tf # type: ignore[import] config.parse_flags_with_absl() # Import after parsing flags from jax.experimental.jax2tf.tests import primitive_harness REDUCE = ( jnp.all, jnp.any, jnp.max, jnp.min, jnp.prod, jnp.sum, ) INDEX = ( jax.ops.index_add, jax.ops.index_max, jax.ops.index_min, jax.ops.index_mul, jax.ops.index_update, ) class JaxPrimitiveTest(tf_test_util.JaxToTfTestCase): def test_primitive_coverage(self): """Fail if there are JAX primitives that are not implemented.""" # Harvest primitives from XLA translation tables all_primitives = (set(xla.translations) | set(xla.backend_specific_translations['cpu']) | set(xla.backend_specific_translations['gpu']) | set(xla.backend_specific_translations['tpu']) | set(xla.initial_style_translations) | set(xla.parallel_translations)) tf_impl = set(jax.experimental.jax2tf.jax2tf.tf_impl) tf_not_yet_impl = set(jax.experimental.jax2tf.jax2tf.tf_not_yet_impl) all_primitives = tuple(sorted(all_primitives, key=str)) for p in all_primitives: # TODO: remove tie_in once omnistaging is on by default if p.name == "axis_index" or p.name == "tie_in": continue if p in tf_not_yet_impl: self.assertNotIn(p, tf_impl) # Should not be in both tf_impl and tf_not_yet_impl else: self.assertIn(p, tf_impl) @parameterized.named_parameters( dict(testcase_name=f"_{f_jax.__name__}", f_jax=f_jax) for f_jax in [jnp.add, jnp.subtract, jnp.multiply, jnp.divide, jnp.less, jnp.less_equal, jnp.equal, jnp.greater, jnp.greater_equal, jnp.not_equal, jnp.maximum, jnp.minimum]) def test_type_promotion(self, f_jax=jnp.add): # We only test a few types here, as tensorflow does not support many # types like uint* or bool in binary ops. 
types = [dtypes.bfloat16, np.int32, np.int64, np.float32] for x_dtype in types: for y_dtype in types: x = np.array([1, 2], dtype=x_dtype) y = np.array([3, 4], dtype=y_dtype) self.ConvertAndCompare(f_jax, x, y) def test_concat(self): values = [np.array([1, 2], dtype=np.float32), np.array([1, 2], dtype=np.int32), np.array([1, 2], dtype=np.int8)] f_jax = jax.jit(lambda x: jnp.concatenate(x, axis=0)) self.ConvertAndCompare(f_jax, values) @primitive_harness.parameterized(primitive_harness.lax_pad) def test_pad(self, harness: primitive_harness.Harness): self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng())) @primitive_harness.parameterized(primitive_harness.lax_top_k) def test_top_k(self, harness: primitive_harness.Harness): if (harness.params["k"] > harness.params["shape"][-1] or harness.params["k"] < 0): with self.assertRaisesRegex(ValueError, "k argument to top_k must be"): harness.dyn_fun(*harness.dyn_args_maker(self.rng())) elif harness.params["dtype"] in jtu.dtypes.complex: # TODO(necula): fix top_k complex bug on TPU if jtu.device_under_test() == "tpu": raise unittest.SkipTest("top_k complex on TPU raises different error") with self.assertRaisesRegex(RuntimeError, "Unimplemented: complex comparison"): harness.dyn_fun(*harness.dyn_args_maker(self.rng())) # TODO: TF and JAX sort [inf, nan] differently. elif harness.name.startswith("nan_"): raise unittest.SkipTest("inconsistent [nan, inf] sorting") else: self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng())) @primitive_harness.parameterized(primitive_harness.lax_sort) def test_sort(self, harness: primitive_harness.Harness): if (jtu.device_under_test() == "gpu" and len(harness.arg_descriptors) == 4 and not harness.params["is_stable"]): # TODO: fix the TF GPU test raise unittest.SkipTest("GPU tests are running TF on CPU") if jtu.device_under_test() == "tpu" and harness.params["dtype"] in jtu.dtypes.complex: raise unittest.SkipTest("JAX sort is not implemented on TPU for complex") self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng())) @primitive_harness.parameterized(primitive_harness.lax_fft) @jtu.skip_on_flag("jax_skip_slow_tests", True) def test_fft(self, harness: primitive_harness.Harness): if len(harness.params["fft_lengths"]) > 3: with self.assertRaisesRegex(RuntimeError, "FFT only supports ranks 1-3"): harness.dyn_fun(*harness.dyn_args_maker(self.rng())) elif (jtu.device_under_test() == "tpu" and len(harness.params["fft_lengths"]) > 1): # TODO(b/140351181): FFT is mostly unimplemented on TPU, even for JAX with self.assertRaisesRegex(RuntimeError, "only 1D FFT is currently supported."): harness.dyn_fun(*harness.dyn_args_maker(self.rng())) else: tol = None if jtu.device_under_test() == "gpu": if harness.params["dtype"] in jtu.dtypes.boolean: tol = 0.01 else: tol = 1e-3 self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()), atol=tol, rtol=tol) @primitive_harness.parameterized(primitive_harness.lax_linalg_qr) def test_qr(self, harness: primitive_harness.Harness): # See jax.lib.lapack.geqrf for the list of compatible types dtype = harness.params["dtype"] dut = jtu.device_under_test() # These cases are not implemented in JAX if dtype in (jtu.dtypes.all_integer + [jnp.bfloat16]): unimplemented_jax = True elif dtype is np.complex64 and dut == "tpu": unimplemented_jax = True elif dtype is np.float16 and dut in ("cpu", "gpu"): unimplemented_jax = True else: unimplemented_jax = False if unimplemented_jax: raise unittest.SkipTest(f"QR not implemented in JAX for 
{dtype} on {dut}") # TODO: see https://github.com/google/jax/pull/3775#issuecomment-659407824. # - for now, the performance of the HLO QR implementation called when # compiling with TF is expected to have worse performance than the # custom calls made in JAX. self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()), atol=1e-5, rtol=1e-5) @primitive_harness.parameterized(primitive_harness.lax_linalg_svd) @jtu.skip_on_flag("jax_skip_slow_tests", True) def test_svd(self, harness: primitive_harness.Harness): if harness.params["dtype"] in [np.float16, dtypes.bfloat16]: if jtu.device_under_test() != "tpu": # Does not work in JAX with self.assertRaisesRegex(NotImplementedError, "Unsupported dtype"): harness.dyn_fun(*harness.dyn_args_maker(self.rng())) return if harness.params["dtype"] in [np.complex64, np.complex128]: if jtu.device_under_test() == "tpu": # TODO: on JAX on TPU there is no SVD implementation for complex with self.assertRaisesRegex(RuntimeError, "Binary op compare with different element types"): harness.dyn_fun(*harness.dyn_args_maker(self.rng())) return def _custom_assert(r_jax, r_tf, atol=1e-6, rtol=1e-6): def _reconstruct_operand(result, is_tf: bool): # Reconstructing operand as documented in numpy.linalg.svd (see # https://numpy.org/doc/stable/reference/generated/numpy.linalg.svd.html) s, u, v = result if is_tf: s = s.numpy() u = u.numpy() v = v.numpy() U = u[..., :s.shape[-1]] V = v[..., :s.shape[-1], :] S = s[..., None, :] return jnp.matmul(U * S, V), s.shape, u.shape, v.shape if harness.params["compute_uv"]: r_jax_reconstructed = _reconstruct_operand(r_jax, False) r_tf_reconstructed = _reconstruct_operand(r_tf, True) self.assertAllClose(r_jax_reconstructed, r_tf_reconstructed, atol=atol, rtol=rtol) else: self.assertAllClose(r_jax, r_tf, atol=atol, rtol=rtol) tol = 1e-4 custom_assert = partial(_custom_assert, atol=tol, rtol=tol) self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()), atol=tol, rtol=tol, custom_assert=custom_assert, always_custom_assert=True) @primitive_harness.parameterized(primitive_harness.lax_select_and_gather_add) @jtu.ignore_warning(category=UserWarning, message="Using reduced precision for gradient.*") def test_select_and_gather_add(self, harness: primitive_harness.Harness): self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng())) @primitive_harness.parameterized(primitive_harness.lax_reduce_window) def test_reduce_window(self, harness: primitive_harness.Harness): dtype = harness.params['dtype'] if (jtu.device_under_test() == 'tpu' and dtype is np.complex64): raise unittest.SkipTest( 'TODO: JAX reduce_window on TPU does not handle complex64' ) self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng())) @primitive_harness.parameterized(primitive_harness.lax_unary_elementwise) def test_unary_elementwise(self, harness: primitive_harness.Harness): dtype = harness.params["dtype"] lax_name = harness.params["lax_name"] arg, = harness.dyn_args_maker(self.rng()) custom_assert = None if lax_name == "digamma": # TODO(necula): fix bug with digamma/(f32|f16) on TPU if dtype in [np.float16, np.float32] and jtu.device_under_test() == "tpu": raise unittest.SkipTest("TODO: fix bug: nan vs not-nan") # In the bfloat16 case, TF and lax both return NaN in undefined cases. if not dtype is dtypes.bfloat16: # digamma is not defined at 0 and -1 def custom_assert(result_jax, result_tf): # lax.digamma returns NaN and tf.math.digamma returns inf special_cases = (arg == 0.) | (arg == -1.) 
          nr_special_cases = np.count_nonzero(special_cases)
          self.assertAllClose(np.full((nr_special_cases,), dtype(np.nan)),
                              result_jax[special_cases])
          self.assertAllClose(np.full((nr_special_cases,), dtype(np.inf)),
                              result_tf[special_cases])
          # non-special cases are equal
          self.assertAllClose(result_jax[~ special_cases],
                              result_tf[~ special_cases])
    if lax_name == "erf_inv":
      # TODO(necula): fix erf_inv bug on TPU
      if jtu.device_under_test() == "tpu":
        raise unittest.SkipTest("erf_inv bug on TPU: nan vs non-nan")
      # TODO: investigate: in the (b)float16 cases, TF and lax both return the
      # same result in undefined cases.
      if dtype not in [np.float16, dtypes.bfloat16]:
        # erf_inv is not defined for arg <= -1 or arg >= 1
        def custom_assert(result_jax, result_tf):  # noqa: F811
          # for arg < -1 or arg > 1
          # lax.erf_inv returns NaN; tf.math.erf_inv return +/- inf
          special_cases = (arg < -1.) | (arg > 1.)
          nr_special_cases = np.count_nonzero(special_cases)
          self.assertAllClose(np.full((nr_special_cases,), dtype(np.nan),
                                      dtype=dtype),
                              result_jax[special_cases])
          signs = np.where(arg[special_cases] < 0., -1., 1.)
          self.assertAllClose(np.full((nr_special_cases,),
                                      signs * dtype(np.inf), dtype=dtype),
                              result_tf[special_cases])
          # non-special cases are equal
          self.assertAllClose(result_jax[~ special_cases],
                              result_tf[~ special_cases])
    atol = None
    if jtu.device_under_test() == "gpu":
      # TODO(necula): revisit once we fix the GPU tests
      atol = 1e-3
    self.ConvertAndCompare(harness.dyn_fun, arg, custom_assert=custom_assert,
                           atol=atol)

  @primitive_harness.parameterized(primitive_harness.lax_bitwise_not)
  def test_bitwise_not(self, harness):
    self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))

  @primitive_harness.parameterized(primitive_harness.lax_population_count)
  def test_population_count(self, harness: primitive_harness.Harness):
    self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))

  @primitive_harness.parameterized(primitive_harness.lax_add_mul)
  def test_add_mul(self, harness: primitive_harness.Harness):
    self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))

  @primitive_harness.parameterized(primitive_harness.lax_min_max)
  def test_min_max(self, harness: primitive_harness.Harness):
    self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))

  @primitive_harness.parameterized(primitive_harness.lax_binary_elementwise)
  def test_binary_elementwise(self, harness):
    tol = None
    lax_name, dtype = harness.params["lax_name"], harness.params["dtype"]
    if lax_name in ("igamma", "igammac"):
      # TODO(necula): fix bug with igamma/f16
      if dtype in [np.float16, dtypes.bfloat16]:
        raise unittest.SkipTest("TODO: igamma(c) unsupported with (b)float16 in JAX")
      # TODO(necula): fix bug with igamma/f32 on TPU
      if dtype is np.float32 and jtu.device_under_test() == "tpu":
        raise unittest.SkipTest("TODO: fix bug: nan vs not-nan")
    arg1, arg2 = harness.dyn_args_maker(self.rng())
    custom_assert = None
    if lax_name == "igamma":
      # igamma is not defined when the first argument is <=0
      def custom_assert(result_jax, result_tf):
        # lax.igamma returns NaN when arg1 == arg2 == 0; tf.math.igamma returns 0
        special_cases = (arg1 == 0.) & (arg2 == 0.)
        nr_special_cases = np.count_nonzero(special_cases)
        self.assertAllClose(np.full((nr_special_cases,), np.nan, dtype=dtype),
                            result_jax[special_cases])
        self.assertAllClose(np.full((nr_special_cases,), 0., dtype=dtype),
                            result_tf[special_cases])
        # non-special cases are equal
        self.assertAllClose(result_jax[~ special_cases],
                            result_tf[~ special_cases])
    if lax_name == "igammac":
      # On GPU, tolerance also needs to be adjusted in compiled mode
      if dtype == np.float64 and jtu.device_under_test() == 'gpu':
        tol = 1e-14
      # igammac is not defined when the first argument is <=0
      def custom_assert(result_jax, result_tf):  # noqa: F811
        # lax.igammac returns 1. when arg1 <= 0; tf.math.igammac returns NaN
        special_cases = (arg1 <= 0.) | (arg2 <= 0)
        nr_special_cases = np.count_nonzero(special_cases)
        self.assertAllClose(np.full((nr_special_cases,), 1., dtype=dtype),
                            result_jax[special_cases])
        self.assertAllClose(np.full((nr_special_cases,), np.nan, dtype=dtype),
                            result_tf[special_cases])
        # On CPU, tolerance only needs to be adjusted in eager & graph modes
        tol = None
        if dtype == np.float64:
          tol = 1e-14
        # non-special cases are equal
        self.assertAllClose(result_jax[~ special_cases],
                            result_tf[~ special_cases], atol=tol, rtol=tol)
    self.ConvertAndCompare(harness.dyn_fun, arg1, arg2,
                           custom_assert=custom_assert, atol=tol, rtol=tol)

  @primitive_harness.parameterized(primitive_harness.lax_binary_elementwise_logical)
  def test_binary_elementwise_logical(self, harness):
    self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))

  @primitive_harness.parameterized(primitive_harness.lax_betainc)
  def test_betainc(self, harness: primitive_harness.Harness):
    dtype = harness.params["dtype"]
    # TODO: https://www.tensorflow.org/api_docs/python/tf/math/betainc only
    # supports float32/64 tests.
    # TODO(bchetioui): investigate why the test actually fails in JAX.
    if dtype in [np.float16, dtypes.bfloat16]:
      raise unittest.SkipTest("(b)float16 not implemented in TF")
    tol = None
    if dtype is np.float64:
      tol = 1e-14
    self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),
                           atol=tol, rtol=tol)

  # TODO(necula): combine tests that are identical except for the harness
  # wait until we get more experience with using harnesses.
  @primitive_harness.parameterized(primitive_harness.lax_shift_left)
  def test_shift_left(self, harness):
    self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))

  @primitive_harness.parameterized(primitive_harness.lax_shift_right_logical)
  def test_shift_right_logical(self, harness):
    if jtu.device_under_test() == "tpu" and harness.params["dtype"] in [np.int8, np.int16]:
      raise unittest.SkipTest("TODO: silent error for negative inputs")
    self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))

  @primitive_harness.parameterized(primitive_harness.lax_shift_right_arithmetic)
  def test_shift_right_arithmetic(self, harness):
    if jtu.device_under_test() == "tpu" and harness.params["dtype"] in [np.uint8, np.uint16]:
      raise unittest.SkipTest("TODO: silent error for negative inputs")
    self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))

  @primitive_harness.parameterized(primitive_harness.lax_slice)
  def test_slice(self, harness):
    # JAX.slice rejects negative indices; check, and skip jax2tf
    if any(si < 0 or si >= sh or li < 0 or li > sh
           for sh, si, li in zip(harness.params["shape"],
                                 harness.params["start_indices"],
                                 harness.params["limit_indices"])):
      with self.assertRaisesRegex(TypeError, ""):
        harness.dyn_fun(*harness.dyn_args_maker(self.rng()))
    else:
      self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))

  @primitive_harness.parameterized(primitive_harness.lax_dynamic_slice)
  def test_dynamic_slice(self, harness):
    # JAX.dynamic_slice rejects slice sizes too big; check this, and skip jax2tf
    args = harness.dyn_args_maker(self.rng())
    if any(li - si < 0 or li - si >= sh
           for sh, si, li in zip(harness.params["shape"],
                                 harness.params["start_indices"],
                                 harness.params["limit_indices"])):
      with self.assertRaisesRegex(TypeError, ""):
        harness.dyn_fun(*args)
      return
    self.ConvertAndCompare(harness.dyn_fun, *args)

  @primitive_harness.parameterized(primitive_harness.lax_dynamic_update_slice)
  def test_dynamic_update_slice(self, harness):
    # JAX.dynamic_update_slice rejects update slices too big; check, and skip jax2tf
    if any(ush > sh
           for sh, ush in zip(harness.params["shape"],
                              harness.params["update_shape"])):
      with self.assertRaisesRegex(TypeError, ""):
        harness.dyn_fun(*harness.dyn_args_maker(self.rng()))
    else:
      self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))

  @primitive_harness.parameterized(primitive_harness.lax_squeeze)
  def test_squeeze(self, harness: primitive_harness.Harness):
    self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))

  @primitive_harness.parameterized(primitive_harness.lax_conv_general_dilated)
  def test_conv_general_dilated(self, harness: primitive_harness.Harness):
    if jtu.device_under_test() == "gpu":
      raise unittest.SkipTest("TODO: test failures on GPU")
    tol = None
    # TODO(bchetioui): significant discrepancies in some float16 cases.
    if harness.params["dtype"] is np.float16:
      tol = 1.
    # TODO(bchetioui): slight occasional discrepancy in float32 cases.
    elif harness.params["dtype"] is np.float32:
      tol = 1e-5
    self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),
                           atol=tol, rtol=tol)

  @primitive_harness.parameterized(primitive_harness.lax_gather)
  def test_gather(self, harness: primitive_harness.Harness):
    self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))

  @primitive_harness.parameterized(primitive_harness.lax_scatter)
  def test_scatter(self, harness: primitive_harness.Harness):
    f_name = harness.params['f_lax'].__name__
    dtype = harness.params['dtype']
    if jtu.device_under_test() == 'tpu':
      if dtype is np.complex64 and f_name in ['scatter_min', 'scatter_max']:
        raise unittest.SkipTest(f"TODO: complex {f_name} on TPU fails in JAX")
    self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))

  def test_boolean_gather(self):
    values = np.array([[True, True], [False, True], [False, False]],
                      dtype=np.bool_)
    indices = np.array([0, 1], dtype=np.int32)
    for axis in [0, 1]:
      f_jax = jax.jit(lambda v, i: jnp.take(v, i, axis=axis))  # pylint: disable=cell-var-from-loop
      self.ConvertAndCompare(f_jax, values, indices)

  def test_gather_rank_change(self):
    params = jnp.array([[1.0, 1.5, 2.0], [2.0, 2.5, 3.0], [3.0, 3.5, 4.0]])
    indices = jnp.array([[1, 1, 2], [0, 1, 0]])
    f_jax = jax.jit(lambda i: params[i])
    self.ConvertAndCompare(f_jax, indices)

  @parameterized.named_parameters(jtu.cases_from_list(
      dict(testcase_name=f"_{f_jax.__name__}", f_jax=f_jax)
      for f_jax in REDUCE))
  def test_reduce_ops_with_numerical_input(self, f_jax):
    values = np.array([1, 2, 3], dtype=np.float32)
    self.ConvertAndCompare(f_jax, values)

  @parameterized.named_parameters(jtu.cases_from_list(
      dict(testcase_name=f"_{f_jax.__name__}", f_jax=f_jax)
      for f_jax in (jnp.cumsum, jnp.cumprod)))
  def test_cumulated_ops(self, f_jax):
  @parameterized.named_parameters(jtu.cases_from_list(
      dict(testcase_name=f"_{op.__name__}", op=op)
      for op in INDEX))
  def test_scatter_static(self, op):
    values = np.ones((5, 6), dtype=np.float32)
    update = np.float32(6.)
    f_jax = jax.jit(lambda v, u: op(v, jax.ops.index[::2, 3:], u))
    self.ConvertAndCompare(f_jax, values, update)

  @parameterized.named_parameters(jtu.cases_from_list(
      dict(testcase_name=f"_{f_jax.__name__}", f_jax=f_jax)
      for f_jax in REDUCE))
  def test_reduce_ops_with_boolean_input(self, f_jax):
    values = np.array([True, False, True], dtype=np.bool_)
    self.ConvertAndCompare(f_jax, values)

  @primitive_harness.parameterized(primitive_harness.random_gamma)
  def test_random_gamma(self, harness: primitive_harness.Harness):
    self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),
                           rtol=1e-5)

  @primitive_harness.parameterized(primitive_harness.random_split)
  def test_random_split(self, harness: primitive_harness.Harness):
    self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))

  def test_zeros_like(self):
    v = np.float32(2.)
    f_jax = jax.ad_util.zeros_like_jaxval
    self.ConvertAndCompare(f_jax, v)

  def test_stop_gradient(self):
    f = jax2tf.convert(lax.stop_gradient)
    self.assertEqual(f(tf.ones([])), 1.)

  # test_bfloat16_constant checks that https://github.com/google/jax/issues/3942 is
  # fixed
  def test_bfloat16_constant(self):
    def jax_fn_scalar(x):
      x = x.astype(jnp.bfloat16)
      x *= 2.
      return x

    def jax_fn_array(x):
      x = x.astype(jnp.bfloat16)
      x *= np.array([1.5, 2.5, 3.5], jnp.bfloat16)
      return x

    tf_fn_scalar = jax2tf.convert(jax_fn_scalar)
    self.assertAllClose(tf_fn_scalar(1.375).numpy(), jnp.bfloat16(2.750))

    tf_fn_array = jax2tf.convert(jax_fn_array)
    self.assertAllClose(tf_fn_array(np.array([3, 4, 5])),
                        np.array([4.5, 10, 17.5], jnp.bfloat16))


if __name__ == "__main__":
  absltest.main(testLoader=jtu.JaxTestLoader())
    values = np.array([1, 2, 3], dtype=np.float32)
    self.ConvertAndCompare(f_jax, values)
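# Editorial sketch (not part of the test file above): the custom_assert
# pattern used throughout these tests, shown in isolation. Where a function
# is mathematically undefined, JAX and TF may legitimately disagree (e.g.
# NaN vs. +/- inf), so equality is only required outside that mask. The
# numbers below are illustrative, not taken from the original harnesses.
import numpy as np

def compare_outside_special_cases(result_jax, result_tf, special_cases):
  # Only the complement of the special-case mask must agree.
  np.testing.assert_allclose(result_jax[~special_cases],
                             result_tf[~special_cases])

arg = np.array([-2.0, 0.5, 2.0])
special = (arg < -1.0) | (arg > 1.0)  # e.g. erf_inv is undefined outside [-1, 1]
compare_outside_special_cases(np.array([np.nan, 0.47, np.nan]),   # lax-style output
                              np.array([-np.inf, 0.47, np.inf]),  # tf-style output
                              special)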
LFM.py
# Import packages
import random
import math
import numpy as np
import time
from tqdm import tqdm
from tqdm import trange


# 1 General function definitions
## Define a decorator to monitor running time
def timmer(func):
    def wrapper(*args, **kwargs):
        start_time = time.time()
        res = fun
ta
### split data
class Dataset():

    def __init__(self, fp):
        self.data = self.loadData(fp)

    @timmer
    def loadData(self, fp):
        data = []
        for l in open(fp):
            data.append(tuple(map(int, l.strip().split('::')[:2])))
        return data

    @timmer
    def splitData(self, M, k, seed=1):
        '''
        :params: data, all loaded (user, item) data entries
        :params: M, number of splits; the average over the M folds is taken at the end
        :params: k, index of the current split, k ~ [0, M)
        :params: seed, random seed; should be set to the same value for different k
        :return: train, test
        '''
        train, test = [], []
        random.seed(seed)
        for user, item in self.data:
            # This differs from the book; taking M-1 seems more reasonable here,
            # because randint covers both endpoints
            if random.randint(0, M-1) == k:
                test.append((user, item))
            else:
                train.append((user, item))

        # Convert to dict form: user -> set(items)
        def convert_dict(data):
            data_dict = {}
            for user, item in data:
                if user not in data_dict:
                    data_dict[user] = set()
                data_dict[user].add(item)
            data_dict = {k: list(data_dict[k]) for k in data_dict}
            return data_dict

        return convert_dict(train), convert_dict(test)


## Evaluation metrics
### Precision
### Recall
### Coverage
### Popularity(Novelty)
class Metric():

    def __init__(self, train, test, GetRecommendation):
        '''
        :params: train, training data
        :params: test, test data
        :params: GetRecommendation, interface function that returns recommended items for a given user
        '''
        self.train = train
        self.test = test
        self.GetRecommendation = GetRecommendation
        self.recs = self.getRec()

    # Produce recommendations for every user in the test set
    def getRec(self):
        recs = {}
        for user in self.test:
            rank = self.GetRecommendation(user)
            recs[user] = rank
        return recs

    # Define how the precision metric is computed
    def precision(self):
        all, hit = 0, 0
        for user in self.test:
            test_items = set(self.test[user])
            rank = self.recs[user]
            for item, score in rank:
                if item in test_items:
                    hit += 1
            all += len(rank)
        return round(hit / all * 100, 2)

    # Define how the recall metric is computed
    def recall(self):
        all, hit = 0, 0
        for user in self.test:
            test_items = set(self.test[user])
            rank = self.recs[user]
            for item, score in rank:
                if item in test_items:
                    hit += 1
            all += len(test_items)
        return round(hit / all * 100, 2)

    # Define how the coverage metric is computed
    def coverage(self):
        all_item, recom_item = set(), set()
        for user in self.test:
            for item in self.train[user]:
                all_item.add(item)
            rank = self.recs[user]
            for item, score in rank:
                recom_item.add(item)
        return round(len(recom_item) / len(all_item) * 100, 2)

    # Define how the novelty metric is computed
    def popularity(self):
        # Compute item popularity
        item_pop = {}
        for user in self.train:
            for item in self.train[user]:
                if item not in item_pop:
                    item_pop[item] = 0
                item_pop[item] += 1

        num, pop = 0, 0
        for user in self.test:
            rank = self.recs[user]
            for item, score in rank:
                # Take the logarithm to keep popular items from dominating
                # due to the long-tail problem
                pop += math.log(1 + item_pop[item])
                num += 1
        return round(pop / num, 6)

    def eval(self):
        metric = {'Precision': self.precision(),
                  'Recall': self.recall(),
                  'Coverage': self.coverage(),
                  'Popularity': self.popularity()}
        print('Metric:', metric)
        return metric


# 2 LFM algorithm implementation
def LFM(train, ratio, K, lr, step, lmbda, N):
    '''
    :params: train, training data
    :params: ratio, ratio of negative to positive samples in negative sampling
    :params: K, number of latent factors
    :params: lr, initial learning rate
    :params: step, number of iterations
    :params: lmbda, regularization coefficient
    :params: N, number of items for the TopN recommendation
    :return: GetRecommendation, interface that returns recommendation results
    '''

    all_items = {}
    for user in train:
        for item in train[user]:
            if item not in all_items:
                all_items[item] = 0
            all_items[item] += 1
    all_items = list(all_items.items())
    items = [x[0] for x in all_items]
    pops = [x[1] for x in all_items]

    # Negative sampling function (samples according to item popularity)
    def nSample(data, ratio):
        new_data = {}
        # Positive samples
        for user in data:
            if user not in new_data:
                new_data[user] = {}
            for item in data[user]:
                new_data[user][item] = 1
        # Negative samples
        for user in new_data:
            seen = set(new_data[user])
            pos_num = len(seen)
            # Oversample by 3x, then drop seen items. Note that the third
            # positional argument of np.random.choice is `replace`, so the
            # popularity weights must be passed via the `p` keyword,
            # normalized to sum to 1
            item = np.random.choice(items, int(pos_num * ratio * 3),
                                    p=np.array(pops) / sum(pops))
            item = [x for x in item if x not in seen][:int(pos_num * ratio)]
            new_data[user].update({x: 0 for x in item})

        return new_data

    # Training
    P, Q = {}, {}
    for user in train:
        P[user] = np.random.random(K)
    for item in items:
        Q[item] = np.random.random(K)

    for s in trange(step):
        data = nSample(train, ratio)
        for user in data:
            for item in data[user]:
                eui = data[user][item] - (P[user] * Q[item]).sum()
                P[user] += lr * (Q[item] * eui - lmbda * P[user])
                Q[item] += lr * (P[user] * eui - lmbda * Q[item])
        lr *= 0.9  # Decay the learning rate

    # Recommendation interface function
    def GetRecommendation(user):
        seen_items = set(train[user])
        recs = {}
        for item in items:
            if item not in seen_items:
                recs[item] = (P[user] * Q[item]).sum()
        recs = list(sorted(recs.items(), key=lambda x: x[1], reverse=True))[:N]
        return recs

    return GetRecommendation


# 3 LFM experiments
## M=8, N=10, ratio=[1, 2, 3, 5, 10, 20]
class Experiment():

    def __init__(self, M, N, ratio=1,
                 K=100, lr=0.02, step=100, lmbda=0.01,
                 fp='../dataset/ml-1m/ratings.dat'):
        '''
        :params: M, number of experiment runs
        :params: N, number of items for the TopN recommendation
        :params: ratio, ratio of negative to positive samples
        :params: K, number of latent factors
        :params: lr, learning rate
        :params: step, number of training steps
        :params: lmbda, regularization coefficient
        :params: fp, data file path
        '''
        self.M = M
        self.K = K
        self.N = N
        self.ratio = ratio
        self.lr = lr
        self.step = step
        self.lmbda = lmbda
        self.fp = fp
        self.alg = LFM

    # Define a single experiment run
    @timmer
    def worker(self, train, test):
        '''
        :params: train, training dataset
        :params: test, test dataset
        :return: the metric values
        '''
        getRecommendation = self.alg(train, self.ratio, self.K,
                                     self.lr, self.step, self.lmbda, self.N)
        metric = Metric(train, test, getRecommendation)
        return metric.eval()

    # Run the experiment multiple times and average the results
    @timmer
    def run(self):
        metrics = {'Precision': 0, 'Recall': 0,
                   'Coverage': 0, 'Popularity': 0}
        dataset = Dataset(self.fp)
        for ii in range(self.M):
            train, test = dataset.splitData(self.M, ii)
            print('Experiment {}:'.format(ii))
            metric = self.worker(train, test)
            metrics = {k: metrics[k] + metric[k] for k in metrics}
        metrics = {k: metrics[k] / self.M for k in metrics}
        print('Average Result (M={}, N={}, ratio={}): {}'.format(
              self.M, self.N, self.ratio, metrics))


# LFM experiments (long running time; results are not included here)
M, N = 8, 10
for r in [1, 2, 3, 5, 10, 20]:
    exp = Experiment(M, N, ratio=r)
    exp.run()
c(*args, **kwargs)
        stop_time = time.time()
        print('Func {}, run time: {}'.format(func.__name__, stop_time - start_time))
        return res
    return wrapper


## Data processing helpers
### load da
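# Editorial sketch (not part of LFM.py): one SGD step of the latent factor
# model used above. The prediction is the dot product of the user and item
# factor vectors; both are nudged along the error with L2 regularization,
# exactly as in the training loop of LFM(). All values are made up.
import numpy as np

P_u = np.array([0.1, 0.4])             # user latent factors
Q_i = np.array([0.3, 0.2])             # item latent factors
label, lr, lmbda = 1.0, 0.02, 0.01     # positive sample, learning rate, regularizer

eui = label - (P_u * Q_i).sum()        # prediction error
P_u += lr * (Q_i * eui - lmbda * P_u)  # same updates as in LFM()
Q_i += lr * (P_u * eui - lmbda * Q_i)
print(eui, P_u, Q_i)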
ball_window.py
from gi.repository import Gtk, Gdk
import cairo
from random import getrandbits

from ball_drawing_area import BallDrawingArea
from store import store
from near import near
import config


def rand_velocity():
class BallWindow(Gtk.Window):
    def __init__(self, window_size, x=None, y=None, x_velocity=None, y_velocity=None):
        Gtk.Window.__init__(self)

        self.window_size = window_size
        self.x = x
        self.y = y
        self.x_velocity = x_velocity if x_velocity is not None else rand_velocity()
        self.y_velocity = y_velocity if y_velocity is not None else rand_velocity()
        self.rebuilding = False

        self.set_app_paintable(True)
        self.set_decorated(False)
        self.set_accept_focus(False)
        self.set_keep_above(True)
        self.set_skip_taskbar_hint(True)
        self.set_skip_pager_hint(True)
        self.set_deletable(False)
        self.set_size_request(config.BALL_DIAMETER, config.BALL_DIAMETER)
        self.set_position(Gtk.WindowPosition.CENTER)
        self.set_visual(self.get_screen().get_rgba_visual())
        self.add_tick_callback(self.tick)

        # Check against None explicitly so a coordinate of 0 is still honored
        if self.x is not None and self.y is not None:
            self.move(self.x, self.y)

        self.connect('realize', self.realize)
        self.connect('draw', self.draw)
        self.connect('window-state-event', self.check_minimized)

    def rebuild(self, use_current_position):
        if not self.rebuilding:
            if config.USE_ANTI_TAMPER or not use_current_position:
                self.rebuilding = True
                self.destroy()
                if use_current_position:
                    self = BallWindow(self.window_size, self.x, self.y,
                                      self.x_velocity, self.y_velocity)
                else:
                    self = BallWindow(self.window_size)
                self.show_all()

    def tick(self, _widget, _frame_clock):
        if config.USE_ANTI_TAMPER:
            self.set_keep_above(True)
            self.get_window().move_to_desktop(0)

        current_x, current_y = self.get_position()
        ai_x, ai_y = store['ai_paddle']
        player_x, player_y = store['player_paddle']
        width, height = self.window_size

        if ai_x is None or player_x is None:
            return True

        if (
            current_x < config.SCREEN_PADDING
            or current_x + config.BALL_DIAMETER > width - config.SCREEN_PADDING
        ):
            self.x -= self.x_velocity
            self.rebuild(False)

        if (
            current_y < config.SCREEN_PADDING
            or current_y + config.BALL_DIAMETER > height - config.SCREEN_PADDING
        ):
            self.y -= self.y_velocity
            self.y_velocity *= -1

        if (
            self.x < ai_x + (config.BALL_DIAMETER / 2 + config.BALL_PADDING)
            and self.y + (config.BALL_DIAMETER + config.BALL_PADDING) > ai_y
            and self.y < ai_y + config.PADDLE_SIZE[1] + config.BALL_PADDING
        ):
            self.x_velocity *= -1
            if config.USE_BALL_STUCK_IN_PADDLE_FIX:
                self.x = ai_x + (config.BALL_DIAMETER / 2 + config.BALL_PADDING)

        if (
            self.x > player_x - (config.BALL_DIAMETER + config.BALL_PADDING)
            and self.y + (config.BALL_DIAMETER + config.BALL_PADDING) > player_y
            and self.y < player_y + config.PADDLE_SIZE[1] + config.BALL_PADDING
        ):
            self.x_velocity *= -1
            if config.USE_BALL_STUCK_IN_PADDLE_FIX:
                self.x = player_x - (config.BALL_DIAMETER + config.BALL_PADDING)

        if not near(self.x, current_x, config.BALL_LEEWAY) or not near(self.y, current_y, config.BALL_LEEWAY):
            self.rebuild(True)

        self.x += self.x_velocity
        self.y += self.y_velocity
        self.move(self.x, self.y)
        store['ball_position'] = (self.x, self.y, self.x_velocity, self.y_velocity)
        return True

    def check_minimized(self, _widget, event):
        if event.new_window_state & Gdk.WindowState.ICONIFIED:
            self.rebuild(True)

    def realize(self, _widget):
        current_x, current_y = self.get_position()
        self.x = current_x
        self.y = current_y

        ball = BallDrawingArea(self.get_window())
        self.add(ball)
        ball.show_all()

        cursor = Gdk.Cursor.new_from_name(Gdk.Display.get_default(), 'not-allowed')
        self.get_window().set_cursor(cursor)

    def draw(self, _widget, cr):
        cr.set_source_rgba(1.0, 1.0, 1.0, 0.0)
        cr.set_operator(cairo.OPERATOR_SOURCE)
        cr.paint()
        return False
    return config.BALL_VELOCITY if getrandbits(1) else -config.BALL_VELOCITY
MINER_STATE.py
import json


def str_2_json(s):
    # json.loads in Python 3 takes an already-decoded string; the old
    # `encoding` keyword did nothing and was removed in Python 3.9
    return json.loads(s)


class MapInfo:
    def __init__(self):
        self.max_x = 0
        self.max_y = 0
        self.golds = []
        self.obstacles = []
        self.numberOfPlayers = 0
        self.maxStep = 0

    def init_map(self, gameInfo):
        self.max_x = gameInfo["width"] - 1
        self.max_y = gameInfo["height"] - 1
        self.golds = gameInfo["golds"]
        self.obstacles = gameInfo["obstacles"]
        self.maxStep = gameInfo["steps"]
        self.numberOfPlayers = gameInfo["numberOfPlayers"]

    def update(self, golds, changedObstacles):
        self.golds = golds
        for cob in changedObstacles:
            newOb = True
            for ob in self.obstacles:
                if cob["posx"] == ob["posx"] and cob["posy"] == ob["posy"]:
                    newOb = False
                    # print("cell(", cob["posx"], ",", cob["posy"], ") change type from: ", ob["type"], " -> ",
                    #       cob["type"], " / value: ", ob["value"], " -> ", cob["value"])
                    ob["type"] = cob["type"]
                    ob["value"] = cob["value"]
                    break
            if newOb:
                self.obstacles.append(cob)
                # print("new obstacle: ", cob["posx"], ",", cob["posy"], ", type = ", cob["type"], ", value = ",
                #       cob["value"])

    def get_min_x(self):
        return min([cell["posx"] for cell in self.golds])

    def get_max_x(self):
        return max([cell["posx"] for cell in self.golds])

    def get_min_y(self):
        return min([cell["posy"] for cell in self.golds])

    def get_max_y(self):
        return max([cell["posy"] for cell in self.golds])

    def is_row_has_gold(self, y):
        return y in [cell["posy"] for cell in self.golds]

    def is_column_has_gold(self, x):
        return x in [cell["posx"] for cell in self.golds]

    def
(self, x, y):
        for cell in self.golds:
            if x == cell["posx"] and y == cell["posy"]:
                return cell["amount"]
        return 0

    def get_obstacle(self, x, y):
        # Get the kind of obstacle at cell (x, y)
        for cell in self.obstacles:
            if x == cell["posx"] and y == cell["posy"]:
                return cell["type"]
        return -1  # No obstacle at the cell (x, y)


class State:
    STATUS_PLAYING = 0
    STATUS_ELIMINATED_WENT_OUT_MAP = 1
    STATUS_ELIMINATED_OUT_OF_ENERGY = 2
    STATUS_ELIMINATED_INVALID_ACTION = 3
    STATUS_STOP_EMPTY_GOLD = 4
    STATUS_STOP_END_STEP = 5

    def __init__(self):
        self.end = False
        self.score = 0
        self.lastAction = None
        self.id = 0
        self.x = 0
        self.y = 0
        self.energy = 0
        self.mapInfo = MapInfo()
        self.players = []
        self.stepCount = 0
        self.status = State.STATUS_PLAYING

    def init_state(self, data):
        # Parse data from the server into this object
        game_info = str_2_json(data)
        self.end = False
        self.score = 0
        self.lastAction = None
        self.id = game_info["playerId"]
        self.x = game_info["posx"]
        self.y = game_info["posy"]
        self.energy = game_info["energy"]
        self.mapInfo.init_map(game_info["gameinfo"])
        self.stepCount = 0
        self.status = State.STATUS_PLAYING
        self.players = [{"playerId": 2, "posx": self.x, "posy": self.y},
                        {"playerId": 3, "posx": self.x, "posy": self.y},
                        {"playerId": 4, "posx": self.x, "posy": self.y}]

    def update_state(self, data):
        new_state = str_2_json(data)
        for player in new_state["players"]:
            if player["playerId"] == self.id:
                self.x = player["posx"]
                self.y = player["posy"]
                self.energy = player["energy"]
                self.score = player["score"]
                self.lastAction = player["lastAction"]
                self.status = player["status"]

        self.mapInfo.update(new_state["golds"], new_state["changedObstacles"])
        self.players = new_state["players"]
        for i in range(len(self.players) + 1, 5, 1):
            self.players.append({"playerId": i, "posx": self.x, "posy": self.y})
        self.stepCount = self.stepCount + 1
gold_amount
bugreport.go
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bugreport

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/kr/pretty"
	"github.com/spf13/cobra"
	"k8s.io/client-go/tools/clientcmd"

	"istio.io/istio/operator/pkg/util"
	"istio.io/istio/pkg/kube"
	"istio.io/istio/pkg/kube/inject"
	"istio.io/istio/pkg/proxy"
	"istio.io/istio/tools/bug-report/pkg/archive"
	cluster2 "istio.io/istio/tools/bug-report/pkg/cluster"
	"istio.io/istio/tools/bug-report/pkg/common"
	"istio.io/istio/tools/bug-report/pkg/config"
	"istio.io/istio/tools/bug-report/pkg/content"
	"istio.io/istio/tools/bug-report/pkg/filter"
	"istio.io/istio/tools/bug-report/pkg/kubeclient"
	"istio.io/istio/tools/bug-report/pkg/kubectlcmd"
	"istio.io/istio/tools/bug-report/pkg/processlog"
	"istio.io/pkg/log"
	"istio.io/pkg/version"
)

const (
	bugReportDefaultTimeout = 30 * time.Minute
	istioRevisionLabel      = "istio.io/rev"
)

var (
	bugReportDefaultIstioNamespace = "istio-system"
	bugReportDefaultInclude        = []string{""}
	bugReportDefaultExclude        = []string{strings.Join(inject.IgnoredNamespaces.UnsortedList(), ",")}
)

// Cmd returns a cobra command for bug-report.
func Cmd(logOpts *log.Options) *cobra.Command {
	rootCmd := &cobra.Command{
		Use:          "bug-report",
		Short:        "Cluster information and log capture support tool.",
		SilenceUsage: true,
		Long: `bug-report selectively captures cluster information and logs into an archive to help diagnose problems.
Proxy logs can be filtered using:
  --include|--exclude ns1,ns2.../dep1,dep2.../pod1,pod2.../cntr1,cntr.../lbl1=val1,lbl2=val2.../ann1=val1,ann2=val2...
where ns=namespace, dep=deployment, cntr=container, lbl=label, ann=annotation

The filter spec is interpreted as 'must be in (ns1 OR ns2) AND (dep1 OR dep2) AND (cntr1 OR cntr2)...'
The log will be included only if the container matches at least one include filter and does not match any exclude filters.
All parts of the filter are optional and can be omitted e.g. ns1//pod1 filters only for namespace ns1 and pod1.
All names except label and annotation keys support '*' glob matching pattern.

e.g.
--include ns1,ns2 (only namespaces ns1 and ns2)
--include n*//p*/l=v* (pods with name beginning with 'p' in namespaces beginning with 'n' and having label 'l' with value beginning with 'v'.)`,
		RunE: func(cmd *cobra.Command, args []string) error {
			return runBugReportCommand(cmd, logOpts)
		},
	}
	rootCmd.AddCommand(version.CobraCommand())
	addFlags(rootCmd, gConfig)

	return rootCmd
}

var (
	// Logs, along with stats and importance metrics. Key is path (namespace/deployment/pod/cluster) which can be
	// parsed with ParsePath.
	logs       = make(map[string]string)
	stats      = make(map[string]*processlog.Stats)
	importance = make(map[string]int)
	// Aggregated errors for all fetch operations.
	gErrors util.Errors
	lock    = sync.RWMutex{}
)

func runBugReportCommand(_ *cobra.Command, logOpts *log.Options) error {
	kubectlcmd.ReportRunningTasks()
	if err := configLogs(logOpts); err != nil {
		return err
	}
	config, err := parseConfig()
	if err != nil {
		return err
	}

	clusterCtxStr := ""
	if config.Context == "" {
		var err error
		clusterCtxStr, err = content.GetClusterContext(config.KubeConfigPath)
		if err != nil {
			return err
		}
	} else {
		clusterCtxStr = config.Context
	}

	common.LogAndPrintf("\nTarget cluster context: %s\n", clusterCtxStr)
	common.LogAndPrintf("Running with the following config: \n\n%s\n\n", config)

	clientConfig, clientset, err := kubeclient.New(config.KubeConfigPath, config.Context)
	if err != nil {
		return fmt.Errorf("could not initialize k8s client: %s", err)
	}
	client, err := kube.NewExtendedClient(clientConfig, "")
	if err != nil {
		return err
	}
	common.LogAndPrintf("\nCluster endpoint: %s\n", client.RESTConfig().Host)

	clusterResourcesCtx, getClusterResourcesCancel := context.WithTimeout(context.Background(), commandTimeout)
	curTime := time.Now()
	defer func() {
		message := "Timed out getting cluster resources; consider using --include or --exclude to filter"
		if time.Until(curTime.Add(commandTimeout)) < 0 {
			common.LogAndPrintf(message)
		}
		getClusterResourcesCancel()
	}()
	resources, err := cluster2.GetClusterResources(clusterResourcesCtx, clientset, config)
	if err != nil {
		return err
	}

	dumpRevisionsAndVersions(resources, config.KubeConfigPath, config.Context, config.IstioNamespace)

	log.Infof("Cluster resource tree:\n\n%s\n\n", resources)
	paths, err := filter.GetMatchingPaths(config, resources)
	if err != nil {
		return err
	}

	common.LogAndPrintf("\n\nFetching proxy logs for the following containers:\n\n%s\n", strings.Join(paths, "\n"))
	gatherInfo(client, config, resources, paths)
	if len(gErrors) != 0 {
		log.Error(gErrors.ToError())
	}

	// TODO: sort by importance and discard any over the size limit.
	for path, text := range logs {
		namespace, _, pod, _, err := cluster2.ParsePath(path)
		if err != nil
		writeFile(filepath.Join(archive.ProxyOutputPath(tempDir, namespace, pod), common.ProxyContainerName+".log"), text)
	}

	outDir, err := os.Getwd()
	if err != nil {
		log.Errorf("using ./ to write archive: %s", err.Error())
		outDir = "."
	}
	outPath := filepath.Join(outDir, "bug-report.tar.gz")
	common.LogAndPrintf("Creating an archive at %s.\n", outPath)

	archiveDir := archive.DirToArchive(tempDir)
	if err := archive.Create(archiveDir, outPath); err != nil {
		return err
	}
	common.LogAndPrintf("Cleaning up temporary files in %s.\n", archiveDir)
	if err := os.RemoveAll(archiveDir); err != nil {
		return err
	}
	common.LogAndPrintf("Done.\n")
	return nil
}

func dumpRevisionsAndVersions(resources *cluster2.Resources, kubeconfig, configContext, istioNamespace string) {
	text := ""
	text += fmt.Sprintf("CLI version:\n%s\n\n", version.Info.LongForm())

	revisions := getIstioRevisions(resources)
	istioVersions, proxyVersions := getIstioVersions(kubeconfig, configContext, istioNamespace, revisions)
	text += "The following Istio control plane revisions/versions were found in the cluster:\n"
	for rev, ver := range istioVersions {
		text += fmt.Sprintf("Revision %s:\n%s\n\n", rev, ver)
	}
	text += "The following proxy revisions/versions were found in the cluster:\n"
	for rev, ver := range proxyVersions {
		text += fmt.Sprintf("Revision %s: Versions {%s}\n", rev, strings.Join(ver, ", "))
	}
	common.LogAndPrintf(text)
	writeFile(filepath.Join(archive.OutputRootDir(tempDir), "versions"), text)
}

// getIstioRevisions returns a slice with all Istio revisions detected in the cluster.
func getIstioRevisions(resources *cluster2.Resources) []string {
	revMap := make(map[string]struct{})
	for _, podLabels := range resources.Labels {
		for label, value := range podLabels {
			if label == istioRevisionLabel {
				revMap[value] = struct{}{}
			}
		}
	}
	var out []string
	for k := range revMap {
		out = append(out, k)
	}
	return out
}

// getIstioVersions returns a mapping of revision to aggregated version string for Istio components and revision to
// slice of versions for proxies. Any errors are embedded in the revision strings.
func getIstioVersions(kubeconfig, configContext, istioNamespace string, revisions []string) (map[string]string, map[string][]string) {
	istioVersions := make(map[string]string)
	proxyVersionsMap := make(map[string]map[string]struct{})
	proxyVersions := make(map[string][]string)
	for _, revision := range revisions {
		istioVersions[revision] = getIstioVersion(kubeconfig, configContext, istioNamespace, revision)
		proxyInfo, err := proxy.GetProxyInfo(kubeconfig, configContext, revision, istioNamespace)
		if err != nil {
			log.Error(err)
			continue
		}
		for _, pi := range *proxyInfo {
			if proxyVersionsMap[revision] == nil {
				proxyVersionsMap[revision] = make(map[string]struct{})
			}
			proxyVersionsMap[revision][pi.IstioVersion] = struct{}{}
		}
	}
	for revision, vmap := range proxyVersionsMap {
		for version := range vmap {
			proxyVersions[revision] = append(proxyVersions[revision], version)
		}
	}
	return istioVersions, proxyVersions
}

func getIstioVersion(kubeconfig, configContext, istioNamespace, revision string) string {
	kubeClient, err := kube.NewExtendedClient(kube.BuildClientCmd(kubeconfig, configContext), revision)
	if err != nil {
		return err.Error()
	}

	versions, err := kubeClient.GetIstioVersions(context.TODO(), istioNamespace)
	if err != nil {
		return err.Error()
	}
	return pretty.Sprint(versions)
}

// gatherInfo fetches all logs, resources, debug etc. using goroutines.
// proxy logs and info are saved in logs/stats/importance global maps.
// Errors are reported through gErrors.
func gatherInfo(client kube.ExtendedClient, config *config.BugReportConfig, resources *cluster2.Resources, paths []string) {
	// no timeout on mandatoryWg.
	var mandatoryWg sync.WaitGroup
	cmdTimer := time.NewTimer(time.Duration(config.CommandTimeout))
	beginTime := time.Now()

	clusterDir := archive.ClusterInfoPath(tempDir)

	params := &content.Params{
		Client:      client,
		DryRun:      config.DryRun,
		KubeConfig:  config.KubeConfigPath,
		KubeContext: config.Context,
	}
	common.LogAndPrintf("\nFetching Istio control plane information from cluster.\n\n")
	getFromCluster(content.GetK8sResources, params, clusterDir, &mandatoryWg)
	getFromCluster(content.GetCRs, params, clusterDir, &mandatoryWg)
	getFromCluster(content.GetEvents, params, clusterDir, &mandatoryWg)
	getFromCluster(content.GetClusterInfo, params, clusterDir, &mandatoryWg)
	getFromCluster(content.GetNodeInfo, params, clusterDir, &mandatoryWg)
	getFromCluster(content.GetSecrets, params.SetVerbose(config.FullSecrets), clusterDir, &mandatoryWg)
	getFromCluster(content.GetDescribePods, params.SetIstioNamespace(config.IstioNamespace), clusterDir, &mandatoryWg)

	// optionalWg is subject to timer.
	var optionalWg sync.WaitGroup
	for _, p := range paths {
		namespace, _, pod, container, err := cluster2.ParsePath(p)
		if err != nil {
			log.Error(err.Error())
			continue
		}

		cp := params.SetNamespace(namespace).SetPod(pod).SetContainer(container)
		proxyDir := archive.ProxyOutputPath(tempDir, namespace, pod)
		switch {
		case common.IsProxyContainer(params.ClusterVersion, container):
			getFromCluster(content.GetCoredumps, cp, filepath.Join(proxyDir, "cores"), &mandatoryWg)
			getFromCluster(content.GetNetstat, cp, proxyDir, &mandatoryWg)
			getFromCluster(content.GetProxyInfo, cp, archive.ProxyOutputPath(tempDir, namespace, pod), &optionalWg)
			getProxyLogs(client, config, resources, p, namespace, pod, container, &optionalWg)

		case resources.IsDiscoveryContainer(params.ClusterVersion, namespace, pod, container):
			getFromCluster(content.GetIstiodInfo, cp, archive.IstiodPath(tempDir, namespace, pod), &mandatoryWg)
			getIstiodLogs(client, config, resources, namespace, pod, &mandatoryWg)

		case common.IsOperatorContainer(params.ClusterVersion, container):
			getOperatorLogs(client, config, resources, namespace, pod, &optionalWg)
		}
	}

	// Not all items are subject to timeout. Proceed only if the non-cancellable items have completed.
	mandatoryWg.Wait()

	// If log fetches have completed, cancel the timeout.
	go func() {
		optionalWg.Wait()
		cmdTimer.Reset(0)
	}()

	// Wait for log fetches, up to the timeout.
	<-cmdTimer.C

	// Find the timeout duration left for the analysis process.
	analyzeTimeout := time.Until(beginTime.Add(time.Duration(config.CommandTimeout)))

	// Analyze runs many queries internally, so run these queries sequentially and after everything else has finished.
	runAnalyze(config, params, analyzeTimeout)
}

// getFromCluster runs a cluster info fetching function f against the cluster and writes the results to fileName.
// Runs in a goroutine, with errors reported through gErrors.
func getFromCluster(f func(params *content.Params) (map[string]string, error), params *content.Params, dir string, wg *sync.WaitGroup) {
	wg.Add(1)
	log.Infof("Waiting on %s", runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name())
	go func() {
		defer wg.Done()
		out, err := f(params)
		appendGlobalErr(err)
		if err == nil {
			writeFiles(dir, out)
		}
		log.Infof("Done with %s", runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name())
	}()
}

// getProxyLogs fetches proxy logs for the given namespace/pod/container and stores the output in global structs.
// Runs in a goroutine, with errors reported through gErrors.
// TODO(stewartbutler): output the logs to a more robust/complete structure.
func getProxyLogs(client kube.ExtendedClient, config *config.BugReportConfig, resources *cluster2.Resources,
	path, namespace, pod, container string, wg *sync.WaitGroup) {
	wg.Add(1)
	log.Infof("Waiting on logs %s", pod)
	go func() {
		defer wg.Done()
		clog, cstat, imp, err := getLog(client, resources, config, namespace, pod, container)
		appendGlobalErr(err)
		lock.Lock()
		if err == nil {
			logs[path], stats[path], importance[path] = clog, cstat, imp
		}
		lock.Unlock()
		log.Infof("Done with logs %s", pod)
	}()
}

// getIstiodLogs fetches Istiod logs for the given namespace/pod and writes the output.
// Runs in a goroutine, with errors reported through gErrors.
func getIstiodLogs(client kube.ExtendedClient, config *config.BugReportConfig, resources *cluster2.Resources,
	namespace, pod string, wg *sync.WaitGroup) {
	wg.Add(1)
	log.Infof("Waiting on logs %s", pod)
	go func() {
		defer wg.Done()
		clog, _, _, err := getLog(client, resources, config, namespace, pod, common.DiscoveryContainerName)
		appendGlobalErr(err)
		writeFile(filepath.Join(archive.IstiodPath(tempDir, namespace, pod), "discovery.log"), clog)
		log.Infof("Done with logs %s", pod)
	}()
}

// getOperatorLogs fetches istio-operator logs for the given namespace/pod and writes the output.
func getOperatorLogs(client kube.ExtendedClient, config *config.BugReportConfig, resources *cluster2.Resources,
	namespace, pod string, wg *sync.WaitGroup) {
	wg.Add(1)
	log.Infof("Waiting on logs %s", pod)
	go func() {
		defer wg.Done()
		clog, _, _, err := getLog(client, resources, config, namespace, pod, common.OperatorContainerName)
		appendGlobalErr(err)
		writeFile(filepath.Join(archive.OperatorPath(tempDir, namespace, pod), "operator.log"), clog)
		log.Infof("Done with logs %s", pod)
	}()
}

// getLog fetches the logs for the given namespace/pod/container and returns the log text and stats for it.
func getLog(client kube.ExtendedClient, resources *cluster2.Resources, config *config.BugReportConfig,
	namespace, pod, container string) (string, *processlog.Stats, int, error) {
	log.Infof("Getting logs for %s/%s/%s...", namespace, pod, container)
	clog, err := kubectlcmd.Logs(client, namespace, pod, container, false, config.DryRun)
	if err != nil {
		return "", nil, 0, err
	}
	if resources.ContainerRestarts(namespace, pod, container) > 0 {
		pclog, err := kubectlcmd.Logs(client, namespace, pod, container, true, config.DryRun)
		if err != nil {
			return "", nil, 0, err
		}
		clog = "========= Previous log present (appended at the end) =========\n\n" +
			clog + "\n\n========= Previous log =========\n\n" + pclog
	}
	var cstat *processlog.Stats
	clog, cstat = processlog.Process(config, clog)
	return clog, cstat, cstat.Importance(), nil
}

func runAnalyze(config *config.BugReportConfig, params *content.Params, analyzeTimeout time.Duration) {
	newParam := params.SetNamespace(common.NamespaceAll)
	common.LogAndPrintf("Running istio analyze on all namespaces; the report follows:")
	out, err := content.GetAnalyze(newParam.SetIstioNamespace(config.IstioNamespace), analyzeTimeout)
	if err != nil {
		log.Error(err.Error())
		return
	}
	common.LogAndPrintf("\nAnalysis Report:\n")
	common.LogAndPrintf(out[common.StrNamespaceAll])
	common.LogAndPrintf("\n")
	writeFiles(archive.AnalyzePath(tempDir, common.StrNamespaceAll), out)
}

func writeFiles(dir string, files map[string]string) {
	for fname, text := range files {
		writeFile(filepath.Join(dir, fname), text)
	}
}

func writeFile(path, text string) {
	if strings.TrimSpace(text) == "" {
		return
	}
	mkdirOrExit(path)
	if err := os.WriteFile(path, []byte(text), 0o644); err != nil {
		log.Errorf(err.Error())
	}
}

func mkdirOrExit(fpath string) {
	if err := os.MkdirAll(path.Dir(fpath), 0o755); err != nil {
		fmt.Printf("Could not create output directories: %s", err)
		os.Exit(-1)
	}
}

func appendGlobalErr(err error) {
	if err == nil {
		return
	}
	lock.Lock()
	gErrors = util.AppendErr(gErrors, err)
	lock.Unlock()
}

func BuildClientsFromConfig(kubeConfig []byte) (kube.Client, error) {
	if len(kubeConfig) == 0 {
		return nil, errors.New("kubeconfig is empty")
	}

	rawConfig, err := clientcmd.Load(kubeConfig)
	if err != nil {
		return nil, fmt.Errorf("kubeconfig cannot be loaded: %v", err)
	}
	if err := clientcmd.Validate(*rawConfig); err != nil {
		return nil, fmt.Errorf("kubeconfig is not valid: %v", err)
	}

	clientConfig := clientcmd.NewDefaultClientConfig(*rawConfig, &clientcmd.ConfigOverrides{})
	clients, err := kube.NewClient(clientConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create kube clients: %v", err)
	}
	return clients, nil
}

func configLogs(opt *log.Options) error {
	logDir := filepath.Join(archive.OutputRootDir(tempDir), "bug-report.log")
	mkdirOrExit(logDir)
	f, err := os.Create(logDir)
	if err != nil {
		return err
	}
	f.Close()
	op := []string{logDir}
	opt2 := *opt
	opt2.OutputPaths = op
	opt2.ErrorOutputPaths = op
	opt2.SetOutputLevel("default", log.InfoLevel)

	return log.Configure(&opt2)
}
{
			log.Errorf(err.Error())
			continue
		}
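// Editorial sketch (separate from bugreport.go): the fan-out pattern that
// gatherInfo and the get*Logs helpers rely on -- each fetch runs in a
// goroutine tracked by a WaitGroup, and errors are collected under a mutex,
// mirroring appendGlobalErr. All names here are illustrative.
package main

import (
	"fmt"
	"sync"
)

func fetch(string) error { return nil } // stand-in for a content fetcher

func main() {
	var (
		wg   sync.WaitGroup
		mu   sync.Mutex
		errs []error
	)
	for _, name := range []string{"resources", "events", "logs"} {
		wg.Add(1)
		go func(n string) {
			defer wg.Done()
			if err := fetch(n); err != nil {
				mu.Lock()
				errs = append(errs, err)
				mu.Unlock()
			}
		}(name)
	}
	wg.Wait() // like mandatoryWg.Wait(): proceed only once all fetches finish
	fmt.Println("errors:", errs)
}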
WorkItemResultSection.tsx
import { WorkItemType } from 'azure-devops-extension-api/WorkItemTracking';
import { ConditionalChildren } from 'azure-devops-ui/ConditionalChildren';
import { ZeroData } from 'azure-devops-ui/ZeroData';

import ProcessedItem from '../../common/models/ProcessedItem';
interface WorkItemResultSectionProps {
  types: WorkItemType[];
  items: ProcessedItem[];
}

const WorkItemResultSection = ({ types, items }: WorkItemResultSectionProps): JSX.Element => {
  return (
    <div className="flex-column">
      <h2>Result</h2>
      {/* Render the list whenever there is at least one item; the original
          `length > 1` / `length === 1` checks dropped single-item results */}
      <ConditionalChildren renderChildren={items.length > 0}>
        <div className="flex-column">
          {items.map(x => {
            const type = types.find(y => y.name === x.type);
            return type ? (
              <WorkItemDisplay
                key={x.id}
                type={type}
                id={x.id}
                title={x.title}
                state={x.updatedState}
                transition={{ from: x.sourceState, to: x.updatedState }}
              />
            ) : null;
          })}
        </div>
      </ConditionalChildren>
      <ConditionalChildren renderChildren={items.length === 0}>
        <ZeroData iconProps={{ iconName: 'WorkItem' }} imageAltText="" primaryText="No work items will be changed" />
      </ConditionalChildren>
    </div>
  );
};

export default WorkItemResultSection;
import WorkItemDisplay from './WorkItemDisplay';
target.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use {
    anyhow::{Context, Error},
    blackout_target::{CommonCommand, CommonOpts},
    byteorder::{NativeEndian, ReadBytesExt, WriteBytesExt},
    files_async::readdir,
    fs_management::{Blobfs, Filesystem},
    fuchsia_async as fasync,
    fuchsia_merkle::MerkleTreeBuilder,
    rand::{distributions::Standard, Rng},
    std::{collections::HashMap, fs::File, io::Write},
    structopt::StructOpt,
};

#[derive(Debug, StructOpt)]
#[structopt(rename_all = "kebab-case")]
struct Opts {
    #[structopt(flatten)]
    common: CommonOpts,
    /// A particular step of the test to perform.
    #[structopt(subcommand)]
    commands: CommonCommand,
}

fn write_blob(root: &str, i: u64) -> Result<String, Error> {
    let mut rng = rand::thread_rng();
    let mut data = vec![];
    data.write_u64::<NativeEndian>(i)?;
    // length of extra random data in bytes
    let rand_length: usize = rng.gen_range(0..6000);
    data.extend(rng.sample_iter::<u8, _>(&Standard).take(rand_length));

    // generate merkle root for new blob
    let mut builder = MerkleTreeBuilder::new();
    builder.write(&data);
    let merkle = builder.finish();

    let path = format!("{}/{}", root, merkle.root());

    // blob writing dance
    let mut blob = File::create(&path)?;
    blob.set_len(data.len() as u64)?;
    blob.write_all(&data)?;

    Ok(path)
}

fn setup(blobfs: &mut Filesystem<Blobfs>) -> Result<(), Error> {
    let mut rng = rand::thread_rng();

    println!("formatting provided block device with blobfs");
    blobfs.format().context("failed to format blobfs")?;

    let root = format!("/test-fs-root-{}", rng.gen::<u16>());
    println!("mounting blobfs into default namespace at {}", root);
    blobfs.mount(&root).context("failed to mount blobfs")?;

    // Normally these tests just format in the setup, but I want a pile of files that I'm never
    // going to touch again, so this is the best place to set them up. Each file has a number
    // followed by a random amount of random garbage up to 6k (so the data for each blob takes up
    // one block at most). We want to stay within the bounds of the provided partition, so query
    // the size of the filesystem, and fill about 3/4ths of it with blobs.
    let q = blobfs.query_filesystem()?;
    println!("got query results - {:#?}", q);
    let num_blobs = (((q.total_bytes - q.used_bytes) / q.block_size as u64) * 3) / 4;
    let num_blobs = num_blobs - (num_blobs % 2);
    println!("just kidding - creating {} blobs on disk for setup", num_blobs);
    for i in 0..num_blobs {
        let _ = write_blob(&root, i)?;
    }

    println!("unmounting blobfs");
    blobfs.unmount().context("failed to unmount blobfs")?;

    Ok(())
}

async fn test(blobfs: &mut Filesystem<Blobfs>) -> Result<(), Error> {
    let mut rng = rand::thread_rng();
    let root = format!("/test-fs-root-{}", rng.gen::<u16>());
    println!("mounting blobfs into default namespace at {}", root);
    blobfs.mount(&root).context("failed to mount blobfs")?;

    println!("some prep work...");

    // Get a list of all the blobs on the partition so we can generate our load gen state. We have
    // exclusive access to this block device, so they were either made by us in setup or made by us
    // in a previous iteration of the test. This test is designed to be run multiple times in a row
    // and could be in any state when we cut the power, so we have to reconstruct it based off the
    // test invariants. Those invariants are
    //   1. even number of blob "slots"
    //   2. odd number blobs are never modified
    //   3. even number blobs can be deleted and rewritten with new data
    //   4. that means they might not be there when we start (hence "slots")
    //   5. blobs start with their number, which is a u64 written in native endian with byteorder
    #[derive(Clone, Debug)]
    enum Slot {
        Empty,
        Blob { path: String },
    }
    let mut blobs: HashMap<u64, Slot> = HashMap::new();
    // let root_proxy = blobfs.open(io_util::OpenFlags::RIGHT_READABLE)?;
    let root_proxy =
        io_util::open_directory_in_namespace(&root, io_util::OpenFlags::RIGHT_READABLE)?;

    // first we figure out what blobs are there.
    for entry in readdir(&root_proxy).await? {
        let path = format!("{}/{}", root, entry.name);
        let mut blob = File::open(&path)?;
        let slot_num = blob.read_u64::<NativeEndian>()?;
        debug_assert!(!blobs.contains_key(&slot_num));
        blobs.insert(slot_num, Slot::Blob { path });
    }

    println!("found {} blobs", blobs.len());

    // What is the max slot number we found? If it's even, it's the number of slots, if it's odd,
    // then it's the number of slots - 1. There should always be at least one slot filled out (odds
    // are never touched, so really we are going to have at least 1/2 of all possible slots filled
    // already).
    let max_found =
        blobs.keys().max().expect("Didn't find a maximum slot number. No blobs on disk?");
    // Either the last even slot was filled or we found the largest odd slot so this gets the
    // maximum number of slots.
    let max_found = max_found + 1;
    let max_slots = max_found + (max_found % 2);
    debug_assert!(max_slots % 2 == 0);
    let half_slots = max_slots / 2;
    println!(
        "max_found = {}. assuming max_slots = {} (half_slots = {})",
        max_found, max_slots, half_slots
    );

    let mut slots = vec![Slot::Empty; max_slots as usize];
    for (k, v) in blobs.into_iter() {
        slots[k as usize] = v;
    }

    println!("generating load");
    loop {
        // Get a random, even numbered slot and do the "next thing" to it.
        //   1. if the slot is empty, create a blob and write random data to it
        //   2. if it's not empty
        //      - 50% chance we just open it and read the contents and close it again
        //      - 50% chance we delete it
        // Obviously this isn't a "realistic" workload - blobs in the wild are going to spend a lot
        // of time getting read before they are deleted - but we want things to change a lot.
        let slot_num = rng.gen_range(0..half_slots as usize) * 2;
        let maybe_new_slot = match &slots[slot_num] {
            Slot::Empty => {
                let path = write_blob(&root, slot_num as u64)?;
                Some(Slot::Blob { path })
            }
            Slot::Blob { path } => {
                if rng.gen_bool(1.0 / 2.0) {
                    let mut blob = File::open(&path)?;
                    let _ = blob.read_u64::<NativeEndian>()?;
                    None
                } else {
                    std::fs::remove_file(&path)?;
                    Some(Slot::Empty)
                }
            }
        };
        if let Some(new_slot) = maybe_new_slot {
            slots[slot_num] = new_slot;
        }
    }
}

fn verify(blobfs: &mut Filesystem<Blobfs>) -> Result<(), Error> {
    println!("verifying disk with fsck");
    blobfs.fsck().context("failed to run fsck")?;
    println!("verification successful");
    Ok(())
}

#[fasync::run_singlethreaded]
async fn main() -> Result<(), Error> {
    let opts = Opts::from_args();
    println!("provided block device: {}", opts.common.block_device);
    let dev = blackout_target::find_dev(&opts.common.block_device).await?;
    println!("using equivalent block device: {}", dev);
        CommonCommand::Setup => setup(&mut blobfs),
        CommonCommand::Test => test(&mut blobfs).await,
        CommonCommand::Verify => verify(&mut blobfs),
    }
}
    let mut blobfs = Blobfs::new(&dev)?;

    match opts.commands {
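// Editorial sketch (not part of target.rs): the slot-count arithmetic from
// test() above, checked on its own. The largest slot index seen is rounded
// up to an even slot count, since odd-numbered blobs are never deleted.
fn slot_count(max_index_found: u64) -> u64 {
    let max_found = max_index_found + 1;
    max_found + (max_found % 2)
}

fn main() {
    assert_eq!(slot_count(7), 8); // largest odd slot present  -> 8 slots
    assert_eq!(slot_count(6), 8); // largest even slot present -> still 8 slots
    println!("slot arithmetic ok");
}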
rotate_axis.py
from machine import Pin, PWM
import pycom
import time


class Rotate:
    # Servo to fixed position
    def __init__(self, pwm):
        # Assume a 50 ms timer is already set up; it is reused here
        self.pwm = pwm
        self.is_active = False
        self.at_position = 50

    def run(self):
        pass

    def state_text(self):
        return 'Rotate position = {}'.format(self.at_position)

    def activate(self, start_dc=0.15):
        # May not be switched on
        if not self.is_active:
            self.is_active = True
            self.pwm_c = self.pwm.channel(2, pin='G13', duty_cycle=start_dc)

    def set_position(self, position):
        # Converts to 1-2 ms pulses
        self.at_position = position
        # position in %, mapped onto the 1-2 ms band of a 20 ms frame
        dc = (position / 100.0) * (1/20) + (1/20)
        self.activate(start_dc=dc)
        self.pwm_c.duty_cycle(dc)
        return dc

    def wait_set_position(self, position):
        """Rotates and waits until the rotation gets there.

        Guesses the time needed by assuming a constant rotation speed."""
        full_rotate_time = 3000  # ms
        # Estimate of a rotation at full speed
        time_estimate = full_rotate_time * abs(self.at_position - position) / 100
        # Allow for creep, which can take a minimum time
        if self.at_position - position != 0:
            time_estimate = min(int(time_estimate), 1500)
        self.set_position(position)
        time.sleep_ms(int(time_estimate))

    def shutdown(self):
        # Ideally won't move servo
        self.pwm_off = Pin('G13', mode=Pin.IN, pull=Pin.PULL_UP)
        self.is_active = False

    def in_bin(self):
        self.wait_set_position(86)

    def out_bin(self):
    def dvd(self):
        self.wait_set_position(50)
        self.wait_set_position(12)  # min diff seems to be 2
        self.wait_set_position(14)
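# Editorial sketch (not part of rotate_axis.py): the duty-cycle math from
# set_position() checked on its own. With a 50 Hz (20 ms) PWM frame,
# positions 0-100% map to 1-2 ms pulses, i.e. duty cycles of 0.05-0.10.
def position_to_duty_cycle(position):
    return (position / 100.0) * (1 / 20) + (1 / 20)

assert abs(position_to_duty_cycle(0) - 0.05) < 1e-12    # 1 ms pulse
assert abs(position_to_duty_cycle(50) - 0.075) < 1e-12  # 1.5 ms midpoint
assert abs(position_to_duty_cycle(100) - 0.10) < 1e-12  # 2 ms pulse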
multihash.rs
#![allow(dead_code)]

#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum HashType {
    SHA1,
    SHA2_256,
    SHA256,
    SHA2_512_256,
    SHA2_512_512,
    SHA512,
    BLAKE2B_256,
    BLAKE2B_512,
    BLAKE2S_128,
    BLAKE2S_256,
}

pub type HashBytes = (HashType, Box<[u8]>);

pub fn hash_type_to_string(hash_type: HashType) -> String {
    use self::HashType::*;
    match hash_type {
        SHA1 => String::from("SHA1"),
        SHA2_256 | SHA256 => String::from("SHA256"),
        SHA2_512_256 => String::from("SHA2-512-256"),
        SHA2_512_512 | SHA512 => String::from("SHA512"),
        BLAKE2B_256 => String::from("BLAKE2b-256"),
        BLAKE2B_512 => String::from("BLAKE2b-512"),
        BLAKE2S_128 => String::from("BLAKE2s-128"),
        BLAKE2S_256 => String::from("BLAKE2s-256"),
    }
}

pub fn string_to_hash_type(string: &str) -> Result<HashType, ()> {
    let string = string.to_lowercase();
    use self::HashType::*;
    let hash_type = match string.as_str() {
        "sha1" => Ok(SHA1),
        "sha2-256" | "sha256" => Ok(SHA256),
        "sha2-512-256" => Ok(SHA2_512_256),
        "sha2-512-512" | "sha512" => Ok(SHA512),
        "blake2b-256" => Ok(BLAKE2B_256),
        "blake2b-512" => Ok(BLAKE2B_512),
        "blake2s-128" => Ok(BLAKE2S_128),
        "blake2s-256" => Ok(BLAKE2S_256),
        _ => Err(()),
    };
    match hash_type {
        Ok(hs) => {
            if hash::hash_type_is_supported(hs) {
                Ok(hs)
            } else {
                Err(())
            }
        }
        Err(()) => Err(()),
    }
}

pub fn hash_bytes_to_bytes(hash_bytes: &HashBytes, buffer: &mut [u8]) {
    let param = specs::Param::new(hash_bytes.0);
    let digest_bytes = &hash_bytes.1;
    for i in 0..param.hash_func_type.len() {
        buffer[i] = param.hash_func_type[i];
    }
    buffer[param.hash_func_type.len()] = param.digest_length;
    let offset = param.hash_func_type.len() + 1;
    for i in 0..param.digest_length as usize {
        buffer[i + offset] = digest_bytes[i];
    }
}

pub fn hash_bytes_into_bytes(hash_bytes: &HashBytes) -> Box<[u8]> {
    let param = specs::Param::new(hash_bytes.0);
    let mut buffer = vec![0; param.total_length()].into_boxed_slice();
    hash_bytes_to_bytes(hash_bytes, &mut buffer);
    buffer
}

pub mod specs {
    use super::*;

    #[derive(Copy, Clone, Debug)]
    pub struct Param {
        pub hash_func_type: &'static [u8],
        pub digest_length: u8,
    }

    macro_rules! param {
        ( $func_type:ident; $len:expr ) => {
            Param {
                hash_func_type: &$func_type,
                digest_length: $len,
            }
        };
    }

    static SHA1_HFT: [u8; 1] = [0x11];
    static SHA256_HFT: [u8; 1] = [0x12];
    static SHA512_HFT: [u8; 1] = [0x13];
    static BLAKE2B_256_HFT: [u8; 2] = [0xb2, 0x20];
    static BLAKE2B_512_HFT: [u8; 2] = [0xb2, 0x40];
    static BLAKE2S_128_HFT: [u8; 2] = [0xb2, 0x50];
    static BLAKE2S_256_HFT: [u8; 2] = [0xb2, 0x60];

    pub static SHA1_PARAM: Param = param!(SHA1_HFT; 0x14);
    pub static SHA256_PARAM: Param = param!(SHA256_HFT; 0x20);
    pub static SHA2_512_256_PARAM: Param = param!(SHA512_HFT; 0x20);
    pub static SHA512_PARAM: Param = param!(SHA512_HFT; 0x40);
    pub static BLAKE2B_256_PARAM: Param = param!(BLAKE2B_256_HFT; 0x20);
    pub static BLAKE2B_512_PARAM: Param = param!(BLAKE2B_512_HFT; 0x40);
    pub static BLAKE2S_128_PARAM: Param = param!(BLAKE2S_128_HFT; 0x10);
    pub static BLAKE2S_256_PARAM: Param = param!(BLAKE2S_256_HFT; 0x20);

    impl Param {
        pub fn new(hash_type: HashType) -> Param {
            use super::HashType::*;
            match hash_type {
                SHA1 => SHA1_PARAM,
                SHA2_256 | SHA256 => SHA256_PARAM,
                SHA2_512_256 => SHA2_512_256_PARAM,
                SHA2_512_512 | SHA512 => SHA512_PARAM,
                BLAKE2B_256 => BLAKE2B_256_PARAM,
                BLAKE2B_512 => BLAKE2B_512_PARAM,
                BLAKE2S_128 => BLAKE2S_128_PARAM,
                BLAKE2S_256 => BLAKE2S_256_PARAM,
            }
        }

        pub fn total_length(&self) -> usize {
            self.hash_func_type.len() + 1 + self.digest_length as usize
        }
    }
}

pub mod hash {
    use super::*;
    use blake2::{VarBlake2b, VarBlake2s};

    #[derive(Clone, Debug)]
    pub struct
{
        ctx: _Ctx,
    }

    #[allow(non_camel_case_types)]
    #[derive(Clone, Debug)]
    enum _Ctx {
        SHA1(sha1::Sha1),
        SHA256(sha2::Sha256),
        SHA512(sha2::Sha512),
        BLAKE2B_256(VarBlake2b),
        BLAKE2B_512(VarBlake2b),
        BLAKE2S_128(VarBlake2s),
        BLAKE2S_256(VarBlake2s),
    }

    pub fn hash_type_is_supported(hash_type: HashType) -> bool {
        match Ctx::new(hash_type) {
            Ok(_) => true,
            Err(_) => false,
        }
    }

    impl Ctx {
        pub fn new(hash_type: HashType) -> Result<Ctx, ()> {
            let ctx = match hash_type {
                HashType::SHA1 => {
                    use sha1::Digest;
                    Some(_Ctx::SHA1(sha1::Sha1::new()))
                }
                HashType::SHA2_256 | HashType::SHA256 => {
                    use sha2::Digest;
                    Some(_Ctx::SHA256(sha2::Sha256::new()))
                }
                HashType::SHA2_512_256 => None,
                HashType::SHA2_512_512 | HashType::SHA512 => {
                    use sha2::Digest;
                    Some(_Ctx::SHA512(sha2::Sha512::new()))
                }
                HashType::BLAKE2B_256 => {
                    use blake2::digest::VariableOutput;
                    Some(_Ctx::BLAKE2B_256(
                        VarBlake2b::new(specs::Param::new(hash_type).digest_length as usize)
                            .unwrap(),
                    ))
                }
                HashType::BLAKE2B_512 => {
                    use blake2::digest::VariableOutput;
                    Some(_Ctx::BLAKE2B_512(
                        VarBlake2b::new(specs::Param::new(hash_type).digest_length as usize)
                            .unwrap(),
                    ))
                }
                HashType::BLAKE2S_128 => {
                    use blake2::digest::VariableOutput;
                    Some(_Ctx::BLAKE2S_128(
                        VarBlake2s::new(specs::Param::new(hash_type).digest_length as usize)
                            .unwrap(),
                    ))
                }
                HashType::BLAKE2S_256 => {
                    use blake2::digest::VariableOutput;
                    Some(_Ctx::BLAKE2S_256(
                        VarBlake2s::new(specs::Param::new(hash_type).digest_length as usize)
                            .unwrap(),
                    ))
                }
            };
            match ctx {
                Some(ctx) => Ok(Ctx { ctx }),
                None => Err(()),
            }
        }

        pub fn hash_type(&self) -> HashType {
            match self.ctx {
                _Ctx::SHA1(_) => HashType::SHA1,
                _Ctx::SHA256(_) => HashType::SHA256,
                _Ctx::SHA512(_) => HashType::SHA512,
                _Ctx::BLAKE2B_256(_) => HashType::BLAKE2B_256,
                _Ctx::BLAKE2B_512(_) => HashType::BLAKE2B_512,
                _Ctx::BLAKE2S_128(_) => HashType::BLAKE2S_128,
                _Ctx::BLAKE2S_256(_) => HashType::BLAKE2S_256,
            }
        }

        pub fn update(&mut self, data: &[u8]) {
            match self.ctx {
                _Ctx::SHA1(ref mut ctx) => {
                    use sha1::Digest;
                    ctx.input(data)
                }
                _Ctx::SHA256(ref mut ctx) => {
                    use sha2::Digest;
                    ctx.input(data)
                }
                _Ctx::SHA512(ref mut ctx) => {
                    use sha2::Digest;
                    ctx.input(data)
                }
                _Ctx::BLAKE2B_256(ref mut ctx) => {
                    use blake2::digest::Input;
                    ctx.input(data);
                }
                _Ctx::BLAKE2B_512(ref mut ctx) => {
                    use blake2::digest::Input;
                    ctx.input(data);
                }
                _Ctx::BLAKE2S_128(ref mut ctx) => {
                    use blake2::digest::Input;
                    ctx.input(data);
                }
                _Ctx::BLAKE2S_256(ref mut ctx) => {
                    use blake2::digest::Input;
                    ctx.input(data);
                }
            }
        }

        pub fn finish_to_bytes(self, hashval: &mut [u8]) {
            match self.ctx {
                _Ctx::SHA1(ctx) => {
                    use sha1::Digest;
                    hashval.copy_from_slice(&ctx.result())
                }
                _Ctx::SHA256(ctx) => {
                    use sha2::Digest;
                    hashval.copy_from_slice(&ctx.result())
                }
                _Ctx::SHA512(ctx) => {
                    use sha2::Digest;
                    hashval.copy_from_slice(&ctx.result())
                }
                _Ctx::BLAKE2B_256(ctx) => {
                    use blake2::digest::VariableOutput;
                    hashval.copy_from_slice(&ctx.vec_result())
                }
                _Ctx::BLAKE2B_512(ctx) => {
                    use blake2::digest::VariableOutput;
                    hashval.copy_from_slice(&ctx.vec_result())
                }
                _Ctx::BLAKE2S_128(ctx) => {
                    use blake2::digest::VariableOutput;
                    hashval.copy_from_slice(&ctx.vec_result())
                }
                _Ctx::BLAKE2S_256(ctx) => {
                    use blake2::digest::VariableOutput;
                    hashval.copy_from_slice(&ctx.vec_result())
                }
            }
        }

        pub fn finish_into_bytes(self) -> Box<[u8]> {
            let hash_type = self.hash_type();
            let param = specs::Param::new(hash_type);
            let digest_len = param.digest_length;
            let mut hashval = vec![0; digest_len as usize].into_boxed_slice();
            self.finish_to_bytes(&mut hashval);
            hashval
        }

        pub fn finish_to_hash_bytes(self, hash_bytes: &mut HashBytes) {
            hash_bytes.0 = self.hash_type();
            self.finish_to_bytes(&mut hash_bytes.1);
        }

        pub fn finish_into_hash_bytes(self) -> HashBytes {
            (self.hash_type(), self.finish_into_bytes())
        }
    }
}

pub mod parsers {
    use super::super::misc_utils;
    use super::specs;
    use super::{HashBytes, HashType};

    macro_rules! make_hash_parser_w_len {
        ( $name:ident, $ht:path, $param:path ) => {
            named!(
                $name<HashBytes>,
                do_parse!(
                    _total_len: tag!(&[$param.total_length() as u8])
                        >> _id: tag!($param.hash_func_type)
                        >> _n: tag!(&[$param.digest_length])
                        >> res: take!($param.digest_length)
                        >> (($ht, misc_utils::slice_to_boxed(res)))
                )
            );
        };
    }

    make_hash_parser_w_len!(sha1_w_len_p, HashType::SHA1, specs::SHA1_PARAM);
    make_hash_parser_w_len!(sha256_w_len_p, HashType::SHA256, specs::SHA256_PARAM);
    make_hash_parser_w_len!(
        sha2_512_256_w_len_p,
        HashType::SHA2_512_256,
        specs::SHA2_512_256_PARAM
    );
    make_hash_parser_w_len!(sha512_w_len_p, HashType::SHA512, specs::SHA512_PARAM);
    make_hash_parser_w_len!(
        blake2b_256_w_len_p,
        HashType::BLAKE2B_256,
        specs::BLAKE2B_256_PARAM
    );
    make_hash_parser_w_len!(
        blake2b_512_w_len_p,
        HashType::BLAKE2B_512,
        specs::BLAKE2B_512_PARAM
    );
    make_hash_parser_w_len!(
        blake2s_128_w_len_p,
        HashType::BLAKE2S_128,
        specs::BLAKE2S_128_PARAM
    );
    make_hash_parser_w_len!(
        blake2s_256_w_len_p,
        HashType::BLAKE2S_256,
        specs::BLAKE2S_256_PARAM
    );

    named!(pub multihash_w_len_p <HashBytes>,
           alt!(
               complete!(sha1_w_len_p)
                   | complete!(sha256_w_len_p)
                   | complete!(sha2_512_256_w_len_p)
                   | complete!(sha512_w_len_p)
                   | complete!(blake2b_256_w_len_p)
                   | complete!(blake2b_512_w_len_p)
                   | complete!(blake2s_128_w_len_p)
                   | complete!(blake2s_256_w_len_p)
           )
    );
}
Ctx
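// Editorial sketch (not part of multihash.rs): a test illustrating the byte
// layout produced by hash_bytes_into_bytes above -- the hash-function-type
// bytes, then the digest length, then the digest. It assumes placement
// inside the same module; the digest value below is made up.
#[cfg(test)]
mod multihash_layout_sketch {
    use super::*;

    #[test]
    fn sha256_multihash_layout() {
        let digest = vec![0xAB_u8; 32].into_boxed_slice(); // hypothetical digest
        let bytes = hash_bytes_into_bytes(&(HashType::SHA256, digest));
        assert_eq!(bytes[0], 0x12); // SHA256 hash-function-type byte
        assert_eq!(bytes[1], 0x20); // digest length: 32 bytes
        assert_eq!(bytes.len(), 34); // 1 + 1 + 32 == Param::total_length()
    }
}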
wer.py
def
(r, h):
    """
    Calculation of WER with Levenshtein distance.

    Works only for iterables up to 254 elements (uint8).
    O(nm) time and space complexity.

    Parameters
    ----------
    r : list
    h : list

    Returns
    -------
    int

    Examples
    --------
    >>> wer("who is there".split(), "is there".split())
    1
    >>> wer("who is there".split(), "".split())
    3
    >>> wer("".split(), "who is there".split())
    3
    """
    # initialisation
    import numpy
    d = numpy.zeros((len(r)+1)*(len(h)+1), dtype=numpy.uint8)
    d = d.reshape((len(r)+1, len(h)+1))
    for i in range(len(r)+1):
        for j in range(len(h)+1):
            if i == 0:
                d[0][j] = j
            elif j == 0:
                d[i][0] = i

    # computation
    for i in range(1, len(r)+1):
        for j in range(1, len(h)+1):
            if r[i-1] == h[j-1]:
                d[i][j] = d[i-1][j-1]
            else:
                substitution = d[i-1][j-1] + 1
                insertion = d[i][j-1] + 1
                deletion = d[i-1][j] + 1
                d[i][j] = min(substitution, insertion, deletion)

    return d[len(r)][len(h)]
wer
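# Editorial sketch (not part of wer.py): typical usage, assuming the pieces
# of the file above are assembled. wer() returns the raw word-level edit
# distance; by the common convention (an assumption here, not part of this
# file) it is divided by the reference length to obtain a rate.
reference = "the quick brown fox".split()
hypothesis = "the quik brown fox jumps".split()
errors = wer(reference, hypothesis)        # 1 substitution + 1 insertion = 2
word_error_rate = errors / len(reference)  # 2 / 4 = 0.5
print(errors, word_error_rate)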
test_hanging_termination.py
import os
from pathlib import Path

import numpy as np
import pytest
from jina import Flow, Document
from jina.clients import Client
from jina.logging.profile import TimeContext
from jina.parsers import set_client_cli_parser

from typing import Dict

from jina import DocumentArray, Executor, requests


class DumpExecutor(Executor):
    @requests
    def dump(self, docs: DocumentArray, parameters: Dict, **kwargs):
        shards = int(parameters['shards'])
        dump_path = parameters['dump_path']
        shard_size = len(docs) / shards
        os.makedirs(dump_path, exist_ok=True)
        for i in range(shards):
            dump_file = f'{dump_path}/{i}.ndjson'
            docs_to_be_dumped = docs[int(i * shard_size) : int((i + 1) * shard_size)]
            docs_to_be_dumped.save(dump_file)


class ErrorExecutor(Executor):
    @requests
    def dump(self, docs: DocumentArray, **kwargs):
        if len(docs) > 0:
            assert False


class ReloadExecutor(Executor):
    def __init__(self, dump_path=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # backwards compatibility
        assert 'dump_path' in kwargs['runtime_args'].keys()
        if dump_path is not None:
            shard_id = getattr(self.runtime_args, 'pea_id', None)
            shard_dump_path = os.path.join(dump_path, f'{shard_id}.ndjson')
            self._docs = DocumentArray.load(shard_dump_path)
        else:
            self._docs = DocumentArray()

    @requests
    def search(self, docs: DocumentArray, **kwargs):
        docs.clear()
        docs.extend(self._docs)


class MergeExecutor(Executor):
    @requests
    def merge(self, docs_matrix: DocumentArray, **kwargs):
        merged_docs = DocumentArray()
        for docs in docs_matrix:
            merged_docs.extend(docs)
        return merged_docs


def get_client(port):
    args = set_client_cli_parser().parse_args(
        ['--host', 'localhost', '--port', str(port)]
    )
    return Client(args)


def get_documents(count=10, emb_size=7):
    for i in range(count):
        yield Document(
            id=i,
            text=f'hello world {i}',
            embedding=np.random.random(emb_size),
            tags={'tag_field': f'tag data {i}'},
        )


def path_size(dump_path):
    return (
        sum(
            f.stat().st_size
            for f in Path(dump_path).glob('**/*')
            if f.is_file()
        )
        / 1e6
    )
@pytest.mark.repeat(20) @pytest.mark.parametrize('shards', [5, 3, 1]) @pytest.mark.parametrize('nr_docs', [7]) @pytest.mark.parametrize('emb_size', [10]) def test_dump_reload(tmpdir, shards, nr_docs, emb_size, times_to_index=2): """showcases using replicas + dump + rolling update with independent clients""" with Flow().add(uses=DumpExecutor, name='dump_exec').add( uses=ErrorExecutor, name='error_exec' ) as flow_dump: merge_executor = MergeExecutor if shards > 1 else None with Flow().add( uses=ReloadExecutor, name='reload_exec', replicas=2, shards=shards, uses_after=merge_executor, ) as flow_reload: for run_number in range(times_to_index): dump_path = os.path.join(tmpdir, f'dump-{run_number}') client_dbms = get_client(flow_dump.port_expose) client_query = get_client(flow_reload.port_expose) docs = list( get_documents( count=nr_docs * (run_number + 1), emb_size=emb_size, ) ) with TimeContext(f'### dumping {len(docs)} docs'): client_dbms.post( on='/dump', inputs=docs, target_peapod='dump_exec', parameters={'dump_path': dump_path, 'shards': shards}, ) print(f'### dump path size: {path_size(dump_path)} MBs') with TimeContext(f'### rolling update on {len(docs)}'): # flow object is used for ctrl requests flow_reload.rolling_update('reload_exec', dump_path) for _ in range(5): result = client_query.post( on='/search', inputs=[Document()], return_results=True ) assert len(docs) == len(result[0].docs)
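A side note on `DumpExecutor.dump` above: the shard boundaries come from float division plus `int()` truncation, which spreads any remainder across the shards without dropping or duplicating documents. The same arithmetic in isolation, for the test's `nr_docs=7, shards=3` case:

nr_docs, shards = 7, 3
shard_size = nr_docs / shards  # 2.333...

bounds = [(int(i * shard_size), int((i + 1) * shard_size)) for i in range(shards)]
# -> [(0, 2), (2, 4), (4, 7)]: sizes 2, 2 and 3, covering all 7 docs exactly once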
lib.rs
// This is not a public api. #![deny(clippy::all)] #![allow(clippy::ptr_arg)] #[doc(hidden)] pub extern crate swc_ecma_ast; use std::{borrow::Cow, fmt::Debug}; use num_bigint::BigInt as BigIntValue; use swc_atoms::JsWord; use swc_common::{pass::CompilerPass, Span, DUMMY_SP}; use swc_ecma_ast::*; use swc_visit::{define, AndThen, Repeat, Repeated}; impl<A, B> Fold for AndThen<A, B> where A: Fold, B: Fold, { #[inline(always)] fn fold_module(&mut self, n: Module) -> Module { let n = self.first.fold_module(n); self.second.fold_module(n) } #[inline(always)] fn fold_script(&mut self, n: Script) -> Script { let n = self.first.fold_script(n); self.second.fold_script(n) } } impl<A, B> VisitMut for AndThen<A, B> where A: VisitMut, B: VisitMut, { fn visit_mut_module(&mut self, n: &mut Module) { self.first.visit_mut_module(n); self.second.visit_mut_module(n) } fn visit_mut_script(&mut self, n: &mut Script) { self.first.visit_mut_script(n); self.second.visit_mut_script(n) } } impl<A, B> Visit for AndThen<A, B> where A: Visit, B: Visit, { fn visit_module(&mut self, n: &Module) { self.first.visit_module(n); self.second.visit_module(n); } fn visit_script(&mut self, n: &Script) { self.first.visit_script(n); self.second.visit_script(n); } } impl<V> Fold for Repeat<V> where V: Fold + Repeated, { fn fold_module(&mut self, mut node: Module) -> Module { loop { self.pass.reset(); node = node.fold_with(&mut self.pass); if !self.pass.changed() { break; } } node } fn fold_script(&mut self, mut node: Script) -> Script { loop { self.pass.reset(); node = node.fold_with(&mut self.pass); if !self.pass.changed() { break; } } node } } impl<V> VisitMut for Repeat<V> where V: VisitMut + Repeated, { fn visit_mut_module(&mut self, node: &mut Module) { loop { self.pass.reset(); node.visit_mut_with(&mut self.pass); if !self.pass.changed() { break; } } } fn visit_mut_script(&mut self, node: &mut Script) { loop { self.pass.reset(); node.visit_mut_with(&mut self.pass); if !self.pass.changed() { break; } } } } /// Not a public api. #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] struct SpanRemover; /// Returns a `Fold` which changes all span into `DUMMY_SP`. pub fn span_remover() -> impl Debug + Fold + Copy + Eq + Default + 'static { SpanRemover }
DUMMY_SP } } #[macro_export] macro_rules! assert_eq_ignore_span { ($l:expr, $r:expr) => {{ use $crate::FoldWith; let l = $l.fold_with(&mut $crate::span_remover()); let r = $r.fold_with(&mut $crate::span_remover()); assert_eq!(l, r); }}; ($l:expr, $r:expr, $($tts:tt)*) => {{ use $crate::FoldWith; let l = $l.fold_with(&mut $crate::span_remover()); let r = $r.fold_with(&mut $crate::span_remover()); assert_eq!(l, r, $($tts)*); }}; } /// Implemented for passes which inject variables. /// /// If a pass depends on other pass which injects variables, this trait can be /// used to keep the variables. pub trait InjectVars { fn take_vars(&mut self) -> Vec<VarDeclarator>; } impl<V> InjectVars for Folder<V> where V: VisitMut + InjectVars, { fn take_vars(&mut self) -> Vec<VarDeclarator> { self.0.take_vars() } } /// The returned folder only handles `fold_script` and `fold_module`, and /// typescript nodes are ignored. So if your visitor needs to handle typescript /// or low-level nodes, you should use [as_folder] instead. #[inline] pub fn as_folder<V>(v: V) -> Folder<V> where V: VisitMut, { Folder(v) } /// Wrap a [VisitMut] as a [Fold] #[derive(Debug, Clone, Copy)] pub struct Folder<V: VisitMut>(V); impl<V> Repeated for Folder<V> where V: Repeated + VisitMut, { fn changed(&self) -> bool { self.0.changed() } fn reset(&mut self) { self.0.reset(); } } impl<V> CompilerPass for Folder<V> where V: VisitMut + CompilerPass, { fn name() -> Cow<'static, str> { V::name() } } macro_rules! delegate { ($name:ident, $T:ty) => { fn $name(&mut self, n: &mut $T) { n.visit_mut_with(&mut self.0); } }; } /// This only proxies subset of methods. impl<V> VisitMut for Folder<V> where V: VisitMut, { delegate!(visit_mut_ident, Ident); delegate!(visit_mut_span, Span); delegate!(visit_mut_expr, Expr); delegate!(visit_mut_decl, Decl); delegate!(visit_mut_stmt, Stmt); delegate!(visit_mut_pat, Pat); delegate!(visit_mut_ts_type, TsType); delegate!(visit_mut_module, Module); delegate!(visit_mut_script, Script); delegate!(visit_mut_program, Program); } macro_rules! method { ($name:ident, $T:ty) => { fn $name(&mut self, mut n: $T) -> $T { n.visit_mut_with(&mut self.0); n } }; } impl<V> Fold for Folder<V> where V: VisitMut, { method!(fold_ident, Ident); method!(fold_span, Span); method!(fold_expr, Expr); method!(fold_decl, Decl); method!(fold_stmt, Stmt); method!(fold_pat, Pat); method!(fold_ts_type, TsType); method!(fold_script, Script); method!(fold_program, Program); #[inline(always)] fn fold_module(&mut self, mut n: Module) -> Module { #[cfg(all(debug_assertions, feature = "debug"))] let _tracing = { let visitor_name = std::any::type_name::<V>(); tracing::span!(tracing::Level::INFO, "as_folder", visitor = visitor_name).entered() }; n.visit_mut_with(&mut self.0); n } } /// Note: Ignoring more types is not considered as a breaking change. #[macro_export] macro_rules! 
noop_fold_type { ($name:ident, $N:tt) => { fn $name(&mut self, node: $crate::swc_ecma_ast::$N) -> $crate::swc_ecma_ast::$N { node } }; () => { noop_fold_type!(fold_accessibility, Accessibility); noop_fold_type!(fold_true_plus_minus, TruePlusMinus); noop_fold_type!(fold_ts_array_type, TsArrayType); noop_fold_type!(fold_ts_call_signature_decl, TsCallSignatureDecl); noop_fold_type!(fold_ts_conditional_type, TsConditionalType); noop_fold_type!(fold_ts_construct_signature_decl, TsConstructSignatureDecl); noop_fold_type!(fold_ts_constructor_type, TsConstructorType); noop_fold_type!(fold_ts_entity_name, TsEntityName); noop_fold_type!(fold_ts_enum_decl, TsEnumDecl); noop_fold_type!(fold_ts_enum_member, TsEnumMember); noop_fold_type!(fold_ts_enum_member_id, TsEnumMemberId); noop_fold_type!(fold_ts_external_module_ref, TsExternalModuleRef); noop_fold_type!(fold_ts_fn_or_constructor_type, TsFnOrConstructorType); noop_fold_type!(fold_ts_fn_param, TsFnParam); noop_fold_type!(fold_ts_fn_type, TsFnType); noop_fold_type!(fold_ts_import_equals_decl, TsImportEqualsDecl); noop_fold_type!(fold_ts_import_type, TsImportType); noop_fold_type!(fold_ts_index_signature, TsIndexSignature); noop_fold_type!(fold_ts_indexed_access_type, TsIndexedAccessType); noop_fold_type!(fold_ts_infer_type, TsInferType); noop_fold_type!(fold_ts_interface_body, TsInterfaceBody); noop_fold_type!(fold_ts_interface_decl, TsInterfaceDecl); noop_fold_type!(fold_ts_intersection_type, TsIntersectionType); noop_fold_type!(fold_ts_keyword_type, TsKeywordType); noop_fold_type!(fold_ts_keyword_type_kind, TsKeywordTypeKind); noop_fold_type!(fold_ts_mapped_type, TsMappedType); noop_fold_type!(fold_ts_method_signature, TsMethodSignature); noop_fold_type!(fold_ts_module_block, TsModuleBlock); noop_fold_type!(fold_ts_module_decl, TsModuleDecl); noop_fold_type!(fold_ts_module_name, TsModuleName); noop_fold_type!(fold_ts_module_ref, TsModuleRef); noop_fold_type!(fold_ts_namespace_body, TsNamespaceBody); noop_fold_type!(fold_ts_namespace_decl, TsNamespaceDecl); noop_fold_type!(fold_ts_namespace_export_decl, TsNamespaceExportDecl); noop_fold_type!(fold_ts_optional_type, TsOptionalType); noop_fold_type!(fold_ts_param_prop, TsParamProp); noop_fold_type!(fold_ts_param_prop_param, TsParamPropParam); noop_fold_type!(fold_ts_parenthesized_type, TsParenthesizedType); noop_fold_type!(fold_ts_property_signature, TsPropertySignature); noop_fold_type!(fold_ts_qualified_name, TsQualifiedName); noop_fold_type!(fold_ts_rest_type, TsRestType); noop_fold_type!(fold_ts_this_type, TsThisType); noop_fold_type!(fold_ts_this_type_or_ident, TsThisTypeOrIdent); noop_fold_type!(fold_ts_tuple_type, TsTupleType); noop_fold_type!(fold_ts_type, TsType); noop_fold_type!(fold_ts_type_alias_decl, TsTypeAliasDecl); noop_fold_type!(fold_ts_type_ann, TsTypeAnn); noop_fold_type!(fold_ts_type_assertion, TsTypeAssertion); noop_fold_type!(fold_ts_type_element, TsTypeElement); noop_fold_type!(fold_ts_type_lit, TsTypeLit); noop_fold_type!(fold_ts_type_operator, TsTypeOperator); noop_fold_type!(fold_ts_type_operator_op, TsTypeOperatorOp); noop_fold_type!(fold_ts_type_param, TsTypeParam); noop_fold_type!(fold_ts_type_param_decl, TsTypeParamDecl); noop_fold_type!(fold_ts_type_param_instantiation, TsTypeParamInstantiation); noop_fold_type!(fold_ts_type_predicate, TsTypePredicate); noop_fold_type!(fold_ts_type_query, TsTypeQuery); noop_fold_type!(fold_ts_type_query_expr, TsTypeQueryExpr); noop_fold_type!(fold_ts_type_ref, TsTypeRef); noop_fold_type!( fold_ts_union_or_intersection_type, 
TsUnionOrIntersectionType ); noop_fold_type!(fold_ts_union_type, TsUnionType); }; } /// Note: Ignoring more types is not considered as a breaking change. #[macro_export] macro_rules! noop_visit_type { ($name:ident, $N:tt) => { fn $name(&mut self, _: &$crate::swc_ecma_ast::$N) {} }; () => { noop_visit_type!(visit_accessibility, Accessibility); noop_visit_type!(visit_true_plus_minus, TruePlusMinus); noop_visit_type!(visit_ts_array_type, TsArrayType); noop_visit_type!(visit_ts_call_signature_decl, TsCallSignatureDecl); noop_visit_type!(visit_ts_conditional_type, TsConditionalType); noop_visit_type!(visit_ts_construct_signature_decl, TsConstructSignatureDecl); noop_visit_type!(visit_ts_constructor_type, TsConstructorType); noop_visit_type!(visit_ts_entity_name, TsEntityName); noop_visit_type!(visit_ts_external_module_ref, TsExternalModuleRef); noop_visit_type!(visit_ts_fn_or_constructor_type, TsFnOrConstructorType); noop_visit_type!(visit_ts_fn_param, TsFnParam); noop_visit_type!(visit_ts_fn_type, TsFnType); noop_visit_type!(visit_ts_import_type, TsImportType); noop_visit_type!(visit_ts_index_signature, TsIndexSignature); noop_visit_type!(visit_ts_indexed_access_type, TsIndexedAccessType); noop_visit_type!(visit_ts_infer_type, TsInferType); noop_visit_type!(visit_ts_interface_body, TsInterfaceBody); noop_visit_type!(visit_ts_interface_decl, TsInterfaceDecl); noop_visit_type!(visit_ts_intersection_type, TsIntersectionType); noop_visit_type!(visit_ts_keyword_type, TsKeywordType); noop_visit_type!(visit_ts_keyword_type_kind, TsKeywordTypeKind); noop_visit_type!(visit_ts_mapped_type, TsMappedType); noop_visit_type!(visit_ts_method_signature, TsMethodSignature); noop_visit_type!(visit_ts_module_ref, TsModuleRef); noop_visit_type!(visit_ts_optional_type, TsOptionalType); noop_visit_type!(visit_ts_parenthesized_type, TsParenthesizedType); noop_visit_type!(visit_ts_property_signature, TsPropertySignature); noop_visit_type!(visit_ts_qualified_name, TsQualifiedName); noop_visit_type!(visit_ts_rest_type, TsRestType); noop_visit_type!(visit_ts_this_type, TsThisType); noop_visit_type!(visit_ts_this_type_or_ident, TsThisTypeOrIdent); noop_visit_type!(visit_ts_tuple_type, TsTupleType); noop_visit_type!(visit_ts_type, TsType); noop_visit_type!(visit_ts_type_alias_decl, TsTypeAliasDecl); noop_visit_type!(visit_ts_type_ann, TsTypeAnn); noop_visit_type!(visit_ts_type_element, TsTypeElement); noop_visit_type!(visit_ts_type_lit, TsTypeLit); noop_visit_type!(visit_ts_type_operator, TsTypeOperator); noop_visit_type!(visit_ts_type_operator_op, TsTypeOperatorOp); noop_visit_type!(visit_ts_type_param, TsTypeParam); noop_visit_type!(visit_ts_type_param_decl, TsTypeParamDecl); noop_visit_type!(visit_ts_type_param_instantiation, TsTypeParamInstantiation); noop_visit_type!(visit_ts_type_predicate, TsTypePredicate); noop_visit_type!(visit_ts_type_query, TsTypeQuery); noop_visit_type!(visit_ts_type_query_expr, TsTypeQueryExpr); noop_visit_type!(visit_ts_type_ref, TsTypeRef); noop_visit_type!( visit_ts_union_or_intersection_type, TsUnionOrIntersectionType ); noop_visit_type!(visit_ts_union_type, TsUnionType); }; } /// Note: Ignoring more types is not considered as a breaking change. #[macro_export] macro_rules! 
noop_visit_mut_type { ($name:ident, $N:ident) => { fn $name(&mut self, _: &mut $crate::swc_ecma_ast::$N) {} }; () => { noop_visit_mut_type!(visit_mut_accessibility, Accessibility); noop_visit_mut_type!(visit_mut_true_plus_minus, TruePlusMinus); noop_visit_mut_type!(visit_mut_ts_array_type, TsArrayType); noop_visit_mut_type!(visit_mut_ts_call_signature_decl, TsCallSignatureDecl); noop_visit_mut_type!(visit_mut_ts_conditional_type, TsConditionalType); noop_visit_mut_type!( visit_mut_ts_construct_signature_decl, TsConstructSignatureDecl ); noop_visit_mut_type!(visit_mut_ts_constructor_type, TsConstructorType); noop_visit_mut_type!(visit_mut_ts_entity_name, TsEntityName); noop_visit_mut_type!(visit_mut_ts_external_module_ref, TsExternalModuleRef); noop_visit_mut_type!(visit_mut_ts_fn_or_constructor_type, TsFnOrConstructorType); noop_visit_mut_type!(visit_mut_ts_fn_param, TsFnParam); noop_visit_mut_type!(visit_mut_ts_fn_type, TsFnType); noop_visit_mut_type!(visit_mut_ts_import_type, TsImportType); noop_visit_mut_type!(visit_mut_ts_index_signature, TsIndexSignature); noop_visit_mut_type!(visit_mut_ts_indexed_access_type, TsIndexedAccessType); noop_visit_mut_type!(visit_mut_ts_infer_type, TsInferType); noop_visit_mut_type!(visit_mut_ts_interface_body, TsInterfaceBody); noop_visit_mut_type!(visit_mut_ts_interface_decl, TsInterfaceDecl); noop_visit_mut_type!(visit_mut_ts_intersection_type, TsIntersectionType); noop_visit_mut_type!(visit_mut_ts_keyword_type, TsKeywordType); noop_visit_mut_type!(visit_mut_ts_keyword_type_kind, TsKeywordTypeKind); noop_visit_mut_type!(visit_mut_ts_mapped_type, TsMappedType); noop_visit_mut_type!(visit_mut_ts_method_signature, TsMethodSignature); noop_visit_mut_type!(visit_mut_ts_module_ref, TsModuleRef); noop_visit_mut_type!(visit_mut_ts_optional_type, TsOptionalType); noop_visit_mut_type!(visit_mut_ts_parenthesized_type, TsParenthesizedType); noop_visit_mut_type!(visit_mut_ts_property_signature, TsPropertySignature); noop_visit_mut_type!(visit_mut_ts_qualified_name, TsQualifiedName); noop_visit_mut_type!(visit_mut_ts_rest_type, TsRestType); noop_visit_mut_type!(visit_mut_ts_this_type, TsThisType); noop_visit_mut_type!(visit_mut_ts_this_type_or_ident, TsThisTypeOrIdent); noop_visit_mut_type!(visit_mut_ts_tuple_type, TsTupleType); noop_visit_mut_type!(visit_mut_ts_type, TsType); noop_visit_mut_type!(visit_mut_ts_type_alias_decl, TsTypeAliasDecl); noop_visit_mut_type!(visit_mut_ts_type_ann, TsTypeAnn); noop_visit_mut_type!(visit_mut_ts_type_element, TsTypeElement); noop_visit_mut_type!(visit_mut_ts_type_lit, TsTypeLit); noop_visit_mut_type!(visit_mut_ts_type_operator, TsTypeOperator); noop_visit_mut_type!(visit_mut_ts_type_operator_op, TsTypeOperatorOp); noop_visit_mut_type!(visit_mut_ts_type_param, TsTypeParam); noop_visit_mut_type!(visit_mut_ts_type_param_decl, TsTypeParamDecl); noop_visit_mut_type!( visit_mut_ts_type_param_instantiation, TsTypeParamInstantiation ); noop_visit_mut_type!(visit_mut_ts_type_predicate, TsTypePredicate); noop_visit_mut_type!(visit_mut_ts_type_query, TsTypeQuery); noop_visit_mut_type!(visit_mut_ts_type_query_expr, TsTypeQueryExpr); noop_visit_mut_type!(visit_mut_ts_type_ref, TsTypeRef); noop_visit_mut_type!( visit_mut_ts_union_or_intersection_type, TsUnionOrIntersectionType ); noop_visit_mut_type!(visit_mut_ts_union_type, TsUnionType); }; } define!({ pub struct Class { pub span: Span, pub decorators: Vec<Decorator>, pub body: Vec<ClassMember>, pub super_class: Option<Box<Expr>>, pub is_abstract: bool, pub type_params: 
Option<TsTypeParamDecl>, pub super_type_params: Option<TsTypeParamInstantiation>, pub implements: Vec<TsExprWithTypeArgs>, } pub enum ClassMember { Constructor(Constructor), Method(ClassMethod), PrivateMethod(PrivateMethod), ClassProp(ClassProp), PrivateProp(PrivateProp), TsIndexSignature(TsIndexSignature), Empty(EmptyStmt), StaticBlock(StaticBlock), } pub struct ClassProp { pub span: Span, pub key: PropName, pub value: Option<Box<Expr>>, pub type_ann: Option<TsTypeAnn>, pub is_static: bool, pub decorators: Vec<Decorator>, pub accessibility: Option<Accessibility>, pub is_abstract: bool, pub is_optional: bool, pub is_override: bool, pub readonly: bool, pub declare: bool, pub definite: bool, } pub struct PrivateProp { pub span: Span, pub key: PrivateName, pub value: Option<Box<Expr>>, pub type_ann: Option<TsTypeAnn>, pub is_static: bool, pub decorators: Vec<Decorator>, pub accessibility: Option<Accessibility>, pub is_optional: bool, pub is_override: bool, pub readonly: bool, pub definite: bool, } pub struct ClassMethod { pub span: Span, pub key: PropName, pub function: Function, pub kind: MethodKind, pub is_static: bool, pub accessibility: Option<Accessibility>, pub is_abstract: bool, pub is_optional: bool, pub is_override: bool, } pub struct PrivateMethod { pub span: Span, pub key: PrivateName, pub function: Function, pub kind: MethodKind, pub is_static: bool, pub accessibility: Option<Accessibility>, pub is_abstract: bool, pub is_optional: bool, pub is_override: bool, } pub struct Constructor { pub span: Span, pub key: PropName, pub params: Vec<ParamOrTsParamProp>, pub body: Option<BlockStmt>, pub accessibility: Option<Accessibility>, pub is_optional: bool, } pub struct Decorator { pub span: Span, pub expr: Box<Expr>, } pub struct StaticBlock { pub span: Span, pub body: BlockStmt, } pub enum MethodKind { Method, Getter, Setter, } pub enum Decl { Class(ClassDecl), Fn(FnDecl), Var(VarDecl), TsInterface(TsInterfaceDecl), TsTypeAlias(TsTypeAliasDecl), TsEnum(TsEnumDecl), TsModule(TsModuleDecl), } pub struct FnDecl { pub ident: Ident, pub declare: bool, pub function: Function, } pub struct ClassDecl { pub ident: Ident, pub declare: bool, pub class: Class, } pub struct VarDecl { pub span: Span, pub kind: VarDeclKind, pub declare: bool, pub decls: Vec<VarDeclarator>, } pub enum VarDeclKind { Var, Let, Const, } pub struct VarDeclarator { pub span: Span, pub name: Pat, pub init: Option<Box<Expr>>, pub definite: bool, } pub enum Expr { This(ThisExpr), Array(ArrayLit), Object(ObjectLit), Fn(FnExpr), Unary(UnaryExpr), Update(UpdateExpr), Bin(BinExpr), Assign(AssignExpr), Member(MemberExpr), SuperProp(SuperPropExpr), Cond(CondExpr), Call(CallExpr), New(NewExpr), Seq(SeqExpr), Ident(Ident), Lit(Lit), Tpl(Tpl), TaggedTpl(TaggedTpl), Arrow(ArrowExpr), Class(ClassExpr), Yield(YieldExpr), MetaProp(MetaPropExpr), Await(AwaitExpr), Paren(ParenExpr), JSXMember(JSXMemberExpr), JSXNamespacedName(JSXNamespacedName), JSXEmpty(JSXEmptyExpr), JSXElement(Box<JSXElement>), JSXFragment(JSXFragment), TsTypeAssertion(TsTypeAssertion), TsConstAssertion(TsConstAssertion), TsNonNull(TsNonNullExpr), TsAs(TsAsExpr), TsInstantiation(TsInstantiation), PrivateName(PrivateName), OptChain(OptChainExpr), Invalid(Invalid), } pub struct ThisExpr { pub span: Span, } pub struct ArrayLit { pub span: Span, pub elems: Vec<Option<ExprOrSpread>>, } pub struct ObjectLit { pub span: Span, pub props: Vec<PropOrSpread>, } pub enum PropOrSpread { Spread(SpreadElement), Prop(Box<Prop>), } pub struct SpreadElement { pub dot3_token: Span, pub expr: 
Box<Expr>, } pub struct UnaryExpr { pub span: Span, pub op: UnaryOp, pub arg: Box<Expr>, } pub struct UpdateExpr { pub span: Span, pub op: UpdateOp, pub prefix: bool, pub arg: Box<Expr>, } pub struct BinExpr { pub span: Span, pub op: BinaryOp, pub left: Box<Expr>, pub right: Box<Expr>, } pub struct FnExpr { pub ident: Option<Ident>, pub function: Function, } pub struct ClassExpr { pub ident: Option<Ident>, pub class: Class, } pub struct AssignExpr { pub span: Span, pub op: AssignOp, pub left: PatOrExpr, pub right: Box<Expr>, } pub struct MemberExpr { pub span: Span, pub obj: Box<Expr>, pub prop: MemberProp, } pub enum MemberProp { Ident(Ident), PrivateName(PrivateName), Computed(ComputedPropName), } pub struct SuperPropExpr { pub span: Span, pub obj: Super, pub prop: SuperProp, } pub enum SuperProp { Ident(Ident), Computed(ComputedPropName), } pub struct CondExpr { pub span: Span, pub test: Box<Expr>, pub cons: Box<Expr>, pub alt: Box<Expr>, } pub struct CallExpr { pub span: Span, pub callee: Callee, pub args: Vec<ExprOrSpread>, pub type_args: Option<TsTypeParamInstantiation>, } pub struct NewExpr { pub span: Span, pub callee: Box<Expr>, pub args: Option<Vec<ExprOrSpread>>, pub type_args: Option<TsTypeParamInstantiation>, } pub struct SeqExpr { pub span: Span, pub exprs: Vec<Box<Expr>>, } pub struct ArrowExpr { pub span: Span, pub params: Vec<Pat>, pub body: BlockStmtOrExpr, pub is_async: bool, pub is_generator: bool, pub type_params: Option<TsTypeParamDecl>, pub return_type: Option<TsTypeAnn>, } pub struct YieldExpr { pub span: Span, pub arg: Option<Box<Expr>>, pub delegate: bool, } pub struct MetaPropExpr { pub span: Span, pub kind: MetaPropKind, } pub enum MetaPropKind { NewTarget, ImportMeta, } pub struct AwaitExpr { pub span: Span, pub arg: Box<Expr>, } pub struct Tpl { pub span: Span, pub exprs: Vec<Box<Expr>>, pub quasis: Vec<TplElement>, } pub struct TaggedTpl { pub span: Span, pub tag: Box<Expr>, pub type_params: Option<TsTypeParamInstantiation>, pub tpl: Tpl, } pub struct TplElement { pub span: Span, pub tail: bool, pub cooked: Option<JsWord>, pub raw: JsWord, } pub struct ParenExpr { pub span: Span, pub expr: Box<Expr>, } pub enum Callee { Super(Super), Import(Import), Expr(Box<Expr>), } pub struct Super { pub span: Span, } pub struct Import { pub span: Span, } pub struct ExprOrSpread { pub spread: Option<Span>, pub expr: Box<Expr>, } pub enum BlockStmtOrExpr { BlockStmt(BlockStmt), Expr(Box<Expr>), } pub enum PatOrExpr { Expr(Box<Expr>), Pat(Box<Pat>), } pub struct OptChainExpr { pub span: Span, pub question_dot_token: Span, pub base: OptChainBase, } pub enum OptChainBase { Member(MemberExpr), Call(OptCall), } pub struct OptCall { pub span: Span, pub callee: Box<Expr>, pub args: Vec<ExprOrSpread>, pub type_args: Option<TsTypeParamInstantiation>, } pub struct Function { pub params: Vec<Param>, pub decorators: Vec<Decorator>, pub span: Span, pub body: Option<BlockStmt>, pub is_generator: bool, pub is_async: bool, pub type_params: Option<TsTypeParamDecl>, pub return_type: Option<TsTypeAnn>, } pub struct Param { pub span: Span, pub decorators: Vec<Decorator>, pub pat: Pat, } pub enum ParamOrTsParamProp { TsParamProp(TsParamProp), Param(Param), } pub struct BindingIdent { pub id: Ident, pub type_ann: Option<TsTypeAnn>, } pub struct Ident { pub span: Span, pub sym: JsWord, pub optional: bool, } pub struct PrivateName { pub span: Span, pub id: Ident, } pub enum JSXObject { JSXMemberExpr(Box<JSXMemberExpr>), Ident(Ident), } pub struct JSXMemberExpr { pub obj: JSXObject, pub prop: 
Ident, } pub struct JSXNamespacedName { pub ns: Ident, pub name: Ident, } pub struct JSXEmptyExpr { pub span: Span, } pub struct JSXExprContainer { pub span: Span, pub expr: JSXExpr, } pub enum JSXExpr { JSXEmptyExpr(JSXEmptyExpr), Expr(Box<Expr>), } pub struct JSXSpreadChild { pub span: Span, pub expr: Box<Expr>, } pub enum JSXElementName { Ident(Ident), JSXMemberExpr(JSXMemberExpr), JSXNamespacedName(JSXNamespacedName), } pub struct JSXOpeningElement { pub name: JSXElementName, pub span: Span, pub attrs: Vec<JSXAttrOrSpread>, pub self_closing: bool, pub type_args: Option<TsTypeParamInstantiation>, } pub enum JSXAttrOrSpread { JSXAttr(JSXAttr), SpreadElement(SpreadElement), } pub struct JSXClosingElement { pub span: Span, pub name: JSXElementName, } pub struct JSXAttr { pub span: Span, pub name: JSXAttrName, pub value: Option<JSXAttrValue>, } pub enum JSXAttrName { Ident(Ident), JSXNamespacedName(JSXNamespacedName), } pub enum JSXAttrValue { Lit(Lit), JSXExprContainer(JSXExprContainer), JSXElement(Box<JSXElement>), JSXFragment(JSXFragment), } pub struct JSXText { pub span: Span, pub value: JsWord, pub raw: JsWord, } pub struct JSXElement { pub span: Span, pub opening: JSXOpeningElement, pub children: Vec<JSXElementChild>, pub closing: Option<JSXClosingElement>, } pub enum JSXElementChild { JSXText(JSXText), JSXExprContainer(JSXExprContainer), JSXSpreadChild(JSXSpreadChild), JSXElement(Box<JSXElement>), JSXFragment(JSXFragment), } pub struct JSXFragment { pub span: Span, pub opening: JSXOpeningFragment, pub children: Vec<JSXElementChild>, pub closing: JSXClosingFragment, } pub struct JSXOpeningFragment { pub span: Span, } pub struct JSXClosingFragment { pub span: Span, } pub struct Invalid { pub span: Span, } pub enum Lit { Str(Str), Bool(Bool), Null(Null), Num(Number), BigInt(BigInt), Regex(Regex), JSXText(JSXText), } pub struct BigInt { pub span: Span, pub value: BigIntValue, pub raw: Option<JsWord>, } pub struct Str { pub span: Span, pub value: JsWord, pub raw: Option<JsWord>, } pub struct Bool { pub span: Span, pub value: bool, } pub struct Null { pub span: Span, } pub struct Regex { pub span: Span, pub exp: JsWord, pub flags: JsWord, } pub struct Number { pub span: Span, pub value: f64, pub raw: Option<JsWord>, } pub enum Program { Module(Module), Script(Script), } pub struct Module { pub span: Span, pub body: Vec<ModuleItem>, pub shebang: Option<JsWord>, } pub struct Script { pub span: Span, pub body: Vec<Stmt>, pub shebang: Option<JsWord>, } pub enum ModuleItem { ModuleDecl(ModuleDecl), Stmt(Stmt), } pub enum ModuleDecl { Import(ImportDecl), ExportDecl(ExportDecl), ExportNamed(NamedExport), ExportDefaultDecl(ExportDefaultDecl), ExportDefaultExpr(ExportDefaultExpr), ExportAll(ExportAll), TsImportEquals(TsImportEqualsDecl), TsExportAssignment(TsExportAssignment), TsNamespaceExport(TsNamespaceExportDecl), } pub struct ExportDefaultExpr { pub span: Span, pub expr: Box<Expr>, } pub struct ExportDecl { pub span: Span, pub decl: Decl, } pub struct ImportDecl { pub span: Span, pub specifiers: Vec<ImportSpecifier>, pub src: Str, pub type_only: bool, pub asserts: Option<ObjectLit>, } pub struct ExportAll { pub span: Span, pub src: Str, pub asserts: Option<ObjectLit>, } pub struct NamedExport { pub span: Span, pub specifiers: Vec<ExportSpecifier>, pub src: Option<Str>, pub type_only: bool, pub asserts: Option<ObjectLit>, } pub struct ExportDefaultDecl { pub span: Span, pub decl: DefaultDecl, } pub enum DefaultDecl { Class(ClassExpr), Fn(FnExpr), TsInterfaceDecl(TsInterfaceDecl), } pub enum 
ImportSpecifier { Named(ImportNamedSpecifier), Default(ImportDefaultSpecifier), Namespace(ImportStarAsSpecifier), } pub struct ImportDefaultSpecifier { pub span: Span, pub local: Ident, } pub struct ImportStarAsSpecifier { pub span: Span, pub local: Ident, } pub struct ImportNamedSpecifier { pub span: Span, pub local: Ident, pub imported: Option<ModuleExportName>, pub is_type_only: bool, } pub enum ExportSpecifier { Namespace(ExportNamespaceSpecifier), Default(ExportDefaultSpecifier), Named(ExportNamedSpecifier), } pub struct ExportNamespaceSpecifier { pub span: Span, pub name: ModuleExportName, } pub struct ExportDefaultSpecifier { pub exported: Ident, } pub enum ModuleExportName { Ident(Ident), Str(Str), } pub struct ExportNamedSpecifier { pub span: Span, pub orig: ModuleExportName, pub exported: Option<ModuleExportName>, pub is_type_only: bool, } pub enum BinaryOp { EqEq, NotEq, EqEqEq, NotEqEq, Lt, LtEq, Gt, GtEq, LShift, RShift, ZeroFillRShift, Add, Sub, Mul, Div, Mod, BitOr, BitXor, BitAnd, LogicalOr, LogicalAnd, In, InstanceOf, Exp, NullishCoalescing, } pub enum AssignOp { Assign, AddAssign, SubAssign, MulAssign, DivAssign, ModAssign, LShiftAssign, RShiftAssign, ZeroFillRShiftAssign, BitOrAssign, BitXorAssign, BitAndAssign, ExpAssign, AndAssign, OrAssign, NullishAssign, } pub enum UpdateOp { PlusPlus, MinusMinus, } pub enum UnaryOp { Minus, Plus, Bang, Tilde, TypeOf, Void, Delete, } pub enum Pat { Ident(BindingIdent), Array(ArrayPat), Rest(RestPat), Object(ObjectPat), Assign(AssignPat), Invalid(Invalid), Expr(Box<Expr>), } pub struct ArrayPat { pub span: Span, pub elems: Vec<Option<Pat>>, pub optional: bool, pub type_ann: Option<TsTypeAnn>, } pub struct ObjectPat { pub span: Span, pub props: Vec<ObjectPatProp>, pub optional: bool, pub type_ann: Option<TsTypeAnn>, } pub struct AssignPat { pub span: Span, pub left: Box<Pat>, pub right: Box<Expr>, pub type_ann: Option<TsTypeAnn>, } pub struct RestPat { pub span: Span, pub dot3_token: Span, pub arg: Box<Pat>, pub type_ann: Option<TsTypeAnn>, } pub enum ObjectPatProp { KeyValue(KeyValuePatProp), Assign(AssignPatProp), Rest(RestPat), } pub struct KeyValuePatProp { pub key: PropName, pub value: Box<Pat>, } pub struct AssignPatProp { pub span: Span, pub key: Ident, pub value: Option<Box<Expr>>, } pub enum Prop { Shorthand(Ident), KeyValue(KeyValueProp), Assign(AssignProp), Getter(GetterProp), Setter(SetterProp), Method(MethodProp), } pub struct KeyValueProp { pub key: PropName, pub value: Box<Expr>, } pub struct AssignProp { pub key: Ident, pub value: Box<Expr>, } pub struct GetterProp { pub span: Span, pub key: PropName, pub type_ann: Option<TsTypeAnn>, pub body: Option<BlockStmt>, } pub struct SetterProp { pub span: Span, pub key: PropName, pub param: Pat, pub body: Option<BlockStmt>, } pub struct MethodProp { pub key: PropName, pub function: Function, } pub enum PropName { Ident(Ident), Str(Str), Num(Number), BigInt(BigInt), Computed(ComputedPropName), } pub struct ComputedPropName { pub span: Span, pub expr: Box<Expr>, } pub struct BlockStmt { pub span: Span, pub stmts: Vec<Stmt>, } pub enum Stmt { Block(BlockStmt), Empty(EmptyStmt), Debugger(DebuggerStmt), With(WithStmt), Return(ReturnStmt), Labeled(LabeledStmt), Break(BreakStmt), Continue(ContinueStmt), If(IfStmt), Switch(SwitchStmt), Throw(ThrowStmt), Try(TryStmt), While(WhileStmt), DoWhile(DoWhileStmt), For(ForStmt), ForIn(ForInStmt), ForOf(ForOfStmt), Decl(Decl), Expr(ExprStmt), } pub struct ExprStmt { pub span: Span, pub expr: Box<Expr>, } pub struct EmptyStmt { pub span: Span, } 
pub struct DebuggerStmt { pub span: Span, } pub struct WithStmt { pub span: Span, pub obj: Box<Expr>, pub body: Box<Stmt>, } pub struct ReturnStmt { pub span: Span, pub arg: Option<Box<Expr>>, } pub struct LabeledStmt { pub span: Span, pub label: Ident, pub body: Box<Stmt>, } pub struct BreakStmt { pub span: Span, pub label: Option<Ident>, } pub struct ContinueStmt { pub span: Span, pub label: Option<Ident>, } pub struct IfStmt { pub span: Span, pub test: Box<Expr>, pub cons: Box<Stmt>, pub alt: Option<Box<Stmt>>, } pub struct SwitchStmt { pub span: Span, pub discriminant: Box<Expr>, pub cases: Vec<SwitchCase>, } pub struct ThrowStmt { pub span: Span, pub arg: Box<Expr>, } pub struct TryStmt { pub span: Span, pub block: BlockStmt, pub handler: Option<CatchClause>, pub finalizer: Option<BlockStmt>, } pub struct WhileStmt { pub span: Span, pub test: Box<Expr>, pub body: Box<Stmt>, } pub struct DoWhileStmt { pub span: Span, pub test: Box<Expr>, pub body: Box<Stmt>, } pub struct ForStmt { pub span: Span, pub init: Option<VarDeclOrExpr>, pub test: Option<Box<Expr>>, pub update: Option<Box<Expr>>, pub body: Box<Stmt>, } pub struct ForInStmt { pub span: Span, pub left: VarDeclOrPat, pub right: Box<Expr>, pub body: Box<Stmt>, } pub struct ForOfStmt { pub span: Span, pub await_token: Option<Span>, pub left: VarDeclOrPat, pub right: Box<Expr>, pub body: Box<Stmt>, } pub struct SwitchCase { pub span: Span, pub test: Option<Box<Expr>>, pub cons: Vec<Stmt>, } pub struct CatchClause { pub span: Span, pub param: Option<Pat>, pub body: BlockStmt, } pub enum VarDeclOrPat { VarDecl(VarDecl), Pat(Pat), } pub enum VarDeclOrExpr { VarDecl(VarDecl), Expr(Box<Expr>), } pub struct TsTypeAnn { pub span: Span, pub type_ann: Box<TsType>, } pub struct TsTypeParamDecl { pub span: Span, pub params: Vec<TsTypeParam>, } pub struct TsTypeParam { pub span: Span, pub name: Ident, pub is_in: bool, pub is_out: bool, pub constraint: Option<Box<TsType>>, pub default: Option<Box<TsType>>, } pub struct TsTypeParamInstantiation { pub span: Span, pub params: Vec<Box<TsType>>, } pub struct TsParamProp { pub span: Span, pub decorators: Vec<Decorator>, pub accessibility: Option<Accessibility>, pub is_override: bool, pub readonly: bool, pub param: TsParamPropParam, } pub enum TsParamPropParam { Ident(BindingIdent), Assign(AssignPat), } pub struct TsQualifiedName { pub left: TsEntityName, pub right: Ident, } pub enum TsEntityName { TsQualifiedName(Box<TsQualifiedName>), Ident(Ident), } pub enum TsTypeElement { TsCallSignatureDecl(TsCallSignatureDecl), TsConstructSignatureDecl(TsConstructSignatureDecl), TsPropertySignature(TsPropertySignature), TsGetterSignature(TsGetterSignature), TsSetterSignature(TsSetterSignature), TsMethodSignature(TsMethodSignature), TsIndexSignature(TsIndexSignature), } pub struct TsCallSignatureDecl { pub span: Span, pub params: Vec<TsFnParam>, pub type_ann: Option<TsTypeAnn>, pub type_params: Option<TsTypeParamDecl>, } pub struct TsConstructSignatureDecl { pub span: Span, pub params: Vec<TsFnParam>, pub type_ann: Option<TsTypeAnn>, pub type_params: Option<TsTypeParamDecl>, } pub struct TsPropertySignature { pub span: Span, pub readonly: bool, pub key: Box<Expr>, pub computed: bool, pub optional: bool, pub init: Option<Box<Expr>>, pub params: Vec<TsFnParam>, pub type_ann: Option<TsTypeAnn>, pub type_params: Option<TsTypeParamDecl>, } pub struct TsGetterSignature { pub span: Span, pub readonly: bool, pub key: Box<Expr>, pub computed: bool, pub optional: bool, pub type_ann: Option<TsTypeAnn>, } pub struct 
TsSetterSignature { pub span: Span, pub readonly: bool, pub key: Box<Expr>, pub computed: bool, pub optional: bool, pub param: TsFnParam, } pub struct TsMethodSignature { pub span: Span, pub readonly: bool, pub key: Box<Expr>, pub computed: bool, pub optional: bool, pub params: Vec<TsFnParam>, pub type_ann: Option<TsTypeAnn>, pub type_params: Option<TsTypeParamDecl>, } pub struct TsIndexSignature { pub params: Vec<TsFnParam>, pub type_ann: Option<TsTypeAnn>, pub readonly: bool, pub is_static: bool, pub span: Span, } pub enum TsType { TsKeywordType(TsKeywordType), TsThisType(TsThisType), TsFnOrConstructorType(TsFnOrConstructorType), TsTypeRef(TsTypeRef), TsTypeQuery(TsTypeQuery), TsTypeLit(TsTypeLit), TsArrayType(TsArrayType), TsTupleType(TsTupleType), TsOptionalType(TsOptionalType), TsRestType(TsRestType), TsUnionOrIntersectionType(TsUnionOrIntersectionType), TsConditionalType(TsConditionalType), TsInferType(TsInferType), TsParenthesizedType(TsParenthesizedType), TsTypeOperator(TsTypeOperator), TsIndexedAccessType(TsIndexedAccessType), TsMappedType(TsMappedType), TsLitType(TsLitType), TsTypePredicate(TsTypePredicate), TsImportType(TsImportType), } pub enum TsFnOrConstructorType { TsFnType(TsFnType), TsConstructorType(TsConstructorType), } pub struct TsKeywordType { pub span: Span, pub kind: TsKeywordTypeKind, } pub enum TsKeywordTypeKind { TsAnyKeyword, TsUnknownKeyword, TsNumberKeyword, TsObjectKeyword, TsBooleanKeyword, TsBigIntKeyword, TsStringKeyword, TsSymbolKeyword, TsVoidKeyword, TsUndefinedKeyword, TsNullKeyword, TsNeverKeyword, TsIntrinsicKeyword, } pub struct TsThisType { pub span: Span, } pub enum TsFnParam { Ident(BindingIdent), Array(ArrayPat), Rest(RestPat), Object(ObjectPat), } pub struct TsFnType { pub span: Span, pub params: Vec<TsFnParam>, pub type_params: Option<TsTypeParamDecl>, pub type_ann: TsTypeAnn, } pub struct TsConstructorType { pub span: Span, pub params: Vec<TsFnParam>, pub type_params: Option<TsTypeParamDecl>, pub type_ann: TsTypeAnn, pub is_abstract: bool, } pub struct TsTypeRef { pub span: Span, pub type_name: TsEntityName, pub type_params: Option<TsTypeParamInstantiation>, } pub struct TsTypePredicate { pub span: Span, pub asserts: bool, pub param_name: TsThisTypeOrIdent, pub type_ann: Option<TsTypeAnn>, } pub enum TsThisTypeOrIdent { TsThisType(TsThisType), Ident(Ident), } pub struct TsTypeQuery { pub span: Span, pub expr_name: TsTypeQueryExpr, pub type_args: Option<TsTypeParamInstantiation>, } pub enum TsTypeQueryExpr { TsEntityName(TsEntityName), Import(TsImportType), } pub struct TsImportType { pub span: Span, pub arg: Str, pub qualifier: Option<TsEntityName>, pub type_args: Option<TsTypeParamInstantiation>, } pub struct TsTypeLit { pub span: Span, pub members: Vec<TsTypeElement>, } pub struct TsArrayType { pub span: Span, pub elem_type: Box<TsType>, } pub struct TsTupleType { pub span: Span, pub elem_types: Vec<TsTupleElement>, } pub struct TsTupleElement { pub span: Span, pub label: Option<Pat>, pub ty: TsType, } pub struct TsOptionalType { pub span: Span, pub type_ann: Box<TsType>, } pub struct TsRestType { pub span: Span, pub type_ann: Box<TsType>, } pub enum TsUnionOrIntersectionType { TsUnionType(TsUnionType), TsIntersectionType(TsIntersectionType), } pub struct TsUnionType { pub span: Span, pub types: Vec<Box<TsType>>, } pub struct TsIntersectionType { pub span: Span, pub types: Vec<Box<TsType>>, } pub struct TsConditionalType { pub span: Span, pub check_type: Box<TsType>, pub extends_type: Box<TsType>, pub true_type: Box<TsType>, pub false_type: 
Box<TsType>, } pub struct TsInferType { pub span: Span, pub type_param: TsTypeParam, } pub struct TsParenthesizedType { pub span: Span, pub type_ann: Box<TsType>, } pub struct TsTypeOperator { pub span: Span, pub op: TsTypeOperatorOp, pub type_ann: Box<TsType>, } pub enum TsTypeOperatorOp { KeyOf, Unique, ReadOnly, } pub struct TsIndexedAccessType { pub span: Span, pub readonly: bool, pub obj_type: Box<TsType>, pub index_type: Box<TsType>, } pub enum TruePlusMinus { True, Plus, Minus, } pub struct TsMappedType { pub span: Span, pub readonly: Option<TruePlusMinus>, pub type_param: TsTypeParam, pub name_type: Option<Box<TsType>>, pub optional: Option<TruePlusMinus>, pub type_ann: Option<Box<TsType>>, } pub struct TsLitType { pub span: Span, pub lit: TsLit, } pub enum TsLit { BigInt(BigInt), Number(Number), Str(Str), Bool(Bool), Tpl(TsTplLitType), } pub struct TsTplLitType { pub span: Span, pub types: Vec<Box<TsType>>, pub quasis: Vec<TplElement>, } pub struct TsInterfaceDecl { pub span: Span, pub id: Ident, pub declare: bool, pub type_params: Option<TsTypeParamDecl>, pub extends: Vec<TsExprWithTypeArgs>, pub body: TsInterfaceBody, } pub struct TsInterfaceBody { pub span: Span, pub body: Vec<TsTypeElement>, } pub struct TsExprWithTypeArgs { pub span: Span, pub expr: Box<Expr>, pub type_args: Option<TsTypeParamInstantiation>, } pub struct TsTypeAliasDecl { pub span: Span, pub declare: bool, pub id: Ident, pub type_params: Option<TsTypeParamDecl>, pub type_ann: Box<TsType>, } pub struct TsEnumDecl { pub span: Span, pub declare: bool, pub is_const: bool, pub id: Ident, pub members: Vec<TsEnumMember>, } pub struct TsEnumMember { pub span: Span, pub id: TsEnumMemberId, pub init: Option<Box<Expr>>, } pub enum TsEnumMemberId { Ident(Ident), Str(Str), } pub struct TsModuleDecl { pub span: Span, pub declare: bool, pub global: bool, pub id: TsModuleName, pub body: Option<TsNamespaceBody>, } pub enum TsNamespaceBody { TsModuleBlock(TsModuleBlock), TsNamespaceDecl(TsNamespaceDecl), } pub struct TsModuleBlock { pub span: Span, pub body: Vec<ModuleItem>, } pub struct TsNamespaceDecl { pub span: Span, pub declare: bool, pub global: bool, pub id: Ident, pub body: Box<TsNamespaceBody>, } pub enum TsModuleName { Ident(Ident), Str(Str), } pub struct TsImportEqualsDecl { pub span: Span, pub declare: bool, pub is_export: bool, pub is_type_only: bool, pub id: Ident, pub module_ref: TsModuleRef, } pub enum TsModuleRef { TsEntityName(TsEntityName), TsExternalModuleRef(TsExternalModuleRef), } pub struct TsExternalModuleRef { pub span: Span, pub expr: Str, } pub struct TsExportAssignment { pub span: Span, pub expr: Box<Expr>, } pub struct TsNamespaceExportDecl { pub span: Span, pub id: Ident, } pub struct TsAsExpr { pub span: Span, pub expr: Box<Expr>, pub type_ann: Box<TsType>, } pub struct TsTypeAssertion { pub span: Span, pub expr: Box<Expr>, pub type_ann: Box<TsType>, } pub struct TsNonNullExpr { pub span: Span, pub expr: Box<Expr>, } pub enum Accessibility { Public, Protected, Private, } pub struct TsConstAssertion { pub span: Span, pub expr: Box<Expr>, } pub struct TsInstantiation { pub span: Span, pub expr: Box<Expr>, pub type_args: TsTypeParamInstantiation, } }); #[macro_export] macro_rules! 
visit_obj_and_computed { () => { fn visit_member_expr(&mut self, n: &$crate::swc_ecma_ast::MemberExpr) { n.obj.visit_with(self); if let $crate::swc_ecma_ast::MemberProp::Computed(c) = &n.prop { c.visit_with(self); } } fn visit_super_prop_expr(&mut self, n: &$crate::swc_ecma_ast::SuperPropExpr) { if let $crate::swc_ecma_ast::SuperProp::Computed(c) = &n.prop { c.visit_with(self); } } }; } #[macro_export] macro_rules! visit_mut_obj_and_computed { () => { fn visit_mut_member_expr(&mut self, n: &mut $crate::swc_ecma_ast::MemberExpr) { n.obj.visit_mut_with(self); if let $crate::swc_ecma_ast::MemberProp::Computed(c) = &mut n.prop { c.visit_mut_with(self); } } fn visit_mut_super_prop_expr(&mut self, n: &mut $crate::swc_ecma_ast::SuperPropExpr) { if let $crate::swc_ecma_ast::SuperProp::Computed(c) = &mut n.prop { c.visit_mut_with(self); } } }; }
impl Fold for SpanRemover { fn fold_span(&mut self, _: Span) -> Span {
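To illustrate how the pieces above compose: a transform is typically written as a `VisitMut`, opts out of walking TypeScript type nodes with `noop_visit_mut_type!`, and is then handed to callers that expect a `Fold` via `as_folder`. A minimal sketch (the visitor and its rename rule are invented for illustration):

use swc_ecma_ast::Ident;

struct UpcaseIdents;

impl VisitMut for UpcaseIdents {
    // This pass only touches identifiers, so TypeScript type nodes
    // can be skipped entirely.
    noop_visit_mut_type!();

    fn visit_mut_ident(&mut self, n: &mut Ident) {
        n.sym = n.sym.to_uppercase().into();
    }
}

// Wrap it so it can be plugged in wherever a `Fold` is expected:
// let mut pass = as_folder(UpcaseIdents);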
background_jobs.rs
use reqwest::blocking::Client; use std::panic::AssertUnwindSafe; use std::sync::{Arc, Mutex, MutexGuard, PoisonError}; use diesel::r2d2::PoolError; use swirl::PerformError; use crate::db::{DieselPool, DieselPooledConn}; use crate::git::Repository; use crate::uploaders::Uploader; impl<'a> swirl::db::BorrowedConnection<'a> for DieselPool { type Connection = DieselPooledConn<'a>; } impl swirl::db::DieselPool for DieselPool { type Error = PoolError; fn get(&self) -> Result<swirl::db::DieselPooledConn<'_, Self>, Self::Error>
} #[allow(missing_debug_implementations)] pub struct Environment { index: Arc<Mutex<Repository>>, pub uploader: Uploader, http_client: AssertUnwindSafe<Client>, } // FIXME: AssertUnwindSafe should be `Clone`, this can be replaced with // `#[derive(Clone)]` if that is fixed in the standard lib impl Clone for Environment { fn clone(&self) -> Self { Self { index: self.index.clone(), uploader: self.uploader.clone(), http_client: AssertUnwindSafe(self.http_client.0.clone()), } } } impl Environment { pub fn new(index: Repository, uploader: Uploader, http_client: Client) -> Self { Self::new_shared(Arc::new(Mutex::new(index)), uploader, http_client) } pub fn new_shared( index: Arc<Mutex<Repository>>, uploader: Uploader, http_client: Client, ) -> Self { Self { index, uploader, http_client: AssertUnwindSafe(http_client), } } pub fn lock_index(&self) -> Result<MutexGuard<'_, Repository>, PerformError> { let repo = self.index.lock().unwrap_or_else(PoisonError::into_inner); repo.reset_head()?; Ok(repo) } /// Returns a client for making HTTP requests to upload crate files. pub(crate) fn http_client(&self) -> &Client { &self.http_client } }
{ self.get() }
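One detail in `lock_index` above worth calling out: `unwrap_or_else(PoisonError::into_inner)` recovers the guard even if a previous job panicked while holding the lock, so one bad job does not poison the index for every job after it. The same standard-library pattern in isolation:

use std::sync::{Mutex, PoisonError};

fn main() {
    let m = Mutex::new(0u32);
    // Yields the guard even when the mutex is poisoned, instead of
    // propagating the PoisonError and failing every later caller.
    let guard = m.lock().unwrap_or_else(PoisonError::into_inner);
    assert_eq!(*guard, 0);
}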
server.rs
//! This module defines a wrapper around Minecraft's //! [ServerListPing](https://wiki.vg/Server_List_Ping) use serde::Deserialize; use std::io; use thiserror::Error; use tokio::net::TcpStream; use crate::protocol::{self, AsyncReadRawPacket, AsyncWriteRawPacket, ProtocolError}; #[derive(Error, Debug)] pub enum ServerError { #[error("error reading or writing data: \"{0}\"")] ProtocolError(#[from] ProtocolError), #[error("failed to connect to server: \"{0}\"")] Io(#[from] io::Error), #[error("invalid JSON response: \"{0}\"")] InvalidJson(#[from] serde_json::Error), } pub type ForgeMods = Vec<ForgeModInfo>; /// Contains information about the server version. #[derive(Debug, Deserialize, Clone)] pub struct ServerVersion { /// The server's Minecraft version, i.e. "1.15.2". pub name: String, /// The server's ServerListPing protocol version. pub protocol: u32, } /// Contains information about a player. #[derive(Debug, Deserialize, Clone)] pub struct ServerPlayer { /// The player's in-game name. pub name: String, /// The player's UUID. pub id: String, } /// Contains information about the currently online /// players. #[derive(Debug, Deserialize, Clone)] pub struct
{
    /// The configured maximum number of players for the
    /// server.
    pub max: u32,
    /// The number of players currently online.
    pub online: u32,
    /// An optional list of player information for
    /// currently online players.
    pub sample: Option<Vec<ServerPlayer>>,
}

/// Contains the server's MOTD.
#[derive(Debug, Deserialize, Clone)]
pub struct BigServerDescription {
    #[serde(default)]
    pub text: String,
    #[serde(default)]
    pub extra: Vec<ExtraDescriptionPart>,
}

/// Contains a segment of the extra part of a server description.
#[derive(Debug, Deserialize, Clone)]
pub struct ExtraDescriptionPart {
    pub text: String,
    #[serde(default)]
    pub color: String,
    #[serde(default)]
    pub bold: bool,
    #[serde(default)]
    pub italic: bool,
}

// TODO: maybe add some more mod lists? Although I'm not aware of other servers sending mod lists.
/// A response containing information about mods, which modded servers send.
#[derive(Debug, Deserialize, Clone)]
#[serde(tag = "type")]
pub enum ModInfo {
    #[serde(rename = "FML")]
    Forge {
        #[serde(rename = "modList")]
        mod_list: ForgeMods,
    },
}

impl ModInfo {
    /// Gets the Forge mod info of this ModInfo. This will only stay until other ModInfo types are discovered.
    #[allow(irrefutable_let_patterns)]
    pub fn to_forge(&self) -> &ForgeMods {
        if let ModInfo::Forge { mod_list } = self {
            mod_list
        } else {
            unreachable!()
        }
    }
}

/// Represents a mod info entry as sent by Forge.
#[derive(Debug, Deserialize, Clone)]
pub struct ForgeModInfo {
    #[serde(alias = "modId")]
    pub modid: String,
    #[serde(alias = "modmarker")]
    pub version: String,
}

/// There are two variants of server descriptions.
/// The Simple variant is rarely used, but the Minecraft client understands it,
/// so we should be compatible too.
#[derive(Debug, Deserialize, Clone)]
#[serde(untagged)]
pub enum ServerDescription {
    /// Used if the `description` field in the JSON response is a String.
    Simple(String),
    /// Used if the `description` field in the JSON response is a `BigServerDescription`.
    Big(BigServerDescription),
}

impl ServerDescription {
    /// Gets the text of this `ServerDescription`, no matter if it is a
    /// `Simple` or `Big` description.
    pub fn get_text(&self) -> &String {
        match self {
            Self::Big(desc) => &desc.text,
            Self::Simple(desc) => desc,
        }
    }
}

/// The decoded JSON response from a status query over
/// ServerListPing.
#[derive(Debug, Deserialize, Clone)]
pub struct StatusResponse {
    /// Information about the server's version.
    pub version: ServerVersion,
    /// Information about currently online players.
    pub players: ServerPlayers,
    /// The server's MOTD.
    pub description: ServerDescription,
    /// Optional field containing a path to the server's
    /// favicon.
    pub favicon: Option<String>,
    pub modinfo: Option<ModInfo>,
    /// Information added by Forge servers.
    #[serde(rename = "forgeData")]
    pub forge_data: Option<ForgeData>,
}

impl StatusResponse {
    /// Gets the Forge mod information of this response, if present.
    pub fn forge_mod_info(&self) -> Option<&ForgeMods> {
        self.modinfo
            .as_ref()
            .map(ModInfo::to_forge)
            .or_else(|| self.forge_data.as_ref().map(|d| &d.mods))
    }
}

#[derive(Debug, Deserialize, Clone)]
pub struct ForgeData {
    pub channels: Vec<ForgeChannel>,
    pub mods: ForgeMods,
}

/// Packet channel of Forge mods.
#[derive(Debug, Deserialize, Clone)]
pub struct ForgeChannel {
    pub res: String,
    pub required: bool,
    pub version: String,
}

const LATEST_PROTOCOL_VERSION: usize = 578;
const DEFAULT_PORT: u16 = 25565;

/// Builder for a Minecraft
/// ServerListPing connection.
pub struct ConnectionConfig { protocol_version: usize, address: String, port: u16, } impl ConnectionConfig { /// Initiates the Minecraft server /// connection build process. pub fn build(address: String) -> Self { ConnectionConfig { protocol_version: LATEST_PROTOCOL_VERSION, address, port: DEFAULT_PORT, } } /// Sets a specific /// protocol version for the connection to /// use. If not specified, the latest version /// will be used. pub fn with_protocol_version(mut self, protocol_version: usize) -> Self { self.protocol_version = protocol_version; self } /// Sets a specific port for the /// connection to use. If not specified, the /// default port of 25565 will be used. pub fn with_port(mut self, port: u16) -> Self { self.port = port; self } /// Connects to the server and consumes the builder. pub async fn connect(self) -> Result<StatusConnection, ServerError> { let stream = TcpStream::connect(format!("{}:{}", self.address, self.port)).await?; Ok(StatusConnection { stream, protocol_version: self.protocol_version, address: self.address, port: self.port, }) } } /// Convenience wrapper for easily connecting /// to a server on the default port with /// the latest protocol version. pub async fn connect(address: String) -> Result<StatusConnection, ServerError> { ConnectionConfig::build(address).connect().await } /// Wraps a built connection pub struct StatusConnection { stream: TcpStream, protocol_version: usize, address: String, port: u16, } impl StatusConnection { /// Sends and reads the packets for the /// ServerListPing status call. pub async fn status_raw(&mut self) -> Result<String, ServerError> { let handshake = protocol::HandshakePacket::new( self.protocol_version, self.address.to_string(), self.port, ); self.stream.write_packet(handshake).await?; self.stream .write_packet(protocol::RequestPacket::new()) .await?; let response: protocol::ResponsePacket = self.stream.read_packet().await?; Ok(response.body) } pub async fn status(&mut self) -> Result<StatusResponse, ServerError> { Ok(serde_json::from_str(&self.status_raw().await?)?) } }
ServerPlayers
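Putting the API above together, a status query reads as in the sketch below. The hostname is a placeholder, and a tokio runtime with the macros feature is assumed, since `connect` is async:

#[tokio::main]
async fn main() -> Result<(), ServerError> {
    // Default port 25565 and the latest protocol version:
    let mut conn = connect("mc.example.com".to_string()).await?;
    let status = conn.status().await?;

    println!(
        "{}: {}/{} players online",
        status.description.get_text(),
        status.players.online,
        status.players.max,
    );
    Ok(())
}

A nonstandard port or an older protocol version goes through the builder instead, e.g. `ConnectionConfig::build(address).with_port(25566).connect().await`.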
models.py
'''OIDC server example''' # import datetime from sqlalchemy import Column, Integer, String from sqlalchemy.dialects.sqlite import JSON # from authlib.integrations.sqla_oauth2 import ( # OAuth2ClientMixin, # OAuth2TokenMixin, # OAuth2AuthorizationCodeMixin # ) from database import Base from utils import disambiguate_referent import uuid class User(Base): # pylint: disable=R0903 '''User class example''' __tablename__ = "user" id = Column(Integer, primary_key=True) uuid = Column(String(100), unique=True) def get_id(self): '''Fetch user identifier''' return self.id # OIDC Authentication Challenge # Template for a proof request that will be sent as a challenge to authenticating users class
(Base):
    '''OIDC Proof Request class example'''
    __tablename__ = 'oidc_proof_request'

    # The oidc scope allows a relying party to specify the proof request the OP should challenge the user with
    oidc_scope = Column(String(100), primary_key=True)
    # Attribute within the proof request that identifies the subject responding to the authentication challenge
    subject_identifier = Column(String(100))
    proof_request = Column(JSON)

    def get_oidc_scope(self):
        '''Fetch oidc proof request identifier'''
        return self.oidc_scope

    def __str__(self):
        # oidc_scope is the primary key; this model has no `id` column
        return f"{self.oidc_scope}"

    def to_json(self):
        proof_request = {
            "name": self.proof_request.get("name", ""),
            "version": self.proof_request.get("version", ""),
            "requested_attributes": {},
            "requested_predicates": {},
        }
        for attr in self.proof_request.get("requested_attributes", []):
            label = attr.get("label", str(uuid.uuid4()))
            if label in proof_request.get("requested_attributes", {}).keys():
                label = disambiguate_referent(label)
            proof_request["requested_attributes"].update({label: attr})
        for attr in self.proof_request.get("requested_predicates", []):
            label = attr.get("label", str(uuid.uuid4()))
            if label in proof_request.get("requested_predicates", {}).keys():
                label = disambiguate_referent(label)
            proof_request["requested_predicates"].update({label: attr})
        return {"proof_request": proof_request}
OIDCProofRequest
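To make the `to_json` reshaping above concrete: the stored JSON keeps requested attributes and predicates as lists of labeled entries, and `to_json` turns each list into a dict keyed by label, disambiguating duplicate labels via `disambiguate_referent` and falling back to a random UUID when a label is missing. Roughly (the field values here are illustrative, not from the source):

stored = {
    "name": "proof-of-email",
    "version": "1.0",
    "requested_attributes": [
        {"label": "email", "name": "email", "restrictions": []},
    ],
    "requested_predicates": [],
}

# What to_json() would produce for a row whose proof_request column
# holds `stored`:
expected = {
    "proof_request": {
        "name": "proof-of-email",
        "version": "1.0",
        "requested_attributes": {
            "email": {"label": "email", "name": "email", "restrictions": []},
        },
        "requested_predicates": {},
    }
}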
watch.go
// Copyright 2015 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v3rpc import ( "context" "io" "math/rand" "sync" "time" "github.com/moederballa/etcd/auth" "github.com/moederballa/etcd/etcdserver" "github.com/moederballa/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/moederballa/etcd/etcdserver/etcdserverpb" "github.com/moederballa/etcd/mvcc" "github.com/moederballa/etcd/mvcc/mvccpb" "go.uber.org/zap" ) type watchServer struct { lg *zap.Logger clusterID int64 memberID int64 maxRequestBytes int sg etcdserver.RaftStatusGetter watchable mvcc.WatchableKV ag AuthGetter } // NewWatchServer returns a new watch server. func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer
var (
    // External test can read this with GetProgressReportInterval()
    // and change this to a small value to finish fast with
    // SetProgressReportInterval().
    progressReportInterval   = 10 * time.Minute
    progressReportIntervalMu sync.RWMutex
)

// GetProgressReportInterval returns the current progress report interval (for testing).
func GetProgressReportInterval() time.Duration {
    progressReportIntervalMu.RLock()
    interval := progressReportInterval
    progressReportIntervalMu.RUnlock()

    // add rand(1/10*progressReportInterval) as jitter so that etcdserver will not
    // send progress notifications to watchers around the same time even when watchers
    // are created around the same time (which is common when a client restarts itself).
    jitter := time.Duration(rand.Int63n(int64(interval) / 10))

    return interval + jitter
}

// SetProgressReportInterval updates the current progress report interval (for testing).
func SetProgressReportInterval(newTimeout time.Duration) {
    progressReportIntervalMu.Lock()
    progressReportInterval = newTimeout
    progressReportIntervalMu.Unlock()
}

// We send ctrl responses inside the read loop. We do not want sends
// to block reads, but we still want the ctrl responses we send to
// be serialized. Thus we use a buffered chan to solve the problem.
// A small buffer should be OK for most cases, since we expect
// ctrl requests to be infrequent.
const ctrlStreamBufLen = 16

// serverWatchStream is an etcd server side stream. It receives requests
// from the client-side gRPC stream. It receives watch events from mvcc.WatchStream,
// and creates responses that are forwarded to the gRPC stream.
// It also forwards control messages such as watch created and canceled.
type serverWatchStream struct {
    lg *zap.Logger

    clusterID int64
    memberID  int64

    maxRequestBytes int

    sg        etcdserver.RaftStatusGetter
    watchable mvcc.WatchableKV
    ag        AuthGetter

    gRPCStream  pb.Watch_WatchServer
    watchStream mvcc.WatchStream
    ctrlStream  chan *pb.WatchResponse

    // mu protects progress, prevKV, fragment
    mu sync.RWMutex
    // tracks the watch IDs that the stream might need to send progress to
    // TODO: combine progress and prevKV into a single struct?
    progress map[mvcc.WatchID]bool
    // records watch IDs that need to return the previous key-value pair
    prevKV map[mvcc.WatchID]bool
    // records fragmented watch IDs
    fragment map[mvcc.WatchID]bool

    // closec indicates the stream is closed.
    closec chan struct{}

    // wg waits for the send loop to complete
    wg sync.WaitGroup
}

func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
    sws := serverWatchStream{
        lg: ws.lg,

        clusterID: ws.clusterID,
        memberID:  ws.memberID,

        maxRequestBytes: ws.maxRequestBytes,

        sg:        ws.sg,
        watchable: ws.watchable,
        ag:        ws.ag,

        gRPCStream:  stream,
        watchStream: ws.watchable.NewWatchStream(),
        // chan for sending control responses like watcher created and canceled.
        ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen),

        progress: make(map[mvcc.WatchID]bool),
        prevKV:   make(map[mvcc.WatchID]bool),
        fragment: make(map[mvcc.WatchID]bool),

        closec: make(chan struct{}),
    }

    sws.wg.Add(1)
    go func() {
        sws.sendLoop()
        sws.wg.Done()
    }()

    errc := make(chan error, 1)
    // Ideally recvLoop would also use sws.wg to signal its completion
    // but when stream.Context().Done() is closed, the stream's recv
    // may continue to block since it uses a different context, leading to
    // deadlock when calling sws.close().
	go func() {
		if rerr := sws.recvLoop(); rerr != nil {
			if isClientCtxErr(stream.Context().Err(), rerr) {
				if sws.lg != nil {
					sws.lg.Debug("failed to receive watch request from gRPC stream", zap.Error(rerr))
				} else {
					plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
				}
			} else {
				if sws.lg != nil {
					sws.lg.Warn("failed to receive watch request from gRPC stream", zap.Error(rerr))
				} else {
					plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
				}
				streamFailures.WithLabelValues("receive", "watch").Inc()
			}
			errc <- rerr
		}
	}()

	select {
	case err = <-errc:
		close(sws.ctrlStream)

	case <-stream.Context().Done():
		err = stream.Context().Err()
		// the only server-side cancellation is noleader for now.
		if err == context.Canceled {
			err = rpctypes.ErrGRPCNoLeader
		}
	}

	sws.close()
	return err
}

func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
	authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
	if err != nil {
		return false
	}
	if authInfo == nil {
		// if auth is enabled, IsRangePermitted() can cause an error
		authInfo = &auth.AuthInfo{}
	}
	return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil
}

func (sws *serverWatchStream) recvLoop() error {
	for {
		req, err := sws.gRPCStream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		switch uv := req.RequestUnion.(type) {
		case *pb.WatchRequest_CreateRequest:
			if uv.CreateRequest == nil {
				break
			}

			creq := uv.CreateRequest
			if len(creq.Key) == 0 {
				// \x00 is the smallest key
				creq.Key = []byte{0}
			}
			if len(creq.RangeEnd) == 0 {
				// force nil since watchstream.Watch distinguishes
				// between nil and []byte{} for single key / >=
				creq.RangeEnd = nil
			}
			if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 {
				// support >= key queries
				creq.RangeEnd = []byte{}
			}

			if !sws.isWatchPermitted(creq) {
				wr := &pb.WatchResponse{
					Header:       sws.newResponseHeader(sws.watchStream.Rev()),
					WatchId:      creq.WatchId,
					Canceled:     true,
					Created:      true,
					CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
				}

				select {
				case sws.ctrlStream <- wr:
				case <-sws.closec:
				}
				return nil
			}

			filters := FiltersFromRequest(creq)

			wsrev := sws.watchStream.Rev()
			rev := creq.StartRevision
			if rev == 0 {
				rev = wsrev + 1
			}
			id, err := sws.watchStream.Watch(mvcc.WatchID(creq.WatchId), creq.Key, creq.RangeEnd, rev, filters...)
			if err == nil {
				sws.mu.Lock()
				if creq.ProgressNotify {
					sws.progress[id] = true
				}
				if creq.PrevKv {
					sws.prevKV[id] = true
				}
				if creq.Fragment {
					sws.fragment[id] = true
				}
				sws.mu.Unlock()
			}
			wr := &pb.WatchResponse{
				Header:   sws.newResponseHeader(wsrev),
				WatchId:  int64(id),
				Created:  true,
				Canceled: err != nil,
			}
			if err != nil {
				wr.CancelReason = err.Error()
			}
			select {
			case sws.ctrlStream <- wr:
			case <-sws.closec:
				return nil
			}

		case *pb.WatchRequest_CancelRequest:
			if uv.CancelRequest != nil {
				id := uv.CancelRequest.WatchId
				err := sws.watchStream.Cancel(mvcc.WatchID(id))
				if err == nil {
					sws.ctrlStream <- &pb.WatchResponse{
						Header:   sws.newResponseHeader(sws.watchStream.Rev()),
						WatchId:  id,
						Canceled: true,
					}
					sws.mu.Lock()
					delete(sws.progress, mvcc.WatchID(id))
					delete(sws.prevKV, mvcc.WatchID(id))
					delete(sws.fragment, mvcc.WatchID(id))
					sws.mu.Unlock()
				}
			}
		case *pb.WatchRequest_ProgressRequest:
			if uv.ProgressRequest != nil {
				sws.ctrlStream <- &pb.WatchResponse{
					Header:  sws.newResponseHeader(sws.watchStream.Rev()),
					WatchId: -1, // response is not associated with any WatchId and will be broadcast to all watch channels
				}
			}
		default:
			// we probably should not shut down the entire stream when
			// we receive an invalid command, so just do nothing instead.
			continue
		}
	}
}

func (sws *serverWatchStream) sendLoop() {
	// watch ids that are currently active
	ids := make(map[mvcc.WatchID]struct{})
	// watch responses pending on a watch id creation message
	pending := make(map[mvcc.WatchID][]*pb.WatchResponse)

	interval := GetProgressReportInterval()
	progressTicker := time.NewTicker(interval)

	defer func() {
		progressTicker.Stop()
		// drain the chan to clean up pending events
		for ws := range sws.watchStream.Chan() {
			mvcc.ReportEventReceived(len(ws.Events))
		}
		for _, wrs := range pending {
			for _, ws := range wrs {
				mvcc.ReportEventReceived(len(ws.Events))
			}
		}
	}()

	for {
		select {
		case wresp, ok := <-sws.watchStream.Chan():
			if !ok {
				return
			}

			// TODO: evs is []mvccpb.Event type;
			// either return []*mvccpb.Event from the mvcc package,
			// or define the protocol buffer with []mvccpb.Event.
evs := wresp.Events events := make([]*mvccpb.Event, len(evs)) sws.mu.RLock() needPrevKV := sws.prevKV[wresp.WatchID] sws.mu.RUnlock() for i := range evs { events[i] = &evs[i] if needPrevKV { opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1} r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt) if err == nil && len(r.KVs) != 0 { events[i].PrevKv = &(r.KVs[0]) } } } canceled := wresp.CompactRevision != 0 wr := &pb.WatchResponse{ Header: sws.newResponseHeader(wresp.Revision), WatchId: int64(wresp.WatchID), Events: events, CompactRevision: wresp.CompactRevision, Canceled: canceled, } if _, okID := ids[wresp.WatchID]; !okID { // buffer if id not yet announced wrs := append(pending[wresp.WatchID], wr) pending[wresp.WatchID] = wrs continue } mvcc.ReportEventReceived(len(evs)) sws.mu.RLock() fragmented, ok := sws.fragment[wresp.WatchID] sws.mu.RUnlock() var serr error if !fragmented && !ok { serr = sws.gRPCStream.Send(wr) } else { serr = sendFragments(wr, sws.maxRequestBytes, sws.gRPCStream.Send) } if serr != nil { if isClientCtxErr(sws.gRPCStream.Context().Err(), serr) { if sws.lg != nil { sws.lg.Debug("failed to send watch response to gRPC stream", zap.Error(serr)) } else { plog.Debugf("failed to send watch response to gRPC stream (%q)", serr.Error()) } } else { if sws.lg != nil { sws.lg.Warn("failed to send watch response to gRPC stream", zap.Error(serr)) } else { plog.Warningf("failed to send watch response to gRPC stream (%q)", serr.Error()) } streamFailures.WithLabelValues("send", "watch").Inc() } return } sws.mu.Lock() if len(evs) > 0 && sws.progress[wresp.WatchID] { // elide next progress update if sent a key update sws.progress[wresp.WatchID] = false } sws.mu.Unlock() case c, ok := <-sws.ctrlStream: if !ok { return } if err := sws.gRPCStream.Send(c); err != nil { if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { if sws.lg != nil { sws.lg.Debug("failed to send watch control response to gRPC stream", zap.Error(err)) } else { plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error()) } } else { if sws.lg != nil { sws.lg.Warn("failed to send watch control response to gRPC stream", zap.Error(err)) } else { plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error()) } streamFailures.WithLabelValues("send", "watch").Inc() } return } // track id creation wid := mvcc.WatchID(c.WatchId) if c.Canceled { delete(ids, wid) continue } if c.Created { // flush buffered events ids[wid] = struct{}{} for _, v := range pending[wid] { mvcc.ReportEventReceived(len(v.Events)) if err := sws.gRPCStream.Send(v); err != nil { if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { if sws.lg != nil { sws.lg.Debug("failed to send pending watch response to gRPC stream", zap.Error(err)) } else { plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error()) } } else { if sws.lg != nil { sws.lg.Warn("failed to send pending watch response to gRPC stream", zap.Error(err)) } else { plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error()) } streamFailures.WithLabelValues("send", "watch").Inc() } return } } delete(pending, wid) } case <-progressTicker.C: sws.mu.Lock() for id, ok := range sws.progress { if ok { sws.watchStream.RequestProgress(id) } sws.progress[id] = true } sws.mu.Unlock() case <-sws.closec: return } } } func sendFragments( wr *pb.WatchResponse, maxRequestBytes int, sendFunc func(*pb.WatchResponse) error) error { // no need to fragment if total request size is smaller // than max request 
	// limit, or the response contains only one event
	if wr.Size() < maxRequestBytes || len(wr.Events) < 2 {
		return sendFunc(wr)
	}

	ow := *wr
	ow.Events = make([]*mvccpb.Event, 0)
	ow.Fragment = true

	var idx int
	for {
		cur := ow
		for _, ev := range wr.Events[idx:] {
			cur.Events = append(cur.Events, ev)
			if len(cur.Events) > 1 && cur.Size() >= maxRequestBytes {
				cur.Events = cur.Events[:len(cur.Events)-1]
				break
			}
			idx++
		}
		if idx == len(wr.Events) {
			// last response has no more fragments
			cur.Fragment = false
		}
		if err := sendFunc(&cur); err != nil {
			return err
		}
		if !cur.Fragment {
			break
		}
	}
	return nil
}

func (sws *serverWatchStream) close() {
	sws.watchStream.Close()
	close(sws.closec)
	sws.wg.Wait()
}

func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
	return &pb.ResponseHeader{
		ClusterId: uint64(sws.clusterID),
		MemberId:  uint64(sws.memberID),
		Revision:  rev,
		RaftTerm:  sws.sg.Term(),
	}
}

func filterNoDelete(e mvccpb.Event) bool {
	return e.Type == mvccpb.DELETE
}

func filterNoPut(e mvccpb.Event) bool {
	return e.Type == mvccpb.PUT
}

// FiltersFromRequest returns "mvcc.FilterFunc" from a given watch create request.
func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
	filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
	for _, ft := range creq.Filters {
		switch ft {
		case pb.WatchCreateRequest_NOPUT:
			filters = append(filters, filterNoPut)
		case pb.WatchCreateRequest_NODELETE:
			filters = append(filters, filterNoDelete)
		default:
		}
	}
	return filters
}
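
// The sketch below is illustrative only and is not part of the upstream etcd
// API: exampleSendFragments is a hypothetical helper showing how sendFragments
// behaves. With a deliberately tiny byte budget, each event is emitted as its
// own fragment; every response but the last carries Fragment == true, which is
// the signal clients use to reassemble the pieces.
func exampleSendFragments() ([]*pb.WatchResponse, error) {
	var got []*pb.WatchResponse
	// collect stands in for a real gRPC stream's Send method.
	collect := func(wr *pb.WatchResponse) error {
		got = append(got, wr)
		return nil
	}
	wr := &pb.WatchResponse{
		Events: []*mvccpb.Event{
			{Kv: &mvccpb.KeyValue{Key: []byte("a"), Value: []byte("1")}},
			{Kv: &mvccpb.KeyValue{Key: []byte("b"), Value: []byte("2")}},
		},
	}
	// A 1-byte limit forces a split: collect is called twice, and only the
	// second response has Fragment == false.
	if err := sendFragments(wr, 1, collect); err != nil {
		return nil, err
	}
	return got, nil
}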
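
// Another illustrative sketch, not part of the upstream code:
// exampleFiltersFromRequest is a hypothetical helper showing how the
// wire-level filter enum in a WatchCreateRequest is translated into mvcc
// filter funcs. A filter func returns true when an event should be dropped,
// so a NOPUT filter reports true for PUT events.
func exampleFiltersFromRequest() bool {
	creq := &pb.WatchCreateRequest{
		Filters: []pb.WatchCreateRequest_FilterType{pb.WatchCreateRequest_NOPUT},
	}
	fs := FiltersFromRequest(creq)
	// A PUT event is filtered out (true); a DELETE event would pass (false).
	return fs[0](mvccpb.Event{Type: mvccpb.PUT})
}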