Dataset columns: file_name (string, length 3–137), prefix (string, length 0–918k), suffix (string, length 0–962k), middle (string, length 0–812k).
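The rows that follow are fill-in-the-middle (FIM) records: each one pairs a file_name with the prefix, suffix, and middle slices of that file's source text (the dump prints the suffix cell before the middle cell, matching the column order above). Below is a minimal sketch of how one record might be reassembled into the original file text, assuming each record is exposed as a plain dict keyed by the column names; the sample row is hypothetical.

```python
# Minimal sketch: rebuild the source text of one FIM record.
# Assumes a record is a dict with the columns listed above
# (file_name, prefix, middle, suffix); the sample values are made up.

def reassemble(record: dict) -> str:
    """Concatenate prefix + middle + suffix back into the full file text."""
    return record["prefix"] + record["middle"] + record["suffix"]

if __name__ == "__main__":
    record = {
        "file_name": "example.py",
        "prefix": "def add(a, b):\n    ",
        "middle": "return a + b",
        "suffix": "\n\nprint(add(1, 2))\n",
    }
    print(reassemble(record))
```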
roberta_coarsness_NER_CRF_train.py
## 粗粒度ner加crf层的例子 import torch from tqdm import tqdm import unicodedata import os import time from torch.utils.data import Dataset, DataLoader from bert_seq2seq import Tokenizer, load_chinese_base_vocab from bert_seq2seq import load_bert data_path = "./state_dict/corase_train_update.txt" vocab_path = "./state_dict/roberta_wwm_vocab.txt" # roberta模型字典的位置 model_name = "roberta" # 选择模型名字 model_path = "./state_dict/roberta_wwm_pytorch_model.bin" # roberta模型位置 recent_model_path = "" # 用于把已经训练好的模型继续训练 model_save_path = "./bert_粗粒度ner_crf.bin" batch_size = 4 lr = 1e-5 word2idx = load_chinese_base_vocab(vocab_path) target = ["O", "B-LOC", "I-LOC", "B-PER", "I-PER", "B-ORG", "I-ORG"] def _is_punctuation(ch): """标点符号类字符判断(全/半角均在此内) """ code = ord(ch) return 33 <= code <= 47 or \ 58 <= code <= 64 or \ 91 <= code <= 96 or \ 123 <= code <= 126 or \ unicodedata.category(ch).startswith('P') def _cjk_punctuation(): return u'\uff02\uff03\uff04\uff05\uff06\uff07\uff08\uff09\uff0a\uff0b\uff0c\uff0d\uff0f\uff1a\uff1b\uff1c\uff1d\uff1e\uff20\uff3b\uff3c\uff3d\uff3e\uff3f\uff40\uff5b\uff5c\uff5d\uff5e\uff5f\uff60\uff62\uff63\uff64\u3000\u3001\u3003\u3008\u3009\u300a\u300b\u300c\u300d\u300e\u300f\u3010\u3011\u3014\u3015\u3016\u3017\u3018\u3019\u301a\u301b\u301c\u301d\u301e\u301f\u3030\u303e\u303f\u2013\u2014\u2018\u2019\u201b\u201c\u201d\u201e\u201f\u2026\u2027\ufe4f\ufe51\ufe54\xb7\uff01\uff1f\uff61\u3002' def _is_cjk_character(ch): """CJK类字符判断(包括中文字符也在此列) 参考:https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) """ code = ord(ch) return 0x4E00 <= code <= 0x9FFF or \ 0x3400 <= code <= 0x4DBF or \ 0x20000 <= code <= 0x2A6DF or \ 0x2A700 <= code <= 0x2B73F or \ 0x2B740 <= code <= 0x2B81F or \ 0x2B820 <= code <= 0x2CEAF or \ 0xF900 <= code <= 0xFAFF or \ 0x2F800 <= code <= 0x2FA1F def _is_control(ch): """控制类字符判断 """ return unicodedata.category(ch) in ('Cc', 'Cf') def word_piece_tokenize(word): """word内分成subword """ if word in word2idx: return [word] tokens = [] start, stop = 0, 0 while start < len(word): stop = len(word) while stop > start: sub = word[start:stop] if start > 0: sub = '##' + sub if sub in word2idx: break stop -= 1 if start == stop: stop += 1 tokens.append(sub) start = stop return tokens def read_corpus(data_path): """ 读原始数据 """ sents_src = [] sents_tgt = [] with open(data_path, encoding="utf-8") as f: lines = f.readlines() row = "" t = [] for line in lines: if line == "\n": if len(row) < 300: sents_src.append(row) sents_tgt.append(t) row = "" t = [] continue line = line.split(" ") row = row + line[0] t.append(line[1].strip("\n")) return sents_src, sents_tgt ## 自定义dataset class NERDataset(Dataset): """ 针对特定数据集,定义一个相关的取数据的方式 """ def __init__(self, sents_src, sents_tgt) : ## 一般init函数是加载所有数据 super(NERDataset, self).__init__() # 读原始数据 # self.sents_src, self.sents_tgt = read_corpus(poem_corpus_dir) self.sents_src = sents_src self.sents_tgt = sents_tgt self.idx2word = {k: v for v, k in word2idx.items()} self.tokenizer = Tokenizer(word2idx) def __getitem__(self, i): ## 得到单个数据 # print(i) src = self.sents_src[i] tgt = self.sents_tgt[i] tgt = ["O"] + tgt + ["O"] tgt = [target.index(i) for i in tgt ] token_ids, token_type_ids = self.tokenizer.encode(src) if len(token_ids) != len(tgt): print("not equal") os._exit(0) output = { "token_ids": token_ids, "token_type_ids": token_type_ids, "target_id": tgt } return output def __len__(self): return len(self.sents_src) def collate_fn(batch): """ 动态padding, batch为一部分sample """ def padding(indice, max_length, pad_idx=0): """ pad 函数 """ pad_indice = [item + 
[pad_idx] * max(0, max_length - len(item)) for item in indice] return torch.tensor(pad_indice) token_ids = [data["token_ids"] for data in batch] max_length = max([len(t) for t in token_ids]) token_type_ids = [da
_ids_padded = padding(token_type_ids, max_length) target_ids_padded = padding(target_ids, max_length) return token_ids_padded, token_type_ids_padded, target_ids_padded def viterbi_decode(nodes, trans): """ 维特比算法 解码 nodes: (seq_len, target_size) trans: (target_size, target_size) """ scores = nodes[0] scores[1:] -= 100000 # 刚开始标签肯定是"O" target_size = nodes.shape[1] seq_len = nodes.shape[0] labels = torch.arange(0, target_size).view(1, -1) path = labels for l in range(1, seq_len): scores = scores.view(-1, 1) M = scores + trans + nodes[l].view(1, -1) scores, ids = M.max(0) path = torch.cat((path[:, ids], labels), dim=0) # print(scores) # print(scores) return path[:, scores.argmax()] def ner_print(model, test_data, device="cpu"): model.eval() idxtword = {v: k for k, v in word2idx.items()} tokenier = Tokenizer(word2idx) trans = model.state_dict()["crf_layer.trans"] for text in test_data: decode = [] text_encode, text_ids = tokenier.encode(text) text_tensor = torch.tensor(text_encode, device=device).view(1, -1) out = model(text_tensor).squeeze(0) # 其实是nodes labels = viterbi_decode(out, trans) starting = False for l in labels: if l > 0: label = target[l.item()] if label[0] == "B": decode.append(label[2: ]) starting = True elif starting: decode.append(label[2: ]) else: starting = False decode.append("O") else : decode.append("O") flag = 0 res = {} text_decode = [idxtword[i] for i in text_encode] for index, each_entity in enumerate(decode): if each_entity != "O": if flag != each_entity: # cur_text = "".join([text[t] for t in mapping[index]]) cur_text = text_decode[index] if each_entity in res.keys(): res[each_entity].append(cur_text) else : res[each_entity] = [cur_text] flag = each_entity elif flag == each_entity: res[each_entity][-1] += text_decode[index] # res[each_entity][-1] += "".join([text[t] for t in mapping[index]]) else : flag = 0 print(res) class Trainer: def __init__(self): # 加载数据 self.sents_src, self.sents_tgt = read_corpus(data_path) self.tokenier = Tokenizer(word2idx) # 判断是否有可用GPU self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print("device: " + str(self.device)) # 定义模型 self.bert_model = load_bert(word2idx, model_name=model_name, model_class="sequence_labeling_crf", target_size=len(target)) ## 加载预训练的模型参数~ self.bert_model.load_pretrain_params(model_path) # 将模型发送到计算设备(GPU或CPU) self.bert_model.set_device(self.device) # 声明需要优化的参数 self.optim_parameters = list(self.bert_model.parameters()) self.optimizer = torch.optim.Adam(self.optim_parameters, lr=lr, weight_decay=1e-3) # 声明自定义的数据加载器 dataset = NERDataset(self.sents_src, self.sents_tgt) self.dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn) def train(self, epoch): # 一个epoch的训练 self.bert_model.train() self.iteration(epoch, dataloader=self.dataloader, train=True) def save(self, save_path): """ 保存模型 """ self.bert_model.save_all_params(save_path) print("{} saved!".format(save_path)) def iteration(self, epoch, dataloader, train=True): total_loss = 0 start_time = time.time() ## 得到当前时间 step = 0 for token_ids, token_type_ids, target_ids in tqdm(dataloader,position=0, leave=True): # print(target_ids.shape) step += 1 if step % 500 == 0: test_data = ["日寇在京掠夺文物详情。", "以书结缘,把欧美,港台流行的食品类食谱汇集一堂。", "明天天津下雨,不知道主任还能不能来学校吃个饭。"] ner_print(self.bert_model, test_data, device=self.device) self.bert_model.train() # 因为传入了target标签,因此会计算loss并且返回 predictions, loss = self.bert_model(token_ids, labels=target_ids ) # 反向传播 if train: # 清空之前的梯度 self.optimizer.zero_grad() # 反向传播, 获取新的梯度 loss.backward() # 
用获取的梯度更新模型参数 self.optimizer.step() # 为计算当前epoch的平均loss total_loss += loss.item() end_time = time.time() spend_time = end_time - start_time # 打印训练信息 print("epoch is " + str(epoch)+". loss is " + str(total_loss) + ". spend time is "+ str(spend_time)) # 保存模型 self.save(model_save_path) if __name__ == '__main__': trainer = Trainer() train_epoches = 25 for epoch in range(train_epoches): # 训练一个epoch trainer.train(epoch) # with open("./state_dict/corase_train_update.txt", "a+") as f: # with open("./corpus/粗粒度NER/人民日报ner数据.txt", "r", encoding="utf-8") as f1 : # lines = f1.readlines() # start = 1 # string = "" # label = "" # for line in lines: # if line == "\n": # f.write("\n") # continue # line = line.strip("\n") # line = line.split(" ") # if _is_punctuation(line[0]) or _is_cjk_character(line[0]): # if string != "": # string = string.lower() # tokens = word_piece_tokenize(string) # 子词 # for t in tokens: # if "##" in t: # f.write(t[2:] + " " + label + "\n") # else : # f.write(t + " " + label + "\n") # # f.write(string + " " + label + "\n") # string = "" # label = "" # f.write(line[0] + " " + line[1] + "\n") # else : # string += line[0] # label = line[1]
ta["token_type_ids"] for data in batch] target_ids = [data["target_id"] for data in batch] token_ids_padded = padding(token_ids, max_length) token_type
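Aside: the viterbi_decode helper in the record above is self-contained, so it can be exercised on its own. The sketch below repeats its logic (with a .clone() added so the emission scores are not mutated in place, which the original version does) and runs it on random emission and transition scores sized for the seven-tag target list defined in that file; the random inputs are purely illustrative.

```python
# Standalone sketch of the Viterbi decoder from the record above.
# nodes: (seq_len, target_size) emission scores; trans: (target_size, target_size)
# transition scores; returns the best tag index for each position.
import torch

def viterbi_decode(nodes, trans):
    scores = nodes[0].clone()
    scores[1:] -= 100000                            # force the first tag to be "O"
    target_size = nodes.shape[1]
    seq_len = nodes.shape[0]
    labels = torch.arange(0, target_size).view(1, -1)
    path = labels
    for l in range(1, seq_len):
        scores = scores.view(-1, 1)
        M = scores + trans + nodes[l].view(1, -1)   # (target_size, target_size)
        scores, ids = M.max(0)                      # best previous tag for each current tag
        path = torch.cat((path[:, ids], labels), dim=0)
    return path[:, scores.argmax()]

if __name__ == "__main__":
    target = ["O", "B-LOC", "I-LOC", "B-PER", "I-PER", "B-ORG", "I-ORG"]
    nodes = torch.randn(6, len(target))             # dummy emission scores
    trans = torch.randn(len(target), len(target))   # dummy transition scores
    print(viterbi_decode(nodes, trans))             # tensor of 6 tag indices
```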
mod.rs
pub mod keyframes; pub mod font_face; pub mod page; pub mod supports; pub mod counter_style; pub mod namespace; pub mod import; pub mod media; pub mod style; pub mod document; pub mod nesting; use media::MediaRule; use import::ImportRule; use style::StyleRule; use keyframes::KeyframesRule; use font_face::FontFaceRule; use page::PageRule; use supports::SupportsRule; use counter_style::CounterStyleRule; use namespace::NamespaceRule; use document::MozDocumentRule; use nesting::NestingRule; use crate::traits::ToCss; use crate::printer::Printer; use crate::declaration::DeclarationHandler; use crate::vendor_prefix::VendorPrefix; use crate::prefixes::Feature; use crate::targets::Browsers; use std::collections::{HashMap, HashSet}; use crate::selector::{is_equivalent, get_prefix, get_necessary_prefixes}; use crate::error::PrinterError; use crate::logical::LogicalProperties; use crate::dependencies::{Dependency, ImportDependency}; pub(crate) trait ToCssWithContext { fn to_css_with_context<W>(&self, dest: &mut Printer<W>, context: Option<&StyleContext>) -> Result<(), PrinterError> where W: std::fmt::Write; } pub(crate) struct StyleContext<'a> { pub rule: &'a StyleRule, pub parent: Option<&'a StyleContext<'a>> } #[derive(Debug, PartialEq)] pub enum CssRule { Media(MediaRule), Import(ImportRule), Style(StyleRule), Keyframes(KeyframesRule), FontFace(FontFaceRule), Page(PageRule), Supports(SupportsRule), CounterStyle(CounterStyleRule), Namespace(NamespaceRule), MozDocument(MozDocumentRule), Nesting(NestingRule), Ignored } impl ToCssWithContext for CssRule { fn to_css_with_context<W>(&self, dest: &mut Printer<W>, context: Option<&StyleContext>) -> Result<(), PrinterError> where W: std::fmt::Write { match self { CssRule::Media(media) => media.to_css_with_context(dest, context), CssRule::Import(import) => import.to_css(dest), CssRule::Style(style) => style.to_css_with_context(dest, context), CssRule::Keyframes(keyframes) => keyframes.to_css(dest), CssRule::FontFace(font_face) => font_face.to_css(dest), CssRule::Page(font_face) => font_face.to_css(dest), CssRule::Supports(supports) => supports.to_css_with_context(dest, context), CssRule::CounterStyle(counter_style) => counter_style.to_css(dest), CssRule::Namespace(namespace) => namespace.to_css(dest), CssRule::MozDocument(document) => document.to_css(dest), CssRule::Nesting(nesting) => nesting.to_css_with_context(dest, context), CssRule::Ignored => Ok(()) } } } impl ToCss for CssRule { fn to_css<W>(&self, dest: &mut Printer<W>) -> Result<(), PrinterError> where W: std::fmt::Write { self.to_css_with_context(dest, None) } } #[derive(Debug, PartialEq)] pub struct CssRuleList(pub Vec<CssRule>); pub(crate) struct MinifyContext<'a> { pub targets: &'a Option<Browsers>, pub handler: &'a mut DeclarationHandler, pub important_handler: &'a mut DeclarationHandler, pub logical_properties: &'a mut LogicalProperties, pub unused_symbols: &'a HashSet<String> } impl CssRuleList { pub(crate) fn minify(&mut self, context: &mut MinifyContext, parent_is_unused: bool) { let mut keyframe_rules = HashMap::new(); let mut rules = Vec::new(); for mut rule in self.0.drain(..) { match &mut rule { CssRule::Keyframes(keyframes) => { if context.unused_symbols.contains(&keyframes.name.0) { continue } keyframes.minify(context); macro_rules! 
set_prefix { ($keyframes: ident) => { if $keyframes.vendor_prefix.contains(VendorPrefix::None) { if let Some(targets) = context.targets { $keyframes.vendor_prefix = Feature::AtKeyframes.prefixes_for(*targets) } } }; } // If there is an existing rule with the same name and identical keyframes, // merge the vendor prefixes from this rule into it. if let Some(existing_idx) = keyframe_rules.get(&keyframes.name) { if let Some(CssRule::Keyframes(existing)) = &mut rules.get_mut(*existing_idx)
} set_prefix!(keyframes); keyframe_rules.insert(keyframes.name.clone(), rules.len()); }, CssRule::Media(media) => { media.minify(context, parent_is_unused); if media.rules.0.is_empty() { continue } }, CssRule::Supports(supports) => { supports.minify(context, parent_is_unused); if supports.rules.0.is_empty() { continue } }, CssRule::MozDocument(document) => document.minify(context), CssRule::Style(style) => { if parent_is_unused || style.minify(context, parent_is_unused) { continue } if let Some(targets) = context.targets { style.vendor_prefix = get_prefix(&style.selectors); if style.vendor_prefix.contains(VendorPrefix::None) { style.vendor_prefix = get_necessary_prefixes(&style.selectors, *targets); } } if let Some(CssRule::Style(last_style_rule)) = rules.last_mut() { // Merge declarations if the selectors are equivalent, and both are compatible with all targets. if style.selectors == last_style_rule.selectors && style.is_compatible(*context.targets) && last_style_rule.is_compatible(*context.targets) && style.rules.0.is_empty() && last_style_rule.rules.0.is_empty() { last_style_rule.declarations.declarations.extend(style.declarations.declarations.drain(..)); last_style_rule.declarations.minify(context.handler, context.important_handler, context.logical_properties); continue } else if style.declarations == last_style_rule.declarations && style.rules.0.is_empty() && last_style_rule.rules.0.is_empty() { // Append the selectors to the last rule if the declarations are the same, and all selectors are compatible. if style.is_compatible(*context.targets) && last_style_rule.is_compatible(*context.targets) { last_style_rule.selectors.0.extend(style.selectors.0.drain(..)); continue } // If both selectors are potentially vendor prefixable, and they are // equivalent minus prefixes, add the prefix to the last rule. if !style.vendor_prefix.is_empty() && !last_style_rule.vendor_prefix.is_empty() && is_equivalent(&style.selectors, &last_style_rule.selectors) { // If the new rule is unprefixed, replace the prefixes of the last rule. // Otherwise, add the new prefix. if style.vendor_prefix.contains(VendorPrefix::None) { last_style_rule.vendor_prefix = style.vendor_prefix; } else { last_style_rule.vendor_prefix |= style.vendor_prefix; } continue } } } }, CssRule::CounterStyle(counter_style) => { if context.unused_symbols.contains(&counter_style.name.0) { continue } } CssRule::Nesting(nesting) => { if nesting.minify(context, parent_is_unused) { continue } } _ => {} } rules.push(rule) } self.0 = rules; } } impl ToCss for CssRuleList { fn to_css<W>(&self, dest: &mut Printer<W>) -> Result<(), PrinterError> where W: std::fmt::Write { self.to_css_with_context(dest, None) } } impl ToCssWithContext for CssRuleList { fn to_css_with_context<W>(&self, dest: &mut Printer<W>, context: Option<&StyleContext>) -> Result<(), PrinterError> where W: std::fmt::Write { let mut first = true; let mut last_without_block = false; for rule in &self.0 { // Skip @import rules if collecting dependencies. if let CssRule::Import(rule) = &rule { if let Some(dependencies) = &mut dest.dependencies { dependencies.push(Dependency::Import(ImportDependency::new(&rule, &dest.filename))); continue; } } if first { first = false; } else { if !dest.minify && !(last_without_block && matches!(rule, CssRule::Import(..) | CssRule::Namespace(..))) { dest.write_char('\n')?; } dest.newline()?; } rule.to_css_with_context(dest, context)?; last_without_block = matches!(rule, CssRule::Import(..) | CssRule::Namespace(..)); } Ok(()) } }
{ if existing.keyframes == keyframes.keyframes { existing.vendor_prefix |= keyframes.vendor_prefix; set_prefix!(existing); continue; } }
deps.ts
export * from "https://deno.land/x/[email protected]/mod.ts"; export type { BotWithCache } from "https://deno.land/x/[email protected]/mod.ts";
main.go
package main import ( "flag" "io/ioutil" "os" "strings" "github.com/lzap/ufacter/facts/cpu" "github.com/lzap/ufacter/facts/disk" "github.com/lzap/ufacter/facts/host" "github.com/lzap/ufacter/facts/link" "github.com/lzap/ufacter/facts/mem" "github.com/lzap/ufacter/facts/net" "github.com/lzap/ufacter/facts/route" fufacter "github.com/lzap/ufacter/facts/ufacter" "github.com/lzap/ufacter/lib/ufacter" "gopkg.in/yaml.v2" ) func main() { conf := ufacter.Config{} modules := flag.String("modules", "cpu,mem,host,disk,net,route,link,ufacter", "Modules to run") yamlFormat := flag.Bool("yaml", false, "Print facts in YAML format") jsonFormat := flag.Bool("json", false, "Print facts in JSON format") noVolatile := flag.Bool("no-volatile", false, "Avoid facts that change often (e.g. free memory)") noExtended := flag.Bool("no-extended", false, "Avoid facts not found in the original facter") customFacts := flag.String("custom-facts", "", "Custom facts stored as YAML file") flag.Parse() if *yamlFormat == true { conf.Formatter = ufacter.NewYAMLFormatter() } else if *jsonFormat == true { conf.Formatter = ufacter.NewJSONFormatter() } else { // YAML is the default output in ufacter conf.Formatter = ufacter.NewYAMLFormatter() } // load custom facts first if _, err := os.Stat(*customFacts); err == nil { yamlMap := make(map[string]interface{}) yamlString, err := ioutil.ReadFile(*customFacts) if err != nil { panic(err) } err = yaml.Unmarshal(yamlString, &yamlMap) if err != nil { panic(err) } for key, value := range yamlMap { conf.Formatter.Add(ufacter.NewStableFact(value, key)) } } // channel buffer hasn't measurable effect only for light formatters factsCh := make(chan ufacter.Fact, 1024) // slice of reporters (put your new reporter HERE) var reporters []func(facts chan<- ufacter.Fact, volatile bool, extended bool) for _, mod := range strings.Split(*modules, ",") { switch mod { case "cpu": reporters = append(reporters, cpu.ReportFacts) case "mem": reporters = append(reporters, mem.ReportFacts) case "link": reporters = append(reporters, link.ReportFacts) case "route": reporters = append(reporters, route.ReportFacts) case "host": reporters = append(reporters, host.ReportFacts) case "net": reporters = append(reporters, net.ReportFacts) case "disk": reporters = append(reporters, disk.ReportFacts) case "ufacter": reporters = append(reporters, fufacter.ReportFacts) } } toClose := len(reporters) if toClose > 0 { // start all reporters for _, r := range reporters { go r(factsCh, !*noVolatile, !*noExtended) } // collect and wait for facts for f := range factsCh { if f.Name == nil { toClose-- } else { if f.Value != nil && f.Value != "" { if (*noVolatile && f.Volatile) || (*noExtended && !f.Native) { // skip } else { conf.Formatter.Add(f) } } } if toClose <= 0
} conf.Formatter.Finish() } }
{ break }
user.py
import MySQLdb as sql from config import sqlconfig class users: def __init__(self): self.connection = sql.connect( host=sqlconfig.host, user=sqlconfig.user, passwd=sqlconfig.passwd ) self.cursor = self.connection.cursor() def __del__(self): self.cursor.close() self.connection.close()
) existing_user = self.cursor.fetchall() if len(existing_user) != 0: print("Existing User") return self.cursor.execute( "INSERT INTO dutchman.Users(Name,Email) VALUES(%s,%s)", (name, email) ) self.connection.commit() self.cursor.execute( "SELECT ID FROM dutchman.Users WHERE Email=%s", [email] ) ID = self.cursor.fetchall() ID = ID[0][0] command = "CREATE TABLE " + email query = ["CREATE DATABASE " + email] query.append( command + ".inbox (ID INT NOT NULL AUTO_INCREMENT PRIMARY KEY, message VARCHAR(2000) NOT NULL, recipient VARCHAR(200), `read` INT NOT NULL)" ) query.append( command + ".outbox (ID INT NOT NULL AUTO_INCREMENT PRIMARY KEY, message VARCHAR(2000) NOT NULL, sender VARCHAR(200), `read` INT NOT NULL)" ) query.append( command + ".queries (ID INT NOT NULL AUTO_INCREMENT PRIMARY KEY, message VARCHAR(2000) NOT NULL, response VARCHAR(2000))" ) query.append( command + ".friends (SR INT NOT NULL AUTO_INCREMENT PRIMARY KEY, ID INT NOT NULL)" ) for q in query: self.cursor.execute(q) self.cursor.close() self.connection.close() def add_friend(self, user, friend): self.cursor.execute( "INSERT INTO " + user + ".friends (ID) VALUES(%s)", [friend] ) self.connection.commit()
def new_user(self, name, email): self.cursor.execute( "SELECT ID FROM dutchman.Users WHERE Email=%s", [email]
star.go
// Copyright 2016 The Gitea Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package models import ( "code.gitea.io/gitea/modules/timeutil" ) // Star represents a starred repo by an user. type Star struct { ID int64 `xorm:"pk autoincr"` UID int64 `xorm:"UNIQUE(s)"` RepoID int64 `xorm:"UNIQUE(s)"` CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"` } // StarRepo or unstar repository. func StarRepo(userID, repoID int64, star bool) error { sess := x.NewSession() defer sess.Close() if err := sess.Begin(); err != nil { return err } if star { if isStaring(sess, userID, repoID) { return nil } if _, err := sess.Insert(&Star{UID: userID, RepoID: repoID}); err != nil { return err } if _, err := sess.Exec("UPDATE `repository` SET num_stars = num_stars + 1 WHERE id = ?", repoID); err != nil { return err } if _, err := sess.Exec("UPDATE `user` SET num_stars = num_stars + 1 WHERE id = ?", userID); err != nil { return err } } else { if !isStaring(sess, userID, repoID) { return nil } if _, err := sess.Delete(&Star{UID: userID, RepoID: repoID}); err != nil { return err } if _, err := sess.Exec("UPDATE `repository` SET num_stars = num_stars - 1 WHERE id = ?", repoID); err != nil { return err } if _, err := sess.Exec("UPDATE `user` SET num_stars = num_stars - 1 WHERE id = ?", userID); err != nil { return err } } return sess.Commit() } // IsStaring checks if user has starred given repository. func IsStaring(userID, repoID int64) bool { return isStaring(x, userID, repoID) } func isStaring(e Engine, userID, repoID int64) bool { has, _ := e.Get(&Star{UID: userID, RepoID: repoID}) return has } // GetStargazers returns the users that starred the repo. func (repo *Repository) GetStargazers(opts ListOptions) ([]*User, error) { sess := x.Where("star.repo_id = ?", repo.ID). Join("LEFT", "star", "`user`.id = star.uid") if opts.Page > 0 { sess = setSessionPagination(sess, &opts) users := make([]*User, 0, opts.PageSize) return users, sess.Find(&users) } users := make([]*User, 0, 8) return users, sess.Find(&users) }
func (u *User) GetStarredRepos(private bool, page, pageSize int, orderBy string) (repos RepositoryList, err error) { if len(orderBy) == 0 { orderBy = "updated_unix DESC" } sess := x. Join("INNER", "star", "star.repo_id = repository.id"). Where("star.uid = ?", u.ID). OrderBy(orderBy) if !private { sess = sess.And("is_private = ?", false) } if page <= 0 { page = 1 } sess.Limit(pageSize, (page-1)*pageSize) repos = make([]*Repository, 0, pageSize) if err = sess.Find(&repos); err != nil { return } if err = repos.loadAttributes(x); err != nil { return } return } // GetStarredRepoCount returns the numbers of repo the user starred. func (u *User) GetStarredRepoCount(private bool) (int64, error) { sess := x. Join("INNER", "star", "star.repo_id = repository.id"). Where("star.uid = ?", u.ID) if !private { sess = sess.And("is_private = ?", false) } return sess.Count(&Repository{}) }
// GetStarredRepos returns the repos the user starred.
test_analytics.py
import operator import numpy as np import pytest import pandas.util._test_decorators as td import pandas as pd from pandas import DataFrame, Series import pandas._testing as tm class TestSeriesAnalytics: def test_prod_numpy16_bug(self): s = Series([1.0, 1.0, 1.0], index=range(3)) result = s.prod() assert not isinstance(result, Series) def test_matmul(self): # matmul test is for GH #10259
def test_ptp(self): # GH21614 N = 1000 arr = np.random.randn(N) ser = Series(arr) assert np.ptp(ser) == np.ptp(arr) def test_repeat(self): s = Series(np.random.randn(3), index=["a", "b", "c"]) reps = s.repeat(5) exp = Series(s.values.repeat(5), index=s.index.values.repeat(5)) tm.assert_series_equal(reps, exp) to_rep = [2, 3, 4] reps = s.repeat(to_rep) exp = Series(s.values.repeat(to_rep), index=s.index.values.repeat(to_rep)) tm.assert_series_equal(reps, exp) def test_numpy_repeat(self): s = Series(np.arange(3), name="x") expected = Series(s.values.repeat(2), name="x", index=s.index.values.repeat(2)) tm.assert_series_equal(np.repeat(s, 2), expected) msg = "the 'axis' parameter is not supported" with pytest.raises(ValueError, match=msg): np.repeat(s, 2, axis=0) def test_is_monotonic(self): s = Series(np.random.randint(0, 10, size=1000)) assert not s.is_monotonic s = Series(np.arange(1000)) assert s.is_monotonic is True assert s.is_monotonic_increasing is True s = Series(np.arange(1000, 0, -1)) assert s.is_monotonic_decreasing is True s = Series(pd.date_range("20130101", periods=10)) assert s.is_monotonic is True assert s.is_monotonic_increasing is True s = Series(list(reversed(s.tolist()))) assert s.is_monotonic is False assert s.is_monotonic_decreasing is True @pytest.mark.parametrize("func", [np.any, np.all]) @pytest.mark.parametrize("kwargs", [dict(keepdims=True), dict(out=object())]) @td.skip_if_np_lt("1.15") def test_validate_any_all_out_keepdims_raises(self, kwargs, func): s = pd.Series([1, 2]) param = list(kwargs)[0] name = func.__name__ msg = ( f"the '{param}' parameter is not " "supported in the pandas " fr"implementation of {name}\(\)" ) with pytest.raises(ValueError, match=msg): func(s, **kwargs) @td.skip_if_np_lt("1.15") def test_validate_sum_initial(self): s = pd.Series([1, 2]) msg = ( r"the 'initial' parameter is not " r"supported in the pandas " r"implementation of sum\(\)" ) with pytest.raises(ValueError, match=msg): np.sum(s, initial=10) def test_validate_median_initial(self): s = pd.Series([1, 2]) msg = ( r"the 'overwrite_input' parameter is not " r"supported in the pandas " r"implementation of median\(\)" ) with pytest.raises(ValueError, match=msg): # It seems like np.median doesn't dispatch, so we use the # method instead of the ufunc. s.median(overwrite_input=True) @td.skip_if_np_lt("1.15") def test_validate_stat_keepdims(self): s = pd.Series([1, 2]) msg = ( r"the 'keepdims' parameter is not " r"supported in the pandas " r"implementation of sum\(\)" ) with pytest.raises(ValueError, match=msg): np.sum(s, keepdims=True) def test_td64_summation_overflow(self): # GH 9442 s = pd.Series(pd.date_range("20130101", periods=100000, freq="H")) s[0] += pd.Timedelta("1s 1ms") # mean result = (s - s.min()).mean() expected = pd.Timedelta((pd.TimedeltaIndex((s - s.min())).asi8 / len(s)).sum()) # the computation is converted to float so # might be some loss of precision assert np.allclose(result.value / 1000, expected.value / 1000) # sum msg = "overflow in timedelta operation" with pytest.raises(ValueError, match=msg): (s - s.min()).sum() s1 = s[0:10000] with pytest.raises(ValueError, match=msg): (s1 - s1.min()).sum() s2 = s[0:1000] (s2 - s2.min()).sum()
a = Series(np.random.randn(4), index=["p", "q", "r", "s"]) b = DataFrame( np.random.randn(3, 4), index=["1", "2", "3"], columns=["p", "q", "r", "s"] ).T # Series @ DataFrame -> Series result = operator.matmul(a, b) expected = Series(np.dot(a.values, b.values), index=["1", "2", "3"]) tm.assert_series_equal(result, expected) # DataFrame @ Series -> Series result = operator.matmul(b.T, a) expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"]) tm.assert_series_equal(result, expected) # Series @ Series -> scalar result = operator.matmul(a, a) expected = np.dot(a.values, a.values) tm.assert_almost_equal(result, expected) # GH 21530 # vector (1D np.array) @ Series (__rmatmul__) result = operator.matmul(a.values, a) expected = np.dot(a.values, a.values) tm.assert_almost_equal(result, expected) # GH 21530 # vector (1D list) @ Series (__rmatmul__) result = operator.matmul(a.values.tolist(), a) expected = np.dot(a.values, a.values) tm.assert_almost_equal(result, expected) # GH 21530 # matrix (2D np.array) @ Series (__rmatmul__) result = operator.matmul(b.T.values, a) expected = np.dot(b.T.values, a.values) tm.assert_almost_equal(result, expected) # GH 21530 # matrix (2D nested lists) @ Series (__rmatmul__) result = operator.matmul(b.T.values.tolist(), a) expected = np.dot(b.T.values, a.values) tm.assert_almost_equal(result, expected) # mixed dtype DataFrame @ Series a["p"] = int(a.p) result = operator.matmul(b.T, a) expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"]) tm.assert_series_equal(result, expected) # different dtypes DataFrame @ Series a = a.astype(int) result = operator.matmul(b.T, a) expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"]) tm.assert_series_equal(result, expected) msg = r"Dot product shape mismatch, \(4,\) vs \(3,\)" # exception raised is of type Exception with pytest.raises(Exception, match=msg): a.dot(a.values[:3]) msg = "matrices are not aligned" with pytest.raises(ValueError, match=msg): a.dot(b.T)
setupapi_windows.go
/* SPDX-License-Identifier: MIT * * Copyright (C) 2019 WireGuard LLC. All Rights Reserved. */ package setupapi import ( "encoding/binary" "fmt" "syscall" "unsafe" "golang.org/x/sys/windows" "golang.org/x/sys/windows/registry" ) //sys setupDiCreateDeviceInfoListEx(classGUID *windows.GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) [failretval==DevInfo(windows.InvalidHandle)] = setupapi.SetupDiCreateDeviceInfoListExW // SetupDiCreateDeviceInfoListEx function creates an empty device information set on a remote or a local computer and optionally associates the set with a device setup class. func SetupDiCreateDeviceInfoListEx(classGUID *windows.GUID, hwndParent uintptr, machineName string) (deviceInfoSet DevInfo, err error) { var machineNameUTF16 *uint16 if machineName != "" { machineNameUTF16, err = syscall.UTF16PtrFromString(machineName) if err != nil { return } } return setupDiCreateDeviceInfoListEx(classGUID, hwndParent, machineNameUTF16, 0) } //sys setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) = setupapi.SetupDiGetDeviceInfoListDetailW // SetupDiGetDeviceInfoListDetail function retrieves information associated with a device information set including the class GUID, remote computer handle, and remote computer name. func SetupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo) (deviceInfoSetDetailData *DevInfoListDetailData, err error) { data := &DevInfoListDetailData{} data.size = uint32(unsafe.Sizeof(*data)) return data, setupDiGetDeviceInfoListDetail(deviceInfoSet, data) } // GetDeviceInfoListDetail method retrieves information associated with a device information set including the class GUID, remote computer handle, and remote computer name. func (deviceInfoSet DevInfo) GetDeviceInfoListDetail() (*DevInfoListDetailData, error) { return SetupDiGetDeviceInfoListDetail(deviceInfoSet) } //sys setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *windows.GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiCreateDeviceInfoW // SetupDiCreateDeviceInfo function creates a new device information element and adds it as a new member to the specified device information set. func SetupDiCreateDeviceInfo(deviceInfoSet DevInfo, deviceName string, classGUID *windows.GUID, deviceDescription string, hwndParent uintptr, creationFlags DICD) (deviceInfoData *DevInfoData, err error) { deviceNameUTF16, err := syscall.UTF16PtrFromString(deviceName) if err != nil { return } var deviceDescriptionUTF16 *uint16 if deviceDescription != "" { deviceDescriptionUTF16, err = syscall.UTF16PtrFromString(deviceDescription) if err != nil { return } } data := &DevInfoData{} data.size = uint32(unsafe.Sizeof(*data)) return data, setupDiCreateDeviceInfo(deviceInfoSet, deviceNameUTF16, classGUID, deviceDescriptionUTF16, hwndParent, creationFlags, data) } // CreateDeviceInfo method creates a new device information element and adds it as a new member to the specified device information set. 
func (deviceInfoSet DevInfo) CreateDeviceInfo(deviceName string, classGUID *windows.GUID, deviceDescription string, hwndParent uintptr, creationFlags DICD) (*DevInfoData, error) { return SetupDiCreateDeviceInfo(deviceInfoSet, deviceName, classGUID, deviceDescription, hwndParent, creationFlags) } //sys setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiEnumDeviceInfo // SetupDiEnumDeviceInfo function returns a DevInfoData structure that specifies a device information element in a device information set. func SetupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex int) (*DevInfoData, error) { data := &DevInfoData{} data.size = uint32(unsafe.Sizeof(*data)) return data, setupDiEnumDeviceInfo(deviceInfoSet, uint32(memberIndex), data) } // EnumDeviceInfo method returns a DevInfoData structure that specifies a device information element in a device information set. func (deviceInfoSet DevInfo) EnumDeviceInfo(memberIndex int) (*DevInfoData, error) { return SetupDiEnumDeviceInfo(deviceInfoSet, memberIndex) } // SetupDiDestroyDeviceInfoList function deletes a device information set and frees all associated memory. //sys SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) = setupapi.SetupDiDestroyDeviceInfoList // Close method deletes a device information set and frees all associated memory. func (deviceInfoSet DevInfo) Close() error { return SetupDiDestroyDeviceInfoList(deviceInfoSet) } //sys SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) = setupapi.SetupDiBuildDriverInfoList // BuildDriverInfoList method builds a list of drivers that is associated with a specific device or with the global class driver list for a device information set. func (deviceInfoSet DevInfo) BuildDriverInfoList(deviceInfoData *DevInfoData, driverType SPDIT) error { return SetupDiBuildDriverInfoList(deviceInfoSet, deviceInfoData, driverType) } //sys SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) = setupapi.SetupDiCancelDriverInfoSearch // CancelDriverInfoSearch method cancels a driver list search that is currently in progress in a different thread. func (deviceInfoSet DevInfo) CancelDriverInfoSearch() error { return SetupDiCancelDriverInfoSearch(deviceInfoSet) } //sys setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiEnumDriverInfoW // SetupDiEnumDriverInfo function enumerates the members of a driver list. func SetupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex int) (*DrvInfoData, error) { data := &DrvInfoData{} data.size = uint32(unsafe.Sizeof(*data)) return data, setupDiEnumDriverInfo(deviceInfoSet, deviceInfoData, driverType, uint32(memberIndex), data) } // EnumDriverInfo method enumerates the members of a driver list. func (deviceInfoSet DevInfo) EnumDriverInfo(deviceInfoData *DevInfoData, driverType SPDIT, memberIndex int) (*DrvInfoData, error) { return SetupDiEnumDriverInfo(deviceInfoSet, deviceInfoData, driverType, memberIndex) } //sys setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiGetSelectedDriverW // SetupDiGetSelectedDriver function retrieves the selected driver for a device information set or a particular device information element. 
func SetupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (*DrvInfoData, error) { data := &DrvInfoData{} data.size = uint32(unsafe.Sizeof(*data)) return data, setupDiGetSelectedDriver(deviceInfoSet, deviceInfoData, data) } // GetSelectedDriver method retrieves the selected driver for a device information set or a particular device information element. func (deviceInfoSet DevInfo) GetSelectedDriver(deviceInfoData *DevInfoData) (*DrvInfoData, error) { return SetupDiGetSelectedDriver(deviceInfoSet, deviceInfoData) } //sys SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiSetSelectedDriverW // SetSelectedDriver method sets, or resets, the selected driver for a device information element or the selected class driver for a device information set. func (deviceInfoSet DevInfo) SetSelectedDriver(deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) error { return SetupDiSetSelectedDriver(deviceInfoSet, deviceInfoData, driverInfoData) } //sys setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetDriverInfoDetailW // SetupDiGetDriverInfoDetail function retrieves driver information detail for a device information set or a particular device information element in the device information set. func SetupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (*DrvInfoDetailData, error) { const bufCapacity = 0x800 buf := [bufCapacity]byte{} var bufLen uint32 data := (*DrvInfoDetailData)(unsafe.Pointer(&buf[0])) data.size = uint32(unsafe.Sizeof(*data)) err := setupDiGetDriverInfoDetail(deviceInfoSet, deviceInfoData, driverInfoData, data, bufCapacity, &bufLen) if err == nil
if errWin, ok := err.(syscall.Errno); ok && errWin == windows.ERROR_INSUFFICIENT_BUFFER { // The buffer was too small. Now that we got the required size, create another one big enough and retry. buf := make([]byte, bufLen) data := (*DrvInfoDetailData)(unsafe.Pointer(&buf[0])) data.size = uint32(unsafe.Sizeof(*data)) err = setupDiGetDriverInfoDetail(deviceInfoSet, deviceInfoData, driverInfoData, data, bufLen, &bufLen) if err == nil { data.size = bufLen return data, nil } } return nil, err } // GetDriverInfoDetail method retrieves driver information detail for a device information set or a particular device information element in the device information set. func (deviceInfoSet DevInfo) GetDriverInfoDetail(deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (*DrvInfoDetailData, error) { return SetupDiGetDriverInfoDetail(deviceInfoSet, deviceInfoData, driverInfoData) } //sys SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) = setupapi.SetupDiDestroyDriverInfoList // DestroyDriverInfoList method deletes a driver list. func (deviceInfoSet DevInfo) DestroyDriverInfoList(deviceInfoData *DevInfoData, driverType SPDIT) error { return SetupDiDestroyDriverInfoList(deviceInfoSet, deviceInfoData, driverType) } //sys setupDiGetClassDevsEx(classGUID *windows.GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) [failretval==DevInfo(windows.InvalidHandle)] = setupapi.SetupDiGetClassDevsExW // SetupDiGetClassDevsEx function returns a handle to a device information set that contains requested device information elements for a local or a remote computer. func SetupDiGetClassDevsEx(classGUID *windows.GUID, enumerator string, hwndParent uintptr, flags DIGCF, deviceInfoSet DevInfo, machineName string) (handle DevInfo, err error) { var enumeratorUTF16 *uint16 if enumerator != "" { enumeratorUTF16, err = syscall.UTF16PtrFromString(enumerator) if err != nil { return } } var machineNameUTF16 *uint16 if machineName != "" { machineNameUTF16, err = syscall.UTF16PtrFromString(machineName) if err != nil { return } } return setupDiGetClassDevsEx(classGUID, enumeratorUTF16, hwndParent, flags, deviceInfoSet, machineNameUTF16, 0) } // SetupDiCallClassInstaller function calls the appropriate class installer, and any registered co-installers, with the specified installation request (DIF code). //sys SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiCallClassInstaller // CallClassInstaller member calls the appropriate class installer, and any registered co-installers, with the specified installation request (DIF code). func (deviceInfoSet DevInfo) CallClassInstaller(installFunction DI_FUNCTION, deviceInfoData *DevInfoData) error { return SetupDiCallClassInstaller(installFunction, deviceInfoSet, deviceInfoData) } //sys setupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key windows.Handle, err error) [failretval==windows.InvalidHandle] = setupapi.SetupDiOpenDevRegKey // SetupDiOpenDevRegKey function opens a registry key for device-specific configuration information. 
func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, scope DICS_FLAG, hwProfile uint32, keyType DIREG, samDesired uint32) (registry.Key, error) { handle, err := setupDiOpenDevRegKey(deviceInfoSet, deviceInfoData, scope, hwProfile, keyType, samDesired) return registry.Key(handle), err } // OpenDevRegKey method opens a registry key for device-specific configuration information. func (deviceInfoSet DevInfo) OpenDevRegKey(DeviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (registry.Key, error) { return SetupDiOpenDevRegKey(deviceInfoSet, DeviceInfoData, Scope, HwProfile, KeyType, samDesired) } //sys setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetDeviceRegistryPropertyW // SetupDiGetDeviceRegistryProperty function retrieves a specified Plug and Play device property. func SetupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP) (value interface{}, err error) { buf := make([]byte, 0x100) var dataType, bufLen uint32 err = setupDiGetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, &dataType, &buf[0], uint32(cap(buf)), &bufLen) if err == nil { // The buffer was sufficiently big. return getRegistryValue(buf[:bufLen], dataType) } if errWin, ok := err.(syscall.Errno); ok && errWin == windows.ERROR_INSUFFICIENT_BUFFER { // The buffer was too small. Now that we got the required size, create another one big enough and retry. buf = make([]byte, bufLen) err = setupDiGetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, &dataType, &buf[0], uint32(cap(buf)), &bufLen) if err == nil { return getRegistryValue(buf[:bufLen], dataType) } } return } func getRegistryValue(buf []byte, dataType uint32) (interface{}, error) { switch dataType { case windows.REG_SZ: return windows.UTF16ToString(BufToUTF16(buf)), nil case windows.REG_EXPAND_SZ: return registry.ExpandString(windows.UTF16ToString(BufToUTF16(buf))) case windows.REG_BINARY: return buf, nil case windows.REG_DWORD_LITTLE_ENDIAN: return binary.LittleEndian.Uint32(buf), nil case windows.REG_DWORD_BIG_ENDIAN: return binary.BigEndian.Uint32(buf), nil case windows.REG_MULTI_SZ: bufW := BufToUTF16(buf) a := []string{} for i := 0; i < len(bufW); { j := i + wcslen(bufW[i:]) if i < j { a = append(a, windows.UTF16ToString(bufW[i:j])) } i = j + 1 } return a, nil case windows.REG_QWORD_LITTLE_ENDIAN: return binary.LittleEndian.Uint64(buf), nil default: return nil, fmt.Errorf("Unsupported registry value type: %v", dataType) } } // BufToUTF16 function reinterprets []byte buffer as []uint16 func BufToUTF16(buf []byte) []uint16 { sl := struct { addr *uint16 len int cap int }{(*uint16)(unsafe.Pointer(&buf[0])), len(buf) / 2, cap(buf) / 2} return *(*[]uint16)(unsafe.Pointer(&sl)) } // UTF16ToBuf function reinterprets []uint16 as []byte func UTF16ToBuf(buf []uint16) []byte { sl := struct { addr *byte len int cap int }{(*byte)(unsafe.Pointer(&buf[0])), len(buf) * 2, cap(buf) * 2} return *(*[]byte)(unsafe.Pointer(&sl)) } func wcslen(str []uint16) int { for i := 0; i < len(str); i++ { if str[i] == 0 { return i } } return len(str) } // GetDeviceRegistryProperty method retrieves a specified Plug and Play device property. 
func (deviceInfoSet DevInfo) GetDeviceRegistryProperty(deviceInfoData *DevInfoData, property SPDRP) (interface{}, error) { return SetupDiGetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property) } //sys setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) = setupapi.SetupDiSetDeviceRegistryPropertyW // SetupDiSetDeviceRegistryProperty function sets a Plug and Play device property for a device. func SetupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffers []byte) error { return setupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, &propertyBuffers[0], uint32(len(propertyBuffers))) } // SetDeviceRegistryProperty function sets a Plug and Play device property for a device. func (deviceInfoSet DevInfo) SetDeviceRegistryProperty(deviceInfoData *DevInfoData, property SPDRP, propertyBuffers []byte) error { return SetupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, propertyBuffers) } //sys setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) = setupapi.SetupDiGetDeviceInstallParamsW // SetupDiGetDeviceInstallParams function retrieves device installation parameters for a device information set or a particular device information element. func SetupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (*DevInstallParams, error) { params := &DevInstallParams{} params.size = uint32(unsafe.Sizeof(*params)) return params, setupDiGetDeviceInstallParams(deviceInfoSet, deviceInfoData, params) } // GetDeviceInstallParams method retrieves device installation parameters for a device information set or a particular device information element. func (deviceInfoSet DevInfo) GetDeviceInstallParams(deviceInfoData *DevInfoData) (*DevInstallParams, error) { return SetupDiGetDeviceInstallParams(deviceInfoSet, deviceInfoData) } // SetupDiGetClassInstallParams function retrieves class installation parameters for a device information set or a particular device information element. //sys SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetClassInstallParamsW // GetClassInstallParams method retrieves class installation parameters for a device information set or a particular device information element. func (deviceInfoSet DevInfo) GetClassInstallParams(deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) error { return SetupDiGetClassInstallParams(deviceInfoSet, deviceInfoData, classInstallParams, classInstallParamsSize, requiredSize) } //sys SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) = setupapi.SetupDiSetDeviceInstallParamsW // SetDeviceInstallParams member sets device installation parameters for a device information set or a particular device information element. 
func (deviceInfoSet DevInfo) SetDeviceInstallParams(deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) error { return SetupDiSetDeviceInstallParams(deviceInfoSet, deviceInfoData, deviceInstallParams) } // SetupDiSetClassInstallParams function sets or clears class install parameters for a device information set or a particular device information element. //sys SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) = setupapi.SetupDiSetClassInstallParamsW // SetClassInstallParams method sets or clears class install parameters for a device information set or a particular device information element. func (deviceInfoSet DevInfo) SetClassInstallParams(deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) error { return SetupDiSetClassInstallParams(deviceInfoSet, deviceInfoData, classInstallParams, classInstallParamsSize) } //sys setupDiClassNameFromGuidEx(classGUID *windows.GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) = setupapi.SetupDiClassNameFromGuidExW // SetupDiClassNameFromGuidEx function retrieves the class name associated with a class GUID. The class can be installed on a local or remote computer. func SetupDiClassNameFromGuidEx(classGUID *windows.GUID, machineName string) (className string, err error) { var classNameUTF16 [MAX_CLASS_NAME_LEN]uint16 var machineNameUTF16 *uint16 if machineName != "" { machineNameUTF16, err = syscall.UTF16PtrFromString(machineName) if err != nil { return } } err = setupDiClassNameFromGuidEx(classGUID, &classNameUTF16[0], MAX_CLASS_NAME_LEN, nil, machineNameUTF16, 0) if err != nil { return } className = windows.UTF16ToString(classNameUTF16[:]) return } //sys setupDiClassGuidsFromNameEx(className *uint16, classGuidList *windows.GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) = setupapi.SetupDiClassGuidsFromNameExW // SetupDiClassGuidsFromNameEx function retrieves the GUIDs associated with the specified class name. This resulting list contains the classes currently installed on a local or remote computer. func SetupDiClassGuidsFromNameEx(className string, machineName string) (classGuidLists []windows.GUID, err error) { classNameUTF16, err := syscall.UTF16PtrFromString(className) if err != nil { return } const bufCapacity = 4 var buf [bufCapacity]windows.GUID var bufLen uint32 var machineNameUTF16 *uint16 if machineName != "" { machineNameUTF16, err = syscall.UTF16PtrFromString(machineName) if err != nil { return } } err = setupDiClassGuidsFromNameEx(classNameUTF16, &buf[0], bufCapacity, &bufLen, machineNameUTF16, 0) if err == nil { // The GUID array was sufficiently big. Return its slice. return buf[:bufLen], nil } if errWin, ok := err.(syscall.Errno); ok && errWin == windows.ERROR_INSUFFICIENT_BUFFER { // The GUID array was too small. Now that we got the required size, create another one big enough and retry. buf := make([]windows.GUID, bufLen) err = setupDiClassGuidsFromNameEx(classNameUTF16, &buf[0], bufLen, &bufLen, machineNameUTF16, 0) if err == nil { return buf[:bufLen], nil } } return } //sys setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiGetSelectedDevice // SetupDiGetSelectedDevice function retrieves the selected device information element in a device information set. 
func SetupDiGetSelectedDevice(deviceInfoSet DevInfo) (*DevInfoData, error) { data := &DevInfoData{} data.size = uint32(unsafe.Sizeof(*data)) return data, setupDiGetSelectedDevice(deviceInfoSet, data) } // GetSelectedDevice method retrieves the selected device information element in a device information set. func (deviceInfoSet DevInfo) GetSelectedDevice() (*DevInfoData, error) { return SetupDiGetSelectedDevice(deviceInfoSet) } // SetupDiSetSelectedDevice function sets a device information element as the selected member of a device information set. This function is typically used by an installation wizard. //sys SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiSetSelectedDevice // SetSelectedDevice method sets a device information element as the selected member of a device information set. This function is typically used by an installation wizard. func (deviceInfoSet DevInfo) SetSelectedDevice(deviceInfoData *DevInfoData) error { return SetupDiSetSelectedDevice(deviceInfoSet, deviceInfoData) }
{ // The buffer was sufficiently big. data.size = bufLen return data, nil }
ibmc.go
package bmc import ( "net/url" "strings" ) func init() { registerFactory("ibmc", newIbmcAccessDetails, []string{"http", "https"}) } func newIbmcAccessDetails(parsedURL *url.URL, disableCertificateVerification bool) (AccessDetails, error)
type ibmcAccessDetails struct { bmcType string host string path string disableCertificateVerification bool } func (a *ibmcAccessDetails) Type() string { return a.bmcType } // NeedsMAC returns true when the host is going to need a separate // port created rather than having it discovered. func (a *ibmcAccessDetails) NeedsMAC() bool { // For the inspection to work, we need a MAC address // https://github.com/metal3-io/baremetal-operator/pull/284#discussion_r317579040 return true } func (a *ibmcAccessDetails) Driver() string { return "ibmc" } func (a *ibmcAccessDetails) DisableCertificateVerification() bool { return a.disableCertificateVerification } const ibmcDefaultScheme = "https" // DriverInfo returns a data structure to pass as the DriverInfo // parameter when creating a node in Ironic. The structure is // pre-populated with the access information, and the caller is // expected to add any other information that might be needed (such as // the kernel and ramdisk locations). func (a *ibmcAccessDetails) DriverInfo(bmcCreds Credentials) map[string]interface{} { ibmcAddress := []string{} schemes := strings.Split(a.bmcType, "+") if len(schemes) > 1 { ibmcAddress = append(ibmcAddress, schemes[1]) } else { ibmcAddress = append(ibmcAddress, ibmcDefaultScheme) } ibmcAddress = append(ibmcAddress, "://") ibmcAddress = append(ibmcAddress, a.host) ibmcAddress = append(ibmcAddress, a.path) result := map[string]interface{}{ "ibmc_username": bmcCreds.Username, "ibmc_password": bmcCreds.Password, "ibmc_address": strings.Join(ibmcAddress, ""), } if a.disableCertificateVerification { result["ibmc_verify_ca"] = false } return result } func (a *ibmcAccessDetails) BootInterface() string { return "pxe" } func (a *ibmcAccessDetails) ManagementInterface() string { return "ibmc" } func (a *ibmcAccessDetails) PowerInterface() string { return "ibmc" } func (a *ibmcAccessDetails) RAIDInterface() string { return "" } func (a *ibmcAccessDetails) VendorInterface() string { return "" }
{ return &ibmcAccessDetails{ bmcType: parsedURL.Scheme, host: parsedURL.Host, path: parsedURL.Path, disableCertificateVerification: disableCertificateVerification, }, nil }
policy.go
/* Portions Copyright 2019 The Kubernetes Authors. Portions Copyright 2019 Aspen Mesh Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by lister-gen. DO NOT EDIT. package v1alpha1 import ( v1alpha1 "github.com/XiaYinchang/istio-client-go/pkg/apis/authentication/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) // PolicyLister helps list Policies. type PolicyLister interface { // List lists all Policies in the indexer. List(selector labels.Selector) (ret []*v1alpha1.Policy, err error) // Policies returns an object that can list and get Policies. Policies(namespace string) PolicyNamespaceLister PolicyListerExpansion } // policyLister implements the PolicyLister interface. type policyLister struct { indexer cache.Indexer } // NewPolicyLister returns a new PolicyLister. func NewPolicyLister(indexer cache.Indexer) PolicyLister
// List lists all Policies in the indexer. func (s *policyLister) List(selector labels.Selector) (ret []*v1alpha1.Policy, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { ret = append(ret, m.(*v1alpha1.Policy)) }) return ret, err } // Policies returns an object that can list and get Policies. func (s *policyLister) Policies(namespace string) PolicyNamespaceLister { return policyNamespaceLister{indexer: s.indexer, namespace: namespace} } // PolicyNamespaceLister helps list and get Policies. type PolicyNamespaceLister interface { // List lists all Policies in the indexer for a given namespace. List(selector labels.Selector) (ret []*v1alpha1.Policy, err error) // Get retrieves the Policy from the indexer for a given namespace and name. Get(name string) (*v1alpha1.Policy, error) PolicyNamespaceListerExpansion } // policyNamespaceLister implements the PolicyNamespaceLister // interface. type policyNamespaceLister struct { indexer cache.Indexer namespace string } // List lists all Policies in the indexer for a given namespace. func (s policyNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Policy, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { ret = append(ret, m.(*v1alpha1.Policy)) }) return ret, err } // Get retrieves the Policy from the indexer for a given namespace and name. func (s policyNamespaceLister) Get(name string) (*v1alpha1.Policy, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { return nil, errors.NewNotFound(v1alpha1.Resource("policy"), name) } return obj.(*v1alpha1.Policy), nil }
{ return &policyLister{indexer: indexer} }
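A rough usage sketch for the generated lister, assuming it sits in the same package as the code above, that Policy embeds metav1.ObjectMeta, and that metav1 ("k8s.io/apimachinery/pkg/apis/meta/v1") is imported. In real deployments the cache.Indexer is filled by an informer rather than by hand; the namespace and policy name below are placeholders.

// demoPolicyLookup lists and fetches Policies from a hand-filled indexer.
func demoPolicyLookup() (*v1alpha1.Policy, error) {
	// MetaNamespaceKeyFunc stores objects under "namespace/name", the key shape
	// that the namespace lister's GetByKey call expects.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	if err := indexer.Add(&v1alpha1.Policy{
		ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "istio-system"},
	}); err != nil {
		return nil, err
	}

	lister := NewPolicyLister(indexer)

	// List across all namespaces, then fetch a single Policy by namespace and name.
	if _, err := lister.List(labels.Everything()); err != nil {
		return nil, err
	}
	return lister.Policies("istio-system").Get("default")
}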
musica3.py
'''BOMusic'''
from tkinter import *
import pygame


class App3(Toplevel):
    cor1 = '#171717'
    cor2 = '#58009D'
    cor3 = '#efefef'

    def __init__(self, original):
        self.frame_original = original
        Toplevel.__init__(self)
        self.config()
        self.frames()
        self.widgetsButton1()
        self.widgetsButton2()
        self.widgetsButton3()
        self.widgetsimg()
        self.widgetstitulo()

    def config(self):
        self.title('BoMusic')
        self.geometry('380x380+700+350')
        self.resizable(False, False)
        self.configure(bg = self.cor1)
        pygame.mixer.init()
        # Raw string so the backslash in the Windows path is not treated as an escape.
        self.iconbitmap(r'provaDevSistemas\icone.ico')

    def som(self):
        pygame.mixer.music.load(r'provaDevSistemas\musica3.mp3')
        pygame.mixer.music.play()
        print('playing')
        print('')

    def stop(self):
        pygame.mixer.music.pause()
        print('paused')
        print('')

    def clickbtn(self):
        self.withdraw()
        #self.subFrame = Musicas(self)
        self.stop()

    def onClose(self):
        self.stop()
        self.destroy()
        self.frame_original.show()

    def 
(self): self.titulo = Frame( self, bg = self.cor1, ) self.titulo.place( x = 0, y = 20, width = 380, height = 100 ) self.logo = Frame( self, bg = self.cor1, ) self.logo.place( x = 0, y = 140, width = 380, height = 100 ) self.voltar = Frame( self, bg = self.cor3, ) self.voltar.place( x = 12.5, y = 280, width = 110, height = 50 ) self.play = Frame( self, bg = self.cor1, ) self.play.place( x = 135, y = 280, width = 110, height = 50 ) self.parar = Frame( self, bg = self.cor3, ) self.parar.place( x = 257.5, y = 280, width = 110, height = 50 ) def widgetstitulo(self): title = Label(self.titulo, text='Twenty One Pilots\nShy Away', font=('Poppins', 20, 'bold'), bg = self.cor1, fg = self.cor2, ) title.pack() def widgetsimg(self): self.album = PhotoImage(file = r'provaDevSistemas\album3.png') self.img2 = Label( self.logo, image = self.album, bd = 0 ) self.img2.pack() def widgetsButton1(self): self.botao3 = Button( self.voltar, text = 'Voltar', font = ('Poppins', 25), fg = self.cor3, activeforeground = self.cor3, bg = self.cor2, activebackground = self.cor2, command=self.onClose ) self.botao3.place( relx = 0, rely = 0, relwidth = 1, relheight = 1 ) def widgetsButton2(self): self.botao = Button( self.play, text = 'Play', font = ('Poppins', 25), fg = self.cor3, activeforeground = self.cor3, bg = self.cor2, activebackground = self.cor2, command=self.som ) self.botao.place( relx = 0, rely = 0, relwidth = 1, relheight = 1 ) def widgetsButton3(self): self.botao2 = Button( self.parar, text = 'Stop', font = ('Poppins', 25), fg = self.cor3, activeforeground = self.cor3, bg = self.cor2, activebackground = self.cor2, command=self.stop ) self.botao2.place( relx = 0, rely = 0, relwidth = 1, relheight = 1 )
frames
create_vpn_connection.go
package vpc //Licensed under the Apache License, Version 2.0 (the "License"); //you may not use this file except in compliance with the License. //You may obtain a copy of the License at // //http://www.apache.org/licenses/LICENSE-2.0 // //Unless required by applicable law or agreed to in writing, software //distributed under the License is distributed on an "AS IS" BASIS, //WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //See the License for the specific language governing permissions and //limitations under the License. // // Code generated by Alibaba Cloud SDK Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" ) // CreateVpnConnection invokes the vpc.CreateVpnConnection API synchronously // api document: https://help.aliyun.com/api/vpc/createvpnconnection.html func (client *Client) CreateVpnConnection(request *CreateVpnConnectionRequest) (response *CreateVpnConnectionResponse, err error) { response = CreateCreateVpnConnectionResponse() err = client.DoAction(request, response) return } // CreateVpnConnectionWithChan invokes the vpc.CreateVpnConnection API asynchronously // api document: https://help.aliyun.com/api/vpc/createvpnconnection.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) CreateVpnConnectionWithChan(request *CreateVpnConnectionRequest) (<-chan *CreateVpnConnectionResponse, <-chan error) { responseChan := make(chan *CreateVpnConnectionResponse, 1) errChan := make(chan error, 1) err := client.AddAsyncTask(func() { defer close(responseChan) defer close(errChan) response, err := client.CreateVpnConnection(request) if err != nil { errChan <- err } else { responseChan <- response } }) if err != nil { errChan <- err close(responseChan) close(errChan) } return responseChan, errChan } // CreateVpnConnectionWithCallback invokes the vpc.CreateVpnConnection API asynchronously // api document: https://help.aliyun.com/api/vpc/createvpnconnection.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) CreateVpnConnectionWithCallback(request *CreateVpnConnectionRequest, callback func(response *CreateVpnConnectionResponse, err error)) <-chan int { result := make(chan int, 1) err := client.AddAsyncTask(func() { var response *CreateVpnConnectionResponse var err error defer close(result) response, err = client.CreateVpnConnection(request) callback(response, err) result <- 1 }) if err != nil
return result } // CreateVpnConnectionRequest is the request struct for api CreateVpnConnection type CreateVpnConnectionRequest struct { *requests.RpcRequest IkeConfig string `position:"Query" name:"IkeConfig"` ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"` RemoteSubnet string `position:"Query" name:"RemoteSubnet"` EffectImmediately requests.Boolean `position:"Query" name:"EffectImmediately"` ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"` ClientToken string `position:"Query" name:"ClientToken"` OwnerAccount string `position:"Query" name:"OwnerAccount"` IpsecConfig string `position:"Query" name:"IpsecConfig"` VpnGatewayId string `position:"Query" name:"VpnGatewayId"` OwnerId requests.Integer `position:"Query" name:"OwnerId"` CustomerGatewayId string `position:"Query" name:"CustomerGatewayId"` LocalSubnet string `position:"Query" name:"LocalSubnet"` Name string `position:"Query" name:"Name"` } // CreateVpnConnectionResponse is the response struct for api CreateVpnConnection type CreateVpnConnectionResponse struct { *responses.BaseResponse RequestId string `json:"RequestId" xml:"RequestId"` VpnConnectionId string `json:"VpnConnectionId" xml:"VpnConnectionId"` Name string `json:"Name" xml:"Name"` CreateTime int `json:"CreateTime" xml:"CreateTime"` } // CreateCreateVpnConnectionRequest creates a request to invoke CreateVpnConnection API func CreateCreateVpnConnectionRequest() (request *CreateVpnConnectionRequest) { request = &CreateVpnConnectionRequest{ RpcRequest: &requests.RpcRequest{}, } request.InitWithApiInfo("Vpc", "2016-04-28", "CreateVpnConnection", "vpc", "openAPI") return } // CreateCreateVpnConnectionResponse creates a response to parse from CreateVpnConnection response func CreateCreateVpnConnectionResponse() (response *CreateVpnConnectionResponse) { response = &CreateVpnConnectionResponse{ BaseResponse: &responses.BaseResponse{}, } return }
{ defer close(result) callback(nil, err) result <- 0 }
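A usage sketch for the API above; the region, credentials, resource IDs and subnets are placeholders, the constructor is assumed to be the package's standard NewClientWithAccessKey, and the synchronous call is shown because the channel and callback variants wrap the same request.

package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/services/vpc"
)

func main() {
	client, err := vpc.NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	// Build the request with the fields defined in CreateVpnConnectionRequest above.
	request := vpc.CreateCreateVpnConnectionRequest()
	request.VpnGatewayId = "vpn-xxxxxxxx"
	request.CustomerGatewayId = "cgw-xxxxxxxx"
	request.LocalSubnet = "192.168.0.0/24"
	request.RemoteSubnet = "10.0.0.0/24"
	request.Name = "example-connection"

	response, err := client.CreateVpnConnection(request)
	if err != nil {
		panic(err)
	}
	fmt.Println("created VPN connection:", response.VpnConnectionId)
}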
schema.go
/////////////////////////////////////////////////////////////////////// // Copyright (c) 2017 VMware, Inc. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 /////////////////////////////////////////////////////////////////////// package v1 import ( strfmt "github.com/go-openapi/strfmt" "github.com/go-openapi/errors" "github.com/go-openapi/swag" ) // NO TESTS // Schema schema // swagger:model Schema type Schema struct { // in In interface{} `json:"in,omitempty"` // out Out interface{} `json:"out,omitempty"` } // Validate validates this schema func (m *Schema) Validate(formats strfmt.Registry) error { var res []error if len(res) > 0
return nil } // MarshalBinary interface implementation func (m *Schema) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } return swag.WriteJSON(m) } // UnmarshalBinary interface implementation func (m *Schema) UnmarshalBinary(b []byte) error { var res Schema if err := swag.ReadJSON(b, &res); err != nil { return err } *m = res return nil }
{ return errors.CompositeValidationError(res...) }
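A small round-trip sketch using only the methods defined above, assuming it lives in the same package as Schema; the payload values are arbitrary.

// demoSchemaRoundTrip serializes a Schema and restores it into a fresh value.
func demoSchemaRoundTrip() error {
	original := &Schema{
		In:  map[string]interface{}{"type": "string"},
		Out: map[string]interface{}{"type": "object"},
	}

	// Serialize with the swag-backed MarshalBinary, then decode into a new Schema.
	raw, err := original.MarshalBinary()
	if err != nil {
		return err
	}

	var decoded Schema
	if err := decoded.UnmarshalBinary(raw); err != nil {
		return err
	}

	// Validate currently accumulates no errors, but keeps the swagger model contract.
	return decoded.Validate(nil)
}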
trigger_listeners.go
package server import ( "context" "github.com/fnproject/fn/api/models" "github.com/fnproject/fn/fnext" ) type triggerListeners []fnext.TriggerListener var _ fnext.TriggerListener = new(triggerListeners) func (t *triggerListeners) BeforeTriggerCreate(ctx context.Context, trigger *models.Trigger) error { for _, l := range *t { err := l.BeforeTriggerCreate(ctx, trigger) if err != nil { return err } } return nil } func (t *triggerListeners) AfterTriggerCreate(ctx context.Context, trigger *models.Trigger) error { for _, l := range *t { err := l.AfterTriggerCreate(ctx, trigger) if err != nil { return err } } return nil } func (t *triggerListeners) BeforeTriggerUpdate(ctx context.Context, trigger *models.Trigger) error { for _, l := range *t { err := l.BeforeTriggerUpdate(ctx, trigger) if err != nil { return err } } return nil } func (t *triggerListeners) AfterTriggerUpdate(ctx context.Context, trigger *models.Trigger) error { for _, l := range *t { err := l.AfterTriggerUpdate(ctx, trigger) if err != nil { return err } } return nil } func (t *triggerListeners) BeforeTriggerDelete(ctx context.Context, triggerID string) error { for _, l := range *t { err := l.BeforeTriggerDelete(ctx, triggerID) if err != nil { return err } } return nil } func (t *triggerListeners) AfterTriggerDelete(ctx context.Context, triggerID string) error { for _, l := range *t { err := l.AfterTriggerDelete(ctx, triggerID) if err != nil
} return nil } // AddTriggerListener adds an TriggerListener for the server to use. func (s *Server) AddTriggerListener(listener fnext.TriggerListener) { *s.triggerListeners = append(*s.triggerListeners, listener) }
{ return err }
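The composite above runs each hook in registration order and stops at the first error. A sketch of a listener that could be plugged in through AddTriggerListener follows; the validator type, its length limit, and the Trigger.Name field access are illustrative assumptions rather than parts of the fn codebase, and it needs the standard "errors" import alongside the file's existing ones.

// nameLengthValidator is a hypothetical listener that rejects overly long trigger
// names before creation and leaves every other hook as a no-op.
type nameLengthValidator struct{}

func (nameLengthValidator) BeforeTriggerCreate(ctx context.Context, t *models.Trigger) error {
	if len(t.Name) > 30 {
		return errors.New("trigger name too long")
	}
	return nil
}

func (nameLengthValidator) AfterTriggerCreate(ctx context.Context, t *models.Trigger) error  { return nil }
func (nameLengthValidator) BeforeTriggerUpdate(ctx context.Context, t *models.Trigger) error { return nil }
func (nameLengthValidator) AfterTriggerUpdate(ctx context.Context, t *models.Trigger) error  { return nil }
func (nameLengthValidator) BeforeTriggerDelete(ctx context.Context, triggerID string) error  { return nil }
func (nameLengthValidator) AfterTriggerDelete(ctx context.Context, triggerID string) error   { return nil }

Registering it with s.AddTriggerListener(nameLengthValidator{}) would make every trigger create pass through BeforeTriggerCreate first, and a returned error aborts the operation before later listeners run.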
register.rs
use {data::semantics::Semantics, proc_macro2::TokenStream, quote::quote}; impl Semantics { pub fn runtime_register_functions() -> TokenStream { quote! { fn register_classes(source: &Group, classes: &mut HashMap<&'static str, Group>) {
for source in &source.classes { for class in &source.classes { register_classes(class, classes); } let mut target = classes.entry(source.selector).or_insert(Group::default()); if source.elements.len() > 0 { target.elements = Vec::new(); for element in &source.elements { target.elements.push(element.clone()); } } for listener in &source.listeners { target.listeners.push(listener.clone()); } for (property, value) in source.properties.clone() { target.properties.insert(property, value.clone()); } } } } } }
main.go
package vtMain

import (
	"encoding/json"
	"github.com/gorilla/websocket"
	"github.com/kpango/glg"
	"log"
	"net/http"
	"strconv"
	"strings"
	"sync"
	"time"

	vtLobby "./lobby"
)

var Lock *sync.Mutex

func Init() {
	Lock = &sync.Mutex{}
	http.HandleFunc("/v1/vt/ping", PingGet)
	http.HandleFunc("/v1/vt/lobby/enter", EnterlobbyGet)
	http.HandleFunc("/v1/vt/lobby/create", CreatelobbyGet)
	http.HandleFunc("/v1/vt/lobby/exit", ExitLobbyGet)
	http.HandleFunc("/v1/vt/lobby/update/videodesc", SendVideoInfoPost)
	http.HandleFunc("/v1/vt/lobbies", QueryLobbiesGet)
	http.HandleFunc("/v1/vt/user/status", CheckUserStatus)
	http.HandleFunc("/v1/vt/user/where", UserWhereGet)
	http.HandleFunc("/v1/vt/sync/host", SendSyncHostGet)
	http.HandleFunc("/v1/vt/sync/guest", SendSyncGuestGet)
	http.HandleFunc("/v1/vt/lobby/users/status", GetUserStatus)
	http.HandleFunc("/v1/vt/lobby/videodesc", GetCurrentVideoDesc)
	http.HandleFunc("/ws/echo", echo)
}

var upgrader = websocket.Upgrader{
	CheckOrigin: func(r *http.Request) bool {
		return true
	},
}

func echo(w http.ResponseWriter, r *http.Request) {
	c, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Print("upgrade:", err)
		return
	}
	defer c.Close()
	for {
		mt, message, err := c.ReadMessage()
		if err != nil {
			log.Println("read:", err)
			break
		}
		log.Printf("recv: %s", message)
		err = c.WriteMessage(mt, message)
		if err != nil {
			log.Println("write:", err)
			break
		}
	}
}

func resp(w *http.ResponseWriter, msg string) {
	(*w).Write([]byte(msg))
}

func CheckUserStatus(w http.ResponseWriter, r *http.Request) {
	username := r.URL.Query()["username"][0]
	resp(&w, vtLobby.CheckUserStatus(username, vtLobby.Lobbies))
}

func ExitLobbyGet(w http.ResponseWriter, r *http.Request) {
	username := r.URL.Query()["username"][0]
	var res string
	Lock.Lock()
	// LOBBY_DELETED
	// LOBBY_EXIT
	// NO_SUCH_LOBBY
	res, vtLobby.Lobbies = vtLobby.ExitLobby(username, vtLobby.Lobbies)
	Lock.Unlock()
	resp(&w, res)
}

func QueryLobbiesGet(w http.ResponseWriter, r *http.Request) {
	lobbyNames := ""
	for _, lob := range vtLobby.Lobbies {
		lobbyNames += lob.Name + ","
	}
	lobbyNames = strings.TrimSuffix(lobbyNames, ",")
	glg.Log(lobbyNames)
	resp(&w, lobbyNames)
}

func EnterlobbyGet(w http.ResponseWriter, r *http.Request) {
	userName := r.URL.Query()["username"][0]
	lobbyName := r.URL.Query()["lobbyname"][0]
	passwd := r.URL.Query()["passwd"][0]
	for _, lob := range vtLobby.Lobbies {
		if lob.Name == lobbyName {
			if passwd != lob.Password {
				resp(&w, "PASSWORD_INCORRECT")
				return
			}
			lob.Viewers = append(lob.Viewers, vtLobby.VTViewer{
				Name:     userName,
				Location: "0",
				IsHost:   false,
				IsPause:  false,
			})
			glg.Info("Guest[" + userName + "] has entered lobby[" + lobbyName + "]")
			resp(&w, "OK")
			return
		}
	}
	resp(&w, "NO_SUCH_LOBBY")
}

func CreatelobbyGet(w http.ResponseWriter, r *http.Request) {
	hostName := r.URL.Query()["hostname"][0]
	lobbyName := r.URL.Query()["lobbyname"][0]
	passwd := r.URL.Query()["passwd"][0]
	// Create the new lobby
	newLobby := &vtLobby.VTLobby{
		Name:     lobbyName,
		Password: passwd,
		Viewers: []vtLobby.VTViewer{
			// Add the host as the first viewer
			vtLobby.VTViewer{
				Name:     hostName,
				Location: "",
				IsHost:   true,
				IsPause:  true,
			},
		},
		MaxOffset:      2,
		VideoIndex:     0,
		VideoLs:        "",
		LastUpdateTime: time.Now(),
	}
	if vtLobby.IsSameNameLobbyExist(newLobby.Name, vtLobby.Lobbies) {
		resp(&w, "LOBBY_EXISTED")
		glg.Info(newLobby.Name + " already exists but someone still tried to create it.")
		return
	}
	Lock.Lock()
	vtLobby.Lobbies = append(vtLobby.Lobbies, newLobby)
	Lock.Unlock()
	resp(&w, "OK")
	glg.Info("lobby " + newLobby.Name + " created!")
	for _, l := range vtLobby.Lobbies {
		glg.Info(*l)
	}
}

func 
PingGet(w http.ResponseWriter, r *http.Request) {
	resp(&w, "OK")
}

func SendVideoInfoPost(w http.ResponseWriter, r *http.Request) {
	hostName := r.URL.Query()["hostname"][0]
	lobby, i, _ := vtLobby.FindLobbyByHost(hostName, vtLobby.Lobbies)
	if lobby == nil {
		resp(&w, "NO_AUTH")
		return
	}
	// Decode the posted video description and sync it into the lobby
	var videoDesc vtLobby.VTVideoDesc
	err := json.NewDecoder(r.Body).Decode(&videoDesc)
	if err != nil {
		// Log the failure instead of panicking so the handler can still answer the client.
		glg.Error(err)
		resp(&w, "INTERVAL_ERR")
		return
	}
	// For the web client, Ls is simply the video src
	lobby.VideoLs = videoDesc.Ls
	lobby.VideoIndex = videoDesc.Index
	lobby.Md5 = videoDesc.Md5
	vtLobby.Lobbies[i] = lobby
	glg.Info("Video[" + videoDesc.Ls + "]\n P:[" + strconv.Itoa(videoDesc.Index) + "]")
	resp(&w, "OK")
}

func UserWhereGet(w http.ResponseWriter, r *http.Request) {
	username := r.URL.Query()["username"][0]
	lobby, _, ishost := vtLobby.FindLobbyByUser(username, vtLobby.Lobbies)
	if lobby == nil {
		resp(&w, "IDLE")
		return
	}
	var host string
	if ishost {
		host = "HOST"
	} else {
		host = "GUEST"
	}
	resp(&w, lobby.Name+","+host+","+lobby.Password)
}

/// sync ///
func SendSyncHostGet(w http.ResponseWriter, r *http.Request) {
	name := r.URL.Query()["name"][0]
	location := r.URL.Query()["location"][0]
	pause := r.URL.Query()["ispause"][0]
	// p, s
	part := r.URL.Query()["p"][0]
	lobby, i, _ := vtLobby.FindLobbyByHost(name, vtLobby.Lobbies)
	if lobby == nil {
		return
	}
	lobby.IsPause = pause
	lobby.Location = location
	lobby.VideoIndex, _ = strconv.Atoi(part)
	glg.Log("========SYNC========")
	glg.Log("[HOST]" + name)
	glg.Log("[LOCATION]" + location)
	glg.Log("[IS PAUSE]" + pause)
	glg.Log("[PART]" + part)
	glg.Log("========SYNC========")
	Lock.Lock()
	vtLobby.Lobbies[i] = lobby
	Lock.Unlock()
}

func SendSyncGuestGet(w http.ResponseWriter, r *http.Request) {
	name := r.URL.Query()["name"][0]
	lb, i, ishost := vtLobby.FindLobbyByUser(name, vtLobby.Lobbies)
	if i == -1 || ishost || lb == nil {
		resp(&w, "ERR")
		// Return early: lb may be nil here, so the success path below must not run.
		return
	}
	// md5,p/s,location,part
	resp(&w, lb.Md5+","+lb.IsPause+","+lb.Location+","+strconv.Itoa(lb.VideoIndex))
}

func GetCurrentVideoDesc(w http.ResponseWriter,
name := r.URL.Query()["name"][0] t := r.URL.Query()["t"][0] lb, i, ishost := vtLobby.FindLobbyByUser( name, vtLobby.Lobbies ) if i == -1 || ishost || lb == nil { resp( &w, "ERR" ) return } if t == "web" { resp( &w, lb.VideoLs ) return } // ls,index resp( &w, lb.VideoLs + "`" + strconv.Itoa( lb.VideoIndex ) ) } func GetUserStatus(w http.ResponseWriter, r *http.Request) { lobbyname := r.URL.Query()["lobbyname"][0] exists, lob := vtLobby.IsLobbyExist(lobbyname, vtLobby.Lobbies) if !exists { resp( &w, "NO_SUCH_LOBBY" ) return } jsons, errs := json.Marshal(lob.Viewers) if errs != nil { glg.Log(errs) return } resp( &w, string(jsons) ) }
r *http.Request) {
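A client-side sketch of the query-string protocol these handlers implement; the base URL, lobby name and credentials are placeholders, and it assumes the handlers registered in Init are being served by an http.ListenAndServe somewhere else.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	base := "http://localhost:8080" // placeholder address for wherever the handlers are served

	// Create a lobby as the host, then join it as a guest, mirroring CreatelobbyGet and EnterlobbyGet.
	create := url.Values{"hostname": {"alice"}, "lobbyname": {"movie-night"}, "passwd": {"secret"}}
	enter := url.Values{"username": {"bob"}, "lobbyname": {"movie-night"}, "passwd": {"secret"}}

	for _, endpoint := range []string{
		base + "/v1/vt/lobby/create?" + create.Encode(),
		base + "/v1/vt/lobby/enter?" + enter.Encode(),
	} {
		res, err := http.Get(endpoint)
		if err != nil {
			panic(err)
		}
		body, _ := ioutil.ReadAll(res.Body)
		res.Body.Close()
		// The handlers answer with plain-text status words such as OK, LOBBY_EXISTED or PASSWORD_INCORRECT.
		fmt.Println(endpoint, "->", string(body))
	}
}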
vulnerabilities.go
package digests import ( "fmt" "strconv" "github.com/ion-channel/ionic/scanner" "github.com/ion-channel/ionic/scans" ) func
(status *scanner.ScanStatus, eval *scans.Evaluation) ([]Digest, error) { digests := make([]Digest, 0) var vulnCount, uniqVulnCount int var highs int var crits int if eval != nil { b, ok := eval.TranslatedResults.Data.(scans.VulnerabilityResults) if !ok { return nil, fmt.Errorf("error coercing evaluation translated results into vuln") } vulnCount = b.Meta.VulnerabilityCount ids := make(map[int]bool, 0) for i := range b.Vulnerabilities { for j := range b.Vulnerabilities[i].Vulnerabilities { v := b.Vulnerabilities[i].Vulnerabilities[j] ids[v.ID] = true if v.ScoreSystem == "NPM" { if npmScore, err := strconv.ParseFloat(v.Score, 32); err == nil { if npmScore > 7 { // 10, 9, 8 crits++ } else if npmScore > 5 { // 7, 6 highs++ } } } else { switch v.ScoreVersion { case "3.0": if v.ScoreDetails.CVSSv3 != nil && v.ScoreDetails.CVSSv3.BaseScore >= 9.0 { crits++ } else if v.ScoreDetails.CVSSv3 != nil && v.ScoreDetails.CVSSv3.BaseScore >= 7.0 { highs++ } case "2.0": if v.ScoreDetails.CVSSv2 != nil && v.ScoreDetails.CVSSv2.BaseScore >= 7.0 { highs++ } default: } } } } uniqVulnCount = len(ids) } // total vulns d := NewDigest(status, totalVulnerabilitiesIndex, "total vulnerability", "total vulnerabilities") if eval != nil && !status.Errored() { err := d.AppendEval(eval, "count", vulnCount) if err != nil { return nil, fmt.Errorf("failed to add evaluation data to total vulnerabilities digest: %v", err.Error()) } if vulnCount > 0 { d.Warning = true d.WarningMessage = "vulnerabilities found" if vulnCount == 1 { d.WarningMessage = "vulnerability found" } } d.Evaluated = false // As of now there's no rule to evaluate this against so it's set to not evaluated. } digests = append(digests, *d) // unique vulns d = NewDigest(status, uniqueVulnerabilitiesIndex, "unique vulnerability", "unique vulnerabilities") if eval != nil && !status.Errored() { err := d.AppendEval(eval, "count", uniqVulnCount) if err != nil { return nil, fmt.Errorf("failed to add evaluation data to unique vulnerabilities digest: %v", err.Error()) } if uniqVulnCount > 0 { d.Warning = true d.WarningMessage = "vulnerabilities found" if uniqVulnCount == 1 { d.WarningMessage = "vulnerability found" } } d.Evaluated = false // As of now there's no rule to evaluate this against so it's set to not evaluated. } digests = append(digests, *d) // high vulns d = NewDigest(status, highVulnerabilitiesIndex, "high vulnerability", "high vulnerabilities") if eval != nil && !status.Errored() { err := d.AppendEval(eval, "count", highs) if err != nil { return nil, fmt.Errorf("failed to add evaluation data to unique vulnerabilities digest: %v", err.Error()) } if highs == 0 { d.Passed = true } } digests = append(digests, *d) // critical vulns d = NewDigest(status, criticalVulnerabilitiesIndex, "critical vulnerability", "critical vulnerabilities") if eval != nil && !status.Errored() { err := d.AppendEval(eval, "count", crits) if err != nil { return nil, fmt.Errorf("failed to add evaluation data to unique vulnerabilities digest: %v", err.Error()) } if crits == 0 { d.Passed = true } } digests = append(digests, *d) return digests, nil }
vulnerabilityDigests
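For reference, the severity thresholds applied in the counting loop above, restated as a standalone helper; the function and its sample inputs are illustrative and not part of the digests package.

package main

import "fmt"

// bucket mirrors the counting logic: NPM scores above 7 count as critical and above 5
// as high, CVSSv3 uses the 9.0 and 7.0 cut-offs, and CVSSv2 only has a 7.0 high cut-off.
func bucket(scoreSystem, scoreVersion string, score float64) string {
	if scoreSystem == "NPM" {
		switch {
		case score > 7:
			return "critical"
		case score > 5:
			return "high"
		}
		return "other"
	}
	switch scoreVersion {
	case "3.0":
		if score >= 9.0 {
			return "critical"
		}
		if score >= 7.0 {
			return "high"
		}
	case "2.0":
		if score >= 7.0 {
			return "high"
		}
	}
	return "other"
}

func main() {
	fmt.Println(bucket("NPM", "", 8))       // critical
	fmt.Println(bucket("CVSS", "3.0", 7.5)) // high
	fmt.Println(bucket("CVSS", "2.0", 9.8)) // high: the v2 branch has no critical bucket
}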
reparent_graceful.go
// Copyright 2012, Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package wrangler import ( "fmt" "strings" log "github.com/golang/glog" "github.com/youtube/vitess/go/vt/topo" ) func (wr *Wrangler) reparentShardGraceful(si *topo.ShardInfo, slaveTabletMap, masterTabletMap map[topo.TabletAlias]*topo.TabletInfo, masterElectTablet *topo.TabletInfo, leaveMasterReadOnly bool) error { // Validate a bunch of assumptions we make about the replication graph. if len(masterTabletMap) != 1 { aliases := make([]string, 0, len(masterTabletMap)) for _, v := range masterTabletMap { aliases = append(aliases, v.String()) } return fmt.Errorf("I have 0 or multiple masters / scrapped tablets in this shard replication graph, please scrap the non-master ones: %v", strings.Join(aliases, " ")) } var masterTablet *topo.TabletInfo for _, v := range masterTabletMap { masterTablet = v } if masterTablet.Parent.Uid != topo.NO_TABLET { return fmt.Errorf("master tablet should not have a ParentUid: %v %v", masterTablet.Parent.Uid, masterTablet.Alias) } if masterTablet.Type != topo.TYPE_MASTER { return fmt.Errorf("master tablet should not be type: %v %v", masterTablet.Type, masterTablet.Alias) } if masterTablet.Alias.Uid == masterElectTablet.Alias.Uid { return fmt.Errorf("master tablet should not match master elect - this must be forced: %v", masterTablet.Alias) } if _, ok := slaveTabletMap[masterElectTablet.Alias]; !ok { return fmt.Errorf("master elect tablet not in replication graph %v %v/%v %v", masterElectTablet.Alias, masterTablet.Keyspace, masterTablet.Shard, mapKeys(slaveTabletMap)) } if err := wr.ValidateShard(masterTablet.Keyspace, masterTablet.Shard, true); err != nil { return fmt.Errorf("ValidateShard verification failed: %v, if the master is dead, run: vtctl ScrapTablet -force %v", err, masterTablet.Alias) } // Make sure all tablets have the right parent and reasonable positions. err := wr.checkSlaveReplication(slaveTabletMap, masterTablet.Alias.Uid) if err != nil { return err } // Check the master-elect is fit for duty - call out for hardware checks. err = wr.checkMasterElect(masterElectTablet) if err != nil { return err } masterPosition, err := wr.demoteMaster(masterTablet) if err != nil { // FIXME(msolomon) This suggests that the master is dead and we // need to take steps. We could either pop a prompt, or make // retrying the action painless. return fmt.Errorf("demote master failed: %v, if the master is dead, run: vtctl -force ScrapTablet %v", err, masterTablet.Alias) } log.Infof("check slaves %v/%v", masterTablet.Keyspace, masterTablet.Shard) restartableSlaveTabletMap := restartableTabletMap(slaveTabletMap) err = wr.checkSlaveConsistency(restartableSlaveTabletMap, masterPosition) if err != nil { return fmt.Errorf("check slave consistency failed %v, demoted master is still read only, run: vtctl SetReadWrite %v", err, masterTablet.Alias) } rsd, err := wr.promoteSlave(masterElectTablet) if err != nil {
return fmt.Errorf("promote slave failed: %v, demoted master is still read only: vtctl SetReadWrite %v", err, masterTablet.Alias) } // Once the slave is promoted, remove it from our map delete(slaveTabletMap, masterElectTablet.Alias) majorityRestart, restartSlaveErr := wr.restartSlaves(slaveTabletMap, rsd) // For now, scrap the old master regardless of how many // slaves restarted. // // FIXME(msolomon) We could reintroduce it and reparent it and use // it as new replica. log.Infof("scrap demoted master %v", masterTablet.Alias) scrapActionPath, scrapErr := wr.ai.Scrap(masterTablet.Alias) if scrapErr == nil { scrapErr = wr.ai.WaitForCompletion(scrapActionPath, wr.actionTimeout()) } if scrapErr != nil { // The sub action is non-critical, so just warn. log.Warningf("scrap demoted master failed: %v", scrapErr) } err = wr.finishReparent(si, masterElectTablet, majorityRestart, leaveMasterReadOnly) if err != nil { return err } if restartSlaveErr != nil { // This is more of a warning at this point. return restartSlaveErr } return nil }
// FIXME(msolomon) This suggests that the master-elect is dead. // We need to classify certain errors as temporary and retry.
test_table.py
# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import mock from ._testing import _make_credentials from google.api_core.exceptions import DeadlineExceeded class Test___mutate_rows_request(unittest.TestCase): def _call_fut(self, table_name, rows): from google.cloud.bigtable.table import _mutate_rows_request return _mutate_rows_request(table_name, rows) @mock.patch("google.cloud.bigtable.table._MAX_BULK_MUTATIONS", new=3) def test__mutate_rows_too_many_mutations(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import TooManyMutationsError table = mock.Mock(name="table", spec=["name"]) table.name = "table" rows = [ DirectRow(row_key=b"row_key", table=table), DirectRow(row_key=b"row_key_2", table=table), ] rows[0].set_cell("cf1", b"c1", 1) rows[0].set_cell("cf1", b"c1", 2) rows[1].set_cell("cf1", b"c1", 3) rows[1].set_cell("cf1", b"c1", 4) with self.assertRaises(TooManyMutationsError): self._call_fut("table", rows) def test__mutate_rows_request(self): from google.cloud.bigtable.row import DirectRow table = mock.Mock(name="table", spec=["name"]) table.name = "table" rows = [ DirectRow(row_key=b"row_key", table=table), DirectRow(row_key=b"row_key_2"), ] rows[0].set_cell("cf1", b"c1", b"1") rows[1].set_cell("cf1", b"c1", b"2") result = self._call_fut("table", rows) expected_result = _mutate_rows_request_pb(table_name="table") entry1 = expected_result.entries.add() entry1.row_key = b"row_key" mutations1 = entry1.mutations.add() mutations1.set_cell.family_name = "cf1" mutations1.set_cell.column_qualifier = b"c1" mutations1.set_cell.timestamp_micros = -1 mutations1.set_cell.value = b"1" entry2 = expected_result.entries.add() entry2.row_key = b"row_key_2" mutations2 = entry2.mutations.add() mutations2.set_cell.family_name = "cf1" mutations2.set_cell.column_qualifier = b"c1" mutations2.set_cell.timestamp_micros = -1 mutations2.set_cell.value = b"2" self.assertEqual(result, expected_result) class Test__check_row_table_name(unittest.TestCase): def _call_fut(self, table_name, row): from google.cloud.bigtable.table import _check_row_table_name return _check_row_table_name(table_name, row) def test_wrong_table_name(self): from google.cloud.bigtable.table import TableMismatchError from google.cloud.bigtable.row import DirectRow table = mock.Mock(name="table", spec=["name"]) table.name = "table" row = DirectRow(row_key=b"row_key", table=table) with self.assertRaises(TableMismatchError): self._call_fut("other_table", row) def test_right_table_name(self): from google.cloud.bigtable.row import DirectRow table = mock.Mock(name="table", spec=["name"]) table.name = "table" row = DirectRow(row_key=b"row_key", table=table) result = self._call_fut("table", row) self.assertFalse(result) class Test__check_row_type(unittest.TestCase): def _call_fut(self, row): from google.cloud.bigtable.table import _check_row_type return _check_row_type(row) def test_test_wrong_row_type(self): from google.cloud.bigtable.row import ConditionalRow row = 
ConditionalRow(row_key=b"row_key", table="table", filter_=None) with self.assertRaises(TypeError): self._call_fut(row) def test_right_row_type(self): from google.cloud.bigtable.row import DirectRow row = DirectRow(row_key=b"row_key", table="table") result = self._call_fut(row) self.assertFalse(result) class TestTable(unittest.TestCase): PROJECT_ID = "project-id" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID CLUSTER_ID = "cluster-id" CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID TABLE_ID = "table-id" TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID BACKUP_ID = "backup-id" BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID ROW_KEY = b"row-key" ROW_KEY_1 = b"row-key-1" ROW_KEY_2 = b"row-key-2" ROW_KEY_3 = b"row-key-3" FAMILY_NAME = u"family" QUALIFIER = b"qualifier" TIMESTAMP_MICROS = 100 VALUE = b"value" _json_tests = None @staticmethod def
(): from google.cloud.bigtable.table import Table return Table def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) @staticmethod def _get_target_client_class(): from google.cloud.bigtable.client import Client return Client def _make_client(self, *args, **kwargs): return self._get_target_client_class()(*args, **kwargs) def test_constructor_w_admin(self): credentials = _make_credentials() client = self._make_client( project=self.PROJECT_ID, credentials=credentials, admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) self.assertEqual(table.table_id, self.TABLE_ID) self.assertIs(table._instance._client, client) self.assertEqual(table.name, self.TABLE_NAME) def test_constructor_wo_admin(self): credentials = _make_credentials() client = self._make_client( project=self.PROJECT_ID, credentials=credentials, admin=False ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) self.assertEqual(table.table_id, self.TABLE_ID) self.assertIs(table._instance._client, client) self.assertEqual(table.name, self.TABLE_NAME) def _row_methods_helper(self): client = self._make_client( project="project-id", credentials=_make_credentials(), admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) row_key = b"row_key" return table, row_key def test_row_factory_direct(self): from google.cloud.bigtable.row import DirectRow table, row_key = self._row_methods_helper() row = table.row(row_key) self.assertIsInstance(row, DirectRow) self.assertEqual(row._row_key, row_key) self.assertEqual(row._table, table) def test_row_factory_conditional(self): from google.cloud.bigtable.row import ConditionalRow table, row_key = self._row_methods_helper() filter_ = object() row = table.row(row_key, filter_=filter_) self.assertIsInstance(row, ConditionalRow) self.assertEqual(row._row_key, row_key) self.assertEqual(row._table, table) def test_row_factory_append(self): from google.cloud.bigtable.row import AppendRow table, row_key = self._row_methods_helper() row = table.row(row_key, append=True) self.assertIsInstance(row, AppendRow) self.assertEqual(row._row_key, row_key) self.assertEqual(row._table, table) def test_direct_row(self): from google.cloud.bigtable.row import DirectRow table, row_key = self._row_methods_helper() row = table.direct_row(row_key) self.assertIsInstance(row, DirectRow) self.assertEqual(row._row_key, row_key) self.assertEqual(row._table, table) def test_conditional_row(self): from google.cloud.bigtable.row import ConditionalRow table, row_key = self._row_methods_helper() filter_ = object() row = table.conditional_row(row_key, filter_=filter_) self.assertIsInstance(row, ConditionalRow) self.assertEqual(row._row_key, row_key) self.assertEqual(row._table, table) def test_append_row(self): from google.cloud.bigtable.row import AppendRow table, row_key = self._row_methods_helper() row = table.append_row(row_key) self.assertIsInstance(row, AppendRow) self.assertEqual(row._row_key, row_key) self.assertEqual(row._table, table) def test_row_factory_failure(self): table, row_key = self._row_methods_helper() with self.assertRaises(ValueError): table.row(row_key, filter_=object(), append=True) def test___eq__(self): credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) table1 = 
self._make_one(self.TABLE_ID, instance) table2 = self._make_one(self.TABLE_ID, instance) self.assertEqual(table1, table2) def test___eq__type_differ(self): credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) table1 = self._make_one(self.TABLE_ID, instance) table2 = object() self.assertNotEqual(table1, table2) def test___ne__same_value(self): credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) table1 = self._make_one(self.TABLE_ID, instance) table2 = self._make_one(self.TABLE_ID, instance) comparison_val = table1 != table2 self.assertFalse(comparison_val) def test___ne__(self): table1 = self._make_one("table_id1", None) table2 = self._make_one("table_id2", None) self.assertNotEqual(table1, table2) def _create_test_helper(self, split_keys=[], column_families={}): from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client from google.cloud.bigtable_admin_v2.proto import table_pb2 from google.cloud.bigtable_admin_v2.proto import ( bigtable_table_admin_pb2 as table_admin_messages_v2_pb2, ) from google.cloud.bigtable.column_family import ColumnFamily table_api = mock.create_autospec( bigtable_table_admin_client.BigtableTableAdminClient ) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) # Patch API calls client._table_admin_client = table_api # Perform the method and check the result. table.create(column_families=column_families, initial_split_keys=split_keys) families = { id: ColumnFamily(id, self, rule).to_pb() for (id, rule) in column_families.items() } split = table_admin_messages_v2_pb2.CreateTableRequest.Split splits = [split(key=split_key) for split_key in split_keys] table_api.create_table.assert_called_once_with( parent=self.INSTANCE_NAME, table=table_pb2.Table(column_families=families), table_id=self.TABLE_ID, initial_splits=splits, ) def test_create(self): self._create_test_helper() def test_create_with_families(self): from google.cloud.bigtable.column_family import MaxVersionsGCRule families = {"family": MaxVersionsGCRule(5)} self._create_test_helper(column_families=families) def test_create_with_split_keys(self): self._create_test_helper(split_keys=[b"split1", b"split2", b"split3"]) def test_exists(self): from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_data_v2_pb2 from google.cloud.bigtable_admin_v2.proto import ( bigtable_table_admin_pb2 as table_messages_v1_pb2, ) from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client, bigtable_table_admin_client, ) from google.api_core.exceptions import NotFound from google.api_core.exceptions import BadRequest table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( mock.Mock() ) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) # Create response_pb response_pb = table_messages_v1_pb2.ListTablesResponse( tables=[table_data_v2_pb2.Table(name=self.TABLE_NAME)] ) # Patch API calls client._table_admin_client = table_api 
client._instance_admin_client = instance_api bigtable_table_stub = client._table_admin_client.transport bigtable_table_stub.get_table.side_effect = [ response_pb, NotFound("testing"), BadRequest("testing"), ] # Perform the method and check the result. table1 = instance.table(self.TABLE_ID) table2 = instance.table("table-id2") result = table1.exists() self.assertEqual(True, result) result = table2.exists() self.assertEqual(False, result) with self.assertRaises(BadRequest): table2.exists() def test_delete(self): from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client table_api = mock.create_autospec( bigtable_table_admin_client.BigtableTableAdminClient ) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) # Patch API calls client._table_admin_client = table_api # Create expected_result. expected_result = None # delete() has no return value. # Perform the method and check the result. result = table.delete() self.assertEqual(result, expected_result) def _list_column_families_helper(self): from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) # Create response_pb COLUMN_FAMILY_ID = "foo" column_family = _ColumnFamilyPB() response_pb = _TablePB(column_families={COLUMN_FAMILY_ID: column_family}) # Patch the stub used by the API method. client._table_admin_client = table_api bigtable_table_stub = client._table_admin_client.transport bigtable_table_stub.get_table.side_effect = [response_pb] # Create expected_result. expected_result = {COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID)} # Perform the method and check the result. result = table.list_column_families() self.assertEqual(result, expected_result) def test_list_column_families(self): self._list_column_families_helper() def test_get_cluster_states(self): from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client from google.cloud.bigtable.enums import Table as enum_table from google.cloud.bigtable.table import ClusterState INITIALIZING = enum_table.ReplicationState.INITIALIZING PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE READY = enum_table.ReplicationState.READY table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) response_pb = _TablePB( cluster_states={ "cluster-id1": _ClusterStatePB(INITIALIZING), "cluster-id2": _ClusterStatePB(PLANNED_MAINTENANCE), "cluster-id3": _ClusterStatePB(READY), } ) # Patch the stub used by the API method. client._table_admin_client = table_api bigtable_table_stub = client._table_admin_client.transport bigtable_table_stub.get_table.side_effect = [response_pb] # build expected result expected_result = { u"cluster-id1": ClusterState(INITIALIZING), u"cluster-id2": ClusterState(PLANNED_MAINTENANCE), u"cluster-id3": ClusterState(READY), } # Perform the method and check the result. 
result = table.get_cluster_states() self.assertEqual(result, expected_result) def _read_row_helper(self, chunks, expected_result, app_profile_id=None): from google.cloud._testing import _Monkey from google.cloud.bigtable import table as MUT from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client from google.cloud.bigtable.row_filters import RowSampleFilter data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = mock.create_autospec( bigtable_table_admin_client.BigtableTableAdminClient ) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id) # Create request_pb request_pb = object() # Returned by our mock. mock_created = [] def mock_create_row_request(table_name, **kwargs): mock_created.append((table_name, kwargs)) return request_pb # Create response_iterator if chunks is None: response_iterator = iter(()) # no responses at all else: response_pb = _ReadRowsResponsePB(chunks=chunks) response_iterator = iter([response_pb]) # Patch the stub used by the API method. client._table_data_client = data_api client._table_admin_client = table_api client._table_data_client.transport.read_rows = mock.Mock( side_effect=[response_iterator] ) # Perform the method and check the result. filter_obj = RowSampleFilter(0.33) result = None with _Monkey(MUT, _create_row_request=mock_create_row_request): result = table.read_row(self.ROW_KEY, filter_=filter_obj) row_set = RowSet() row_set.add_row_key(self.ROW_KEY) expected_request = [ ( table.name, { "end_inclusive": False, "row_set": row_set, "app_profile_id": app_profile_id, "end_key": None, "limit": None, "start_key": None, "filter_": filter_obj, }, ) ] self.assertEqual(result, expected_result) self.assertEqual(mock_created, expected_request) def test_read_row_miss_no__responses(self): self._read_row_helper(None, None) def test_read_row_miss_no_chunks_in_response(self): chunks = [] self._read_row_helper(chunks, None) def test_read_row_complete(self): from google.cloud.bigtable.row_data import Cell from google.cloud.bigtable.row_data import PartialRowData app_profile_id = "app-profile-id" chunk = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY, family_name=self.FAMILY_NAME, qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, commit_row=True, ) chunks = [chunk] expected_result = PartialRowData(row_key=self.ROW_KEY) family = expected_result._cells.setdefault(self.FAMILY_NAME, {}) column = family.setdefault(self.QUALIFIER, []) column.append(Cell.from_pb(chunk)) self._read_row_helper(chunks, expected_result, app_profile_id) def test_read_row_more_than_one_row_returned(self): app_profile_id = "app-profile-id" chunk_1 = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY, family_name=self.FAMILY_NAME, qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, commit_row=True, ) chunk_2 = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY_2, family_name=self.FAMILY_NAME, qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, commit_row=True, ) chunks = [chunk_1, chunk_2] with self.assertRaises(ValueError): self._read_row_helper(chunks, None, app_profile_id) def test_read_row_still_partial(self): chunk = _ReadRowsResponseCellChunkPB( 
row_key=self.ROW_KEY, family_name=self.FAMILY_NAME, qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, ) # No "commit row". chunks = [chunk] with self.assertRaises(ValueError): self._read_row_helper(chunks, None) def test_mutate_rows(self): from google.rpc.status_pb2 import Status from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client table_api = mock.create_autospec( bigtable_table_admin_client.BigtableTableAdminClient ) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) client._table_admin_client = table_api table = self._make_one(self.TABLE_ID, instance) response = [Status(code=0), Status(code=1)] mock_worker = mock.Mock(return_value=response) with mock.patch( "google.cloud.bigtable.table._RetryableMutateRowsWorker", new=mock.MagicMock(return_value=mock_worker), ): statuses = table.mutate_rows([mock.MagicMock(), mock.MagicMock()]) result = [status.code for status in statuses] expected_result = [0, 1] self.assertEqual(result, expected_result) def test_read_rows(self): from google.cloud._testing import _Monkey from google.cloud.bigtable.row_data import PartialRowsData from google.cloud.bigtable import table as MUT from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = mock.create_autospec( bigtable_table_admin_client.BigtableTableAdminClient ) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) app_profile_id = "app-profile-id" table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id) # Create request_pb request = retry = object() # Returned by our mock. mock_created = [] def mock_create_row_request(table_name, **kwargs): mock_created.append((table_name, kwargs)) return request # Create expected_result. expected_result = PartialRowsData( client._table_data_client.transport.read_rows, request, retry ) # Perform the method and check the result. 
start_key = b"start-key" end_key = b"end-key" filter_obj = object() limit = 22 with _Monkey(MUT, _create_row_request=mock_create_row_request): result = table.read_rows( start_key=start_key, end_key=end_key, filter_=filter_obj, limit=limit, retry=retry, ) self.assertEqual(result.rows, expected_result.rows) self.assertEqual(result.retry, expected_result.retry) created_kwargs = { "start_key": start_key, "end_key": end_key, "filter_": filter_obj, "limit": limit, "end_inclusive": False, "app_profile_id": app_profile_id, "row_set": None, } self.assertEqual(mock_created, [(table.name, created_kwargs)]) def test_read_retry_rows(self): from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client from google.api_core import retry data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception) # Create response_iterator chunk_1 = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY_1, family_name=self.FAMILY_NAME, qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, commit_row=True, ) chunk_2 = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY_2, family_name=self.FAMILY_NAME, qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, commit_row=True, ) response_1 = _ReadRowsResponseV2([chunk_1]) response_2 = _ReadRowsResponseV2([chunk_2]) response_failure_iterator_1 = _MockFailureIterator_1() response_failure_iterator_2 = _MockFailureIterator_2([response_1]) response_iterator = _MockReadRowsIterator(response_2) # Patch the stub used by the API method. 
client._table_data_client.transport.read_rows = mock.Mock( side_effect=[ response_failure_iterator_1, response_failure_iterator_2, response_iterator, ] ) rows = [] for row in table.read_rows( start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2, retry=retry_read_rows ): rows.append(row) result = rows[1] self.assertEqual(result.row_key, self.ROW_KEY_2) def test_yield_retry_rows(self): from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client import warnings data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) # Create response_iterator chunk_1 = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY_1, family_name=self.FAMILY_NAME, qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, commit_row=True, ) chunk_2 = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY_2, family_name=self.FAMILY_NAME, qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, commit_row=True, ) response_1 = _ReadRowsResponseV2([chunk_1]) response_2 = _ReadRowsResponseV2([chunk_2]) response_failure_iterator_1 = _MockFailureIterator_1() response_failure_iterator_2 = _MockFailureIterator_2([response_1]) response_iterator = _MockReadRowsIterator(response_2) # Patch the stub used by the API method. client._table_data_client.transport.read_rows = mock.Mock( side_effect=[ response_failure_iterator_1, response_failure_iterator_2, response_iterator, ] ) rows = [] with warnings.catch_warnings(record=True) as warned: for row in table.yield_rows( start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2 ): rows.append(row) self.assertEqual(len(warned), 1) self.assertIs(warned[0].category, DeprecationWarning) result = rows[1] self.assertEqual(result.row_key, self.ROW_KEY_2) def test_yield_rows_with_row_set(self): from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange import warnings data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) # Create response_iterator chunk_1 = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY_1, family_name=self.FAMILY_NAME, qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, commit_row=True, ) chunk_2 = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY_2, family_name=self.FAMILY_NAME, qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, commit_row=True, ) chunk_3 = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY_3, family_name=self.FAMILY_NAME, qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, commit_row=True, ) response_1 = _ReadRowsResponseV2([chunk_1]) 
response_2 = _ReadRowsResponseV2([chunk_2]) response_3 = _ReadRowsResponseV2([chunk_3]) response_iterator = _MockReadRowsIterator(response_1, response_2, response_3) # Patch the stub used by the API method. client._table_data_client.transport.read_rows = mock.Mock( side_effect=[response_iterator] ) rows = [] row_set = RowSet() row_set.add_row_range( RowRange(start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2) ) row_set.add_row_key(self.ROW_KEY_3) with warnings.catch_warnings(record=True) as warned: for row in table.yield_rows(row_set=row_set): rows.append(row) self.assertEqual(len(warned), 1) self.assertIs(warned[0].category, DeprecationWarning) self.assertEqual(rows[0].row_key, self.ROW_KEY_1) self.assertEqual(rows[1].row_key, self.ROW_KEY_2) self.assertEqual(rows[2].row_key, self.ROW_KEY_3) def test_sample_row_keys(self): from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) # Create response_iterator response_iterator = object() # Just passed to a mock. # Patch the stub used by the API method. inner_api_calls = client._table_data_client._inner_api_calls inner_api_calls["sample_row_keys"] = mock.Mock( side_effect=[[response_iterator]] ) # Create expected_result. expected_result = response_iterator # Perform the method and check the result. result = table.sample_row_keys() self.assertEqual(result[0], expected_result) def test_truncate(self): from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client data_api = mock.create_autospec(bigtable_client.BigtableClient) table_api = mock.create_autospec( bigtable_table_admin_client.BigtableTableAdminClient ) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) expected_result = None # truncate() has no return value. with mock.patch("google.cloud.bigtable.table.Table.name", new=self.TABLE_NAME): result = table.truncate() table_api.drop_row_range.assert_called_once_with( name=self.TABLE_NAME, delete_all_data_from_table=True ) self.assertEqual(result, expected_result) def test_truncate_w_timeout(self): from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client data_api = mock.create_autospec(bigtable_client.BigtableClient) table_api = mock.create_autospec( bigtable_table_admin_client.BigtableTableAdminClient ) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) expected_result = None # truncate() has no return value. 
timeout = 120 result = table.truncate(timeout=timeout) self.assertEqual(result, expected_result) def test_drop_by_prefix(self): from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client data_api = mock.create_autospec(bigtable_client.BigtableClient) table_api = mock.create_autospec( bigtable_table_admin_client.BigtableTableAdminClient ) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) expected_result = None # drop_by_prefix() has no return value. row_key_prefix = "row-key-prefix" result = table.drop_by_prefix(row_key_prefix=row_key_prefix) self.assertEqual(result, expected_result) def test_drop_by_prefix_w_timeout(self): from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client data_api = mock.create_autospec(bigtable_client.BigtableClient) table_api = mock.create_autospec( bigtable_table_admin_client.BigtableTableAdminClient ) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) expected_result = None # drop_by_prefix() has no return value. row_key_prefix = "row-key-prefix" timeout = 120 result = table.drop_by_prefix(row_key_prefix=row_key_prefix, timeout=timeout) self.assertEqual(result, expected_result) def test_mutations_batcher_factory(self): flush_count = 100 max_row_bytes = 1000 table = self._make_one(self.TABLE_ID, None) mutation_batcher = table.mutations_batcher( flush_count=flush_count, max_row_bytes=max_row_bytes ) self.assertEqual(mutation_batcher.table.table_id, self.TABLE_ID) self.assertEqual(mutation_batcher.flush_count, flush_count) self.assertEqual(mutation_batcher.max_row_bytes, max_row_bytes) def test_get_iam_policy(self): from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) version = 1 etag = b"etag_v1" members = ["serviceAccount:[email protected]", "user:[email protected]"] bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) table_api = mock.create_autospec( bigtable_table_admin_client.BigtableTableAdminClient ) client._table_admin_client = table_api table_api.get_iam_policy.return_value = iam_policy result = table.get_iam_policy() table_api.get_iam_policy.assert_called_once_with(resource=table.name) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) admins = result.bigtable_admins self.assertEqual(len(admins), len(members)) for found, expected in zip(sorted(admins), sorted(members)): self.assertEqual(found, expected) def test_set_iam_policy(self): from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client from google.iam.v1 import policy_pb2 from 
google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) version = 1 etag = b"etag_v1" members = ["serviceAccount:[email protected]", "user:[email protected]"] bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) table_api = mock.create_autospec( bigtable_table_admin_client.BigtableTableAdminClient ) client._table_admin_client = table_api table_api.set_iam_policy.return_value = iam_policy_pb iam_policy = Policy(etag=etag, version=version) iam_policy[BIGTABLE_ADMIN_ROLE] = [ Policy.user("[email protected]"), Policy.service_account("[email protected]"), ] result = table.set_iam_policy(iam_policy) table_api.set_iam_policy.assert_called_once_with( resource=table.name, policy=iam_policy_pb ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) admins = result.bigtable_admins self.assertEqual(len(admins), len(members)) for found, expected in zip(sorted(admins), sorted(members)): self.assertEqual(found, expected) def test_test_iam_permissions(self): from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client from google.iam.v1 import iam_policy_pb2 credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"] response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) table_api = mock.create_autospec( bigtable_table_admin_client.BigtableTableAdminClient ) table_api.test_iam_permissions.return_value = response client._table_admin_client = table_api result = table.test_iam_permissions(permissions) self.assertEqual(result, permissions) table_api.test_iam_permissions.assert_called_once_with( resource=table.name, permissions=permissions ) def test_backup_factory_defaults(self): from google.cloud.bigtable.backup import Backup instance = self._make_one(self.INSTANCE_ID, None) table = self._make_one(self.TABLE_ID, instance) backup = table.backup(self.BACKUP_ID) self.assertIsInstance(backup, Backup) self.assertEqual(backup.backup_id, self.BACKUP_ID) self.assertIs(backup._instance, instance) self.assertIsNone(backup._cluster) self.assertEqual(backup.table_id, self.TABLE_ID) self.assertIsNone(backup._expire_time) self.assertIsNone(backup._parent) self.assertIsNone(backup._source_table) self.assertIsNone(backup._start_time) self.assertIsNone(backup._end_time) self.assertIsNone(backup._size_bytes) self.assertIsNone(backup._state) def test_backup_factory_non_defaults(self): import datetime from google.cloud._helpers import UTC from google.cloud.bigtable.backup import Backup instance = self._make_one(self.INSTANCE_ID, None) table = self._make_one(self.TABLE_ID, instance) timestamp = datetime.datetime.utcnow().replace(tzinfo=UTC) backup = table.backup( self.BACKUP_ID, cluster_id=self.CLUSTER_ID, expire_time=timestamp, ) self.assertIsInstance(backup, Backup) self.assertEqual(backup.backup_id, self.BACKUP_ID) self.assertIs(backup._instance, instance) self.assertEqual(backup.backup_id, self.BACKUP_ID) self.assertIs(backup._cluster, self.CLUSTER_ID) 
self.assertEqual(backup.table_id, self.TABLE_ID) self.assertEqual(backup._expire_time, timestamp) self.assertIsNone(backup._start_time) self.assertIsNone(backup._end_time) self.assertIsNone(backup._size_bytes) self.assertIsNone(backup._state) def _list_backups_helper(self, cluster_id=None, filter_=None, **kwargs): from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client, bigtable_table_admin_client, ) from google.cloud.bigtable_admin_v2.proto import ( bigtable_table_admin_pb2, table_pb2, ) from google.cloud.bigtable.backup import Backup instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) client = self._make_client( project=self.PROJECT_ID, credentials=_make_credentials(), admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) client._instance_admin_client = instance_api client._table_admin_client = table_api parent = self.INSTANCE_NAME + "/clusters/cluster" backups_pb = bigtable_table_admin_pb2.ListBackupsResponse( backups=[ table_pb2.Backup(name=parent + "/backups/op1"), table_pb2.Backup(name=parent + "/backups/op2"), table_pb2.Backup(name=parent + "/backups/op3"), ] ) api = table_api._inner_api_calls["list_backups"] = mock.Mock( return_value=backups_pb ) backups_filter = "source_table:{}".format(self.TABLE_NAME) if filter_: backups_filter = "({}) AND ({})".format(backups_filter, filter_) backups = table.list_backups(cluster_id=cluster_id, filter_=filter_, **kwargs) for backup in backups: self.assertIsInstance(backup, Backup) if not cluster_id: cluster_id = "-" parent = "{}/clusters/{}".format(self.INSTANCE_NAME, cluster_id) expected_metadata = [ ("x-goog-request-params", "parent={}".format(parent)), ] api.assert_called_once_with( bigtable_table_admin_pb2.ListBackupsRequest( parent=parent, filter=backups_filter, **kwargs ), retry=mock.ANY, timeout=mock.ANY, metadata=expected_metadata, ) def test_list_backups_defaults(self): self._list_backups_helper() def test_list_backups_w_options(self): self._list_backups_helper( cluster_id="cluster", filter_="filter", order_by="order_by", page_size=10 ) def _restore_helper(self, backup_name=None): from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.cloud.bigtable.instance import Instance op_future = object() instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient client = mock.Mock(project=self.PROJECT_ID, instance_admin_client=instance_api) instance = Instance(self.INSTANCE_ID, client=client) table = self._make_one(self.TABLE_ID, instance) api = client.table_admin_client = mock.create_autospec( BigtableTableAdminClient, instance=True ) api.restore_table.return_value = op_future if backup_name: future = table.restore(self.TABLE_ID, backup_name=self.BACKUP_NAME) else: future = table.restore(self.TABLE_ID, self.CLUSTER_ID, self.BACKUP_ID) self.assertIs(future, op_future) api.restore_table.assert_called_once_with( parent=self.INSTANCE_NAME, table_id=self.TABLE_ID, backup=self.BACKUP_NAME, ) def test_restore_table_w_backup_id(self): self._restore_helper() def test_restore_table_w_backup_name(self): self._restore_helper(backup_name=self.BACKUP_NAME) class Test__RetryableMutateRowsWorker(unittest.TestCase): from grpc import StatusCode PROJECT_ID = "project-id" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + 
INSTANCE_ID TABLE_ID = "table-id" # RPC Status Codes SUCCESS = StatusCode.OK.value[0] RETRYABLE_1 = StatusCode.DEADLINE_EXCEEDED.value[0] RETRYABLE_2 = StatusCode.ABORTED.value[0] NON_RETRYABLE = StatusCode.CANCELLED.value[0] @staticmethod def _get_target_class_for_worker(): from google.cloud.bigtable.table import _RetryableMutateRowsWorker return _RetryableMutateRowsWorker def _make_worker(self, *args, **kwargs): return self._get_target_class_for_worker()(*args, **kwargs) @staticmethod def _get_target_class_for_table(): from google.cloud.bigtable.table import Table return Table def _make_table(self, *args, **kwargs): return self._get_target_class_for_table()(*args, **kwargs) @staticmethod def _get_target_client_class(): from google.cloud.bigtable.client import Client return Client def _make_client(self, *args, **kwargs): return self._get_target_client_class()(*args, **kwargs) def _make_responses_statuses(self, codes): from google.rpc.status_pb2 import Status response = [Status(code=code) for code in codes] return response def _make_responses(self, codes): import six from google.cloud.bigtable_v2.proto.bigtable_pb2 import MutateRowsResponse from google.rpc.status_pb2 import Status entries = [ MutateRowsResponse.Entry(index=i, status=Status(code=codes[i])) for i in six.moves.xrange(len(codes)) ] return MutateRowsResponse(entries=entries) def test_callable_empty_rows(self): from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client data_api = mock.create_autospec(bigtable_client.BigtableClient) table_api = mock.create_autospec( bigtable_table_admin_client.BigtableTableAdminClient ) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) worker = self._make_worker(client, table.name, []) statuses = worker() self.assertEqual(len(statuses), 0) def test_callable_no_retry_strategy(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client # Setup: # - Mutate 3 rows. # Action: # - Attempt to mutate the rows w/o any retry strategy. # Expectation: # - Since no retry, should return statuses as they come back. # - Even if there are retryable errors, no retry attempt is made. 
# - State of responses_statuses should be # [success, retryable, non-retryable] data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b"row_key", table=table) row_1.set_cell("cf", b"col", b"value1") row_2 = DirectRow(row_key=b"row_key_2", table=table) row_2.set_cell("cf", b"col", b"value2") row_3 = DirectRow(row_key=b"row_key_3", table=table) row_3.set_cell("cf", b"col", b"value3") response = self._make_responses( [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] ) with mock.patch("google.cloud.bigtable.table.wrap_method") as patched: patched.return_value = mock.Mock(return_value=[response]) worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) statuses = worker(retry=None) result = [status.code for status in statuses] expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] client._table_data_client._inner_api_calls["mutate_rows"].assert_called_once() self.assertEqual(result, expected_result) def test_callable_retry(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import DEFAULT_RETRY from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client # Setup: # - Mutate 3 rows. # Action: # - Initial attempt will mutate all 3 rows. # Expectation: # - First attempt will result in one retryable error. # - Second attempt will result in success for the retry-ed row. # - Check MutateRows is called twice. # - State of responses_statuses should be # [success, success, non-retryable] data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b"row_key", table=table) row_1.set_cell("cf", b"col", b"value1") row_2 = DirectRow(row_key=b"row_key_2", table=table) row_2.set_cell("cf", b"col", b"value2") row_3 = DirectRow(row_key=b"row_key_3", table=table) row_3.set_cell("cf", b"col", b"value3") response_1 = self._make_responses( [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] ) response_2 = self._make_responses([self.SUCCESS]) # Patch the stub used by the API method. 
client._table_data_client._inner_api_calls["mutate_rows"] = mock.Mock( side_effect=[[response_1], [response_2]] ) retry = DEFAULT_RETRY.with_delay(initial=0.1) worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) statuses = worker(retry=retry) result = [status.code for status in statuses] expected_result = [self.SUCCESS, self.SUCCESS, self.NON_RETRYABLE] self.assertEqual( client._table_data_client._inner_api_calls["mutate_rows"].call_count, 2 ) self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_empty_rows(self): from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client table_api = mock.create_autospec( bigtable_table_admin_client.BigtableTableAdminClient ) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) worker = self._make_worker(client, table.name, []) statuses = worker._do_mutate_retryable_rows() self.assertEqual(len(statuses), 0) def test_do_mutate_retryable_rows(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client # Setup: # - Mutate 2 rows. # Action: # - Initial attempt will mutate all 2 rows. # Expectation: # - Expect [success, non-retryable] data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b"row_key", table=table) row_1.set_cell("cf", b"col", b"value1") row_2 = DirectRow(row_key=b"row_key_2", table=table) row_2.set_cell("cf", b"col", b"value2") response = self._make_responses([self.SUCCESS, self.NON_RETRYABLE]) # Patch the stub used by the API method. inner_api_calls = client._table_data_client._inner_api_calls inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) worker = self._make_worker(client, table.name, [row_1, row_2]) statuses = worker._do_mutate_retryable_rows() result = [status.code for status in statuses] expected_result = [self.SUCCESS, self.NON_RETRYABLE] self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_retry(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import _BigtableRetryableError from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client # Setup: # - Mutate 3 rows. # Action: # - Initial attempt will mutate all 3 rows. # Expectation: # - Second row returns retryable error code, so expect a raise. 
# - State of responses_statuses should be # [success, retryable, non-retryable] data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b"row_key", table=table) row_1.set_cell("cf", b"col", b"value1") row_2 = DirectRow(row_key=b"row_key_2", table=table) row_2.set_cell("cf", b"col", b"value2") row_3 = DirectRow(row_key=b"row_key_3", table=table) row_3.set_cell("cf", b"col", b"value3") response = self._make_responses( [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] ) # Patch the stub used by the API method. inner_api_calls = client._table_data_client._inner_api_calls inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) with self.assertRaises(_BigtableRetryableError): worker._do_mutate_retryable_rows() statuses = worker.responses_statuses result = [status.code for status in statuses] expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_second_retry(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import _BigtableRetryableError from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client # Setup: # - Mutate 4 rows. # - First try results: # [success, retryable, non-retryable, retryable] # Action: # - Second try should re-attempt the 'retryable' rows. # Expectation: # - After second try: # [success, success, non-retryable, retryable] # - One of the rows tried second time returns retryable error code, # so expect a raise. # - Exception contains response whose index should be '3' even though # only two rows were retried. data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b"row_key", table=table) row_1.set_cell("cf", b"col", b"value1") row_2 = DirectRow(row_key=b"row_key_2", table=table) row_2.set_cell("cf", b"col", b"value2") row_3 = DirectRow(row_key=b"row_key_3", table=table) row_3.set_cell("cf", b"col", b"value3") row_4 = DirectRow(row_key=b"row_key_4", table=table) row_4.set_cell("cf", b"col", b"value4") response = self._make_responses([self.SUCCESS, self.RETRYABLE_1]) # Patch the stub used by the API method. 
inner_api_calls = client._table_data_client._inner_api_calls inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4]) worker.responses_statuses = self._make_responses_statuses( [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2] ) with self.assertRaises(_BigtableRetryableError): worker._do_mutate_retryable_rows() statuses = worker.responses_statuses result = [status.code for status in statuses] expected_result = [ self.SUCCESS, self.SUCCESS, self.NON_RETRYABLE, self.RETRYABLE_1, ] self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_second_try(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client # Setup: # - Mutate 4 rows. # - First try results: # [success, retryable, non-retryable, retryable] # Action: # - Second try should re-attempt the 'retryable' rows. # Expectation: # - After second try: # [success, non-retryable, non-retryable, success] data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b"row_key", table=table) row_1.set_cell("cf", b"col", b"value1") row_2 = DirectRow(row_key=b"row_key_2", table=table) row_2.set_cell("cf", b"col", b"value2") row_3 = DirectRow(row_key=b"row_key_3", table=table) row_3.set_cell("cf", b"col", b"value3") row_4 = DirectRow(row_key=b"row_key_4", table=table) row_4.set_cell("cf", b"col", b"value4") response = self._make_responses([self.NON_RETRYABLE, self.SUCCESS]) # Patch the stub used by the API method. inner_api_calls = client._table_data_client._inner_api_calls inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4]) worker.responses_statuses = self._make_responses_statuses( [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2] ) statuses = worker._do_mutate_retryable_rows() result = [status.code for status in statuses] expected_result = [ self.SUCCESS, self.NON_RETRYABLE, self.NON_RETRYABLE, self.SUCCESS, ] self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_second_try_no_retryable(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client # Setup: # - Mutate 2 rows. # - First try results: [success, non-retryable] # Action: # - Second try has no row to retry. 
# Expectation: # - After second try: [success, non-retryable] table_api = mock.create_autospec( bigtable_table_admin_client.BigtableTableAdminClient ) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b"row_key", table=table) row_1.set_cell("cf", b"col", b"value1") row_2 = DirectRow(row_key=b"row_key_2", table=table) row_2.set_cell("cf", b"col", b"value2") worker = self._make_worker(client, table.name, [row_1, row_2]) worker.responses_statuses = self._make_responses_statuses( [self.SUCCESS, self.NON_RETRYABLE] ) statuses = worker._do_mutate_retryable_rows() result = [status.code for status in statuses] expected_result = [self.SUCCESS, self.NON_RETRYABLE] self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_mismatch_num_responses(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b"row_key", table=table) row_1.set_cell("cf", b"col", b"value1") row_2 = DirectRow(row_key=b"row_key_2", table=table) row_2.set_cell("cf", b"col", b"value2") response = self._make_responses([self.SUCCESS]) # Patch the stub used by the API method. 
inner_api_calls = client._table_data_client._inner_api_calls inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) worker = self._make_worker(client, table.name, [row_1, row_2]) with self.assertRaises(RuntimeError): worker._do_mutate_retryable_rows() class Test__create_row_request(unittest.TestCase): def _call_fut( self, table_name, start_key=None, end_key=None, filter_=None, limit=None, end_inclusive=False, app_profile_id=None, row_set=None, ): from google.cloud.bigtable.table import _create_row_request return _create_row_request( table_name, start_key=start_key, end_key=end_key, filter_=filter_, limit=limit, end_inclusive=end_inclusive, app_profile_id=app_profile_id, row_set=row_set, ) def test_table_name_only(self): table_name = "table_name" result = self._call_fut(table_name) expected_result = _ReadRowsRequestPB(table_name=table_name) self.assertEqual(result, expected_result) def test_row_range_row_set_conflict(self): with self.assertRaises(ValueError): self._call_fut(None, end_key=object(), row_set=object()) def test_row_range_start_key(self): table_name = "table_name" start_key = b"start_key" result = self._call_fut(table_name, start_key=start_key) expected_result = _ReadRowsRequestPB(table_name=table_name) expected_result.rows.row_ranges.add(start_key_closed=start_key) self.assertEqual(result, expected_result) def test_row_range_end_key(self): table_name = "table_name" end_key = b"end_key" result = self._call_fut(table_name, end_key=end_key) expected_result = _ReadRowsRequestPB(table_name=table_name) expected_result.rows.row_ranges.add(end_key_open=end_key) self.assertEqual(result, expected_result) def test_row_range_both_keys(self): table_name = "table_name" start_key = b"start_key" end_key = b"end_key" result = self._call_fut(table_name, start_key=start_key, end_key=end_key) expected_result = _ReadRowsRequestPB(table_name=table_name) expected_result.rows.row_ranges.add( start_key_closed=start_key, end_key_open=end_key ) self.assertEqual(result, expected_result) def test_row_range_both_keys_inclusive(self): table_name = "table_name" start_key = b"start_key" end_key = b"end_key" result = self._call_fut( table_name, start_key=start_key, end_key=end_key, end_inclusive=True ) expected_result = _ReadRowsRequestPB(table_name=table_name) expected_result.rows.row_ranges.add( start_key_closed=start_key, end_key_closed=end_key ) self.assertEqual(result, expected_result) def test_with_filter(self): from google.cloud.bigtable.row_filters import RowSampleFilter table_name = "table_name" row_filter = RowSampleFilter(0.33) result = self._call_fut(table_name, filter_=row_filter) expected_result = _ReadRowsRequestPB( table_name=table_name, filter=row_filter.to_pb() ) self.assertEqual(result, expected_result) def test_with_limit(self): table_name = "table_name" limit = 1337 result = self._call_fut(table_name, limit=limit) expected_result = _ReadRowsRequestPB(table_name=table_name, rows_limit=limit) self.assertEqual(result, expected_result) def test_with_row_set(self): from google.cloud.bigtable.row_set import RowSet table_name = "table_name" row_set = RowSet() result = self._call_fut(table_name, row_set=row_set) expected_result = _ReadRowsRequestPB(table_name=table_name) self.assertEqual(result, expected_result) def test_with_app_profile_id(self): table_name = "table_name" limit = 1337 app_profile_id = "app-profile-id" result = self._call_fut(table_name, limit=limit, app_profile_id=app_profile_id) expected_result = _ReadRowsRequestPB( table_name=table_name, rows_limit=limit, 
app_profile_id=app_profile_id ) self.assertEqual(result, expected_result) def _ReadRowsRequestPB(*args, **kw): from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 return messages_v2_pb2.ReadRowsRequest(*args, **kw) class Test_ClusterState(unittest.TestCase): def test___eq__(self): from google.cloud.bigtable.enums import Table as enum_table from google.cloud.bigtable.table import ClusterState READY = enum_table.ReplicationState.READY state1 = ClusterState(READY) state2 = ClusterState(READY) self.assertEqual(state1, state2) def test___eq__type_differ(self): from google.cloud.bigtable.enums import Table as enum_table from google.cloud.bigtable.table import ClusterState READY = enum_table.ReplicationState.READY state1 = ClusterState(READY) state2 = object() self.assertNotEqual(state1, state2) def test___ne__same_value(self): from google.cloud.bigtable.enums import Table as enum_table from google.cloud.bigtable.table import ClusterState READY = enum_table.ReplicationState.READY state1 = ClusterState(READY) state2 = ClusterState(READY) comparison_val = state1 != state2 self.assertFalse(comparison_val) def test___ne__(self): from google.cloud.bigtable.enums import Table as enum_table from google.cloud.bigtable.table import ClusterState READY = enum_table.ReplicationState.READY INITIALIZING = enum_table.ReplicationState.INITIALIZING state1 = ClusterState(READY) state2 = ClusterState(INITIALIZING) self.assertNotEqual(state1, state2) def test__repr__(self): from google.cloud.bigtable.enums import Table as enum_table from google.cloud.bigtable.table import ClusterState STATE_NOT_KNOWN = enum_table.ReplicationState.STATE_NOT_KNOWN INITIALIZING = enum_table.ReplicationState.INITIALIZING PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE UNPLANNED_MAINTENANCE = enum_table.ReplicationState.UNPLANNED_MAINTENANCE READY = enum_table.ReplicationState.READY replication_dict = { STATE_NOT_KNOWN: "STATE_NOT_KNOWN", INITIALIZING: "INITIALIZING", PLANNED_MAINTENANCE: "PLANNED_MAINTENANCE", UNPLANNED_MAINTENANCE: "UNPLANNED_MAINTENANCE", READY: "READY", } self.assertEqual( str(ClusterState(STATE_NOT_KNOWN)), replication_dict[STATE_NOT_KNOWN] ) self.assertEqual( str(ClusterState(INITIALIZING)), replication_dict[INITIALIZING] ) self.assertEqual( str(ClusterState(PLANNED_MAINTENANCE)), replication_dict[PLANNED_MAINTENANCE], ) self.assertEqual( str(ClusterState(UNPLANNED_MAINTENANCE)), replication_dict[UNPLANNED_MAINTENANCE], ) self.assertEqual(str(ClusterState(READY)), replication_dict[READY]) self.assertEqual( ClusterState(STATE_NOT_KNOWN).replication_state, STATE_NOT_KNOWN ) self.assertEqual(ClusterState(INITIALIZING).replication_state, INITIALIZING) self.assertEqual( ClusterState(PLANNED_MAINTENANCE).replication_state, PLANNED_MAINTENANCE ) self.assertEqual( ClusterState(UNPLANNED_MAINTENANCE).replication_state, UNPLANNED_MAINTENANCE ) self.assertEqual(ClusterState(READY).replication_state, READY) def _ReadRowsResponseCellChunkPB(*args, **kw): from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 family_name = kw.pop("family_name") qualifier = kw.pop("qualifier") message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw) message.family_name.value = family_name message.qualifier.value = qualifier return message def _ReadRowsResponsePB(*args, **kw): from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 return messages_v2_pb2.ReadRowsResponse(*args, **kw) def _mutate_rows_request_pb(*args, **kw): from 
google.cloud.bigtable_v2.proto import bigtable_pb2 as data_messages_v2_pb2 return data_messages_v2_pb2.MutateRowsRequest(*args, **kw) class _MockReadRowsIterator(object): def __init__(self, *values): self.iter_values = iter(values) def next(self): return next(self.iter_values) __next__ = next class _MockFailureIterator_1(object): def next(self): raise DeadlineExceeded("Failed to read from server") __next__ = next class _MockFailureIterator_2(object): def __init__(self, *values): self.iter_values = values[0] self.calls = 0 def next(self): self.calls += 1 if self.calls == 1: return self.iter_values[0] else: raise DeadlineExceeded("Failed to read from server") __next__ = next class _ReadRowsResponseV2(object): def __init__(self, chunks, last_scanned_row_key=""): self.chunks = chunks self.last_scanned_row_key = last_scanned_row_key def _TablePB(*args, **kw): from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 return table_v2_pb2.Table(*args, **kw) def _ColumnFamilyPB(*args, **kw): from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 return table_v2_pb2.ColumnFamily(*args, **kw) def _ClusterStatePB(replication_state): from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 return table_v2_pb2.Table.ClusterState(replication_state=replication_state) def _read_rows_retry_exception(exc): return isinstance(exc, DeadlineExceeded)
_get_target_class
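The _RetryableMutateRowsWorker tests above all exercise the same retry contract: an initial MutateRows attempt returns one status per row, and only rows whose status is a retryable code (DEADLINE_EXCEEDED or ABORTED in these tests) are re-sent on the next attempt, while successes and non-retryable failures keep their original status. Below is a minimal, library-independent sketch of that bookkeeping, assuming plain integer gRPC status codes instead of the real google.rpc Status messages; it is an illustration, not the google-cloud-bigtable implementation.

# Illustrative sketch only -- not the google-cloud-bigtable implementation.
# Numeric codes mirror the grpc StatusCode constants used in the tests above.
SUCCESS = 0          # StatusCode.OK
RETRYABLE = {4, 10}  # DEADLINE_EXCEEDED, ABORTED

def mutate_with_retry(rows, send_batch, max_attempts=3):
    """Re-send only the rows whose last status was retryable."""
    statuses = [None] * len(rows)
    pending = list(range(len(rows)))  # indices still to (re)send
    for _ in range(max_attempts):
        if not pending:
            break
        batch_statuses = send_batch([rows[i] for i in pending])
        retry_next = []
        for i, code in zip(pending, batch_statuses):
            statuses[i] = code            # keep the latest status per row
            if code in RETRYABLE:
                retry_next.append(i)      # only these rows are sent again
        pending = retry_next
    return statuses

Driving it with a send_batch stub that first returns [0, 4, 1] (success, deadline-exceeded, cancelled) and then [0] reproduces the [success, success, non-retryable] outcome asserted in test_callable_retry.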
output-schema.d.ts
export interface Schema {
  error?: string;
  info?: {
    [key: string]: any;
  };
  success: boolean;
  target?: Target;
}

export interface Target {
  configuration?: string;
  project?: string;
  target?: string;
}
main.rs
use crate::{Part, try_parse_input};
use crate::days::day_21::multiverse_game::count_won_games;
use crate::days::day_21::score_computer::Player;
use crate::problem::{AOCResult, Problem};

#[allow(dead_code)]
pub fn day21_launch(part: Part) -> AOCResult<String> {
    let starting_positions = parse_input(false)?;
    match part {
        Part::Part1 => part1(&starting_positions),
        Part::Part2 => part2(&starting_positions)
    }
}

fn part1(start_position:&(usize,usize)) -> AOCResult<String> {
    let player1 = Player::player1(start_position.0);
    let player2 = Player::player2(start_position.1);

    let player1_nb_rolls_to_win = player1.nb_rolls_to_win();
    let player2_nb_rolls_to_win = player2.nb_rolls_to_win();

    let result = if player1_nb_rolls_to_win < player2_nb_rolls_to_win {
        player2.score_at_turn(player1.winning_turn()-1)*player1_nb_rolls_to_win
    } else {
        player1.score_at_turn(player2.winning_turn())*player2_nb_rolls_to_win
    };

    Ok(result.to_string())
}

fn part2(starting_position:&(usize,usize)) -> AOCResult<String> {
    let games = count_won_games(starting_position.0, starting_position.1);
    Ok(games.player1().max(games.player2()).to_string())
}

#[allow(dead_code)]
fn parse_input(for_test:bool) -> AOCResult<(usize,usize)> {
    let positions:Vec<usize> = Problem::factory(for_test)(21)
        .read_input_as_mapped_lines(|l| parse_starting_position(l).unwrap())?;
    Ok((positions[0], positions[1]))
}

pub fn parse_starting_position(line:&str) -> AOCResult<usize> {
    try_parse_input!(line.split_once(":")
        .ok_or_else(|| "Cannot parse starting position".to_string())
        .map(|(_,val)| val.trim_start()).unwrap(),usize)
}

#[cfg(test)]
#[allow(dead_code)]
mod tests {
    use crate::days::day_21::main::{parse_input, part1, part2};
    use crate::days::day_21::score_computer::Player;

    #[test]
    fn day21_do_100_turn() {
        let player1 = Player::player1(4);
        let player2 = Player::player2(8);

        for i in 120..200 {
            println!("{} {} {}", i, player1.score_at_turn(i), player2.score_at_turn(i));
        }
        println!("{}", player1.winning_turn());
    }

    #[test]
    fn day21_score_player_at_first_turn() {
        let player = Player::player1(4);
        assert_eq!(player.score_at_turn(1), 10);
    }

    #[test]
    fn day21_score_player_at_second_turn() {
        let player = Player::player1(4);
        assert_eq!(player.score_at_turn(2), 14);
    }

    #[test]
    fn
() {
        let player = Player::player1(4);
        assert_eq!(player.score_at_turn(3), 20);
    }

    #[test]
    fn day21_score_player_at_fourth_turn() {
        let player = Player::player1(4);
        assert_eq!(player.score_at_turn(4), 26);
    }

    #[test]
    fn day21_score_player1_winning_turn() {
        let player = Player::player1(4);
        assert_eq!(player.nb_rolls_to_win(), 993);
    }

    #[test]
    fn day21_part1_test() {
        let start_position = parse_input(true).unwrap();
        let result = part1(&start_position).unwrap();
        assert_eq!(result, "739785")
    }

    #[test]
    fn day21_part2_test() {
        let starting_positions = parse_input(true).unwrap();
        let result = part2(&starting_positions).unwrap();
        assert_eq!(result, "444356092776315")
    }
}
day21_score_player_at_third_turn
DisjointSet.js
import DisjointSetItem from './DisjointSetItem';

export default class
{
  constructor(keyExtraction) {
    this.items = {};
    this.keyExtraction = keyExtraction;
  }

  makeSet(value) {
    const newItem = new DisjointSetItem(value, this.keyExtraction);
    this.items[newItem.getKey()] = newItem;
    return this;
  }

  find(value) {
    const templateDisjointItem = new DisjointSetItem(value, this.keyExtraction);

    // Try to find item itself.
    const requiredDisjointItem = this.items[templateDisjointItem.getKey()];
    if (!requiredDisjointItem) {
      return null;
    }

    return requiredDisjointItem.getRoot().getKey();
  }

  union(valueA, valueB) {
    const itemARoot = this.find(valueA);
    const itemBRoot = this.find(valueB);

    if (!itemARoot || !itemBRoot) {
      throw new Error('Trying to merge two sets that don\'t exist');
    }

    if (itemARoot === itemBRoot) return this;

    const rootA = this.items[itemARoot];
    const rootB = this.items[itemBRoot];

    if (rootA.getRank() >= rootB.getRank()) {
      rootA.addChild(rootB);
      return this;
    }

    rootB.addChild(rootA);
    return this;
  }

  inSameSet(valueA, valueB) {
    const itemARoot = this.find(valueA);
    const itemBRoot = this.find(valueB);

    if (!itemARoot || !itemBRoot) {
      throw new Error('One or more sets doesn\'t exist...');
    }

    return itemARoot === itemBRoot;
  }
}
DisjointSet
issue-48803.rs
fn flatten<'a, 'b, T>(x: &'a &'b T) -> &'a T {
    x
}

fn
() {
    let mut x = "original";
    let y = &x;
    let z = &y;
    let w = flatten(z);
    x = "modified";
    //~^ ERROR cannot assign to `x` because it is borrowed [E0506]
    println!("{}", w); // prints "modified"
}
main
replicator.rs
use crate::blob_fetch_stage::BlobFetchStage; #[cfg(feature = "chacha")] use crate::chacha::{chacha_cbc_encrypt_ledger, CHACHA_BLOCK_SIZE}; use crate::client::mk_client; use crate::cluster_info::{ClusterInfo, Node, NodeInfo}; use crate::db_ledger::DbLedger; use crate::gossip_service::GossipService; use crate::leader_scheduler::LeaderScheduler; use crate::result::{self, Result}; use crate::rpc_request::{RpcClient, RpcRequest, RpcRequestHandler}; use crate::service::Service; use crate::storage_stage::{get_segment_from_entry, ENTRIES_PER_SEGMENT}; use crate::streamer::BlobReceiver; use crate::thin_client::{retry_get_balance, ThinClient}; use crate::window_service::window_service; use rand::thread_rng; use rand::Rng; use solana_drone::drone::{request_airdrop_transaction, DRONE_PORT}; use solana_sdk::hash::{Hash, Hasher}; use solana_sdk::signature::{Keypair, KeypairUtil, Signature}; use solana_sdk::storage_program::StorageTransaction; use solana_sdk::transaction::Transaction; use std::fs::File; use std::io; use std::io::BufReader; use std::io::Read; use std::io::Seek; use std::io::SeekFrom; use std::io::{Error, ErrorKind}; use std::mem::size_of; use std::net::UdpSocket; use std::path::Path; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::channel; use std::sync::{Arc, RwLock}; use std::thread::sleep; use std::thread::JoinHandle; use std::time::{Duration, Instant}; pub struct Replicator { gossip_service: GossipService, fetch_stage: BlobFetchStage, t_window: JoinHandle<()>, pub retransmit_receiver: BlobReceiver, exit: Arc<AtomicBool>, entry_height: u64, } pub fn sample_file(in_path: &Path, sample_offsets: &[u64]) -> io::Result<Hash> { let in_file = File::open(in_path)?; let metadata = in_file.metadata()?; let mut buffer_file = BufReader::new(in_file); let mut hasher = Hasher::default(); let sample_size = size_of::<Hash>(); let sample_size64 = sample_size as u64; let mut buf = vec![0; sample_size]; let file_len = metadata.len(); if file_len < sample_size64 { return Err(Error::new(ErrorKind::Other, "file too short!")); } for offset in sample_offsets { if *offset > (file_len - sample_size64) / sample_size64 { return Err(Error::new(ErrorKind::Other, "offset too large")); } buffer_file.seek(SeekFrom::Start(*offset * sample_size64))?; trace!("sampling @ {} ", *offset); match buffer_file.read(&mut buf) { Ok(size) => { assert_eq!(size, buf.len()); hasher.hash(&buf); } Err(e) => { warn!("Error sampling file"); return Err(e); } } } Ok(hasher.result()) } fn get_entry_heights_from_last_id( signature: &ring::signature::Signature, storage_entry_height: u64, ) -> (u64, u64) { let signature_vec = signature.as_ref(); let mut segment_index = u64::from(signature_vec[0]) | (u64::from(signature_vec[1]) << 8) | (u64::from(signature_vec[1]) << 16) | (u64::from(signature_vec[2]) << 24); let max_segment_index = get_segment_from_entry(storage_entry_height); segment_index %= max_segment_index as u64; let entry_height = segment_index * ENTRIES_PER_SEGMENT; let max_entry_height = entry_height + ENTRIES_PER_SEGMENT; (entry_height, max_entry_height) } impl Replicator { /// Returns a Result that contains a replicator on success /// /// # Arguments /// * `ledger_path` - (Not actually optional) path to where the ledger will be stored. /// Causes panic if none /// * `node` - The replicator node /// * `leader_info` - NodeInfo representing the leader /// * `keypair` - Keypair for this replicator /// * `timeout` - (optional) timeout for polling for leader/downloading the ledger. 
Defaults to /// 30 seconds #[allow(clippy::new_ret_no_self)] pub fn new( ledger_path: Option<&str>, node: Node, leader_info: &NodeInfo, keypair: &Keypair, timeout: Option<Duration>, ) -> Result<Self> { let exit = Arc::new(AtomicBool::new(false)); let done = Arc::new(AtomicBool::new(false)); let timeout = timeout.unwrap_or_else(|| Duration::new(30, 0)); info!("Replicator: id: {}", keypair.pubkey()); info!("Creating cluster info...."); let cluster_info = Arc::new(RwLock::new(ClusterInfo::new(node.info.clone()))); let leader_pubkey = leader_info.id; { let mut cluster_info_w = cluster_info.write().unwrap(); cluster_info_w.insert_info(leader_info.clone()); cluster_info_w.set_leader(leader_info.id); } // Create DbLedger, eventually will simply repurpose the input // ledger path as the DbLedger path once we replace the ledger with // DbLedger. Note for now, this ledger will not contain any of the existing entries // in the ledger located at ledger_path, and will only append on newly received // entries after being passed to window_service let db_ledger = Arc::new( DbLedger::open(&ledger_path.unwrap()) .expect("Expected to be able to open database ledger"), ); let gossip_service = GossipService::new( &cluster_info, Some(db_ledger.clone()), node.sockets.gossip, exit.clone(), ); info!("polling for leader"); let leader = Self::poll_for_leader(&cluster_info, timeout)?; info!("Got leader: {:?}", leader); let (storage_last_id, storage_entry_height) = Self::poll_for_last_id_and_entry_height(&cluster_info)?; let signature = keypair.sign(storage_last_id.as_ref()); let (entry_height, max_entry_height) = get_entry_heights_from_last_id(&signature, storage_entry_height); info!("replicating entry_height: {}", entry_height); let repair_socket = Arc::new(node.sockets.repair); let mut blob_sockets: Vec<Arc<UdpSocket>> = node.sockets.tvu.into_iter().map(Arc::new).collect(); blob_sockets.push(repair_socket.clone()); let (fetch_stage, blob_fetch_receiver) = BlobFetchStage::new_multi_socket(blob_sockets, exit.clone()); // todo: pull blobs off the retransmit_receiver and recycle them? 
let (retransmit_sender, retransmit_receiver) = channel(); let (entry_sender, entry_receiver) = channel(); let t_window = window_service( db_ledger.clone(), cluster_info.clone(), 0, entry_height, max_entry_height, blob_fetch_receiver, Some(entry_sender), retransmit_sender, repair_socket, Arc::new(RwLock::new(LeaderScheduler::from_bootstrap_leader( leader_pubkey, ))), done.clone(), ); info!("window created, waiting for ledger download done"); let start = Instant::now(); let mut received_so_far = 0; while !done.load(Ordering::Relaxed) { sleep(Duration::from_millis(100)); let elapsed = start.elapsed(); received_so_far += entry_receiver.try_recv().map(|v| v.len()).unwrap_or(0); if received_so_far == 0 && elapsed > timeout { return Err(result::Error::IO(io::Error::new( ErrorKind::TimedOut, "Timed out waiting to receive any blocks", ))); } } info!("Done receiving entries from window_service"); let mut node_info = node.info.clone(); node_info.tvu = "0.0.0.0:0".parse().unwrap(); { let mut cluster_info_w = cluster_info.write().unwrap(); cluster_info_w.insert_info(node_info); } let mut client = mk_client(&leader); Self::get_airdrop_tokens(&mut client, keypair, &leader_info); info!("Done downloading ledger at {}", ledger_path.unwrap()); let ledger_path = Path::new(ledger_path.unwrap()); let ledger_data_file_encrypted = ledger_path.join("ledger.enc"); let mut sampling_offsets = Vec::new(); #[cfg(not(feature = "chacha"))] sampling_offsets.push(0); #[cfg(feature = "chacha")] { use crate::storage_stage::NUM_STORAGE_SAMPLES; use rand::{Rng, SeedableRng}; use rand_chacha::ChaChaRng; let mut ivec = [0u8; 64]; ivec.copy_from_slice(signature.as_ref()); let num_encrypted_bytes = chacha_cbc_encrypt_ledger( &db_ledger, entry_height, &ledger_data_file_encrypted, &mut ivec, )?; let num_chacha_blocks = num_encrypted_bytes / CHACHA_BLOCK_SIZE; let mut rng_seed = [0u8; 32]; rng_seed.copy_from_slice(&signature.as_ref()[0..32]); let mut rng = ChaChaRng::from_seed(rng_seed); for _ in 0..NUM_STORAGE_SAMPLES { sampling_offsets.push(rng.gen_range(0, num_chacha_blocks) as u64); } } info!("Done encrypting the ledger"); match sample_file(&ledger_data_file_encrypted, &sampling_offsets) { Ok(hash) => { let last_id = client.get_last_id(); info!("sampled hash: {}", hash); let mut tx = Transaction::storage_new_mining_proof( &keypair, hash, last_id, entry_height, Signature::new(signature.as_ref()), ); client .retry_transfer(&keypair, &mut tx, 10) .expect("transfer didn't work!"); } Err(e) => info!("Error occurred while sampling: {:?}", e), } Ok(Self { gossip_service, fetch_stage, t_window, retransmit_receiver, exit, entry_height, }) } pub fn close(self) { self.exit.store(true, Ordering::Relaxed); self.join() } pub fn join(self) { self.gossip_service.join().unwrap(); self.fetch_stage.join().unwrap(); self.t_window.join().unwrap(); // Drain the queue here to prevent self.retransmit_receiver from being dropped // before the window_service thread is joined let mut retransmit_queue_count = 0; while let Ok(_blob) = self.retransmit_receiver.recv_timeout(Duration::new(1, 0)) { retransmit_queue_count += 1; } debug!("retransmit channel count: {}", retransmit_queue_count); } pub fn entry_height(&self) -> u64 { self.entry_height } fn poll_for_leader( cluster_info: &Arc<RwLock<ClusterInfo>>, timeout: Duration, ) -> Result<NodeInfo> { let start = Instant::now(); loop { if let Some(l) = cluster_info.read().unwrap().get_gossip_top_leader() { return Ok(l.clone()); } let elapsed = start.elapsed(); if elapsed > timeout { return 
Err(result::Error::IO(io::Error::new( ErrorKind::TimedOut, "Timed out waiting to receive any blocks", ))); } sleep(Duration::from_millis(900)); info!("{}", cluster_info.read().unwrap().node_info_trace()); } }
let rpc_client = { let cluster_info = cluster_info.read().unwrap(); let rpc_peers = cluster_info.rpc_peers(); debug!("rpc peers: {:?}", rpc_peers); let node_idx = thread_rng().gen_range(0, rpc_peers.len()); RpcClient::new_from_socket(rpc_peers[node_idx].rpc) }; let storage_last_id = rpc_client .make_rpc_request(2, RpcRequest::GetStorageMiningLastId, None) .expect("rpc request") .to_string(); let storage_entry_height = rpc_client .make_rpc_request(2, RpcRequest::GetStorageMiningEntryHeight, None) .expect("rpc request") .as_u64() .unwrap(); if get_segment_from_entry(storage_entry_height) != 0 { return Ok((storage_last_id, storage_entry_height)); } info!("max entry_height: {}", storage_entry_height); sleep(Duration::from_secs(3)); } Err(Error::new( ErrorKind::Other, "Couldn't get last_id or entry_height", ))? } fn get_airdrop_tokens(client: &mut ThinClient, keypair: &Keypair, leader_info: &NodeInfo) { if retry_get_balance(client, &keypair.pubkey(), None).is_none() { let mut drone_addr = leader_info.tpu; drone_addr.set_port(DRONE_PORT); let airdrop_amount = 1; let last_id = client.get_last_id(); match request_airdrop_transaction( &drone_addr, &keypair.pubkey(), airdrop_amount, last_id, ) { Ok(transaction) => { let signature = client.transfer_signed(&transaction).unwrap(); client.poll_for_signature(&signature).unwrap(); } Err(err) => { panic!( "Error requesting airdrop: {:?} to addr: {:?} amount: {}", err, drone_addr, airdrop_amount ); } }; } } } #[cfg(test)] mod tests { use crate::replicator::sample_file; use solana_sdk::hash::Hash; use solana_sdk::signature::{Keypair, KeypairUtil}; use std::fs::File; use std::fs::{create_dir_all, remove_file}; use std::io::Write; use std::mem::size_of; use std::path::PathBuf; fn tmp_file_path(name: &str) -> PathBuf { use std::env; let out_dir = env::var("OUT_DIR").unwrap_or_else(|_| "target".to_string()); let keypair = Keypair::new(); let mut path = PathBuf::new(); path.push(out_dir); path.push("tmp"); create_dir_all(&path).unwrap(); path.push(format!("{}-{}", name, keypair.pubkey())); path } #[test] fn test_sample_file() { solana_logger::setup(); let in_path = tmp_file_path("test_sample_file_input.txt"); let num_strings = 4096; let string = "12foobar"; { let mut in_file = File::create(&in_path).unwrap(); for _ in 0..num_strings { in_file.write(string.as_bytes()).unwrap(); } } let num_samples = (string.len() * num_strings / size_of::<Hash>()) as u64; let samples: Vec<_> = (0..num_samples).collect(); let res = sample_file(&in_path, samples.as_slice()); let ref_hash: Hash = Hash::new(&[ 173, 251, 182, 165, 10, 54, 33, 150, 133, 226, 106, 150, 99, 192, 179, 1, 230, 144, 151, 126, 18, 191, 54, 67, 249, 140, 230, 160, 56, 30, 170, 52, ]); let res = res.unwrap(); assert_eq!(res, ref_hash); // Sample just past the end assert!(sample_file(&in_path, &[num_samples]).is_err()); remove_file(&in_path).unwrap(); } #[test] fn test_sample_file_invalid_offset() { let in_path = tmp_file_path("test_sample_file_invalid_offset_input.txt"); { let mut in_file = File::create(&in_path).unwrap(); for _ in 0..4096 { in_file.write("123456foobar".as_bytes()).unwrap(); } } let samples = [0, 200000]; let res = sample_file(&in_path, &samples); assert!(res.is_err()); remove_file(in_path).unwrap(); } #[test] fn test_sample_file_missing_file() { let in_path = tmp_file_path("test_sample_file_that_doesnt_exist.txt"); let samples = [0, 5]; let res = sample_file(&in_path, &samples); assert!(res.is_err()); } }
fn poll_for_last_id_and_entry_height( cluster_info: &Arc<RwLock<ClusterInfo>>, ) -> Result<(String, u64)> { for _ in 0..10 {
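For reference, sample_file in the replicator code above reads one hash-sized chunk at each requested offset (offsets are expressed in multiples of the hash size), feeds every chunk into a running hasher, and rejects offsets that would run past the end of the file. A rough Python sketch of that sampling logic, assuming SHA-256 stands in for the SDK's Hasher type:

import hashlib
import os

SAMPLE_SIZE = 32  # size of a Hash in bytes

def sample_file(path, sample_offsets):
    """Hash SAMPLE_SIZE-byte chunks taken at the given chunk offsets."""
    file_len = os.path.getsize(path)
    if file_len < SAMPLE_SIZE:
        raise ValueError("file too short!")
    hasher = hashlib.sha256()
    with open(path, "rb") as f:
        for offset in sample_offsets:
            # Same bound as the Rust version: the chunk must fit in the file.
            if offset > (file_len - SAMPLE_SIZE) // SAMPLE_SIZE:
                raise ValueError("offset too large")
            f.seek(offset * SAMPLE_SIZE)
            hasher.update(f.read(SAMPLE_SIZE))
    return hasher.digest()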
0005_auto_20200730_1555.py
# Generated by Django 3.0.8 on 2020-07-30 06:55

from django.db import migrations, models

import kcss.models


class Migration(migrations.Migration):

    dependencies = [
        ('kcss', '0004_auto_20200729_2327'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='conference',
            name='published_years',
        ),
        migrations.AlterField(
            model_name='publication',
            name='year',
            field=models.IntegerField(validators=[kcss.models.min_year_validator, kcss.models.max_year_validator]),
        ),
    ]
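The AlterField above attaches kcss.models.min_year_validator and max_year_validator to Publication.year, but those validators are defined outside this migration. A hypothetical sketch of what such range validators usually look like in Django; the 1900/2100 bounds are placeholders, not values taken from the kcss app:

# Hypothetical validators -- the real bounds live in kcss/models.py.
from django.core.exceptions import ValidationError

def min_year_validator(value):
    if value < 1900:  # assumed lower bound
        raise ValidationError("%(value)s is earlier than the minimum year.",
                              params={"value": value})

def max_year_validator(value):
    if value > 2100:  # assumed upper bound
        raise ValidationError("%(value)s is later than the maximum year.",
                              params={"value": value})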
generated.go
// This file was generated by github.com/vektah/gqlgen, DO NOT EDIT package scalars import ( "bytes" context "context" external "external" strconv "strconv" time "time" graphql "github.com/vektah/gqlgen/graphql" introspection "github.com/vektah/gqlgen/neelance/introspection" query "github.com/vektah/gqlgen/neelance/query" schema "github.com/vektah/gqlgen/neelance/schema" ) func MakeExecutableSchema(resolvers Resolvers) graphql.ExecutableSchema { return &executableSchema{resolvers: resolvers} } type Resolvers interface { Query_user(ctx context.Context, id external.ObjectID) (*User, error) Query_search(ctx context.Context, input SearchArgs) ([]User, error) User_primitiveResolver(ctx context.Context, obj *User) (string, error) User_customResolver(ctx context.Context, obj *User) (Point, error) } type executableSchema struct { resolvers Resolvers } func (e *executableSchema) Schema() *schema.Schema { return parsedSchema } func (e *executableSchema) Query(ctx context.Context, op *query.Operation) *graphql.Response { ec := executionContext{graphql.GetRequestContext(ctx), e.resolvers} buf := ec.RequestMiddleware(ctx, func(ctx context.Context) []byte { data := ec._Query(ctx, op.Selections) var buf bytes.Buffer data.MarshalGQL(&buf) return buf.Bytes() }) return &graphql.Response{ Data: buf, Errors: ec.Errors, } } func (e *executableSchema) Mutation(ctx context.Context, op *query.Operation) *graphql.Response { return graphql.ErrorResponse(ctx, "mutations are not supported") } func (e *executableSchema) Subscription(ctx context.Context, op *query.Operation) func() *graphql.Response { return graphql.OneShot(graphql.ErrorResponse(ctx, "subscriptions are not supported")) } type executionContext struct { *graphql.RequestContext resolvers Resolvers } var addressImplementors = []string{"Address"} // nolint: gocyclo, errcheck, gas, goconst func (ec *executionContext) _Address(ctx context.Context, sel []query.Selection, obj *Address) graphql.Marshaler { fields := graphql.CollectFields(ec.Doc, sel, addressImplementors, ec.Variables) out := graphql.NewOrderedMap(len(fields)) for i, field := range fields { out.Keys[i] = field.Alias switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Address") case "id": out.Values[i] = ec._Address_id(ctx, field, obj) case "location": out.Values[i] = ec._Address_location(ctx, field, obj) default: panic("unknown field " + strconv.Quote(field.Name)) } } return out } func (ec *executionContext) _Address_id(ctx context.Context, field graphql.CollectedField, obj *Address) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "Address" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.ID return MarshalID(res) } func (ec *executionContext) _Address_location(ctx context.Context, field graphql.CollectedField, obj *Address) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "Address" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Location if res == nil { return graphql.Null } return *res } var queryImplementors = []string{"Query"} // nolint: gocyclo, errcheck, gas, goconst func (ec *executionContext) _Query(ctx context.Context, sel []query.Selection) graphql.Marshaler { fields := graphql.CollectFields(ec.Doc, sel, queryImplementors, ec.Variables) ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{ Object: "Query", }) out := graphql.NewOrderedMap(len(fields)) for i, field := range fields { out.Keys[i] = field.Alias switch 
field.Name { case "__typename": out.Values[i] = graphql.MarshalString("Query") case "user": out.Values[i] = ec._Query_user(ctx, field) case "search": out.Values[i] = ec._Query_search(ctx, field) case "__schema": out.Values[i] = ec._Query___schema(ctx, field) case "__type": out.Values[i] = ec._Query___type(ctx, field) default: panic("unknown field " + strconv.Quote(field.Name)) } } return out } func (ec *executionContext) _Query_user(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { args := map[string]interface{}{} var arg0 external.ObjectID if tmp, ok := field.Args["id"]; ok { var err error arg0, err = UnmarshalID(tmp) if err != nil { ec.Error(ctx, err) return graphql.Null } } args["id"] = arg0 ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{ Object: "Query", Args: args, Field: field, }) return graphql.Defer(func() (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { userErr := ec.Recover(ctx, r) ec.Error(ctx, userErr) ret = graphql.Null } }() resTmp, err := ec.ResolverMiddleware(ctx, func(ctx context.Context) (interface{}, error) { return ec.resolvers.Query_user(ctx, args["id"].(external.ObjectID)) }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { return graphql.Null } res := resTmp.(*User) if res == nil { return graphql.Null } return ec._User(ctx, field.Selections, res) }) } func (ec *executionContext) _Query_search(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { args := map[string]interface{}{} var arg0 SearchArgs if tmp, ok := field.Args["input"]; ok { var err error arg0, err = UnmarshalSearchArgs(tmp) if err != nil { ec.Error(ctx, err) return graphql.Null } } else { var tmp interface{} = map[string]interface{}{"location": "37,144"} var err error arg0, err = UnmarshalSearchArgs(tmp) if err != nil { ec.Error(ctx, err) return graphql.Null } } args["input"] = arg0 ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{ Object: "Query", Args: args, Field: field, }) return graphql.Defer(func() (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { userErr := ec.Recover(ctx, r) ec.Error(ctx, userErr) ret = graphql.Null } }() resTmp, err := ec.ResolverMiddleware(ctx, func(ctx context.Context) (interface{}, error) { return ec.resolvers.Query_search(ctx, args["input"].(SearchArgs)) }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { return graphql.Null } res := resTmp.([]User) arr1 := graphql.Array{} for idx1 := range res { arr1 = append(arr1, func() graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.PushIndex(idx1) defer rctx.Pop() return ec._User(ctx, field.Selections, &res[idx1]) }()) } return arr1 }) } func (ec *executionContext) _Query___schema(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "Query" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := ec.introspectSchema() if res == nil { return graphql.Null } return ec.___Schema(ctx, field.Selections, res) } func (ec *executionContext) _Query___type(ctx context.Context, field graphql.CollectedField) graphql.Marshaler { args := map[string]interface{}{} var arg0 string if tmp, ok := field.Args["name"]; ok { var err error arg0, err = graphql.UnmarshalString(tmp) if err != nil { ec.Error(ctx, err) return graphql.Null } } args["name"] = arg0 rctx := graphql.GetResolverContext(ctx) rctx.Object = "Query" rctx.Args = args rctx.Field = field rctx.PushField(field.Alias) 
defer rctx.Pop() res := ec.introspectType(args["name"].(string)) if res == nil { return graphql.Null } return ec.___Type(ctx, field.Selections, res) } var userImplementors = []string{"User"} // nolint: gocyclo, errcheck, gas, goconst func (ec *executionContext) _User(ctx context.Context, sel []query.Selection, obj *User) graphql.Marshaler { fields := graphql.CollectFields(ec.Doc, sel, userImplementors, ec.Variables) out := graphql.NewOrderedMap(len(fields)) for i, field := range fields { out.Keys[i] = field.Alias switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("User") case "id": out.Values[i] = ec._User_id(ctx, field, obj) case "name": out.Values[i] = ec._User_name(ctx, field, obj) case "created": out.Values[i] = ec._User_created(ctx, field, obj) case "isBanned": out.Values[i] = ec._User_isBanned(ctx, field, obj) case "primitiveResolver": out.Values[i] = ec._User_primitiveResolver(ctx, field, obj) case "customResolver": out.Values[i] = ec._User_customResolver(ctx, field, obj) case "address": out.Values[i] = ec._User_address(ctx, field, obj) case "tier": out.Values[i] = ec._User_tier(ctx, field, obj) default: panic("unknown field " + strconv.Quote(field.Name)) } } return out } func (ec *executionContext) _User_id(ctx context.Context, field graphql.CollectedField, obj *User) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "User" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.ID return MarshalID(res) } func (ec *executionContext) _User_name(ctx context.Context, field graphql.CollectedField, obj *User) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "User" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Name return graphql.MarshalString(res) } func (ec *executionContext) _User_created(ctx context.Context, field graphql.CollectedField, obj *User) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "User" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Created return MarshalTimestamp(res) } func (ec *executionContext) _User_isBanned(ctx context.Context, field graphql.CollectedField, obj *User) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "User" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.IsBanned return graphql.MarshalBoolean(bool(res)) } func (ec *executionContext) _User_primitiveResolver(ctx context.Context, field graphql.CollectedField, obj *User) graphql.Marshaler { ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{ Object: "User", Args: nil, Field: field, }) return graphql.Defer(func() (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { userErr := ec.Recover(ctx, r) ec.Error(ctx, userErr) ret = graphql.Null } }() resTmp, err := ec.ResolverMiddleware(ctx, func(ctx context.Context) (interface{}, error) { return ec.resolvers.User_primitiveResolver(ctx, obj) }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { return graphql.Null } res := resTmp.(string) return graphql.MarshalString(res) }) } func (ec *executionContext) _User_customResolver(ctx context.Context, field graphql.CollectedField, obj *User) graphql.Marshaler { ctx = graphql.WithResolverContext(ctx, &graphql.ResolverContext{ Object: "User", Args: nil, Field: field, }) return graphql.Defer(func() (ret graphql.Marshaler) { defer func() { if r := recover(); r != nil { userErr 
:= ec.Recover(ctx, r) ec.Error(ctx, userErr) ret = graphql.Null } }() resTmp, err := ec.ResolverMiddleware(ctx, func(ctx context.Context) (interface{}, error) { return ec.resolvers.User_customResolver(ctx, obj) }) if err != nil { ec.Error(ctx, err) return graphql.Null } if resTmp == nil { return graphql.Null } res := resTmp.(Point) return res }) } func (ec *executionContext) _User_address(ctx context.Context, field graphql.CollectedField, obj *User) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "User" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Address return ec._Address(ctx, field.Selections, &res) } func (ec *executionContext) _User_tier(ctx context.Context, field graphql.CollectedField, obj *User) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "User" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Tier return res } var __DirectiveImplementors = []string{"__Directive"} // nolint: gocyclo, errcheck, gas, goconst func (ec *executionContext) ___Directive(ctx context.Context, sel []query.Selection, obj *introspection.Directive) graphql.Marshaler { fields := graphql.CollectFields(ec.Doc, sel, __DirectiveImplementors, ec.Variables) out := graphql.NewOrderedMap(len(fields)) for i, field := range fields { out.Keys[i] = field.Alias switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__Directive") case "name": out.Values[i] = ec.___Directive_name(ctx, field, obj) case "description": out.Values[i] = ec.___Directive_description(ctx, field, obj) case "locations": out.Values[i] = ec.___Directive_locations(ctx, field, obj) case "args": out.Values[i] = ec.___Directive_args(ctx, field, obj) default: panic("unknown field " + strconv.Quote(field.Name)) } } return out } func (ec *executionContext) ___Directive_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Directive" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Name() return graphql.MarshalString(res) } func (ec *executionContext) ___Directive_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Directive" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Description() if res == nil { return graphql.Null } return graphql.MarshalString(*res) } func (ec *executionContext) ___Directive_locations(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Directive" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Locations() arr1 := graphql.Array{} for idx1 := range res { arr1 = append(arr1, func() graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.PushIndex(idx1) defer rctx.Pop() return graphql.MarshalString(res[idx1]) }()) } return arr1 } func (ec *executionContext) ___Directive_args(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Directive" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Args() arr1 := graphql.Array{} for idx1 := range res { arr1 = append(arr1, func() 
graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.PushIndex(idx1) defer rctx.Pop() if res[idx1] == nil { return graphql.Null } return ec.___InputValue(ctx, field.Selections, res[idx1]) }()) } return arr1 } var __EnumValueImplementors = []string{"__EnumValue"} // nolint: gocyclo, errcheck, gas, goconst func (ec *executionContext) ___EnumValue(ctx context.Context, sel []query.Selection, obj *introspection.EnumValue) graphql.Marshaler { fields := graphql.CollectFields(ec.Doc, sel, __EnumValueImplementors, ec.Variables) out := graphql.NewOrderedMap(len(fields)) for i, field := range fields { out.Keys[i] = field.Alias switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__EnumValue") case "name": out.Values[i] = ec.___EnumValue_name(ctx, field, obj) case "description": out.Values[i] = ec.___EnumValue_description(ctx, field, obj) case "isDeprecated": out.Values[i] = ec.___EnumValue_isDeprecated(ctx, field, obj) case "deprecationReason": out.Values[i] = ec.___EnumValue_deprecationReason(ctx, field, obj) default: panic("unknown field " + strconv.Quote(field.Name)) } } return out } func (ec *executionContext) ___EnumValue_name(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__EnumValue" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Name() return graphql.MarshalString(res) } func (ec *executionContext) ___EnumValue_description(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__EnumValue" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Description() if res == nil { return graphql.Null } return graphql.MarshalString(*res) } func (ec *executionContext) ___EnumValue_isDeprecated(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__EnumValue" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.IsDeprecated() return graphql.MarshalBoolean(res) } func (ec *executionContext) ___EnumValue_deprecationReason(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__EnumValue" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.DeprecationReason() if res == nil { return graphql.Null } return graphql.MarshalString(*res) } var __FieldImplementors = []string{"__Field"} // nolint: gocyclo, errcheck, gas, goconst func (ec *executionContext) ___Field(ctx context.Context, sel []query.Selection, obj *introspection.Field) graphql.Marshaler { fields := graphql.CollectFields(ec.Doc, sel, __FieldImplementors, ec.Variables) out := graphql.NewOrderedMap(len(fields)) for i, field := range fields { out.Keys[i] = field.Alias switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__Field") case "name": out.Values[i] = ec.___Field_name(ctx, field, obj) case "description": out.Values[i] = ec.___Field_description(ctx, field, obj) case "args": out.Values[i] = ec.___Field_args(ctx, field, obj) case "type": out.Values[i] = ec.___Field_type(ctx, field, obj) case "isDeprecated": out.Values[i] = ec.___Field_isDeprecated(ctx, field, obj) case "deprecationReason": out.Values[i] = 
ec.___Field_deprecationReason(ctx, field, obj) default: panic("unknown field " + strconv.Quote(field.Name)) } } return out } func (ec *executionContext) ___Field_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Field" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Name() return graphql.MarshalString(res) } func (ec *executionContext) ___Field_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx)
rctx.Object = "__Field" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Description() if res == nil { return graphql.Null } return graphql.MarshalString(*res) } func (ec *executionContext) ___Field_args(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Field" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Args() arr1 := graphql.Array{} for idx1 := range res { arr1 = append(arr1, func() graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.PushIndex(idx1) defer rctx.Pop() if res[idx1] == nil { return graphql.Null } return ec.___InputValue(ctx, field.Selections, res[idx1]) }()) } return arr1 } func (ec *executionContext) ___Field_type(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Field" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Type() if res == nil { return graphql.Null } return ec.___Type(ctx, field.Selections, res) } func (ec *executionContext) ___Field_isDeprecated(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Field" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.IsDeprecated() return graphql.MarshalBoolean(res) } func (ec *executionContext) ___Field_deprecationReason(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Field" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.DeprecationReason() if res == nil { return graphql.Null } return graphql.MarshalString(*res) } var __InputValueImplementors = []string{"__InputValue"} // nolint: gocyclo, errcheck, gas, goconst func (ec *executionContext) ___InputValue(ctx context.Context, sel []query.Selection, obj *introspection.InputValue) graphql.Marshaler { fields := graphql.CollectFields(ec.Doc, sel, __InputValueImplementors, ec.Variables) out := graphql.NewOrderedMap(len(fields)) for i, field := range fields { out.Keys[i] = field.Alias switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__InputValue") case "name": out.Values[i] = ec.___InputValue_name(ctx, field, obj) case "description": out.Values[i] = ec.___InputValue_description(ctx, field, obj) case "type": out.Values[i] = ec.___InputValue_type(ctx, field, obj) case "defaultValue": out.Values[i] = ec.___InputValue_defaultValue(ctx, field, obj) default: panic("unknown field " + strconv.Quote(field.Name)) } } return out } func (ec *executionContext) ___InputValue_name(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__InputValue" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Name() return graphql.MarshalString(res) } func (ec *executionContext) ___InputValue_description(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__InputValue" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Description() if res == nil { return graphql.Null 
} return graphql.MarshalString(*res) } func (ec *executionContext) ___InputValue_type(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__InputValue" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Type() if res == nil { return graphql.Null } return ec.___Type(ctx, field.Selections, res) } func (ec *executionContext) ___InputValue_defaultValue(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__InputValue" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.DefaultValue() if res == nil { return graphql.Null } return graphql.MarshalString(*res) } var __SchemaImplementors = []string{"__Schema"} // nolint: gocyclo, errcheck, gas, goconst func (ec *executionContext) ___Schema(ctx context.Context, sel []query.Selection, obj *introspection.Schema) graphql.Marshaler { fields := graphql.CollectFields(ec.Doc, sel, __SchemaImplementors, ec.Variables) out := graphql.NewOrderedMap(len(fields)) for i, field := range fields { out.Keys[i] = field.Alias switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__Schema") case "types": out.Values[i] = ec.___Schema_types(ctx, field, obj) case "queryType": out.Values[i] = ec.___Schema_queryType(ctx, field, obj) case "mutationType": out.Values[i] = ec.___Schema_mutationType(ctx, field, obj) case "subscriptionType": out.Values[i] = ec.___Schema_subscriptionType(ctx, field, obj) case "directives": out.Values[i] = ec.___Schema_directives(ctx, field, obj) default: panic("unknown field " + strconv.Quote(field.Name)) } } return out } func (ec *executionContext) ___Schema_types(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Schema" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Types() arr1 := graphql.Array{} for idx1 := range res { arr1 = append(arr1, func() graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.PushIndex(idx1) defer rctx.Pop() if res[idx1] == nil { return graphql.Null } return ec.___Type(ctx, field.Selections, res[idx1]) }()) } return arr1 } func (ec *executionContext) ___Schema_queryType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Schema" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.QueryType() if res == nil { return graphql.Null } return ec.___Type(ctx, field.Selections, res) } func (ec *executionContext) ___Schema_mutationType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Schema" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.MutationType() if res == nil { return graphql.Null } return ec.___Type(ctx, field.Selections, res) } func (ec *executionContext) ___Schema_subscriptionType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Schema" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.SubscriptionType() if res == nil { return 
graphql.Null } return ec.___Type(ctx, field.Selections, res) } func (ec *executionContext) ___Schema_directives(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Schema" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Directives() arr1 := graphql.Array{} for idx1 := range res { arr1 = append(arr1, func() graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.PushIndex(idx1) defer rctx.Pop() if res[idx1] == nil { return graphql.Null } return ec.___Directive(ctx, field.Selections, res[idx1]) }()) } return arr1 } var __TypeImplementors = []string{"__Type"} // nolint: gocyclo, errcheck, gas, goconst func (ec *executionContext) ___Type(ctx context.Context, sel []query.Selection, obj *introspection.Type) graphql.Marshaler { fields := graphql.CollectFields(ec.Doc, sel, __TypeImplementors, ec.Variables) out := graphql.NewOrderedMap(len(fields)) for i, field := range fields { out.Keys[i] = field.Alias switch field.Name { case "__typename": out.Values[i] = graphql.MarshalString("__Type") case "kind": out.Values[i] = ec.___Type_kind(ctx, field, obj) case "name": out.Values[i] = ec.___Type_name(ctx, field, obj) case "description": out.Values[i] = ec.___Type_description(ctx, field, obj) case "fields": out.Values[i] = ec.___Type_fields(ctx, field, obj) case "interfaces": out.Values[i] = ec.___Type_interfaces(ctx, field, obj) case "possibleTypes": out.Values[i] = ec.___Type_possibleTypes(ctx, field, obj) case "enumValues": out.Values[i] = ec.___Type_enumValues(ctx, field, obj) case "inputFields": out.Values[i] = ec.___Type_inputFields(ctx, field, obj) case "ofType": out.Values[i] = ec.___Type_ofType(ctx, field, obj) default: panic("unknown field " + strconv.Quote(field.Name)) } } return out } func (ec *executionContext) ___Type_kind(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Type" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Kind() return graphql.MarshalString(res) } func (ec *executionContext) ___Type_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Type" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Name() if res == nil { return graphql.Null } return graphql.MarshalString(*res) } func (ec *executionContext) ___Type_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Type" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Description() if res == nil { return graphql.Null } return graphql.MarshalString(*res) } func (ec *executionContext) ___Type_fields(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) graphql.Marshaler { args := map[string]interface{}{} var arg0 bool if tmp, ok := field.Args["includeDeprecated"]; ok { var err error arg0, err = graphql.UnmarshalBoolean(tmp) if err != nil { ec.Error(ctx, err) return graphql.Null } } args["includeDeprecated"] = arg0 rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Type" rctx.Args = args rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Fields(args["includeDeprecated"].(bool)) arr1 := 
graphql.Array{} for idx1 := range res { arr1 = append(arr1, func() graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.PushIndex(idx1) defer rctx.Pop() if res[idx1] == nil { return graphql.Null } return ec.___Field(ctx, field.Selections, res[idx1]) }()) } return arr1 } func (ec *executionContext) ___Type_interfaces(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Type" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.Interfaces() arr1 := graphql.Array{} for idx1 := range res { arr1 = append(arr1, func() graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.PushIndex(idx1) defer rctx.Pop() if res[idx1] == nil { return graphql.Null } return ec.___Type(ctx, field.Selections, res[idx1]) }()) } return arr1 } func (ec *executionContext) ___Type_possibleTypes(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Type" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.PossibleTypes() arr1 := graphql.Array{} for idx1 := range res { arr1 = append(arr1, func() graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.PushIndex(idx1) defer rctx.Pop() if res[idx1] == nil { return graphql.Null } return ec.___Type(ctx, field.Selections, res[idx1]) }()) } return arr1 } func (ec *executionContext) ___Type_enumValues(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) graphql.Marshaler { args := map[string]interface{}{} var arg0 bool if tmp, ok := field.Args["includeDeprecated"]; ok { var err error arg0, err = graphql.UnmarshalBoolean(tmp) if err != nil { ec.Error(ctx, err) return graphql.Null } } args["includeDeprecated"] = arg0 rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Type" rctx.Args = args rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.EnumValues(args["includeDeprecated"].(bool)) arr1 := graphql.Array{} for idx1 := range res { arr1 = append(arr1, func() graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.PushIndex(idx1) defer rctx.Pop() if res[idx1] == nil { return graphql.Null } return ec.___EnumValue(ctx, field.Selections, res[idx1]) }()) } return arr1 } func (ec *executionContext) ___Type_inputFields(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Type" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.InputFields() arr1 := graphql.Array{} for idx1 := range res { arr1 = append(arr1, func() graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.PushIndex(idx1) defer rctx.Pop() if res[idx1] == nil { return graphql.Null } return ec.___InputValue(ctx, field.Selections, res[idx1]) }()) } return arr1 } func (ec *executionContext) ___Type_ofType(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) graphql.Marshaler { rctx := graphql.GetResolverContext(ctx) rctx.Object = "__Type" rctx.Args = nil rctx.Field = field rctx.PushField(field.Alias) defer rctx.Pop() res := obj.OfType() if res == nil { return graphql.Null } return ec.___Type(ctx, field.Selections, res) } func UnmarshalSearchArgs(v interface{}) (SearchArgs, error) { var it SearchArgs var asMap = v.(map[string]interface{}) for k, v := range asMap { switch k { case "location": var err 
error var ptr1 Point if v != nil { err = (&ptr1).UnmarshalGQL(v) it.Location = &ptr1 } if err != nil { return it, err } case "createdAfter": var err error var ptr1 time.Time if v != nil { ptr1, err = UnmarshalTimestamp(v) it.CreatedAfter = &ptr1 } if err != nil { return it, err } case "isBanned": var err error var castTmp bool castTmp, err = graphql.UnmarshalBoolean(v) it.IsBanned = Banned(castTmp) if err != nil { return it, err } } } return it, nil } func (ec *executionContext) introspectSchema() *introspection.Schema { return introspection.WrapSchema(parsedSchema) } func (ec *executionContext) introspectType(name string) *introspection.Type { t := parsedSchema.Resolve(name) if t == nil { return nil } return introspection.WrapType(t) } var parsedSchema = schema.MustParse(`type Query { user(id: ID!): User search(input: SearchArgs = {location: "37,144"}): [User!]! } type User { id: ID! name: String! created: Timestamp isBanned: Boolean! primitiveResolver: String! customResolver: Point! address: Address tier: Tier } type Address { id: ID! location: Point } input SearchArgs { location: Point createdAfter: Timestamp isBanned: Boolean } enum Tier { A B C } scalar Timestamp scalar Point `)
main.go
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by cloud.google.com/go/internal/gapicgen/gensnippets. DO NOT EDIT.

// [START compute_v1_generated_RegionTargetHttpsProxies_List_sync]
package main

import (
	"context"

	compute "cloud.google.com/go/compute/apiv1"
	"google.golang.org/api/iterator"
	computepb "google.golang.org/genproto/googleapis/cloud/compute/v1"
)

func main() {
	ctx := context.Background()
	c, err := compute.NewRegionTargetHttpsProxiesRESTClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	defer c.Close()

	req := &computepb.ListRegionTargetHttpsProxiesRequest{
		// TODO: Fill request struct fields.
		// See https://pkg.go.dev/google.golang.org/genproto/googleapis/cloud/compute/v1#ListRegionTargetHttpsProxiesRequest.
	}
	it := c.List(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

// [END compute_v1_generated_RegionTargetHttpsProxies_List_sync]
mimc.rs
// For randomness (during paramgen and proof generation) use rand::thread_rng; // For benchmarking use std::time::{Duration, Instant}; // Bring in some tools for using pairing-friendly curves use bellperson::bls::Engine; use ff::{Field, ScalarEngine}; // We're going to use the BLS12-381 pairing-friendly elliptic curve. use bellperson::bls::Bls12; // We'll use these interfaces to construct our circuit. use bellperson::{Circuit, ConstraintSystem, SynthesisError}; // We're going to use the Groth16 proving system. use bellperson::groth16::{ create_random_proof, create_random_proof_batch, generate_random_parameters, prepare_verifying_key, verify_proof, verify_proofs_batch, Proof, }; const MIMC_ROUNDS: usize = 322; /// This is an implementation of MiMC, specifically a /// variant named `LongsightF322p3` for BLS12-381. /// See http://eprint.iacr.org/2016/492 for more /// information about this construction. /// /// ``` /// function LongsightF322p3(xL ⦂ Fp, xR ⦂ Fp) { /// for i from 0 up to 321 { /// xL, xR := xR + (xL + Ci)^3, xL /// } /// return xL /// } /// ``` fn mimc<E: Engine>(mut xl: E::Fr, mut xr: E::Fr, constants: &[E::Fr]) -> E::Fr { assert_eq!(constants.len(), MIMC_ROUNDS); for i in 0..MIMC_ROUNDS { let mut tmp1 = xl; tmp1.add_assign(&constants[i]); let mut tmp2 = tmp1; tmp2.square(); tmp2.mul_assign(&tmp1); tmp2.add_assign(&xr); xr = xl; xl = tmp2; } xl } /// This is our demo circuit for proving knowledge of the /// preimage of a MiMC hash invocation. #[derive(Clone)] struct MiMCDemo<'a, E: Engine> { xl: Option<E::Fr>, xr: Option<E::Fr>, constants: &'a [E::Fr], } /// Our demo circuit implements this `Circuit` trait which /// is used during paramgen and proving in order to /// synthesize the constraint system. impl<'a, E: Engine> Circuit<E> for MiMCDemo<'a, E> { fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> { assert_eq!(self.constants.len(), MIMC_ROUNDS); // Allocate the first component of the preimage. let mut xl_value = self.xl; let mut xl = cs.alloc( || "preimage xl", || xl_value.ok_or(SynthesisError::AssignmentMissing), )?; // Allocate the second component of the preimage. let mut xr_value = self.xr; let mut xr = cs.alloc( || "preimage xr", || xr_value.ok_or(SynthesisError::AssignmentMissing), )?; for i in 0..MIMC_ROUNDS { // xL, xR := xR + (xL + Ci)^3, xL let cs = &mut cs.namespace(|| format!("round {}", i)); // tmp = (xL + Ci)^2
let tmp_value = xl_value.map(|mut e| { e.add_assign(&self.constants[i]); e.square();
e }); let tmp = cs.alloc( || "tmp", || tmp_value.ok_or(SynthesisError::AssignmentMissing), )?; cs.enforce( || "tmp = (xL + Ci)^2", |lc| lc + xl + (self.constants[i], CS::one()), |lc| lc + xl + (self.constants[i], CS::one()), |lc| lc + tmp, ); // new_xL = xR + (xL + Ci)^3 // new_xL = xR + tmp * (xL + Ci) // new_xL - xR = tmp * (xL + Ci) let new_xl_value = xl_value.map(|mut e| { e.add_assign(&self.constants[i]); e.mul_assign(&tmp_value.unwrap()); e.add_assign(&xr_value.unwrap()); e }); let new_xl = if i == (MIMC_ROUNDS - 1) { // This is the last round, xL is our image and so // we allocate a public input. cs.alloc_input( || "image", || new_xl_value.ok_or(SynthesisError::AssignmentMissing), )? } else { cs.alloc( || "new_xl", || new_xl_value.ok_or(SynthesisError::AssignmentMissing), )? }; cs.enforce( || "new_xL = xR + (xL + Ci)^3", |lc| lc + tmp, |lc| lc + xl + (self.constants[i], CS::one()), |lc| lc + new_xl - xr, ); // xR = xL xr = xl; xr_value = xl_value; // xL = new_xL xl = new_xl; xl_value = new_xl_value; } Ok(()) } } #[test] fn test_mimc() { // This may not be cryptographically safe, use // `OsRng` (for example) in production software. let rng = &mut thread_rng(); // Generate the MiMC round constants let constants = (0..MIMC_ROUNDS) .map(|_| <Bls12 as ScalarEngine>::Fr::random(rng)) .collect::<Vec<_>>(); println!("Creating parameters..."); // Create parameters for our circuit let params = { let c = MiMCDemo::<Bls12> { xl: None, xr: None, constants: &constants, }; generate_random_parameters(c, rng).unwrap() }; // Prepare the verification key (for proof verification) let pvk = prepare_verifying_key(&params.vk); println!("Creating proofs..."); // Let's benchmark stuff! const SAMPLES: u32 = 50; let mut total_proving = Duration::new(0, 0); let mut total_verifying = Duration::new(0, 0); // Just a place to put the proof data, so we can // benchmark deserialization. let mut proof_vec = vec![]; let mut proofs = vec![]; let mut images = vec![]; for _ in 0..SAMPLES { // Generate a random preimage and compute the image let xl = <Bls12 as ScalarEngine>::Fr::random(rng); let xr = <Bls12 as ScalarEngine>::Fr::random(rng); let image = mimc::<Bls12>(xl, xr, &constants); proof_vec.truncate(0); let start = Instant::now(); { // Create an instance of our circuit (with the // witness) let c = MiMCDemo { xl: Some(xl), xr: Some(xr), constants: &constants, }; // Create a groth16 proof with our parameters. let proof = create_random_proof(c, &params, rng).unwrap(); proof.write(&mut proof_vec).unwrap(); } total_proving += start.elapsed(); let start = Instant::now(); let proof = Proof::read(&proof_vec[..]).unwrap(); // Check the proof assert!(verify_proof(&pvk, &proof, &[image]).unwrap()); total_verifying += start.elapsed(); proofs.push(proof); images.push(vec![image]); } // batch verification println!("Creating batch proofs..."); let proving_batch = Instant::now(); { // Create an instance of our circuit (with the // witness) let xl = <Bls12 as ScalarEngine>::Fr::random(rng); let xr = <Bls12 as ScalarEngine>::Fr::random(rng); let c = MiMCDemo { xl: Some(xl), xr: Some(xr), constants: &constants, }; // Create a groth16 proof with our parameters. 
let proofs = create_random_proof_batch(vec![c; SAMPLES as usize], &params, rng).unwrap(); assert_eq!(proofs.len(), 50); } let proving_batch = proving_batch.elapsed().subsec_nanos() as f64 / 1_000_000_000f64; println!( "Proving time batch: {:04}s ({:04}s / proof)", proving_batch, proving_batch / SAMPLES as f64, ); let proving_avg = total_proving / SAMPLES; let proving_avg = proving_avg.subsec_nanos() as f64 / 1_000_000_000f64 + (proving_avg.as_secs() as f64); let verifying_avg = total_verifying / SAMPLES; let verifying_avg = verifying_avg.subsec_nanos() as f64 / 1_000_000_000f64 + (verifying_avg.as_secs() as f64); println!("Average proving time: {:08}s", proving_avg); println!("Average verifying time: {:08}s", verifying_avg); // batch verification { let pvk = prepare_verifying_key(&params.vk); let start = Instant::now(); let proofs: Vec<_> = proofs.iter().collect(); let valid = verify_proofs_batch(&pvk, &mut rand::rngs::OsRng, &proofs, &images).unwrap(); println!( "Batch verification of {} proofs: {:04}s ({:04}s/proof)", proofs.len(), (start.elapsed().subsec_nanos() as f64) / 1_000_000_000f64, ((start.elapsed().subsec_nanos() as f64) / 1_000_000_000f64) / proofs.len() as f64, ); assert!(valid, "failed batch verification"); // check that invalid proofs don't validate let mut bad_proofs = proofs .iter() .map(|p| (*p).clone()) .collect::<Vec<Proof<_>>>(); for i in 0..proofs.len() { use groupy::CurveProjective; let p = &mut bad_proofs[i]; let mut a: <Bls12 as Engine>::G1 = p.a.into(); a.add_assign(&<Bls12 as Engine>::G1::one()); p.a = a.into_affine(); } let bad_proofs_ref = bad_proofs.iter().collect::<Vec<_>>(); assert!( !verify_proofs_batch(&pvk, &mut rand::rngs::OsRng, &bad_proofs_ref[..], &images) .unwrap() ); } }
__init__.py
from jnius import autoclass, JavaException


def _class_call(cls, args: tuple, instantiate: bool):
    if not args:
        return cls() if instantiate else cls
    else:
        return cls(*args)


def _browserx_except_cls_call(namespace: str, args: tuple, instantiate: bool):
    try:
        return _class_call(autoclass(namespace), args, instantiate)
    except JavaException as e:
        raise JavaException(
            f"{e}\nEnable androidx in your buildozer.spec file\nadd 'androidx.browser:browser:1.4.0' to "
            f"buildozer.spec file: android.gradle_dependencies"
        )
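

# Illustrative usage sketch: how _class_call / _browserx_except_cls_call might be
# called from Kivy / python-for-android code. The androidx class name below is an
# assumption inferred from the 'androidx.browser:browser:1.4.0' hint above, and the
# helper name _example_custom_tabs_builder is hypothetical, not part of this module.
def _example_custom_tabs_builder():
    # Resolve the class without instantiating it; a JavaException here is re-raised
    # by _browserx_except_cls_call with the buildozer.spec / androidx hint.
    builder_cls = _browserx_except_cls_call(
        "androidx.browser.customtabs.CustomTabsIntent$Builder", (), instantiate=False
    )
    # Instantiate with no constructor arguments once an object is actually needed.
    return _class_call(builder_cls, (), instantiate=True)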
admission.go
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package serviceaccount import ( "fmt" "io" "math/rand" "strconv" "time" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/admission" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/fields" kubelet "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/serviceaccount" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/watch" ) // DefaultServiceAccountName is the name of the default service account to set on pods which do not specify a service account const DefaultServiceAccountName = "default" // EnforceMountableSecretsAnnotation is a default annotation that indicates that a service account should enforce mountable secrets. // The value must be true to have this annotation take effect const EnforceMountableSecretsAnnotation = "kubernetes.io/enforce-mountable-secrets" // DefaultAPITokenMountPath is the path that ServiceAccountToken secrets are automounted to. // The token file would then be accessible at /var/run/secrets/kubernetes.io/serviceaccount const DefaultAPITokenMountPath = "/var/run/secrets/kubernetes.io/serviceaccount" // PluginName is the name of this admission plugin const PluginName = "ServiceAccount" func
init
() { admission.RegisterPlugin(PluginName, func(client clientset.Interface, config io.Reader) (admission.Interface, error) { serviceAccountAdmission := NewServiceAccount(client) serviceAccountAdmission.Run() return serviceAccountAdmission, nil }) } var _ = admission.Interface(&serviceAccount{}) type serviceAccount struct { *admission.Handler // LimitSecretReferences rejects pods that reference secrets their service accounts do not reference LimitSecretReferences bool // RequireAPIToken determines whether pod creation attempts are rejected if no API token exists for the pod's service account RequireAPIToken bool // MountServiceAccountToken creates Volume and VolumeMounts for the first referenced ServiceAccountToken for the pod's service account MountServiceAccountToken bool client clientset.Interface serviceAccounts cache.Indexer secrets cache.Indexer stopChan chan struct{} serviceAccountsReflector *cache.Reflector secretsReflector *cache.Reflector } // NewServiceAccount returns an admission.Interface implementation which limits admission of Pod CREATE requests based on the pod's ServiceAccount: // 1. If the pod does not specify a ServiceAccount, it sets the pod's ServiceAccount to "default" // 2. It ensures the ServiceAccount referenced by the pod exists // 3. If LimitSecretReferences is true, it rejects the pod if the pod references Secret objects which the pod's ServiceAccount does not reference // 4. If the pod does not contain any ImagePullSecrets, the ImagePullSecrets of the service account are added. // 5. If MountServiceAccountToken is true, it adds a VolumeMount with the pod's ServiceAccount's api token secret to containers func NewServiceAccount(cl clientset.Interface) *serviceAccount { serviceAccountsIndexer, serviceAccountsReflector := cache.NewNamespaceKeyedIndexerAndReflector( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { return cl.Core().ServiceAccounts(api.NamespaceAll).List(options) }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { return cl.Core().ServiceAccounts(api.NamespaceAll).Watch(options) }, }, &api.ServiceAccount{}, 0, ) tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)}) secretsIndexer, secretsReflector := cache.NewNamespaceKeyedIndexerAndReflector( &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { options.FieldSelector = tokenSelector return cl.Core().Secrets(api.NamespaceAll).List(options) }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { options.FieldSelector = tokenSelector return cl.Core().Secrets(api.NamespaceAll).Watch(options) }, }, &api.Secret{}, 0, ) return &serviceAccount{ Handler: admission.NewHandler(admission.Create), // TODO: enable this once we've swept secret usage to account for adding secret references to service accounts LimitSecretReferences: false, // Auto mount service account API token secrets MountServiceAccountToken: true, // Reject pod creation until a service account token is available RequireAPIToken: true, client: cl, serviceAccounts: serviceAccountsIndexer, serviceAccountsReflector: serviceAccountsReflector, secrets: secretsIndexer, secretsReflector: secretsReflector, } } func (s *serviceAccount) Run() { if s.stopChan == nil { s.stopChan = make(chan struct{}) s.serviceAccountsReflector.RunUntil(s.stopChan) s.secretsReflector.RunUntil(s.stopChan) } } func (s *serviceAccount) Stop() { if s.stopChan != nil { close(s.stopChan) s.stopChan = nil } } func (s 
*serviceAccount) Admit(a admission.Attributes) (err error) { if a.GetResource() != api.Resource("pods") { return nil } obj := a.GetObject() if obj == nil { return nil } pod, ok := obj.(*api.Pod) if !ok { return nil } // Don't modify the spec of mirror pods. // That makes the kubelet very angry and confused, and it immediately deletes the pod (because the spec doesn't match) // That said, don't allow mirror pods to reference ServiceAccounts or SecretVolumeSources either if _, isMirrorPod := pod.Annotations[kubelet.ConfigMirrorAnnotationKey]; isMirrorPod { if len(pod.Spec.ServiceAccountName) != 0 { return admission.NewForbidden(a, fmt.Errorf("A mirror pod may not reference service accounts")) } for _, volume := range pod.Spec.Volumes { if volume.VolumeSource.Secret != nil { return admission.NewForbidden(a, fmt.Errorf("A mirror pod may not reference secrets")) } } return nil } // Set the default service account if needed if len(pod.Spec.ServiceAccountName) == 0 { pod.Spec.ServiceAccountName = DefaultServiceAccountName } // Ensure the referenced service account exists serviceAccount, err := s.getServiceAccount(a.GetNamespace(), pod.Spec.ServiceAccountName) if err != nil { return admission.NewForbidden(a, fmt.Errorf("Error looking up service account %s/%s: %v", a.GetNamespace(), pod.Spec.ServiceAccountName, err)) } if serviceAccount == nil { // TODO: convert to a ServerTimeout error (or other error that sends a Retry-After header) return admission.NewForbidden(a, fmt.Errorf("service account %s/%s was not found, retry after the service account is created", a.GetNamespace(), pod.Spec.ServiceAccountName)) } if s.enforceMountableSecrets(serviceAccount) { if err := s.limitSecretReferences(serviceAccount, pod); err != nil { return admission.NewForbidden(a, err) } } if s.MountServiceAccountToken { if err := s.mountServiceAccountToken(serviceAccount, pod); err != nil { return admission.NewForbidden(a, err) } } if len(pod.Spec.ImagePullSecrets) == 0 { pod.Spec.ImagePullSecrets = make([]api.LocalObjectReference, len(serviceAccount.ImagePullSecrets)) copy(pod.Spec.ImagePullSecrets, serviceAccount.ImagePullSecrets) } return nil } // enforceMountableSecrets indicates whether mountable secrets should be enforced for a particular service account // A global setting of true will override any flag set on the individual service account func (s *serviceAccount) enforceMountableSecrets(serviceAccount *api.ServiceAccount) bool { if s.LimitSecretReferences { return true } if value, ok := serviceAccount.Annotations[EnforceMountableSecretsAnnotation]; ok { enforceMountableSecretCheck, _ := strconv.ParseBool(value) return enforceMountableSecretCheck } return false } // getServiceAccount returns the ServiceAccount for the given namespace and name if it exists func (s *serviceAccount) getServiceAccount(namespace string, name string) (*api.ServiceAccount, error) { key := &api.ServiceAccount{ObjectMeta: api.ObjectMeta{Namespace: namespace}} index, err := s.serviceAccounts.Index("namespace", key) if err != nil { return nil, err } for _, obj := range index { serviceAccount := obj.(*api.ServiceAccount) if serviceAccount.Name == name { return serviceAccount, nil } } // Could not find in cache, attempt to look up directly numAttempts := 1 if name == DefaultServiceAccountName { // If this is the default serviceaccount, attempt more times, since it should be auto-created by the controller numAttempts = 10 } retryInterval := time.Duration(rand.Int63n(100)+int64(100)) * time.Millisecond for i := 0; i < numAttempts; i++ { if i != 0 
{ time.Sleep(retryInterval) } serviceAccount, err := s.client.Core().ServiceAccounts(namespace).Get(name) if err == nil { return serviceAccount, nil } if !errors.IsNotFound(err) { return nil, err } } return nil, nil } // getReferencedServiceAccountToken returns the name of the first referenced secret which is a ServiceAccountToken for the service account func (s *serviceAccount) getReferencedServiceAccountToken(serviceAccount *api.ServiceAccount) (string, error) { if len(serviceAccount.Secrets) == 0 { return "", nil } tokens, err := s.getServiceAccountTokens(serviceAccount) if err != nil { return "", err } references := sets.NewString() for _, secret := range serviceAccount.Secrets { references.Insert(secret.Name) } for _, token := range tokens { if references.Has(token.Name) { return token.Name, nil } } return "", nil } // getServiceAccountTokens returns all ServiceAccountToken secrets for the given ServiceAccount func (s *serviceAccount) getServiceAccountTokens(serviceAccount *api.ServiceAccount) ([]*api.Secret, error) { key := &api.Secret{ObjectMeta: api.ObjectMeta{Namespace: serviceAccount.Namespace}} index, err := s.secrets.Index("namespace", key) if err != nil { return nil, err } tokens := []*api.Secret{} for _, obj := range index { token := obj.(*api.Secret) if serviceaccount.IsServiceAccountToken(token, serviceAccount) { tokens = append(tokens, token) } } return tokens, nil } func (s *serviceAccount) limitSecretReferences(serviceAccount *api.ServiceAccount, pod *api.Pod) error { // Ensure all secrets the pod references are allowed by the service account mountableSecrets := sets.NewString() for _, s := range serviceAccount.Secrets { mountableSecrets.Insert(s.Name) } for _, volume := range pod.Spec.Volumes { source := volume.VolumeSource if source.Secret == nil { continue } secretName := source.Secret.SecretName if !mountableSecrets.Has(secretName) { return fmt.Errorf("Volume with secret.secretName=\"%s\" is not allowed because service account %s does not reference that secret", secretName, serviceAccount.Name) } } for _, container := range pod.Spec.Containers { for _, env := range container.Env { if env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil { if !mountableSecrets.Has(env.ValueFrom.SecretKeyRef.Name) { return fmt.Errorf("Container %s with envVar %s referencing secret.secretName=\"%s\" is not allowed because service account %s does not reference that secret", container.Name, env.Name, env.ValueFrom.SecretKeyRef.Name, serviceAccount.Name) } } } } // limit pull secret references as well pullSecrets := sets.NewString() for _, s := range serviceAccount.ImagePullSecrets { pullSecrets.Insert(s.Name) } for i, pullSecretRef := range pod.Spec.ImagePullSecrets { if !pullSecrets.Has(pullSecretRef.Name) { return fmt.Errorf(`imagePullSecrets[%d].name="%s" is not allowed because service account %s does not reference that imagePullSecret`, i, pullSecretRef.Name, serviceAccount.Name) } } return nil } func (s *serviceAccount) mountServiceAccountToken(serviceAccount *api.ServiceAccount, pod *api.Pod) error { // Find the name of a referenced ServiceAccountToken secret we can mount serviceAccountToken, err := s.getReferencedServiceAccountToken(serviceAccount) if err != nil { return fmt.Errorf("Error looking up service account token for %s/%s: %v", serviceAccount.Namespace, serviceAccount.Name, err) } if len(serviceAccountToken) == 0 { // We don't have an API token to mount, so return if s.RequireAPIToken { // If a token is required, this is considered an error // TODO: convert to a 
ServerTimeout error (or other error that sends a Retry-After header) return fmt.Errorf("no API token found for service account %s/%s, retry after the token is automatically created and added to the service account", serviceAccount.Namespace, serviceAccount.Name) } return nil } // Find the volume and volume name for the ServiceAccountTokenSecret if it already exists tokenVolumeName := "" hasTokenVolume := false allVolumeNames := sets.NewString() for _, volume := range pod.Spec.Volumes { allVolumeNames.Insert(volume.Name) if volume.Secret != nil && volume.Secret.SecretName == serviceAccountToken { tokenVolumeName = volume.Name hasTokenVolume = true break } } // Determine a volume name for the ServiceAccountTokenSecret in case we need it if len(tokenVolumeName) == 0 { // Try naming the volume the same as the serviceAccountToken, and uniquify if needed tokenVolumeName = serviceAccountToken if allVolumeNames.Has(tokenVolumeName) { tokenVolumeName = api.SimpleNameGenerator.GenerateName(fmt.Sprintf("%s-", serviceAccountToken)) } } // Create the prototypical VolumeMount volumeMount := api.VolumeMount{ Name: tokenVolumeName, ReadOnly: true, MountPath: DefaultAPITokenMountPath, } // Ensure every container mounts the APISecret volume needsTokenVolume := false for i, container := range pod.Spec.Containers { existingContainerMount := false for _, volumeMount := range container.VolumeMounts { // Existing mounts at the default mount path prevent mounting of the API token if volumeMount.MountPath == DefaultAPITokenMountPath { existingContainerMount = true break } } if !existingContainerMount { pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts, volumeMount) needsTokenVolume = true } } // Add the volume if a container needs it if !hasTokenVolume && needsTokenVolume { volume := api.Volume{ Name: tokenVolumeName, VolumeSource: api.VolumeSource{ Secret: &api.SecretVolumeSource{ SecretName: serviceAccountToken, }, }, } pod.Spec.Volumes = append(pod.Spec.Volumes, volume) } return nil }
details_info.go
package graph import ( i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization" ) // DetailsInfo provides operations to manage the auditLogRoot singleton. type DetailsInfo struct { // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. additionalData map[string]interface{}; } // NewDetailsInfo instantiates a new detailsInfo and sets the default values. func NewDetailsInfo()(*DetailsInfo) { m := &DetailsInfo{ } m.SetAdditionalData(make(map[string]interface{})); return m } // CreateDetailsInfoFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value func CreateDetailsInfoFromDiscriminatorValue(parseNode i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable, error)
{ return NewDetailsInfo(), nil }
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. func (m *DetailsInfo) GetAdditionalData()(map[string]interface{}) { if m == nil { return nil } else { return m.additionalData } } // GetFieldDeserializers the deserialization information for the current model func (m *DetailsInfo) GetFieldDeserializers()(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error)) { res := make(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error)) return res } func (m *DetailsInfo) IsNil()(bool) { return m == nil } // Serialize serializes information the current object func (m *DetailsInfo) Serialize(writer i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.SerializationWriter)(error) { { err := writer.WriteAdditionalData(m.GetAdditionalData()) if err != nil { return err } } return nil } // SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. func (m *DetailsInfo) SetAdditionalData(value map[string]interface{})() { if m != nil { m.additionalData = value } }
Prodoc.py
# Copyright 2000 by Jeffrey Chang. All rights reserved. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. """ This module is OBSOLETE. Most of the functionality in this module has moved to Bio.ExPASy.Prodoc; please see Bio.ExPASy.Prodoc.read To read a Prodoc file containing one entry. Bio.ExPASy.Prodoc.parse Iterates over entries in a Prodoc file. Bio.ExPASy.Prodoc.Record Holds Prodoc data. Bio.ExPASy.Prodoc.Reference Holds data from a Prodoc reference. The other functions and classes in Bio.Prosite.Prodoc (including Bio.Prosite.Prodoc.index_file and Bio.Prosite.Prodoc.Dictionary) are considered deprecated, and were not moved to Bio.ExPASy.Prodoc. If you use this functionality, please contact the Biopython developers at [email protected] to avoid permanent removal of this module from Biopython. This module provides code to work with the prosite.doc file from Prosite, available at http://www.expasy.ch/prosite/. Tested with: Release 15.0, July 1998 Release 16.0, July 1999 Release 20.22, 13 November 2007 Functions: parse Iterates over entries in a Prodoc file. index_file Index a Prodoc file for a Dictionary. _extract_record Extract Prodoc data from a web page. Classes: Record Holds Prodoc data. Reference Holds data from a Prodoc reference. Dictionary Accesses a Prodoc file using a dictionary interface. RecordParser Parses a Prodoc record into a Record object. _Scanner Scans Prodoc-formatted data. _RecordConsumer Consumes Prodoc data to a Record object. Iterator Iterates over entries in a Prodoc file; DEPRECATED. """ from types import * import os import sgmllib from Bio import File from Bio import Index from Bio.ParserSupport import * def parse(handle): import cStringIO parser = RecordParser() text = "" for line in handle: text += line if line[:5] == '{END}': handle = cStringIO.StringIO(text) record = parser.parse(handle) text = "" yield record def read(handle): parser = RecordParser() record = parser.parse(handle) # We should have reached the end of the record by now remainder = handle.read() if remainder: raise ValueError("More than one Prodoc record found") return record # It may be a good idea to rewrite read(), parse() at some point to avoid # using the old-style "parser = RecordParser(); parser.parse(handle)" approach. class Record: """Holds information from a Prodoc record. Members: accession Accession number of the record. prosite_refs List of tuples (prosite accession, prosite name). text Free format text. references List of reference objects. """ def __init__(self): self.accession = '' self.prosite_refs = [] self.text = '' self.references = [] class Reference: """Holds information from a Prodoc citation. Members: number Number of the reference. (string) authors Names of the authors.
citation Describes the citation. """ def __init__(self): self.number = '' self.authors = '' self.citation = '' class Iterator: """Returns one record at a time from a Prodoc file. Methods: next Return the next record from the stream, or None. """ def __init__(self, handle, parser=None): """__init__(self, handle, parser=None) Create a new iterator. handle is a file-like object. parser is an optional Parser object to change the results into another form. If set to None, then the raw contents of the file will be returned. """ import warnings warnings.warn("Bio.Prosite.Prodoc.Iterator is deprecated; we recommend using the function Bio.Prosite.Prodoc.parse instead. Please contact the Biopython developers at [email protected] you cannot use Bio.Prosite.Prodoc.parse instead of Bio.Prosite.Prodoc.Iterator.", DeprecationWarning) if type(handle) is not FileType and type(handle) is not InstanceType: raise ValueError("I expected a file handle or file-like object") self._uhandle = File.UndoHandle(handle) self._parser = parser def next(self): """next(self) -> object Return the next Prodoc record from the file. If no more records, return None. """ lines = [] while 1: line = self._uhandle.readline() if not line: break lines.append(line) if line[:5] == '{END}': break if not lines: return None data = "".join(lines) if self._parser is not None: return self._parser.parse(File.StringHandle(data)) return data def __iter__(self): return iter(self.next, None) class Dictionary: """Accesses a Prodoc file using a dictionary interface. """ __filename_key = '__filename' def __init__(self, indexname, parser=None): """__init__(self, indexname, parser=None) Open a Prodoc Dictionary. indexname is the name of the index for the dictionary. The index should have been created using the index_file function. parser is an optional Parser object to change the results into another form. If set to None, then the raw contents of the file will be returned. """ self._index = Index.Index(indexname) self._handle = open(self._index[Dictionary.__filename_key]) self._parser = parser def __len__(self): return len(self._index) def __getitem__(self, key): start, len = self._index[key] self._handle.seek(start) data = self._handle.read(len) if self._parser is not None: return self._parser.parse(File.StringHandle(data)) return data def __getattr__(self, name): return getattr(self._index, name) class RecordParser(AbstractParser): """Parses Prodoc data into a Record object. """ def __init__(self): self._scanner = _Scanner() self._consumer = _RecordConsumer() def parse(self, handle): self._scanner.feed(handle, self._consumer) return self._consumer.data class _Scanner: """Scans Prodoc-formatted data. Tested with: Release 15.0, July 1998 """ def feed(self, handle, consumer): """feed(self, handle, consumer) Feed in Prodoc data for scanning. handle is a file-like object that contains prosite data. consumer is a Consumer object that will receive events as the report is scanned. 
""" if isinstance(handle, File.UndoHandle): uhandle = handle else: uhandle = File.UndoHandle(handle) while 1: line = uhandle.peekline() if not line: break elif is_blank_line(line): # Skip blank lines between records uhandle.readline() continue else: self._scan_record(uhandle, consumer) def _scan_record(self, uhandle, consumer): consumer.start_record() self._scan_accession(uhandle, consumer) self._scan_prosite_refs(uhandle, consumer) read_and_call(uhandle, consumer.noevent, start='{BEGIN}') self._scan_text(uhandle, consumer) self._scan_refs(uhandle, consumer) self._scan_copyright(uhandle, consumer) read_and_call(uhandle, consumer.noevent, start='{END}') consumer.end_record() def _scan_accession(self, uhandle, consumer): read_and_call(uhandle, consumer.accession, start='{PDOC') def _scan_prosite_refs(self, uhandle, consumer): while attempt_read_and_call(uhandle, consumer.prosite_reference, start='{PS'): pass def _scan_text(self, uhandle, consumer): while 1: line = safe_readline(uhandle) if (line[0] == '[' and line[3] == ']' and line[4] == ' ') or \ line[:5] == '{END}': uhandle.saveline(line) break consumer.text(line) def _scan_refs(self, uhandle, consumer): while 1: line = safe_readline(uhandle) if line[:5] == '{END}' or is_blank_line(line): uhandle.saveline(line) break consumer.reference(line) def _scan_copyright(self, uhandle, consumer): # Cayte Lindner found some PRODOC records with the copyrights # appended at the end. We'll try and recognize these. read_and_call_while(uhandle, consumer.noevent, blank=1) if attempt_read_and_call(uhandle, consumer.noevent, start='+----'): read_and_call_until(uhandle, consumer.noevent, start='+----') read_and_call(uhandle, consumer.noevent, start='+----') read_and_call_while(uhandle, consumer.noevent, blank=1) class _RecordConsumer(AbstractConsumer): """Consumer that converts a Prodoc record to a Record object. Members: data Record with Prodoc data. """ def __init__(self): self.data = None def start_record(self): self.data = Record() def end_record(self): self._clean_data() def accession(self, line): line = line.rstrip() if line[0] != '{' or line[-1] != '}': raise ValueError("I don't understand accession line\n%s" % line) acc = line[1:-1] if acc[:4] != 'PDOC': raise ValueError("Invalid accession in line\n%s" % line) self.data.accession = acc def prosite_reference(self, line): line = line.rstrip() if line[0] != '{' or line[-1] != '}': raise ValueError("I don't understand accession line\n%s" % line) acc, name = line[1:-1].split('; ') self.data.prosite_refs.append((acc, name)) def text(self, line): self.data.text = self.data.text + line def reference(self, line): if line[0] == '[' and line[3] == ']': # new reference self._ref = Reference() self._ref.number = line[1:3].strip() if line[1] == 'E': # If it's an electronic reference, then the URL is on the # line, instead of the author. self._ref.citation = line[4:].strip() else: self._ref.authors = line[4:].strip() self.data.references.append(self._ref) elif line[:4] == ' ': if not self._ref: raise ValueError("Unnumbered reference lines\n%s" % line) self._ref.citation = self._ref.citation + line[5:] else: raise Exception("I don't understand the reference line\n%s" % line) def _clean_data(self): # get rid of trailing newlines for ref in self.data.references: ref.citation = ref.citation.rstrip() ref.authors = ref.authors.rstrip() def index_file(filename, indexname, rec2key=None): """index_file(filename, indexname, rec2key=None) Index a Prodoc file. filename is the name of the file. 
indexname is the name of the dictionary. rec2key is an optional callback that takes a Record and generates a unique key (e.g. the accession number) for the record. If not specified, the id name will be used. """ import os if not os.path.exists(filename): raise ValueError("%s does not exist" % filename) index = Index.Index(indexname, truncate=1) index[Dictionary._Dictionary__filename_key] = filename handle = open(filename) records = parse(handle) end = 0L for record in records: start = end end = long(handle.tell()) length = end - start if rec2key is not None: key = rec2key(record) else: key = record.accession if not key: raise KeyError("empty key was produced") elif key in index: raise KeyError("duplicate key %s found" % key) index[key] = start, length
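The docstring at the top of this module describes parse(), read(), Record and Dictionary but ships no example. Below is a minimal usage sketch, not part of the original file: the "prosite.doc" file name comes from the docstring, the import path assumes the old Bio.Prosite package layout, and the printed fields are the ones defined on Record above.

# Minimal usage sketch for the Prodoc helpers described above (not from the
# original module). Assumes a local copy of the "prosite.doc" file mentioned
# in the docstring and the legacy Bio.Prosite package layout.
from Bio.Prosite import Prodoc

handle = open("prosite.doc")
for record in Prodoc.parse(handle):
    # Each record carries its PDOC accession and the PROSITE entries it documents.
    print(record.accession)
    for ps_acc, ps_name in record.prosite_refs:
        print("  documents %s (%s)" % (ps_acc, ps_name))
    print("  %d reference(s)" % len(record.references))
handle.close()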
recipe-511429.py
import os

def main():
    try:
        while True:
            while True:
                mode = input('Mode: ').lower()
                if 'search'.startswith(mode):
                    mode = False
                    break
                elif 'destroy'.startswith(mode):
                    mode = True
                    break
                print('"search" or "destroy"')
            path = input('Path: ')
            extention = input('Extention: ')
            for path_name in search(path, extention, mode):
                print('Found:', path_name)
    except:
        pass

def search(path, extention, destroy):
    assert os.path.isdir(path)
    path_list = list()
    for name in os.listdir(path):
        path_name = os.path.join(path, name)
        try:
            if os.path.isdir(path_name):
                path_list += search(path_name, extention, destroy)
            elif os.path.isfile(path_name):
                if path_name.endswith(extention) or not extention:
                    if destroy:
                        os.remove(path_name)
                    else:
                        path_list.append(path_name)
        except:
            print('Error:', path_name)
    return path_list

if __name__ == '__main__':
    main()
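The recipe is driven interactively through main(), but search() can also be called directly. A small sketch follows (not part of the recipe); the '/tmp/demo' path and '.log' extension are hypothetical examples.

# Sketch only: exercising search() from the recipe above without the prompts.
matches = search('/tmp/demo', '.log', destroy=False)   # list matching files
for path_name in matches:
    print('Found:', path_name)

# With destroy=True, search() deletes each match instead of collecting it,
# so the returned list is empty:
# search('/tmp/demo', '.log', destroy=True)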
class.py
class Point:
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y
        return "({0},{1})".format(self.x, self.y)

    def __add__(self, other):
        x = self.x + other.x
        y = self.y + other.y
        return Point(x, y)
def __str__(self):
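The fragments above are stored out of source order (the `def __str__(self):` header appears after its body), so here is a short sketch, not part of the original file, with the pieces reassembled and exercised once.

# Sketch: the Point fragments reassembled in source order and used.
class Point:
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def __str__(self):
        return "({0},{1})".format(self.x, self.y)

    def __add__(self, other):
        x = self.x + other.x
        y = self.y + other.y
        return Point(x, y)


p1 = Point(2, 3)
p2 = Point(-1, 5)
print(p1 + p2)   # __add__ builds a new Point, __str__ renders it -> (1,8)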
test_arrow_result.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019 Snowflake Computing Inc. All right reserved.
#

import pytest


@pytest.mark.skip(
    reason="Cython is not enabled in build env")
def test_select_with_num(conn_cnx):
    with conn_cnx() as json_cnx:
        with conn_cnx() as arrow_cnx:
            row_count = 50000
            sql_text = ("select seq4() as c1, uniform(1, 10, random(12)) as c2 from " +
                        "table(generator(rowcount=>50000)) order by c1")

            cursor_json = json_cnx.cursor()
            cursor_json.execute("alter session set query_result_format='JSON'")
            cursor_json.execute(sql_text)

            cursor_arrow = arrow_cnx.cursor()
            cursor_arrow.execute("alter session set query_result_format='ARROW_FORCE'")
            cursor_arrow.execute(sql_text)

            for i in range(0, row_count):
                (json_c1, json_c2) = cursor_json.fetchone()
                (arrow_c1, arrow_c2) = cursor_arrow.fetchone()
                assert json_c1 == arrow_c1
                assert json_c2 == arrow_c2
#
window_handle.rs
use crate::common::*;

pub(crate) struct WindowHandle {
  pub(crate) id: u64,
}

impl WindowHandle {
  pub(crate) fn set_floating(&self, floating: usize) -> Result<(), Error> {
    let info = self.query()?;

    if info.floating != floating {
      let exit_status = Command::new("yabai")
        .arg("-m")
        .arg("window")
        .arg(self.id.to_string())
        .arg("--toggle")
        .arg("float")
        .status()?;

      if !exit_status.success() {
        return Err(Error::ExitStatus { exit_status });
      }
    }

    Ok(())
  }

  pub(crate) fn
(&self) -> Result<WindowInfo, Error> {
    let child = Command::new("yabai")
      .arg("-m")
      .arg("query")
      .arg("--windows")
      .arg("--window")
      .arg(self.id.to_string())
      .stdout(Stdio::piped())
      .spawn()?;

    Ok(serde_json::from_reader(child.stdout.unwrap())?)
  }

  pub(crate) fn swap(&self, other: &WindowHandle) -> Result<(), Error> {
    // yabai -m window self.id.to_string() --swap other.id.to_string()
    unimplemented!()
  }
}
query
command.go
// +build game

package main

import (
    "fmt"
    "log"

    "github.com/ridge/game/task"
)

// This should work as a default - even if it's in a different file
var Default = ReturnsNilError

// this should not be a target because it returns a string
func ReturnsString() string {
    fmt.Println("more stuff")
    return ""
}

func TestVerbose()
func ReturnsVoid(ctx task.Context) {
    ctx.Dep(f)
}

func f() {}
{ log.Println("hi!") }
freeresponse.py
import json
import os

files = os.listdir()

for file in files:
    if file.endswith('.ipynb'):
        answers = ''
        with open(file) as data:
            nb = json.load(data)
            for cell in nb['cells']:
                            answers += ''.join(cell['source']) + '\n'
        f = open('responses for ' + file[:-6] + '.txt', 'w')
        f.write(answers)
        f.close()
                if cell['cell_type'] == 'markdown':
                    if 'source' in cell and len(cell['source']) > 0:
                        if cell['source'][0].startswith("<font color='blue'> ANSWER:"):
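Since the fragments of this script also appear out of source order, here is a sketch (not part of the original) of the same answer-extraction logic reassembled into one function for a single notebook; 'hw1.ipynb' is a hypothetical path.

# Sketch: reassembled per-notebook answer extraction.
import json

def extract_answers(notebook_path):
    with open(notebook_path) as data:
        nb = json.load(data)
    answers = ''
    for cell in nb['cells']:
        if cell['cell_type'] == 'markdown':
            if 'source' in cell and len(cell['source']) > 0:
                if cell['source'][0].startswith("<font color='blue'> ANSWER:"):
                    answers += ''.join(cell['source']) + '\n'
    return answers

print(extract_answers('hw1.ipynb'))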
eshotbutton.component.ts
import { Component, OnInit, Input, Output, EventEmitter } from '@angular/core';
import { AngularFirestoreCollection, AngularFirestore } from 'angularfire2/firestore';
import { Asunto } from '../../../../models/asuntos';
import { Observable } from 'rxjs';
import { Employee } from '../../../../shared/employee.model';
import { EmpresaService } from '../../../../shared/empresa.service';
  templateUrl: './eshotbutton.component.html',
  styleUrls: ['./eshotbutton.component.scss']
})
export class EshotbuttonComponent implements OnInit {

  renderValue: string;
  asuntosCollection: AngularFirestoreCollection<Asunto>;
  asuntos: Observable<Asunto[]>;
  usuariosCollection : AngularFirestoreCollection<Employee>;
  usuarios: Observable<Employee[]>

  @Input() value: string | number;
  @Input() rowData: any;

  @Output() save: EventEmitter<any> = new EventEmitter();

  constructor(private service: EmpresaService, private firestore: AngularFirestore, private toastr: ToastrService) { }

  ngOnInit() {
    this.renderValue = this.value.toString().toUpperCase();
    // this.asuntosCollection = this.firestore.collection('asuntos')
    // this.asuntos = this.asuntosCollection.valueChanges()
    // this.usuariosCollection = this.firestore.collection('usuarios')
    // this.usuarios = this.usuariosCollection.valueChanges()
  }

  onClick() {
    this.save.emit(this.rowData.id);
    this.service.formData = Object.assign({}, this.rowData);
  }
}
import { ToastrService } from 'ngx-toastr';

@Component({
  selector: 'ngx-eshotbutton',
norm_act.py
from typing import Union, List import torch from torch import nn as nn from torch.nn import functional as F from models.layers.create_act import get_act_layer from .trace_utils import _assert class BatchNormAct2d(nn.BatchNorm2d): """BatchNorm + Activation This module performs BatchNorm + Activation in a manner that will remain backwards compatible with weights trained with separate bn, act. This is why we inherit from BN instead of composing it as a .bn member. """ def __init__( self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None): super(BatchNormAct2d, self).__init__( num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats) self.drop = drop_layer() if drop_layer is not None else nn.Identity() act_layer = get_act_layer(act_layer) # string -> nn.Module if act_layer is not None and apply_act: act_args = dict(inplace=True) if inplace else {} self.act = act_layer(**act_args) else: self.act = nn.Identity() def forward(self, x): # cut & paste of torch.nn.BatchNorm2d.forward impl to avoid issues with torchscript and tracing _assert(x.ndim == 4, f'expected 4D input (got {x.ndim}D input)') # exponential_average_factor is set to self.momentum # (when it is available) only so that it gets updated # in ONNX graph when this node is exported to ONNX. if self.momentum is None: exponential_average_factor = 0.0 else: exponential_average_factor = self.momentum if self.training and self.track_running_stats: # TODO: if statement only here to tell the jit to skip emitting this when it is None if self.num_batches_tracked is not None: # type: ignore[has-type] self.num_batches_tracked = self.num_batches_tracked + \ 1 # type: ignore[has-type] if self.momentum is None: # use cumulative moving average exponential_average_factor = 1.0 / \ float(self.num_batches_tracked) else: # use exponential moving average exponential_average_factor = self.momentum r""" Decide whether the mini-batch stats should be used for normalization rather than the buffers. Mini-batch stats are used in training mode, and in eval mode when buffers are None. """ if self.training: bn_training = True else: bn_training = (self.running_mean is None) and ( self.running_var is None) r""" Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are used for normalization (i.e. in eval mode when buffers are not None). 
""" x = F.batch_norm( x, # If buffers are not to be tracked, ensure that they won't be updated self.running_mean if not self.training or self.track_running_stats else None, self.running_var if not self.training or self.track_running_stats else None, self.weight, self.bias, bn_training, exponential_average_factor, self.eps, ) x = self.drop(x) x = self.act(x) return x def _num_groups(num_channels, num_groups, group_size): if group_size: assert num_channels % group_size == 0 return num_channels // group_size return num_groups class GroupNormAct(nn.GroupNorm): # NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args def __init__( self, num_channels, num_groups=32, eps=1e-5, affine=True, group_size=None, apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None): super(GroupNormAct, self).__init__( _num_groups(num_channels, num_groups, group_size), num_channels, eps=eps, affine=affine) self.drop = drop_layer() if drop_layer is not None else nn.Identity() act_layer = get_act_layer(act_layer) # string -> nn.Module if act_layer is not None and apply_act: act_args = dict(inplace=True) if inplace else {} self.act = act_layer(**act_args) else: self.act = nn.Identity() def forward(self, x): x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) x = self.drop(x) x = self.act(x) return x class LayerNormAct(nn.LayerNorm): def __init__( self, normalization_shape: Union[int, List[int], torch.Size], eps=1e-5, affine=True, apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None): super(LayerNormAct, self).__init__( normalization_shape, eps=eps, elementwise_affine=affine) self.drop = drop_layer() if drop_layer is not None else nn.Identity() act_layer = get_act_layer(act_layer) # string -> nn.Module if act_layer is not None and apply_act:
act_args = dict(inplace=True) if inplace else {} self.act = act_layer(**act_args) else: self.act = nn.Identity() def forward(self, x): x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) x = self.drop(x) x = self.act(x) return x class LayerNormAct2d(nn.LayerNorm): def __init__( self, num_channels, eps=1e-5, affine=True, apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None): super(LayerNormAct2d, self).__init__( num_channels, eps=eps, elementwise_affine=affine) self.drop = drop_layer() if drop_layer is not None else nn.Identity() act_layer = get_act_layer(act_layer) # string -> nn.Module if act_layer is not None and apply_act: act_args = dict(inplace=True) if inplace else {} self.act = act_layer(**act_args) else: self.act = nn.Identity() def forward(self, x): x = F.layer_norm( x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) x = self.drop(x) x = self.act(x) return x
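The BatchNormAct2d docstring above explains that the module fuses BatchNorm and activation while staying weight-compatible with a separately trained nn.BatchNorm2d. A minimal sketch of what that compatibility means in practice follows; the `norm_act` import path is hypothetical, and it assumes get_act_layer passes an nn.Module class through unchanged (as it does in timm) and that the models.layers dependency is on the path.

# Sketch, assuming the module above is importable as `norm_act` (hypothetical path).
import torch
from torch import nn
from norm_act import BatchNormAct2d

fused = BatchNormAct2d(8)                        # BN + default ReLU in one module
plain = nn.Sequential(nn.BatchNorm2d(8), nn.ReLU())

# Weight compatibility: BatchNormAct2d inherits from nn.BatchNorm2d, so a
# state_dict saved from a plain BatchNorm2d loads directly.
fused.load_state_dict(plain[0].state_dict())

x = torch.randn(2, 8, 16, 16)
fused.eval()
plain.eval()
with torch.no_grad():
    # Same normalization followed by the same activation.
    assert torch.allclose(fused(x), plain(x))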
tcp.rs
use crate::clients::Exchanger; use crate::Message; use crate::clients::stats::StatsBuilder; use std::io::Read; use std::io::Write; use std::net::SocketAddr; use std::net::TcpStream; use std::net::ToSocketAddrs; use std::time::Duration; pub const GOOGLE_IPV4_PRIMARY: &str = "8.8.8.8:53"; pub const GOOGLE_IPV4_SECONDARY: &str = "8.8.4.4:53"; pub const GOOGLE_IPV6_PRIMARY: &str = "2001:4860:4860::8888:53"; pub const GOOGLE_IPV6_SECONDARY: &str = "2001:4860:4860::8844:53"; pub const GOOGLE: [&str; 4] = [ GOOGLE_IPV4_PRIMARY, GOOGLE_IPV4_SECONDARY, GOOGLE_IPV6_PRIMARY, GOOGLE_IPV6_SECONDARY, ]; /// A TCP DNS Client. /// /// # Example /// /// ```rust /// use rustdns::clients::Exchanger; /// use rustdns::clients::tcp::Client; /// use rustdns::types::*; /// /// fn main() -> Result<(), rustdns::Error> { /// let mut query = Message::default(); /// query.add_question("bramp.net", Type::A, Class::Internet); /// /// let response = Client::new("8.8.8.8:53")? /// .exchange(&query) /// .expect("could not exchange message"); /// /// println!("{}", response); /// Ok(()) /// } /// ``` /// /// See <https://datatracker.ietf.org/doc/html/rfc1035#section-4.2.2> // TODO Document all the options. pub struct Client { servers: Vec<SocketAddr>, connect_timeout: Duration, read_timeout: Option<Duration>, write_timeout: Option<Duration>, } impl Default for Client { fn default() -> Self { Client { servers: Vec::default(), connect_timeout: Duration::new(5, 0), read_timeout: Some(Duration::new(5, 0)), write_timeout: Some(Duration::new(5, 0)), } } } impl Client { /// Creates a new Client bound to the specific servers. // TODO Document how it fails. pub fn new<A: ToSocketAddrs>(servers: A) -> Result<Self, crate::Error> { let servers = servers.to_socket_addrs()?.collect(); // TODO Check for zero servers. Ok(Self { servers, ..Default::default() }) } } impl Exchanger for Client { /// Sends the [`Message`] to the `server` via TCP and returns the result. fn
(&self, query: &Message) -> Result<Message, crate::Error> { let mut stream = TcpStream::connect_timeout(&self.servers[0], self.connect_timeout)?; stream.set_nodelay(true)?; // We send discrete packets, so we can send as soon as possible. stream.set_read_timeout(self.read_timeout)?; stream.set_write_timeout(self.write_timeout)?; let message = query.to_vec()?; let stats = StatsBuilder::start(message.len() + 2); // Two byte length prefix followed by the message. // TODO Move this into a single message! stream.write_all(&(message.len() as u16).to_be_bytes())?; stream.write_all(&message)?; // Now receive a two byte length let buf = &mut [0; 2]; stream.read_exact(buf)?; let len = u16::from_be_bytes(*buf); // and finally the message let mut buf = vec![0; len.into()]; stream.read_exact(&mut buf)?; let mut resp = Message::from_slice(&buf)?; resp.stats = Some(stats.end(stream.peer_addr()?, (len + 2).into())); Ok(resp) } }
exchange
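The comments in exchange() above spell out the framing DNS uses over TCP (RFC 1035 section 4.2.2): a two-byte big-endian length prefix before the request, and the same prefix on the reply. The sketch below shows just that framing, written in Python for consistency with the other examples in this dump; `query_bytes` stands in for a serialized DNS message, and the server address in the usage comment is only an example.

# Sketch of the 2-byte length framing used by exchange() above (RFC 1035 4.2.2).
import socket
import struct

def _read_exact(sock, n):
    # recv() may return short reads; loop until n bytes have arrived.
    buf = b""
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError("connection closed mid-message")
        buf += chunk
    return buf

def tcp_exchange(server, query_bytes, timeout=5.0):
    with socket.create_connection(server, timeout=timeout) as sock:
        # Two-byte big-endian length prefix, then the message itself.
        sock.sendall(struct.pack("!H", len(query_bytes)) + query_bytes)
        # The reply uses the same framing: read the length, then that many bytes.
        (length,) = struct.unpack("!H", _read_exact(sock, 2))
        return _read_exact(sock, length)

# Example (with hypothetical query bytes):
# reply = tcp_exchange(("8.8.8.8", 53), query_bytes)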
_document.tsx
/* eslint-disable react/jsx-props-no-spreading */ /** * @todo: remove this comment below when ready * ref: https://github.com/vercel/next.js/issues/13712#issuecomment-910409023 * */ import createEmotionServer from "@emotion/server/create-instance"; // eslint-disable-next-line @next/next/no-document-import-in-page import Document, { Html, Head, Main, NextScript, DocumentContext, } from "next/document"; import * as React from "react"; import createEmotionCache from "styles/createEmotionCache"; import { GA_TRACKING_ID } from "../utils/gtag"; const APP_NAME = "Standingify"; class MyDocument extends Document { static async getInitialProps(ctx: DocumentContext) { const originalRenderPage = ctx.renderPage; const cache = createEmotionCache(); const { extractCriticalToChunks } = createEmotionServer(cache); ctx.renderPage = () => originalRenderPage({ // eslint-disable-next-line @typescript-eslint/no-explicit-any enhanceApp: (App: any) => (props) => <App emotionCache={cache} {...props} />, }); const initialProps = await Document.getInitialProps(ctx); const emotionStyles = extractCriticalToChunks(initialProps.html); const emotionStyleTags = emotionStyles.styles.map((style) => ( <style data-emotion={`${style.key} ${style.ids.join(" ")}`} key={style.key} // eslint-disable-next-line react/no-danger dangerouslySetInnerHTML={{ __html: style.css }} /> )); return { ...initialProps, styles: [ ...React.Children.toArray(initialProps.styles), ...emotionStyleTags, ], }; } render() { return ( <Html lang="en-US"> <Head> <meta name="application-name" content={APP_NAME} /> <meta name="apple-mobile-web-app-capable" content="yes" /> <meta name="apple-mobile-web-app-status-bar-style" content="default" /> <meta name="apple-mobile-web-app-title" content={APP_NAME} /> <meta name="format-detection" content="telephone=no" /> <meta name="mobile-web-app-capable" content="yes" /> <meta name="theme-color" content="#FFFFFF" /> {/* Global Site Tag (gtag.js) - Google Analytics */} <script async src={`https://www.googletagmanager.com/gtag/js?id=${GA_TRACKING_ID}`} /> <script dangerouslySetInnerHTML={{ __html: ` window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', '${GA_TRACKING_ID}', { page_path: window.location.pathname, }); `, }}
/> {/* add your own app-icon */} {/* <link rel="apple-touch-icon" sizes="180x180" href="/icons/apple-touch-icon.png" /> <link rel="shortcut icon" href="/app-icon.png" /> */} <link rel="manifest" href="/manifest.json" /> </Head> <body> <Main /> <NextScript /> </body> </Html> ); } } export default MyDocument;
fixed_queue.go
/* Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package queue import ( "sync" "time" gpb "github.com/sunnogo/gnmi/proto/gnmi" ) // FixedQueue provides a strict delivery of provides updates. If checkDelay, // the Next() will sleep for the duration between the timestamps provided in the // updates. type FixedQueue struct { mu sync.Mutex resp []*gpb.SubscribeResponse delay time.Duration checkDelay bool } // NewFixed creates a new FixedQueue with resp list of updates enqueued for // iterating through. func NewFixed(resp []*gpb.SubscribeResponse, delay bool) *FixedQueue
// Add will append resp to the current tail of the queue. func (q *FixedQueue) Add(resp *gpb.SubscribeResponse) { q.mu.Lock() defer q.mu.Unlock() q.resp = append(q.resp, resp) } // Next returns the next update in the queue or an error. If the queue is // exhausted, a nil is returned for the update. The return will always be a // *gpb.SubscribeResponse for proper type assertion. func (q *FixedQueue) Next() (interface{}, error) { q.mu.Lock() defer q.mu.Unlock() if len(q.resp) == 0 { return nil, nil } if q.delay != 0 { time.Sleep(q.delay) } resp := q.resp[0] q.resp = q.resp[1:] var n *gpb.SubscribeResponse_Update if len(q.resp) > 0 && q.checkDelay { var nOk bool n, nOk = resp.Response.(*gpb.SubscribeResponse_Update) next, nextOk := q.resp[0].Response.(*gpb.SubscribeResponse_Update) if !nOk || !nextOk { q.delay = 0 } else { q.delay = time.Duration(next.Update.Timestamp-n.Update.Timestamp) * time.Nanosecond if q.delay < 0 { q.delay = 0 } } } return resp, nil }
{ return &FixedQueue{ resp: resp, checkDelay: delay, } }
update.rs
use { super::{ Ajour, BackupFolderKind, CatalogCategory, CatalogColumnKey, CatalogRow, CatalogSource, ColumnKey, DirectoryType, DownloadReason, ExpandType, InstallAddon, InstallKind, InstallStatus, Interaction, Message, Mode, SelfUpdateStatus, SortDirection, State, }, crate::{log_error, Result}, ajour_core::{ addon::{Addon, AddonFolder, AddonState}, backup::{backup_folders, latest_backup, BackupFolder}, cache::{ remove_addon_cache_entry, update_addon_cache, AddonCache, AddonCacheEntry, FingerprintCache, }, catalog, config::{ColumnConfig, ColumnConfigV2, Flavor}, error::{DownloadError, FilesystemError, ParseError, RepositoryError}, fs::{delete_addons, install_addon, PersistentData}, network::download_addon, parse::{read_addon_directory, update_addon_fingerprint}, repository::{RepositoryKind, RepositoryPackage}, utility::{download_update_to_temp_file, get_latest_release, wow_path_resolution}, }, ajour_widgets::header::ResizeEvent, anyhow::Context, async_std::sync::{Arc, Mutex}, chrono::{NaiveTime, Utc}, iced::{Command, Length}, isahc::http::Uri, native_dialog::*, std::collections::{hash_map::DefaultHasher, HashMap}, std::convert::TryFrom, std::hash::Hasher, std::path::{Path, PathBuf}, }; pub fn handle_message(ajour: &mut Ajour, message: Message) -> Result<Command<Message>> { match message { Message::CachesLoaded(result) => { log::debug!("Message::CachesLoaded(error: {})", result.is_err()); if let Ok((fingerprint_cache, addon_cache)) = result { ajour.fingerprint_cache = Some(Arc::new(Mutex::new(fingerprint_cache))); ajour.addon_cache = Some(Arc::new(Mutex::new(addon_cache))); } return Ok(Command::perform(async {}, Message::Parse)); } Message::Parse(_) => { log::debug!("Message::Parse"); // Begin to parse addon folder(s). let mut commands = vec![]; // If a backup directory is selected, find the latest backup if let Some(dir) = &ajour.config.backup_directory { commands.push(Command::perform( latest_backup(dir.to_owned()), Message::LatestBackup, )); } let flavors = &Flavor::ALL[..]; for flavor in flavors { if let Some(addon_directory) = ajour.config.get_addon_directory_for_flavor(flavor) { log::debug!( "preparing to parse addons in {:?}", addon_directory.display() ); // Builds a Vec of valid flavors. if addon_directory.exists() { ajour.valid_flavors.push(*flavor); ajour.valid_flavors.dedup(); } // Sets loading ajour.state.insert(Mode::MyAddons(*flavor), State::Loading); // Add commands commands.push(Command::perform( perform_read_addon_directory( ajour.addon_cache.clone(), ajour.fingerprint_cache.clone(), addon_directory.clone(), *flavor, ), Message::ParsedAddons, )); } else { log::debug!("addon directory is not set, showing welcome screen"); // Assume we are welcoming a user because directory is not set. let flavor = ajour.config.wow.flavor; ajour.state.insert(Mode::MyAddons(flavor), State::Start); break; } } let flavor = ajour.config.wow.flavor; // If we dont have current flavor in valid flavors we select a new. if !ajour.valid_flavors.iter().any(|f| *f == flavor) { // Find new flavor. if let Some(flavor) = ajour.valid_flavors.first() { // Set nye flavor. ajour.config.wow.flavor = *flavor; // Set mode. ajour.mode = Mode::MyAddons(*flavor); // Persist the newly updated config. ajour.config.save()?; } } return Ok(Command::batch(commands)); } Message::Interaction(Interaction::Refresh) => { log::debug!("Interaction::Refresh"); // Close details if shown. ajour.expanded_type = ExpandType::None; // Cleans the addons. ajour.addons = HashMap::new(); // Prepare state for loading. 
let flavor = ajour.config.wow.flavor; ajour.state.insert(Mode::MyAddons(flavor), State::Loading); return Ok(Command::perform(async {}, Message::Parse)); } Message::Interaction(Interaction::Ignore(id)) => { log::debug!("Interaction::Ignore({})", &id); // Close details if shown. ajour.expanded_type = ExpandType::None; let flavor = ajour.config.wow.flavor; let addons = ajour.addons.entry(flavor).or_default(); let addon = addons.iter_mut().find(|a| a.primary_folder_id == id); if let Some(addon) = addon { addon.state = AddonState::Ignored; // Update the config. ajour .config .addons .ignored .entry(flavor) .or_default() .push(addon.primary_folder_id.clone()); // Persist the newly updated config. let _ = &ajour.config.save(); } } Message::Interaction(Interaction::Unignore(id)) => { log::debug!("Interaction::Unignore({})", &id); // Update ajour state. let flavor = ajour.config.wow.flavor; let addons = ajour.addons.entry(flavor).or_default(); if let Some(addon) = addons.iter_mut().find(|a| a.primary_folder_id == id) { // Check if addon is updatable. if let Some(package) = addon.relevant_release_package() { if addon.is_updatable(&package) { addon.state = AddonState::Updatable; } else { addon.state = AddonState::Idle; } } }; // Update the config. let ignored_addon_ids = ajour.config.addons.ignored.entry(flavor).or_default(); ignored_addon_ids.retain(|i| i != &id); // Persist the newly updated config. let _ = &ajour.config.save(); } Message::Interaction(Interaction::OpenDirectory(path)) => { log::debug!("Interaction::OpenDirectory({:?})", path); let _ = open::that(path); } Message::Interaction(Interaction::SelectDirectory(dir_type)) => { log::debug!("Interaction::SelectDirectory({:?})", dir_type); let message = match dir_type { DirectoryType::Wow => Message::UpdateWowDirectory, DirectoryType::Backup => Message::UpdateBackupDirectory, }; return Ok(Command::perform(select_directory(), message)); } Message::Interaction(Interaction::OpenLink(link)) => { log::debug!("Interaction::OpenLink({})", &link); return Ok(Command::perform( async { let _ = opener::open(link); }, Message::None, )); } Message::UpdateWowDirectory(chosen_path) => { log::debug!("Message::UpdateWowDirectory(Chosen({:?}))", &chosen_path); let path = wow_path_resolution(chosen_path); log::debug!("Message::UpdateWowDirectory(Resolution({:?}))", &path); if path.is_some() { // Clear addons. ajour.addons = HashMap::new(); // Update the path for World of Warcraft. ajour.config.wow.directory = path; // Persist the newly updated config. let _ = &ajour.config.save(); // Set loading state. let state = ajour.state.clone(); for (mode, _) in state { if matches!(mode, Mode::MyAddons(_)) { ajour.state.insert(mode, State::Loading); } } return Ok(Command::perform(async {}, Message::Parse)); } } Message::Interaction(Interaction::FlavorSelected(flavor)) => { log::debug!("Interaction::FlavorSelected({})", flavor); // Close details if shown. ajour.expanded_type = ExpandType::None; // Update the game flavor ajour.config.wow.flavor = flavor; // Persist the newly updated config. let _ = &ajour.config.save(); // Update flavor on MyAddons if thats our current mode. 
if let Mode::MyAddons(_) = ajour.mode { ajour.mode = Mode::MyAddons(flavor) } // Update catalog query_and_sort_catalog(ajour); } Message::Interaction(Interaction::ModeSelected(mode)) => { log::debug!("Interaction::ModeSelected({:?})", mode); // Toggle off About or Settings if button is clicked again if ajour.mode == mode && (mode == Mode::About || mode == Mode::Settings) { ajour.mode = Mode::MyAddons(ajour.config.wow.flavor); } // Set mode else { ajour.mode = mode; } } Message::Interaction(Interaction::Expand(expand_type)) => { // An addon can be exanded in two ways. match &expand_type { ExpandType::Details(a) => { log::debug!("Interaction::Expand(Details({:?}))", &a.primary_folder_id); let should_close = match &ajour.expanded_type { ExpandType::Details(ea) => a.primary_folder_id == ea.primary_folder_id, _ => false, }; if should_close { ajour.expanded_type = ExpandType::None; } else { ajour.expanded_type = expand_type.clone(); } } ExpandType::None => { log::debug!("Interaction::Expand(ExpandType::None)"); } } } Message::Interaction(Interaction::Delete(id)) => { log::debug!("Interaction::Delete({})", &id); // Close details if shown. ajour.expanded_type = ExpandType::None; let flavor = ajour.config.wow.flavor; let addons = ajour.addons.entry(flavor).or_default(); if let Some(addon) = addons.iter().find(|a| a.primary_folder_id == id).cloned() { // Remove from local state. addons.retain(|a| a.primary_folder_id != addon.primary_folder_id); // Delete addon(s) from disk. let _ = delete_addons(&addon.folders); // Remove addon from cache if let Some(addon_cache) = &ajour.addon_cache { if let Ok(entry) = AddonCacheEntry::try_from(&addon) { match addon.repository_kind() { // Delete the entry for this cached addon Some(RepositoryKind::Tukui) | Some(RepositoryKind::WowI) | Some(RepositoryKind::Git(_)) => { return Ok(Command::perform( remove_addon_cache_entry(addon_cache.clone(), entry, flavor), Message::AddonCacheEntryRemoved, )); } _ => {} } } } } } Message::Interaction(Interaction::Update(id)) => { log::debug!("Interaction::Update({})", &id); // Close details if shown. ajour.expanded_type = ExpandType::None; let flavor = ajour.config.wow.flavor; let addons = ajour.addons.entry(flavor).or_default(); let to_directory = ajour .config .get_download_directory_for_flavor(flavor) .expect("Expected a valid path"); for addon in addons.iter_mut() { if addon.primary_folder_id == id { addon.state = AddonState::Downloading; return Ok(Command::perform( perform_download_addon( DownloadReason::Update, flavor, addon.clone(), to_directory, ), Message::DownloadedAddon, )); } } } Message::Interaction(Interaction::UpdateAll) => { log::debug!("Interaction::UpdateAll"); // Close details if shown. ajour.expanded_type = ExpandType::None; // Update all updatable addons, expect ignored. 
let flavor = ajour.config.wow.flavor; let ignored_ids = ajour.config.addons.ignored.entry(flavor).or_default(); let mut addons: Vec<_> = ajour .addons .entry(flavor) .or_default() .iter_mut() .filter(|a| !ignored_ids.iter().any(|i| i == &a.primary_folder_id)) .collect(); let mut commands = vec![]; for addon in addons.iter_mut() { if addon.state == AddonState::Updatable { if let Some(to_directory) = ajour.config.get_download_directory_for_flavor(flavor) { addon.state = AddonState::Downloading; let addon = addon.clone(); commands.push(Command::perform( perform_download_addon( DownloadReason::Update, flavor, addon, to_directory, ), Message::DownloadedAddon, )) } } } return Ok(Command::batch(commands)); } Message::ParsedAddons((flavor, result)) => { // if our selected flavor returns (either ok or error) - we change to idle. ajour.state.insert(Mode::MyAddons(flavor), State::Ready); match result.context("Failed to parse addons") { Ok(addons) => { log::debug!("Message::ParsedAddons({}, {} addons)", flavor, addons.len(),); // Ignored addon ids. let ignored_ids = ajour.config.addons.ignored.entry(flavor).or_default(); // Check if addons is updatable. let release_channels = ajour .config .addons .release_channels .entry(flavor) .or_default(); let mut addons = addons .into_iter() .map(|mut a| { // Check if we have saved release channel for addon. if let Some(release_channel) = release_channels.get(&a.primary_folder_id) { a.release_channel = *release_channel; } else { // Else we try to determine the release_channel based of installed version. for (release_channel, package) in a.remote_packages() { if package.file_id == a.file_id() { a.release_channel = release_channel.to_owned(); break; } } } // Check if addon is updatable based on release channel. if let Some(package) = a.relevant_release_package() { if a.is_updatable(&package) { a.state = AddonState::Updatable; } } if ignored_ids.iter().any(|ia| &a.primary_folder_id == ia) { a.state = AddonState::Ignored; }; a }) .collect::<Vec<Addon>>(); // Sort the addons. sort_addons(&mut addons, SortDirection::Desc, ColumnKey::Status); ajour.header_state.previous_sort_direction = Some(SortDirection::Desc); ajour.header_state.previous_column_key = Some(ColumnKey::Status); // Sets the flavor state to ready. ajour.state.insert(Mode::MyAddons(flavor), State::Ready); // Insert the addons into the HashMap. 
ajour.addons.insert(flavor, addons); } Err(error) => { log_error(&error); } } } Message::DownloadedAddon((reason, flavor, id, result)) => { log::debug!( "Message::DownloadedAddon(({}, {}, error: {}))", flavor, &id, result.is_err() ); let addons = ajour.addons.entry(flavor).or_default(); let install_addons = ajour.install_addons.entry(flavor).or_default(); let mut addon = None; match result.context("Failed to download addon") { Ok(_) => match reason { DownloadReason::Update => { if let Some(_addon) = addons.iter_mut().find(|a| a.primary_folder_id == id) { addon = Some(_addon); } } DownloadReason::Install => { if let Some(install_addon) = install_addons .iter_mut() .find(|a| a.addon.as_ref().map(|a| &a.primary_folder_id) == Some(&id)) { install_addon.status = InstallStatus::Unpacking; if let Some(_addon) = install_addon.addon.as_mut() { addon = Some(_addon); } } } }, Err(error) => { log_error(&error); ajour.error = Some(error); match reason { DownloadReason::Update => { if let Some(_addon) = addons.iter_mut().find(|a| a.primary_folder_id == id) { _addon.state = AddonState::Retry; } } DownloadReason::Install => { if let Some(install_addon) = install_addons.iter_mut().find(|a| a.id == id) { install_addon.status = InstallStatus::Retry; } } } } } if let Some(addon) = addon { let from_directory = ajour .config .get_download_directory_for_flavor(flavor) .expect("Expected a valid path"); let to_directory = ajour .config .get_addon_directory_for_flavor(&flavor) .expect("Expected a valid path"); if addon.state == AddonState::Downloading { addon.state = AddonState::Unpacking; return Ok(Command::perform( perform_unpack_addon( reason, flavor, addon.clone(), from_directory, to_directory, ), Message::UnpackedAddon, )); } } } Message::UnpackedAddon((reason, flavor, id, result)) => { log::debug!( "Message::UnpackedAddon(({}, error: {}))", &id, result.is_err() ); let addons = ajour.addons.entry(flavor).or_default(); let install_addons = ajour.install_addons.entry(flavor).or_default(); let mut addon = None; let mut folders = None; match result.context("Failed to unpack addon") { Ok(_folders) => match reason { DownloadReason::Update => { if let Some(_addon) = addons.iter_mut().find(|a| a.primary_folder_id == id) { addon = Some(_addon); folders = Some(_folders); } } DownloadReason::Install => { if let Some(install_addon) = install_addons .iter_mut() .find(|a| a.addon.as_ref().map(|a| &a.primary_folder_id) == Some(&id)) { if let Some(_addon) = install_addon.addon.as_mut() { // If we are installing from the catalog, remove any existing addon // that has the same folders and insert this new one addons.retain(|a| a.folders != _folders); addons.push(_addon.clone()); addon = addons.iter_mut().find(|a| a.primary_folder_id == id); folders = Some(_folders); } } // Remove install addon since we've successfully installed it and // added to main addon vec install_addons.retain(|a| { a.addon.as_ref().map(|a| &a.primary_folder_id) != Some(&id) }); } }, Err(error) => { log_error(&error); ajour.error = Some(error); match reason { DownloadReason::Update => { if let Some(_addon) = addons.iter_mut().find(|a| a.primary_folder_id == id) { _addon.state = AddonState::Retry; } } DownloadReason::Install => { if let Some(install_addon) = install_addons.iter_mut().find(|a| a.id == id) { install_addon.status = InstallStatus::Retry; } } } } } let mut commands = vec![]; if let (Some(addon), Some(folders)) = (addon, folders) { addon.update_addon_folders(folders); addon.state = AddonState::Fingerprint; let mut version = None; if let 
Some(package) = addon.relevant_release_package() { version = Some(package.version); } if let Some(version) = version { addon.set_version(version); } // If we are updating / installing a Tukui / WowI // addon, we want to update the cache. If we are installing a Curse // addon, we want to make sure cache entry exists for those folders if let Some(addon_cache) = &ajour.addon_cache { if let Ok(entry) = AddonCacheEntry::try_from(addon as &_) { match addon.repository_kind() { // Remove any entry related to this cached addon Some(RepositoryKind::Curse) => { commands.push(Command::perform( remove_addon_cache_entry(addon_cache.clone(), entry, flavor), Message::AddonCacheEntryRemoved, )); } // Update the entry for this cached addon Some(RepositoryKind::Tukui) | Some(RepositoryKind::WowI) | Some(RepositoryKind::Git(_)) => { commands.push(Command::perform( update_addon_cache(addon_cache.clone(), entry, flavor), Message::AddonCacheUpdated, )); } None => {} } } } // Submit all addon folders to be fingerprinted if let Some(cache) = ajour.fingerprint_cache.as_ref() { for folder in &addon.folders { commands.push(Command::perform( perform_hash_addon( ajour .config .get_addon_directory_for_flavor(&flavor) .expect("Expected a valid path"), folder.id.clone(), cache.clone(), flavor, ), Message::UpdateFingerprint, )); } } } if !commands.is_empty() { return Ok(Command::batch(commands)); } } Message::UpdateFingerprint((flavor, id, result)) => { log::debug!( "Message::UpdateFingerprint(({:?}, {}, error: {}))", flavor, &id, result.is_err() ); let addons = ajour.addons.entry(flavor).or_default(); if let Some(addon) = addons.iter_mut().find(|a| a.primary_folder_id == id) { if result.is_ok() { addon.state = AddonState::Completed; } else { addon.state = AddonState::Error("Error".to_owned()); } } } Message::LatestRelease(release) => { log::debug!( "Message::LatestRelease({:?})", release.as_ref().map(|r| &r.tag_name) ); ajour.self_update_state.latest_release = release; } Message::Interaction(Interaction::SortColumn(column_key)) => { // Close details if shown. ajour.expanded_type = ExpandType::None; // First time clicking a column should sort it in Ascending order, otherwise // flip the sort direction. let mut sort_direction = SortDirection::Asc; if let Some(previous_column_key) = ajour.header_state.previous_column_key { if column_key == previous_column_key { if let Some(previous_sort_direction) = ajour.header_state.previous_sort_direction { sort_direction = previous_sort_direction.toggle() } } } // Exception would be first time ever sorting and sorting by title. // Since its already sorting in Asc by default, we should sort Desc. if ajour.header_state.previous_column_key.is_none() && column_key == ColumnKey::Title { sort_direction = SortDirection::Desc; } log::debug!( "Interaction::SortColumn({:?}, {:?})", column_key, sort_direction ); let flavor = ajour.config.wow.flavor; let mut addons = ajour.addons.entry(flavor).or_default(); sort_addons(&mut addons, sort_direction, column_key); ajour.header_state.previous_sort_direction = Some(sort_direction); ajour.header_state.previous_column_key = Some(column_key); } Message::Interaction(Interaction::SortCatalogColumn(column_key)) => { // First time clicking a column should sort it in Ascending order, otherwise // flip the sort direction. 
let mut sort_direction = SortDirection::Asc; if let Some(previous_column_key) = ajour.catalog_header_state.previous_column_key { if column_key == previous_column_key { if let Some(previous_sort_direction) = ajour.catalog_header_state.previous_sort_direction { sort_direction = previous_sort_direction.toggle() } } } // Exception would be first time ever sorting and sorting by title. // Since its already sorting in Asc by default, we should sort Desc. if ajour.catalog_header_state.previous_column_key.is_none() && column_key == CatalogColumnKey::Title { sort_direction = SortDirection::Desc; } // Exception for the date released if ajour.catalog_header_state.previous_column_key.is_none() && column_key == CatalogColumnKey::DateReleased { sort_direction = SortDirection::Desc; } log::debug!( "Interaction::SortCatalogColumn({:?}, {:?})", column_key, sort_direction ); ajour.catalog_header_state.previous_sort_direction = Some(sort_direction); ajour.catalog_header_state.previous_column_key = Some(column_key); query_and_sort_catalog(ajour); } Message::ReleaseChannelSelected(release_channel) => { log::debug!("Message::ReleaseChannelSelected({:?})", release_channel); if let ExpandType::Details(expanded_addon) = &ajour.expanded_type { let flavor = ajour.config.wow.flavor; let addons = ajour.addons.entry(flavor).or_default(); if let Some(addon) = addons .iter_mut() .find(|a| a.primary_folder_id == expanded_addon.primary_folder_id) { addon.release_channel = release_channel; // Check if addon is updatable. if let Some(package) = addon.relevant_release_package() { if addon.is_updatable(&package) { addon.state = AddonState::Updatable; } else { addon.state = AddonState::Idle; } } // Update config with the newly changed release channel. ajour .config .addons .release_channels .entry(flavor) .or_default() .insert(addon.primary_folder_id.clone(), release_channel); // Persist the newly updated config. 
let _ = &ajour.config.save(); } } } Message::ThemeSelected(theme_name) => { log::debug!("Message::ThemeSelected({:?})", &theme_name); ajour.theme_state.current_theme_name = theme_name.clone(); ajour.config.theme = Some(theme_name); let _ = ajour.config.save(); } Message::ThemesLoaded(mut themes) => { log::debug!("Message::ThemesLoaded({} themes)", themes.len()); themes.sort(); for theme in themes { ajour.theme_state.themes.push((theme.name.clone(), theme)); } } Message::Interaction(Interaction::ResizeColumn(column_type, event)) => match event { ResizeEvent::ResizeColumn { left_name, left_width, right_name, right_width, } => match column_type { Mode::MyAddons(_) => { let left_key = ColumnKey::from(left_name.as_str()); let right_key = ColumnKey::from(right_name.as_str()); if let Some(column) = ajour .header_state .columns .iter_mut() .find(|c| c.key == left_key && left_key != ColumnKey::Title) { column.width = Length::Units(left_width); } if let Some(column) = ajour .header_state .columns .iter_mut() .find(|c| c.key == right_key && right_key != ColumnKey::Title) { column.width = Length::Units(right_width); } } Mode::Install => {} Mode::Settings => {} Mode::About => {} Mode::Catalog => { let left_key = CatalogColumnKey::from(left_name.as_str()); let right_key = CatalogColumnKey::from(right_name.as_str()); if let Some(column) = ajour .catalog_header_state .columns .iter_mut() .find(|c| c.key == left_key && left_key != CatalogColumnKey::Title) { column.width = Length::Units(left_width); } if let Some(column) = ajour .catalog_header_state .columns .iter_mut() .find(|c| c.key == right_key && right_key != CatalogColumnKey::Title) { column.width = Length::Units(right_width); } } }, ResizeEvent::Finished => { // Persist changes to config save_column_configs(ajour); } }, Message::Interaction(Interaction::ScaleUp) => { let prev_scale = ajour.scale_state.scale; ajour.scale_state.scale = ((prev_scale + 0.1).min(2.0) * 10.0).round() / 10.0; ajour.config.scale = Some(ajour.scale_state.scale); let _ = ajour.config.save(); log::debug!( "Interaction::ScaleUp({} -> {})", prev_scale, ajour.scale_state.scale ); } Message::Interaction(Interaction::ScaleDown) => { let prev_scale = ajour.scale_state.scale; ajour.scale_state.scale = ((prev_scale - 0.1).max(0.5) * 10.0).round() / 10.0; ajour.config.scale = Some(ajour.scale_state.scale); let _ = ajour.config.save(); log::debug!( "Interaction::ScaleDown({} -> {})", prev_scale, ajour.scale_state.scale ); } Message::UpdateBackupDirectory(path) => { log::debug!("Message::UpdateBackupDirectory({:?})", &path); if let Some(path) = path { // Update the backup directory path. ajour.config.backup_directory = Some(path.clone()); // Persist the newly updated config. let _ = &ajour.config.save(); // Check if a latest backup exists in path return Ok(Command::perform(latest_backup(path), Message::LatestBackup)); } } Message::Interaction(Interaction::Backup) => { log::debug!("Interaction::Backup"); // This will disable our backup button and show a message that the // app is processing the backup. We will unflag this on completion. 
ajour.backup_state.backing_up = true; let mut src_folders = vec![]; // Shouldn't panic since button is only clickable if wow directory is chosen let wow_dir = ajour.config.wow.directory.as_ref().unwrap(); // Shouldn't panic since button is only shown if backup directory is chosen let dest = ajour.config.backup_directory.as_ref().unwrap(); // Backup WTF & AddOn directories for both flavors if they exist for flavor in Flavor::ALL.iter() { if ajour.config.backup_addons { let addon_dir = ajour.config.get_addon_directory_for_flavor(flavor).unwrap(); if addon_dir.exists() { src_folders.push(BackupFolder::new(&addon_dir, wow_dir)); } } if ajour.config.backup_wtf { let wtf_dir = ajour.config.get_wtf_directory_for_flavor(flavor).unwrap(); if wtf_dir.exists() { src_folders.push(BackupFolder::new(&wtf_dir, wow_dir)); } } } return Ok(Command::perform( backup_folders(src_folders, dest.to_owned()), Message::BackupFinished, )); } Message::Interaction(Interaction::ToggleBackupFolder(is_checked, folder)) => { log::debug!( "Interaction::ToggleBackupFolder({:?}, checked: {})", folder, is_checked ); match folder { BackupFolderKind::AddOns => { ajour.config.backup_addons = is_checked; } BackupFolderKind::WTF => { ajour.config.backup_wtf = is_checked; } } let _ = ajour.config.save(); } Message::LatestBackup(as_of) => { log::debug!("Message::LatestBackup({:?})", &as_of); ajour.backup_state.last_backup = as_of; } Message::BackupFinished(Ok(as_of)) => { log::debug!("Message::BackupFinished({})", as_of.format("%H:%M:%S")); ajour.backup_state.backing_up = false; ajour.backup_state.last_backup = Some(as_of); } Message::BackupFinished(error @ Err(_)) => { let error = error.context("Failed to backup folders").unwrap_err(); log_error(&error); ajour.error = Some(error); ajour.backup_state.backing_up = false; } Message::Interaction(Interaction::ToggleColumn(is_checked, key)) => { // We can't untoggle the addon title column if key == ColumnKey::Title { return Ok(Command::none()); } log::debug!("Interaction::ToggleColumn({}, {:?})", is_checked, key); if is_checked { if let Some(column) = ajour.header_state.columns.iter_mut().find(|c| c.key == key) { column.hidden = false; } } else if let Some(column) = ajour.header_state.columns.iter_mut().find(|c| c.key == key) { column.hidden = true; } // Persist changes to config save_column_configs(ajour); } Message::Interaction(Interaction::MoveColumnLeft(key)) => { log::debug!("Interaction::MoveColumnLeft({:?})", key); // Update header state ordering and save to config if let Some(idx) = ajour.header_state.columns.iter().position(|c| c.key == key) { ajour.header_state.columns.swap(idx, idx - 1); ajour .header_state .columns .iter_mut() .enumerate() .for_each(|(idx, column)| column.order = idx); // Persist changes to config save_column_configs(ajour); } // Update column ordering in settings if let Some(idx) = ajour .column_settings .columns .iter() .position(|c| c.key == key) { ajour.column_settings.columns.swap(idx, idx - 1); } } Message::Interaction(Interaction::MoveColumnRight(key)) => { log::debug!("Interaction::MoveColumnRight({:?})", key); // Update header state ordering and save to config if let Some(idx) = ajour.header_state.columns.iter().position(|c| c.key == key) { ajour.header_state.columns.swap(idx, idx + 1); ajour .header_state .columns .iter_mut() .enumerate() .for_each(|(idx, column)| column.order = idx); // Persist changes to config save_column_configs(ajour); } // Update column ordering in settings if let Some(idx) = ajour .column_settings .columns .iter() 
.position(|c| c.key == key) { ajour.column_settings.columns.swap(idx, idx + 1); } } Message::Interaction(Interaction::ToggleCatalogColumn(is_checked, key)) => { // We can't untoggle the addon title column if key == CatalogColumnKey::Title { return Ok(Command::none()); } log::debug!( "Interaction::ToggleCatalogColumn({}, {:?})", is_checked, key ); if is_checked { if let Some(column) = ajour .catalog_header_state .columns .iter_mut() .find(|c| c.key == key) { column.hidden = false; } } else if let Some(column) = ajour .catalog_header_state .columns .iter_mut() .find(|c| c.key == key) { column.hidden = true; } // Persist changes to config save_column_configs(ajour); } Message::Interaction(Interaction::MoveCatalogColumnLeft(key)) => { log::debug!("Interaction::MoveCatalogColumnLeft({:?})", key); // Update header state ordering and save to config if let Some(idx) = ajour .catalog_header_state .columns .iter() .position(|c| c.key == key) { ajour.catalog_header_state.columns.swap(idx, idx - 1); ajour .catalog_header_state .columns .iter_mut() .enumerate() .for_each(|(idx, column)| column.order = idx); // Persist changes to config save_column_configs(ajour); } // Update column ordering in settings if let Some(idx) = ajour .catalog_column_settings .columns .iter() .position(|c| c.key == key) { ajour.catalog_column_settings.columns.swap(idx, idx - 1); } } Message::Interaction(Interaction::MoveCatalogColumnRight(key)) => { log::debug!("Interaction::MoveCatalogColumnRight({:?})", key); // Update header state ordering and save to config if let Some(idx) = ajour .catalog_header_state .columns .iter() .position(|c| c.key == key) { ajour.catalog_header_state.columns.swap(idx, idx + 1); ajour .catalog_header_state .columns .iter_mut() .enumerate() .for_each(|(idx, column)| column.order = idx); // Persist changes to config save_column_configs(ajour); } // Update column ordering in settings if let Some(idx) = ajour .catalog_column_settings .columns .iter() .position(|c| c.key == key) { ajour.catalog_column_settings.columns.swap(idx, idx + 1); } } Message::CatalogDownloaded(Ok(catalog)) => { log::debug!( "Message::CatalogDownloaded({} addons in catalog)", catalog.addons.len() ); ajour.catalog_last_updated = Some(Utc::now()); let mut categories_per_source = catalog .addons .iter() .fold(HashMap::new(), |mut map, addon| { map.entry(addon.source.to_string()) .or_insert_with(Vec::new) .append( &mut addon .categories .clone() .iter() .map(|c| CatalogCategory::Choice(c.to_string())) .collect(), ); map }); categories_per_source.iter_mut().for_each(move |s| { s.1.sort(); s.1.dedup(); s.1.insert(0, CatalogCategory::All); }); ajour.catalog_categories_per_source_cache = categories_per_source; ajour.catalog_search_state.categories = ajour .catalog_categories_per_source_cache .get(&ajour.catalog_search_state.source.to_string()) .cloned() .unwrap_or_default(); ajour.catalog = Some(catalog); ajour.state.insert(Mode::Catalog, State::Ready); query_and_sort_catalog(ajour); } Message::Interaction(Interaction::CatalogQuery(query)) => { // Catalog search query ajour.catalog_search_state.query = Some(query); query_and_sort_catalog(ajour); } Message::Interaction(Interaction::InstallAddon(flavor, id, kind)) => { log::debug!("Interaction::InstallAddon({}, {:?})", flavor, &kind); let install_addons = ajour.install_addons.entry(flavor).or_default(); // Remove any existing status for this addon since we are going // to try and download it again. 
For InstallKind::Source, we should only // ever have one entry here so we just remove it install_addons.retain(|a| match kind { InstallKind::Catalog { .. } => !(id == a.id && a.kind == kind), InstallKind::Source => a.kind != kind, }); // Add new status for this addon as Downloading install_addons.push(InstallAddon { id: id.clone(), kind, status: InstallStatus::Downloading, addon: None, }); return Ok(Command::perform( perform_fetch_latest_addon(kind, id, flavor), Message::InstallAddonFetched, )); } Message::Interaction(Interaction::CatalogCategorySelected(category)) => { log::debug!("Interaction::CatalogCategorySelected({})", &category); // Select category ajour.catalog_search_state.category = category; query_and_sort_catalog(ajour); } Message::Interaction(Interaction::CatalogResultSizeSelected(size)) => { log::debug!("Interaction::CatalogResultSizeSelected({:?})", &size); // Catalog result size ajour.catalog_search_state.result_size = size; query_and_sort_catalog(ajour); } Message::Interaction(Interaction::CatalogSourceSelected(source)) => { log::debug!("Interaction::CatalogResultSizeSelected({:?})", source); // Catalog source ajour.catalog_search_state.source = source; ajour.catalog_search_state.categories = ajour .catalog_categories_per_source_cache .get(&source.to_string()) .cloned() .unwrap_or_default(); ajour.catalog_search_state.category = CatalogCategory::All; query_and_sort_catalog(ajour); } Message::InstallAddonFetched((flavor, id, result)) => { let install_addons = ajour.install_addons.entry(flavor).or_default(); if let Some(install_addon) = install_addons.iter_mut().find(|a| a.id == id) { match result { Ok(mut addon) => { log::debug!( "Message::CatalogInstallAddonFetched({:?}, {:?})", flavor, &id, ); addon.state = AddonState::Downloading; install_addon.addon = Some(addon.clone()); let to_directory = ajour .config .get_download_directory_for_flavor(flavor) .expect("Expected a valid path"); return Ok(Command::perform( perform_download_addon( DownloadReason::Install, flavor, addon, to_directory, ), Message::DownloadedAddon, )); } Err(error) => { // Dont use `context` here to convert to anyhow::Error since // we actually want to show the underlying RepositoryError // message let error = anyhow::Error::new(error); log_error(&error); match install_addon.kind { InstallKind::Catalog { .. } => { install_addon.status = InstallStatus::Unavilable; } InstallKind::Source => { install_addon.status = InstallStatus::Error(error.to_string()); } } } } } } Message::Interaction(Interaction::UpdateAjour) => { log::debug!("Interaction::UpdateAjour"); if let Some(release) = &ajour.self_update_state.latest_release { let bin_name = bin_name().to_owned(); ajour.self_update_state.status = Some(SelfUpdateStatus::InProgress); return Ok(Command::perform( download_update_to_temp_file(bin_name, release.clone()), Message::AjourUpdateDownloaded, )); } } Message::AjourUpdateDownloaded(result) => { log::debug!("Message::AjourUpdateDownloaded"); match result.context("Failed to update Ajour") { Ok((relaunch_path, cleanup_path)) => { // Remove first arg, which is path to binary. We don't use this first // arg as binary path because it's not reliable, per the docs. let mut args = std::env::args(); args.next(); let mut args: Vec<_> = args.collect(); // Remove the `--self-update-temp` arg from args if it exists, // since we need to pass it cleanly. Otherwise new process will // fail during arg parsing. 
if let Some(idx) = args.iter().position(|a| a == "--self-update-temp") { args.remove(idx); // Remove path passed after this arg args.remove(idx); } match std::process::Command::new(&relaunch_path) .args(args) .arg("--self-update-temp") .arg(&cleanup_path) .spawn() .context("Failed to update Ajour") { Ok(_) => std::process::exit(0), Err(error) => { log_error(&error); ajour.error = Some(error); ajour.self_update_state.status = Some(SelfUpdateStatus::Failed); } } } Err(error) => { log_error(&error); ajour.error = Some(error); ajour.self_update_state.status = Some(SelfUpdateStatus::Failed); } } } Message::AddonCacheUpdated(Ok(entry)) => { log::debug!("Message::AddonCacheUpdated({})", entry.title); } Message::AddonCacheEntryRemoved(maybe_entry) => { match maybe_entry.context("Failed to remove cache entry") { Ok(Some(entry)) => log::debug!("Message::AddonCacheEntryRemoved({})", entry.title), Ok(None) => {} Err(e) => { log_error(&e); } } } Message::Interaction(Interaction::InstallSCMQuery(query)) => { // install from scm search query ajour.install_from_scm_state.query = Some(query); // Remove the status if it's an error and user typed into // text input { let install_addons = ajour .install_addons .entry(ajour.config.wow.flavor) .or_default(); if let Some((idx, install_addon)) = install_addons .iter() .enumerate() .find(|(_, a)| a.kind == InstallKind::Source) { if matches!(install_addon.status, InstallStatus::Error(_)) { install_addons.remove(idx); } } } } Message::Interaction(Interaction::InstallSCMURL) => { if let Some(url) = ajour.install_from_scm_state.query.clone() { if !url.is_empty() { return handle_message( ajour, Message::Interaction(Interaction::InstallAddon( ajour.config.wow.flavor, url, InstallKind::Source, )), ); } } } Message::RefreshCatalog(_) => { if let Some(last_updated) = &ajour.catalog_last_updated { let now = Utc::now(); let now_time = now.time(); let refresh_time = NaiveTime::from_hms(0, 40, 0); if last_updated.date() < now.date() && now_time > refresh_time { log::debug!("Message::RefreshCatalog: catalog needs to be refreshed"); return Ok(Command::perform( catalog::get_catalog(), Message::CatalogDownloaded, )); } } } Message::Interaction(Interaction::ToggleHideIgnoredAddons(is_checked)) => { log::debug!("Interaction::ToggleHideIgnoredAddons({})", is_checked); ajour.config.hide_ignored_addons = is_checked; let _ = ajour.config.save(); } Message::CatalogDownloaded(error @ Err(_)) => { let error = error.context("Failed to download catalog").unwrap_err(); log_error(&error); ajour.error = Some(error); } Message::AddonCacheUpdated(error @ Err(_)) => { let error = error.context("Failed to update addon cache").unwrap_err(); log_error(&error); ajour.error = Some(error); } Message::Interaction(Interaction::PickSelfUpdateChannel(channel)) => { log::debug!("Interaction::PickSelfUpdateChannel({:?})", channel); ajour.config.self_update_channel = channel; let _ = ajour.config.save(); return Ok(Command::perform( get_latest_release(ajour.config.self_update_channel), Message::LatestRelease, )); } Message::CheckLatestRelease(_) => { log::debug!("Message::CheckLatestRelease"); return Ok(Command::perform( get_latest_release(ajour.config.self_update_channel), Message::LatestRelease, )); } Message::Error(error) => { log_error(&error); ajour.error = Some(error); } Message::RuntimeEvent(iced_native::Event::Window( iced_native::window::Event::Resized { width, height }, )) => { let width = (width as f64 * ajour.scale_state.scale) as u32; let height = (height as f64 * ajour.scale_state.scale) as u32; 
// Minimizing Ajour on Windows will call this function with 0, 0. // We don't want to save that in config, because then it will start with zero size. if width > 0 && height > 0 { ajour.config.window_size = Some((width, height)); let _ = ajour.config.save(); } } Message::RuntimeEvent(iced_native::Event::Keyboard( iced_native::keyboard::Event::KeyReleased { key_code, .. }, )) => { if key_code == iced_native::keyboard::KeyCode::Escape && (ajour.mode == Mode::Settings || ajour.mode == Mode::About) { ajour.mode = Mode::MyAddons(ajour.config.wow.flavor); } } Message::RuntimeEvent(_) => {} Message::None(_) => {} } Ok(Command::none()) } async fn select_directory() -> Option<PathBuf> { let dialog = OpenSingleDir { dir: None }; if let Ok(show) = dialog.show() { return show; } None } async fn perform_read_addon_directory( addon_cache: Option<Arc<Mutex<AddonCache>>>, fingerprint_cache: Option<Arc<Mutex<FingerprintCache>>>, root_dir: PathBuf, flavor: Flavor, ) -> (Flavor, Result<Vec<Addon>, ParseError>) { ( flavor, read_addon_directory(addon_cache, fingerprint_cache, root_dir, flavor).await, ) } /// Downloads the newest version of the addon. /// This is for now only downloading from warcraftinterface. async fn perform_download_addon( reason: DownloadReason, flavor: Flavor, addon: Addon, to_directory: PathBuf, ) -> (DownloadReason, Flavor, String, Result<(), DownloadError>) { ( reason, flavor, addon.primary_folder_id.clone(), download_addon(&addon, &to_directory).await, ) } /// Rehashes a `Addon`. async fn perform_hash_addon( addon_dir: impl AsRef<Path>, addon_id: String, fingerprint_cache: Arc<Mutex<FingerprintCache>>, flavor: Flavor, ) -> (Flavor, String, Result<(), ParseError>)
/// Unzips `Addon` at given `from_directory` and moves it `to_directory`. async fn perform_unpack_addon( reason: DownloadReason, flavor: Flavor, addon: Addon, from_directory: PathBuf, to_directory: PathBuf, ) -> ( DownloadReason, Flavor, String, Result<Vec<AddonFolder>, FilesystemError>, ) { ( reason, flavor, addon.primary_folder_id.clone(), install_addon(&addon, &from_directory, &to_directory).await, ) } async fn perform_fetch_latest_addon( install_kind: InstallKind, id: String, flavor: Flavor, ) -> (Flavor, String, Result<Addon, RepositoryError>) { async fn fetch_latest_addon( flavor: Flavor, install_kind: InstallKind, id: String, ) -> Result<Addon, RepositoryError> { // Needed since id for source install is a URL and this id needs to be safe // when using as the temp path of the downloaded zip let mut hasher = DefaultHasher::new(); hasher.write(format!("{:?}{}", install_kind, &id).as_bytes()); let temp_id = hasher.finish(); let mut addon = Addon::empty(&temp_id.to_string()); let mut repo_package = match install_kind { InstallKind::Catalog { source } => { let kind = match source { catalog::Source::Curse => RepositoryKind::Curse, catalog::Source::Tukui => RepositoryKind::Tukui, catalog::Source::WowI => RepositoryKind::WowI, }; RepositoryPackage::from_repo_id(flavor, kind, id)? } InstallKind::Source => { let url = id .parse::<Uri>() .map_err(|_| RepositoryError::GitInvalidUrl { url: id.clone() })?; RepositoryPackage::from_source_url(flavor, url)? } }; repo_package.resolve_metadata().await?; addon.set_repository(repo_package); Ok(addon) } ( flavor, id.clone(), fetch_latest_addon(flavor, install_kind, id).await, ) } fn sort_addons(addons: &mut [Addon], sort_direction: SortDirection, column_key: ColumnKey) { match (column_key, sort_direction) { (ColumnKey::Title, SortDirection::Asc) => { addons.sort_by(|a, b| a.title().to_lowercase().cmp(&b.title().to_lowercase())); } (ColumnKey::Title, SortDirection::Desc) => { addons.sort_by(|a, b| { a.title() .to_lowercase() .cmp(&b.title().to_lowercase()) .reverse() .then_with(|| { a.relevant_release_package() .cmp(&b.relevant_release_package()) }) }); } (ColumnKey::LocalVersion, SortDirection::Asc) => { addons.sort_by(|a, b| { a.version() .cmp(&b.version()) .then_with(|| a.title().cmp(&b.title())) }); } (ColumnKey::LocalVersion, SortDirection::Desc) => { addons.sort_by(|a, b| { a.version() .cmp(&b.version()) .reverse() .then_with(|| a.title().cmp(&b.title())) }); } (ColumnKey::RemoteVersion, SortDirection::Asc) => { addons.sort_by(|a, b| { a.relevant_release_package() .cmp(&b.relevant_release_package()) .then_with(|| a.cmp(&b)) }); } (ColumnKey::RemoteVersion, SortDirection::Desc) => { addons.sort_by(|a, b| { a.relevant_release_package() .cmp(&b.relevant_release_package()) .reverse() .then_with(|| a.cmp(&b)) }); } (ColumnKey::Status, SortDirection::Asc) => { addons.sort_by(|a, b| a.state.cmp(&b.state).then_with(|| a.cmp(&b))); } (ColumnKey::Status, SortDirection::Desc) => { addons.sort_by(|a, b| a.state.cmp(&b.state).reverse().then_with(|| a.cmp(&b))); } (ColumnKey::Channel, SortDirection::Asc) => addons.sort_by(|a, b| { a.release_channel .to_string() .cmp(&b.release_channel.to_string()) }), (ColumnKey::Channel, SortDirection::Desc) => addons.sort_by(|a, b| { a.release_channel .to_string() .cmp(&b.release_channel.to_string()) .reverse() }), (ColumnKey::Author, SortDirection::Asc) => { addons.sort_by(|a, b| a.author().cmp(&b.author())) } (ColumnKey::Author, SortDirection::Desc) => { addons.sort_by(|a, b| a.author().cmp(&b.author()).reverse()) } 
(ColumnKey::GameVersion, SortDirection::Asc) => { addons.sort_by(|a, b| a.game_version().cmp(&b.game_version())) } (ColumnKey::GameVersion, SortDirection::Desc) => { addons.sort_by(|a, b| a.game_version().cmp(&b.game_version()).reverse()) } (ColumnKey::DateReleased, SortDirection::Asc) => { addons.sort_by(|a, b| { a.relevant_release_package() .map(|p| p.date_time) .cmp(&b.relevant_release_package().map(|p| p.date_time)) }); } (ColumnKey::DateReleased, SortDirection::Desc) => { addons.sort_by(|a, b| { a.relevant_release_package() .map(|p| p.date_time) .cmp(&b.relevant_release_package().map(|p| p.date_time)) .reverse() }); } (ColumnKey::Source, SortDirection::Asc) => { addons.sort_by(|a, b| a.repository_kind().cmp(&b.repository_kind())) } (ColumnKey::Source, SortDirection::Desc) => { addons.sort_by(|a, b| a.repository_kind().cmp(&b.repository_kind()).reverse()) } } } fn sort_catalog_addons( addons: &mut [CatalogRow], sort_direction: SortDirection, column_key: CatalogColumnKey, flavor: &Flavor, ) { match (column_key, sort_direction) { (CatalogColumnKey::Title, SortDirection::Asc) => { addons.sort_by(|a, b| a.addon.name.cmp(&b.addon.name)); } (CatalogColumnKey::Title, SortDirection::Desc) => { addons.sort_by(|a, b| a.addon.name.cmp(&b.addon.name).reverse()); } (CatalogColumnKey::Description, SortDirection::Asc) => { addons.sort_by(|a, b| a.addon.summary.cmp(&b.addon.summary)); } (CatalogColumnKey::Description, SortDirection::Desc) => { addons.sort_by(|a, b| a.addon.summary.cmp(&b.addon.summary).reverse()); } (CatalogColumnKey::Source, SortDirection::Asc) => { addons.sort_by(|a, b| a.addon.source.cmp(&b.addon.source)); } (CatalogColumnKey::Source, SortDirection::Desc) => { addons.sort_by(|a, b| a.addon.source.cmp(&b.addon.source).reverse()); } (CatalogColumnKey::NumDownloads, SortDirection::Asc) => { addons.sort_by(|a, b| { a.addon .number_of_downloads .cmp(&b.addon.number_of_downloads) }); } (CatalogColumnKey::NumDownloads, SortDirection::Desc) => { addons.sort_by(|a, b| { a.addon .number_of_downloads .cmp(&b.addon.number_of_downloads) .reverse() }); } (CatalogColumnKey::Install, SortDirection::Asc) => {} (CatalogColumnKey::Install, SortDirection::Desc) => {} (CatalogColumnKey::DateReleased, SortDirection::Asc) => { addons.sort_by(|a, b| a.addon.date_released.cmp(&b.addon.date_released)); } (CatalogColumnKey::DateReleased, SortDirection::Desc) => { addons.sort_by(|a, b| a.addon.date_released.cmp(&b.addon.date_released).reverse()); } (CatalogColumnKey::GameVersion, SortDirection::Asc) => addons.sort_by(|a, b| { let gv_a = a.addon.game_versions.iter().find(|gc| &gc.flavor == flavor); let gv_b = b.addon.game_versions.iter().find(|gc| &gc.flavor == flavor); gv_a.cmp(&gv_b) }), (CatalogColumnKey::GameVersion, SortDirection::Desc) => addons.sort_by(|a, b| { let gv_a = a.addon.game_versions.iter().find(|gc| &gc.flavor == flavor); let gv_b = b.addon.game_versions.iter().find(|gc| &gc.flavor == flavor); gv_a.cmp(&gv_b).reverse() }), } } fn query_and_sort_catalog(ajour: &mut Ajour) { if let Some(catalog) = &ajour.catalog { let query = ajour .catalog_search_state .query .as_ref() .map(|s| s.to_lowercase()); let flavor = &ajour.config.wow.flavor; let source = &ajour.catalog_search_state.source; let category = &ajour.catalog_search_state.category; let result_size = ajour.catalog_search_state.result_size.as_usize(); let mut catalog_rows: Vec<_> = catalog .addons .iter() .filter(|a| !a.game_versions.is_empty()) .filter(|a| { let cleaned_text = format!("{} {}", a.name.to_lowercase(), 
a.summary.to_lowercase()); if let Some(query) = &query { cleaned_text.contains(query) } else { true } }) .filter(|a| { a.game_versions .iter() .any(|gc| gc.flavor == flavor.base_flavor()) }) .filter(|a| match source { CatalogSource::Choice(source) => a.source == *source, }) .filter(|a| match category { CatalogCategory::All => true, CatalogCategory::Choice(name) => a.categories.iter().any(|c| c == name), }) .cloned() .map(CatalogRow::from) .collect(); let sort_direction = ajour .catalog_header_state .previous_sort_direction .unwrap_or(SortDirection::Desc); let column_key = ajour .catalog_header_state .previous_column_key .unwrap_or(CatalogColumnKey::NumDownloads); sort_catalog_addons(&mut catalog_rows, sort_direction, column_key, flavor); catalog_rows = catalog_rows .into_iter() .enumerate() .filter_map(|(idx, row)| if idx < result_size { Some(row) } else { None }) .collect(); ajour.catalog_search_state.catalog_rows = catalog_rows; } } fn save_column_configs(ajour: &mut Ajour) { let my_addons_columns: Vec<_> = ajour .header_state .columns .iter() .map(ColumnConfigV2::from) .collect(); let catalog_columns: Vec<_> = ajour .catalog_header_state .columns .iter() .map(ColumnConfigV2::from) .collect(); ajour.config.column_config = ColumnConfig::V3 { my_addons_columns, catalog_columns, }; let _ = ajour.config.save(); } /// Hardcoded binary names for each compilation target /// that gets published to the Github Release const fn bin_name() -> &'static str { #[cfg(all(target_os = "windows", feature = "opengl"))] { "ajour-opengl.exe" } #[cfg(all(target_os = "windows", feature = "wgpu"))] { "ajour.exe" } #[cfg(all(target_os = "macos", feature = "opengl"))] { "ajour-opengl" } #[cfg(all(target_os = "macos", feature = "wgpu"))] { "ajour" } #[cfg(all(target_os = "linux", feature = "opengl"))] { "ajour-opengl.AppImage" } #[cfg(all(target_os = "linux", feature = "wgpu"))] { "ajour.AppImage" } }
{ ( flavor, addon_id.clone(), update_addon_fingerprint(fingerprint_cache, flavor, addon_dir, addon_id).await, ) }
helloworld.http.go
// Code generated by protoc-gen-gohttp. DO NOT EDIT. // source: helloworld/helloworld.proto package helloworldpb import ( bytes "bytes" context "context" fmt "fmt" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" protojson "google.golang.org/protobuf/encoding/protojson" proto "google.golang.org/protobuf/proto" io "io" ioutil "io/ioutil" mime "mime" http "net/http" strings "strings" ) // GreeterHTTPService is the server API for Greeter service. type GreeterHTTPService interface { // SayHello says hello. SayHello(context.Context, *HelloRequest) (*HelloReply, error) } // GreeterHTTPConverter has a function to convert GreeterHTTPService interface to http.HandlerFunc. type GreeterHTTPConverter struct { srv GreeterHTTPService } // NewGreeterHTTPConverter returns GreeterHTTPConverter. func NewGreeterHTTPConverter(srv GreeterHTTPService) *GreeterHTTPConverter { return &GreeterHTTPConverter{ srv: srv, } } // SayHello returns GreeterHTTPService interface's SayHello converted to http.HandlerFunc. // // SayHello says hello. func (h *GreeterHTTPConverter) SayHello(cb func(ctx context.Context, w http.ResponseWriter, r *http.Request, arg, ret proto.Message, err error), interceptors ...grpc.UnaryServerInterceptor) http.HandlerFunc { if cb == nil {
cb = func(ctx context.Context, w http.ResponseWriter, r *http.Request, arg, ret proto.Message, err error) { if err != nil { w.WriteHeader(http.StatusInternalServerError) p := status.New(codes.Unknown, err.Error()).Proto() switch contentType, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")); contentType { case "application/protobuf", "application/x-protobuf": buf, err := proto.Marshal(p) if err != nil { return } if _, err := io.Copy(w, bytes.NewBuffer(buf)); err != nil { return } case "application/json": buf, err := protojson.Marshal(p) if err != nil { return } if _, err := io.Copy(w, bytes.NewBuffer(buf)); err != nil { return } default: } } } } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() contentType, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) accepts := strings.Split(r.Header.Get("Accept"), ",") accept := accepts[0] if accept == "*/*" || accept == "" { if contentType != "" { accept = contentType } else { accept = "application/json" } } w.Header().Set("Content-Type", accept) arg := &HelloRequest{} if r.Method != http.MethodGet { body, err := ioutil.ReadAll(r.Body) if err != nil { cb(ctx, w, r, nil, nil, err) return } switch contentType { case "application/protobuf", "application/x-protobuf": if err := proto.Unmarshal(body, arg); err != nil { cb(ctx, w, r, nil, nil, err) return } case "application/json": if err := protojson.Unmarshal(body, arg); err != nil { cb(ctx, w, r, nil, nil, err) return } default: w.WriteHeader(http.StatusUnsupportedMediaType) _, err := fmt.Fprintf(w, "Unsupported Content-Type: %s", contentType) cb(ctx, w, r, nil, nil, err) return } } n := len(interceptors) chained := func(ctx context.Context, arg interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { chainer := func(currentInter grpc.UnaryServerInterceptor, currentHandler grpc.UnaryHandler) grpc.UnaryHandler { return func(currentCtx context.Context, currentReq interface{}) (interface{}, error) { return currentInter(currentCtx, currentReq, info, currentHandler) } } chainedHandler := handler for i := n - 1; i >= 0; i-- { chainedHandler = chainer(interceptors[i], chainedHandler) } return chainedHandler(ctx, arg) } info := &grpc.UnaryServerInfo{ Server: h.srv, FullMethod: "/helloworld.Greeter/SayHello", } handler := func(c context.Context, req interface{}) (interface{}, error) { return h.srv.SayHello(c, req.(*HelloRequest)) } iret, err := chained(ctx, arg, info, handler) if err != nil { cb(ctx, w, r, arg, nil, err) return } ret, ok := iret.(*HelloReply) if !ok { cb(ctx, w, r, arg, nil, fmt.Errorf("/helloworld.Greeter/SayHello: interceptors have not return HelloReply")) return } switch accept { case "application/protobuf", "application/x-protobuf": buf, err := proto.Marshal(ret) if err != nil { cb(ctx, w, r, arg, ret, err) return } if _, err := io.Copy(w, bytes.NewBuffer(buf)); err != nil { cb(ctx, w, r, arg, ret, err) return } case "application/json": buf, err := protojson.Marshal(ret) if err != nil { cb(ctx, w, r, arg, ret, err) return } if _, err := io.Copy(w, bytes.NewBuffer(buf)); err != nil { cb(ctx, w, r, arg, ret, err) return } default: w.WriteHeader(http.StatusUnsupportedMediaType) _, err := fmt.Fprintf(w, "Unsupported Accept: %s", accept) cb(ctx, w, r, arg, ret, err) return } cb(ctx, w, r, arg, ret, nil) }) } // SayHelloWithName returns Service name, Method name and GreeterHTTPService interface's SayHello converted to http.HandlerFunc. // // SayHello says hello. 
func (h *GreeterHTTPConverter) SayHelloWithName(cb func(ctx context.Context, w http.ResponseWriter, r *http.Request, arg, ret proto.Message, err error), interceptors ...grpc.UnaryServerInterceptor) (string, string, http.HandlerFunc) { return "Greeter", "SayHello", h.SayHello(cb, interceptors...) }
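// --- Hedged usage sketch (not part of the generated file) ---
// The generated converter is plain net/http glue, so it can be mounted on any mux.
// Assumptions: the module path "example.com/gen/helloworldpb" is hypothetical, and the
// request/reply fields (name, message) follow the conventional gRPC helloworld proto;
// neither is confirmed by the generated code above.
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	helloworldpb "example.com/gen/helloworldpb" // hypothetical import path for the generated package
)

// greeter implements the generated GreeterHTTPService interface.
type greeter struct{}

func (greeter) SayHello(_ context.Context, req *helloworldpb.HelloRequest) (*helloworldpb.HelloReply, error) {
	// GetName/Message assume the standard helloworld proto (string name / string message).
	return &helloworldpb.HelloReply{Message: fmt.Sprintf("Hello, %s!", req.GetName())}, nil
}

func main() {
	conv := helloworldpb.NewGreeterHTTPConverter(greeter{})

	// A nil callback falls back to the generated default error handler.
	service, method, handler := conv.SayHelloWithName(nil)

	mux := http.NewServeMux()
	// The route is arbitrary; here it simply mirrors the names returned by SayHelloWithName.
	mux.Handle(fmt.Sprintf("/%s/%s", service, method), handler)

	log.Fatal(http.ListenAndServe(":8080", mux))
}

// A JSON POST to /Greeter/SayHello with body {"name":"world"} is unmarshalled into
// HelloRequest, run through any configured interceptors, and the HelloReply is written
// back in the encoding negotiated from the Accept/Content-Type headers.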
stat_test.go
// Copyright ©2014 The Gonum Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package stat import ( "fmt" "math" "reflect" "strconv" "testing" "math/rand" "github.com/gopherd/gonum/floats" "github.com/gopherd/gonum/floats/scalar" ) func ExampleCircularMean() { x := []float64{0, 0.25 * math.Pi, 0.75 * math.Pi} weights := []float64{1, 2, 2.5} cmean := CircularMean(x, weights) fmt.Printf("The circular mean is %.5f.\n", cmean) // Output: // The circular mean is 1.37037. } func TestCircularMean(t *testing.T) { for i, test := range []struct { x []float64 wts []float64 ans float64 }{ // Values compared against scipy. { x: []float64{0, 2 * math.Pi}, ans: 0, }, { x: []float64{0, 0.5 * math.Pi}, ans: 0.78539816339744, }, { x: []float64{-1.5 * math.Pi, 0.5 * math.Pi, 2.5 * math.Pi}, wts: []float64{1, 2, 3}, ans: 0.5 * math.Pi, }, { x: []float64{0, 0.5 * math.Pi}, wts: []float64{1, 2}, ans: 1.10714871779409, }, } { c := CircularMean(test.x, test.wts) if math.Abs(c-test.ans) > 1e-14 { t.Errorf("Circular mean mismatch case %d: Expected %v, Found %v", i, test.ans, c) } } if !panics(func() { CircularMean(make([]float64, 3), make([]float64, 2)) }) { t.Errorf("CircularMean did not panic with x, wts length mismatch") } } func ExampleCorrelation() { x := []float64{8, -3, 7, 8, -4} y := []float64{10, 5, 6, 3, -1} w := []float64{2, 1.5, 3, 3, 2} fmt.Println("Correlation computes the degree to which two datasets move together") fmt.Println("about their mean. For example, x and y above move similarly.") c := Correlation(x, y, w) fmt.Printf("Correlation is %.5f\n", c) // Output: // Correlation computes the degree to which two datasets move together // about their mean. For example, x and y above move similarly. // Correlation is 0.59915 } func TestCorrelation(t *testing.T) { for i, test := range []struct { x []float64 y []float64 w []float64 ans float64 }{ { x: []float64{8, -3, 7, 8, -4}, y: []float64{8, -3, 7, 8, -4}, w: nil, ans: 1, }, { x: []float64{8, -3, 7, 8, -4}, y: []float64{8, -3, 7, 8, -4}, w: []float64{1, 1, 1, 1, 1}, ans: 1, }, { x: []float64{8, -3, 7, 8, -4}, y: []float64{8, -3, 7, 8, -4}, w: []float64{1, 6, 7, 0.8, 2.1}, ans: 1, }, { x: []float64{8, -3, 7, 8, -4}, y: []float64{10, 15, 4, 5, -1}, w: nil, ans: 0.0093334660769059, }, { x: []float64{8, -3, 7, 8, -4}, y: []float64{10, 15, 4, 5, -1}, w: nil, ans: 0.0093334660769059, }, { x: []float64{8, -3, 7, 8, -4}, y: []float64{10, 15, 4, 5, -1}, w: []float64{1, 3, 1, 2, 2}, ans: -0.13966633352689, }, } { c := Correlation(test.x, test.y, test.w) if math.Abs(test.ans-c) > 1e-14 { t.Errorf("Correlation mismatch case %d. 
Expected %v, Found %v", i, test.ans, c) } } if !panics(func() { Correlation(make([]float64, 2), make([]float64, 3), make([]float64, 3)) }) { t.Errorf("Correlation did not panic with length mismatch") } if !panics(func() { Correlation(make([]float64, 2), make([]float64, 3), nil) }) { t.Errorf("Correlation did not panic with length mismatch") } if !panics(func() { Correlation(make([]float64, 3), make([]float64, 3), make([]float64, 2)) }) { t.Errorf("Correlation did not panic with weights length mismatch") } } func ExampleKendall() { x := []float64{8, -3, 7, 8, -4} y := []float64{10, 5, 6, 3, -1} w := []float64{2, 1.5, 3, 3, 2} fmt.Println("Kendall correlation computes the number of ordered pairs") fmt.Println("between two datasets.") c := Kendall(x, y, w) fmt.Printf("Kendall correlation is %.5f\n", c) // Output: // Kendall correlation computes the number of ordered pairs // between two datasets. // Kendall correlation is 0.25000 } func TestKendall(t *testing.T) { for i, test := range []struct { x []float64 y []float64 weights []float64 ans float64 }{ { x: []float64{0, 1, 2, 3}, y: []float64{0, 1, 2, 3}, weights: nil, ans: 1, }, { x: []float64{0, 1}, y: []float64{1, 0}, weights: nil, ans: -1, }, { x: []float64{8, -3, 7, 8, -4}, y: []float64{10, 15, 4, 5, -1}, weights: nil, ans: 0.2, }, { x: []float64{8, -3, 7, 8, -4}, y: []float64{10, 5, 6, 3, -1}, weights: nil, ans: 0.4, }, { x: []float64{1, 2, 3, 4, 5}, y: []float64{2, 3, 4, 5, 6}, weights: []float64{1, 1, 1, 1, 1}, ans: 1, }, { x: []float64{1, 2, 3, 2, 1}, y: []float64{2, 3, 2, 1, 0}, weights: []float64{1, 1, 0, 0, 0}, ans: 1, }, } { c := Kendall(test.x, test.y, test.weights) if math.Abs(test.ans-c) > 1e-14 { t.Errorf("Correlation mismatch case %d. Expected %v, Found %v", i, test.ans, c) } } if !panics(func() { Kendall(make([]float64, 2), make([]float64, 3), make([]float64, 3)) }) { t.Errorf("Kendall did not panic with length mismatch") } if !panics(func() { Kendall(make([]float64, 2), make([]float64, 3), nil) }) { t.Errorf("Kendall did not panic with length mismatch") } if !panics(func() { Kendall(make([]float64, 3), make([]float64, 3), make([]float64, 2)) }) { t.Errorf("Kendall did not panic with weights length mismatch") } } func ExampleCovariance() { fmt.Println("Covariance computes the degree to which datasets move together") fmt.Println("about their mean.") x := []float64{8, -3, 7, 8, -4} y := []float64{10, 2, 2, 4, 1} cov := Covariance(x, y, nil) fmt.Printf("Cov = %.4f\n", cov) fmt.Println("If datasets move perfectly together, the variance equals the covariance") y2 := []float64{12, 1, 11, 12, 0} cov2 := Covariance(x, y2, nil) varX := Variance(x, nil) fmt.Printf("Cov2 is %.4f, VarX is %.4f", cov2, varX) // Output: // Covariance computes the degree to which datasets move together // about their mean. 
// Cov = 13.8000 // If datasets move perfectly together, the variance equals the covariance // Cov2 is 37.7000, VarX is 37.7000 } func TestCovariance(t *testing.T) { for i, test := range []struct { p []float64 q []float64 weights []float64 ans float64 }{ { p: []float64{0.75, 0.1, 0.05}, q: []float64{0.5, 0.25, 0.25}, ans: 0.05625, }, { p: []float64{1, 2, 3}, q: []float64{2, 4, 6}, ans: 2, }, { p: []float64{1, 2, 3}, q: []float64{1, 4, 9}, ans: 4, }, { p: []float64{1, 2, 3}, q: []float64{1, 4, 9}, weights: []float64{1, 1.5, 1}, ans: 3.2, }, { p: []float64{1, 4, 9}, q: []float64{1, 4, 9}, weights: []float64{1, 1.5, 1}, ans: 13.142857142857146, }, } { c := Covariance(test.p, test.q, test.weights) if math.Abs(c-test.ans) > 1e-14 { t.Errorf("Covariance mismatch case %d: Expected %v, Found %v", i, test.ans, c) } } // test the panic states if !panics(func() { Covariance(make([]float64, 2), make([]float64, 3), nil) }) { t.Errorf("Covariance did not panic with x, y length mismatch") } if !panics(func() { Covariance(make([]float64, 3), make([]float64, 3), make([]float64, 2)) }) { t.Errorf("Covariance did not panic with x, weights length mismatch")
} func TestCrossEntropy(t *testing.T) { for i, test := range []struct { p []float64 q []float64 ans float64 }{ { p: []float64{0.75, 0.1, 0.05}, q: []float64{0.5, 0.25, 0.25}, ans: 0.7278045395879426, }, { p: []float64{0.75, 0.1, 0.05, 0, 0, 0}, q: []float64{0.5, 0.25, 0.25, 0, 0, 0}, ans: 0.7278045395879426, }, { p: []float64{0.75, 0.1, 0.05, 0, 0, 0.1}, q: []float64{0.5, 0.25, 0.25, 0, 0, 0}, ans: math.Inf(1), }, { p: nil, q: nil, ans: 0, }, } { c := CrossEntropy(test.p, test.q) if math.Abs(c-test.ans) > 1e-14 { t.Errorf("Cross entropy mismatch case %d: Expected %v, Found %v", i, test.ans, c) } } if !panics(func() { CrossEntropy(make([]float64, 3), make([]float64, 2)) }) { t.Errorf("CrossEntropy did not panic with p, q length mismatch") } } func ExampleEntropy() { p := []float64{0.05, 0.1, 0.9, 0.05} entP := Entropy(p) q := []float64{0.2, 0.4, 0.25, 0.15} entQ := Entropy(q) r := []float64{0.2, 0, 0, 0.5, 0, 0.2, 0.1, 0, 0, 0} entR := Entropy(r) s := []float64{0, 0, 1, 0} entS := Entropy(s) fmt.Println("Entropy is a measure of the amount of uncertainty in a distribution") fmt.Printf("The second bin of p is very likely to occur. It's entropy is %.4f\n", entP) fmt.Printf("The distribution of q is more spread out. It's entropy is %.4f\n", entQ) fmt.Println("Adding buckets with zero probability does not change the entropy.") fmt.Printf("The entropy of r is: %.4f\n", entR) fmt.Printf("A distribution with no uncertainty has entropy %.4f\n", entS) // Output: // Entropy is a measure of the amount of uncertainty in a distribution // The second bin of p is very likely to occur. It's entropy is 0.6247 // The distribution of q is more spread out. It's entropy is 1.3195 // Adding buckets with zero probability does not change the entropy. // The entropy of r is: 1.2206 // A distribution with no uncertainty has entropy 0.0000 } func ExampleExKurtosis() { fmt.Println(`Kurtosis is a measure of the 'peakedness' of a distribution, and the excess kurtosis is the kurtosis above or below that of the standard normal distribution`) x := []float64{5, 4, -3, -2} kurt := ExKurtosis(x, nil) fmt.Printf("ExKurtosis = %.5f\n", kurt) weights := []float64{1, 2, 3, 5} wKurt := ExKurtosis(x, weights) fmt.Printf("Weighted ExKurtosis is %.4f", wKurt) // Output: // Kurtosis is a measure of the 'peakedness' of a distribution, and the // excess kurtosis is the kurtosis above or below that of the standard normal // distribution // ExKurtosis = -5.41200 // Weighted ExKurtosis is -0.6779 } func TestExKurtosis(t *testing.T) { // the example does a good job, this just has to cover the panic if !panics(func() { ExKurtosis(make([]float64, 3), make([]float64, 2)) }) { t.Errorf("ExKurtosis did not panic with x, weights length mismatch") } } func ExampleGeometricMean() { x := []float64{8, 2, 9, 15, 4} weights := []float64{2, 2, 6, 7, 1} mean := Mean(x, weights) gmean := GeometricMean(x, weights) logx := make([]float64, len(x)) for i, v := range x { logx[i] = math.Log(v) } expMeanLog := math.Exp(Mean(logx, weights)) fmt.Printf("The arithmetic mean is %.4f, but the geometric mean is %.4f.\n", mean, gmean) fmt.Printf("The exponential of the mean of the logs is %.4f\n", expMeanLog) // Output: // The arithmetic mean is 10.1667, but the geometric mean is 8.7637. 
// The exponential of the mean of the logs is 8.7637 } func TestGeometricMean(t *testing.T) { for i, test := range []struct { x []float64 wts []float64 ans float64 }{ { x: []float64{2, 8}, ans: 4, }, { x: []float64{3, 81}, wts: []float64{2, 1}, ans: 9, }, } { c := GeometricMean(test.x, test.wts) if math.Abs(c-test.ans) > 1e-14 { t.Errorf("Geometric mean mismatch case %d: Expected %v, Found %v", i, test.ans, c) } } if !panics(func() { GeometricMean(make([]float64, 3), make([]float64, 2)) }) { t.Errorf("GeometricMean did not panic with x, wts length mismatch") } } func ExampleHarmonicMean() { x := []float64{8, 2, 9, 15, 4} weights := []float64{2, 2, 6, 7, 1} mean := Mean(x, weights) hmean := HarmonicMean(x, weights) fmt.Printf("The arithmetic mean is %.5f, but the harmonic mean is %.4f.\n", mean, hmean) // Output: // The arithmetic mean is 10.16667, but the harmonic mean is 6.8354. } func TestHarmonicMean(t *testing.T) { for i, test := range []struct { x []float64 wts []float64 ans float64 }{ { x: []float64{.5, .125}, ans: .2, }, { x: []float64{.5, .125}, wts: []float64{2, 1}, ans: .25, }, } { c := HarmonicMean(test.x, test.wts) if math.Abs(c-test.ans) > 1e-14 { t.Errorf("Harmonic mean mismatch case %d: Expected %v, Found %v", i, test.ans, c) } } if !panics(func() { HarmonicMean(make([]float64, 3), make([]float64, 2)) }) { t.Errorf("HarmonicMean did not panic with x, wts length mismatch") } } func TestHistogram(t *testing.T) { for i, test := range []struct { x []float64 weights []float64 dividers []float64 ans []float64 }{ { x: []float64{1, 3, 5, 6, 7, 8}, dividers: []float64{0, 2, 4, 6, 7, 9}, ans: []float64{1, 1, 1, 1, 2}, }, { x: []float64{1, 3, 5, 6, 7, 8}, dividers: []float64{1, 2, 4, 6, 7, 9}, weights: []float64{1, 2, 1, 1, 1, 2}, ans: []float64{1, 2, 1, 1, 3}, }, { x: []float64{1, 8}, dividers: []float64{0, 2, 4, 6, 7, 9}, weights: []float64{1, 2}, ans: []float64{1, 0, 0, 0, 2}, }, { x: []float64{1, 8}, dividers: []float64{0, 2, 4, 6, 7, 9}, ans: []float64{1, 0, 0, 0, 1}, }, { x: []float64{}, dividers: []float64{1, 3}, ans: []float64{0}, }, } { hist := Histogram(nil, test.dividers, test.x, test.weights) if !floats.Equal(hist, test.ans) { t.Errorf("Hist mismatch case %d. Expected %v, Found %v", i, test.ans, hist) } // Test with non-zero values Histogram(hist, test.dividers, test.x, test.weights) if !floats.Equal(hist, test.ans) { t.Errorf("Hist mismatch case %d. 
Expected %v, Found %v", i, test.ans, hist) } } // panic cases for _, test := range []struct { name string x []float64 weights []float64 dividers []float64 count []float64 }{ { name: "len(x) != len(weights)", x: []float64{1, 3, 5, 6, 7, 8}, weights: []float64{1, 1, 1, 1}, }, { name: "len(count) != len(dividers) - 1", x: []float64{1, 3, 5, 6, 7, 8}, dividers: []float64{1, 4, 9}, count: make([]float64, 6), }, { name: "dividers not sorted", x: []float64{1, 3, 5, 6, 7, 8}, dividers: []float64{0, -1, 0}, }, { name: "x not sorted", x: []float64{1, 5, 2, 9, 7, 8}, dividers: []float64{1, 4, 9}, }, { name: "fewer than 2 dividers", x: []float64{1, 2, 3}, dividers: []float64{5}, }, { name: "x too large", x: []float64{1, 2, 3}, dividers: []float64{1, 3}, }, { name: "x too small", x: []float64{1, 2, 3}, dividers: []float64{2, 3}, }, } { if !panics(func() { Histogram(test.count, test.dividers, test.x, test.weights) }) { t.Errorf("Histogram did not panic when %s", test.name) } } } func ExampleHistogram() { x := make([]float64, 101) for i := range x { x[i] = 1.1 * float64(i) // x data ranges from 0 to 110 } dividers := []float64{0, 7, 20, 100, 1000} fmt.Println(`Histogram counts the amount of data in the bins specified by the dividers. In this data set, there are 7 data points less than 7 (between dividers[0] and dividers[1]), 12 data points between 7 and 20 (dividers[1] and dividers[2]), and 0 data points above 1000. Since dividers has length 5, there will be 4 bins.`) hist := Histogram(nil, dividers, x, nil) fmt.Printf("Hist = %v\n", hist) fmt.Println() fmt.Println("For ease, the floats Span function can be used to set the dividers") nBins := 10 dividers = make([]float64, nBins+1) min := floats.Min(x) max := floats.Max(x) // Increase the maximum divider so that the maximum value of x is contained // within the last bucket. max++ floats.Span(dividers, min, max) // Span includes the min and the max. Trim the dividers to create 10 buckets hist = Histogram(nil, dividers, x, nil) fmt.Printf("Hist = %v\n", hist) fmt.Println() fmt.Println(`Histogram also works with weighted data, and allows reusing of the count field in order to avoid extra garbage`) weights := make([]float64, len(x)) for i := range weights { weights[i] = float64(i + 1) } Histogram(hist, dividers, x, weights) fmt.Printf("Weighted Hist = %v\n", hist) // Output: // Histogram counts the amount of data in the bins specified by // the dividers. In this data set, there are 7 data points less than 7 (between dividers[0] // and dividers[1]), 12 data points between 7 and 20 (dividers[1] and dividers[2]), // and 0 data points above 1000. Since dividers has length 5, there will be 4 bins. 
// Hist = [7 12 72 10] // // For ease, the floats Span function can be used to set the dividers // Hist = [11 10 10 10 10 10 10 10 10 10] // // Histogram also works with weighted data, and allows reusing of // the count field in order to avoid extra garbage // Weighted Hist = [66 165 265 365 465 565 665 765 865 965] } func TestJensenShannon(t *testing.T) { for i, test := range []struct { p []float64 q []float64 }{ { p: []float64{0.5, 0.1, 0.3, 0.1}, q: []float64{0.1, 0.4, 0.25, 0.25}, }, { p: []float64{0.4, 0.6, 0.0}, q: []float64{0.2, 0.2, 0.6}, }, { p: []float64{0.1, 0.1, 0.0, 0.8}, q: []float64{0.6, 0.3, 0.0, 0.1}, }, { p: []float64{0.5, 0.1, 0.3, 0.1}, q: []float64{0.5, 0, 0.25, 0.25}, }, { p: []float64{0.5, 0.1, 0, 0.4}, q: []float64{0.1, 0.4, 0.25, 0.25}, }, } { m := make([]float64, len(test.p)) p := test.p q := test.q floats.Add(m, p) floats.Add(m, q) floats.Scale(0.5, m) js1 := 0.5*KullbackLeibler(p, m) + 0.5*KullbackLeibler(q, m) js2 := JensenShannon(p, q) if math.IsNaN(js2) { t.Errorf("In case %v, JS distance is NaN", i) } if math.Abs(js1-js2) > 1e-14 { t.Errorf("JS mismatch case %v. Expected %v, found %v.", i, js1, js2) } } if !panics(func() { JensenShannon(make([]float64, 3), make([]float64, 2)) }) { t.Errorf("JensenShannon did not panic with p, q length mismatch") } } func TestKolmogorovSmirnov(t *testing.T) { for i, test := range []struct { x []float64 xWeights []float64 y []float64 yWeights []float64 dist float64 }{ { dist: 0, }, { x: []float64{1}, dist: 1, }, { y: []float64{1}, dist: 1, }, { x: []float64{1}, xWeights: []float64{8}, dist: 1, }, { y: []float64{1}, yWeights: []float64{8}, dist: 1, }, { x: []float64{1}, xWeights: []float64{8}, y: []float64{1}, yWeights: []float64{8}, dist: 0, }, { x: []float64{1, 1, 1}, xWeights: []float64{2, 3, 7}, y: []float64{1}, yWeights: []float64{8}, dist: 0, }, { x: []float64{1, 1, 1, 1, 1}, y: []float64{1, 1, 1}, yWeights: []float64{2, 5, 2}, dist: 0, }, { x: []float64{1, 2, 3}, y: []float64{1, 2, 3}, dist: 0, }, { x: []float64{1, 2, 3}, y: []float64{1, 2, 3}, yWeights: []float64{1, 1, 1}, dist: 0, }, { x: []float64{1, 2, 3}, xWeights: []float64{1, 1, 1}, y: []float64{1, 2, 3}, yWeights: []float64{1, 1, 1}, dist: 0, }, { x: []float64{1, 2}, xWeights: []float64{2, 5}, y: []float64{1, 1, 2, 2, 2, 2, 2}, dist: 0, }, { x: []float64{1, 1, 2, 2, 2, 2, 2}, y: []float64{1, 2}, yWeights: []float64{2, 5}, dist: 0, }, { x: []float64{1, 1, 2, 2, 2}, xWeights: []float64{0.5, 1.5, 1, 2, 2}, y: []float64{1, 2}, yWeights: []float64{2, 5}, dist: 0, }, { x: []float64{1, 2, 3, 4}, y: []float64{5, 6}, dist: 1, }, { x: []float64{5, 6}, y: []float64{1, 2, 3, 4}, dist: 1, }, { x: []float64{5, 6}, xWeights: []float64{8, 7}, y: []float64{1, 2, 3, 4}, dist: 1, }, { x: []float64{5, 6}, xWeights: []float64{8, 7}, y: []float64{1, 2, 3, 4}, yWeights: []float64{9, 2, 1, 6}, dist: 1, }, { x: []float64{-4, 5, 6}, xWeights: []float64{0, 8, 7}, y: []float64{1, 2, 3, 4}, yWeights: []float64{9, 2, 1, 6}, dist: 1, }, { x: []float64{-4, -2, -2, 5, 6}, xWeights: []float64{0, 0, 0, 8, 7}, y: []float64{1, 2, 3, 4}, yWeights: []float64{9, 2, 1, 6}, dist: 1, }, { x: []float64{1, 2, 3}, y: []float64{1, 1, 3}, dist: 1.0 / 3.0, }, { x: []float64{1, 2, 3}, y: []float64{1, 3}, yWeights: []float64{2, 1}, dist: 1.0 / 3.0, }, { x: []float64{1, 2, 3}, xWeights: []float64{2, 2, 2}, y: []float64{1, 3}, yWeights: []float64{2, 1}, dist: 1.0 / 3.0, }, { x: []float64{2, 3, 4}, y: []float64{1, 5}, dist: 1.0 / 2.0, }, { x: []float64{1, 2, math.NaN()}, y: []float64{1, 1, 3}, dist: math.NaN(), }, { 
x: []float64{1, 2, 3}, y: []float64{1, 1, math.NaN()}, dist: math.NaN(), }, } { dist := KolmogorovSmirnov(test.x, test.xWeights, test.y, test.yWeights) if math.Abs(dist-test.dist) > 1e-14 && !(math.IsNaN(test.dist) && math.IsNaN(dist)) { t.Errorf("Distance mismatch case %v: Expected: %v, Found: %v", i, test.dist, dist) } } // panic cases for _, test := range []struct { name string x []float64 xWeights []float64 y []float64 yWeights []float64 }{ { name: "len(x) != len(xWeights)", x: []float64{1, 3, 5, 6, 7, 8}, xWeights: []float64{1, 1, 1, 1}, }, { name: "len(y) != len(yWeights)", x: []float64{1, 3, 5, 6, 7, 8}, y: []float64{1, 3, 5, 6, 7, 8}, yWeights: []float64{1, 1, 1, 1}, }, { name: "x not sorted", x: []float64{10, 3, 5, 6, 7, 8}, y: []float64{1, 3, 5, 6, 7, 8}, }, { name: "y not sorted", x: []float64{1, 3, 5, 6, 7, 8}, y: []float64{10, 3, 5, 6, 7, 8}, }, } { if !panics(func() { KolmogorovSmirnov(test.x, test.xWeights, test.y, test.yWeights) }) { t.Errorf("KolmogorovSmirnov did not panic when %s", test.name) } } } func ExampleKullbackLeibler() { p := []float64{0.05, 0.1, 0.9, 0.05} q := []float64{0.2, 0.4, 0.25, 0.15} s := []float64{0, 0, 1, 0} klPQ := KullbackLeibler(p, q) klPS := KullbackLeibler(p, s) klPP := KullbackLeibler(p, p) fmt.Println("Kullback-Leibler is one measure of the difference between two distributions") fmt.Printf("The K-L distance between p and q is %.4f\n", klPQ) fmt.Println("It is impossible for s and p to be the same distribution, because") fmt.Println("the first bucket has zero probability in s and non-zero in p. Thus,") fmt.Printf("the K-L distance between them is %.4f\n", klPS) fmt.Printf("The K-L distance between identical distributions is %.4f\n", klPP) // Kullback-Leibler is one measure of the difference between two distributions // The K-L distance between p and q is 0.8900 // It is impossible for s and p to be the same distribution, because // the first bucket has zero probability in s and non-zero in p. Thus, // the K-L distance between them is +Inf // The K-L distance between identical distributions is 0.0000 } func TestKullbackLeibler(t *testing.T) { if !panics(func() { KullbackLeibler(make([]float64, 3), make([]float64, 2)) }) { t.Errorf("KullbackLeibler did not panic with p, q length mismatch") } } var linearRegressionTests = []struct { name string x, y []float64 weights []float64 origin bool alpha float64 beta float64 r float64 tol float64 }{ { name: "faithful", x: faithful.waiting, y: faithful.eruptions, // Values calculated by R using lm(eruptions ~ waiting, data=faithful). alpha: -1.87402, beta: 0.07563, r: 0.8114608, tol: 1e-5, }, { name: "faithful through origin", x: faithful.waiting, y: faithful.eruptions, origin: true, // Values calculated by R using lm(eruptions ~ waiting - 1, data=faithful). alpha: 0, beta: 0.05013, r: 0.9726036, tol: 1e-5, }, { name: "faithful explicit weights", x: faithful.waiting, y: faithful.eruptions, weights: func() []float64 { w := make([]float64, len(faithful.eruptions)) for i := range w { w[i] = 1 } return w }(), // Values calculated by R using lm(eruptions ~ waiting, data=faithful). alpha: -1.87402, beta: 0.07563, r: 0.8114608, tol: 1e-5, }, { name: "faithful non-uniform weights", x: faithful.waiting, y: faithful.eruptions, weights: faithful.waiting, // Just an arbitrary set of non-uniform weights. // Values calculated by R using lm(eruptions ~ waiting, data=faithful, weights=faithful$waiting). 
alpha: -1.79268, beta: 0.07452, r: 0.7840372, tol: 1e-5, }, } func TestLinearRegression(t *testing.T) { for _, test := range linearRegressionTests { alpha, beta := LinearRegression(test.x, test.y, test.weights, test.origin) var r float64 if test.origin { r = RNoughtSquared(test.x, test.y, test.weights, beta) } else { r = RSquared(test.x, test.y, test.weights, alpha, beta) ests := make([]float64, len(test.y)) for i, x := range test.x { ests[i] = alpha + beta*x } rvals := RSquaredFrom(ests, test.y, test.weights) if r != rvals { t.Errorf("%s: RSquared and RSquaredFrom mismatch: %v != %v", test.name, r, rvals) } } if !scalar.EqualWithinAbsOrRel(alpha, test.alpha, test.tol, test.tol) { t.Errorf("%s: unexpected alpha estimate: want:%v got:%v", test.name, test.alpha, alpha) } if !scalar.EqualWithinAbsOrRel(beta, test.beta, test.tol, test.tol) { t.Errorf("%s: unexpected beta estimate: want:%v got:%v", test.name, test.beta, beta) } if !scalar.EqualWithinAbsOrRel(r, test.r, test.tol, test.tol) { t.Errorf("%s: unexpected r estimate: want:%v got:%v", test.name, test.r, r) } } } func BenchmarkLinearRegression(b *testing.B) { rnd := rand.New(rand.NewSource(1)) slope, offset := 2.0, 3.0 maxn := 10000 xs := make([]float64, maxn) ys := make([]float64, maxn) weights := make([]float64, maxn) for i := range xs { x := rnd.Float64() xs[i] = x ys[i] = slope*x + offset weights[i] = rnd.Float64() } for _, n := range []int{10, 100, 1000, maxn} { for _, weighted := range []bool{true, false} { for _, origin := range []bool{true, false} { name := "n" + strconv.Itoa(n) if weighted { name += "wt" } else { name += "wf" } if origin { name += "ot" } else { name += "of" } b.Run(name, func(b *testing.B) { for i := 0; i < b.N; i++ { var ws []float64 if weighted { ws = weights[:n] } LinearRegression(xs[:n], ys[:n], ws, origin) } }) } } } } func TestChiSquare(t *testing.T) { for i, test := range []struct { p []float64 q []float64 res float64 }{ { p: []float64{16, 18, 16, 14, 12, 12}, q: []float64{16, 16, 16, 16, 16, 8}, res: 3.5, }, { p: []float64{16, 18, 16, 14, 12, 12}, q: []float64{8, 20, 20, 16, 12, 12}, res: 9.25, }, { p: []float64{40, 60, 30, 45}, q: []float64{50, 50, 50, 50}, res: 12.5, }, { p: []float64{40, 60, 30, 45, 0, 0}, q: []float64{50, 50, 50, 50, 0, 0}, res: 12.5, }, } { resultpq := ChiSquare(test.p, test.q) if math.Abs(resultpq-test.res) > 1e-10 { t.Errorf("ChiSquare distance mismatch in case %d. Expected %v, Found %v", i, test.res, resultpq) } } if !panics(func() { ChiSquare(make([]float64, 2), make([]float64, 3)) }) { t.Errorf("ChiSquare did not panic with length mismatch") } } // panics returns true if the called function panics during evaluation. func panics(fun func()) (b bool) { defer func() { err := recover() if err != nil { b = true } }() fun() return } func TestBhattacharyya(t *testing.T) { for i, test := range []struct { p []float64 q []float64 res float64 }{ { p: []float64{0.5, 0.1, 0.3, 0.1}, q: []float64{0.1, 0.4, 0.25, 0.25}, res: 0.15597338718671386, }, { p: []float64{0.4, 0.6, 0.0}, q: []float64{0.2, 0.2, 0.6}, res: 0.46322207765351153, }, { p: []float64{0.1, 0.1, 0.0, 0.8}, q: []float64{0.6, 0.3, 0.0, 0.1}, res: 0.3552520032137785, }, } { resultpq := Bhattacharyya(test.p, test.q) resultqp := Bhattacharyya(test.q, test.p) if math.Abs(resultpq-test.res) > 1e-10 { t.Errorf("Bhattacharyya distance mismatch in case %d. 
Expected %v, Found %v", i, test.res, resultpq) } if math.Abs(resultpq-resultqp) > 1e-10 { t.Errorf("Bhattacharyya distance is assymmetric in case %d.", i) } } // Bhattacharyya should panic if the inputs have different length if !panics(func() { Bhattacharyya(make([]float64, 2), make([]float64, 3)) }) { t.Errorf("Bhattacharyya did not panic with length mismatch") } } func TestHellinger(t *testing.T) { for i, test := range []struct { p []float64 q []float64 res float64 }{ { p: []float64{0.5, 0.1, 0.3, 0.1}, q: []float64{0.1, 0.4, 0.25, 0.25}, res: 0.3800237367441919, }, { p: []float64{0.4, 0.6, 0.0}, q: []float64{0.2, 0.2, 0.6}, res: 0.6088900771170487, }, { p: []float64{0.1, 0.1, 0.0, 0.8}, q: []float64{0.6, 0.3, 0.0, 0.1}, res: 0.5468118803484205, }, } { resultpq := Hellinger(test.p, test.q) resultqp := Hellinger(test.q, test.p) if math.Abs(resultpq-test.res) > 1e-10 { t.Errorf("Hellinger distance mismatch in case %d. Expected %v, Found %v", i, test.res, resultpq) } if math.Abs(resultpq-resultqp) > 1e-10 { t.Errorf("Hellinger distance is assymmetric in case %d.", i) } } if !panics(func() { Hellinger(make([]float64, 2), make([]float64, 3)) }) { t.Errorf("Hellinger did not panic with length mismatch") } } func ExampleMean() { x := []float64{8.2, -6, 5, 7} mean := Mean(x, nil) fmt.Printf("The mean of the samples is %.4f\n", mean) w := []float64{2, 6, 3, 5} weightedMean := Mean(x, w) fmt.Printf("The weighted mean of the samples is %.4f\n", weightedMean) x2 := []float64{8.2, 8.2, -6, -6, -6, -6, -6, -6, 5, 5, 5, 7, 7, 7, 7, 7} mean2 := Mean(x2, nil) fmt.Printf("The mean of x2 is %.4f\n", mean2) fmt.Println("The weights act as if there were more samples of that number") // Output: // The mean of the samples is 3.5500 // The weighted mean of the samples is 1.9000 // The mean of x2 is 1.9000 // The weights act as if there were more samples of that number } func TestMean(t *testing.T) { if !panics(func() { Mean(make([]float64, 3), make([]float64, 2)) }) { t.Errorf("Mean did not panic with x, weights length mismatch") } } func TestMode(t *testing.T) { for i, test := range []struct { x []float64 weights []float64 ans float64 count float64 }{ {}, { x: []float64{1, 6, 1, 9, -2}, ans: 1, count: 2, }, { x: []float64{1, 6, 1, 9, -2}, weights: []float64{1, 7, 3, 5, 0}, ans: 6, count: 7, }, } { m, count := Mode(test.x, test.weights) if test.ans != m { t.Errorf("Mode mismatch case %d. Expected %v, found %v", i, test.ans, m) } if test.count != count { t.Errorf("Mode count mismatch case %d. Expected %v, found %v", i, test.count, count) } } if !panics(func() { Mode(make([]float64, 3), make([]float64, 2)) }) { t.Errorf("Mode did not panic with x, weights length mismatch") } } func TestMixedMoment(t *testing.T) { for i, test := range []struct { x, y, weights []float64 r, s float64 ans float64 }{ { x: []float64{10, 2, 1, 8, 5}, y: []float64{8, 15, 1, 6, 3}, r: 1, s: 1, ans: 0.48, }, { x: []float64{10, 2, 1, 8, 5}, y: []float64{8, 15, 1, 6, 3}, weights: []float64{1, 1, 1, 1, 1}, r: 1, s: 1, ans: 0.48, }, { x: []float64{10, 2, 1, 8, 5}, y: []float64{8, 15, 1, 6, 3}, weights: []float64{2, 3, 0.2, 8, 4}, r: 1, s: 1, ans: -4.786371011357490, }, { x: []float64{10, 2, 1, 8, 5}, y: []float64{8, 15, 1, 6, 3}, weights: []float64{2, 3, 0.2, 8, 4}, r: 2, s: 3, ans: 1.598600579313326e+03, }, } { m := BivariateMoment(test.r, test.s, test.x, test.y, test.weights) if math.Abs(test.ans-m) > 1e-14 { t.Errorf("Moment mismatch case %d. 
Expected %v, found %v", i, test.ans, m) } } if !panics(func() { BivariateMoment(1, 1, make([]float64, 3), make([]float64, 2), nil) }) { t.Errorf("Moment did not panic with x, y length mismatch") } if !panics(func() { BivariateMoment(1, 1, make([]float64, 2), make([]float64, 3), nil) }) { t.Errorf("Moment did not panic with x, y length mismatch") } if !panics(func() { BivariateMoment(1, 1, make([]float64, 2), make([]float64, 2), make([]float64, 3)) }) { t.Errorf("Moment did not panic with x, weights length mismatch") } if !panics(func() { BivariateMoment(1, 1, make([]float64, 2), make([]float64, 2), make([]float64, 1)) }) { t.Errorf("Moment did not panic with x, weights length mismatch") } } func TestMoment(t *testing.T) { for i, test := range []struct { x []float64 weights []float64 moment float64 ans float64 }{ { x: []float64{6, 2, 4, 8, 10}, moment: 5, ans: 0, }, { x: []float64{6, 2, 4, 8, 10}, weights: []float64{1, 2, 2, 2, 1}, moment: 5, ans: 121.875, }, } { m := Moment(test.moment, test.x, test.weights) if math.Abs(test.ans-m) > 1e-14 { t.Errorf("Moment mismatch case %d. Expected %v, found %v", i, test.ans, m) } } if !panics(func() { Moment(1, make([]float64, 3), make([]float64, 2)) }) { t.Errorf("Moment did not panic with x, weights length mismatch") } if !panics(func() { Moment(1, make([]float64, 2), make([]float64, 3)) }) { t.Errorf("Moment did not panic with x, weights length mismatch") } } func TestMomentAbout(t *testing.T) { for i, test := range []struct { x []float64 weights []float64 moment float64 mean float64 ans float64 }{ { x: []float64{6, 2, 4, 8, 9}, mean: 3, moment: 5, ans: 2.2288e3, }, { x: []float64{6, 2, 4, 8, 9}, weights: []float64{1, 2, 2, 2, 1}, mean: 3, moment: 5, ans: 1.783625e3, }, } { m := MomentAbout(test.moment, test.x, test.mean, test.weights) if math.Abs(test.ans-m) > 1e-14 { t.Errorf("MomentAbout mismatch case %d. Expected %v, found %v", i, test.ans, m) } } if !panics(func() { MomentAbout(1, make([]float64, 3), 0, make([]float64, 2)) }) { t.Errorf("MomentAbout did not panic with x, weights length mismatch") } } func TestCDF(t *testing.T) { cumulantKinds := []CumulantKind{Empirical} for i, test := range []struct { q []float64 x []float64 weights []float64 ans [][]float64 }{ {}, { q: []float64{0, 0.9, 1, 1.1, 2.9, 3, 3.1, 4.9, 5, 5.1}, x: []float64{1, 2, 3, 4, 5}, ans: [][]float64{{0, 0, 0.2, 0.2, 0.4, 0.6, 0.6, 0.8, 1, 1}}, }, { q: []float64{0, 0.9, 1, 1.1, 2.9, 3, 3.1, 4.9, 5, 5.1}, x: []float64{1, 2, 3, 4, 5}, weights: []float64{1, 1, 1, 1, 1}, ans: [][]float64{{0, 0, 0.2, 0.2, 0.4, 0.6, 0.6, 0.8, 1, 1}}, }, { q: []float64{0, 0.9, 1}, x: []float64{math.NaN()}, ans: [][]float64{{math.NaN(), math.NaN(), math.NaN()}}, }, } { copyX := make([]float64, len(test.x)) copy(copyX, test.x) var copyW []float64 if test.weights != nil { copyW = make([]float64, len(test.weights)) copy(copyW, test.weights) } for j, q := range test.q { for k, kind := range cumulantKinds { v := CDF(q, kind, test.x, test.weights) if !floats.Equal(copyX, test.x) && !math.IsNaN(v) { t.Errorf("x changed for case %d kind %d percentile %v", i, k, q) } if !floats.Equal(copyW, test.weights) { t.Errorf("x changed for case %d kind %d percentile %v", i, k, q) } if v != test.ans[k][j] && !(math.IsNaN(v) && math.IsNaN(test.ans[k][j])) { t.Errorf("mismatch case %d kind %d percentile %v. 
Expected: %v, found: %v", i, k, q, test.ans[k][j], v) } } } } // these test cases should all result in a panic for i, test := range []struct { name string q float64 kind CumulantKind x []float64 weights []float64 }{ { name: "x == nil", kind: Empirical, x: nil, }, { name: "len(x) == 0", kind: Empirical, x: []float64{}, }, { name: "len(x) != len(weights)", q: 1.5, kind: Empirical, x: []float64{1, 2, 3, 4, 5}, weights: []float64{1, 2, 3}, }, { name: "unsorted x", q: 1.5, kind: Empirical, x: []float64{3, 2, 1}, }, { name: "unknown CumulantKind", q: 1.5, kind: CumulantKind(1000), // bogus x: []float64{1, 2, 3}, }, } { if !panics(func() { CDF(test.q, test.kind, test.x, test.weights) }) { t.Errorf("did not panic as expected with %s for case %d kind %d percentile %v x %v weights %v", test.name, i, test.kind, test.q, test.x, test.weights) } } } func TestQuantile(t *testing.T) { cumulantKinds := []CumulantKind{ Empirical, LinInterp, } for i, test := range []struct { p []float64 x []float64 w []float64 ans [][]float64 panics bool }{ { p: []float64{0, 0.05, 0.1, 0.15, 0.45, 0.5, 0.55, 0.85, 0.9, 0.95, 1}, x: nil, w: nil, panics: true, }, { p: []float64{0, 0.05, 0.1, 0.15, 0.45, 0.5, 0.55, 0.85, 0.9, 0.95, 1}, x: []float64{}, w: nil, panics: true, }, { p: []float64{0, 0.05, 0.1, 0.15, 0.45, 0.5, 0.55, 0.85, 0.9, 0.95, 1}, x: []float64{1}, w: nil, ans: [][]float64{ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, }, }, { p: []float64{0, 0.05, 0.1, 0.15, 0.45, 0.5, 0.55, 0.85, 0.9, 0.95, 1}, x: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, w: nil, ans: [][]float64{ {1, 1, 1, 2, 5, 5, 6, 9, 9, 10, 10}, {1, 1, 1, 1.5, 4.5, 5, 5.5, 8.5, 9, 9.5, 10}, }, }, { p: []float64{0, 0.05, 0.1, 0.15, 0.45, 0.5, 0.55, 0.85, 0.9, 0.95, 1}, x: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, w: []float64{3, 3, 3, 3, 3, 3, 3, 3, 3, 3}, ans: [][]float64{ {1, 1, 1, 2, 5, 5, 6, 9, 9, 10, 10}, {1, 1, 1, 1.5, 4.5, 5, 5.5, 8.5, 9, 9.5, 10}, }, }, { p: []float64{0, 0.05, 0.1, 0.15, 0.45, 0.5, 0.55, 0.85, 0.9, 0.95, 1}, x: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, w: []float64{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0}, ans: [][]float64{ {1, 2, 3, 4, 7, 7, 8, 10, 10, 10, 10}, {1, 1.875, 2.833333333333333, 3.5625, 6.535714285714286, 6.928571428571429, 7.281250000000001, 9.175, 9.45, 9.725, 10}, }, }, { p: []float64{0.5}, x: []float64{1, 2, 3, 4, 5, 6, 7, 8, math.NaN(), 10}, ans: [][]float64{ {math.NaN()}, {math.NaN()}, }, }, } { copyX := make([]float64, len(test.x)) copy(copyX, test.x) var copyW []float64 if test.w != nil { copyW = make([]float64, len(test.w)) copy(copyW, test.w) } for j, p := range test.p { for k, kind := range cumulantKinds { var v float64 if test.panics != panics(func() { v = Quantile(p, kind, test.x, test.w) }) { t.Errorf("Quantile did not panic when expected: test %d", j) } if !floats.Same(copyX, test.x) { t.Errorf("x changed for case %d kind %d percentile %v", i, k, p) } if !floats.Same(copyW, test.w) { t.Errorf("x changed for case %d kind %d percentile %v", i, k, p) } if test.panics { continue } if v != test.ans[k][j] && !(math.IsNaN(v) && math.IsNaN(test.ans[k][j])) { t.Errorf("mismatch case %d kind %d percentile %v. 
Expected: %v, found: %v", i, k, p, test.ans[k][j], v) } } } } } func TestQuantileInvalidInput(t *testing.T) { cumulantKinds := []CumulantKind{ Empirical, LinInterp, } for _, test := range []struct { name string p float64 x []float64 w []float64 }{ { name: "p < 0", p: -1, }, { name: "p > 1", p: 2, }, { name: "p is NaN", p: math.NaN(), }, { name: "len(x) != len(weights)", p: .5, x: make([]float64, 4), w: make([]float64, 2), }, { name: "x not sorted", p: .5, x: []float64{3, 2, 1}, }, } { for _, kind := range cumulantKinds { if !panics(func() { Quantile(test.p, kind, test.x, test.w) }) { t.Errorf("Quantile did not panic when %s", test.name) } } } } func TestQuantileInvalidCumulantKind(t *testing.T) { if !panics(func() { Quantile(0.5, CumulantKind(1000), []float64{1, 2, 3}, nil) }) { t.Errorf("Quantile did not panic when CumulantKind is unknown") } } func ExampleStdDev() { x := []float64{8, 2, -9, 15, 4} stdev := StdDev(x, nil) fmt.Printf("The standard deviation of the samples is %.4f\n", stdev) weights := []float64{2, 2, 6, 7, 1} weightedStdev := StdDev(x, weights) fmt.Printf("The weighted standard deviation of the samples is %.4f\n", weightedStdev) // Output: // The standard deviation of the samples is 8.8034 // The weighted standard deviation of the samples is 10.5733 } func ExamplePopStdDev() { x := []float64{8, 2, -9, 15, 4} stdev := PopStdDev(x, nil) fmt.Printf("The standard deviation of the population is %.4f\n", stdev) weights := []float64{2, 2, 6, 7, 1} weightedStdev := PopStdDev(x, weights) fmt.Printf("The weighted standard deviation of the population is %.4f\n", weightedStdev) // Output: // The standard deviation of the population is 7.8740 // The weighted standard deviation of the population is 10.2754 } func ExampleStdErr() { x := []float64{8, 2, -9, 15, 4} weights := []float64{2, 2, 6, 7, 1} mean := Mean(x, weights) stdev := StdDev(x, weights) nSamples := floats.Sum(weights) stdErr := StdErr(stdev, nSamples) fmt.Printf("The standard deviation is %.4f and there are %g samples, so the mean\nis likely %.4f ± %.4f.", stdev, nSamples, mean, stdErr) // Output: // The standard deviation is 10.5733 and there are 18 samples, so the mean // is likely 4.1667 ± 2.4921. } func TestSkew(t *testing.T) { for i, test := range []struct { x []float64 weights []float64 ans float64 }{ { x: []float64{8, 3, 7, 8, 4}, weights: nil, ans: -0.581456499151665, }, { x: []float64{8, 3, 7, 8, 4}, weights: []float64{1, 1, 1, 1, 1}, ans: -0.581456499151665, }, { x: []float64{8, 3, 7, 8, 4}, weights: []float64{2, 1, 2, 1, 1}, ans: -1.12066646837198, }, } { skew := Skew(test.x, test.weights) if math.Abs(skew-test.ans) > 1e-14 { t.Errorf("Skew mismatch case %d. Expected %v, Found %v", i, test.ans, skew) } } if !panics(func() { Skew(make([]float64, 3), make([]float64, 2)) }) { t.Errorf("Skew did not panic with x, weights length mismatch") } } func TestSortWeighted(t *testing.T) { for i, test := range []struct { x []float64 w []float64 ansx []float64 answ []float64 }{ { x: []float64{8, 3, 7, 8, 4}, ansx: []float64{3, 4, 7, 8, 8}, }, { x: []float64{8, 3, 7, 8, 4}, w: []float64{.5, 1, 1, .5, 1}, ansx: []float64{3, 4, 7, 8, 8}, answ: []float64{1, 1, 1, .5, .5}, }, } { SortWeighted(test.x, test.w) if !floats.Same(test.x, test.ansx) { t.Errorf("SortWeighted mismatch case %d. Expected x %v, Found x %v", i, test.ansx, test.x) } if !(test.w == nil) && !floats.Same(test.w, test.answ) { t.Errorf("SortWeighted mismatch case %d. 
Expected w %v, Found w %v", i, test.answ, test.w) } } if !panics(func() { SortWeighted(make([]float64, 3), make([]float64, 2)) }) { t.Errorf("SortWeighted did not panic with x, weights length mismatch") } } func TestSortWeightedLabeled(t *testing.T) { for i, test := range []struct { x []float64 l []bool w []float64 ansx []float64 ansl []bool answ []float64 }{ { x: []float64{8, 3, 7, 8, 4}, ansx: []float64{3, 4, 7, 8, 8}, }, { x: []float64{8, 3, 7, 8, 4}, w: []float64{.5, 1, 1, .5, 1}, ansx: []float64{3, 4, 7, 8, 8}, answ: []float64{1, 1, 1, .5, .5}, }, { x: []float64{8, 3, 7, 8, 4}, l: []bool{false, false, true, false, true}, ansx: []float64{3, 4, 7, 8, 8}, ansl: []bool{false, true, true, false, false}, }, { x: []float64{8, 3, 7, 8, 4}, l: []bool{false, false, true, false, true}, w: []float64{.5, 1, 1, .5, 1}, ansx: []float64{3, 4, 7, 8, 8}, ansl: []bool{false, true, true, false, false}, answ: []float64{1, 1, 1, .5, .5}, }, } { SortWeightedLabeled(test.x, test.l, test.w) if !floats.Same(test.x, test.ansx) { t.Errorf("SortWeightedLabelled mismatch case %d. Expected x %v, Found x %v", i, test.ansx, test.x) } if (test.l != nil) && !reflect.DeepEqual(test.l, test.ansl) { t.Errorf("SortWeightedLabelled mismatch case %d. Expected l %v, Found l %v", i, test.ansl, test.l) } if (test.w != nil) && !floats.Same(test.w, test.answ) { t.Errorf("SortWeightedLabelled mismatch case %d. Expected w %v, Found w %v", i, test.answ, test.w) } } if !panics(func() { SortWeightedLabeled(make([]float64, 3), make([]bool, 2), make([]float64, 3)) }) { t.Errorf("SortWeighted did not panic with x, labels length mismatch") } if !panics(func() { SortWeightedLabeled(make([]float64, 3), make([]bool, 2), nil) }) { t.Errorf("SortWeighted did not panic with x, labels length mismatch") } if !panics(func() { SortWeightedLabeled(make([]float64, 3), make([]bool, 3), make([]float64, 2)) }) { t.Errorf("SortWeighted did not panic with x, weights length mismatch") } if !panics(func() { SortWeightedLabeled(make([]float64, 3), nil, make([]float64, 2)) }) { t.Errorf("SortWeighted did not panic with x, weights length mismatch") } } func TestVariance(t *testing.T) { for i, test := range []struct { x []float64 weights []float64 ans float64 }{ { x: []float64{8, -3, 7, 8, -4}, weights: nil, ans: 37.7, }, { x: []float64{8, -3, 7, 8, -4}, weights: []float64{1, 1, 1, 1, 1}, ans: 37.7, }, { x: []float64{8, 3, 7, 8, 4}, weights: []float64{2, 1, 2, 1, 1}, ans: 4.2857142857142865, }, { x: []float64{1, 4, 9}, weights: []float64{1, 1.5, 1}, ans: 13.142857142857146, }, { x: []float64{1, 2, 3}, weights: []float64{1, 1.5, 1}, ans: .8, }, } { variance := Variance(test.x, test.weights) if math.Abs(variance-test.ans) > 1e-14 { t.Errorf("Variance mismatch case %d. 
Expected %v, Found %v", i, test.ans, variance) } } if !panics(func() { Variance(make([]float64, 3), make([]float64, 2)) }) { t.Errorf("Variance did not panic with x, weights length mismatch") } } func TestPopVariance(t *testing.T) { for i, test := range []struct { x []float64 weights []float64 ans float64 }{ { x: []float64{8, -3, 7, 8, -4}, weights: nil, ans: 30.16, }, { x: []float64{8, -3, 7, 8, -4}, weights: []float64{1, 1, 1, 1, 1}, ans: 30.16, }, { x: []float64{8, 3, 7, 8, 4}, weights: []float64{2, 1, 2, 1, 1}, ans: 3.6734693877551026, }, { x: []float64{1, 4, 9}, weights: []float64{1, 1.5, 1}, ans: 9.387755102040817, }, { x: []float64{1, 2, 3}, weights: []float64{1, 1.5, 1}, ans: 0.5714285714285714, }, { x: []float64{2}, weights: nil, ans: 0, }, { x: []float64{2}, weights: []float64{2}, ans: 0, }, } { variance := PopVariance(test.x, test.weights) if math.Abs(variance-test.ans) > 1e-14 { t.Errorf("PopVariance mismatch case %d. Expected %v, Found %v", i, test.ans, variance) } } if !panics(func() { PopVariance(make([]float64, 3), make([]float64, 2)) }) { t.Errorf("PopVariance did not panic with x, weights length mismatch") } } func ExampleVariance() { x := []float64{8, 2, -9, 15, 4} variance := Variance(x, nil) fmt.Printf("The variance of the samples is %.4f\n", variance) weights := []float64{2, 2, 6, 7, 1} weightedVariance := Variance(x, weights) fmt.Printf("The weighted variance of the samples is %.4f\n", weightedVariance) // Output: // The variance of the samples is 77.5000 // The weighted variance of the samples is 111.7941 } func ExamplePopVariance() { x := []float64{8, 2, -9, 15, 4} variance := PopVariance(x, nil) fmt.Printf("The biased variance of the samples is %.4f\n", variance) weights := []float64{2, 2, 6, 7, 1} weightedVariance := PopVariance(x, weights) fmt.Printf("The weighted biased variance of the samples is %.4f\n", weightedVariance) // Output: // The biased variance of the samples is 62.0000 // The weighted biased variance of the samples is 105.5833 } func TestStdScore(t *testing.T) { for i, test := range []struct { x float64 u float64 s float64 z float64 }{ { x: 4, u: -6, s: 5, z: 2, }, { x: 1, u: 0, s: 1, z: 1, }, } { z := StdScore(test.x, test.u, test.s) if math.Abs(z-test.z) > 1e-14 { t.Errorf("StdScore mismatch case %d. Expected %v, Found %v", i, test.z, z) } } }
}
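The unweighted expectations in TestVariance and TestPopVariance are ordinary sample and population variances. A quick cross-check of the first case (x = [8, -3, 7, 8, -4]), done here as plain Python arithmetic rather than through the Go API, reproduces the 37.7 and 30.16 values in the tables:

# Cross-check of the unweighted cases in TestVariance / TestPopVariance.
x = [8, -3, 7, 8, -4]
mean = sum(x) / len(x)                 # 3.2
ss = sum((v - mean) ** 2 for v in x)   # ~ 150.8
print(ss / (len(x) - 1))               # sample variance, ~ 37.7
print(ss / len(x))                     # population variance, ~ 30.16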
base.py
# This is Cyder's main settings file. If you need to override a setting # locally, use cyder/settings/local.py import glob import itertools import logging import os import socket import sys from django.utils.functional import lazy from lib.path_utils import ROOT, path ########################## # copied from funfactory # ########################## SLAVE_DATABASES = [] DATABASE_ROUTERS = ('multidb.PinningMasterSlaveRouter',) ## Logging LOG_LEVEL = logging.INFO HAS_SYSLOG = True SYSLOG_TAG = "http_app_playdoh" # Change this after you fork. LOGGING_CONFIG = None LOGGING = {} # CEF Logging CEF_PRODUCT = 'Playdoh' CEF_VENDOR = 'Mozilla' CEF_VERSION = '0' CEF_DEVICE_VERSION = '0' ## Accepted locales # Tells the product_details module where to find our local JSON files. # This ultimately controls how LANGUAGES are constructed. PROD_DETAILS_DIR = path('lib/product_details_json') # On dev instances, the list of accepted locales defaults to the contents of # the `locale` directory within a project module or, for older Playdoh apps, # the root locale directory. A localizer can add their locale in the l10n # repository (copy of which is checked out into `locale`) in order to start # testing the localization on the dev server. try: DEV_LANGUAGES = [ os.path.basename(loc).replace('_', '-') for loc in itertools.chain(glob.iglob(ROOT + '/locale/*'), # old style glob.iglob(ROOT + '/*/locale/*')) if (os.path.isdir(loc) and os.path.basename(loc) != 'templates') ] except OSError: DEV_LANGUAGES = ('en-US',) def lazy_lang_url_map():
LANGUAGE_URL_MAP = lazy(lazy_lang_url_map, dict)() # Override Django's built-in with our native names def lazy_langs(): from django.conf import settings from product_details import product_details langs = DEV_LANGUAGES if settings.DEV else settings.PROD_LANGUAGES return dict([(lang.lower(), product_details.languages[lang]['native']) for lang in langs if lang in product_details.languages]) LANGUAGES = lazy(lazy_langs, dict)() # Tells the extract script what files to look for L10n in and what function # handles the extraction. The Tower library expects this. DOMAIN_METHODS = { 'messages': [ # Searching apps dirs only exists for historic playdoh apps. # See playdoh's base settings for how message paths are set. ('apps/**.py', 'tower.management.commands.extract.extract_tower_python'), ('apps/**/templates/**.html', 'tower.management.commands.extract.extract_tower_template'), ('templates/**.html', 'tower.management.commands.extract.extract_tower_template'), ], } # Paths that don't require a locale code in the URL. SUPPORTED_NONLOCALES = ['media', 'static', 'admin'] ## Media and templates. # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = path('static') # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'jingo.Loader', 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.media', 'django.core.context_processors.request', 'session_csrf.context_processor', 'django.contrib.messages.context_processors.messages', 'lib.context_processors.i18n', 'lib.context_processors.globals', #'jingo_minify.helpers.build_ids', ) def get_template_context_processors(exclude=(), append=(), current={'processors': TEMPLATE_CONTEXT_PROCESSORS}): """ Returns TEMPLATE_CONTEXT_PROCESSORS without the processors listed in exclude and with the processors listed in append. The use of a mutable dict is intentional, in order to preserve the state of the TEMPLATE_CONTEXT_PROCESSORS tuple across multiple settings files. 
""" current['processors'] = tuple( [p for p in current['processors'] if p not in exclude] ) + tuple(append) return current['processors'] TEMPLATE_DIRS = ( path('templates'), ) # Storage of static files COMPRESS_ROOT = STATIC_ROOT COMPRESS_CSS_FILTERS = ( 'compressor.filters.css_default.CssAbsoluteFilter', 'compressor.filters.cssmin.CSSMinFilter' ) COMPRESS_PRECOMPILERS = ( #('text/coffeescript', 'coffee --compile --stdio'), ('text/less', 'lessc {infile} {outfile}'), #('text/x-sass', 'sass {infile} {outfile}'), #('text/x-scss', 'sass --scss {infile} {outfile}'), ) STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder', ) def JINJA_CONFIG(): import jinja2 from django.conf import settings # from caching.base import cache config = {'extensions': ['tower.template.i18n', 'jinja2.ext.do', 'jinja2.ext.with_', 'jinja2.ext.loopcontrols'], 'finalize': lambda x: x if x is not None else ''} # if 'memcached' in cache.scheme and not settings.DEBUG: # We're passing the _cache object directly to jinja because # Django can't store binary directly; it enforces unicode on it. # Details: http://jinja.pocoo.org/2/documentation/api#bytecode-cache # and in the errors you get when you try it the other way. # bc = jinja2.MemcachedBytecodeCache(cache._cache, # "%sj2:" % settings.CACHE_PREFIX) # config['cache_size'] = -1 # Never clear the cache # config['bytecode_cache'] = bc return config # Path to Java. Used for compress_assets. JAVA_BIN = '/usr/bin/java' # Sessions # # By default, be at least somewhat secure with our session cookies. SESSION_COOKIE_HTTPONLY = True SESSION_COOKIE_SECURE = True ## Tests TEST_RUNNER = 'test_utils.runner.RadicalTestSuiteRunner' ## Celery # True says to simulate background tasks without actually using celeryd. # Good for local development in case celeryd is not running. CELERY_ALWAYS_EAGER = True BROKER_CONNECTION_TIMEOUT = 0.1 CELERY_RESULT_BACKEND = 'amqp' CELERY_IGNORE_RESULT = True CELERY_EAGER_PROPAGATES_EXCEPTIONS = True # Time in seconds before celery.exceptions.SoftTimeLimitExceeded is raised. # The task can catch that and recover but should exit ASAP. CELERYD_TASK_SOFT_TIME_LIMIT = 60 * 2 ## Arecibo # when ARECIBO_SERVER_URL is set, it can use celery or the regular wrapper ARECIBO_USES_CELERY = True # For absolute urls try: DOMAIN = socket.gethostname() except socket.error: DOMAIN = 'localhost' PROTOCOL = "http://" PORT = 80 ## django-mobility MOBILE_COOKIE = 'mobile' ######### # Cyder # ######### TESTING = True if sys.argv[1:] and sys.argv[1] == 'test' else False MIGRATING = (True if sys.argv[1:] and sys.argv[1] == 'maintain_migrate' else False) ROOT_URLCONF = 'cyder.urls' APPEND_SLASH = True MEDIA_ROOT = path('media') MEDIA_URL = '/media/' _base = os.path.dirname(__file__) site_root = os.path.realpath(os.path.join(_base, '../')) sys.path.append(site_root) sys.path.append(site_root + '/vendor') EMAIL_SUFFIX = '@onid.oregonstate.edu' CAS_SERVER_URL = 'https://login.oregonstate.edu/cas/login' CAS_AUTO_CREATE_USERS = False BUG_REPORT_EMAIL = '[email protected]' EMAIL_HOST = 'mail.oregonstate.edu' SASS_PREPROCESS = True JINGO_MINIFY_USE_STATIC = False SOUTH_TESTS_MIGRATE = False # Bundles is a dictionary of two dictionaries, css and js, which list css files # and js files that can be bundled together by the minify app. 
MINIFY_BUNDLES = { 'css': { 'cyder_css': ( 'css/lib/jquery-ui-1.8.11.custom.css', 'css/sticky_footer.css', 'css/globals.scss', 'css/base.scss', 'css/forms.scss', 'css/tables.scss', ), 'search': ('css/search.scss',), 'tags_css': ('css//lib/jquery.tagsinput.css',), }, 'js': { 'cyder_js': ( 'js/lib/jquery-1.11.1.min.js', 'js/lib/jquery-migrate-1.2.1.min.js', 'js/lib/attribute_adder.js', 'js/lib/jQuery.rightclick.js', 'js/lib/jquery.validate.min.js', 'js/lib/jquery-ui.min.js', 'js/lib/tablesorter.js', 'js/lib/editablegrid/editablegrid.js', 'js/lib/editablegrid/editablegrid_renderers.js', 'js/lib/editablegrid/editablegrid_editors.js', 'js/lib/editablegrid/editablegrid_validators.js', 'js/lib/editablegrid/editablegrid_utils.js', 'js/lib/editablegrid/editablegrid_charts.js', 'js/utils.js', 'js/application.js', 'js/dhcp_raw_include.js', 'js/views.js', 'js/cy_delete.js', 'js/rangewizard.js', 'js/mobile.js', ), 'rangeform': ( 'js/rangeform.js', ), 'tables': ( 'js/tables.js', ), 'admin': ( 'js/admin.js', ), 'ctnr': ( 'js/ctnr/ctnr.js', ), 'cyuser': ( 'js/cyuser/cyuser.js', ), 'systemform': ( 'js/systemform.js', ), 'bugreport': ( 'js/bugreport.js', ), 'tags_js': ( 'js/lib/jquery.tagsinput.js', ), } } INSTALLED_APPS = [ # Local apps 'compressor', 'tower', # for ./manage.py extract (L10n) 'cronjobs', # for ./manage.py cron * cmd line tasks 'django_browserid', # Django contrib apps 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.staticfiles', # 'django.contrib.sites', # 'django.contrib.messages', # Uncomment the next line to enable the admin: # 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', # Third-party apps, patches, fixes 'commonware.response.cookies', 'djcelery', 'django_nose', 'session_csrf', # L10n 'product_details', # Cyder 'cyder', # Third party apps 'south', 'django_cas', 'djcelery', 'django_extensions', 'django_nose', 'jingo_minify', 'rest_framework', # Django contrib apps 'django.contrib.sessions', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.admin', 'django.contrib.messages', ] MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', #'django_cas.middleware.CASMiddleware', 'cyder.middleware.dev_authentication.DevAuthenticationMiddleware', ) TEMPLATE_CONTEXT_PROCESSORS += ( 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.request', 'django.core.context_processors.csrf', 'django.contrib.messages.context_processors.messages' ) SESSION_COOKIE_NAME = 'cyder' SESSION_COOKIE_SECURE = False AUTH_PROFILE_MODULE = 'cyder.UserProfile' AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', #'django_cas.backends.CASBackend', ) # Because Jinja2 is the default template loader, add any non-Jinja templated # apps here: JINGO_EXCLUDE_APPS = [ 'admin', 'debug_toolbar', 'rest_framework', 'cyder.api.authtoken', ] DJANGO_TEMPLATE_APPS = ['admin'] LOGGING = dict(loggers=dict(playdoh={'level': logging.INFO})) # # Use this if you have localizable HTML files: # DOMAIN_METHODS['lhtml'] = [ # ('**/templates/**.lhtml', # 'tower.management.commands.extract.extract_tower_template'), # ] # # Use this if you have localizable HTML files: # DOMAIN_METHODS['javascript'] = [ # # Make sure that this won't pull in strings 
from external libraries you # # may use. # ('media/js/**.js', 'javascript'), # ] #TEST_RUNNER = 'django_nose.NoseTestSuiteRunner' BUILD_PATH = 'builds' INTERNAL_IPS = ('127.0.0.1', '10.22.74.139', '10.250.2.54') # Use sha 256 by default but support any other algorithm: BASE_PASSWORD_HASHERS = ( 'django.contrib.auth.hashers.SHA1PasswordHasher', 'django.contrib.auth.hashers.MD5PasswordHasher', 'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher', ) HMAC_KEYS = { # for bcrypt only #'2012-06-06': 'cheesecake', } from django_sha2 import get_password_hashers PASSWORD_HASHERS = get_password_hashers(BASE_PASSWORD_HASHERS, HMAC_KEYS) # Migration settings POINTERS = [('128.193.76.253', 'cob-dc81.bus.oregonstate.edu', 'forward'), ('128.193.76.254', 'cob-dc82.bus.oregonstate.edu', 'forward'), ('128.193.76.252', 'cob-dc83.bus.oregonstate.edu', 'forward'), ('128.193.76.255', 'cob-dc84.bus.oregonstate.edu', 'forward'), ] NONDELEGATED_NS = ['dns.merit.net', 'ns1.nero.net', 'ns1.oregonstate.edu', 'ns1.ucsb.edu', 'ns2.oregonstate.edu'] SECONDARY_ZONES = ["oscs.orst.edu", "oscs.oregonstate.edu", "oscs.orst.net", "100.193.128.in-addr.arpa", "101.193.128.in-addr.arpa", "4.215.10.in-addr.arpa", "5.215.10.in-addr.arpa", "bus.oregonstate.edu", "74.193.128.in-addr.arpa", "75.193.128.in-addr.arpa", "76.193.128.in-addr.arpa", "77.193.128.in-addr.arpa", "78.193.128.in-addr.arpa", "ceoas.oregonstate.edu", "coas.oregonstate.edu", "oce.orst.edu", "64.193.128.in-addr.arpa", "65.193.128.in-addr.arpa", "66.193.128.in-addr.arpa", "67.193.128.in-addr.arpa", "68.193.128.in-addr.arpa", "69.193.128.in-addr.arpa", "70.193.128.in-addr.arpa", "71.193.128.in-addr.arpa"] REVERSE_SOAS = [ '139.201.199', '17.211.140', '18.211.140', '19.211.140', '20.211.140', '21.211.140', '28.211.140', '32.211.140', '33.211.140', '162.211.140', '163.211.140', '16.211.140', '193.128', '23.211.140', '165.211.140', '10', '26.211.140', '71.211.140', '224.211.140', '225.211.140', '226.211.140', '227.211.140', '228.211.140', '229.211.140', '230.211.140', '231.211.140', '232.211.140', '233.211.140', '234.211.140', '235.211.140', '236.211.140', '237.211.140', '238.211.140', '239.211.140', '100.193.128', '101.193.128', '74.193.128', '75.193.128', '76.193.128', '77.193.128', '78.193.128', '64.193.128', '65.193.128', '66.193.128', '67.193.128', '68.193.128', '69.193.128', '70.193.128', '71.193.128', ] NONAUTHORITATIVE_DOMAINS = [ 'nero.net', 'peak.org', 'orvsd.org', 'pdx.orvsd.org', ] # This list contains tuples that have a zone's name as their 0th element and a # view's name as the 1st element. For example: # # ('mozilla.net', 'public'), # ('mozilla.net', 'private') # # This will cause the public and private view of the mozilla.net zone to not # have a config statement in the produced config/master.private and # config/master.public files. The files net/mozilla/mozilla.net.public and # net/mozilla.net.private *will* be generated and written to disk. ZONES_WITH_NO_CONFIG = [ ] REST_FRAMEWORK = { 'DEFAULT_PERMISSION_CLASSES': ( # 'cyder.api.v1.permissions.ReadOnlyIfAuthenticated', 'cyder.api.v1.permissions.ReadOnlyIfAuthenticatedWriteIfSpecialCase', ), 'DEFAULT_AUTHENTICATION_CLASSES': ( 'cyder.api.v1.authentication.CyderTokenAuthentication', ), 'PAGINATE_BY': 25, 'PAGINATE_BY_PARAM': 'count', 'MAX_PAGINATE_BY': 100, 'DEFAULT_FILTER_BACKENDS': ( 'cyder.api.v1.filter.SearchFieldFilter', ), 'DEFAULT_RENDERER_CLASSES': ( 'rest_framework.renderers.JSONRenderer', ), } # bindbuild settings # ================== BINDBUILD = { # stage_dir: Where test builds should go. 
This shouldn't be under # version control. 'stage_dir': '/tmp/dns_stage/', # prod_dir: This is the directory where Cyder will place its DNS files. # This should be a Git repo. 'prod_dir': '/tmp/dns_prod/cyzones/', # bind_prefix: This is the path to where Cyder zone files are built # relative to the root of the Git repo. This is usually a substring of # prod_dir. 'bind_prefix': '/tmp/dns_prod/cyzones/', 'lock_file': '/tmp/cyder_dns.lock', 'pid_file': '/tmp/cyder_dns.pid', 'named_checkzone': 'named-checkzone', 'named_checkconf': 'named-checkconf', 'named_checkzone_opts': '', 'line_change_limit': 500, # Only one zone at a time should be removed 'line_removal_limit': 10, 'stop_file': '/tmp/cyder_dns.stop', 'stop_file_email_interval': 1800, # 30 minutes 'last_run_file': '/tmp/cyder.last_run', 'log_syslog': False, } # dhcp_build settings # =================== DHCPBUILD = { # stage_dir: Where test builds should go. This shouldn't be under # version control. 'stage_dir': '/tmp/dhcp/stage', # prod_dir: Where Cyder will place the dhcpd configuration file. This # should be a Git repo. 'prod_dir': '/tmp/dhcp/prod', 'lock_file': '/tmp/cyder_dhcp.lock', 'pid_file': '/tmp/cyder_dhcp.pid', 'dhcpd': 'dhcpd', # target_file: The configuration file that will be generated 'target_file': 'dhcpd.conf.data', # check_file: The conf file whose syntax will be checked (None means don't # check any file) 'check_file': None, 'line_change_limit': 500, 'line_removal_limit': None, 'stop_file': '/tmp/cyder_dhcp.stop', 'stop_file_email_interval': 1800, # 30 minutes 'log_syslog': False, } DATETIME_INPUT_FORMATS = ( '%m/%d/%y', # '10/25/06' '%m/%d/%y %H:%M', '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59' '%m/%d/%Y %H:%M', # '10/25/2006 14:30' '%m/%d/%Y', # '10/25/2006' '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59' ) ############################### # more copied from funfactory # ############################### ## Middlewares, apps, URL configs. def get_middleware(exclude=(), append=(), current={'middleware': MIDDLEWARE_CLASSES}): """ Returns MIDDLEWARE_CLASSES without the middlewares listed in exclude and with the middlewares listed in append. The use of a mutable dict is intentional, in order to preserve the state of the MIDDLEWARE_CLASSES tuple across multiple settings files. """ current['middleware'] = tuple( [m for m in current['middleware'] if m not in exclude] ) + tuple(append) return current['middleware'] def get_apps(exclude=(), append=(), current={'apps': INSTALLED_APPS}): """ Returns INSTALLED_APPS without the apps listed in exclude and with the apps listed in append. The use of a mutable dict is intentional, in order to preserve the state of the INSTALLED_APPS tuple across multiple settings files. """ current['apps'] = tuple( [a for a in current['apps'] if a not in exclude] ) + tuple(append) return current['apps']
from django.conf import settings langs = settings.DEV_LANGUAGES if settings.DEV else settings.PROD_LANGUAGES return dict([(i.lower(), i) for i in langs])
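The get_template_context_processors, get_middleware and get_apps helpers above all use the same mutable-default-argument trick to carry the filtered tuples across multiple settings modules. A minimal sketch of how a local override file (cyder/settings/local.py, as the header comment suggests) might call them, assuming it star-imports this base module first; the specific apps, middlewares and processors passed below are placeholders for illustration, not part of the project:

# cyder/settings/local.py -- hypothetical local override, assuming it does
# `from cyder.settings.base import *` so the helpers defined above are in scope.
from cyder.settings.base import *  # noqa

# Drop one app and append another; the helpers mutate shared state, so any
# later settings file sees the already-filtered tuples.
INSTALLED_APPS = get_apps(
    exclude=('django_cas',),    # placeholder app to remove
    append=('debug_toolbar',),  # placeholder app to add
)

MIDDLEWARE_CLASSES = get_middleware(
    append=('debug_toolbar.middleware.DebugToolbarMiddleware',),  # placeholder
)

TEMPLATE_CONTEXT_PROCESSORS = get_template_context_processors(
    exclude=('lib.context_processors.globals',),  # placeholder processor
)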
ideascale.rs
use crate::Result; use std::io::Write; use std::{fs::File, path::PathBuf}; use structopt::StructOpt; #[derive(StructOpt, Debug)] #[structopt(setting = structopt::clap::AppSettings::ColoredHelp)] pub struct
{ #[structopt(long = "input")] pub input: PathBuf, /// proposals output json #[structopt( long = "proposals", default_value = "../resources/external/proposals.json" )] pub proposals: PathBuf, /// challenges output json #[structopt( long = "challenges", default_value = "../resources/external/challenges.json" )] pub challenges: PathBuf, } impl ConvertFromIdeascale { pub fn exec(self) -> Result<()> { std::env::set_var("RUST_BACKTRACE", "full"); let data: serde_json::Value = serde_json::from_str(&jortestkit::file::read_file(&self.input))?; let proposals = &data["proposals.csv"]; let challenges = &data["challenges.csv"]; let content = serde_json::to_string_pretty(&proposals)?; let mut file = File::create(self.proposals)?; file.write_all(content.as_bytes())?; let content = serde_json::to_string_pretty(&challenges)?; let mut file = File::create(self.challenges)?; file.write_all(content.as_bytes())?; Ok(()) } }
ConvertFromIdeascale
md5_transport_test.go
package md5transport import ( "bytes" "crypto/md5" "encoding/hex" "io/ioutil" "math/rand" "net/http" "net/http/httptest" "strconv" "testing" "time" "github.com/stretchr/testify/assert" ) func TestMd5(t *testing.T) { rand.Seed(time.Now().UnixNano()) ast := assert.New(t) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { md5Str := r.Header.Get("X-Md5") b, err := ioutil.ReadAll(r.Body) ast.Nil(err) if len(b) == 0 { ast.Equal("", md5Str, r.URL.Path) return } hexB := md5.Sum(b) md5exp := hex.EncodeToString(hexB[:]) ast.Equal(md5exp, md5Str, r.URL.Path) return }))
md5tp := NewTransport(http.DefaultTransport) client := http.Client{Transport: md5tp} _, err := client.Get(ts.URL + "/testget") ast.Nil(err) for n := 1; n <= 5000; n *= 2 { path := "/" + strconv.Itoa(n) b := make([]byte, n) rand.Read(b) reader := bytes.NewBuffer(b) _, err := client.Post(ts.URL+path, "application/octet-stream", reader) ast.Nil(err) } }
defer ts.Close()
index.ts
export { ProgressComponent } from './progress.component';
request_password_change_token_via_email.rs
//! [POST /_matrix/client/r0/account/password/email/requestToken](https://matrix.org/docs/spec/client_server/r0.6.0#post-matrix-client-r0-account-password-email-requesttoken) use js_int::UInt; use ruma_api::ruma_api; use super::IdentityServerInfo; ruma_api! { metadata { description: "Request that a password change token is sent to the given email address.", method: POST, name: "request_password_change_token_via_email", path: "/_matrix/client/r0/account/password/email/requestToken", rate_limited: false, requires_authentication: false, } request { /// Client-generated secret string used to protect this session. pub client_secret: String, /// The email address. pub email: String,
/// Return URL for identity server to redirect the client back to. #[serde(skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, /// Optional identity server hostname and access token. Deprecated since r0.6.0. #[serde(flatten)] #[serde(skip_serializing_if = "Option::is_none")] pub identity_server_info: Option<IdentityServerInfo>, } response { /// The session identifier given by the identity server. pub sid: String, /// URL to submit validation token to. If omitted, verification happens without client. #[serde(skip_serializing_if = "Option::is_none")] pub submit_url: Option<String> } error: crate::Error }
/// Used to distinguish protocol level retries from requests to re-send the email. pub send_attempt: UInt,
mysql_server_types.go
/* Copyright 2018 The Crossplane Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha1 import ( "strconv" "github.com/Azure/azure-sdk-for-go/services/mysql/mgmt/2017-12-01/mysql" corev1alpha1 "github.com/crossplaneio/crossplane/pkg/apis/core/v1alpha1" "github.com/crossplaneio/crossplane/pkg/util" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( // OperationCreateServer is the operation type for creating a new mysql server OperationCreateServer = "createServer" // OperationCreateFirewallRules is the operation type for creating a firewall rule OperationCreateFirewallRules = "createFirewallRules" ) // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // MysqlServer is the Schema for the instances API // +k8s:openapi-gen=true // +groupName=database.azure // +kubebuilder:printcolumn:name="STATUS",type="string",JSONPath=".status.state" // +kubebuilder:printcolumn:name="CLASS",type="string",JSONPath=".spec.classRef.name" // +kubebuilder:printcolumn:name="VERSION",type="string",JSONPath=".spec.version" // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" type MysqlServer struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec MysqlServerSpec `json:"spec,omitempty"` Status MysqlServerStatus `json:"status,omitempty"` } // MysqlServerSpec defines the desired state of MysqlServer type MysqlServerSpec struct { ResourceGroupName string `json:"resourceGroupName"` Location string `json:"location"` PricingTier PricingTierSpec `json:"pricingTier"` StorageProfile StorageProfileSpec `json:"storageProfile"` AdminLoginName string `json:"adminLoginName"` Version string `json:"version"` SSLEnforced bool `json:"sslEnforced,omitempty"` // Kubernetes object references ClaimRef *v1.ObjectReference `json:"claimRef,omitempty"` ClassRef *v1.ObjectReference `json:"classRef,omitempty"` ProviderRef v1.LocalObjectReference `json:"providerRef"` ConnectionSecretRef v1.LocalObjectReference `json:"connectionSecretRef,omitempty"` // ReclaimPolicy identifies how to handle the cloud resource after the deletion of this type ReclaimPolicy corev1alpha1.ReclaimPolicy `json:"reclaimPolicy,omitempty"` } // MysqlServerStatus defines the observed state of MysqlServer type MysqlServerStatus struct { corev1alpha1.ConditionedStatus corev1alpha1.BindingStatusPhase State string `json:"state,omitempty"` Message string `json:"message,omitempty"` // the external ID to identify this resource in the cloud provider ProviderID string `json:"providerID,omitempty"` // Endpoint of the MySQL Server instance used in connection strings Endpoint string `json:"endpoint,omitempty"` // RunningOperation stores any current long running operation for this instance across // reconciliation attempts. 
This will be a serialized Azure MySQL Server API object that will // be used to check the status and completion of the operation during each reconciliation. // Once the operation has completed, this field will be cleared out. RunningOperation string `json:"runningOperation,omitempty"` // RunningOperationType is the type of the currently running operation RunningOperationType string `json:"runningOperationType,omitempty"` } // PricingTierSpec represents the performance and cost oriented properties of the server type PricingTierSpec struct { Tier string `json:"tier"` VCores int `json:"vcores"` Family string `json:"family"` } // StorageProfileSpec represents storage related properties of the server type StorageProfileSpec struct { StorageGB int `json:"storageGB"` BackupRetentionDays int `json:"backupRetentionDays,omitempty"` GeoRedundantBackup bool `json:"geoRedundantBackup,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // MysqlServerList contains a list of MysqlServer type MysqlServerList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []MysqlServer `json:"items"` } // NewMySQLServerSpec creates a new MySQLServerSpec based on the given properties map func NewMySQLServerSpec(properties map[string]string) *MysqlServerSpec { spec := &MysqlServerSpec{ ReclaimPolicy: corev1alpha1.ReclaimRetain, } val, ok := properties["adminLoginName"] if ok { spec.AdminLoginName = val } val, ok = properties["resourceGroupName"] if ok { spec.ResourceGroupName = val } val, ok = properties["location"] if ok { spec.Location = val } val, ok = properties["version"] if ok { spec.Version = val } val, ok = properties["sslEnforced"] if ok { if sslEnforced, err := strconv.ParseBool(val); err == nil { spec.SSLEnforced = sslEnforced } } val, ok = properties["tier"] if ok { spec.PricingTier.Tier = val } val, ok = properties["vcores"] if ok
val, ok = properties["family"] if ok { spec.PricingTier.Family = val } val, ok = properties["storageGB"] if ok { if storageGB, err := strconv.Atoi(val); err == nil { spec.StorageProfile.StorageGB = storageGB } } val, ok = properties["backupRetentionDays"] if ok { if backupRetentionDays, err := strconv.Atoi(val); err == nil { spec.StorageProfile.BackupRetentionDays = backupRetentionDays } } val, ok = properties["geoRedundantBackup"] if ok { if geoRedundantBackup, err := strconv.ParseBool(val); err == nil { spec.StorageProfile.GeoRedundantBackup = geoRedundantBackup } } return spec } // ConnectionSecretName returns a secret name from the reference func (m *MysqlServer) ConnectionSecretName() string { if m.Spec.ConnectionSecretRef.Name == "" { // the user hasn't specified the name of the secret they want the connection information // stored in, generate one now m.Spec.ConnectionSecretRef.Name = m.Name } return m.Spec.ConnectionSecretRef.Name } // Endpoint returns the MySQL Server endpoint for connection func (m *MysqlServer) Endpoint() string { return m.Status.Endpoint } // ObjectReference to this MySQL Server instance func (m *MysqlServer) ObjectReference() *v1.ObjectReference { return util.ObjectReference(m.ObjectMeta, util.IfEmptyString(m.APIVersion, APIVersion), util.IfEmptyString(m.Kind, MysqlServerKind)) } // OwnerReference to use this instance as an owner func (m *MysqlServer) OwnerReference() metav1.OwnerReference { return *util.ObjectToOwnerReference(m.ObjectReference()) } // IsAvailable for usage/binding func (m *MysqlServer) IsAvailable() bool { return m.Status.State == string(mysql.ServerStateReady) } // IsBound determines if the resource is in a bound binding state func (m *MysqlServer) IsBound() bool { return m.Status.Phase == corev1alpha1.BindingStateBound } // SetBound sets the binding state of this resource func (m *MysqlServer) SetBound(state bool) { if state { m.Status.Phase = corev1alpha1.BindingStateBound } else { m.Status.Phase = corev1alpha1.BindingStateUnbound } } // ValidVersionValues returns the valid set of engine version values. func ValidVersionValues() []string { return []string{"5.6", "5.7"} }
{ if vcores, err := strconv.Atoi(val); err == nil { spec.PricingTier.VCores = vcores } }
icon.rs
use iced::{Font, HorizontalAlignment, Length, Text}; const ICONS: Font = Font::External { name: "Icons", bytes: include_bytes!("../../static/icons/bootstrap-icons.ttf"), }; fn icon(unicode: char) -> Text { Text::new(&unicode.to_string()) .font(ICONS) .width(Length::Units(20)) .horizontal_alignment(HorizontalAlignment::Center) .size(20) } pub fn home_icon() -> Text { icon('\u{F3DC}') } pub fn send_icon() -> Text
pub fn deposit_icon() -> Text { icon('\u{F123}') } #[allow(dead_code)] pub fn withdrawal_icon() -> Text { icon('\u{F144}') } pub fn turnback_icon() -> Text { icon('\u{F131}') } #[allow(dead_code)] pub fn history_icon() -> Text { icon('\u{F292}') } pub fn vaults_icon() -> Text { icon('\u{F1C7}') } pub fn settings_icon() -> Text { icon('\u{F3C5}') } pub fn block_icon() -> Text { icon('\u{F1C8}') } pub fn network_icon() -> Text { icon('\u{F3ED}') } pub fn dot_icon() -> Text { icon('\u{F287}') } pub fn clipboard_icon() -> Text { icon('\u{F28E}') } pub fn shield_icon() -> Text { icon('\u{F517}') } pub fn shield_notif_icon() -> Text { icon('\u{F50A}') } pub fn shield_check_icon() -> Text { icon('\u{F509}') } pub fn person_check_icon() -> Text { icon('\u{F4AF}') } #[allow(dead_code)] pub fn arrow_up_icon() -> Text { icon('\u{F148}') } pub fn tooltip_icon() -> Text { icon('\u{F410}') } pub fn plus_icon() -> Text { icon('\u{F4D7}') } pub fn warning_icon() -> Text { icon('\u{F31B}') } #[allow(dead_code)] pub fn stakeholder_icon() -> Text { icon('\u{F4AE}') } #[allow(dead_code)] pub fn manager_icon() -> Text { icon('\u{F4B4}') } pub fn done_icon() -> Text { icon('\u{F26B}') } pub fn todo_icon() -> Text { icon('\u{F28A}') }
{ icon('\u{F144}') }
admin.py
from django.contrib import admin

from .models import Snack

# Register your models here.

admin.site.register(Snack)
cell_area_context.rs
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use crate::CellArea; use glib::object::Cast; use glib::object::IsA; use glib::signal::connect_raw; use glib::signal::SignalHandlerId; use glib::translate::*; use glib::StaticType; use std::boxed::Box as Box_; use std::fmt; use std::mem; use std::mem::transmute; glib::wrapper! { #[doc(alias = "GtkCellAreaContext")] pub struct CellAreaContext(Object<ffi::GtkCellAreaContext, ffi::GtkCellAreaContextClass>); match fn { type_ => || ffi::gtk_cell_area_context_get_type(), } } impl CellAreaContext { pub const NONE: Option<&'static CellAreaContext> = None; } pub trait CellAreaContextExt: 'static { #[doc(alias = "gtk_cell_area_context_allocate")] fn allocate(&self, width: i32, height: i32); #[doc(alias = "gtk_cell_area_context_get_allocation")] #[doc(alias = "get_allocation")] fn allocation(&self) -> (i32, i32); #[doc(alias = "gtk_cell_area_context_get_area")] #[doc(alias = "get_area")] fn area(&self) -> Option<CellArea>; #[doc(alias = "gtk_cell_area_context_get_preferred_height")] #[doc(alias = "get_preferred_height")] fn preferred_height(&self) -> (i32, i32); #[doc(alias = "gtk_cell_area_context_get_preferred_height_for_width")] #[doc(alias = "get_preferred_height_for_width")] fn preferred_height_for_width(&self, width: i32) -> (i32, i32); #[doc(alias = "gtk_cell_area_context_get_preferred_width")] #[doc(alias = "get_preferred_width")] fn preferred_width(&self) -> (i32, i32); #[doc(alias = "gtk_cell_area_context_get_preferred_width_for_height")] #[doc(alias = "get_preferred_width_for_height")] fn preferred_width_for_height(&self, height: i32) -> (i32, i32); #[doc(alias = "gtk_cell_area_context_push_preferred_height")] fn push_preferred_height(&self, minimum_height: i32, natural_height: i32); #[doc(alias = "gtk_cell_area_context_push_preferred_width")] fn push_preferred_width(&self, minimum_width: i32, natural_width: i32); #[doc(alias = "gtk_cell_area_context_reset")] fn reset(&self); #[doc(alias = "minimum-height")] fn minimum_height(&self) -> i32; #[doc(alias = "minimum-width")] fn minimum_width(&self) -> i32; #[doc(alias = "natural-height")] fn natural_height(&self) -> i32; #[doc(alias = "natural-width")] fn natural_width(&self) -> i32; #[doc(alias = "minimum-height")] fn connect_minimum_height_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; #[doc(alias = "minimum-width")] fn connect_minimum_width_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; #[doc(alias = "natural-height")] fn connect_natural_height_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; #[doc(alias = "natural-width")] fn connect_natural_width_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; } impl<O: IsA<CellAreaContext>> CellAreaContextExt for O { fn allocate(&self, width: i32, height: i32) { unsafe { ffi::gtk_cell_area_context_allocate(self.as_ref().to_glib_none().0, width, height); } } fn allocation(&self) -> (i32, i32) { unsafe { let mut width = mem::MaybeUninit::uninit(); let mut height = mem::MaybeUninit::uninit(); ffi::gtk_cell_area_context_get_allocation( self.as_ref().to_glib_none().0, width.as_mut_ptr(), height.as_mut_ptr(), ); let width = width.assume_init(); let height = height.assume_init(); (width, height) } } fn
(&self) -> Option<CellArea> { unsafe { from_glib_none(ffi::gtk_cell_area_context_get_area( self.as_ref().to_glib_none().0, )) } } fn preferred_height(&self) -> (i32, i32) { unsafe { let mut minimum_height = mem::MaybeUninit::uninit(); let mut natural_height = mem::MaybeUninit::uninit(); ffi::gtk_cell_area_context_get_preferred_height( self.as_ref().to_glib_none().0, minimum_height.as_mut_ptr(), natural_height.as_mut_ptr(), ); let minimum_height = minimum_height.assume_init(); let natural_height = natural_height.assume_init(); (minimum_height, natural_height) } } fn preferred_height_for_width(&self, width: i32) -> (i32, i32) { unsafe { let mut minimum_height = mem::MaybeUninit::uninit(); let mut natural_height = mem::MaybeUninit::uninit(); ffi::gtk_cell_area_context_get_preferred_height_for_width( self.as_ref().to_glib_none().0, width, minimum_height.as_mut_ptr(), natural_height.as_mut_ptr(), ); let minimum_height = minimum_height.assume_init(); let natural_height = natural_height.assume_init(); (minimum_height, natural_height) } } fn preferred_width(&self) -> (i32, i32) { unsafe { let mut minimum_width = mem::MaybeUninit::uninit(); let mut natural_width = mem::MaybeUninit::uninit(); ffi::gtk_cell_area_context_get_preferred_width( self.as_ref().to_glib_none().0, minimum_width.as_mut_ptr(), natural_width.as_mut_ptr(), ); let minimum_width = minimum_width.assume_init(); let natural_width = natural_width.assume_init(); (minimum_width, natural_width) } } fn preferred_width_for_height(&self, height: i32) -> (i32, i32) { unsafe { let mut minimum_width = mem::MaybeUninit::uninit(); let mut natural_width = mem::MaybeUninit::uninit(); ffi::gtk_cell_area_context_get_preferred_width_for_height( self.as_ref().to_glib_none().0, height, minimum_width.as_mut_ptr(), natural_width.as_mut_ptr(), ); let minimum_width = minimum_width.assume_init(); let natural_width = natural_width.assume_init(); (minimum_width, natural_width) } } fn push_preferred_height(&self, minimum_height: i32, natural_height: i32) { unsafe { ffi::gtk_cell_area_context_push_preferred_height( self.as_ref().to_glib_none().0, minimum_height, natural_height, ); } } fn push_preferred_width(&self, minimum_width: i32, natural_width: i32) { unsafe { ffi::gtk_cell_area_context_push_preferred_width( self.as_ref().to_glib_none().0, minimum_width, natural_width, ); } } fn reset(&self) { unsafe { ffi::gtk_cell_area_context_reset(self.as_ref().to_glib_none().0); } } fn minimum_height(&self) -> i32 { glib::ObjectExt::property(self.as_ref(), "minimum-height") } fn minimum_width(&self) -> i32 { glib::ObjectExt::property(self.as_ref(), "minimum-width") } fn natural_height(&self) -> i32 { glib::ObjectExt::property(self.as_ref(), "natural-height") } fn natural_width(&self) -> i32 { glib::ObjectExt::property(self.as_ref(), "natural-width") } fn connect_minimum_height_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_minimum_height_trampoline< P: IsA<CellAreaContext>, F: Fn(&P) + 'static, >( this: *mut ffi::GtkCellAreaContext, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(CellAreaContext::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::minimum-height\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_minimum_height_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_minimum_width_notify<F: Fn(&Self) + 'static>(&self, f: F) -> 
SignalHandlerId { unsafe extern "C" fn notify_minimum_width_trampoline< P: IsA<CellAreaContext>, F: Fn(&P) + 'static, >( this: *mut ffi::GtkCellAreaContext, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(CellAreaContext::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::minimum-width\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_minimum_width_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_natural_height_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_natural_height_trampoline< P: IsA<CellAreaContext>, F: Fn(&P) + 'static, >( this: *mut ffi::GtkCellAreaContext, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(CellAreaContext::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::natural-height\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_natural_height_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_natural_width_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_natural_width_trampoline< P: IsA<CellAreaContext>, F: Fn(&P) + 'static, >( this: *mut ffi::GtkCellAreaContext, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(CellAreaContext::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::natural-width\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_natural_width_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } } impl fmt::Display for CellAreaContext { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("CellAreaContext") } }
area
jupyterhub_config.py
# Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. # Configuration file for JupyterHub import os # pre-spawn settings NB_UID = 65534 NB_GID = 65534 CUDA = 'cuda' in os.environ['HOSTNODE'] c = get_config() # read users/teams & images import os, yaml with open('/srv/jupyterhub/config.yaml', 'r') as cfgfile: cfg = yaml.load(cfgfile, Loader=yaml.FullLoader) team_map = cfg['users'] # Whitlelist users and admins # google: remove @gmail.com c.Authenticator.allowed_users = list(team_map.keys()) c.Authenticator.admin_users = admin = set() for u, team in team_map.items(): if 'admin' in team: admin.add(u) # Spawn single-user servers as Docker containers # CustomDockerSpawner # form to select image def get_options_form(spawner): username = spawner.user.name # .split('@')[0] teams = cfg['users'][username] images = cfg['images'] # list of image letters for user img = {k:v for k,v in images.items() if k in teams } images = [] # unique list for t,i in img.items(): for k in i: if k not in images: images.append(k) if not CUDA: images = [i for i in images if i != 'G'] # dict of image label:build available_images = cfg['available_images'] allowed_images = [v for k,v in available_images.items() if k in images] images=[] for i in allowed_images: images = images | i.items() allowed_images = dict(images) allowed_images = dict(sorted(allowed_images.items(), key=lambda x: x[0])) # prepare form if len(allowed_images) > 1: option_t = '<option value="{image}" {selected}>{label}</option>' options = [ option_t.format( image=image, label=label, selected='selected' if image == spawner.image else '' ) for label, image in allowed_images.items() ] return """ <br><br> <h3>Select an image</h3><br><br>{havecuda}<br><br><b>User: {username}</b><br><br> <select class="form-control" name="image" required autofocus> {options} </select> """.format(options=options, username=username, havecuda='All can run CUDA' if CUDA else '') else: spawner.image = [v for k,v in allowed_images.items()][0] c.DockerSpawner.options_form = get_options_form def set_sudo(spawner): username = spawner.user.name teams = cfg['users'][username] if 'sudo' in teams: return 'yes' else: return 'no' def set_USER(spawner): username = spawner.user.name if username[0:4].isnumeric(): return username.upper() else: return username def set_HOME(spawner): return '/home/' + spawner.user.name def set_UID(spawner): UID = cfg['users'][spawner.user.name][0]['uid'] if UID >= 1 and UID < 65536: return UID else: return 1000 def set_GID(spawner): GID = cfg['users'][spawner.user.name][1]['gid'] if GID >= 1 and GID < 65536: return GID else: return 100 c.DockerSpawner.environment = { 'NB_USER': set_USER, 'NB_UID': set_UID, 'NB_GID': set_GID, 'NB_UMASK':'002', 'CHOWN_HOME':'yes', 'GRANT_SUDO': set_sudo, } home_dir = os.environ.get('HOME_DIR') # notebook_dir = '/home/' + spawner.user.name # c.DockerSpawner.notebook_dir = notebook_dir from dockerspawner import DockerSpawner class CustomDockerSpawner(DockerSpawner): # mount volumes by team def
(self): username = set_USER(self) # username = self.user.name # home dir self.volumes[f"{home_dir}/{username.split('@')[0]}"] = { 'bind': '/home/' + username , 'mode': 'rw', } # copy system /etc/group file self.volumes['/etc/group'] = { 'bind': '/tmp/group', 'mode': 'ro', } # mount /srv from files in /singleuser/srv/setup self.volumes[os.environ['JHUB_DIR']+'/singleuser/srv/setup'] = { 'bind': '/srv', 'mode': 'ro', } # user specific mounts as in config.yaml teams = cfg['users'][self.user.name] # lowercase mounts = cfg['mounts'] mounts = {k:v for k,v in mounts.items() if k in teams } for k,v in mounts.items(): for h,d in v.items(): self.volumes[h] = { 'bind': d[0].replace('USER',username), 'mode': d[1] } return super().start() # c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner' c.JupyterHub.spawner_class = CustomDockerSpawner # hub runs as 'root', c.DockerSpawner.extra_create_kwargs = { 'user': 'root', 'hostname': 'hub', } # nvidia # /dev/shm 64M > 16G if CUDA: c.DockerSpawner.extra_host_config = { 'runtime': 'nvidia', 'shm_size': '16gb' } # JupyterHub requires a single-user instance of the Notebook server, so we # default to using the `start-singleuser.sh` script included in the # jupyter/docker-stacks *-notebook images as the Docker run command when # spawning containers. Optionally, you can override the Docker run command # using the DOCKER_SPAWN_CMD environment variable. spawn_cmd = "start-singleuser.sh" c.DockerSpawner.extra_create_kwargs.update({ 'command': spawn_cmd }) # Connect containers to this Docker network network_name = os.environ['DOCKER_NETWORK_NAME'] c.DockerSpawner.use_internal_ip = True c.DockerSpawner.network_name = network_name # Pass the network name as argument to spawned containers c.DockerSpawner.extra_host_config.update({ 'network_mode': network_name }) # Mount the real user's Docker volume on the host to the notebook user's # notebook directory in the container #c.DockerSpawner.volumes = { 'jupyterhub-user-{username}': notebook_dir } # external proxy c.JupyterHub.cleanup_servers = False # tells the hub to not stop servers when the hub restarts (proxy runs separately). c.ConfigurableHTTPProxy.should_start = False # tells the hub that the proxy should not be started (because you start it yourself). c.ConfigurableHTTPProxy.auth_token = os.environ.get('CONFIGPROXY_AUTH_TOKEN') # token for authenticating communication with the proxy. c.ConfigurableHTTPProxy.api_url = 'http://jupyterproxy:8001' # the URL which the hub uses to connect to the proxy’s API. 
# Remove containers once they are stopped c.DockerSpawner.remove_containers = True # User containers will access hub by container name on the Docker network c.JupyterHub.base_url = '/jhub/' c.JupyterHub.hub_ip = 'jupyterhub' c.JupyterHub.hub_port = 8080 # don't need because we are behind an https reverse proxy # # TLS config: requires generating certificates # c.JupyterHub.port = 443 # c.JupyterHub.ssl_key = os.environ['SSL_KEY'] # c.JupyterHub.ssl_cert = os.environ['SSL_CERT'] # Persist hub data on volume mounted inside container data_dir = '/data' c.JupyterHub.cookie_secret_file = os.path.join(data_dir, 'jupyterhub_cookie_secret') c.JupyterHub.db_url = f'sqlite:///{data_dir}/jupyterhub.sqlite' # c.JupyterHub.db_url = 'postgresql://postgres:{password}@{host}/{db}'.format( # host=os.environ['POSTGRES_HOST'], # password=os.environ['POSTGRES_PASSWORD'], # db=os.environ['POSTGRES_DB'], # ) # reset database # c.JupyterHub.reset_db = False # Authenticate users ''' # GitHub c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator' c.GitHubOAuthenticator.oauth_callback_url = os.environ['OAUTH_CALLBACK_URL'] # Native # admin users in c.Authenticator.admin_users are automatically authorized when signup c.JupyterHub.authenticator_class = 'nativeauthenticator.NativeAuthenticator' ''' ##### multioauth # https://github.com/jupyterhub/oauthenticator/issues/136 from traitlets import List from jupyterhub.auth import Authenticator def url_path_join(*parts): return '/'.join([p.strip().strip('/') for p in parts]) class MultiOAuthenticator(Authenticator): authenticators = List(help="The subauthenticators to use", config=True) def __init__(self, *arg, **kwargs): super().__init__(*arg, **kwargs) self._authenticators = [] for authenticator_klass, url_scope, configs in self.authenticators: c = self.trait_values() c.update(configs) self._authenticators.append({"instance": authenticator_klass(**c), "url_scope": url_scope}) def get_custom_html(self, base_url): html = [] for authenticator in self._authenticators: login_service = authenticator["instance"].login_service if login_service == 'User/Pass': url = url_path_join(authenticator["url_scope"], "login") else: url = url_path_join(authenticator["url_scope"], "oauth_login") # html.append( # f""" # <div class="service-login"> # <a role="button" class='btn btn-jupyter btn-lg' href='{url}'> # Sign in with {login_service} # </a> # </div> # """ # ) return "\n".join(html) def get_handlers(self, app): routes = [] for _authenticator in self._authenticators: for path, handler in _authenticator["instance"].get_handlers(app): class SubHandler(handler): authenticator = _authenticator["instance"] routes.append((f'{_authenticator["url_scope"]}{path}', SubHandler)) return routes c.JupyterHub.authenticator_class = MultiOAuthenticator from oauthenticator.github import GitHubOAuthenticator from oauthenticator.google import GoogleOAuthenticator from nativeauthenticator import NativeAuthenticator #from oauthenticator.azuread import AzureAdOAuthenticator c.MultiOAuthenticator.authenticators = [ (GitHubOAuthenticator, '/github', { 'client_id': os.environ['GITHUB_CLIENT_ID'], 'client_secret': os.environ['GITHUB_CLIENT_SECRET'], 'oauth_callback_url': os.environ['GITHUB_CALLBACK_URL'] }), (GoogleOAuthenticator, '/google', { 'client_id': os.environ['GOOGLE_CLIENT_ID'], 'client_secret': os.environ['GOOGLE_CLIENT_SECRET'], 'oauth_callback_url': os.environ['GOOGLE_CALLBACK_URL'], 'login_service': 'Google' }), (NativeAuthenticator, '/', { 'login_service': 'User/Pass' }), ] import 
nativeauthenticator c.JupyterHub.template_paths = [f"{os.path.dirname(nativeauthenticator.__file__)}/templates/"] # template modified to allow github/google oauth # ["/usr/local/lib/python3.8/dist-packages/nativeauthenticator/templates/"] # google # https://oauthenticator.readthedocs.io/en/latest/api/gen/oauthenticator.google.html c.GoogleOAuthenticator.hosted_domain = ['gmail.com'] c.GoogleOAuthenticator.login_service = 'Google' c.GoogleOAuthenticator.delete_invalid_users = True c.NativeAuthenticator.check_common_password = True c.NativeAuthenticator.minimum_password_length = 8 c.NativeAuthenticator.allowed_failed_logins = 3 c.NativeAuthenticator.enable_signup = True # recaptcha config # https://www.google.com/recaptcha/admin/site/500725121/settings c.NativeAuthenticator.recaptcha_key = os.environ['RECAPCHA_KEY'] c.NativeAuthenticator.recaptcha_secret = os.environ['RECAPCHA_SECRET'] c.NativeAuthenticator.tos = 'Acepto las <a href="https://remote.genrisk.org/CDU.html" target="_blank">condiciones de uso</a>' ## enable authentication state0 c.MultiOAuthenticator.enable_auth_state = True import warnings if 'JUPYTERHUB_CRYPT_KEY' not in os.environ: warnings.warn( "Need JUPYTERHUB_CRYPT_KEY env for persistent auth_state.\n" " export JUPYTERHUB_CRYPT_KEY=$(openssl rand -hex 32)" ) c.CryptKeeper.keys = [ os.urandom(32) ] pass ''' # remove idle notebooks after inactive time # https://github.com/jupyterhub/jupyterhub-idle-culler import sys c.JupyterHub.services = [ { 'name': 'idle-culler', 'admin': True, 'command': [sys.executable, '-m', 'jupyterhub_idle_culler', '--timeout=3600'], } ] ''' # max simultaneous users c.JupyterHub.concurrent_spawn_limit = 10 # user limits # c.Spawner.cpu_limit = 2 # cores # c.Spawner.mem_limit = 8G
start
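The settings above read several structures out of /srv/jupyterhub/config.yaml (users, images, available_images, mounts). A sketch of the shape that file appears to assume, written as the Python dict yaml.load() would return; the usernames, team names and Docker images below are invented, and only the nesting is inferred from how this config indexes cfg:

# Hypothetical shape of /srv/jupyterhub/config.yaml, shown as the dict that
# yaml.load() returns. Names and images are made up; only the nesting mirrors
# the cfg lookups in the settings above.
cfg = {
    'users': {
        # value is a list: uid dict, gid dict, then team / role names
        'alice': [{'uid': 1001}, {'gid': 100}, 'team-a', 'admin', 'sudo'],
        'bob':   [{'uid': 1002}, {'gid': 100}, 'team-b'],
    },
    'images': {
        # team name -> list of image letters the team may use
        'team-a': ['D', 'G'],
        'team-b': ['D'],
    },
    'available_images': {
        # image letter -> {label shown in the spawn form: docker image}
        'D': {'Datascience': 'jupyter/datascience-notebook:latest'},
        'G': {'GPU': 'some/gpu-notebook:latest'},  # dropped when CUDA is absent
    },
    'mounts': {
        # team name -> {host path: [container path ('USER' is substituted), mode]}
        'team-a': {'/data/team-a': ['/home/USER/team-a', 'rw']},
    },
}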
muse_score_pdf_exporter.py
import sys

from common import *


def main(muse_score_path, directory_path):
if __name__ == "__main__":
    main(sys.argv[1], sys.argv[2])
muse_score_export(muse_score_path, directory_path, OutputFormat.pdf)
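For completeness, the script appears to take the MuseScore binary and a score directory as positional arguments; a hypothetical invocation (both paths are placeholders):

# python muse_score_pdf_exporter.py /usr/bin/mscore ./scores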
hui-entities-card.ts
import { css, CSSResultGroup, html, LitElement, PropertyValues, TemplateResult, } from "lit"; import { customElement, state } from "lit/decorators"; import { DOMAINS_TOGGLE } from "../../../common/const"; import { applyThemesOnElement } from "../../../common/dom/apply_themes_on_element"; import { computeDomain } from "../../../common/entity/compute_domain"; import "../../../components/ha-card"; import { HomeAssistant } from "../../../types"; import { computeCardSize } from "../common/compute-card-size"; import { findEntities } from "../common/find-entities"; import { processConfigEntities } from "../common/process-config-entities"; import "../components/hui-entities-toggle"; import { createHeaderFooterElement } from "../create-element/create-header-footer-element"; import { createRowElement } from "../create-element/create-row-element"; import { EntityConfig, LovelaceRow, LovelaceRowConfig, } from "../entity-rows/types"; import { LovelaceCard, LovelaceCardEditor, LovelaceHeaderFooter, } from "../types"; import { EntitiesCardConfig } from "./types"; @customElement("hui-entities-card") class
extends LitElement implements LovelaceCard { public static async getConfigElement(): Promise<LovelaceCardEditor> { await import("../editor/config-elements/hui-entities-card-editor"); return document.createElement("hui-entities-card-editor"); } public static getStubConfig( hass: HomeAssistant, entities: string[], entitiesFallback: string[] ): EntitiesCardConfig { const maxEntities = 3; const foundEntities = findEntities( hass, maxEntities, entities, entitiesFallback, ["light", "switch", "sensor"] ); return { type: "entities", entities: foundEntities }; } @state() private _config?: EntitiesCardConfig; private _hass?: HomeAssistant; private _configEntities?: LovelaceRowConfig[]; private _showHeaderToggle?: boolean; private _headerElement?: LovelaceHeaderFooter; private _footerElement?: LovelaceHeaderFooter; set hass(hass: HomeAssistant) { this._hass = hass; this.shadowRoot ?.querySelectorAll("#states > div > *") .forEach((element: unknown) => { (element as LovelaceRow).hass = hass; }); if (this._headerElement) { this._headerElement.hass = hass; } if (this._footerElement) { this._footerElement.hass = hass; } const entitiesToggle = this.shadowRoot?.querySelector( "hui-entities-toggle" ); if (entitiesToggle) { (entitiesToggle as any).hass = hass; } } public async getCardSize(): Promise<number> { if (!this._config) { return 0; } // +1 for the header let size = (this._config.title || this._showHeaderToggle ? 2 : 0) + (this._config.entities.length || 1); if (this._headerElement) { const headerSize = computeCardSize(this._headerElement); size += headerSize instanceof Promise ? await headerSize : headerSize; } if (this._footerElement) { const footerSize = computeCardSize(this._footerElement); size += footerSize instanceof Promise ? await footerSize : footerSize; } return size; } public setConfig(config: EntitiesCardConfig): void { if (!config.entities || !Array.isArray(config.entities)) { throw new Error("Entities must be specified"); } const entities = processConfigEntities(config.entities); this._config = config; this._configEntities = entities; if (config.title !== undefined && config.show_header_toggle === undefined) { // Default value is show toggle if we can at least toggle 2 entities. let toggleable = 0; for (const rowConf of entities) { if (!("entity" in rowConf)) { continue; } toggleable += Number(DOMAINS_TOGGLE.has(computeDomain(rowConf.entity))); if (toggleable === 2) { break; } } this._showHeaderToggle = toggleable === 2; } else { this._showHeaderToggle = config.show_header_toggle; } if (this._config.header) { this._headerElement = createHeaderFooterElement(this._config.header); if (this._hass) { this._headerElement.hass = this._hass; } } else { this._headerElement = undefined; } if (this._config.footer) { this._footerElement = createHeaderFooterElement(this._config.footer); if (this._hass) { this._footerElement.hass = this._hass; } } else { this._footerElement = undefined; } } protected updated(changedProps: PropertyValues): void { super.updated(changedProps); if (!this._config || !this._hass) { return; } const oldHass = changedProps.get("hass") as HomeAssistant | undefined; const oldConfig = changedProps.get("_config") as | EntitiesCardConfig | undefined; if ( !oldHass || !oldConfig || oldHass.themes !== this.hass.themes || oldConfig.theme !== this._config.theme ) { applyThemesOnElement(this, this._hass.themes, this._config.theme); } } protected render(): TemplateResult { if (!this._config || !this._hass) { return html``; } return html` <ha-card> ${this._headerElement ? 
html`<div class="header-footer header">${this._headerElement}</div>` : ""} ${!this._config.title && !this._showHeaderToggle && !this._config.icon ? "" : html` <h1 class="card-header"> <div class="name"> ${this._config.icon ? html` <ha-icon class="icon" .icon=${this._config.icon} ></ha-icon> ` : ""} ${this._config.title} </div> ${!this._showHeaderToggle ? html`` : html` <hui-entities-toggle .hass=${this._hass} .entities=${( this._configEntities!.filter( (conf) => "entity" in conf ) as EntityConfig[] ).map((conf) => conf.entity)} ></hui-entities-toggle> `} </h1> `} <div id="states" class="card-content"> ${this._configEntities!.map((entityConf) => this.renderEntity(entityConf) )} </div> ${this._footerElement ? html`<div class="header-footer footer">${this._footerElement}</div>` : ""} </ha-card> `; } static get styles(): CSSResultGroup { return css` ha-card { height: 100%; display: flex; flex-direction: column; justify-content: space-between; } .card-header { display: flex; justify-content: space-between; } .card-header .name { white-space: nowrap; overflow: hidden; text-overflow: ellipsis; } #states { flex: 1; } #states > * { margin: 8px 0; } #states > *:first-child { margin-top: 0; } #states > *:last-child { margin-bottom: 0; } #states > div > * { overflow: clip visible; } #states > div { position: relative; } .icon { padding: 0px 18px 0px 8px; } .header { border-top-left-radius: var(--ha-card-border-radius, 2px); border-top-right-radius: var(--ha-card-border-radius, 2px); margin-bottom: 16px; overflow: hidden; } .footer { border-bottom-left-radius: var(--ha-card-border-radius, 2px); border-bottom-right-radius: var(--ha-card-border-radius, 2px); margin-top: -16px; overflow: hidden; } `; } private renderEntity(entityConf: LovelaceRowConfig): TemplateResult { const element = createRowElement( !("type" in entityConf) && this._config!.state_color ? ({ state_color: true, ...(entityConf as EntityConfig), } as EntityConfig) : entityConf ); if (this._hass) { element.hass = this._hass; } return html`<div>${element}</div>`; } } declare global { interface HTMLElementTagNameMap { "hui-entities-card": HuiEntitiesCard; } }
HuiEntitiesCard
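As a rough standalone illustration of the show_header_toggle default computed in setConfig above (a sketch under assumptions, not the card's actual module): the toggle only defaults to visible when at least two rows reference entities in toggleable domains. The DOMAINS_TOGGLE contents and row shape below are simplified placeholders.

// Sketch: default visibility of the header toggle, mirroring the loop in setConfig.
// DOMAINS_TOGGLE here is an assumed subset; the real constant lives in common/const.
const DOMAINS_TOGGLE = new Set(["light", "switch", "fan", "cover"]);

interface RowConfig {
  entity?: string;
}

// Home Assistant entity ids are "<domain>.<object_id>".
const computeDomain = (entityId: string): string => entityId.split(".")[0];

function defaultShowHeaderToggle(rows: RowConfig[]): boolean {
  let toggleable = 0;
  for (const row of rows) {
    if (!row.entity) {
      continue; // rows without an entity (dividers, buttons, etc.) are ignored
    }
    if (DOMAINS_TOGGLE.has(computeDomain(row.entity))) {
      toggleable += 1;
    }
    if (toggleable === 2) {
      return true; // two toggleable rows are enough to justify the header toggle
    }
  }
  return false;
}

// Example: one light and one sensor -> false; a light and a switch -> true.
console.log(defaultShowHeaderToggle([{ entity: "light.kitchen" }, { entity: "sensor.temp" }]));
console.log(defaultShowHeaderToggle([{ entity: "light.kitchen" }, { entity: "switch.fan" }]));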
ProjectionAddAttributeGroup.test.ts
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. import { AttributeResolutionDirectiveSet, CdmAttributeGroupDefinition, CdmAttributeGroupReference, CdmAttributeItem, CdmCorpusDefinition, CdmCollection, CdmEntityAttributeDefinition, CdmEntityDefinition, CdmEntityReference, CdmFolderDefinition, cdmObjectType, CdmOperationAddAttributeGroup, CdmProjection, CdmTypeAttributeDefinition, resolveOptions } from '../../../internal'; import { projectionTestUtils } from '../../Utilities/projectionTestUtils'; /** * Class to handle AddAttributeGroup operations */ describe('Cdm/Projection/ProjectionAddAttributeGroupTest', () => { /** * The path between TestDataPath and TestName. */ const testsSubpath: string = 'Cdm/Projection/TestProjectionAddAttributeGroup'; /** * All possible combinations of the different resolution directives */ const resOptsCombinations: string[][] = [ [], ['referenceOnly'], ['normalized'], ['structured'], ['referenceOnly', 'normalized'], ['referenceOnly', 'structured'], ['normalized', 'structured'], ['referenceOnly', 'normalized', 'structured'] ]; /** * Test AddAttributeGroup operation nested with ExcludeAttributes */ it('testCombineOpsNestedProj', async () => { const testName: string = 'TestCombineOpsNestedProj'; const entityName: string = 'NewPerson'; const corpus: CdmCorpusDefinition = projectionTestUtils.getLocalCorpus(testsSubpath, testName); for (const resOpt of resOptsCombinations) { await projectionTestUtils.loadEntityForResolutionOptionAndSave(corpus, testName, testsSubpath, entityName, resOpt); } const entity: CdmEntityDefinition = await corpus.fetchObjectAsync<CdmEntityDefinition>(`local:/${entityName}.cdm.json/${entityName}`); const resolvedEntity: CdmEntityDefinition = await projectionTestUtils.getResolvedEntity(corpus, entity, [ ]); // Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email'] // Exclude attributes: ['age', 'phoneNumber'] const attGroupDefinition: CdmAttributeGroupDefinition = validateAttributeGroup(resolvedEntity.attributes, 'PersonAttributeGroup'); expect(attGroupDefinition.members.length) .toEqual(3); expect((attGroupDefinition.members.allItems[0] as CdmTypeAttributeDefinition).name) .toEqual('name'); expect((attGroupDefinition.members.allItems[1] as CdmTypeAttributeDefinition).name) .toEqual('address'); expect((attGroupDefinition.members.allItems[2] as CdmTypeAttributeDefinition).name) .toEqual('email'); }); /** * Test AddAttributeGroup and IncludeAttributes operations in the same projection */ it('testCombineOpsProj', async () => { const testName: string = 'TestCombineOpsProj'; const entityName: string = 'NewPerson'; const corpus: CdmCorpusDefinition = projectionTestUtils.getLocalCorpus(testsSubpath, testName); for (const resOpt of resOptsCombinations) { await projectionTestUtils.loadEntityForResolutionOptionAndSave(corpus, testName, testsSubpath, entityName, resOpt); } const entity: CdmEntityDefinition = await corpus.fetchObjectAsync<CdmEntityDefinition>(`local:/${entityName}.cdm.json/${entityName}`); const resolvedEntity: CdmEntityDefinition = await projectionTestUtils.getResolvedEntity(corpus, entity, [ ]); // Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email'] // Included attributes: ['age', 'phoneNumber'] const attGroupDefinition: CdmAttributeGroupDefinition = validateAttributeGroup(resolvedEntity.attributes, 'PersonAttributeGroup', 3); expect(attGroupDefinition.members.length) .toEqual(5); 
expect((attGroupDefinition.members.allItems[0] as CdmTypeAttributeDefinition).name) .toEqual('name'); expect((attGroupDefinition.members.allItems[1] as CdmTypeAttributeDefinition).name) .toEqual('age'); expect((attGroupDefinition.members.allItems[2] as CdmTypeAttributeDefinition).name) .toEqual('address'); expect((attGroupDefinition.members.allItems[3] as CdmTypeAttributeDefinition).name) .toEqual('phoneNumber'); expect((attGroupDefinition.members.allItems[4] as CdmTypeAttributeDefinition).name) .toEqual('email'); // Check the attributes coming from the IncludeAttribute operation expect((resolvedEntity.attributes.allItems[1] as CdmTypeAttributeDefinition).name) .toEqual('age'); expect((resolvedEntity.attributes.allItems[2] as CdmTypeAttributeDefinition).name) .toEqual('phoneNumber'); }); /** * Test AddAttributeGroup operation with a 'structured' condition */ it('testConditionalProj', async () => { const testName: string = 'TestConditionalProj'; const entityName: string = 'NewPerson'; const corpus: CdmCorpusDefinition = projectionTestUtils.getLocalCorpus(testsSubpath, testName); for (const resOpt of resOptsCombinations) { await projectionTestUtils.loadEntityForResolutionOptionAndSave(corpus, testName, testsSubpath, entityName, resOpt); } const entity: CdmEntityDefinition = await corpus.fetchObjectAsync<CdmEntityDefinition>(`local:/${entityName}.cdm.json/${entityName}`); const resolvedEntity: CdmEntityDefinition = await projectionTestUtils.getResolvedEntity(corpus, entity, [ 'referenceOnly' ]); // Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email'] // condition not met, keep attributes in flat list expect(resolvedEntity.attributes.length) .toEqual(5); expect((resolvedEntity.attributes.allItems[0] as CdmTypeAttributeDefinition).name) .toEqual('name'); expect((resolvedEntity.attributes.allItems[1] as CdmTypeAttributeDefinition).name) .toEqual('age'); expect((resolvedEntity.attributes.allItems[2] as CdmTypeAttributeDefinition).name) .toEqual('address'); expect((resolvedEntity.attributes.allItems[3] as CdmTypeAttributeDefinition).name) .toEqual('phoneNumber'); expect((resolvedEntity.attributes.allItems[4] as CdmTypeAttributeDefinition).name) .toEqual('email'); const resolvedEntity2: CdmEntityDefinition = await projectionTestUtils.getResolvedEntity(corpus, entity, [ 'structured' ]); // Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email'] // condition met, put all attributes in an attribute group const attGroupDefinition: CdmAttributeGroupDefinition = validateAttributeGroup(resolvedEntity2.attributes, 'PersonAttributeGroup'); expect(attGroupDefinition.members.length) .toEqual(5); expect((attGroupDefinition.members.allItems[0] as CdmTypeAttributeDefinition).name) .toEqual('name'); expect((attGroupDefinition.members.allItems[1] as CdmTypeAttributeDefinition).name) .toEqual('age'); expect((attGroupDefinition.members.allItems[2] as CdmTypeAttributeDefinition).name) .toEqual('address'); expect((attGroupDefinition.members.allItems[3] as CdmTypeAttributeDefinition).name) .toEqual('phoneNumber'); expect((attGroupDefinition.members.allItems[4] as CdmTypeAttributeDefinition).name) .toEqual('email'); }); /** * Test for creating a projection with an AddAttributeGroup operation and a condition using the object model */ it('testConditionalProjUsingObjectModel', async () => { const testName: string = 'TestConditionalProjUsingObjectModel'; const corpus: CdmCorpusDefinition = projectionTestUtils.getLocalCorpus(testsSubpath, testName); const localRoot: 
CdmFolderDefinition = corpus.storage.fetchRootFolder('local'); // Create an entity. const entity: CdmEntityDefinition = projectionTestUtils.createEntity(corpus, localRoot); // Create a projection with a condition that states the operation should only execute when the resolution directive is 'structured'. const projection: CdmProjection = projectionTestUtils.createProjection(corpus, localRoot); projection.condition = 'structured==true'; // Create an AddAttributeGroup operation const addAttGroupOp:CdmOperationAddAttributeGroup = corpus.MakeObject<CdmOperationAddAttributeGroup>(cdmObjectType.operationAddAttributeGroupDef); addAttGroupOp.attributeGroupName = 'PersonAttributeGroup'; projection.operations.push(addAttGroupOp); // Create an entity reference to hold this projection. const projectionEntityRef: CdmEntityReference = corpus.MakeObject<CdmEntityReference>(cdmObjectType.entityRef, null); projectionEntityRef.explicitReference = projection; // Create an entity attribute that contains this projection and add this to the entity. const entityAttribute: CdmEntityAttributeDefinition = corpus.MakeObject<CdmEntityAttributeDefinition>(cdmObjectType.entityAttributeDef, 'TestEntityAttribute'); entityAttribute.entity = projectionEntityRef; entity.attributes.push(entityAttribute); // Create resolution options with the 'referenceOnly' directive. const resOpt = new resolveOptions(entity.inDocument) resOpt.directives = new AttributeResolutionDirectiveSet(new Set<string>([ 'referenceOnly' ])); // Resolve the entity with 'referenceOnly' const resolvedEntityWithReferenceOnly: CdmEntityDefinition = await entity.createResolvedEntityAsync(`Resolved_${entity.entityName}.cdm.json`, resOpt, localRoot); // Verify correctness of the resolved attributes after running the AddAttributeGroup operation // Original set of attributes: ['id', 'name', 'value', 'date'] // condition not met, keep attributes in flat list expect(resolvedEntityWithReferenceOnly.attributes.length) .toEqual(4); expect((resolvedEntityWithReferenceOnly.attributes.allItems[0] as CdmTypeAttributeDefinition).name) .toEqual('id'); expect((resolvedEntityWithReferenceOnly.attributes.allItems[1] as CdmTypeAttributeDefinition).name) .toEqual('name'); expect((resolvedEntityWithReferenceOnly.attributes.allItems[2] as CdmTypeAttributeDefinition).name) .toEqual('value'); expect((resolvedEntityWithReferenceOnly.attributes.allItems[3] as CdmTypeAttributeDefinition).name) .toEqual('date'); // Now resolve the entity with the 'structured' directive resOpt.directives = new AttributeResolutionDirectiveSet(new Set<string>([ 'structured' ])); const resolvedEntityWithStructured: CdmEntityDefinition = await entity.createResolvedEntityAsync(`Resolved_${entity.entityName}.cdm.json`, resOpt, localRoot); // Verify correctness of the resolved attributes after running the AddAttributeGroup operation // Original set of attributes: ['id', 'name', 'value', 'date'] // condition met, put all attributes in an attribute group const attGroupDefinition: CdmAttributeGroupDefinition = validateAttributeGroup(resolvedEntityWithStructured.attributes, 'PersonAttributeGroup'); expect(attGroupDefinition.members.length) .toEqual(4); expect((attGroupDefinition.members.allItems[0] as CdmTypeAttributeDefinition).name) .toEqual('id'); expect((attGroupDefinition.members.allItems[1] as CdmTypeAttributeDefinition).name) .toEqual('name'); expect((attGroupDefinition.members.allItems[2] as CdmTypeAttributeDefinition).name) .toEqual('value'); expect((attGroupDefinition.members.allItems[3] as 
CdmTypeAttributeDefinition).name) .toEqual('date'); }); /** * Test resolving an entity attribute using resolution guidance */ it('testEntityAttribute', async () => { const testName: string = 'TestEntityAttribute'; const entityName: string = 'NewPerson'; const corpus: CdmCorpusDefinition = projectionTestUtils.getLocalCorpus(testsSubpath, testName); for (const resOpt of resOptsCombinations) { await projectionTestUtils.loadEntityForResolutionOptionAndSave(corpus, testName, testsSubpath, entityName, resOpt); } const entity: CdmEntityDefinition = await corpus.fetchObjectAsync<CdmEntityDefinition>(`local:/${entityName}.cdm.json/${entityName}`); const resolvedEntity: CdmEntityDefinition = await projectionTestUtils.getResolvedEntity(corpus, entity, [ 'structured' ]); // Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email'] const attGroupDefinition: CdmAttributeGroupDefinition = validateAttributeGroup(resolvedEntity.attributes, 'PersonInfo'); expect(attGroupDefinition.members.length) .toEqual(5); expect((attGroupDefinition.members.allItems[0] as CdmTypeAttributeDefinition).name) .toEqual('name'); expect((attGroupDefinition.members.allItems[1] as CdmTypeAttributeDefinition).name) .toEqual('age'); expect((attGroupDefinition.members.allItems[2] as CdmTypeAttributeDefinition).name) .toEqual('address'); expect((attGroupDefinition.members.allItems[3] as CdmTypeAttributeDefinition).name) .toEqual('phoneNumber'); expect((attGroupDefinition.members.allItems[4] as CdmTypeAttributeDefinition).name) .toEqual('email'); }); /** * Test for creating a projection with an AddAttributeGroup operation on an entity attribute using the object model */ it('testEntityAttributeProjUsingObjectModel', async () => { const testName: string = 'TestEntityAttributeProjUsingObjectModel'; const corpus: CdmCorpusDefinition = projectionTestUtils.getLocalCorpus(testsSubpath, testName); const localRoot: CdmFolderDefinition = corpus.storage.fetchRootFolder('local'); // Create an entity const entity: CdmEntityDefinition = projectionTestUtils.createEntity(corpus, localRoot); // Create a projection const projection: CdmProjection = projectionTestUtils.createProjection(corpus, localRoot); // Create an AddAttributeGroup operation const addAttGroupOp: CdmOperationAddAttributeGroup = corpus.MakeObject<CdmOperationAddAttributeGroup>(cdmObjectType.operationAddAttributeGroupDef); addAttGroupOp.attributeGroupName = 'PersonAttributeGroup'; projection.operations.push(addAttGroupOp); // Create an entity reference to hold this projection const projectionEntityRef: CdmEntityReference = corpus.MakeObject<CdmEntityReference>(cdmObjectType.entityRef, null); projectionEntityRef.explicitReference = projection; // Create an entity attribute that contains this projection and add this to the entity const entityAttribute: CdmEntityAttributeDefinition = corpus.MakeObject<CdmEntityAttributeDefinition>(cdmObjectType.entityAttributeDef, 'TestEntityAttribute'); entityAttribute.entity = projectionEntityRef; entity.attributes.push(entityAttribute); // Resolve the entity. 
const resolvedEntity: CdmEntityDefinition = await entity.createResolvedEntityAsync(`Resolved_${entity.entityName}.cdm.json`, null, localRoot); // Verify correctness of the resolved attributes after running the AddAttributeGroup operation // Original set of attributes: ['id', 'name', 'value', 'date'] const attGroupDefinition: CdmAttributeGroupDefinition = validateAttributeGroup(resolvedEntity.attributes, 'PersonAttributeGroup'); expect(attGroupDefinition.members.length) .toEqual(4); expect((attGroupDefinition.members.allItems[0] as CdmTypeAttributeDefinition).name) .toEqual('id'); expect((attGroupDefinition.members.allItems[1] as CdmTypeAttributeDefinition).name) .toEqual('name'); expect((attGroupDefinition.members.allItems[2] as CdmTypeAttributeDefinition).name) .toEqual('value'); expect((attGroupDefinition.members.allItems[3] as CdmTypeAttributeDefinition).name) .toEqual('date'); }); /** * Test for creating a projection with an AddAttributeGroup operation on an entity definition using the object model */ it('testEntityProjUsingObjectModel', async () => { const testName: string = 'TestEntityProjUsingObjectModel'; const corpus: CdmCorpusDefinition = projectionTestUtils.getLocalCorpus(testsSubpath, testName); const localRoot: CdmFolderDefinition = corpus.storage.fetchRootFolder('local'); // Create an entity const entity: CdmEntityDefinition = projectionTestUtils.createEntity(corpus, localRoot); // Create a projection const projection: CdmProjection = projectionTestUtils.createProjection(corpus, localRoot); // Create an AddAttributeGroup operation const addAttGroupOp: CdmOperationAddAttributeGroup = corpus.MakeObject<CdmOperationAddAttributeGroup>(cdmObjectType.operationAddAttributeGroupDef); addAttGroupOp.attributeGroupName = 'PersonAttributeGroup'; projection.operations.push(addAttGroupOp); // Create an entity reference to hold this projection const projectionEntityRef: CdmEntityReference = corpus.MakeObject<CdmEntityReference>(cdmObjectType.entityRef, null); projectionEntityRef.explicitReference = projection; // Set the entity's ExtendEntity to be the projection entity.extendsEntity = projectionEntityRef; // Resolve the entity const resolvedEntity: CdmEntityDefinition = await entity.createResolvedEntityAsync(`Resolved_${entity.entityName}.cdm.json`, null, localRoot); // Verify correctness of the resolved attributes after running the AddAttributeGroup operation // Original set of attributes: ['id', 'name', 'value', 'date'] const attGroupDefinition: CdmAttributeGroupDefinition = validateAttributeGroup(resolvedEntity.attributes, 'PersonAttributeGroup'); expect(attGroupDefinition.members.length) .toEqual(4); expect((attGroupDefinition.members.allItems[0] as CdmTypeAttributeDefinition).name) .toEqual('id'); expect((attGroupDefinition.members.allItems[1] as CdmTypeAttributeDefinition).name) .toEqual('name'); expect((attGroupDefinition.members.allItems[2] as CdmTypeAttributeDefinition).name) .toEqual('value'); expect((attGroupDefinition.members.allItems[3] as CdmTypeAttributeDefinition).name) .toEqual('date'); }); /** * Test AddAttributeGroup operation on an entity definition */ it('testExtendsEntityProj', async () => { const testName: string = 'TestExtendsEntityProj'; const entityName: string = 'Child'; const corpus: CdmCorpusDefinition = projectionTestUtils.getLocalCorpus(testsSubpath, testName); for (const resOpt of resOptsCombinations) { await projectionTestUtils.loadEntityForResolutionOptionAndSave(corpus, testName, testsSubpath, entityName, resOpt); } const entity: CdmEntityDefinition = 
await corpus.fetchObjectAsync<CdmEntityDefinition>(`local:/${entityName}.cdm.json/${entityName}`); const resolvedEntity: CdmEntityDefinition = await projectionTestUtils.getResolvedEntity(corpus, entity, [ ]); // Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email'] const attGroupDefinition: CdmAttributeGroupDefinition = validateAttributeGroup(resolvedEntity.attributes, 'ChildAttributeGroup'); expect(attGroupDefinition.members.length) .toEqual(5); expect((attGroupDefinition.members.allItems[0] as CdmTypeAttributeDefinition).name) .toEqual('name'); expect((attGroupDefinition.members.allItems[1] as CdmTypeAttributeDefinition).name) .toEqual('age'); expect((attGroupDefinition.members.allItems[2] as CdmTypeAttributeDefinition).name) .toEqual('address'); expect((attGroupDefinition.members.allItems[3] as CdmTypeAttributeDefinition).name) .toEqual('phoneNumber'); expect((attGroupDefinition.members.allItems[4] as CdmTypeAttributeDefinition).name) .toEqual('email'); }); /** * Multiple AddAttributeGroup operations on the same projection */ it('testMultipleOpProj', async () => { const testName: string = 'TestMultipleOpProj'; const entityName: string = 'NewPerson'; const corpus: CdmCorpusDefinition = projectionTestUtils.getLocalCorpus(testsSubpath, testName);
} const entity: CdmEntityDefinition = await corpus.fetchObjectAsync<CdmEntityDefinition>(`local:/${entityName}.cdm.json/${entityName}`); const resolvedEntity: CdmEntityDefinition = await projectionTestUtils.getResolvedEntity(corpus, entity, [ ]); // Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email'] // This will result in two attribute groups with the same set of attributes being generated const attGroup1: CdmAttributeGroupDefinition = validateAttributeGroup(resolvedEntity.attributes, 'PersonAttributeGroup', 2); expect(attGroup1.members.length) .toEqual(5); expect((attGroup1.members.allItems[0] as CdmTypeAttributeDefinition).name) .toEqual('name'); expect((attGroup1.members.allItems[1] as CdmTypeAttributeDefinition).name) .toEqual('age'); expect((attGroup1.members.allItems[2] as CdmTypeAttributeDefinition).name) .toEqual('address'); expect((attGroup1.members.allItems[3] as CdmTypeAttributeDefinition).name) .toEqual('phoneNumber'); expect((attGroup1.members.allItems[4] as CdmTypeAttributeDefinition).name) .toEqual('email'); const attGroup2: CdmAttributeGroupDefinition = validateAttributeGroup(resolvedEntity.attributes, 'SecondAttributeGroup', 2, 1); expect(attGroup2.members.length) .toEqual(5); expect((attGroup2.members.allItems[0] as CdmTypeAttributeDefinition).name) .toEqual('name'); expect((attGroup2.members.allItems[1] as CdmTypeAttributeDefinition).name) .toEqual('age'); expect((attGroup2.members.allItems[2] as CdmTypeAttributeDefinition).name) .toEqual('address'); expect((attGroup2.members.allItems[3] as CdmTypeAttributeDefinition).name) .toEqual('phoneNumber'); expect((attGroup2.members.allItems[4] as CdmTypeAttributeDefinition).name) .toEqual('email'); }); /** * Nested projections with AddAttributeGroup */ it('testNestedProj', async () => { const testName: string = 'TestNestedProj'; const entityName: string = 'NewPerson'; const corpus: CdmCorpusDefinition = projectionTestUtils.getLocalCorpus(testsSubpath, testName); for (const resOpt of resOptsCombinations) { await projectionTestUtils.loadEntityForResolutionOptionAndSave(corpus, testName, testsSubpath, entityName, resOpt); } const entity: CdmEntityDefinition = await corpus.fetchObjectAsync<CdmEntityDefinition>(`local:/${entityName}.cdm.json/${entityName}`); const resolvedEntity: CdmEntityDefinition = await projectionTestUtils.getResolvedEntity(corpus, entity, [ ]); // Original set of attributes: ['name', 'age', 'address', 'phoneNumber', 'email'] const outerAttGroup: CdmAttributeGroupDefinition = validateAttributeGroup(resolvedEntity.attributes, 'OuterAttributeGroup'); const innerAttGroup: CdmAttributeGroupDefinition = validateAttributeGroup(outerAttGroup.members, 'InnerAttributeGroup'); expect(innerAttGroup.members.length) .toEqual(5); expect((innerAttGroup.members.allItems[0] as CdmTypeAttributeDefinition).name) .toEqual('name'); expect((innerAttGroup.members.allItems[1] as CdmTypeAttributeDefinition).name) .toEqual('age'); expect((innerAttGroup.members.allItems[2] as CdmTypeAttributeDefinition).name) .toEqual('address'); expect((innerAttGroup.members.allItems[3] as CdmTypeAttributeDefinition).name) .toEqual('phoneNumber'); expect((innerAttGroup.members.allItems[4] as CdmTypeAttributeDefinition).name) .toEqual('email'); }); /** * Test resolving a type attribute with an add attribute group operation */ it.skip('testTypeAttributeProj', async () => { const testName: string = 'TestTypeAttributeProj'; const entityName: string = 'Person'; const corpus: CdmCorpusDefinition = 
projectionTestUtils.getLocalCorpus(testsSubpath, testName); for (const resOpt of resOptsCombinations) { await projectionTestUtils.loadEntityForResolutionOptionAndSave(corpus, testName, testsSubpath, entityName, resOpt); } const entity: CdmEntityDefinition = await corpus.fetchObjectAsync<CdmEntityDefinition>(`local:/${entityName}.cdm.json/${entityName}`); const resolvedEntity: CdmEntityDefinition = await projectionTestUtils.getResolvedEntity(corpus, entity, [ 'referenceOnly' ]); // Original set of attributes: ["name", "age", "address", "phoneNumber", "email"] // Add attribute group applied to "address" expect(resolvedEntity.attributes.length) .toEqual(5); expect((resolvedEntity.attributes.allItems[0] as CdmTypeAttributeDefinition).name) .toEqual('name'); expect((resolvedEntity.attributes.allItems[1] as CdmTypeAttributeDefinition).name) .toEqual('age'); const attGroupDefinition: CdmAttributeGroupDefinition = validateAttributeGroup(resolvedEntity.attributes, 'AddressAttributeGroup', 5, 2); expect((attGroupDefinition.members.allItems[0] as CdmTypeAttributeDefinition).name) .toEqual('address'); expect((resolvedEntity.attributes.allItems[3] as CdmTypeAttributeDefinition).name) .toEqual('phoneNumber'); expect((resolvedEntity.attributes.allItems[4] as CdmTypeAttributeDefinition).name) .toEqual('email'); }); /** * Validates the creation of an attribute group and return its definition * @param attributes The collection of attributes * @param attributeGroupName The attribute group name * @param attributesSize The expected size of the attributes collection */ function validateAttributeGroup(attributes: CdmCollection<CdmAttributeItem>, attributeGroupName: string, attributesSize: number = 1, index: number = 0) { expect(attributes.length) .toEqual(attributesSize); expect(attributes.allItems[index].objectType) .toEqual(cdmObjectType.attributeGroupRef); const attGroupReference: CdmAttributeGroupReference = attributes.allItems[index] as CdmAttributeGroupReference; expect(attGroupReference.explicitReference) .not .toBeUndefined(); const attGroupDefinition: CdmAttributeGroupDefinition = attGroupReference.explicitReference as CdmAttributeGroupDefinition; expect(attGroupDefinition.attributeGroupName) .toEqual(attributeGroupName); return attGroupDefinition; } });
for (const resOpt of resOptsCombinations) { await projectionTestUtils.loadEntityForResolutionOptionAndSave(corpus, testName, testsSubpath, entityName, resOpt);
OAuth2AuthorizationCodeRestClient.ts
import { NoAuthRestClient } from './NoAuthRestClient'; const { promisify } = require('util'); const request = promisify(require('request')); export class OAuth2RestClient extends NoAuthRestClient { private async fetchNewToken() { this.emitter.logger.info('Fetching new token...'); const authTokenResponse = await request({ uri: this.cfg.authorizationServerTokenEndpointUrl, method: 'POST', json: true, simple: false, resolveWithFullResponse: true, form: { refresh_token: this.cfg.oauth2.refresh_token, scope: this.cfg.oauth2.scope, grant_type: 'refresh_token', client_id: this.cfg.oauth2_field_client_id, client_secret: this.cfg.oauth2_field_client_secret, }, }); this.emitter.logger.info('New token fetched...'); if (authTokenResponse.statusCode >= 400) { throw new Error(`Error in authentication. Status code: ${authTokenResponse.statusCode}, Body: ${JSON.stringify(authTokenResponse.body)}`); } return authTokenResponse.body; } private async getValidToken() { if (!this.cfg.oauth2) { throw new Error('cfg.oauth2 can not be empty'); } const tokenExpiryTime = new Date(this.cfg.oauth2.tokenExpiryTime); const now = new Date(); if (now < tokenExpiryTime) { this.emitter.logger.info('Previously valid token found.'); return this.cfg.oauth2.access_token; } const tokenRefreshStartTime = new Date(); this.cfg.oauth2 = await this.fetchNewToken(); this.cfg.oauth2.tokenExpiryTime = (new Date(tokenRefreshStartTime.getTime() + (this.cfg.oauth2.expires_in * 1000))).toISOString(); if (this.emitter && this.emitter.emit) { this.emitter.emit('updateKeys', this.cfg.oauth2); } return this.cfg.oauth2.access_token; } protected async addAuthenticationToRequestOptions(requestOptions) { const accessToken = await this.getValidToken();
requestOptions.headers.Authorization = `Bearer ${accessToken}`; } }
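A minimal sketch of the expiry bookkeeping that getValidToken relies on, assuming the token endpoint returns expires_in in seconds; the interface and helper names below are illustrative, not this module's exported API.

// Illustrative helpers mirroring the token reuse / refresh decision above.
interface OAuth2State {
  access_token: string;
  expires_in: number;        // seconds until expiry, as returned by the token endpoint (assumed)
  tokenExpiryTime?: string;  // ISO timestamp persisted alongside the token
}

// Reuse the cached token only while the stored expiry lies in the future.
function tokenStillValid(oauth2: OAuth2State, now: Date = new Date()): boolean {
  return !!oauth2.tokenExpiryTime && now < new Date(oauth2.tokenExpiryTime);
}

// After a refresh, anchor the new expiry to the moment the refresh started,
// as getValidToken does with tokenRefreshStartTime.
function withExpiry(fresh: OAuth2State, refreshStartedAt: Date): OAuth2State {
  return {
    ...fresh,
    tokenExpiryTime: new Date(refreshStartedAt.getTime() + fresh.expires_in * 1000).toISOString(),
  };
}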
15.2.3.14-2-3.js
/*--- es5id: 15.2.3.14-2-3 description: Object.keys returns the standard built-in Array (Array overridden) ---*/ function Array() {} var o = { x: 1, y: 2 }; var a = Object.keys(o); var s = Object.prototype.toString.call(a); assert.sameValue(s, '[object Array]', 's');
// Copyright (c) 2012 Ecma International. All rights reserved. // This code is governed by the BSD license found in the LICENSE file.
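Outside the test262 harness (which supplies assert.sameValue), the behaviour exercised above can be reproduced directly: shadowing Array with a local declaration does not change the fact that Object.keys returns a genuine built-in Array.

// Standalone illustration of the test above; logs "[object Array]".
function objectKeysIgnoresShadowedArray(): string {
  function Array() {} // local shadow, analogous to the test
  const keys = Object.keys({ x: 1, y: 2 });
  return Object.prototype.toString.call(keys);
}
console.log(objectKeysIgnoresShadowedArray());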
Wrapper.js
import styled from "styled-components";
const Wrapper = styled.div` ${flexColumnBase} ${wrapperSizeBase} `; export default Wrapper;
import { flexColumnBase, wrapperSizeBase } from "../base";
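The base fragments imported by Wrapper.js are not shown in this file; purely as an assumed illustration, they could be css template literals along these lines (names kept, contents invented for the sketch).

import { css } from "styled-components";

// Assumed definitions of the shared fragments composed into Wrapper above.
// The real ../base module may differ; this only demonstrates the css-helper pattern.
export const flexColumnBase = css`
  display: flex;
  flex-direction: column;
`;

export const wrapperSizeBase = css`
  width: 100%;
  min-height: 100vh;
`;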
lib1.rs
#![crate_type = "rlib"] #[link(name = "foo", kind = "static")] extern "C" { fn foo() -> i32; } pub fn
() -> i32 { unsafe { foo() } }
foo1
list_select.rs
//! A wrapper around the `List` widget providing the ability to select one or more items. use {Color, Positionable, Scalar, Sizeable, Ui, Widget}; use {event, graph, input, widget}; use std; use input::keyboard::ModifierKey; use input::state::mouse::Button; /// A wrapper around the `List` widget that handles single and multiple selection logic. #[derive(Clone, WidgetCommon_)] #[allow(missing_copy_implementations)] pub struct ListSelect<M, D, S> { #[conrod(common_builder)] common: widget::CommonBuilder, num_items: usize, mode: M, direction: std::marker::PhantomData<D>, item_size: S, style: widget::list::Style, item_instantiation: widget::list::ItemInstantiation, } /// A trait that extends the `List` `Direction` trait with behaviour necessary for the `ListSelect` /// widget. /// /// Implemented for the `Down`, `Right`, `Up`, `Left` types. pub trait Direction: widget::list::Direction { /// Maps a given `key` to a direction along the list. fn key_direction(key: input::Key) -> Option<ListDirection>; } /// The direction in which the list flows. #[derive(Copy, Clone, Debug)] pub enum ListDirection { /// The direction flowing from the start of the list to the end of the list. Forward, /// The direction flowing from the end of the list to the start of the list. Backward, } /// Allows the `ListSelect` to be generic over `Single` and `Multiple` selection modes. /// /// Also allows for defining other custom selection modes. pub trait Mode { /// The data associated with the `Mode`s `Event::Selection`. type Selection; /// Update the `PendingEvents` in accordance with the given `Click` event. fn click_selection<F, D, S>(&self, event::Click, i: usize, num_items: usize, &State, is_selected: F, &mut PendingEvents<Self::Selection, D, S>) where F: Fn(usize) -> bool; /// Update the `PendingEvents` in accordance with the given `KeyPress` event. fn key_selection<F, D, S>(&self, event::KeyPress, i: usize, num_items: usize, &State, is_selected: F, &mut PendingEvents<Self::Selection, D, S>) where F: Fn(usize) -> bool, D: Direction; } widget_ids! { struct Ids { list, } } /// Represents the state of the ListSelect. pub struct State { ids: Ids, /// Tracking index of last selected entry that has been pressed in order to /// perform multi selection when `SHIFT` or `ALT`(Mac) / 'CTRL'(Other OS) is held. last_selected_entry: std::cell::Cell<Option<usize>>, } /// Buffer used for storing events that have been produced but are yet to be yielded. pub type PendingEvents<Selection, D, S> = std::collections::VecDeque<Event<Selection, D, S>>; /// An iterator-like type for yielding `ListSelect` `Event`s. pub struct Events<M, D, S> where M: Mode, { id: widget::Id, items: widget::list::Items<D, S>, num_items: usize, mode: M, pending_events: PendingEvents<M::Selection, D, S>, } /// The kind of events that the `ListSelect` may `react` to. /// Provides tuple(s) of index in list and string representation of selection #[derive(Clone, Debug)] pub enum Event<Selection, Direction, Size> { /// The next `Item` is ready for instantiation. Item(widget::list::Item<Direction, Size>), /// A change in selection has occurred. Selection(Selection), /// A button press occurred while the widget was capturing the mouse. Press(event::Press), /// A button release occurred while the widget was capturing the mouse. Release(event::Release), /// A click occurred while the widget was capturing the mouse. Click(event::Click), /// A double click occurred while the widget was capturing the mouse. 
DoubleClick(event::DoubleClick), } /// A single item selection `Mode` for the `ListSelect`. #[derive(Copy, Clone)] pub struct Single; /// A selection `Mode` for the `ListSelect` that allows selecting more than one item at a time. #[derive(Copy, Clone)] pub struct Multiple; /// Represents some change in item selection for a `ListSelect` in `Multiple` mode. #[derive(Clone, Debug)] pub enum Selection<H: std::hash::BuildHasher = std::collections::hash_map::RandomState> { /// Items which have been added to the selection. Add(std::collections::HashSet<usize, H>), /// Items which have been removed from the selection. Remove(std::collections::HashSet<usize, H>), } impl<H: std::hash::BuildHasher> Selection<H> { /// Update the given slice of `bool`s with this `Selection`. /// /// Each index in the `Selection` represents and index into the slice. pub fn update_bool_slice(&self, slice: &mut [bool]) { match *self { Selection::Add(ref indices) => for &i in indices { if let Some(b) = slice.get_mut(i) { *b = true; } }, Selection::Remove(ref indices) => for &i in indices { if let Some(b) = slice.get_mut(i) { *b = false; } }, } } /// Update the given set of selected indices with this `Selection`. pub fn update_index_set<T>(&self, set: &mut std::collections::HashSet<usize, T>) where T: std::hash::BuildHasher { match *self { Selection::Add(ref indices) => for &i in indices { set.insert(i); }, Selection::Remove(ref indices) => for &i in indices { set.remove(&i); }, } } } impl ListSelect<Single, widget::list::Down, widget::list::Dynamic> { /// Construct a new ListSelect, allowing one selected item at a time. pub fn single(num_items: usize) -> Self { Self::new(num_items, Single) } } impl ListSelect<Multiple, widget::list::Down, widget::list::Dynamic> { /// Construct a new ListSelect, allowing multiple selected items. pub fn multiple(num_items: usize) -> Self { Self::new(num_items, Multiple) } } impl<M, D, S> ListSelect<M, D, S> where M: Mode, D: Direction, S: widget::list::ItemSize, { /// Flows items from top to bottom. pub fn flow_down(self) -> ListSelect<M, widget::list::Down, S> { let ListSelect { common, num_items, mode, item_size, style, item_instantiation, .. } = self; ListSelect { common: common, num_items: num_items, mode: mode, direction: std::marker::PhantomData, item_size: item_size, style: style, item_instantiation: item_instantiation, } } /// Flows items from left to right. pub fn flow_right(self) -> ListSelect<M, widget::list::Right, S> { let ListSelect { common, num_items, mode, item_size, style, item_instantiation, .. } = self; ListSelect { common: common, num_items: num_items, mode: mode, direction: std::marker::PhantomData, item_size: item_size, style: style, item_instantiation: item_instantiation, } } /// Flows items from right to left. pub fn flow_left(self) -> ListSelect<M, widget::list::Left, S> { let ListSelect { common, num_items, mode, item_size, style, item_instantiation, .. } = self; ListSelect { common: common, num_items: num_items, mode: mode, direction: std::marker::PhantomData, item_size: item_size, style: style, item_instantiation: item_instantiation, } } /// Flows items from bottom to top. pub fn flow_up(self) -> ListSelect<M, widget::list::Up, S> { let ListSelect { common, num_items, mode, item_size, style, item_instantiation, .. 
} = self; ListSelect { common: common, num_items: num_items, mode: mode, direction: std::marker::PhantomData, item_size: item_size, style: style, item_instantiation: item_instantiation, } } /// Specify a fixed item size, where size is a `Scalar` in the direction that the `List` is /// flowing. When a `List` is constructed with this method, all items will have a fixed, equal /// length. pub fn item_size(self, length: Scalar) -> ListSelect<M, D, widget::list::Fixed> { let ListSelect { common, num_items, mode, direction, style, .. } = self; ListSelect { common: common, num_items: num_items, mode: mode, direction: direction, item_size: widget::list::Fixed { length: length }, style: style, item_instantiation: widget::list::ItemInstantiation::OnlyVisible, } } } impl<M> ListSelect<M, widget::list::Down, widget::list::Dynamic> { /// Begin building a new `ListSelect` with the given mode. /// /// This method is only useful when using a custom `Mode`, otherwise `ListSelect::single` or /// `ListSelect::multiple` will probably be more suitable. pub fn new(num_items: usize, mode: M) -> Self where M: Mode, { ListSelect { common: widget::CommonBuilder::default(), style: widget::list::Style::default(), num_items: num_items, item_size: widget::list::Dynamic {}, mode: mode, direction: std::marker::PhantomData, item_instantiation: widget::list::ItemInstantiation::All, } } } impl<M, D, S> ListSelect<M, D, S> { /// Specifies that the `List` should be scrollable and should provide a `Scrollbar` to the /// right of the items. pub fn scrollbar_next_to(mut self) -> Self { self.style.scrollbar_position = Some(Some(widget::list::ScrollbarPosition::NextTo)); self } /// Specifies that the `List` should be scrollable and should provide a `Scrollbar` that hovers /// above the right edge of the items and automatically hides when the user is not scrolling. pub fn scrollbar_on_top(mut self) -> Self { self.style.scrollbar_position = Some(Some(widget::list::ScrollbarPosition::OnTop)); self } /// The width of the `Scrollbar`. pub fn scrollbar_thickness(mut self, w: Scalar) -> Self { self.style.scrollbar_thickness = Some(Some(w)); self } /// The color of the `Scrollbar`. pub fn scrollbar_color(mut self, color: Color) -> Self { self.style.scrollbar_color = Some(color); self } } impl<M, D> ListSelect<M, D, widget::list::Fixed> { /// Indicates that an `Item` should be instatiated for every element in the list, regardless of /// whether or not the `Item` would be visible. /// /// This is the default (and only) behaviour for `List`s with dynamic item sizes. This is /// because a `List` cannot know the total length of its combined items in advanced when each /// item is dynamically sized and their size is not given until they are set. /// /// Note: This may cause significantly heavier CPU load for lists containing many items (100+). /// We only recommend using this when absolutely necessary as large lists may cause unnecessary /// bloating within the widget graph, and in turn result in greater traversal times. pub fn instantiate_all_items(mut self) -> Self { self.item_instantiation = widget::list::ItemInstantiation::All; self } /// Indicates that only `Item`s that are visible should be instantiated. This ensures that we /// avoid bloating the widget graph with unnecessary nodes and in turn keep traversal times to /// a minimum. /// /// This is the default behaviour for `ListSelect`s with fixed item sizes. pub fn
(mut self) -> Self { self.item_instantiation = widget::list::ItemInstantiation::OnlyVisible; self } } impl<M, D, S> Widget for ListSelect<M, D, S> where M: Mode, D: Direction, S: widget::list::ItemSize, { type State = State; type Style = widget::list::Style; type Event = (Events<M, D, S>, Option<widget::list::Scrollbar<D::Axis>>); fn init_state(&self, id_gen: widget::id::Generator) -> Self::State { State { ids: Ids::new(id_gen), last_selected_entry: std::cell::Cell::new(None), } } fn style(&self) -> Self::Style { self.style.clone() } /// Update the state of the ListSelect. fn update(self, args: widget::UpdateArgs<Self>) -> Self::Event { let widget::UpdateArgs { id, state, style, ui, .. } = args; let ListSelect { num_items, item_size, item_instantiation, mode, .. } = self; // Make sure that `last_selected_entry` refers to an actual selected value in the list. // If not push first selected item, if any. if let Some(i) = state.last_selected_entry.get() { if i >= num_items { state.update(|state| state.last_selected_entry.set(None)); } } let mut list = widget::List::<D, _>::from_item_size(num_items, item_size); let scrollbar_position = style.scrollbar_position(&ui.theme); list = match scrollbar_position { Some(widget::list::ScrollbarPosition::OnTop) => list.scrollbar_on_top(), Some(widget::list::ScrollbarPosition::NextTo) => list.scrollbar_next_to(), None => list, }; list.item_instantiation = item_instantiation; list.style = style.clone(); let (items, scrollbar) = list.middle_of(id).wh_of(id).set(state.ids.list, ui); let events = Events { id: id, items: items, num_items: num_items, mode: mode, pending_events: PendingEvents::new(), }; (events, scrollbar) } } impl<M, D, S> Events<M, D, S> where M: Mode, D: Direction, S: widget::list::ItemSize, { /// Yield the next `Event`. pub fn next<F>(&mut self, ui: &Ui, is_selected: F) -> Option<Event<M::Selection, D, S>> where F: Fn(usize) -> bool, { let Events { id, num_items, ref mode, ref mut items, ref mut pending_events, } = *self; if let Some(event) = pending_events.pop_front() { return Some(event); } let item = match items.next(ui) { Some(item) => item, None => return None, }; // Borrow the `ListSelect::State` from the `Ui`'s widget graph. let state = || { ui.widget_graph() .widget(id) .and_then(|container| container.unique_widget_state::<ListSelect<M, D, S>>()) .map(|&graph::UniqueWidgetState { ref state, .. }| state) .expect("couldn't find `ListSelect` state in the widget graph") }; // Ensure's the last selected entry is still selected. // // Sets the `last_selected_entry` to `None` if it is no longer selected. let ensure_last_selected_validity = |state: &State| { if let Some(i) = state.last_selected_entry.get() { if !is_selected(i) { state.last_selected_entry.set(None); } } }; let i = item.i; // Check for any events that may have occurred to this widget. for widget_event in ui.widget_input(item.widget_id).events() { match widget_event { // Produce a `DoubleClick` event. event::Widget::DoubleClick(click) => { if let input::MouseButton::Left = click.button { pending_events.push_back(Event::DoubleClick(click)); } }, // Check if the entry has been `Click`ed. event::Widget::Click(click) => { pending_events.push_back(Event::Click(click)); let state = state(); ensure_last_selected_validity(state); mode.click_selection(click, i, num_items, state, &is_selected, pending_events); }, // Check for whether or not the item should be selected. 
event::Widget::Press(press) => { pending_events.push_back(Event::Press(press)); if let Some(key_press) = press.key() { let state = state(); ensure_last_selected_validity(state); mode.key_selection(key_press, i, num_items, state, &is_selected, pending_events); } }, event::Widget::Tap(_) => { let dummy_click=event::Click{ button:Button::Left, xy:[200.0,123.0], modifiers:ModifierKey::NO_MODIFIER }; pending_events.push_back(Event::Click(dummy_click.clone())); let state = state(); ensure_last_selected_validity(state); mode.click_selection(dummy_click, i, num_items, state, &is_selected, pending_events); }, // Produce a `Release` event. event::Widget::Release(release) => { let event = Event::Release(release); pending_events.push_back(event); }, _ => (), } } let item_event = Event::Item(item); // If we can avoid causing `pending_events` to allocate, do so. match pending_events.pop_front() { Some(event) => { pending_events.push_back(item_event); Some(event) }, None => Some(item_event), } } } impl Mode for Single { type Selection = usize; fn click_selection<F, D, S>(&self, _: event::Click, i: usize, _num_items: usize, state: &State, _is_selected: F, pending: &mut PendingEvents<Self::Selection, D, S>) where F: Fn(usize) -> bool, { state.last_selected_entry.set(Some(i)); let event = Event::Selection(i); pending.push_back(event); } fn key_selection<F, D, S>(&self, press: event::KeyPress, _i: usize, num_items: usize, state: &State, _is_selected: F, pending: &mut PendingEvents<Self::Selection, D, S>) where F: Fn(usize) -> bool, D: Direction, { let i = match state.last_selected_entry.get() { Some(i) => i, None => return, }; let selection = match D::key_direction(press.key) { Some(ListDirection::Backward) => if i == 0 { 0 } else { i - 1 }, Some(ListDirection::Forward) => std::cmp::min(i + 1, num_items - 1), None => return, }; state.last_selected_entry.set(Some(selection)); let event = Event::Selection(selection); pending.push_back(event); } } impl Mode for Multiple { type Selection = Selection; fn click_selection<F, D, S>(&self, click: event::Click, i: usize, num_items: usize, state: &State, is_selected: F, pending: &mut PendingEvents<Self::Selection, D, S>) where F: Fn(usize) -> bool, { let shift = click.modifiers.contains(input::keyboard::ModifierKey::SHIFT); let alt = click.modifiers.contains(input::keyboard::ModifierKey::ALT) || click.modifiers.contains(input::keyboard::ModifierKey::CTRL); let event = match state.last_selected_entry.get() { Some(idx) if shift => { let start = std::cmp::min(idx, i); let end = std::cmp::max(idx, i); state.last_selected_entry.set(Some(i)); let selection = (start..end + 1).collect(); Event::Selection(Selection::Add(selection)) }, Some(_) | None if alt => { let selection = std::iter::once(i).collect(); if !is_selected(i) { state.last_selected_entry.set(Some(i)); Event::Selection(Selection::Add(selection)) } else { Event::Selection(Selection::Remove(selection)) } }, _ => { let old_selection = (0..num_items).filter(|&i| is_selected(i)).collect(); let event = Event::Selection(Selection::Remove(old_selection)); pending.push_back(event); let selection = std::iter::once(i).collect(); state.last_selected_entry.set(Some(i)); Event::Selection(Selection::Add(selection)) }, }; pending.push_back(event); } fn key_selection<F, D, S>(&self, press: event::KeyPress, _i: usize, num_items: usize, state: &State, is_selected: F, pending: &mut PendingEvents<Self::Selection, D, S>) where F: Fn(usize) -> bool, D: Direction, { let i = match state.last_selected_entry.get() { Some(i) => i, None => 
return, }; let alt = press.modifiers.contains(input::keyboard::ModifierKey::ALT); let end = match D::key_direction(press.key) { Some(ListDirection::Backward) => if i == 0 || alt { 0 } else { i - 1 }, Some(ListDirection::Forward) => { let last_idx = num_items - 1; if i >= last_idx || alt { last_idx } else { i + 1 } }, None => return, }; state.last_selected_entry.set(Some(end)); let selection = if press.modifiers.contains(input::keyboard::ModifierKey::SHIFT) { let start = std::cmp::min(i, end); let end = std::cmp::max(i, end) + 1; (start..end).collect() } else { let old_selection = (0..num_items).filter(|&i| is_selected(i)).collect(); let event = Event::Selection(Selection::Remove(old_selection)); pending.push_back(event); std::iter::once(end).collect() }; let event = Event::Selection(Selection::Add(selection)); pending.push_back(event); } } impl Direction for widget::list::Down { fn key_direction(key: input::Key) -> Option<ListDirection> { match key { input::Key::Down => Some(ListDirection::Forward), input::Key::Up => Some(ListDirection::Backward), _ => None, } } } impl Direction for widget::list::Up { fn key_direction(key: input::Key) -> Option<ListDirection> { match key { input::Key::Up => Some(ListDirection::Forward), input::Key::Down => Some(ListDirection::Backward), _ => None, } } } impl Direction for widget::list::Right { fn key_direction(key: input::Key) -> Option<ListDirection> { match key { input::Key::Right => Some(ListDirection::Forward), input::Key::Left => Some(ListDirection::Backward), _ => None, } } } impl Direction for widget::list::Left { fn key_direction(key: input::Key) -> Option<ListDirection> { match key { input::Key::Left => Some(ListDirection::Forward), input::Key::Right => Some(ListDirection::Backward), _ => None, } } }
instantiate_only_visible_items
buyer_dashboard_business.py
from app.api.services import ( briefs ) def get_briefs(user_id, status=None):
return briefs.get_buyer_dashboard_briefs(user_id, status) def get_brief_counts(user_id): return briefs.get_brief_counts(user_id)
coovie2.py
import os import click from movie import Movie from scan import Scan from helper import Helper @click.command() @click.option('--endings', default='mp4, mkv', help='File-endings that are accepted as valid movie-files. ' + 'Default: [.mkv, .mp4]' ) @click.option('--size_limit', default="1500", help='Smaller files are excluded from search (in MegaBytes). ' + "Default: 1500") @click.argument('search_path', required=True) def
(endings, size_limit, search_path): # initiate global function variables movie_list = [] longest_title = 0 # initiate options & arguments from cli movie_endings = tuple(endings.split(", ")) movie_size_limit = int(size_limit) * 1024 * 1024 # MegaBytes # initiate needed objects scanner = Scan(movie_endings, movie_size_limit) helper = Helper() # look for all available files inside directory recursively for root, subs, files in os.walk(search_path): # do available files match a movie-file? for file in files: # is movie file? bool_movie = scanner.is_movie(file) if not bool_movie: continue # is large enough? movie_path = os.path.join(root, file) movie_folder = os.path.basename(root) bool_large = scanner.is_large(movie_path) if not bool_large: continue # is movie file and large enough, try to extract a valid movie name extracted_data = scanner.extract_file_data(file, movie_folder) # if movie has valid data, create a new movie object if -1 in extracted_data: print("Problem with: " + extracted_data[0] + " " + str(extracted_data[1])) else: # data valid, create object and append it movie_object = Movie( extracted_data[0], extracted_data[1], movie_path, root ) movie_list.append(movie_object) # does the current movie have the longest title? if longest_title < len(movie_object.title): longest_title = len(movie_object.title) result_str = 'Movies counted: {number}'.format(number=len(movie_list)) print(result_str) # try to fetch imdb rating for each movie-object for movie in movie_list: movie.fetch_rating() # is current movie in top 250 movie.imdb_top = helper.is_imdb_top(movie) # sort movies by their rating and print them print("") movie_list.sort(key=lambda x: x.rating, reverse=True) for movie in movie_list: movie.print_data(longest_title) if __name__ == '__main__': main()
main
catchall.py
"""This module defines miscellaneous utility functions that is public to users.""" import numpy as np from numpy import unique, linalg, diag, sqrt, dot from Bio.Phylo.BaseTree import Tree, Clade from prody import PY3K from .misctools import addEnds, interpY, index, isListLike from .checkers import checkCoords from .logger import LOGGER __all__ = ['calcTree', 'clusterMatrix', 'showLines', 'showMatrix', 'reorderMatrix', 'findSubgroups', 'getCoords', 'getLinkage', 'getTreeFromLinkage', 'clusterSubfamilies'] class LinkageError(Exception): pass def clusterSubfamilies(similarities, n_clusters=0, linkage='all', method='tsne', cutoff=0.0, **kwargs): """Perform clustering based on members of the *ensemble* projected into lower a reduced dimension. :arg similarities: a matrix of similarities for each structure in the ensemble, such as RMSD-matrix, dynamics-based spectral overlap, sequence similarity :type similarities: :class:`~numpy.ndarray` :arg n_clusters: the number of clusters to generate. If **0**, will scan a range of number of clusters and return the best one based on highest silhouette score. Default is **0**. :type n_clusters: int :arg linkage: if **all**, will test all linkage types (ward, average, complete, single). Otherwise will use only the one(s) given as input. Default is **all**. :type linkage: str, list, tuple, :class:`~numpy.ndarray` :arg method: if set to **spectral**, will generate a Kirchoff matrix based on the cutoff value given and use that as input as clustering instead of the values themselves. Default is **tsne**. :type method: str :arg cutoff: only used if *method* is set to **spectral**. This value is used for generating the Kirchoff matrix to use for generating clusters when doing spectral clustering. Default is **0.0**. :type cutoff: float """ # Import necessary packages try: from sklearn.manifold import SpectralEmbedding from sklearn.cluster import AgglomerativeClustering from sklearn.metrics import silhouette_score from sklearn.manifold import TSNE except ImportError: raise ImportError('need sklearn module') ''' try: import Bio except ImportError: raise ImportError('Phylo module could not be imported. 
' 'Reinstall ProDy or install Biopython ' 'to solve the problem.') ''' # Check inputs to make sure are of valid types/values if not isinstance(similarities, np.ndarray): raise TypeError('similarities should be a numpy ndarray') dim = similarities.shape if dim[0] != dim[1]: raise ValueError('similarities must be a square matrix') if n_clusters != 0: if not isinstance(n_clusters, int): raise TypeError('clusters must be an instance of int') if n_clusters < 1: raise ValueError('clusters must be a positive integer') elif n_clusters > similarities.shape[0]: raise ValueError('clusters can\'t be longer than similarities matrix') nclusts = range(n_clusters,n_clusters+1) else: nclusts = range(2,10,1) if linkage != 'all': # Check if given input for linkage is list-like if isListLike(linkage): for val in linkage: if val.lower() not in ['ward', 'average', 'complete', 'single']: raise ValueError('linkage must be one or more of: \'ward\', \'average\', \'complete\', or \'single\'') if len(linkage) > 4: raise ValueError('linkage must be one or more of: \'ward\', \'average\', \'complete\', or \'single\'') linkages = [ x.lower() for x in linkage ] # If not, check if it is a valid string and method name else: if not isinstance(linkage, str): raise TypeError('linkage must be an instance of str or list-like of strs') if linkage not in ['ward', 'average', 'complete', 'single']: raise ValueError('linkage must one or more of: \'ward\', \'average\', \'complete\', or \'single\'') linkages = [linkage] else: linkages = ['ward', 'average', 'complete', 'single'] if method != 'tsne': if not isinstance(method, str): raise TypeError('method must be an instance of str') if method != 'spectral': raise ValueError('method must be either \'tsne\' or \'spectral\'') if not isinstance(cutoff, float): raise TypeError('cutoff must be an instance of float') best_score = -1 best_nclust = 0 best_link = '' best_labels = [] # Scan over range of clusters for x in nclusts: if method == 'tsne': embedding = TSNE(n_components=2) transform = embedding.fit_transform(similarities) else: kirchhoff = np.where(similarities > cutoff, 0, -1) embedding = SpectralEmbedding(n_components=2) transform = embedding.fit_transform(kirchhoff) for link in linkages: clustering = AgglomerativeClustering(linkage=link, n_clusters=x) clustering.fit(transform) silhouette_avg = silhouette_score(transform, clustering.labels_) if silhouette_avg > best_score: best_score = silhouette_avg best_nclust = x best_link = link best_labels = clustering.labels_ return best_labels def getCoords(data): try: data = (data._getCoords() if hasattr(data, '_getCoords') else data.getCoords()) except AttributeError: try: checkCoords(data) except TypeError: raise TypeError('data must be a Numpy array or an object ' 'with `getCoords` method') return data def getLinkage(names, tree): """ Obtain the :func:`~scipy.cluster.hierarchy.linkage` matrix encoding ``tree``. 
:arg names: a list of names, the order determines the values in the linkage matrix :type names: list, :class:`~numpy.ndarray` :arg tree: tree to be converted :type tree: :class:`~Bio.Phylo.BaseTree.Tree` """ tree_terminals = tree.get_terminals() if len(tree_terminals) != len(names): raise ValueError('inconsistent number of terminals in tree and names') terminals = [None] * len(names) for clade in tree_terminals: i = index(names, clade.name) terminals[i] = clade n = len(terminals) nonterminals = [c for c in reversed(tree.get_nonterminals())] if len(nonterminals) != n-1: raise LinkageError('wrong number of terminal clades') Z = np.zeros((n-1, 4)) root = tree.root def _indexOfClade(clade): if clade.is_terminal(): i = index(terminals, clade) else: i = index(nonterminals, clade) + n return i def _height_of(clade): if clade.is_terminal(): height = 0 else: height = max(_height_of(c) + c.branch_length for c in clade.clades) return height def _dfs(clade): if clade.is_terminal(): return i = _indexOfClade(clade) clade_a = clade.clades[0] clade_b = clade.clades[1] a = _indexOfClade(clade_a) b = _indexOfClade(clade_b) l = min(a, b) r = max(a, b) Z[i-n, 0] = l Z[i-n, 1] = r Z[i-n, 2] = _height_of(clade) * 2. Z[i-n, 3] = clade.count_terminals() _dfs(clade_a) _dfs(clade_b) _dfs(root) return Z def getTreeFromLinkage(names, linkage): """ Obtain the tree encoded by ``linkage``. :arg names: a list of names, the order should correspond to the values in linkage :type names: list, :class:`~numpy.ndarray` :arg linkage: linkage matrix :type linkage: :class:`~numpy.ndarray` """ try: import Bio except ImportError: raise ImportError('Phylo module could not be imported. ' 'Reinstall ProDy or install Biopython ' 'to solve the problem.') from Bio.Phylo.BaseTree import Tree, Clade if not isinstance(linkage, np.ndarray): raise TypeError('linkage must be a numpy.ndarray instance') if linkage.ndim != 2: raise LinkageError('linkage must be a 2-dimensional matrix') if linkage.shape[1] != 4: raise LinkageError('linkage must have exactly 4 columns') n_terms = len(names) if linkage.shape[0] != n_terms-1: raise LinkageError('linkage must have exactly len(names)-1 rows') clades = [] heights = [] for name in names: clade = Clade(None, name) clades.append(clade) heights.append(0.) for link in linkage: l = int(link[0]) r = int(link[1]) height = link[2] left = clades[l] right = clades[r] lh = heights[l] rh = heights[r] left.branch_length = height - lh right.branch_length = height - rh clade = Clade(None, None) clade.clades.append(left) clade.clades.append(right) clades.append(clade) heights.append(height) return Tree(clade) def calcTree(names, distance_matrix, method='upgma', linkage=False): """ Given a distance matrix, it creates an returns a tree structure. :arg names: a list of names :type names: list, :class:`~numpy.ndarray` :arg distance_matrix: a square matrix with length of ensemble. If numbers does not match *names* it will raise an error :type distance_matrix: :class:`~numpy.ndarray` :arg method: method used for constructing the tree. Acceptable options are ``"upgma"``, ``"nj"``, or methods supported by :func:`~scipy.cluster.hierarchy.linkage` such as ``"single"``, ``"average"``, ``"ward"``, etc. Default is ``"upgma"`` :type method: str :arg linkage: whether the linkage matrix is returned. Note that NJ trees do not support linkage :type linkage: bool """ try: import Bio except ImportError: raise ImportError('Phylo module could not be imported. 
' 'Reinstall ProDy or install Biopython ' 'to solve the problem.') from .TreeConstruction import DistanceMatrix, DistanceTreeConstructor if len(names) != distance_matrix.shape[0] or len(names) != distance_matrix.shape[1]: raise ValueError("Mismatch between the sizes of matrix and names.") method = method.lower().strip() if method in ['ward', 'single', 'average', 'weighted', 'centroid', 'median']: from scipy.cluster.hierarchy import linkage as hlinkage from scipy.spatial.distance import squareform Z = hlinkage(squareform(distance_matrix), method=method) tree = getTreeFromLinkage(names, Z) else: matrix = [] k = 1 Z = None for row in distance_matrix: matrix.append(list(row[:k])) k = k + 1 if isinstance(names, np.ndarray): names = names.tolist() dm = DistanceMatrix(names, matrix) constructor = DistanceTreeConstructor() method = method.strip().lower() if method == 'nj': tree = constructor.nj(dm) elif method == 'upgma': tree = constructor.upgma(dm) if linkage: Z = getLinkage(names, tree) else: raise ValueError('Method can only be "nj", "upgma", or a ' 'hierarchical clustering method such as "single", "average", etc.') for node in tree.get_nonterminals(): node.name = None if linkage: return tree, Z else: return tree def writeTree(filename, tree, format_str='newick'): """ Write a tree to file using Biopython. :arg filename: name for output file :type filename: str :arg tree: the tree to be written, e.g. as obtained from :func:`calcTree` :type tree: :class:`~Bio.Phylo.BaseTree.Tree` :arg format_str: a string specifying the format for the tree :type format_str: str """ try: from Bio import Phylo except ImportError: raise ImportError('Phylo module could not be imported. ' 'Reinstall ProDy or install Biopython ' 'to solve the problem.') if not isinstance(filename, str): raise TypeError('filename should be a string') if not isinstance(tree, Phylo.BaseTree.Tree): raise TypeError('tree should be a Biopython.Phylo Tree object') if not isinstance(format_str, str): raise TypeError('format_str should be a string') Phylo.write(tree, filename, format_str) def clusterMatrix(distance_matrix=None, similarity_matrix=None, labels=None, return_linkage=None, **kwargs): """ Cluster a distance matrix using scipy.cluster.hierarchy and return the sorted matrix, indices used for sorting, sorted labels (if **labels** are passed), and linkage matrix (if **return_linkage** is **True**). Pass **similarity_matrix** instead of **distance_matrix** to cluster a similarity matrix :arg distance_matrix: an N-by-N matrix containing some measure of distance such as 1. - seqid_matrix, rmsds, or distances in PCA space :type distance_matrix: :class:`~numpy.ndarray` :arg similarity_matrix: an N-by-N matrix containing some measure of similarity such as sequence identity, mode-mode overlap, or spectral overlap :type similarity_matrix: :class:`~numpy.ndarray` :arg labels: labels for each matrix row that can be returned sorted :type labels: list :arg no_plot: if **True**, don't plot the dendrogram. Default is **True** :type no_plot: bool :arg reversed: if set to **True**, then the sorting indices will be reversed. :type reversed: bool Other arguments for :func:`~scipy.cluster.hierarchy.linkage` and :func:`~scipy.cluster.hierarchy.dendrogram` can also be provided and will be taken as **kwargs**. 
""" import scipy.cluster.hierarchy as sch from scipy import spatial if similarity_matrix is None and distance_matrix is None: raise ValueError('Please provide a distance matrix or a similarity matrix') orientation = kwargs.pop('orientiation', 'right') reversed = kwargs.pop('reversed', False) no_plot = kwargs.pop('no_plot', True) if distance_matrix is None: matrix = similarity_matrix distance_matrix = 1. - similarity_matrix else: matrix = distance_matrix formatted_distance_matrix = spatial.distance.squareform(distance_matrix) linkage_matrix = sch.linkage(formatted_distance_matrix, **kwargs) sorting_dendrogram = sch.dendrogram(linkage_matrix, orientation=orientation, labels=labels, no_plot=no_plot) indices = sorting_dendrogram['leaves'] sorted_labels = sorting_dendrogram['ivl'] if reversed: indices = indices[::-1] sorted_labels = sorted_labels[::-1] sorted_matrix = matrix[indices, :] sorted_matrix = sorted_matrix[:, indices] return_vals = [sorted_matrix, indices] if labels is not None: return_vals.append(sorted_labels) if return_linkage: return_vals.append(linkage_matrix) return tuple(return_vals) # convert to tuple to avoid [pylint] E0632:Possible unbalanced tuple unpacking def showLines(*args, **kwargs): """ Show 1-D data using :func:`~matplotlib.axes.Axes.plot`. :arg x: (optional) x coordinates. *x* can be an 1-D array or a 2-D matrix of column vectors. :type x: :class:`~numpy.ndarray` :arg y: data array. *y* can be an 1-D array or a 2-D matrix of column vectors. :type y: :class:`~numpy.ndarray` :arg dy: an array of variances of *y* which will be plotted as a band along *y*. It should have the same shape with *y*. :type dy: :class:`~numpy.ndarray` :arg lower: an array of lower bounds which will be plotted as a band along *y*. It should have the same shape with *y* and should be paired with *upper*. :type lower: :class:`~numpy.ndarray` :arg upper: an array of upper bounds which will be plotted as a band along *y*. It should have the same shape with *y* and should be paired with *lower*. :type upper: :class:`~numpy.ndarray` :arg alpha: the transparency of the band(s) for plotting *dy*. :type alpha: float :arg beta: the transparency of the band(s) for plotting *miny* and *maxy*. :type beta: float :arg ticklabels: user-defined tick labels for x-axis. :type ticklabels: list """ # note for developers: this function serves as a low-level # plotting function which provides basic utilities for other # plotting functions. Therefore showFigure is not handled # in this function as it should be already handled in the caller. ticklabels = kwargs.pop('ticklabels', None) dy = kwargs.pop('dy', None) miny = kwargs.pop('lower', None) maxy = kwargs.pop('upper', None) alpha = kwargs.pop('alpha', 0.5) beta = kwargs.pop('beta', 0.25) gap = kwargs.pop('gap', False) labels = kwargs.pop('label', None) from matplotlib import cm, ticker from matplotlib.pyplot import figure, gca, xlim ax = gca() lines = ax.plot(*args, **kwargs) polys = [] for i, line in enumerate(lines): color = line.get_color() x, y = line.get_data() if gap: x_new, y_new = addEnds(x, y) line.set_data(x_new, y_new) else: x_new, y_new = x, y if labels is not None: if np.isscalar(labels): line.set_label(labels) else: try: line.set_label(labels[i]) except IndexError: raise ValueError('The number of labels ({0}) and that of y ({1}) do not match.' 
.format(len(labels), len(lines))) # the following function needs to be here so that line exists def sub_array(a, i, tag='a'): ndim = 0 if a is not None: if np.isscalar(a[0]): ndim = 1 # a plain list (array) else: ndim = 2 # a nested list (array) else: return None if ndim == 1: _a = a else: try: _a = a[i] except IndexError: raise ValueError('The number of {2} ({0}) and that of y ({1}) do not match.' .format(len(a), len(lines), tag)) if len(_a) != len(y): raise ValueError('The shapes of {2} ({0}) and y ({1}) do not match.' .format(len(_a), len(y), tag)) return _a if miny is not None and maxy is not None: _miny = sub_array(miny, i) _maxy = sub_array(maxy, i) if gap: _, _miny = addEnds(x, _miny) _, _maxy = addEnds(x, _maxy) poly = ax.fill_between(x_new, _miny, _maxy, alpha=beta, facecolor=color, edgecolor=None, linewidth=1, antialiased=True) polys.append(poly) if dy is not None: _dy = sub_array(dy, i) if gap: _, _dy = addEnds(x, _dy) poly = ax.fill_between(x_new, y_new-_dy, y_new+_dy, alpha=alpha, facecolor=color, edgecolor=None, linewidth=1, antialiased=True) polys.append(poly) ax.margins(x=0) if ticklabels is not None: if callable(ticklabels): ax.get_xaxis().set_major_formatter(ticker.FuncFormatter(ticklabels)) else: ax.get_xaxis().set_major_formatter(ticker.IndexFormatter(ticklabels)) ax.xaxis.set_major_locator(ticker.AutoLocator()) ax.xaxis.set_minor_locator(ticker.AutoMinorLocator()) return lines, polys def showMatrix(matrix, x_array=None, y_array=None, **kwargs): """Show a matrix using :meth:`~matplotlib.axes.Axes.imshow`. Curves on x- and y-axis can be added. :arg matrix: matrix to be displayed :type matrix: :class:`~numpy.ndarray` :arg x_array: data to be plotted above the matrix :type x_array: :class:`~numpy.ndarray` :arg y_array: data to be plotted on the left side of the matrix :type y_array: :class:`~numpy.ndarray` :arg percentile: a percentile threshold to remove outliers, i.e. only showing data within *p*-th to *100-p*-th percentile :type percentile: float :arg interactive: turn on or off the interactive options :type interactive: bool :arg xtickrotation: how much to rotate the xticklabels in degrees. Default is 0 :type xtickrotation: float """ from matplotlib import ticker from matplotlib.gridspec import GridSpec from matplotlib.collections import LineCollection from matplotlib.pyplot import gca, sca, sci, colorbar, subplot from .drawtools import drawTree p = kwargs.pop('percentile', None) vmin = vmax = None if p is not None: vmin = np.percentile(matrix, p) vmax = np.percentile(matrix, 100-p) vmin = kwargs.pop('vmin', vmin) vmax = kwargs.pop('vmax', vmax) vcenter = kwargs.pop('vcenter', None) norm = kwargs.pop('norm', None) if vcenter is not None and norm is None: if PY3K: try: from matplotlib.colors import DivergingNorm except ImportError: from matplotlib.colors import TwoSlopeNorm as DivergingNorm norm = DivergingNorm(vmin=vmin, vcenter=0., vmax=vmax) else: LOGGER.warn('vcenter cannot be used in Python 2 so norm remains None') lw = kwargs.pop('linewidth', 1) W = H = kwargs.pop('ratio', 6) ticklabels = kwargs.pop('ticklabels', None) xticklabels = kwargs.pop('xticklabels', ticklabels) yticklabels = kwargs.pop('yticklabels', ticklabels) xtickrotation = kwargs.pop('xtickrotation', 0.) 
show_colorbar = kwargs.pop('colorbar', True) cb_extend = kwargs.pop('cb_extend', 'neither') allticks = kwargs.pop('allticks', False) # this argument is temporary and will be replaced by better implementation interactive = kwargs.pop('interactive', True) cmap = kwargs.pop('cmap', 'jet') origin = kwargs.pop('origin', 'lower') try: from Bio import Phylo except ImportError: raise ImportError('Phylo module could not be imported. ' 'Reinstall ProDy or install Biopython ' 'to solve the problem.') tree_mode_y = isinstance(y_array, Phylo.BaseTree.Tree) tree_mode_x = isinstance(x_array, Phylo.BaseTree.Tree) if x_array is not None and y_array is not None: nrow = 2; ncol = 2 i = 1; j = 1 width_ratios = [1, W] height_ratios = [1, H] aspect = 'auto' elif x_array is not None and y_array is None: nrow = 2; ncol = 1 i = 1; j = 0 width_ratios = [W] height_ratios = [1, H] aspect = 'auto' elif x_array is None and y_array is not None: nrow = 1; ncol = 2 i = 0; j = 1 width_ratios = [1, W] height_ratios = [H] aspect = 'auto' else: nrow = 1; ncol = 1 i = 0; j = 0 width_ratios = [W] height_ratios = [H] aspect = kwargs.pop('aspect', None) main_index = (i, j) upper_index = (i-1, j) left_index = (i, j-1) complex_layout = nrow > 1 or ncol > 1 ax1 = ax2 = ax3 = None if complex_layout: gs = GridSpec(nrow, ncol, width_ratios=width_ratios, height_ratios=height_ratios, hspace=0., wspace=0.) ## draw matrix if complex_layout: ax3 = subplot(gs[main_index]) else: ax3 = gca() im = ax3.imshow(matrix, aspect=aspect, vmin=vmin, vmax=vmax, norm=norm, cmap=cmap, origin=origin, **kwargs) #ax3.set_xlim([-0.5, matrix.shape[0]+0.5]) #ax3.set_ylim([-0.5, matrix.shape[1]+0.5]) if xticklabels is not None: ax3.xaxis.set_major_formatter(ticker.IndexFormatter(xticklabels)) if yticklabels is not None and ncol == 1: ax3.yaxis.set_major_formatter(ticker.IndexFormatter(yticklabels)) if allticks: ax3.xaxis.set_major_locator(ticker.IndexLocator(offset=0.5, base=1.)) ax3.yaxis.set_major_locator(ticker.IndexLocator(offset=0.5, base=1.)) else: locator = ticker.AutoLocator() locator.set_params(integer=True) minor_locator = ticker.AutoMinorLocator() ax3.xaxis.set_major_locator(locator) ax3.xaxis.set_minor_locator(minor_locator) locator = ticker.AutoLocator() locator.set_params(integer=True) minor_locator = ticker.AutoMinorLocator() ax3.yaxis.set_major_locator(locator) ax3.yaxis.set_minor_locator(minor_locator) if ncol > 1: ax3.yaxis.set_major_formatter(ticker.NullFormatter()) ## draw x_ and y_array lines = [] if nrow > 1: ax1 = subplot(gs[upper_index]) if tree_mode_x: Y, X = drawTree(x_array, label_func=None, orientation='vertical', inverted=True) miny = min(Y.values()) maxy = max(Y.values()) minx = min(X.values()) maxx = max(X.values()) ax1.set_xlim(minx-.5, maxx+.5) ax1.set_ylim(miny, 1.05*maxy) else: ax1.set_xticklabels([]) y = x_array xp, yp = interpY(y) points = np.array([xp, yp]).T.reshape(-1, 1, 2) segments = np.concatenate([points[:-1], points[1:]], axis=1) lcy = LineCollection(segments, array=yp, linewidths=lw, cmap=cmap) lines.append(lcy) ax1.add_collection(lcy) ax1.set_xlim(xp.min()-.5, xp.max()+.5) ax1.set_ylim(yp.min(), yp.max()) if ax3.xaxis_inverted(): ax1.invert_xaxis() ax1.axis('off') if ncol > 1: ax2 = subplot(gs[left_index]) if tree_mode_y: X, Y = drawTree(y_array, label_func=None, inverted=True) miny = min(Y.values()) maxy = max(Y.values()) minx = min(X.values()) maxx = max(X.values()) ax2.set_ylim(miny-.5, maxy+.5) ax2.set_xlim(minx, 1.05*maxx) else: ax2.set_xticklabels([]) y = y_array xp, yp = interpY(y) points = np.array([yp, 
xp]).T.reshape(-1, 1, 2) segments = np.concatenate([points[:-1], points[1:]], axis=1) lcx = LineCollection(segments, array=yp, linewidths=lw, cmap=cmap) lines.append(lcx) ax2.add_collection(lcx) ax2.set_xlim(yp.min(), yp.max()) ax2.set_ylim(xp.min()-.5, xp.max()+.5) ax2.invert_xaxis() if ax3.yaxis_inverted(): ax2.invert_yaxis() ax2.axis('off') ## draw colorbar sca(ax3) cb = None if show_colorbar: if nrow > 1: axes = [ax1, ax2, ax3] while None in axes: axes.remove(None) s = H / (H + 1.) cb = colorbar(mappable=im, ax=axes, anchor=(0, 0), shrink=s, extend=cb_extend) else: cb = colorbar(mappable=im, extend=cb_extend) sca(ax3) sci(im) if interactive: from prody.utilities import ImageCursor from matplotlib.pyplot import connect cursor = ImageCursor(ax3, im) connect('button_press_event', cursor.onClick) ax3.tick_params(axis='x', rotation=xtickrotation) return im, lines, cb def reorderMatrix(names, matrix, tree, axis=None): """ Reorder a matrix based on a tree and return the reordered matrix and indices for reordering other things. :arg names: a list of names associated with the rows of the matrix These names must match the ones used to generate the tree :type names: list :arg matrix: any square matrix :type matrix: :class:`~numpy.ndarray` :arg tree: any tree from :func:`calcTree` :type tree: :class:`~Bio.Phylo.BaseTree.Tree` :arg axis: along which axis the matrix should be reordered. Default is **None** which reorder along all the axes :type axis: int """ try: from Bio import Phylo except ImportError: raise ImportError('Phylo module could not be imported. ' 'Reinstall ProDy or install Biopython ' 'to solve the problem.') try: if matrix.ndim != 2: raise ValueError('matrix should be a 2D matrix.') except AttributeError: raise TypeError('matrix should be a numpy array.') if np.shape(matrix)[0] != np.shape(matrix)[1]: raise ValueError('matrix should be a square matrix') names = np.asarray(names) if np.isscalar(names): raise TypeError('names should be list-like') if not len(names): raise TypeError('names is empty') if not isinstance(tree, Phylo.BaseTree.Tree): raise TypeError('tree should be a BioPython Tree') if len(names) != len(matrix): raise ValueError('names should have entries for each matrix row/column') terminals = tree.get_terminals() if len(names) != len(terminals): raise ValueError('names should have entries for each tree terminal') if len(terminals) != len(matrix): raise ValueError('matrix should have a row for each tree terminal') indices = [] for terminal in terminals: name = terminal.name locs = np.where(names == name)[0] if not len(locs): raise ValueError('inconsistent names and tree: %s not in names'%name) if len(locs) > 1: raise ValueError('inconsistent names and tree: duplicate name %s in names'%name) indices.append(locs[0]) # rmatrix = matrix[:, indices] # rmatrix = rmatrix[indices, :] if axis is not None: I = [np.arange(s) for s in matrix.shape] axes = [axis] if np.isscalar(axis) else axis for ax in axes: I[ax] = indices else: I = [indices] * matrix.ndim rmatrix = matrix[np.ix_(*I)] return rmatrix, indices def findSubgroups(tree, c, method='naive', **kwargs):
""" Divide a tree into subgroups using a criterion and a cutoff. Returns a list of lists with labels divided into subgroups. """ method = method.lower().strip() terminals = tree.get_terminals() names = [clade.name for clade in terminals] Z = None if method != 'naive': try: Z = getLinkage(names, tree) except LinkageError: print('Failed to build linkage; fall back to naive criterion') method = 'naive' if method == 'naive': subgroups = [[names[0]]] for i in range(len(terminals)-1): curr_clade = terminals[i] next_clade = terminals[i + 1] d = tree.distance(curr_clade, next_clade) if d > c: subgroups.append([]) subgroups[-1].append(next_clade.name) else: from scipy.cluster.hierarchy import fcluster T = fcluster(Z, c, criterion=method, **kwargs) labels = np.unique(T) subgroups = [[] for _ in range(len(labels))] for i, t in enumerate(T): subgroups[t-1].append(names[i]) return subgroups
mysql_processor.py
# -*- coding: utf-8 -*- from .processor import QueryProcessor class MySqlQueryProcessor(QueryProcessor): def
(self, query, sql, values, sequence=None): """ Process an "insert get ID" query. :param query: A QueryBuilder instance :type query: QueryBuilder :param sql: The sql query to execute :type sql: str :param values: The value bindings :type values: list :param sequence: The ids sequence :type sequence: str :return: The inserted row id :rtype: int """ if not query.get_connection().transaction_level(): with query.get_connection().transaction(): query.get_connection().insert(sql, values) cursor = query.get_connection().get_cursor() if hasattr(cursor, 'lastrowid'): id = cursor.lastrowid else: id = query.get_connection().statement('SELECT LAST_INSERT_ID()') else: query.get_connection().insert(sql, values) cursor = query.get_connection().get_cursor() if hasattr(cursor, 'lastrowid'): id = cursor.lastrowid else: id = query.get_connection().statement('SELECT LAST_INSERT_ID()') if isinstance(id, int): return id if str(id).isdigit(): return int(id) return id def process_column_listing(self, results): """ Process the results of a column listing query :param results: The query results :type results: dict :return: The processed column names :rtype: list """ return map(lambda x: x['column_name'], results)
process_insert_get_id
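When the `middle` field (`process_insert_get_id`) is spliced back after `def`, the method wraps the insert in a transaction when none is active and then reads the new id from the cursor, falling back to `SELECT LAST_INSERT_ID()`. A minimal sketch of the same fallback with a plain DB-API connection; `insert_get_id` and `connection` are hypothetical names, not Orator's API:

def insert_get_id(connection, sql, values):
    # Hypothetical helper illustrating the pattern above, not Orator's API.
    cursor = connection.cursor()
    cursor.execute(sql, values)
    row_id = getattr(cursor, 'lastrowid', None)
    if not row_id:
        # Fall back to asking MySQL directly for the last auto-increment id.
        cursor.execute('SELECT LAST_INSERT_ID()')
        row_id = cursor.fetchone()[0]
    return int(row_id) if str(row_id).isdigit() else row_id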
temperature_reporter.py
from modules.lib.reporter import Reporter from modules.lib.report import Report from modules.lib.alarm_machine import AlarmMachine class TemperatureReporter(Reporter):
def data_type(self): return 'temperature' def report(self): with open('/sys/class/thermal/thermal_zone0/temp') as file: temp = int(file.read()) / float(1000) report = Report.report_now( 'measurement', type='temperature', key='zone_0', value=temp, unit='c' ) alarm = None if self.alarm_machine() is not None: alarm = self.alarm_machine().judge('temperature', temp, report.reported_at) return report, alarm
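The reporter reads the kernel's thermal zone file, which exposes the temperature in millidegrees Celsius, so dividing by 1000 yields degrees. A standalone sketch of just that conversion (the sysfs path is the one used above; the helper name is made up):

def read_cpu_temperature(path='/sys/class/thermal/thermal_zone0/temp'):
    # thermal_zone0 reports millidegrees Celsius, e.g. '47234\n' -> 47.234
    with open(path) as f:
        return int(f.read()) / 1000.0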
permutation-sequence_test.go
package problem0060 import ( "fmt" "testing" "github.com/stretchr/testify/assert" ) type question struct { para ans } // para holds the input parameters type para struct { n int k int } // ans holds the expected answer type ans struct { one string } func Test_Problem
T) { ast := assert.New(t) qs := []question{ question{ para{ 4, 4, }, ans{ "1342", }, }, question{ para{ 0, 4, }, ans{ "", }, }, question{ para{ 3, 3, }, ans{ "213", }, }, // To add more test cases, copy one of the elements above. } for _, q := range qs { a, p := q.ans, q.para fmt.Printf("~~%v~~\n", p) ast.Equal(a.one, getPermutation(p.n, p.k), "input: %v", p) } }
0060(t *testing.
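The test exercises getPermutation from LeetCode problem 60 ("Permutation Sequence"); the Go implementation itself is not part of this record. The usual approach is the factorial number system: treat k-1 as a mixed-radix number whose digits select, in turn, from the remaining characters. A Python sketch of that idea (it reproduces the expected outputs above, including the empty string for n = 0):

from math import factorial

def get_permutation(n, k):
    # Sketch of the factorial number system approach; not the Go code under test.
    if n == 0:
        return ''
    digits = [str(i) for i in range(1, n + 1)]
    k -= 1                               # switch to a 0-based rank
    result = []
    for i in range(n, 0, -1):
        block = factorial(i - 1)         # permutations sharing the next digit
        index, k = divmod(k, block)
        result.append(digits.pop(index))
    return ''.join(result)

# get_permutation(4, 4) == '1342'; get_permutation(3, 3) == '213'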
require.rs
mod utils; use clap::{App, Arg, ArgGroup, ErrorKind}; static REQUIRE_EQUALS: &str = "error: The following required arguments were not provided: --opt=<FILE> USAGE: clap-test --opt=<FILE> For more information try --help "; static REQUIRE_EQUALS_FILTERED: &str = "error: The following required arguments were not provided: --opt=<FILE> USAGE: clap-test --opt=<FILE> --foo=<FILE> For more information try --help "; static REQUIRE_EQUALS_FILTERED_GROUP: &str = "error: The following required arguments were not provided: --opt=<FILE> USAGE: clap-test --opt=<FILE> --foo=<FILE> <--g1=<FILE>|--g2=<FILE>> For more information try --help "; static MISSING_REQ: &str = "error: The following required arguments were not provided: --long-option-2 <option2> <positional2> USAGE: clap-test --long-option-2 <option2> -F <positional2> For more information try --help "; static COND_REQ_IN_USAGE: &str = "error: The following required arguments were not provided: --output <output> USAGE: test --target <target> --input <input> --output <output> For more information try --help "; #[test] fn flag_required() { let result = App::new("flag_required") .arg(Arg::from("-f, --flag 'some flag'").requires("color")) .arg(Arg::from("-c, --color 'third flag'")) .try_get_matches_from(vec!["", "-f"]); assert!(result.is_err()); let err = result.err().unwrap(); assert_eq!(err.kind, ErrorKind::MissingRequiredArgument); } #[test] fn flag_required_2() { let m = App::new("flag_required") .arg(Arg::from("-f, --flag 'some flag'").requires("color")) .arg(Arg::from("-c, --color 'third flag'")) .get_matches_from(vec!["", "-f", "-c"]); assert!(m.is_present("color")); assert!(m.is_present("flag")); } #[test] fn option_required() { let result = App::new("option_required") .arg(Arg::from("-f [flag] 'some flag'").requires("c")) .arg(Arg::from("-c [color] 'third flag'")) .try_get_matches_from(vec!["", "-f", "val"]); assert!(result.is_err()); let err = result.err().unwrap(); assert_eq!(err.kind, ErrorKind::MissingRequiredArgument); } #[test] fn option_required_2() { let m = App::new("option_required") .arg(Arg::from("-f [flag] 'some flag'").requires("c")) .arg(Arg::from("-c [color] 'third flag'")) .get_matches_from(vec!["", "-f", "val", "-c", "other_val"]); assert!(m.is_present("c")); assert_eq!(m.value_of("c").unwrap(), "other_val"); assert!(m.is_present("f")); assert_eq!(m.value_of("f").unwrap(), "val"); } #[test] fn positional_required() { let result = App::new("positional_required") .arg(Arg::new("flag").index(1).required(true)) .try_get_matches_from(vec![""]); assert!(result.is_err()); let err = result.err().unwrap(); assert_eq!(err.kind, ErrorKind::MissingRequiredArgument); } #[test] fn positional_required_2() { let m = App::new("positional_required") .arg(Arg::new("flag").index(1).required(true)) .get_matches_from(vec!["", "someval"]); assert!(m.is_present("flag")); assert_eq!(m.value_of("flag").unwrap(), "someval"); } #[test] fn group_required() { let result = App::new("group_required") .arg(Arg::from("-f, --flag 'some flag'")) .group(ArgGroup::new("gr").required(true).arg("some").arg("other")) .arg(Arg::from("--some 'some arg'")) .arg(Arg::from("--other 'other arg'")) .try_get_matches_from(vec!["", "-f"]); assert!(result.is_err()); let err = result.err().unwrap(); assert_eq!(err.kind, ErrorKind::MissingRequiredArgument); } #[test] fn group_required_2() { let m = App::new("group_required") .arg(Arg::from("-f, --flag 'some flag'")) .group(ArgGroup::new("gr").required(true).arg("some").arg("other")) .arg(Arg::from("--some 'some arg'")) 
.arg(Arg::from("--other 'other arg'")) .get_matches_from(vec!["", "-f", "--some"]); assert!(m.is_present("some")); assert!(!m.is_present("other")); assert!(m.is_present("flag")); } #[test] fn group_required_3() { let m = App::new("group_required") .arg(Arg::from("-f, --flag 'some flag'")) .group(ArgGroup::new("gr").required(true).arg("some").arg("other")) .arg(Arg::from("--some 'some arg'")) .arg(Arg::from("--other 'other arg'")) .get_matches_from(vec!["", "-f", "--other"]); assert!(!m.is_present("some")); assert!(m.is_present("other")); assert!(m.is_present("flag")); } #[test] fn arg_require_group() { let result = App::new("arg_require_group") .arg(Arg::from("-f, --flag 'some flag'").requires("gr")) .group(ArgGroup::new("gr").arg("some").arg("other")) .arg(Arg::from("--some 'some arg'")) .arg(Arg::from("--other 'other arg'")) .try_get_matches_from(vec!["", "-f"]); assert!(result.is_err()); let err = result.err().unwrap(); assert_eq!(err.kind, ErrorKind::MissingRequiredArgument); } #[test] fn arg_require_group_2() { let res = App::new("arg_require_group") .arg(Arg::from("-f, --flag 'some flag'").requires("gr")) .group(ArgGroup::new("gr").arg("some").arg("other")) .arg(Arg::from("--some 'some arg'")) .arg(Arg::from("--other 'other arg'")) .try_get_matches_from(vec!["", "-f", "--some"]); assert!(res.is_ok()); let m = res.unwrap(); assert!(m.is_present("some")); assert!(!m.is_present("other")); assert!(m.is_present("flag")); } #[test] fn arg_require_group_3() { let res = App::new("arg_require_group") .arg(Arg::from("-f, --flag 'some flag'").requires("gr")) .group(ArgGroup::new("gr").arg("some").arg("other")) .arg(Arg::from("--some 'some arg'")) .arg(Arg::from("--other 'other arg'")) .try_get_matches_from(vec!["", "-f", "--other"]); assert!(res.is_ok()); let m = res.unwrap(); assert!(!m.is_present("some")); assert!(m.is_present("other")); assert!(m.is_present("flag")); } // REQUIRED_UNLESS #[test] fn issue_753() { let m = App::new("test") .arg(Arg::from( "-l, --list 'List available interfaces (and stop there)'", )) .arg( Arg::from("-i, --iface=[INTERFACE] 'Ethernet interface for fetching NTP packets'") .required_unless_present("list"), ) .arg( Arg::from("-f, --file=[TESTFILE] 'Fetch NTP packets from pcap file'") .conflicts_with("iface") .required_unless_present("list"), ) .arg( Arg::from("-s, --server=[SERVER_IP] 'NTP server IP address'") .required_unless_present("list"), ) .arg(Arg::from("-p, --port=[SERVER_PORT] 'NTP server port'").default_value("123")) .try_get_matches_from(vec!["test", "--list"]); assert!(m.is_ok()); } #[test] fn required_unless_present() { let res = App::new("unlesstest") .arg( Arg::new("cfg") .required_unless_present("dbg") .takes_value(true) .long("config"), ) .arg(Arg::new("dbg").long("debug")) .try_get_matches_from(vec!["unlesstest", "--debug"]); assert!(res.is_ok()); let m = res.unwrap(); assert!(m.is_present("dbg")); assert!(!m.is_present("cfg")); } #[test] fn required_unless_err() { let res = App::new("unlesstest") .arg( Arg::new("cfg") .required_unless_present("dbg") .takes_value(true) .long("config"), ) .arg(Arg::new("dbg").long("debug")) .try_get_matches_from(vec!["unlesstest"]); assert!(res.is_err()); assert_eq!(res.unwrap_err().kind, ErrorKind::MissingRequiredArgument); } // REQUIRED_UNLESS_ALL #[test] fn required_unless_present_all() { let res = App::new("unlessall") .arg( Arg::new("cfg") .required_unless_present_all(&["dbg", "infile"]) .takes_value(true) .long("config"), ) .arg(Arg::new("dbg").long("debug")) 
.arg(Arg::new("infile").short('i').takes_value(true)) .try_get_matches_from(vec!["unlessall", "--debug", "-i", "file"]); assert!(res.is_ok()); let m = res.unwrap(); assert!(m.is_present("dbg")); assert!(m.is_present("infile")); assert!(!m.is_present("cfg")); } #[test] fn required_unless_all_err() { let res = App::new("unlessall") .arg( Arg::new("cfg") .required_unless_present_all(&["dbg", "infile"]) .takes_value(true) .long("config"), ) .arg(Arg::new("dbg").long("debug")) .arg(Arg::new("infile").short('i').takes_value(true)) .try_get_matches_from(vec!["unlessall", "--debug"]); assert!(res.is_err()); assert_eq!(res.unwrap_err().kind, ErrorKind::MissingRequiredArgument); } // REQUIRED_UNLESS_ONE #[test] fn required_unless_present_any() { let res = App::new("unlessone") .arg( Arg::new("cfg") .required_unless_present_any(&["dbg", "infile"]) .takes_value(true) .long("config"), ) .arg(Arg::new("dbg").long("debug")) .arg(Arg::new("infile").short('i').takes_value(true)) .try_get_matches_from(vec!["unlessone", "--debug"]); assert!(res.is_ok()); let m = res.unwrap(); assert!(m.is_present("dbg")); assert!(!m.is_present("cfg")); } #[test] fn required_unless_any_2()
#[test] fn required_unless_any_works_with_short() { // GitHub issue: https://github.com/kbknapp/clap-rs/issues/1135 let res = App::new("unlessone") .arg(Arg::new("a").conflicts_with("b").short('a')) .arg(Arg::new("b").short('b')) .arg( Arg::new("x") .short('x') .required_unless_present_any(&["a", "b"]), ) .try_get_matches_from(vec!["unlessone", "-a"]); assert!(res.is_ok()); } #[test] fn required_unless_any_works_with_short_err() { let res = App::new("unlessone") .arg(Arg::new("a").conflicts_with("b").short('a')) .arg(Arg::new("b").short('b')) .arg( Arg::new("x") .short('x') .required_unless_present_any(&["a", "b"]), ) .try_get_matches_from(vec!["unlessone"]); assert!(!res.is_ok()); } #[test] fn required_unless_any_works_without() { let res = App::new("unlessone") .arg(Arg::new("a").conflicts_with("b").short('a')) .arg(Arg::new("b").short('b')) .arg(Arg::new("x").required_unless_present_any(&["a", "b"])) .try_get_matches_from(vec!["unlessone", "-a"]); assert!(res.is_ok()); } #[test] fn required_unless_any_works_with_long() { let res = App::new("unlessone") .arg(Arg::new("a").conflicts_with("b").short('a')) .arg(Arg::new("b").short('b')) .arg( Arg::new("x") .long("x_is_the_option") .required_unless_present_any(&["a", "b"]), ) .try_get_matches_from(vec!["unlessone", "-a"]); assert!(res.is_ok()); } #[test] fn required_unless_any_1() { let res = App::new("unlessone") .arg( Arg::new("cfg") .required_unless_present_any(&["dbg", "infile"]) .takes_value(true) .long("config"), ) .arg(Arg::new("dbg").long("debug")) .arg(Arg::new("infile").short('i').takes_value(true)) .try_get_matches_from(vec!["unlessone", "--debug"]); assert!(res.is_ok()); let m = res.unwrap(); assert!(!m.is_present("infile")); assert!(!m.is_present("cfg")); assert!(m.is_present("dbg")); } #[test] fn required_unless_any_err() { let res = App::new("unlessone") .arg( Arg::new("cfg") .required_unless_present_any(&["dbg", "infile"]) .takes_value(true) .long("config"), ) .arg(Arg::new("dbg").long("debug")) .arg(Arg::new("infile").short('i').takes_value(true)) .try_get_matches_from(vec!["unlessone"]); assert!(res.is_err()); assert_eq!(res.unwrap_err().kind, ErrorKind::MissingRequiredArgument); } #[test] fn missing_required_output() { assert!(utils::compare_output( utils::complex_app(), "clap-test -F", MISSING_REQ, true )); } // Conditional external requirements #[test] fn requires_if_present_val() { let res = App::new("unlessone") .arg( Arg::new("cfg") .requires_if("my.cfg", "extra") .takes_value(true) .long("config"), ) .arg(Arg::new("extra").long("extra")) .try_get_matches_from(vec!["unlessone", "--config=my.cfg"]); assert!(res.is_err()); assert_eq!(res.unwrap_err().kind, ErrorKind::MissingRequiredArgument); } #[test] fn requires_if_present_mult() { let res = App::new("unlessone") .arg( Arg::new("cfg") .requires_ifs(&[("my.cfg", "extra"), ("other.cfg", "other")]) .takes_value(true) .long("config"), ) .arg(Arg::new("extra").long("extra")) .arg(Arg::new("other").long("other")) .try_get_matches_from(vec!["unlessone", "--config=other.cfg"]); assert!(res.is_err()); assert_eq!(res.unwrap_err().kind, ErrorKind::MissingRequiredArgument); } #[test] fn requires_if_present_mult_pass() { let res = App::new("unlessone") .arg( Arg::new("cfg") .requires_ifs(&[("my.cfg", "extra"), ("other.cfg", "other")]) .takes_value(true) .long("config"), ) .arg(Arg::new("extra").long("extra")) .arg(Arg::new("other").long("other")) .try_get_matches_from(vec!["unlessone", "--config=some.cfg"]); assert!(res.is_ok()); } #[test] fn 
requires_if_present_val_no_present_pass() { let res = App::new("unlessone") .arg( Arg::new("cfg") .requires_if("my.cfg", "extra") .takes_value(true) .long("config"), ) .arg(Arg::new("extra").long("extra")) .try_get_matches_from(vec!["unlessone"]); assert!(res.is_ok()); } // Conditionally required #[test] fn required_if_val_present_pass() { let res = App::new("ri") .arg( Arg::new("cfg") .required_if_eq("extra", "val") .takes_value(true) .long("config"), ) .arg(Arg::new("extra").takes_value(true).long("extra")) .try_get_matches_from(vec!["ri", "--extra", "val", "--config", "my.cfg"]); assert!(res.is_ok()); } #[test] fn required_if_val_present_fail() { let res = App::new("ri") .arg( Arg::new("cfg") .required_if_eq("extra", "val") .takes_value(true) .long("config"), ) .arg(Arg::new("extra").takes_value(true).long("extra")) .try_get_matches_from(vec!["ri", "--extra", "val"]); assert!(res.is_err()); assert_eq!(res.unwrap_err().kind, ErrorKind::MissingRequiredArgument); } #[test] fn required_if_val_present_case_insensitive_pass() { let res = App::new("ri") .arg( Arg::new("cfg") .required_if_eq("extra", "Val") .takes_value(true) .long("config"), ) .arg( Arg::new("extra") .takes_value(true) .long("extra") .case_insensitive(true), ) .try_get_matches_from(vec!["ri", "--extra", "vaL", "--config", "my.cfg"]); assert!(res.is_ok()); } #[test] fn required_if_val_present_case_insensitive_fail() { let res = App::new("ri") .arg( Arg::new("cfg") .required_if_eq("extra", "Val") .takes_value(true) .long("config"), ) .arg( Arg::new("extra") .takes_value(true) .long("extra") .case_insensitive(true), ) .try_get_matches_from(vec!["ri", "--extra", "vaL"]); assert!(res.is_err()); assert_eq!(res.unwrap_err().kind, ErrorKind::MissingRequiredArgument); } #[test] fn required_if_all_values_present_pass() { let res = App::new("ri") .arg( Arg::new("cfg") .required_if_eq_all(&[("extra", "val"), ("option", "spec")]) .takes_value(true) .long("config"), ) .arg(Arg::new("extra").takes_value(true).long("extra")) .arg(Arg::new("option").takes_value(true).long("option")) .try_get_matches_from(vec![ "ri", "--extra", "val", "--option", "spec", "--config", "my.cfg", ]); assert!(res.is_ok()); } #[test] fn required_if_some_values_present_pass() { let res = App::new("ri") .arg( Arg::new("cfg") .required_if_eq_all(&[("extra", "val"), ("option", "spec")]) .takes_value(true) .long("config"), ) .arg(Arg::new("extra").takes_value(true).long("extra")) .arg(Arg::new("option").takes_value(true).long("option")) .try_get_matches_from(vec!["ri", "--extra", "val"]); assert!(res.is_ok()); } #[test] fn required_if_all_values_present_fail() { let res = App::new("ri") .arg( Arg::new("cfg") .required_if_eq_all(&[("extra", "val"), ("option", "spec")]) .takes_value(true) .long("config"), ) .arg(Arg::new("extra").takes_value(true).long("extra")) .arg(Arg::new("option").takes_value(true).long("option")) .try_get_matches_from(vec!["ri", "--extra", "val", "--option", "spec"]); assert!(res.is_err()); assert_eq!(res.unwrap_err().kind, ErrorKind::MissingRequiredArgument); } #[test] fn required_if_any_all_values_present_pass() { let res = App::new("ri") .arg( Arg::new("cfg") .required_if_eq_all(&[("extra", "val"), ("option", "spec")]) .required_if_eq_any(&[("extra", "val2"), ("option", "spec2")]) .takes_value(true) .long("config"), ) .arg(Arg::new("extra").takes_value(true).long("extra")) .arg(Arg::new("option").takes_value(true).long("option")) .try_get_matches_from(vec![ "ri", "--extra", "val", "--option", "spec", "--config", "my.cfg", ]); assert!(res.is_ok()); 
} #[test] fn required_if_any_all_values_present_fail() { let res = App::new("ri") .arg( Arg::new("cfg") .required_if_eq_all(&[("extra", "val"), ("option", "spec")]) .required_if_eq_any(&[("extra", "val2"), ("option", "spec2")]) .takes_value(true) .long("config"), ) .arg(Arg::new("extra").takes_value(true).long("extra")) .arg(Arg::new("option").takes_value(true).long("option")) .try_get_matches_from(vec!["ri", "--extra", "val", "--option", "spec"]); assert!(res.is_err()); assert_eq!(res.unwrap_err().kind, ErrorKind::MissingRequiredArgument); } #[test] fn list_correct_required_args() { let app = App::new("Test app") .version("1.0") .author("F0x06") .about("Arg test") .arg( Arg::new("target") .takes_value(true) .required(true) .possible_values(["file", "stdout"]) .long("target"), ) .arg( Arg::new("input") .takes_value(true) .required(true) .long("input"), ) .arg( Arg::new("output") .takes_value(true) .required(true) .long("output"), ); assert!(utils::compare_output( app, "test --input somepath --target file", COND_REQ_IN_USAGE, true )); } #[test] fn required_if_val_present_fail_error_output() { let app = App::new("Test app") .version("1.0") .author("F0x06") .about("Arg test") .arg( Arg::new("target") .takes_value(true) .required(true) .possible_values(["file", "stdout"]) .long("target"), ) .arg( Arg::new("input") .takes_value(true) .required(true) .long("input"), ) .arg( Arg::new("output") .takes_value(true) .required_if_eq("target", "file") .long("output"), ); assert!(utils::compare_output( app, "test --input somepath --target file", COND_REQ_IN_USAGE, true )); } #[test] fn required_if_wrong_val() { let res = App::new("ri") .arg( Arg::new("cfg") .required_if_eq("extra", "val") .takes_value(true) .long("config"), ) .arg(Arg::new("extra").takes_value(true).long("extra")) .try_get_matches_from(vec!["ri", "--extra", "other"]); assert!(res.is_ok()); } #[test] fn required_ifs_val_present_pass() { let res = App::new("ri") .arg( Arg::new("cfg") .required_if_eq_any(&[("extra", "val"), ("option", "spec")]) .takes_value(true) .long("config"), ) .arg(Arg::new("option").takes_value(true).long("option")) .arg(Arg::new("extra").takes_value(true).long("extra")) .try_get_matches_from(vec!["ri", "--option", "spec", "--config", "my.cfg"]); assert!(res.is_ok()); } #[test] fn required_ifs_val_present_fail() { let res = App::new("ri") .arg( Arg::new("cfg") .required_if_eq_any(&[("extra", "val"), ("option", "spec")]) .takes_value(true) .long("config"), ) .arg(Arg::new("extra").takes_value(true).long("extra")) .arg(Arg::new("option").takes_value(true).long("option")) .try_get_matches_from(vec!["ri", "--option", "spec"]); assert!(res.is_err()); assert_eq!(res.unwrap_err().kind, ErrorKind::MissingRequiredArgument); } #[test] fn required_ifs_wrong_val() { let res = App::new("ri") .arg( Arg::new("cfg") .required_if_eq_any(&[("extra", "val"), ("option", "spec")]) .takes_value(true) .long("config"), ) .arg(Arg::new("extra").takes_value(true).long("extra")) .arg(Arg::new("option").takes_value(true).long("option")) .try_get_matches_from(vec!["ri", "--option", "other"]); assert!(res.is_ok()); } #[test] fn required_ifs_wrong_val_mult_fail() { let res = App::new("ri") .arg( Arg::new("cfg") .required_if_eq_any(&[("extra", "val"), ("option", "spec")]) .takes_value(true) .long("config"), ) .arg(Arg::new("extra").takes_value(true).long("extra")) .arg(Arg::new("option").takes_value(true).long("option")) .try_get_matches_from(vec!["ri", "--extra", "other", "--option", "spec"]); assert!(res.is_err()); 
assert_eq!(res.unwrap_err().kind, ErrorKind::MissingRequiredArgument); } #[test] fn require_eq() { let app = App::new("clap-test").version("v1.4.8").arg( Arg::new("opt") .long("opt") .short('o') .required(true) .require_equals(true) .value_name("FILE") .about("some"), ); assert!(utils::compare_output( app, "clap-test", REQUIRE_EQUALS, true )); } #[test] fn require_eq_filtered() { let app = App::new("clap-test") .version("v1.4.8") .arg( Arg::new("opt") .long("opt") .short('o') .required(true) .require_equals(true) .value_name("FILE") .about("some"), ) .arg( Arg::new("foo") .long("foo") .short('f') .required(true) .require_equals(true) .value_name("FILE") .about("some other arg"), ); assert!(utils::compare_output( app, "clap-test -f=blah", REQUIRE_EQUALS_FILTERED, true )); } #[test] fn require_eq_filtered_group() { let app = App::new("clap-test") .version("v1.4.8") .arg( Arg::new("opt") .long("opt") .short('o') .required(true) .require_equals(true) .value_name("FILE") .about("some"), ) .arg( Arg::new("foo") .long("foo") .short('f') .required(true) .require_equals(true) .value_name("FILE") .about("some other arg"), ) .arg( Arg::new("g1") .long("g1") .require_equals(true) .value_name("FILE"), ) .arg( Arg::new("g2") .long("g2") .require_equals(true) .value_name("FILE"), ) .group( ArgGroup::new("test_group") .args(&["g1", "g2"]) .required(true), ); assert!(utils::compare_output( app, "clap-test -f=blah --g1=blah", REQUIRE_EQUALS_FILTERED_GROUP, true )); } static ISSUE_1158: &str = "error: The following required arguments were not provided: -x <X> -y <Y> -z <Z> USAGE: example -x <X> -y <Y> -z <Z> <ID> For more information try --help "; fn issue_1158_app() -> App<'static> { App::new("example") .arg( Arg::from("-c, --config [FILE] 'Custom config file.'") .required_unless_present("ID") .conflicts_with("ID"), ) .arg( Arg::from("[ID] 'ID'") .required_unless_present("config") .conflicts_with("config") .requires_all(&["x", "y", "z"]), ) .arg(Arg::from("-x [X] 'X'")) .arg(Arg::from("-y [Y] 'Y'")) .arg(Arg::from("-z [Z] 'Z'")) } #[test] fn multiple_required_unless_usage_printing() { static MULTIPLE_REQUIRED_UNLESS_USAGE: &str = "error: The following required arguments were not provided: --a <a> --b <b> USAGE: test --c <c> --a <a> --b <b> For more information try --help "; let app = App::new("test") .arg( Arg::new("a") .long("a") .takes_value(true) .required_unless_present("b") .conflicts_with("b"), ) .arg( Arg::new("b") .long("b") .takes_value(true) .required_unless_present("a") .conflicts_with("a"), ) .arg( Arg::new("c") .long("c") .takes_value(true) .required_unless_present("d") .conflicts_with("d"), ) .arg( Arg::new("d") .long("d") .takes_value(true) .required_unless_present("c") .conflicts_with("c"), ); assert!(utils::compare_output( app, "test --c asd", MULTIPLE_REQUIRED_UNLESS_USAGE, true )); } #[test] fn issue_1158_conflicting_requirements() { let app = issue_1158_app(); assert!(utils::compare_output(app, "example id", ISSUE_1158, true)); } #[test] fn issue_1158_conflicting_requirements_rev() { let res = issue_1158_app().try_get_matches_from(&["", "--config", "some.conf"]); assert!(res.is_ok()); } #[test] fn issue_1643_args_mutually_require_each_other() { use clap::*; let app = App::new("test") .arg( Arg::new("relation_id") .about("The relation id to get the data from") .long("relation-id") .short('r') .takes_value(true) .requires("remote_unit_name"), ) .arg( Arg::new("remote_unit_name") .about("The name of the remote unit to get data from") .long("remote-unit") .short('u') .takes_value(true) 
.requires("relation_id"), ); app.get_matches_from(&["test", "-u", "hello", "-r", "farewell"]); } #[test] fn short_flag_require_equals_with_minvals_zero() { let m = App::new("foo") .arg( Arg::new("check") .short('c') .min_values(0) .require_equals(true), ) .arg(Arg::new("unique").short('u')) .get_matches_from(&["foo", "-cu"]); assert!(m.is_present("check")); assert!(m.is_present("unique")); } #[test] fn issue_2624() { let matches = App::new("foo") .arg( Arg::new("check") .short('c') .long("check") .require_equals(true) .min_values(0) .possible_values(["silent", "quiet", "diagnose-first"]), ) .arg(Arg::new("unique").short('u').long("unique")) .get_matches_from(&["foo", "-cu"]); assert!(matches.is_present("check")); assert!(matches.is_present("unique")); } #[cfg(debug_assertions)] #[test] #[should_panic = "Argument or group 'extra' specified in 'requires*' for 'config' does not exist"] fn requires_invalid_arg() { let _ = App::new("prog") .arg(Arg::new("config").requires("extra").long("config")) .try_get_matches_from(vec!["", "--config"]); } #[cfg(debug_assertions)] #[test] #[should_panic = "Argument or group 'extra' specified in 'requires*' for 'config' does not exist"] fn requires_if_invalid_arg() { let _ = App::new("prog") .arg( Arg::new("config") .requires_if("val", "extra") .long("config"), ) .try_get_matches_from(vec!["", "--config"]); } #[cfg(debug_assertions)] #[test] #[should_panic = "Argument or group 'extra' specified in 'required_if_eq*' for 'config' does not exist"] fn required_if_invalid_arg() { let _ = App::new("prog") .arg( Arg::new("config") .required_if_eq("extra", "val") .long("config"), ) .try_get_matches_from(vec!["", "--config"]); } #[cfg(debug_assertions)] #[test] #[should_panic = "Argument or group 'extra' specified in 'required_unless*' for 'config' does not exist"] fn required_unless_invalid_arg() { let _ = App::new("prog") .arg( Arg::new("config") .required_unless_present("extra") .long("config"), ) .try_get_matches_from(vec![""]); }
{ // This tests that the required_unless_present_any works when the second arg in the array is used // instead of the first. let res = App::new("unlessone") .arg( Arg::new("cfg") .required_unless_present_any(&["dbg", "infile"]) .takes_value(true) .long("config"), ) .arg(Arg::new("dbg").long("debug")) .arg(Arg::new("infile").short('i').takes_value(true)) .try_get_matches_from(vec!["unlessone", "-i", "file"]); assert!(res.is_ok()); let m = res.unwrap(); assert!(m.is_present("infile")); assert!(!m.is_present("cfg")); }
reconciler.go
/* Copyright 2022 TriggerMesh Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by injection-gen. DO NOT EDIT. package splitter import ( context "context" json "encoding/json" fmt "fmt" v1alpha1 "github.com/triggermesh/triggermesh/pkg/apis/routing/v1alpha1" internalclientset "github.com/triggermesh/triggermesh/pkg/client/generated/clientset/internalclientset" routingv1alpha1 "github.com/triggermesh/triggermesh/pkg/client/generated/listers/routing/v1alpha1" zap "go.uber.org/zap" v1 "k8s.io/api/core/v1" equality "k8s.io/apimachinery/pkg/api/equality" errors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" sets "k8s.io/apimachinery/pkg/util/sets" record "k8s.io/client-go/tools/record" controller "knative.dev/pkg/controller" kmp "knative.dev/pkg/kmp" logging "knative.dev/pkg/logging" reconciler "knative.dev/pkg/reconciler" ) // Interface defines the strongly typed interfaces to be implemented by a // controller reconciling v1alpha1.Splitter. type Interface interface { // ReconcileKind implements custom logic to reconcile v1alpha1.Splitter. Any changes // to the objects .Status or .Finalizers will be propagated to the stored // object. It is recommended that implementors do not call any update calls // for the Kind inside of ReconcileKind, it is the responsibility of the calling // controller to propagate those properties. The resource passed to ReconcileKind // will always have an empty deletion timestamp. ReconcileKind(ctx context.Context, o *v1alpha1.Splitter) reconciler.Event } // Finalizer defines the strongly typed interfaces to be implemented by a // controller finalizing v1alpha1.Splitter. type Finalizer interface { // FinalizeKind implements custom logic to finalize v1alpha1.Splitter. Any changes // to the objects .Status or .Finalizers will be ignored. Returning a nil or // Normal type reconciler.Event will allow the finalizer to be deleted on // the resource. The resource passed to FinalizeKind will always have a set // deletion timestamp. FinalizeKind(ctx context.Context, o *v1alpha1.Splitter) reconciler.Event } // ReadOnlyInterface defines the strongly typed interfaces to be implemented by a // controller reconciling v1alpha1.Splitter if they want to process resources for which // they are not the leader. type ReadOnlyInterface interface { // ObserveKind implements logic to observe v1alpha1.Splitter. // This method should not write to the API. ObserveKind(ctx context.Context, o *v1alpha1.Splitter) reconciler.Event } type doReconcile func(ctx context.Context, o *v1alpha1.Splitter) reconciler.Event // reconcilerImpl implements controller.Reconciler for v1alpha1.Splitter resources. type reconcilerImpl struct { // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware. reconciler.LeaderAwareFuncs // Client is used to write back status updates. Client internalclientset.Interface // Listers index properties about resources. 
Lister routingv1alpha1.SplitterLister // Recorder is an event recorder for recording Event resources to the // Kubernetes API. Recorder record.EventRecorder // configStore allows for decorating a context with config maps. // +optional configStore reconciler.ConfigStore // reconciler is the implementation of the business logic of the resource. reconciler Interface // finalizerName is the name of the finalizer to reconcile. finalizerName string // skipStatusUpdates configures whether or not this reconciler automatically updates // the status of the reconciled resource. skipStatusUpdates bool } // Check that our Reconciler implements controller.Reconciler. var _ controller.Reconciler = (*reconcilerImpl)(nil) // Check that our generated Reconciler is always LeaderAware. var _ reconciler.LeaderAware = (*reconcilerImpl)(nil) func
(ctx context.Context, logger *zap.SugaredLogger, client internalclientset.Interface, lister routingv1alpha1.SplitterLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler { // Check the options function input. It should be 0 or 1. if len(options) > 1 { logger.Fatal("Up to one options struct is supported, found: ", len(options)) } // Fail fast when users inadvertently implement the other LeaderAware interface. // For the typed reconcilers, Promote shouldn't take any arguments. if _, ok := r.(reconciler.LeaderAware); ok { logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r) } rec := &reconcilerImpl{ LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { all, err := lister.List(labels.Everything()) if err != nil { return err } for _, elt := range all { // TODO: Consider letting users specify a filter in options. enq(bkt, types.NamespacedName{ Namespace: elt.GetNamespace(), Name: elt.GetName(), }) } return nil }, }, Client: client, Lister: lister, Recorder: recorder, reconciler: r, finalizerName: defaultFinalizerName, } for _, opts := range options { if opts.ConfigStore != nil { rec.configStore = opts.ConfigStore } if opts.FinalizerName != "" { rec.finalizerName = opts.FinalizerName } if opts.SkipStatusUpdates { rec.skipStatusUpdates = true } if opts.DemoteFunc != nil { rec.DemoteFunc = opts.DemoteFunc } } return rec } // Reconcile implements controller.Reconciler func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { logger := logging.FromContext(ctx) // Initialize the reconciler state. This will convert the namespace/name // string into a distinct namespace and name, determine if this instance of // the reconciler is the leader, and any additional interfaces implemented // by the reconciler. Returns an error is the resource key is invalid. s, err := newState(key, r) if err != nil { logger.Error("Invalid resource key: ", key) return nil } // If we are not the leader, and we don't implement either ReadOnly // observer interfaces, then take a fast-path out. if s.isNotLeaderNorObserver() { return controller.NewSkipKey(key) } // If configStore is set, attach the frozen configuration to the context. if r.configStore != nil { ctx = r.configStore.ToContext(ctx) } // Add the recorder to context. ctx = controller.WithEventRecorder(ctx, r.Recorder) // Get the resource with this namespace/name. getter := r.Lister.Splitters(s.namespace) original, err := getter.Get(s.name) if errors.IsNotFound(err) { // The resource may no longer exist, in which case we stop processing and call // the ObserveDeletion handler if appropriate. logger.Debugf("Resource %q no longer exists", key) if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok { return del.ObserveDeletion(ctx, types.NamespacedName{ Namespace: s.namespace, Name: s.name, }) } return nil } else if err != nil { return err } // Don't modify the informers copy. resource := original.DeepCopy() var reconcileEvent reconciler.Event name, do := s.reconcileMethodFor(resource) // Append the target method to the logger. logger = logger.With(zap.String("targetMethod", name)) switch name { case reconciler.DoReconcileKind: // Set and update the finalizer on resource if r.reconciler // implements Finalizer. 
if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil { return fmt.Errorf("failed to set finalizers: %w", err) } if !r.skipStatusUpdates { reconciler.PreProcessReconcile(ctx, resource) } // Reconcile this copy of the resource and then write back any status // updates regardless of whether the reconciliation errored out. reconcileEvent = do(ctx, resource) if !r.skipStatusUpdates { reconciler.PostProcessReconcile(ctx, resource, original) } case reconciler.DoFinalizeKind: // For finalizing reconcilers, if this resource being marked for deletion // and reconciled cleanly (nil or normal event), remove the finalizer. reconcileEvent = do(ctx, resource) if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil { return fmt.Errorf("failed to clear finalizers: %w", err) } case reconciler.DoObserveKind: // Observe any changes to this resource, since we are not the leader. reconcileEvent = do(ctx, resource) } // Synchronize the status. switch { case r.skipStatusUpdates: // This reconciler implementation is configured to skip resource updates. // This may mean this reconciler does not observe spec, but reconciles external changes. case equality.Semantic.DeepEqual(original.Status, resource.Status): // If we didn't change anything then don't call updateStatus. // This is important because the copy we loaded from the injectionInformer's // cache may be stale and we don't want to overwrite a prior update // to status with this stale state. case !s.isLeader: // High-availability reconcilers may have many replicas watching the resource, but only // the elected leader is expected to write modifications. logger.Warn("Saw status changes when we aren't the leader!") default: if err = r.updateStatus(ctx, original, resource); err != nil { logger.Warnw("Failed to update resource status", zap.Error(err)) r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed", "Failed to update status for %q: %v", resource.Name, err) return err } } // Report the reconciler event, if any. if reconcileEvent != nil { var event *reconciler.ReconcilerEvent if reconciler.EventAs(reconcileEvent, &event) { logger.Infow("Returned an event", zap.Any("event", reconcileEvent)) r.Recorder.Event(resource, event.EventType, event.Reason, event.Error()) // the event was wrapped inside an error, consider the reconciliation as failed if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent { return reconcileEvent } return nil } if controller.IsSkipKey(reconcileEvent) { // This is a wrapped error, don't emit an event. } else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok { // This is a wrapped error, don't emit an event. } else { logger.Errorw("Returned an error", zap.Error(reconcileEvent)) r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) } return reconcileEvent } return nil } func (r *reconcilerImpl) updateStatus(ctx context.Context, existing *v1alpha1.Splitter, desired *v1alpha1.Splitter) error { existing = existing.DeepCopy() return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { // The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API. if attempts > 0 { getter := r.Client.RoutingV1alpha1().Splitters(desired.Namespace) existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{}) if err != nil { return err } } // If there's nothing to update, just return. 
if equality.Semantic.DeepEqual(existing.Status, desired.Status) { return nil } if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" { logging.FromContext(ctx).Debug("Updating status with: ", diff) } existing.Status = desired.Status updater := r.Client.RoutingV1alpha1().Splitters(existing.Namespace) _, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{}) return err }) } // updateFinalizersFiltered will update the Finalizers of the resource. // TODO: this method could be generic and sync all finalizers. For now it only // updates defaultFinalizerName or its override. func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1alpha1.Splitter) (*v1alpha1.Splitter, error) { getter := r.Lister.Splitters(resource.Namespace) actual, err := getter.Get(resource.Name) if err != nil { return resource, err } // Don't modify the informers copy. existing := actual.DeepCopy() var finalizers []string // If there's nothing to update, just return. existingFinalizers := sets.NewString(existing.Finalizers...) desiredFinalizers := sets.NewString(resource.Finalizers...) if desiredFinalizers.Has(r.finalizerName) { if existingFinalizers.Has(r.finalizerName) { // Nothing to do. return resource, nil } // Add the finalizer. finalizers = append(existing.Finalizers, r.finalizerName) } else { if !existingFinalizers.Has(r.finalizerName) { // Nothing to do. return resource, nil } // Remove the finalizer. existingFinalizers.Delete(r.finalizerName) finalizers = existingFinalizers.List() } mergePatch := map[string]interface{}{ "metadata": map[string]interface{}{ "finalizers": finalizers, "resourceVersion": existing.ResourceVersion, }, } patch, err := json.Marshal(mergePatch) if err != nil { return resource, err } patcher := r.Client.RoutingV1alpha1().Splitters(resource.Namespace) resourceName := resource.Name updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{}) if err != nil { r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed", "Failed to update finalizers for %q: %v", resourceName, err) } else { r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate", "Updated %q finalizers", resource.GetName()) } return updated, err } func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1alpha1.Splitter) (*v1alpha1.Splitter, error) { if _, ok := r.reconciler.(Finalizer); !ok { return resource, nil } finalizers := sets.NewString(resource.Finalizers...) // If this resource is not being deleted, mark the finalizer. if resource.GetDeletionTimestamp().IsZero() { finalizers.Insert(r.finalizerName) } resource.Finalizers = finalizers.List() // Synchronize the finalizers filtered by r.finalizerName. return r.updateFinalizersFiltered(ctx, resource) } func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1alpha1.Splitter, reconcileEvent reconciler.Event) (*v1alpha1.Splitter, error) { if _, ok := r.reconciler.(Finalizer); !ok { return resource, nil } if resource.GetDeletionTimestamp().IsZero() { return resource, nil } finalizers := sets.NewString(resource.Finalizers...) if reconcileEvent != nil { var event *reconciler.ReconcilerEvent if reconciler.EventAs(reconcileEvent, &event) { if event.EventType == v1.EventTypeNormal { finalizers.Delete(r.finalizerName) } } } else { finalizers.Delete(r.finalizerName) } resource.Finalizers = finalizers.List() // Synchronize the finalizers filtered by r.finalizerName. return r.updateFinalizersFiltered(ctx, resource) }
NewReconciler
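A note on the finalizer sync above: updateFinalizersFiltered sends a JSON merge patch that includes resourceVersion, so a concurrent writer makes the patch fail instead of silently clobbering the finalizer list. The short Python sketch below is purely illustrative of the payload shape handed to Patch; the finalizer name is a made-up placeholder, not the one this reconciler uses.

import json

def finalizer_merge_patch(finalizers, resource_version):
    # Shape of the merge-patch body built in updateFinalizersFiltered: the full
    # desired finalizer list plus resourceVersion as an optimistic-concurrency guard.
    return json.dumps({
        "metadata": {
            "finalizers": finalizers,
            "resourceVersion": resource_version,
        }
    })

# Hypothetical values for illustration only.
print(finalizer_merge_patch(["splitters.routing.example.dev"], "12345"))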
dialog-title-props.test.js
import fs from 'fs'; import path from 'path'; import { expect } from 'chai'; import jscodeshift from 'jscodeshift'; import transform from './dialog-title-props'; import readFile from '../util/readFile'; function
(fileName) { return readFile(path.join(__dirname, fileName)); } describe('@material-ui/codemod', () => { describe('v5.0.0', () => { describe('dialog-title-props', () => { it('transforms props as needed', () => { const actual = transform( { source: read('./dialog-title-props.test/actual.js'), path: require.resolve('./dialog-title-props.test/actual.js'), }, { jscodeshift: jscodeshift }, {}, ); const expected = read('./dialog-title-props.test/expected.js'); expect(actual).to.equal(expected, 'The transformed version should be correct'); }); it('should be idempotent', () => { const actual = transform( { source: read('./dialog-title-props.test/expected.js'), path: require.resolve('./dialog-title-props.test/expected.js'), }, { jscodeshift: jscodeshift }, {}, ); const expected = read('./dialog-title-props.test/expected.js'); expect(actual).to.equal(expected, 'The transformed version should be correct'); }); }); }); });
read
Exceptions.py
class UnauthorizedException(Exception):
class ForbiddenException(Exception): def __init__(self): super(ForbiddenException, self).__init__('User not authorized.') class ConflictException(Exception): def __init__(self): super(ConflictException, self).__init__('Request creates conflict.') class NotFoundException(Exception): def __init__(self): super(NotFoundException, self).__init__('Request requires a resource which cannot be found.')
def __init__(self): super(UnauthorizedException, self).__init__('User not authenticated.')
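These exception types map naturally onto HTTP status codes. The snippet below is an illustrative sketch of how a caller might translate them; the mapping and helper are assumptions, not part of Exceptions.py.

HTTP_STATUS_BY_EXCEPTION = {
    'UnauthorizedException': 401,
    'ForbiddenException': 403,
    'NotFoundException': 404,
    'ConflictException': 409,
}

def to_http_response(exc):
    # Fall back to 500 for anything the mapping does not know about.
    status = HTTP_STATUS_BY_EXCEPTION.get(type(exc).__name__, 500)
    return status, str(exc)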
test_TCP.py
#!/usr/bin/env python # Impacket - Collection of Python classes for working with network protocols. # # SECUREAUTH LABS. Copyright (C) 2021 SecureAuth Corporation. All rights reserved. # # This software is provided under a slightly modified version # of the Apache Software License. See the accompanying LICENSE file # for more information. # import unittest from impacket.ImpactPacket import TCP class TestTCP(unittest.TestCase): def setUp(self): # TCP - sport: 60655, dport: 80, sec: 0, HLen: 40, Flags: 0x02, win_size: 5840 # cksum: 0x64cb, Options: 0x20 self.frame = b'\xec\xef\x00\x50\xa8\xbd\xea\x4c\x00\x00\x00\x00\xa0\x02\x16\xd0' \ b'\x64\xcb\x00\x00\x02\x04\x05\xb4\x04\x02\x08\x0a\x00\xdc\xd6\x12' \ b'\x00\x00\x00\x00\x01\x03\x03\x06' self.tcp = TCP(self.frame) def test_01(self): 'Test TCP get_packet' self.assertEqual(self.tcp.get_packet(), self.frame) def test_02(self): 'Test TCP getters' self.assertEqual(self.tcp.get_th_sport(), 60655) self.assertEqual(self.tcp.get_th_dport(), 80) self.assertEqual(self.tcp.get_th_off()*4, 40) # *4 because are words self.assertEqual(self.tcp.get_th_flags(), 0x02) self.assertEqual(self.tcp.get_th_win(), 5840) self.assertEqual(self.tcp.get_th_sum(), 0x64cb) self.assertEqual(self.tcp.get_SYN(), 1) self.assertEqual(self.tcp.get_RST(), 0) def test_03(self): 'Test TCP port setters' self.tcp.set_th_sport(54321) self.assertEqual(self.tcp.get_th_sport(), 54321) self.tcp.set_th_dport(81) self.assertEqual(self.tcp.get_th_dport(), 81) def test_04(self): 'Test TCP offset setters' # test that set_th_off doesn't affect to flags flags = int('10101010',2) self.tcp.set_th_flags( flags ) self.assertEqual(self.tcp.get_th_flags(), flags) self.tcp.set_th_off(4) self.assertEqual(self.tcp.get_th_off(), 4) self.assertEqual(self.tcp.get_th_flags(), flags) def test_05(self): 'Test TCP win setters' self.tcp.set_th_win(12345) self.assertEqual(self.tcp.get_th_win(), 12345) def test_06(self): 'Test TCP checksum setters' self.tcp.set_th_sum(0xFEFE) self.assertEqual(self.tcp.get_th_sum(), 0xFEFE)
def test_07(self): 'Test TCP flags setters' self.tcp.set_th_flags(0x03) # SYN+FIN self.assertEqual(self.tcp.get_th_flags(), 0x03) self.tcp.set_ACK() self.assertEqual(self.tcp.get_ACK(), 1) self.assertEqual(self.tcp.get_SYN(), 1) self.assertEqual(self.tcp.get_FIN(), 1) self.assertEqual(self.tcp.get_RST(), 0) self.assertEqual(self.tcp.get_th_flags(), 19) def test_08(self): 'Test TCP reset_flags' # Test 1 self.tcp.set_th_flags(19) # ACK+SYN+FIN self.assertEqual(self.tcp.get_th_flags(), 19) self.assertEqual(self.tcp.get_ACK(), 1) self.assertEqual(self.tcp.get_SYN(), 1) self.assertEqual(self.tcp.get_FIN(), 1) self.assertEqual(self.tcp.get_RST(), 0) self.tcp.reset_flags(0x02) self.assertEqual(self.tcp.get_th_flags(), 17) # Test 2 flags = int('10011', 2) # 19 = ACK+SYN+FIN self.tcp.set_th_flags(flags) self.assertEqual(self.tcp.get_th_flags(), 19) # 010011 # 000010 # ------ # 010001 = 17 self.tcp.reset_flags(int('000010',2)) self.assertEqual(self.tcp.get_th_flags(), 17) # Test 3 flags = int('10011', 2) # 19 = ACK+SYN+FIN self.tcp.set_th_flags(flags) self.assertEqual(self.tcp.get_th_flags(), 19) # 010011 # 010001 # ------ # 000010 = 2 self.tcp.reset_flags(int('010001',2)) self.assertEqual(self.tcp.get_th_flags(), 2) def test_09(self): 'Test TCP set_flags' flags = int('10101010',2) # 0xAA self.tcp.set_flags(flags) self.assertEqual(self.tcp.get_FIN(), 0) self.assertEqual(self.tcp.get_SYN(), 1) self.assertEqual(self.tcp.get_RST(), 0) self.assertEqual(self.tcp.get_PSH(), 1) self.assertEqual(self.tcp.get_ACK(), 0) self.assertEqual(self.tcp.get_URG(), 1) self.assertEqual(self.tcp.get_ECE(), 0) self.assertEqual(self.tcp.get_CWR(), 1) self.assertEqual(self.tcp.get_th_flags(), 0xAA ) if __name__ == '__main__': unittest.main(verbosity=1)
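The flag assertions in test_07 and test_08 follow directly from the standard TCP flag bit positions. The sketch below is just the underlying arithmetic, not Impacket code.

FIN, SYN, RST, PSH, ACK, URG, ECE, CWR = (1 << i for i in range(8))

flags = SYN | FIN        # 0x03, as set in test_07
flags |= ACK             # set_ACK() -> 0b010011 == 19
assert flags == 19

flags &= ~SYN            # reset_flags(0x02) clears SYN -> 0b010001 == 17
assert flags == 17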
test_tempgen.py
import pytest import os from collections import defaultdict from syrupy.extensions.single_file import SingleFileSnapshotExtension from tempgen.module import Tempgen from tempgen.parsers import Parsers from tempgen.transforms import Transforms from tempgen.tests.helpers import ext_serializer_map tests_dir = os.path.dirname(os.path.abspath(__file__)) fixture_dir = os.path.join(tests_dir, 'fixtures') fixture_name = 'test_template' generated_name = 'generated' transforms = Transforms().name_transform_map.keys() extensions = Parsers().ext_parser_map.keys() serializers = ext_serializer_map @pytest.fixture(autouse=True) def tempgen_instance(): return Tempgen() @pytest.fixture def tempgen_instances(request): return [Tempgen() for _ in range(request.param)] @pytest.mark.parametrize('extension', extensions) def test_load_template(extension, tempgen_instance, snapshot):
@pytest.mark.parametrize('extension', extensions) def test_save_result(extension, tempgen_instance, snapshot): template = os.path.join(fixture_dir, fixture_name + extension) tempgen_instance.load_template(template) replacements = { key: value['value'] for key, value in tempgen_instance.get_fields().items() } replacements['doer'] = 'Петров П.П.' replacements['itn'] = '987654321098' save_path = os.path.join(fixture_dir, generated_name) tempgen_instance.save_result(template, save_path, replacements) assert ext_serializer_map[extension](save_path + extension) == snapshot os.remove(save_path + extension) @pytest.mark.parametrize('extension', extensions) @pytest.mark.parametrize('transform', transforms) @pytest.mark.parametrize('tempgen_instances', [2], indirect=['tempgen_instances']) def test_independence(extension, transform, tempgen_instances): instance_0, instance_1 = tempgen_instances assert instance_0.parsers != instance_1.parsers assert instance_0.transforms != instance_1.transforms instance_0.parsers[extension].parse = lambda *args, **kwargs: ({}) instance_0.transforms[transform] = lambda x: x assert instance_0.parsers != instance_1.parsers assert instance_0.transforms != instance_1.transforms
template = os.path.join(fixture_dir, fixture_name + extension) tempgen_instance.load_template(template) assert template in tempgen_instance.get_templates() assert tempgen_instance.get_fields() == snapshot
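The tempgen_instances fixture above relies on pytest's indirect parametrization: the value given to parametrize is delivered to the fixture via request.param rather than to the test itself. A minimal standalone sketch of that mechanism:

import pytest

@pytest.fixture
def instances(request):
    # request.param carries the value routed in by the indirect parametrize below.
    return [object() for _ in range(request.param)]

@pytest.mark.parametrize('instances', [2], indirect=['instances'])
def test_instances_are_distinct(instances):
    first, second = instances
    assert first is not second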
data.go
// Licensed to Elasticsearch B.V. under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. Elasticsearch B.V. licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package mntr import ( "bufio" "io" "regexp" "github.com/codragonzuo/beats/libbeat/common" s "github.com/codragonzuo/beats/libbeat/common/schema" c "github.com/codragonzuo/beats/libbeat/common/schema/mapstrstr" "github.com/codragonzuo/beats/libbeat/logp" "github.com/codragonzuo/beats/metricbeat/mb" ) var ( // Matches first the variable name, second the param itself paramMatcher = regexp.MustCompile("([^\\s]+)\\s+(.*$)") schema = s.Schema{ "version": c.Str("zk_version"), "latency": s.Object{ "avg": c.Int("zk_avg_latency"), "min": c.Int("zk_min_latency"), "max": c.Int("zk_max_latency"), },
}, "num_alive_connections": c.Int("zk_num_alive_connections"), "outstanding_requests": c.Int("zk_outstanding_requests"), "server_state": c.Str("zk_server_state"), "znode_count": c.Int("zk_znode_count"), "watch_count": c.Int("zk_watch_count"), "ephemerals_count": c.Int("zk_ephemerals_count"), "approximate_data_size": c.Int("zk_approximate_data_size"), } schemaLeader = s.Schema{ "followers": c.Int("zk_followers"), "synced_followers": c.Int("zk_synced_followers"), "pending_syncs": c.Int("zk_pending_syncs"), } schemaUnix = s.Schema{ "open_file_descriptor_count": c.Int("zk_open_file_descriptor_count"), "max_file_descriptor_count": c.Int("zk_max_file_descriptor_count"), } ) func eventMapping(response io.Reader, r mb.ReporterV2, logger *logp.Logger) { fullEvent := map[string]interface{}{} scanner := bufio.NewScanner(response) // Iterate through all events to gather data for scanner.Scan() { if match := paramMatcher.FindStringSubmatch(scanner.Text()); len(match) == 3 { fullEvent[match[1]] = match[2] } else { logger.Infof("Unexpected line in mntr output: %s", scanner.Text()) } } event, _ := schema.Apply(fullEvent) e := mb.Event{} if version, ok := event["version"]; ok { e.RootFields = common.MapStr{} e.RootFields.Put("service.version", version) delete(event, "version") } // only exposed by the Leader if _, ok := fullEvent["zk_followers"]; ok { schemaLeader.ApplyTo(event, fullEvent) } // only available on Unix platforms if _, ok := fullEvent["zk_open_file_descriptor_count"]; ok { schemaUnix.ApplyTo(event, fullEvent) } e.MetricSetFields = event r.Event(e) }
"packets": s.Object{ "received": c.Int("zk_packets_received"), "sent": c.Int("zk_packets_sent"),
outpack_test.go
package worker import ( "bytes" "testing" rt "github.com/quantcast/g2/pkg/runtime" ) var ( outpackcases = map[rt.PT]map[string]string{ rt.PT_CanDo: { "src": "\x00REQ\x00\x00\x00\x01\x00\x00\x00\x01a", "data": "a", }, rt.PT_CanDoTimeout: { "src": "\x00REQ\x00\x00\x00\x17\x00\x00\x00\x06a\x00\x00\x00\x00\x01", "data": "a\x00\x00\x00\x00\x01", }, rt.PT_CantDo: { "src": "\x00REQ\x00\x00\x00\x02\x00\x00\x00\x01a", "data": "a", }, rt.PT_ResetAbilities: { "src": "\x00REQ\x00\x00\x00\x03\x00\x00\x00\x00", }, rt.PT_PreSleep: { "src": "\x00REQ\x00\x00\x00\x04\x00\x00\x00\x00", }, rt.PT_GrabJob: { "src": "\x00REQ\x00\x00\x00\x09\x00\x00\x00\x00", }, rt.PT_GrabJobUniq: { "src": "\x00REQ\x00\x00\x00\x1E\x00\x00\x00\x00", }, rt.PT_WorkData: { "src": "\x00REQ\x00\x00\x00\x1C\x00\x00\x00\x03a\x00b", "data": "a\x00b", }, rt.PT_WorkWarning: { "src": "\x00REQ\x00\x00\x00\x1D\x00\x00\x00\x03a\x00b", "data": "a\x00b", }, rt.PT_WorkStatus: { "src": "\x00REQ\x00\x00\x00\x0C\x00\x00\x00\x08a\x0050\x00100", "data": "a\x0050\x00100", }, rt.PT_WorkComplete: { "src": "\x00REQ\x00\x00\x00\x0D\x00\x00\x00\x03a\x00b", "data": "a\x00b", }, rt.PT_WorkFail: { "src": "\x00REQ\x00\x00\x00\x0E\x00\x00\x00\x01a", "handle": "a", }, rt.PT_WorkException: { "src": "\x00REQ\x00\x00\x00\x19\x00\x00\x00\x03a\x00b", "data": "a\x00b", }, rt.PT_SetClientId: { "src": "\x00REQ\x00\x00\x00\x16\x00\x00\x00\x01a", "data": "a", }, rt.PT_AllYours: { "src": "\x00REQ\x00\x00\x00\x18\x00\x00\x00\x00", }, } ) func
(t *testing.T) { for k, v := range outpackcases { outpack := getOutPack() outpack.dataType = k if handle, ok := v["handle"]; ok { outpack.handle = handle } if data, ok := v["data"]; ok { outpack.data = []byte(data) } data := outpack.Encode() if bytes.Compare([]byte(v["src"]), data) != 0 { t.Errorf("%d: %X expected, %X got.", k, v["src"], data) } } } func BenchmarkEncode(b *testing.B) { for i := 0; i < b.N; i++ { for k, v := range outpackcases { outpack := getOutPack() outpack.dataType = k if handle, ok := v["handle"]; ok { outpack.handle = handle } if data, ok := v["data"]; ok { outpack.data = []byte(data) } outpack.Encode() } } }
TestOutPack
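The expected byte strings above all share the same Gearman request framing: a 4-byte magic ("\0REQ"), a big-endian uint32 packet type, a big-endian uint32 payload length, then the payload itself. A quick Python sketch of that layout, independent of the Go implementation:

import struct

def encode_request(packet_type, data=b""):
    # Magic + big-endian type + big-endian length + raw payload.
    return b"\x00REQ" + struct.pack(">II", packet_type, len(data)) + data

assert encode_request(1, b"a") == b"\x00REQ\x00\x00\x00\x01\x00\x00\x00\x01a"   # PT_CanDo case above
assert encode_request(4) == b"\x00REQ\x00\x00\x00\x04\x00\x00\x00\x00"          # PT_PreSleep case above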
registry.go
// Copyright 2017-2019 Lei Ni ([email protected]) and other contributors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package registry import ( "fmt" "sync" "github.com/cockroachdb/errors" "github.com/lni/goutils/logutil" "github.com/lni/dragonboat/v3/config" "github.com/lni/dragonboat/v3/internal/server" "github.com/lni/dragonboat/v3/raftio" ) var ( // ErrUnknownTarget is the error returned when the target address of the node // is unknown. ErrUnknownTarget = errors.New("target address unknown") ) // IResolver converts the (cluster id, node id( tuple to network address. type IResolver interface { Resolve(uint64, uint64) (string, string, error) Add(uint64, uint64, string) } // INodeRegistry is the local registry interface used to keep all known // nodes in the system.. type INodeRegistry interface { Close() error Add(clusterID uint64, nodeID uint64, url string) Remove(clusterID uint64, nodeID uint64) RemoveCluster(clusterID uint64) Resolve(clusterID uint64, nodeID uint64) (string, string, error) } var _ INodeRegistry = (*Registry)(nil) var _ IResolver = (*Registry)(nil) // Registry is used to manage all known node addresses in the multi raft system. // The transport layer uses this address registry to locate nodes. type Registry struct { partitioner server.IPartitioner validate config.TargetValidator addr sync.Map // map of raftio.NodeInfo => string } // NewNodeRegistry returns a new Registry object. func
(streamConnections uint64, v config.TargetValidator) *Registry { n := &Registry{validate: v} if streamConnections > 1 { n.partitioner = server.NewFixedPartitioner(streamConnections) } return n } // Close closes the node registry. func (n *Registry) Close() error { return nil } // Add adds the specified node and its target info to the registry. func (n *Registry) Add(clusterID uint64, nodeID uint64, target string) { if n.validate != nil && !n.validate(target) { plog.Panicf("invalid target %s", target) } key := raftio.GetNodeInfo(clusterID, nodeID) v, ok := n.addr.LoadOrStore(key, target) if ok { if v.(string) != target { plog.Panicf("inconsistent target for %s, %s:%s", logutil.DescribeNode(clusterID, nodeID), v, target) } } } func (n *Registry) getConnectionKey(addr string, clusterID uint64) string { if n.partitioner == nil { return addr } return fmt.Sprintf("%s-%d", addr, n.partitioner.GetPartitionID(clusterID)) } // Remove removes a remote from the node registry. func (n *Registry) Remove(clusterID uint64, nodeID uint64) { n.addr.Delete(raftio.GetNodeInfo(clusterID, nodeID)) } // RemoveCluster removes all nodes info associated with the specified cluster func (n *Registry) RemoveCluster(clusterID uint64) { var toRemove []raftio.NodeInfo n.addr.Range(func(k, v interface{}) bool { ni := k.(raftio.NodeInfo) if ni.ClusterID == clusterID { toRemove = append(toRemove, ni) } return true }) for _, v := range toRemove { n.addr.Delete(v) } } // Resolve looks up the Addr of the specified node. func (n *Registry) Resolve(clusterID uint64, nodeID uint64) (string, string, error) { key := raftio.GetNodeInfo(clusterID, nodeID) addr, ok := n.addr.Load(key) if !ok { return "", "", ErrUnknownTarget } return addr.(string), n.getConnectionKey(addr.(string), clusterID), nil }
NewNodeRegistry
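getConnectionKey above folds a partition derived from the cluster ID into the key only when more than one stream connection is configured, so traffic for different clusters can be spread across connections to the same address. The Python sketch below illustrates the idea; treating the partitioner as a simple modulo is an assumption about NewFixedPartitioner, not a guarantee.

def connection_key(addr, cluster_id, stream_connections):
    # With a single connection per target, the address alone is the key.
    if stream_connections <= 1:
        return addr
    # Assumed fixed partitioner: cluster_id modulo the connection count.
    return "%s-%d" % (addr, cluster_id % stream_connections)

assert connection_key("10.0.0.1:9090", 7, 1) == "10.0.0.1:9090"
assert connection_key("10.0.0.1:9090", 7, 4) == "10.0.0.1:9090-3"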
test_random_crop_and_resize.py
# Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ Testing RandomCropAndResize op in DE """ import numpy as np import cv2 import mindspore.dataset.transforms.py_transforms import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.py_transforms as py_vision import mindspore.dataset.vision.utils as mode import mindspore.dataset as ds from mindspore import log as logger from util import diff_mse, save_and_check_md5, visualize_list, \ config_get_set_seed, config_get_set_num_parallel_workers DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" GENERATE_GOLDEN = False def test_random_crop_and_resize_op_c(plot=False): """ Test RandomCropAndResize op in c transforms """ logger.info("test_random_crop_and_resize_op_c") # First dataset data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) decode_op = c_vision.Decode() # With these inputs we expect the code to crop the whole image random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (2, 2), (1, 3)) data1 = data1.map(input_columns=["image"], operations=decode_op) data1 = data1.map(input_columns=["image"], operations=random_crop_and_resize_op) # Second dataset data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = data2.map(input_columns=["image"], operations=decode_op) num_iter = 0 crop_and_resize_images = [] original_images = [] for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1), data2.create_dict_iterator(num_epochs=1)): crop_and_resize = item1["image"] original = item2["image"] # Note: resize the original image with the same size as the one applied RandomResizedCrop() original = cv2.resize(original, (512, 256)) mse = diff_mse(crop_and_resize, original) assert mse == 0 logger.info("random_crop_and_resize_op_{}, mse: {}".format(num_iter + 1, mse)) num_iter += 1 crop_and_resize_images.append(crop_and_resize) original_images.append(original) if plot: visualize_list(original_images, crop_and_resize_images) def test_random_crop_and_resize_op_py(plot=False): """ Test RandomCropAndResize op in py transforms """ logger.info("test_random_crop_and_resize_op_py") # First dataset data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) # With these inputs we expect the code to crop the whole image transforms1 = [ py_vision.Decode(), py_vision.RandomResizedCrop((256, 512), (2, 2), (1, 3)), py_vision.ToTensor() ] transform1 = mindspore.dataset.transforms.py_transforms.Compose(transforms1) data1 = data1.map(input_columns=["image"], operations=transform1) # Second dataset # Second dataset for comparison data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) transforms2 = [ py_vision.Decode(), py_vision.ToTensor() ] transform2 = 
mindspore.dataset.transforms.py_transforms.Compose(transforms2) data2 = data2.map(input_columns=["image"], operations=transform2) num_iter = 0 crop_and_resize_images = [] original_images = [] for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1), data2.create_dict_iterator(num_epochs=1)): crop_and_resize = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8) original = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8) original = cv2.resize(original, (512, 256)) mse = diff_mse(crop_and_resize, original) # Due to rounding error the mse for Python is not exactly 0 assert mse <= 0.05 logger.info("random_crop_and_resize_op_{}, mse: {}".format(num_iter + 1, mse)) num_iter += 1 crop_and_resize_images.append(crop_and_resize) original_images.append(original) if plot: visualize_list(original_images, crop_and_resize_images) def test_random_crop_and_resize_01(): """ Test RandomCropAndResize with md5 check, expected to pass """ logger.info("test_random_crop_and_resize_01") original_seed = config_get_set_seed(0) original_num_parallel_workers = config_get_set_num_parallel_workers(1) # First dataset data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) decode_op = c_vision.Decode() random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (0.5, 0.5), (1, 1)) data1 = data1.map(input_columns=["image"], operations=decode_op) data1 = data1.map(input_columns=["image"], operations=random_crop_and_resize_op) # Second dataset data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) transforms = [ py_vision.Decode(),
data2 = data2.map(input_columns=["image"], operations=transform) filename1 = "random_crop_and_resize_01_c_result.npz" filename2 = "random_crop_and_resize_01_py_result.npz" save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN) save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN) # Restore config setting ds.config.set_seed(original_seed) ds.config.set_num_parallel_workers(original_num_parallel_workers) def test_random_crop_and_resize_02(): """ Test RandomCropAndResize with md5 check:Image interpolation mode is Inter.NEAREST, expected to pass """ logger.info("test_random_crop_and_resize_02") original_seed = config_get_set_seed(0) original_num_parallel_workers = config_get_set_num_parallel_workers(1) # First dataset data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) decode_op = c_vision.Decode() random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), interpolation=mode.Inter.NEAREST) data1 = data1.map(input_columns=["image"], operations=decode_op) data1 = data1.map(input_columns=["image"], operations=random_crop_and_resize_op) # Second dataset data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) transforms = [ py_vision.Decode(), py_vision.RandomResizedCrop((256, 512), interpolation=mode.Inter.NEAREST), py_vision.ToTensor() ] transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) data2 = data2.map(input_columns=["image"], operations=transform) filename1 = "random_crop_and_resize_02_c_result.npz" filename2 = "random_crop_and_resize_02_py_result.npz" save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN) save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN) # Restore config setting ds.config.set_seed(original_seed) ds.config.set_num_parallel_workers(original_num_parallel_workers) def test_random_crop_and_resize_03(): """ Test RandomCropAndResize with md5 check: max_attempts is 1, expected to pass """ logger.info("test_random_crop_and_resize_03") original_seed = config_get_set_seed(0) original_num_parallel_workers = config_get_set_num_parallel_workers(1) # First dataset data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) decode_op = c_vision.Decode() random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), max_attempts=1) data1 = data1.map(input_columns=["image"], operations=decode_op) data1 = data1.map(input_columns=["image"], operations=random_crop_and_resize_op) # Second dataset data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) transforms = [ py_vision.Decode(), py_vision.RandomResizedCrop((256, 512), max_attempts=1), py_vision.ToTensor() ] transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) data2 = data2.map(input_columns=["image"], operations=transform) filename1 = "random_crop_and_resize_03_c_result.npz" filename2 = "random_crop_and_resize_03_py_result.npz" save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN) save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN) # Restore config setting ds.config.set_seed(original_seed) ds.config.set_num_parallel_workers(original_num_parallel_workers) def test_random_crop_and_resize_04_c(): """ Test RandomCropAndResize with c_tranforms: invalid range of scale (max<min), expected to raise ValueError """ logger.info("test_random_crop_and_resize_04_c") # Generate dataset data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) 
decode_op = c_vision.Decode() try: # If input range of scale is not in the order of (min, max), ValueError will be raised. random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (1, 0.5), (0.5, 0.5)) data = data.map(input_columns=["image"], operations=decode_op) data = data.map(input_columns=["image"], operations=random_crop_and_resize_op) except ValueError as e: logger.info("Got an exception in DE: {}".format(str(e))) assert "Input is not within the required interval of (0 to 16777216)." in str(e) def test_random_crop_and_resize_04_py(): """ Test RandomCropAndResize with py_transforms: invalid range of scale (max<min), expected to raise ValueError """ logger.info("test_random_crop_and_resize_04_py") # Generate dataset data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) try: transforms = [ py_vision.Decode(), # If input range of scale is not in the order of (min, max), ValueError will be raised. py_vision.RandomResizedCrop((256, 512), (1, 0.5), (0.5, 0.5)), py_vision.ToTensor() ] transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) data = data.map(input_columns=["image"], operations=transform) except ValueError as e: logger.info("Got an exception in DE: {}".format(str(e))) assert "Input is not within the required interval of (0 to 16777216)." in str(e) def test_random_crop_and_resize_05_c(): """ Test RandomCropAndResize with c_transforms: invalid range of ratio (max<min), expected to raise ValueError """ logger.info("test_random_crop_and_resize_05_c") # Generate dataset data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) decode_op = c_vision.Decode() try: random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (1, 1), (1, 0.5)) # If input range of ratio is not in the order of (min, max), ValueError will be raised. data = data.map(input_columns=["image"], operations=decode_op) data = data.map(input_columns=["image"], operations=random_crop_and_resize_op) except ValueError as e: logger.info("Got an exception in DE: {}".format(str(e))) assert "Input is not within the required interval of (0 to 16777216)." in str(e) def test_random_crop_and_resize_05_py(): """ Test RandomCropAndResize with py_transforms: invalid range of ratio (max<min), expected to raise ValueError """ logger.info("test_random_crop_and_resize_05_py") # Generate dataset data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) try: transforms = [ py_vision.Decode(), # If input range of ratio is not in the order of (min, max), ValueError will be raised. py_vision.RandomResizedCrop((256, 512), (1, 1), (1, 0.5)), py_vision.ToTensor() ] transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) data = data.map(input_columns=["image"], operations=transform) except ValueError as e: logger.info("Got an exception in DE: {}".format(str(e))) assert "Input is not within the required interval of (0 to 16777216)." 
in str(e) def test_random_crop_and_resize_comp(plot=False): """ Test RandomCropAndResize and compare between python and c image augmentation """ logger.info("test_random_crop_and_resize_comp") # First dataset data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) decode_op = c_vision.Decode() random_crop_and_resize_op = c_vision.RandomResizedCrop(512, (1, 1), (0.5, 0.5)) data1 = data1.map(input_columns=["image"], operations=decode_op) data1 = data1.map(input_columns=["image"], operations=random_crop_and_resize_op) # Second dataset data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) transforms = [ py_vision.Decode(), py_vision.RandomResizedCrop(512, (1, 1), (0.5, 0.5)), py_vision.ToTensor() ] transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) data2 = data2.map(input_columns=["image"], operations=transform) image_c_cropped = [] image_py_cropped = [] for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1), data2.create_dict_iterator(num_epochs=1)): c_image = item1["image"] py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8) image_c_cropped.append(c_image) image_py_cropped.append(py_image) mse = diff_mse(c_image, py_image) assert mse < 0.02 # rounding error if plot: visualize_list(image_c_cropped, image_py_cropped, visualize_mode=2) def test_random_crop_and_resize_06(): """ Test RandomCropAndResize with c_transforms: invalid values for scale, expected to raise ValueError """ logger.info("test_random_crop_and_resize_05_c") # Generate dataset data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) decode_op = c_vision.Decode() try: random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), scale="", ratio=(1, 0.5)) data = data.map(input_columns=["image"], operations=decode_op) data.map(input_columns=["image"], operations=random_crop_and_resize_op) except TypeError as e: logger.info("Got an exception in DE: {}".format(str(e))) assert "Argument scale with value \"\" is not of type (<class 'tuple'>,)" in str(e) try: random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), scale=(1, "2"), ratio=(1, 0.5)) data = data.map(input_columns=["image"], operations=decode_op) data.map(input_columns=["image"], operations=random_crop_and_resize_op) except TypeError as e: logger.info("Got an exception in DE: {}".format(str(e))) assert "Argument scale[1] with value 2 is not of type (<class 'float'>, <class 'int'>)." in str(e) if __name__ == "__main__": test_random_crop_and_resize_op_c(True) test_random_crop_and_resize_op_py(True) test_random_crop_and_resize_01() test_random_crop_and_resize_02() test_random_crop_and_resize_03() test_random_crop_and_resize_04_c() test_random_crop_and_resize_04_py() test_random_crop_and_resize_05_c() test_random_crop_and_resize_05_py() test_random_crop_and_resize_06() test_random_crop_and_resize_comp(True)
py_vision.RandomResizedCrop((256, 512), (0.5, 0.5), (1, 1)), py_vision.ToTensor() ] transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
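These comparisons hinge on util.diff_mse, whose implementation is not shown here. The sketch below is one plausible per-pixel mean-squared-error helper with the same intent; the real helper may differ in detail.

import numpy as np

def mse(a, b):
    # Cast to float first so uint8 differences do not wrap around.
    a = np.asarray(a, dtype=np.float64)
    b = np.asarray(b, dtype=np.float64)
    return float(np.mean((a - b) ** 2))

assert mse(np.zeros((2, 2)), np.zeros((2, 2))) == 0.0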
content-script-injector.test.ts
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. import { IMock, It, Mock, Times } from 'typemoq'; import { ContentScriptInjector } from 'background/injector/content-script-injector'; import { BrowserAdapter } from '../../../../common/browser-adapters/browser-adapter'; import { PromiseFactory } from '../../../../common/promises/promise-factory'; describe('ContentScriptInjector', () => { const testTabId = 1; let browserAdapterMock: IMock<BrowserAdapter>; let promiseFactoryMock: IMock<PromiseFactory>; let testSubject: ContentScriptInjector; beforeEach(() => { browserAdapterMock = Mock.ofType<BrowserAdapter>(); promiseFactoryMock = Mock.ofType<PromiseFactory>(); testSubject = new ContentScriptInjector(browserAdapterMock.object, promiseFactoryMock.object); }); it('uses a timeout promise with the expected timeout constant', async () => { promiseFactoryMock .setup(factory => factory.timeout(It.isAny(), ContentScriptInjector.timeoutInMilliSec)) .returns(() => Promise.resolve({})) .verifiable(Times.once()); await testSubject.injectScripts(testTabId); promiseFactoryMock.verifyAll(); }); it('rejects if a timeout occurs', async () => { promiseFactoryMock.setup(factory => factory.timeout(It.isAny(), It.isAny())).returns(() => Promise.reject('artificial timeout')); await expect(testSubject.injectScripts(testTabId)).rejects.toBe('artificial timeout'); }); describe('when no timeout occurs', () => { beforeEach(() => { promiseFactoryMock.setup(factory => factory.timeout(It.isAny(), It.isAny())).returns(originalPromise => originalPromise); }); it('injects each CSS file once with the expected parameters', async () => { setupExecuteScriptToSucceedImmediately();
.setup(adapter => adapter.insertCSSInTab(testTabId, expectedDetails, It.isAny())) .callback(resolveCallbackImmediately) .verifiable(Times.once()); }); await testSubject.injectScripts(testTabId); browserAdapterMock.verifyAll(); }); it('injects each JS file once with the expected parameters', async () => { setupInsertCSSToSucceedImmediately(); ContentScriptInjector.jsFiles.forEach(jsFile => { const expectedDetails = { allFrames: true, file: jsFile, runAt: 'document_start' }; browserAdapterMock .setup(adapter => adapter.executeScriptInTab(testTabId, expectedDetails, It.isAny())) .callback(resolveCallbackImmediately) .verifiable(Times.once()); }); await testSubject.injectScripts(testTabId); browserAdapterMock.verifyAll(); }); it('resolves only after JS files have finished injecting', async () => { setupInsertCSSToSucceedImmediately(); let callbackPassedToExecuteScript: Function; // simulate JS injection taking a while, only completing asynchronously when we explicitly invoke the callback browserAdapterMock .setup(adapter => adapter.executeScriptInTab(It.isAny(), It.isObjectWith({ file: ContentScriptInjector.jsFiles[0] }), It.isAny()), ) .callback((tabId, details, passedCallback) => { callbackPassedToExecuteScript = passedCallback; }); let returnedPromiseCompleted = false; const returnedPromise = testSubject.injectScripts(testTabId).then(() => { returnedPromiseCompleted = true; }); expect(callbackPassedToExecuteScript).toBeDefined(); expect(returnedPromiseCompleted).toBe(false); callbackPassedToExecuteScript(); // simulate JS injection finishing await returnedPromise; expect(returnedPromiseCompleted).toBe(true); }); it('does not wait for CSS files to be injected before resolving', async () => { setupExecuteScriptToSucceedImmediately(); // Intentionally don't set up insertCSS callbacks to be called await testSubject.injectScripts(testTabId); // expect to not timeout }); function resolveCallbackImmediately(tabId: any, details: any, callback?: Function): void { if (callback) { callback(); } } function setupInsertCSSToSucceedImmediately(): void { browserAdapterMock .setup(adapter => adapter.insertCSSInTab(It.isAny(), It.isAny(), It.isAny())) .callback(resolveCallbackImmediately); } function setupExecuteScriptToSucceedImmediately(): void { browserAdapterMock .setup(adapter => adapter.executeScriptInTab(It.isAny(), It.isAny(), It.isAny())) .callback(resolveCallbackImmediately); } }); });
ContentScriptInjector.cssFiles.forEach(cssFile => { const expectedDetails = { allFrames: true, file: cssFile }; browserAdapterMock
mod.rs
use crate::grammar::cfg::RX_NUM_SUFFIX; use crate::parser::parol_grammar::ParolGrammar; use crate::parser::parol_parser::parse; use crate::GrammarConfig; use id_tree::Tree; use id_tree_layout::Layouter; use miette::{IntoDiagnostic, Result, WrapErr}; use parol_runtime::parser::ParseTreeType; use std::collections::HashMap; use std::convert::TryFrom; use std::fmt::Debug; use std::fs; use std::hash::Hash; use std::path::Path; pub mod str_vec; /// Applies a key-generating function to each element of a vector and yields a vector of /// pairs. Each pair consists of a unique key and a vector of all elements of the input /// vector which did produce this key by applying the projection function. /// The result vector is not sorted. pub(crate) fn group_by<P, T, K>(data: &[T], projection: P) -> Vec<(K, Vec<T>)> where P: Fn(&T) -> K, K: Eq + Hash, T: Clone, { let mut grouping: HashMap<K, Vec<T>> = HashMap::new(); data.iter() .fold(&mut grouping, |acc, t| { let key = projection(t); if let Some(vt) = acc.get_mut(&key) { vt.push(t.clone()); } else { acc.insert(key, vec![t.clone()]); } acc }) .drain() .collect() } /// Generates a new unique name avoiding collisions with the names given in the 'exclusions'. /// It takes a preferred name and if it collides it adds an increasing suffix number. /// If the preferred name already has a suffix number it starts counting up from this number. pub(crate) fn generate_name<T>(exclusions: &[T], preferred_name: String) -> String where T: AsRef<str>, { fn gen_name<T>(exclusions: &[T], prefix: String, start_num: usize) -> String where T: AsRef<str>, { let mut num = start_num; let mut new_name = format!("{}{}", prefix, num); while exclusions.iter().any(|n| n.as_ref() == new_name) { num += 1; new_name = format!("{}{}", prefix, num); } new_name } if exclusions.iter().any(|n| n.as_ref() == preferred_name) { let (suffix_number, prefix) = { if let Some(match_) = RX_NUM_SUFFIX.find(&preferred_name) { let num = match_.as_str().parse::<usize>().unwrap_or(1); (num, preferred_name[0..match_.start()].to_string()) } else { (0, preferred_name.clone()) } }; gen_name(exclusions, prefix, suffix_number) } else { preferred_name } } pub(crate) fn combine<A, B, C, F, G>(f: F, g: G) -> impl Fn(A) -> C where F: Fn(A) -> B, G: Fn(B) -> C, { move |x| g(f(x)) } pub(crate) fn short_cut_disjunction_combine<A, F, G>(f: F, g: G) -> impl Fn(&A) -> bool where F: Fn(&A) -> bool, G: Fn(&A) -> bool, { move |x| { let r = f(x); if r { r } else { g(x) } } } pub(crate) fn short_cut_conjunction_combine<A, F, G>(f: F, g: G) -> impl Fn(&A) -> bool where F: Fn(&A) -> bool, G: Fn(&A) -> bool, { move |x| { let r = f(x); if !r { r } else { g(x) } } } // --------------------------------------------------- // Part of the Public API // *Changes will affect crate's version according to semver* // --------------------------------------------------- /// /// Utility function to parse a file with a grammar in PAR syntax. /// pub fn obtain_grammar_config<T>(file_name: T, verbose: bool) -> Result<GrammarConfig> where T: AsRef<Path> + Debug, { let input = fs::read_to_string(&file_name) .into_diagnostic() .wrap_err(format!("Can't read file {:?}", file_name))?; obtain_grammar_config_from_string(&input, verbose) } // --------------------------------------------------- // Part of the Public API // *Changes will affect crate's version according to semver* // --------------------------------------------------- /// /// Utility function to parse a text with a grammar in PAR syntax. 
/// pub fn obtain_grammar_config_from_string(input: &str, verbose: bool) -> Result<GrammarConfig> { let mut parol_grammar = ParolGrammar::new(); let _syntax_tree = parse(input, "No file", &mut parol_grammar) .wrap_err(format!("Failed parsing text {}", input.escape_default()))?; if verbose { println!("{}", parol_grammar); } GrammarConfig::try_from(parol_grammar) } // --------------------------------------------------- // Part of the Public API // *Changes will affect crate's version according to semver* // --------------------------------------------------- /// /// Utility function for generating tree layouts /// pub fn generate_tree_layout<T>(syntax_tree: &Tree<ParseTreeType>, input_file_name: T) -> Result<()> where T: AsRef<Path>, { let mut svg_full_file_name = input_file_name.as_ref().to_path_buf();
Layouter::new(syntax_tree) .with_file_path(std::path::Path::new(&svg_full_file_name)) .write() .into_diagnostic() .wrap_err("Failed writing layout") }
svg_full_file_name.set_extension("svg");
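generate_name above counts upward from any existing numeric suffix until the candidate no longer collides with the exclusion list. A standalone Python re-implementation of that behavior (assuming RX_NUM_SUFFIX matches trailing digits):

import re

def generate_name(exclusions, preferred):
    if preferred not in exclusions:
        return preferred
    match = re.search(r"\d+$", preferred)
    prefix = preferred[:match.start()] if match else preferred
    num = int(match.group()) if match else 0
    candidate = "%s%d" % (prefix, num)
    while candidate in exclusions:
        num += 1
        candidate = "%s%d" % (prefix, num)
    return candidate

assert generate_name(["Expr"], "Expr") == "Expr0"
assert generate_name(["Expr", "Expr1"], "Expr1") == "Expr2"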
WIP_train_mlbox-WIP.py
import os, json, shutil, pickle from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, mean_squared_log_error import pandas as pd print('installing library') os.system('pip3 install mlbox==0.8.4') from mlbox.preprocessing import * from mlbox.optimisation import * from mlbox.prediction import * ''' From the documentation: https://mlbox.readthedocs.io/en/latest/ ''' # install mlblocks def train_mlbox(alldata, labels, mtype, jsonfile, problemtype, default_features, settings): # name model modelname=jsonfile[0:-5]+'_mlbox_'+str(default_features).replace("'",'').replace('"','') # training and testing sets X_train, X_test, y_train, y_test = train_test_split(alldata, labels, train_size=0.750, test_size=0.250) df = {"train" : pd.DataFrame(X_train), "target" : pd.DataFrame(y_train), "test" : pd.DataFrame(X_test)} print(df) if mtype=='c': # rename files with classification modelname=modelname+'_classification' model_name=modelname+'.pickle' jsonfilename=modelname+'.json' # from sklearn.datasets import load_boston # dataset = load_boston() # df = {"train" : pd.DataFrame(dataset.data), "target" : pd.Series(dataset.target)} # print(df['train'][0]) # print(type(df['train'][0])) # data = Drift_thresholder().fit_transform(df) #deleting non-stable variables space = { 'ne__numerical_strategy' : {"space" : [0, 'mean']}, 'ce__strategy' : {"space" : ["label_encoding", "random_projection", "entity_embedding"]}, 'fs__strategy' : {"space" : ["variance", "rf_feature_importance"]}, 'fs__threshold': {"search" : "choice", "space" : [0.1, 0.2, 0.3]}, 'est__strategy' : {"space" : ["LightGBM"]}, 'est__max_depth' : {"search" : "choice", "space" : [5,6]}, 'est__subsample' : {"search" : "uniform", "space" : [0.6,0.9]} } best = Optimiser().optimise(space, df, max_evals = 5) mse_ =Optimiser().evaluate(best, df) pipeline = Predictor().fit_predict(best, df) print(best) print(mse_) # saving model print('saving model') modelfile=open(model_name,'wb') pickle.dump(pipeline, modelfile) modelfile.close() # SAVE JSON FILE print('saving .JSON file (%s)'%(jsonfilename)) jsonfile=open(jsonfilename,'w') data={'sample type': problemtype, 'feature_set':default_features, 'model name':jsonfilename[0:-5]+'.pickle', 'accuracy':accuracy, 'model type':'mlblocks_regression', 'settings': settings, } json.dump(data,jsonfile) jsonfile.close() if mtype=='r': # rename files with regression modelname=modelname+'_regression' model_name=modelname+'.pickle' jsonfilename=modelname+'.json' params = {"ne__numerical_strategy" : 0, "ce__strategy" : "label_encoding", "fs__threshold" : 0.1, "stck__base_estimators" : [Regressor(strategy="RandomForest"), Regressor(strategy="ExtraTrees")], "est__strategy" : "Linear"} best = Optimiser().optimise(params, df, max_evals = 5) mse_error =Optimiser().evaluate(best, df) # saving model print('saving model') modelfile=open(model_name,'wb') pickle.dump(pipeline, modelfile) modelfile.close() # save JSON print('saving .JSON file (%s)'%(jsonfilename)) jsonfile=open(jsonfilename,'w') data={'sample type': problemtype, 'feature_set':default_features, 'model name':jsonfilename[0:-5]+'.pickle', 'mse_error':mse_error,
'settings': settings, } json.dump(data,jsonfile) jsonfile.close() cur_dir2=os.getcwd() try: os.chdir(problemtype+'_models') except: os.mkdir(problemtype+'_models') os.chdir(problemtype+'_models') # now move all the files over to proper model directory shutil.copy(cur_dir2+'/'+model_name, os.getcwd()+'/'+model_name) shutil.copy(cur_dir2+'/'+jsonfilename, os.getcwd()+'/'+jsonfilename) os.remove(cur_dir2+'/'+model_name) os.remove(cur_dir2+'/'+jsonfilename) # get model directory model_dir=os.getcwd() return model_name, model_dir
'model type':'mlblocks_regression',
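Once train_mlbox finishes, the model directory holds the pickled pipeline next to its JSON sidecar. A hedged usage sketch for loading the pair back; the helper is not part of the script, and file names simply follow the naming scheme above.

import json, os, pickle

def load_trained(model_dir, model_name):
    # model_name is the '<...>.pickle' file written by train_mlbox.
    with open(os.path.join(model_dir, model_name), 'rb') as f:
        pipeline = pickle.load(f)
    with open(os.path.join(model_dir, model_name[:-7] + '.json')) as f:  # strip '.pickle'
        metadata = json.load(f)
    return pipeline, metadata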
test_lifecycle_trend.py
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available. Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. BK-BASE 蓝鲸基础平台 is licensed under the MIT License. License for BK-BASE 蓝鲸基础平台: -------------------------------------------------------------------- Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from django.test import TestCase from rest_framework.reverse import reverse from tests.utils import UnittestClient class TestLifecycleTrend(TestCase): """ 生命周期趋势相关测试 """ def setUp(self): pass def test_asset_value_trend(self): """价值趋势相关测试""" params = {"dataset_id": "591_durant1115", "dataset_type": "result_table"} client = UnittestClient() url = reverse('asset_value-trend/asset-value') response = client.get(url, params) assert response.is_success() assert len(response.data['score']) > 0 assert len(response.data['time']) > 0 def test_assetvalue_to_cost_trend(self): """收益比趋势相关测试""" par
ant1115", "dataset_type": "result_table"} client = UnittestClient() url = reverse('asset_value-trend/assetvalue-to-cost') response = client.get(url, params) assert response.is_success() assert len(response.data['score']) > 0 assert len(response.data['time']) > 0 def test_importance_trend(self): """重要度趋势相关测试""" params = {"dataset_id": "591_durant1115", "dataset_type": "result_table"} client = UnittestClient() url = reverse('asset_value-trend/importance') response = client.get(url, params) assert response.is_success() assert len(response.data['score']) > 0 assert len(response.data['time']) > 0 def test_range_trend(self): """广度趋势相关测试""" params = {"dataset_id": "591_durant1115", "dataset_type": "result_table"} client = UnittestClient() url = reverse('range-list-range-metric-by-influxdb') response = client.get(url, params) assert response.is_success() assert len(response.data['score']) > 0 assert len(response.data['time']) > 0 assert len(response.data['biz_count']) > 0 assert len(response.data['proj_count']) > 0 def test_heat_trend(self): """热度趋势相关测试""" params = {"dataset_id": "591_durant1115", "dataset_type": "result_table"} client = UnittestClient() url = reverse('heat-list-heat-metric-by-influxdb') response = client.get(url, params) assert response.is_success() assert len(response.data['score']) > 0 assert len(response.data['time']) > 0 assert len(response.data['query_count']) > 0 assert len(response.data['day_query_count']) > 0
ams = {"dataset_id": "591_dur
import.go
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package modload import ( "context" "errors" "fmt" "go/build" "internal/goroot" "io/fs" "os" pathpkg "path" "path/filepath" "sort" "strings" "cmd/go/internal/cfg" "cmd/go/internal/fsys" "cmd/go/internal/modfetch" "cmd/go/internal/par" "cmd/go/internal/search" "golang.org/x/mod/module" "golang.org/x/mod/semver" ) type ImportMissingError struct { Path string Module module.Version QueryErr error // isStd indicates whether we would expect to find the package in the standard // library. This is normally true for all dotless import paths, but replace // directives can cause us to treat the replaced paths as also being in // modules. isStd bool // replaced the highest replaced version of the module where the replacement // contains the package. replaced is only set if the replacement is unused. replaced module.Version // newMissingVersion is set to a newer version of Module if one is present // in the build list. When set, we can't automatically upgrade. newMissingVersion string } func (e *ImportMissingError) Error() string { if e.Module.Path == "" { if e.isStd { return fmt.Sprintf("package %s is not in GOROOT (%s)", e.Path, filepath.Join(cfg.GOROOT, "src", e.Path)) } if e.QueryErr != nil && e.QueryErr != ErrNoModRoot { return fmt.Sprintf("cannot find module providing package %s: %v", e.Path, e.QueryErr) } if cfg.BuildMod == "mod" || (cfg.BuildMod == "readonly" && allowMissingModuleImports) { return "cannot find module providing package " + e.Path } if e.replaced.Path != "" { suggestArg := e.replaced.Path if !module.IsZeroPseudoVersion(e.replaced.Version) { suggestArg = e.replaced.String() } return fmt.Sprintf("module %s provides package %s and is replaced but not required; to add it:\n\tgo get %s", e.replaced.Path, e.Path, suggestArg) } message := fmt.Sprintf("no required module provides package %s", e.Path) if e.QueryErr != nil { return fmt.Sprintf("%s: %v", message, e.QueryErr) } return fmt.Sprintf("%s; to add it:\n\tgo get %s", message, e.Path) } if e.newMissingVersion != "" { return fmt.Sprintf("package %s provided by %s at latest version %s but not at required version %s", e.Path, e.Module.Path, e.Module.Version, e.newMissingVersion) } return fmt.Sprintf("missing module for import: %s@%s provides %s", e.Module.Path, e.Module.Version, e.Path) } func (e *ImportMissingError) Unwrap() error { return e.QueryErr } func (e *ImportMissingError) ImportPath() string { return e.Path } // An AmbiguousImportError indicates an import of a package found in multiple // modules in the build list, or found in both the main module and its vendor // directory. type AmbiguousImportError struct { importPath string Dirs []string Modules []module.Version // Either empty or 1:1 with Dirs. 
} func (e *AmbiguousImportError) ImportPath() string { return e.importPath } func (e *AmbiguousImportError) Error() string { locType := "modules" if len(e.Modules) == 0 { locType = "directories" } var buf strings.Builder fmt.Fprintf(&buf, "ambiguous import: found package %s in multiple %s:", e.importPath, locType) for i, dir := range e.Dirs { buf.WriteString("\n\t") if i < len(e.Modules) { m := e.Modules[i] buf.WriteString(m.Path) if m.Version != "" { fmt.Fprintf(&buf, " %s", m.Version) } fmt.Fprintf(&buf, " (%s)", dir) } else { buf.WriteString(dir) } } return buf.String() } // A DirectImportFromImplicitDependencyError indicates a package directly // imported by a package or test in the main module that is satisfied by a // dependency that is not explicit in the main module's go.mod file. type DirectImportFromImplicitDependencyError struct { ImporterPath string ImportedPath string Module module.Version } func (e *DirectImportFromImplicitDependencyError) Error() string { return fmt.Sprintf("package %s imports %s from implicitly required module; to add missing requirements, run:\n\tgo get %s@%s", e.ImporterPath, e.ImportedPath, e.Module.Path, e.Module.Version) } func (e *DirectImportFromImplicitDependencyError) ImportPath() string { return e.ImporterPath } // ImportMissingSumError is reported in readonly mode when we need to check // if a module contains a package, but we don't have a sum for its .zip file. // We might need sums for multiple modules to verify the package is unique. // // TODO(#43653): consolidate multiple errors of this type into a single error // that suggests a 'go get' command for root packages that transtively import // packages from modules with missing sums. load.CheckPackageErrors would be // a good place to consolidate errors, but we'll need to attach the import // stack here. type ImportMissingSumError struct { importPath string found bool mods []module.Version importer, importerVersion string // optional, but used for additional context importerIsTest bool } func (e *ImportMissingSumError) Error() string { var importParen string if e.importer != "" { importParen = fmt.Sprintf(" (imported by %s)", e.importer) } var message string if e.found { message = fmt.Sprintf("missing go.sum entry needed to verify package %s%s is provided by exactly one module", e.importPath, importParen) } else { message = fmt.Sprintf("missing go.sum entry for module providing package %s%s", e.importPath, importParen) } var hint string if e.importer == "" { // Importing package is unknown, or the missing package was named on the // command line. Recommend 'go mod download' for the modules that could // provide the package, since that shouldn't change go.mod. args := make([]string, len(e.mods)) for i, mod := range e.mods { args[i] = mod.Path } hint = fmt.Sprintf("; to add:\n\tgo mod download %s", strings.Join(args, " ")) } else { // Importing package is known (common case). Recommend 'go get' on the // current version of the importing package. 
tFlag := "" if e.importerIsTest { tFlag = " -t" } version := "" if e.importerVersion != "" { version = "@" + e.importerVersion } hint = fmt.Sprintf("; to add:\n\tgo get%s %s%s", tFlag, e.importer, version) } return message + hint } func (e *ImportMissingSumError) ImportPath() string { return e.importPath } type invalidImportError struct { importPath string err error } func (e *invalidImportError) ImportPath() string { return e.importPath } func (e *invalidImportError) Error() string { return e.err.Error() } func (e *invalidImportError) Unwrap() error { return e.err } // importFromModules finds the module and directory in the dependency graph of // rs containing the package with the given import path. If mg is nil, // importFromModules attempts to locate the module using only the main module // and the roots of rs before it loads the full graph. // // The answer must be unique: importFromModules returns an error if multiple // modules are observed to provide the same package. // // importFromModules can return a module with an empty m.Path, for packages in // the standard library. // // importFromModules can return an empty directory string, for fake packages // like "C" and "unsafe". // // If the package is not present in any module selected from the requirement // graph, importFromModules returns an *ImportMissingError. func importFromModules(ctx context.Context, path string, rs *Requirements, mg *ModuleGraph) (m module.Version, dir string, err error) { if strings.Contains(path, "@") { return module.Version{}, "", fmt.Errorf("import path should not have @version") } if build.IsLocalImport(path) { return module.Version{}, "", fmt.Errorf("relative import not supported") } if path == "C" { // There's no directory for import "C". return module.Version{}, "", nil } // Before any further lookup, check that the path is valid. if err := module.CheckImportPath(path); err != nil { return module.Version{}, "", &invalidImportError{importPath: path, err: err} } // Is the package in the standard library? pathIsStd := search.IsStandardImportPath(path) if pathIsStd && goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) { if targetInGorootSrc { if dir, ok, err := dirInModule(path, targetPrefix, ModRoot(), true); err != nil { return module.Version{}, dir, err } else if ok { return Target, dir, nil } } dir := filepath.Join(cfg.GOROOT, "src", path) return module.Version{}, dir, nil } // -mod=vendor is special. // Everything must be in the main module or the main module's vendor directory. if cfg.BuildMod == "vendor" { mainDir, mainOK, mainErr := dirInModule(path, targetPrefix, ModRoot(), true) vendorDir, vendorOK, _ := dirInModule(path, "", filepath.Join(ModRoot(), "vendor"), false) if mainOK && vendorOK { return module.Version{}, "", &AmbiguousImportError{importPath: path, Dirs: []string{mainDir, vendorDir}} } // Prefer to return main directory if there is one, // Note that we're not checking that the package exists. // We'll leave that for load. if !vendorOK && mainDir != "" { return Target, mainDir, nil } if mainErr != nil { return module.Version{}, "", mainErr } readVendorList() return vendorPkgModule[path], vendorDir, nil } // Check each module on the build list. var dirs []string var mods []module.Version // Iterate over possible modules for the path, not all selected modules. // Iterating over selected modules would make the overall loading time // O(M × P) for M modules providing P imported packages, whereas iterating // over path prefixes is only O(P × k) with maximum path depth k. 
For // large projects both M and P may be very large (note that M ≤ P), but k // will tend to remain smallish (if for no other reason than filesystem // path limitations). // // We perform this iteration either one or two times. If mg is initially nil, // then we first attempt to load the package using only the main module and // its root requirements. If that does not identify the package, or if mg is // already non-nil, then we attempt to load the package using the full // requirements in mg. for { var sumErrMods []module.Version for prefix := path; prefix != "."; prefix = pathpkg.Dir(prefix) { var ( v string ok bool ) if mg == nil { v, ok = rs.rootSelected(prefix) } else { v, ok = mg.Selected(prefix), true } if !ok || v == "none" { continue } m := module.Version{Path: prefix, Version: v} needSum := true root, isLocal, err := fetch(ctx, m, needSum) if err != nil { if sumErr := (*sumMissingError)(nil); errors.As(err, &sumErr) { // We are missing a sum needed to fetch a module in the build list. // We can't verify that the package is unique, and we may not find // the package at all. Keep checking other modules to decide which // error to report. Multiple sums may be missing if we need to look in // multiple nested modules to resolve the import; we'll report them all. sumErrMods = append(sumErrMods, m) continue } // Report fetch error. // Note that we don't know for sure this module is necessary, // but it certainly _could_ provide the package, and even if we // continue the loop and find the package in some other module, // we need to look at this module to make sure the import is // not ambiguous. return module.Version{}, "", err } if dir, ok, err := dirInModule(path, m.Path, root, isLocal); err != nil { return module.Version{}, "", err } else if ok { mods = append(mods, m) dirs = append(dirs, dir) } } if len(mods) > 1 { // We produce the list of directories from longest to shortest candidate // module path, but the AmbiguousImportError should report them from // shortest to longest. Reverse them now. for i := 0; i < len(mods)/2; i++ { j := len(mods) - 1 - i mods[i], mods[j] = mods[j], mods[i] dirs[i], dirs[j] = dirs[j], dirs[i] } return module.Version{}, "", &AmbiguousImportError{importPath: path, Dirs: dirs, Modules: mods} } if len(sumErrMods) > 0 { for i := 0; i < len(sumErrMods)/2; i++ { j := len(sumErrMods) - 1 - i sumErrMods[i], sumErrMods[j] = sumErrMods[j], sumErrMods[i] } return module.Version{}, "", &ImportMissingSumError{ importPath: path, mods: sumErrMods, found: len(mods) > 0, } } if len(mods) == 1 { return mods[0], dirs[0], nil } if mg != nil { // We checked the full module graph and still didn't find the // requested package. var queryErr error if !HasModRoot() { queryErr = ErrNoModRoot } return module.Version{}, "", &ImportMissingError{Path: path, QueryErr: queryErr, isStd: pathIsStd} } // So far we've checked the root dependencies. // Load the full module graph and try again. mg, err = rs.Graph(ctx) if err != nil { // We might be missing one or more transitive (implicit) dependencies from // the module graph, so we can't return an ImportMissingError here — one // of the missing modules might actually contain the package in question, // in which case we shouldn't go looking for it in some new dependency. return module.Version{}, "", err } } } // queryImport attempts to locate a module that can be added to the current // build list to provide the package with the given import path. 
// // Unlike QueryPattern, queryImport prefers to add a replaced version of a // module *before* checking the proxies for a version to add. func queryImport(ctx context.Context, path string, rs *Requirements) (module.Version, error) { // To avoid spurious remote fetches, try the latest replacement for each // module (golang.org/issue/26241). if index != nil { var mods []module.Version for mp, mv := range index.highestReplaced { if !maybeInModule(path, mp) { continue } if mv == "" { // The only replacement is a wildcard that doesn't specify a version, so // synthesize a pseudo-version with an appropriate major version and a // timestamp below any real timestamp. That way, if the main module is // used from within some other module, the user will be able to upgrade // the requirement to any real version they choose. if _, pathMajor, ok := module.SplitPathVersion(mp); ok && len(pathMajor) > 0 { mv = module.ZeroPseudoVersion(pathMajor[1:]) } else { mv = module.ZeroPseudoVersion("v0") } } mods = append(mods, module.Version{Path: mp, Version: mv}) } // Every module path in mods is a prefix of the import path. // As in QueryPattern, prefer the longest prefix that satisfies the import. sort.Slice(mods, func(i, j int) bool { return len(mods[i].Path) > len(mods[j].Path) }) for _, m := range mods { needSum := true root, isLocal, err := fetch(ctx, m, needSum) if err != nil { if sumErr := (*sumMissingError)(nil); errors.As(err, &sumErr) { return module.Version{}, &ImportMissingSumError{importPath: path} } return module.Version{}, err } if _, ok, err := dirInModule(path, m.Path, root, isLocal); err != nil { return m, err } else if ok { if cfg.BuildMod == "readonly" { return module.Version{}, &ImportMissingError{Path: path, replaced: m} } return m, nil } } if len(mods) > 0 && module.CheckPath(path) != nil { // The package path is not valid to fetch remotely, // so it can only exist in a replaced module, // and we know from the above loop that it is not. return module.Version{}, &PackageNotInModuleError{ Mod: mods[0], Query: "latest", Pattern: path, Replacement: Replacement(mods[0]), } } } if search.IsStandardImportPath(path) { // This package isn't in the standard library, isn't in any module already // in the build list, and isn't in any other module that the user has // shimmed in via a "replace" directive. // Moreover, the import path is reserved for the standard library, so // QueryPattern cannot possibly find a module containing this package. // // Instead of trying QueryPattern, report an ImportMissingError immediately. return module.Version{}, &ImportMissingError{Path: path, isStd: true} } if cfg.BuildMod == "readonly" && !allowMissingModuleImports { // In readonly mode, we can't write go.mod, so we shouldn't try to look up // the module. If readonly mode was enabled explicitly, include that in // the error message. var queryErr error if cfg.BuildModExplicit { queryErr = fmt.Errorf("import lookup disabled by -mod=%s", cfg.BuildMod) } else if cfg.BuildModReason != "" { queryErr = fmt.Errorf("import lookup disabled by -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason) } return module.Version{}, &ImportMissingError{Path: path, QueryErr: queryErr} } // Look up module containing the package, for addition to the build list. // Goal is to determine the module, download it to dir, // and return m, dir, ImpportMissingError. 
fmt.Fprintf(os.Stderr, "go: finding module for package %s\n", path) mg, err := rs.Graph(ctx) if err != nil { return module.Version{}, err } candidates, err := QueryPackages(ctx, path, "latest", mg.Selected, CheckAllowed) if err != nil { if errors.Is(err, fs.ErrNotExist) { // Return "cannot find module providing package […]" instead of whatever // low-level error QueryPattern produced. return module.Version{}, &ImportMissingError{Path: path, QueryErr: err} } else { return module.Version{}, err } } candidate0MissingVersion := "" for i, c := range candidates { if v := mg.Selected(c.Mod.Path); semver.Compare(v, c.Mod.Version) > 0 { // QueryPattern proposed that we add module c.Mod to provide the package, // but we already depend on a newer version of that module (and that // version doesn't have the package). // // This typically happens when a package is present at the "@latest" // version (e.g., v1.0.0) of a module, but we have a newer version // of the same module in the build list (e.g., v1.0.1-beta), and // the package is not present there. if i == 0 { candidate0MissingVersion = v } continue } return c.Mod, nil } return module.Version{}, &ImportMissingError{ Path: path, Module: candidates[0].Mod, newMissingVersion: candidate0MissingVersion, } } // maybeInModule reports whether, syntactically, // a package with the given import path could be supplied // by a module with the given module path (mpath). func maybeInModule(path, mpath string) bool { return mpath == path || len(path) > len(mpath) && path[len(mpath)] == '/' && path[:len(mpath)] == mpath } var ( haveGoModCache par.Cache // dir → bool haveGoFilesCache par.Cache // dir → goFilesEntry ) type goFilesEntry struct { haveGoFiles bool err error } // dirInModule locates the directory that would hold the package named by the given path, // if it were in the module with module path mpath and root mdir. // If path is syntactically not within mpath, // or if mdir is a local file tree (isLocal == true) and the directory // that would hold path is in a sub-module (covered by a go.mod below mdir), // dirInModule returns "", false, nil. // // Otherwise, dirInModule returns the name of the directory where // Go source files would be expected, along with a boolean indicating // whether there are in fact Go source files in that directory. // A non-nil error indicates that the existence of the directory and/or // source files could not be determined, for example due to a permission error. func dirInModule(path, mpath, mdir string, isLocal bool) (dir string, haveGoFiles bool, err error) { // Determine where to expect the package. if path == mpath { dir = mdir } else if mpath == "" { // vendor directory dir = filepath.Join(mdir, path) } else if len(path) > len(mpath) && path[len(mpath)] == '/' && path[:len(mpath)] == mpath { dir = filepath.Join(mdir, path[len(mpath)+1:]) } else { return "", false, nil } // Check that there aren't other modules in the way. // This check is unnecessary inside the module cache // and important to skip in the vendor directory, // where all the module trees have been overlaid. // So we only check local module trees // (the main module, and any directory trees pointed at by replace directives). 
if isLocal { for d := dir; d != mdir && len(d) > len(mdir); { haveGoMod := haveGoModCache.Do(d, func() interface{} { fi, err := fsys.Stat(filepath.Join(d, "go.mod")) return err == nil && !fi.IsDir() }).(bool) if haveGoMod { return "", false, nil } parent := filepath.Dir(d) if parent == d { // Break the loop, as otherwise we'd loop // forever if d=="." and mdir=="". break } d = parent } } // Now committed to returning dir (not ""). // Are there Go source files in the directory? // We don't care about build tags, not even "+build ignore". // We're just looking for a plausible directory. res := haveGoFilesCache.Do(dir, func() interface{} { ok, err := fsys.IsDirWithGoFiles(dir) return goFilesEntry{haveGoFiles: ok, err: err} }).(goFilesEntry) return dir, res.haveGoFiles, res.err } // fetch downloads the given module (or its replacement) // and returns its location. // // needSum indicates whether the module may be downloaded in readonly mode // without a go.sum entry. It should only be false for modules fetched // speculatively (for example, for incompatible version filtering). The sum // will still be verified normally. // // The isLocal return value reports whether the replacement, // if any, is local to the filesystem. func fetch(ctx co
.Context, mod module.Version, needSum bool) (dir string, isLocal bool, err error) { if mod == Target { return ModRoot(), true, nil } if r := Replacement(mod); r.Path != "" { if r.Version == "" { dir = r.Path if !filepath.IsAbs(dir) { dir = filepath.Join(ModRoot(), dir) } // Ensure that the replacement directory actually exists: // dirInModule does not report errors for missing modules, // so if we don't report the error now, later failures will be // very mysterious. if _, err := fsys.Stat(dir); err != nil { if os.IsNotExist(err) { // Semantically the module version itself “exists” — we just don't // have its source code. Remove the equivalence to os.ErrNotExist, // and make the message more concise while we're at it. err = fmt.Errorf("replacement directory %s does not exist", r.Path) } else { err = fmt.Errorf("replacement directory %s: %w", r.Path, err) } return dir, true, module.VersionError(mod, err) } return dir, true, nil } mod = r } if HasModRoot() && cfg.BuildMod == "readonly" && needSum && !modfetch.HaveSum(mod) { return "", false, module.VersionError(mod, &sumMissingError{}) } dir, err = modfetch.Download(ctx, mod) return dir, false, err } type sumMissingError struct { suggestion string } func (e *sumMissingError) Error() string { return "missing go.sum entry" + e.suggestion }
ntext
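The prefix walk described in the comments above (checking candidate module paths from longest to shortest instead of scanning every selected module) can be illustrated in isolation. The sketch below is not the cmd/go implementation: the `selected` map is a hypothetical stand-in for `rs.rootSelected` / `mg.Selected`, and `maybeInModule` mirrors the helper shown earlier in this file.

```go
package main

import (
	"fmt"
	pathpkg "path"
)

// maybeInModule mirrors the helper above: a package path can only live in a
// module whose path equals the package path or is a proper path prefix of it.
func maybeInModule(path, mpath string) bool {
	return mpath == path ||
		len(path) > len(mpath) && path[len(mpath)] == '/' && path[:len(mpath)] == mpath
}

func main() {
	importPath := "example.com/group/repo/internal/util"

	// Hypothetical stand-in for rs.rootSelected / mg.Selected.
	selected := map[string]string{
		"example.com/group/repo": "v1.4.0",
		"example.com/group":      "v0.2.0",
	}

	// Walk path prefixes from longest to shortest: the O(P × k) iteration
	// that importFromModules performs instead of scanning every module.
	for prefix := importPath; prefix != "."; prefix = pathpkg.Dir(prefix) {
		v, ok := selected[prefix]
		if !ok || v == "none" {
			continue
		}
		fmt.Printf("candidate %s@%s (maybeInModule=%v)\n",
			prefix, v, maybeInModule(importPath, prefix))
	}
}
```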
file.go
package tempfile import ( "context" "io/ioutil" "os" "path/filepath" "github.com/gopasspw/gopass/pkg/ctxutil" "github.com/pkg/errors" ) // File is a temporary file type File struct { dir string dev string fh *os.File dbg bool } // New returns a new tempfile wrapper func New(ctx context.Context, prefix string) (*File, error)
// Name returns the name of the tempfile func (t *File) Name() string { if t.fh == nil { return "" } return t.fh.Name() } // Write implements io.Writer func (t *File) Write(p []byte) (int, error) { if t.fh == nil { return 0, errors.Errorf("not initialized") } return t.fh.Write(p) } // Close implements io.WriteCloser func (t *File) Close() error { if t.fh == nil { return nil } return t.fh.Close() } // Remove attempts to remove the tempfile func (t *File) Remove(ctx context.Context) error { _ = t.Close() if err := t.unmount(ctx); err != nil { return errors.Errorf("Failed to unmount %s from %s: %s", t.dev, t.dir, err) } if t.dir == "" { return nil } return os.RemoveAll(t.dir) }
{ td, err := ioutil.TempDir(tempdirBase(), prefix) if err != nil { return nil, err } tf := &File{ dir: td, dbg: ctxutil.IsDebug(ctx), } if err := tf.mount(ctx); err != nil { _ = os.RemoveAll(tf.dir) return nil, err } fn := filepath.Join(tf.dir, "secret") fh, err := os.OpenFile(fn, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0600) if err != nil { return nil, errors.Errorf("Failed to open file %s: %s", fn, err) } tf.fh = fh return tf, nil }
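A hedged usage sketch for the wrapper above. It assumes the package can be imported as github.com/gopasspw/gopass/pkg/tempfile and uses only the New, Write, Name and Remove methods shown in this file; the prefix string is arbitrary.

```go
package main

import (
	"context"
	"fmt"

	"github.com/gopasspw/gopass/pkg/tempfile"
)

func main() {
	ctx := context.Background()

	// New creates the temp directory, mounts it if the platform supports
	// that, and opens a "secret" file inside it for writing.
	tf, err := tempfile.New(ctx, "example-")
	if err != nil {
		panic(err)
	}
	// Remove closes the file handle, unmounts the directory (if mounted)
	// and deletes the directory tree.
	defer func() { _ = tf.Remove(ctx) }()

	if _, err := tf.Write([]byte("transient secret\n")); err != nil {
		panic(err)
	}
	fmt.Println("wrote secret to", tf.Name())
}
```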
db.js
const knex = require("knex"); const config = { client: "sqlite3", useNullAsDefault: true, connection: { filename: "./database/database.sqlite",
const db = knex(config); module.exports = db;
}, };
count-contained-permutations.py
# COUNT CONTAINED PERMUTATIONS # O(M * U + N) time and O(U) space, where M -> length of big string, # U -> number of unique characters in small string, N -> length # of small string. # U is effectively a constant since it can't be greater than 26, and # M > N, so the M term dominates the N term. # So, simplified complexities: # O(M) time and O(1) space, M -> length of big string def countContainedPermutations(bigString, smallString): # Write your code here.
def matchCounts(bigCount, smallCount): for letter in smallCount: if letter not in bigCount: return False if smallCount[letter] != bigCount[letter]: return False return True
smallCount, bigCount = {}, {} for letter in smallString: if letter not in smallCount: smallCount[letter] = 0 smallCount[letter] += 1 bigSize, smallSize = len(bigString), len(smallString) start, end, totalCount = 0, 0, 0 while end < bigSize: letterToAdd = bigString[end] if letterToAdd not in bigCount: bigCount[letterToAdd] = 0 bigCount[letterToAdd] += 1 if end - start == smallSize: letterToRemove = bigString[start] if bigCount[letterToRemove] == 1: del bigCount[letterToRemove] else: bigCount[letterToRemove] -= 1 start += 1 if matchCounts(bigCount, smallCount): totalCount += 1 end += 1 return totalCount
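For reference, a minimal Go sketch of the same sliding-window idea (the sample strings and function name are illustrative, not part of the original exercise): grow the window one character at a time, shrink it once it exceeds len(small), and compare letter counts at every step.

```go
package main

import "fmt"

// countContainedPermutations counts windows of big whose letter counts match
// those of small, i.e. substrings of big that are permutations of small.
func countContainedPermutations(big, small string) int {
	smallCount := map[byte]int{}
	for i := 0; i < len(small); i++ {
		smallCount[small[i]]++
	}

	bigCount := map[byte]int{}
	matches := func() bool {
		for ch, n := range smallCount {
			if bigCount[ch] != n {
				return false
			}
		}
		return true
	}

	total, start := 0, 0
	for end := 0; end < len(big); end++ {
		bigCount[big[end]]++
		if end-start+1 > len(small) {
			bigCount[big[start]]--
			if bigCount[big[start]] == 0 {
				delete(bigCount, big[start])
			}
			start++
		}
		if matches() {
			total++
		}
	}
	return total
}

func main() {
	// "cba", "abc", "bca", "cab", "abc" and "bca" all appear: prints 6.
	fmt.Println(countContainedPermutations("cbabcacabca", "abc"))
}
```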
issue-4577.rs
fn main()
{ let s: String = "ABAABBAA".chars() .filter(|c| { if *c == 'A' { true } else { false } }) .map(|c| -> char { if c == 'A' { '0' } else { '1' } }).collect(); println!("{}", s); }
write_file.py
"""写入文件""" file_name = 'user_input.txt' user_input = input('请输入您要存储的信息:\n')
with open(file_name, 'w', encoding='utf8') as file_object: file_object.write(user_input) print('Written successfully')
index.js
/* eslint-disable no-console */ require('dotenv').config(); // 1) We get server and PORT via "destructuring" // 2) server and PORT come from the index file in the server folder // NOTE: there is no need to add "index" when requiring it via require() const { server, PORT } = require('./server');
// Start the server server.listen(PORT, () => console.log(`Listening on ${PORT}`));
require('./database');
parsingFailure_test.go
package govaluateplus import ( "fmt" "regexp/syntax" "strings" "testing" ) const ( UNEXPECTED_END string = "Unexpected end of expression" INVALID_TOKEN_TRANSITION = "Cannot transition token types" INVALID_TOKEN_KIND = "Invalid token" UNCLOSED_QUOTES = "Unclosed string literal" UNCLOSED_BRACKETS = "Unclosed parameter bracket" UNBALANCED_PARENTHESIS = "Unbalanced parenthesis" INVALID_NUMERIC = "Unable to parse numeric value" UNDEFINED_FUNCTION = "Undefined function" HANGING_ACCESSOR = "Hanging accessor on token" UNEXPORTED_ACCESSOR = "Unable to access unexported" INVALID_HEX = "Unable to parse hex value" ) /* Represents a test for parsing failures */ type ParsingFailureTest struct { Name string Input string Expected string } func TestParsingFailure(test *testing.T) { parsingTests := []ParsingFailureTest{ ParsingFailureTest{ Name: "Invalid equality comparator", Input: "1 = 1", Expected: INVALID_TOKEN_KIND, }, ParsingFailureTest{ Name: "Invalid equality comparator", Input: "1 === 1", Expected: INVALID_TOKEN_KIND, }, ParsingFailureTest{ Name: "Too many characters for logical operator", Input: "true &&& false", Expected: INVALID_TOKEN_KIND, }, ParsingFailureTest{ Name: "Too many characters for logical operator", Input: "true ||| false", Expected: INVALID_TOKEN_KIND, }, ParsingFailureTest{ Name: "Premature end to expression, via modifier", Input: "10 > 5 +", Expected: UNEXPECTED_END, }, ParsingFailureTest{ Name: "Premature end to expression, via comparator", Input: "10 + 5 >", Expected: UNEXPECTED_END, }, ParsingFailureTest{ Name: "Premature end to expression, via logical operator", Input: "10 > 5 &&", Expected: UNEXPECTED_END, }, ParsingFailureTest{ Name: "Premature end to expression, via ternary operator", Input: "true ?", Expected: UNEXPECTED_END, }, ParsingFailureTest{ Name: "Hanging REQ", Input: "'wat' =~", Expected: UNEXPECTED_END, }, ParsingFailureTest{ Name: "Invalid operator change to REQ", Input: " / =~", Expected: INVALID_TOKEN_TRANSITION, }, ParsingFailureTest{ Name: "Invalid starting token, comparator", Input: "> 10", Expected: INVALID_TOKEN_TRANSITION, }, ParsingFailureTest{ Name: "Invalid starting token, modifier", Input: "+ 5", Expected: INVALID_TOKEN_TRANSITION, }, ParsingFailureTest{ Name: "Invalid starting token, logical operator", Input: "&& 5 < 10", Expected: INVALID_TOKEN_TRANSITION, }, ParsingFailureTest{ Name: "Invalid NUMERIC transition", Input: "10 10", Expected: INVALID_TOKEN_TRANSITION, }, ParsingFailureTest{ Name: "Invalid STRING transition", Input: "'foo' 'foo'", Expected: INVALID_TOKEN_TRANSITION, }, ParsingFailureTest{ Name: "Invalid operator transition", Input: "10 > < 10", Expected: INVALID_TOKEN_TRANSITION, }, ParsingFailureTest{ Name: "Starting with unbalanced parens", Input: " ) ( arg2", Expected: INVALID_TOKEN_TRANSITION, }, ParsingFailureTest{ Name: "Unclosed bracket", Input: "[foo bar", Expected: UNCLOSED_BRACKETS, }, ParsingFailureTest{ Name: "Unclosed quote", Input: "foo == 'responseTime", Expected: UNCLOSED_QUOTES, }, ParsingFailureTest{ Name: "Constant regex pattern fail to compile", Input: "foo =~ '[abc'", Expected: string(syntax.ErrMissingBracket), }, ParsingFailureTest{ Name: "Unbalanced parenthesis",
Name: "Multiple radix", Input: "127.0.0.1", Expected: INVALID_NUMERIC, }, ParsingFailureTest{ Name: "Undefined function", Input: "foobar()", Expected: UNDEFINED_FUNCTION, }, ParsingFailureTest{ Name: "Hanging accessor", Input: "foo.Bar.", Expected: HANGING_ACCESSOR, }, ParsingFailureTest{ // this is expected to change once there are structtags in place that allow aliasing of fields Name: "Unexported parameter access", Input: "foo.bar", Expected: UNEXPORTED_ACCESSOR, }, ParsingFailureTest{ Name: "Incomplete Hex", Input: "0x", Expected: INVALID_TOKEN_TRANSITION, }, ParsingFailureTest{ Name: "Invalid Hex literal", Input: "0x > 0", Expected: INVALID_HEX, }, ParsingFailureTest{ Name: "Hex float (Unsupported)", Input: "0x1.1", Expected: INVALID_TOKEN_TRANSITION, }, ParsingFailureTest{ Name: "Hex invalid letter", Input: "0x12g1", Expected: INVALID_TOKEN_TRANSITION, }, } runParsingFailureTests(parsingTests, test) } func runParsingFailureTests(parsingTests []ParsingFailureTest, test *testing.T) { var err error fmt.Printf("Running %d parsing test cases...\n", len(parsingTests)) for _, testCase := range parsingTests { _, err = NewEvaluableExpression(testCase.Input) if err == nil { test.Logf("Test '%s' failed", testCase.Name) test.Logf("Expected a parsing error, found no error.") test.Fail() continue } if !strings.Contains(err.Error(), testCase.Expected) { test.Logf("Test '%s' failed", testCase.Name) test.Logf("Got error: '%s', expected '%s'", err.Error(), testCase.Expected) test.Fail() continue } } }
Input: "10 > (1 + 50", Expected: UNBALANCED_PARENTHESIS, }, ParsingFailureTest{
container_list_test.go
package client // import "github.com/docker/docker/client" import ( "bytes" "context" "encoding/json" "fmt" "io/ioutil" "net/http" "strings" "testing" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/errdefs" ) func TestContainerListError(t *testing.T) { client := &Client{ client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } _, err := client.ContainerList(context.Background(), types.ContainerListOptions{}) if !errdefs.IsSystem(err) { t.Fatalf("expected a Server Error, got %[1]T: %[1]v", err) } } func
(t *testing.T) { expectedURL := "/containers/json" expectedFilters := `{"before":{"container":true},"label":{"label1":true,"label2":true}}` client := &Client{ client: newMockClient(func(req *http.Request) (*http.Response, error) { if !strings.HasPrefix(req.URL.Path, expectedURL) { return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) } query := req.URL.Query() all := query.Get("all") if all != "1" { return nil, fmt.Errorf("all not set in URL query properly. Expected '1', got %s", all) } limit := query.Get("limit") if limit != "0" { return nil, fmt.Errorf("limit should have not be present in query. Expected '0', got %s", limit) } since := query.Get("since") if since != "container" { return nil, fmt.Errorf("since not set in URL query properly. Expected 'container', got %s", since) } before := query.Get("before") if before != "" { return nil, fmt.Errorf("before should have not be present in query, go %s", before) } size := query.Get("size") if size != "1" { return nil, fmt.Errorf("size not set in URL query properly. Expected '1', got %s", size) } filters := query.Get("filters") if filters != expectedFilters { return nil, fmt.Errorf("expected filters incoherent '%v' with actual filters %v", expectedFilters, filters) } b, err := json.Marshal([]types.Container{ { ID: "container_id1", }, { ID: "container_id2", }, }) if err != nil { return nil, err } return &http.Response{ StatusCode: http.StatusOK, Body: ioutil.NopCloser(bytes.NewReader(b)), }, nil }), } filters := filters.NewArgs() filters.Add("label", "label1") filters.Add("label", "label2") filters.Add("before", "container") containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{ Size: true, All: true, Since: "container", Filters: filters, }) if err != nil { t.Fatal(err) } if len(containers) != 2 { t.Fatalf("expected 2 containers, got %v", containers) } }
TestContainerList
issue20780.go
// errorcheck // Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // We have a limit of 1GB for stack frames. // Make sure we include the callee args section. package main type Big = [400e6]byte func f()
func g(Big, Big) func h() Big
{ // GC_ERROR "stack frame too large" // Note: This test relies on the fact that we currently always // spill function-results to the stack, even if they're so // large that we would normally heap allocate them. If we ever // improve the backend to spill temporaries to the heap, this // test will probably need updating to find some new way to // construct an overly large stack frame. g(h(), h()) }
index.js
/* * Elements */ const instructionsElement = document.querySelector('.js-instructions'); const drawElement = document.querySelector('.js-draw'); const context = drawElement.getContext('2d'); /* * Variables */ let isDrawing = false; let lastX = 0; let lastY = 0; let hue = 0; let growing = false; /* * Functions */ function resize() { const oldImageData = context.getImageData(0, 0, drawElement.width, drawElement.height); drawElement.width = window.innerWidth; drawElement.height = window.innerHeight; context.lineJoin = 'round'; context.lineCap = 'round'; context.lineWidth = 100; context.putImageData(oldImageData, 0, 0); } function handlePress(e) { e.preventDefault(); e.stopPropagation(); isDrawing = true; if (e.type === 'touchstart') { const target = e.targetTouches[0]; [lastX, lastY] = [target.clientX, target.clientY]; } else { [lastX, lastY] = [e.offsetX, e.offsetY]; } } function draw(e) { // stop if not drawing if (!isDrawing) { return; } instructionsElement.remove(); // make sure p is no longer displaying // get x and y let x; let y; if (e.type === 'touchmove') { const target = e.targetTouches[0]; [x, y] = [target.clientX, target.clientY]; } else { [x, y] = [e.offsetX, e.offsetY]; } // draw the line context.strokeStyle = `hsl(${hue}, 100%, 50%)`; context.beginPath(); context.moveTo(lastX, lastY); context.lineTo(x, y); context.stroke(); [lastX, lastY] = [x, y]; // update colour hue += 1; if (hue >= 360) { hue = 0; } // update line width growing = (growing && context.lineWidth < 100) || (!growing && context.lineWidth <= 1); if (growing) { context.lineWidth += 1; } else { context.lineWidth -= 1; } } function
() { isDrawing = false; } /* * Initialise */ window.addEventListener('resize', resize); drawElement.addEventListener('mousedown', handlePress); drawElement.addEventListener('touchstart', handlePress); drawElement.addEventListener('mousemove', draw); drawElement.addEventListener('touchmove', draw); drawElement.addEventListener('mouseup', stopDrawing); drawElement.addEventListener('mouseout', stopDrawing); drawElement.addEventListener('touchend', stopDrawing); drawElement.addEventListener('touchcancel', stopDrawing); resize();
stopDrawing
sr.py
from handlers.common import Common class
(Common): _type = "SR" def __init__(self, xapi, ref=None, params=None): super().__init__(xapi, ref, params)
SR
p2p_invalid_block.py
#!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Copyright (c) 2017-2018 The Placeholder Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test node responses to invalid blocks. In this test we connect to one node over p2p, and test block requests: 1) Valid blocks should be requested and become chain tip. 2) Invalid block with duplicated transaction should be re-requested. 3) Invalid block with bad coinbase value should be rejected and not re-requested. """ from test_framework.test_framework import ComparisonTestFramework from test_framework.util import * from test_framework.comptool import TestManager, TestInstance, RejectResult from test_framework.blocktools import * import copy import time # Use the ComparisonTestFramework with 1 node: only use --testbinary. class InvalidBlockRequestTest(ComparisonTestFramework): ''' Can either run this test as 1 node with expected answers, or two and compare them. Change the "outcome" variable from each TestInstance object to only do the comparison. ''' def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True def
(self): test = TestManager(self, self.options.tmpdir) test.add_all_connections(self.nodes) self.tip = None self.block_time = None NetworkThread().start() # Start up network handling in another thread test.run() def get_tests(self): if self.tip is None: self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) self.block_time = int(time.time())+1 ''' Create a new block with an anyone-can-spend coinbase ''' height = 1 block = create_block(self.tip, create_coinbase(height), self.block_time) self.block_time += 1 block.solve() # Save the coinbase for later self.block1 = block self.tip = block.sha256 height += 1 yield TestInstance([[block, True]]) ''' Now we need that block to mature so we can spend the coinbase. ''' test = TestInstance(sync_every_block=False) for i in range(100): block = create_block(self.tip, create_coinbase(height), self.block_time) block.solve() self.tip = block.sha256 self.block_time += 1 test.blocks_and_transactions.append([block, True]) height += 1 yield test ''' Now we use merkle-root malleability to generate an invalid block with same blockheader. Manufacture a block with 3 transactions (coinbase, spend of prior coinbase, spend of that spend). Duplicate the 3rd transaction to leave merkle root and blockheader unchanged but invalidate the block. ''' block2 = create_block(self.tip, create_coinbase(height), self.block_time) self.block_time += 1 # b'0x51' is OP_TRUE tx1 = create_transaction(self.block1.vtx[0], 0, b'\x51', 5000 * COIN) tx2 = create_transaction(tx1, 0, b'\x51', 5000 * COIN) block2.vtx.extend([tx1, tx2]) block2.hashMerkleRoot = block2.calc_merkle_root() block2.rehash() block2.solve() orig_hash = block2.sha256 block2_orig = copy.deepcopy(block2) # Mutate block 2 block2.vtx.append(tx2) assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root()) assert_equal(orig_hash, block2.rehash()) assert(block2_orig.vtx != block2.vtx) self.tip = block2.sha256 yield TestInstance([[block2, RejectResult(16, b'bad-txns-duplicate')], [block2_orig, True]]) height += 1 ''' Make sure that a totally screwed up block is not valid. ''' block3 = create_block(self.tip, create_coinbase(height), self.block_time) self.block_time += 1 block3.vtx[0].vout[0].nValue = 100 * COIN # Too high! block3.vtx[0].sha256=None block3.vtx[0].calc_sha256() block3.hashMerkleRoot = block3.calc_merkle_root() block3.rehash() block3.solve() yield TestInstance([[block3, RejectResult(16, b'bad-cb-amount')]]) if __name__ == '__main__': InvalidBlockRequestTest().main()
run_test
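The merkle-root malleability used in get_tests relies on how Bitcoin-style merkle trees handle an odd number of nodes: the last node is paired with itself, so appending a duplicate of the final transaction leaves the root, and therefore the block header, unchanged. Below is a self-contained Go sketch of that property; the leaf payloads ("tx-a" and so on) are made up for illustration and this is not the test framework's code.

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// doubleSHA256 applies SHA-256 twice, Bitcoin-style.
func doubleSHA256(b []byte) []byte {
	h1 := sha256.Sum256(b)
	h2 := sha256.Sum256(h1[:])
	return h2[:]
}

// merkleRoot computes a Bitcoin-style merkle root: at every level an odd
// number of nodes is handled by pairing the last node with itself.
func merkleRoot(leaves [][]byte) []byte {
	level := make([][]byte, len(leaves))
	copy(level, leaves)
	for len(level) > 1 {
		var next [][]byte
		for i := 0; i < len(level); i += 2 {
			left := level[i]
			right := left
			if i+1 < len(level) {
				right = level[i+1]
			}
			next = append(next, doubleSHA256(append(append([]byte{}, left...), right...)))
		}
		level = next
	}
	return level[0]
}

func main() {
	a := doubleSHA256([]byte("tx-a"))
	b := doubleSHA256([]byte("tx-b"))
	c := doubleSHA256([]byte("tx-c"))

	r1 := merkleRoot([][]byte{a, b, c})
	r2 := merkleRoot([][]byte{a, b, c, c}) // duplicate the last transaction

	fmt.Println(hex.EncodeToString(r1))
	fmt.Println(hex.EncodeToString(r2))
	fmt.Println("roots equal:", bytes.Equal(r1, r2)) // prints true
}
```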
p2pserver.rs
use log::*; use futures::{future, prelude::*}; use rustcoinbase::rustcoinlib::p2pservice::*; use rustcoinbase::rustcoinlib::peerdb::*; use rustcoinbase::rustcoinlib::settings::*; use rustcoinbase::rustcoinlib::constants::*; use rustcoinbase::rustcoinlib::localaddress::*; use std::net::{SocketAddr, IpAddr}; use tarpc::{ context, server::{self, incoming::Incoming, Channel}, }; use tokio_serde::formats::*; use serde_cbor::Value; use serde_json; #[derive(Clone)] struct
(SocketAddr, SocketAddr, PeerDatabase, Statics, LocalAddressMap); #[tarpc::server] impl P2PService for P2PServer { async fn addr(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("addr request: {:?} from {}", request, self.0); let remote_addr = self.0; let _my_addr = self.1; let mut peerdb = self.2; let _constants = self.3; let key = String::from(remote_addr.to_string()); let value: PeerDBValue = serde_json::json!({ "test": "blah" }); peerdb.write_raw(&key, value).unwrap(); //let result = peerdb.read(&key).expect("Couldn't read back from db"); info!("{}", message); let mut response = P2PMap::new(); response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } async fn alert(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("alert request: {:?} from {}", request, self.0); info!("{}", message); let mut response = P2PMap::new(); response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } async fn block(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("block request: {:?} from {}", request, self.0); info!("{}", message); let mut response = P2PMap::new(); response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } async fn checkorder(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("checkorder request: {:?} from {}", request, self.0); info!("{}", message); let mut response = P2PMap::new(); response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } async fn checkpoint(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("checkpoint request: {:?} from {}", request, self.0); info!("{}", message); let mut response = P2PMap::new(); response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } async fn getaddr(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("getaddr request: {:?} from {}", request, self.0); info!("{}", message); let mut response = P2PMap::new(); response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } async fn getblocks(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("getblocks request: {:?} from {}", request, self.0); info!("{}", message); let mut response = P2PMap::new(); response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } async fn getdata(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("getdata request: {:?} from {}", request, self.0); info!("{}", message); let mut response = P2PMap::new(); response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } async fn headers(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("headers request: {:?} from {}", request, self.0); info!("{}", message); let mut response = P2PMap::new(); response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } async fn inv(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("inv request: {:?} from {}", request, self.0); info!("{}", message); let mut response = P2PMap::new(); response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } async fn mempool(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("mempool request: 
{:?} from {}", request, self.0); info!("{}", message); let mut response = P2PMap::new(); response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } async fn ping(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("ping request: {:?} from {}", request, self.0); info!("{}", message); let mut response = P2PMap::new(); response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } async fn pong(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("pong request: {:?} from {}", request, self.0); info!("{}", message); let mut response = P2PMap::new(); response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } async fn reply(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("reply request: {:?} from {}", request, self.0); info!("{}", message); let mut response = P2PMap::new(); response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } async fn tx(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("tx request: {:?} from {}", request, self.0); info!("{}", message); let mut response = P2PMap::new(); response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } async fn version(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("version request: {:?} from {}", request, self.0); info!("{}", message); let request: P2PVersionRequest = P2PVersionRequest::from(request); info!("{:?}", request); let remote_addr = self.0; let my_addr = self.1; let mut peerdb = self.2; let constants = self.3; let mut localaddress = self.4; let key = String::from(remote_addr.to_string()); let mut peer = peerdb.read(&key).expect("Couldn't read back from db"); info!("{:?}", peer); let mut response = P2PMap::new(); if peer.version != 0 { peer.misbehaving += 1; return response; } peer.version = request.version; peer.services = request.services; peer.addr_remote = MySocketAddr(remote_addr); let _time = request.time; let addr_me: IpAddr = request.addr_me.parse().unwrap(); let sa = MySocketAddr(SocketAddr::new(addr_me, my_addr.port())); if peer.inbound && sa.is_routable() { peer.addr_local = sa.clone(); if localaddress.increment(&sa) { // Readvertise with new values } } info!("Peer: {:?}", peer); if peer.version < constants.min_peer_proto_version { error!("Peer {:?} using obsolete version {}; disconnecting", remote_addr, peer.version); peer.disconnect = true; peerdb.write(&key, peer).unwrap(); return response; } let nonce = request.nonce; if nonce == constants.local_host_nonce { error!("Connected to self from {}, disconnecting", remote_addr); peer.disconnect = true; return response; } response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } async fn verack(self, _: context::Context, request: P2PMap) -> P2PMap { let message = format!("verack request: {:?} from {}", request, self.0); info!("{}", message); let mut response = P2PMap::new(); response.insert(Value::from(String::from("text")), Value::from(String::from(message))); response } } #[tokio::main] pub async fn start_p2pserver(settings: &Settings, peerdb: PeerDatabase, localaddresses: &LocalAddressMap ) -> anyhow::Result<()> { let constants: Statics = Statics::new(); let mut local_addr_map = localaddresses; let server_ip = settings.p2p.bind; let server_port = settings.p2p.port; let server_addr = (server_ip, server_port); 
info!("Starting P2P Service at {:?}", server_addr); // JSON transport is provided by the json_transport tarpc module. It makes it easy // to start up a serde-powered json serialization strategy over TCP. let mut listener = tarpc::serde_transport::tcp::listen(&server_addr, Cbor::default).await?; listener.config_mut().max_frame_length(usize::MAX); listener // Ignore accept errors. .filter_map(|r| future::ready(r.ok())) .map(server::BaseChannel::with_defaults) // Limit channels to 1 per IP. .max_channels_per_key(1, |t| t.transport().peer_addr().unwrap().ip()) // serve is generated by the service attribute. It takes as input any type implementing // the generated World trait. .map(|channel| { let server = P2PServer(channel.transport().peer_addr().unwrap(), channel.transport().local_addr().unwrap(), peerdb.clone(), constants.clone(), local_addr_map.clone()); channel.execute(server.serve()) }) // Max 10 channels. .buffer_unordered(10) .for_each(|_| async {}) .await; Ok(()) }
P2PServer
rax_files_objects.py
#!/usr/bin/python # (c) 2013, Paul Durivage <[email protected]> # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # This is a DOCUMENTATION stub specific to this module, it extends # a documentation fragment located in ansible.utils.module_docs_fragments ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: rax_files_objects short_description: Upload, download, and delete objects in Rackspace Cloud Files description: - Upload, download, and delete objects in Rackspace Cloud Files version_added: "1.5" options: clear_meta: description: - Optionally clear existing metadata when applying metadata to existing objects. Selecting this option is only appropriate when setting type=meta choices: - "yes" - "no" default: "no" container: description: - The container to use for file object operations. required: true default: null dest: description: - The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder". Used to specify the destination of an operation on a remote object; i.e. a file name, "file1", or a comma-separated list of remote objects, "file1,file2,file17" expires: description: - Used to set an expiration on a file or folder uploaded to Cloud Files. Requires an integer, specifying expiration in seconds default: null meta: description: - A hash of items to set as metadata values on an uploaded file or folder default: null method: description: - The method of operation to be performed. For example, put to upload files to Cloud Files, get to download files from Cloud Files or delete to delete remote objects in Cloud Files choices: - get - put - delete default: get src: description: - Source from which to upload files. Used to specify a remote object as a source for an operation, i.e. a file name, "file1", or a comma-separated list of remote objects, "file1,file2,file17". src and dest are mutually exclusive on remote-only object operations default: null structure: description: - Used to specify whether to maintain nested directory structure when downloading objects from Cloud Files. 
Setting to false downloads the contents of a container to a single, flat directory choices: - yes - "no" default: "yes" state: description: - Indicate desired state of the resource choices: ['present', 'absent'] default: present type: description: - Type of object to do work on - Metadata object or a file object choices: - file - meta default: file author: "Paul Durivage (@angstwad)" extends_documentation_fragment: rackspace ''' EXAMPLES = ''' - name: "Test Cloud Files Objects" hosts: local gather_facts: False tasks: - name: "Get objects from test container" rax_files_objects: container: testcont dest: ~/Downloads/testcont - name: "Get single object from test container" rax_files_objects: container: testcont src: file1 dest: ~/Downloads/testcont - name: "Get several objects from test container" rax_files_objects: container: testcont src: file1,file2,file3 dest: ~/Downloads/testcont - name: "Delete one object in test container" rax_files_objects: container: testcont method: delete dest: file1 - name: "Delete several objects in test container" rax_files_objects: container: testcont method: delete dest: file2,file3,file4 - name: "Delete all objects in test container" rax_files_objects: container: testcont method: delete - name: "Upload all files to test container" rax_files_objects: container: testcont method: put src: ~/Downloads/onehundred - name: "Upload one file to test container" rax_files_objects: container: testcont method: put src: ~/Downloads/testcont/file1 - name: "Upload one file to test container with metadata" rax_files_objects: container: testcont src: ~/Downloads/testcont/file2 method: put meta: testkey: testdata who_uploaded_this: [email protected] - name: "Upload one file to test container with TTL of 60 seconds" rax_files_objects: container: testcont method: put src: ~/Downloads/testcont/file3 expires: 60 - name: "Attempt to get remote object that does not exist" rax_files_objects: container: testcont method: get src: FileThatDoesNotExist.jpg dest: ~/Downloads/testcont ignore_errors: yes - name: "Attempt to delete remote object that does not exist" rax_files_objects: container: testcont method: delete dest: FileThatDoesNotExist.jpg ignore_errors: yes - name: "Test Cloud Files Objects Metadata" hosts: local gather_facts: false tasks: - name: "Get metadata on one object" rax_files_objects: container: testcont type: meta dest: file2 - name: "Get metadata on several objects" rax_files_objects: container: testcont type: meta src: file2,file1 - name: "Set metadata on an object" rax_files_objects: container: testcont type: meta dest: file17 method: put meta: key1: value1 key2: value2 clear_meta: true - name: "Verify metadata is set" rax_files_objects: container: testcont type: meta src: file17 - name: "Delete metadata" rax_files_objects: container: testcont type: meta dest: file17 method: delete meta: key1: '' key2: '' - name: "Get metadata on all objects" rax_files_objects: container: testcont type: meta ''' try: import pyrax HAS_PYRAX = True except ImportError: HAS_PYRAX = False EXIT_DICT = dict(success=False) META_PREFIX = 'x-object-meta-' def _get_container(module, cf, container): try: return cf.get_container(container) except pyrax.exc.NoSuchContainer as e: module.fail_json(msg=e.message) def _upload_folder(cf, folder, container, ttl=None, headers=None): """ Uploads a folder to Cloud Files. 
""" total_bytes = 0 for root, dirs, files in os.walk(folder): for fname in files: full_path = os.path.join(root, fname) obj_name = os.path.relpath(full_path, folder) obj_size = os.path.getsize(full_path) cf.upload_file(container, full_path, obj_name=obj_name, return_none=True, ttl=ttl, headers=headers) total_bytes += obj_size return total_bytes def upload(module, cf, container, src, dest, meta, expires): """ Uploads a single object or a folder to Cloud Files Optionally sets an metadata, TTL value (expires), or Content-Disposition and Content-Encoding headers. """ if not src: module.fail_json(msg='src must be specified when uploading') c = _get_container(module, cf, container) src = os.path.abspath(os.path.expanduser(src)) is_dir = os.path.isdir(src) if not is_dir and not os.path.isfile(src) or not os.path.exists(src): module.fail_json(msg='src must be a file or a directory') if dest and is_dir: module.fail_json(msg='dest cannot be set when whole ' 'directories are uploaded') cont_obj = None total_bytes = 0 if dest and not is_dir: try: cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta) except Exception as e: module.fail_json(msg=e.message) elif is_dir: try: total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta) except Exception as e: module.fail_json(msg=e.message) else: try: cont_obj = c.upload_file(src, ttl=expires, headers=meta) except Exception as e: module.fail_json(msg=e.message) EXIT_DICT['success'] = True EXIT_DICT['container'] = c.name EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name) if cont_obj or total_bytes > 0: EXIT_DICT['changed'] = True if meta: EXIT_DICT['meta'] = dict(updated=True) if cont_obj: EXIT_DICT['bytes'] = cont_obj.total_bytes EXIT_DICT['etag'] = cont_obj.etag else: EXIT_DICT['bytes'] = total_bytes module.exit_json(**EXIT_DICT) def download(module, cf, container, src, dest, structure):
def delete(module, cf, container, src, dest): """ Delete specific objects by proving a single file name or a comma-separated list to src OR dest (but not both). Omitting file name(s) assumes the entire container is to be deleted. """ objs = None if src and dest: module.fail_json(msg="Error: ambiguous instructions; files to be deleted " "have been specified on both src and dest args") elif dest: objs = dest else: objs = src c = _get_container(module, cf, container) if objs: objs = objs.split(',') objs = map(str.strip, objs) else: objs = c.get_object_names() num_objs = len(objs) results = [] for obj in objs: try: result = c.delete_object(obj) except Exception as e: module.fail_json(msg=e.message) else: results.append(result) num_deleted = results.count(True) EXIT_DICT['container'] = c.name EXIT_DICT['deleted'] = num_deleted EXIT_DICT['requested_deleted'] = objs if num_deleted: EXIT_DICT['changed'] = True if num_objs == num_deleted: EXIT_DICT['success'] = True EXIT_DICT['msg'] = "%s objects deleted" % num_deleted else: EXIT_DICT['msg'] = ("Error: only %s of %s objects " "deleted" % (num_deleted, num_objs)) module.exit_json(**EXIT_DICT) def get_meta(module, cf, container, src, dest): """ Get metadata for a single file, comma-separated list, or entire container """ c = _get_container(module, cf, container) objs = None if src and dest: module.fail_json(msg="Error: ambiguous instructions; files to be deleted " "have been specified on both src and dest args") elif dest: objs = dest else: objs = src if objs: objs = objs.split(',') objs = map(str.strip, objs) else: objs = c.get_object_names() results = dict() for obj in objs: try: meta = c.get_object(obj).get_metadata() except Exception as e: module.fail_json(msg=e.message) else: results[obj] = dict() for k, v in meta.items(): meta_key = k.split(META_PREFIX)[-1] results[obj][meta_key] = v EXIT_DICT['container'] = c.name if results: EXIT_DICT['meta_results'] = results EXIT_DICT['success'] = True module.exit_json(**EXIT_DICT) def put_meta(module, cf, container, src, dest, meta, clear_meta): """ Set metadata on a container, single file, or comma-separated list. Passing a true value to clear_meta clears the metadata stored in Cloud Files before setting the new metadata to the value of "meta". """ objs = None if src and dest: module.fail_json(msg="Error: ambiguous instructions; files to set meta" " have been specified on both src and dest args") elif dest: objs = dest else: objs = src objs = objs.split(',') objs = map(str.strip, objs) c = _get_container(module, cf, container) results = [] for obj in objs: try: result = c.get_object(obj).set_metadata(meta, clear=clear_meta) except Exception as e: module.fail_json(msg=e.message) else: results.append(result) EXIT_DICT['container'] = c.name EXIT_DICT['success'] = True if results: EXIT_DICT['changed'] = True EXIT_DICT['num_changed'] = True module.exit_json(**EXIT_DICT) def delete_meta(module, cf, container, src, dest, meta): """ Removes metadata keys and values specified in meta, if any. 
Deletes on all objects specified by src or dest (but not both), if any; otherwise it deletes keys on all objects in the container """ objs = None if src and dest: module.fail_json(msg="Error: ambiguous instructions; meta keys to be " "deleted have been specified on both src and dest" " args") elif dest: objs = dest else: objs = src objs = objs.split(',') objs = map(str.strip, objs) c = _get_container(module, cf, container) results = [] # Num of metadata keys removed, not objects affected for obj in objs: if meta: for k, v in meta.items(): try: result = c.get_object(obj).remove_metadata_key(k) except Exception as e: module.fail_json(msg=e.message) else: results.append(result) else: try: o = c.get_object(obj) except pyrax.exc.NoSuchObject as e: module.fail_json(msg=e.message) for k, v in o.get_metadata().items(): try: result = o.remove_metadata_key(k) except Exception as e: module.fail_json(msg=e.message) results.append(result) EXIT_DICT['container'] = c.name EXIT_DICT['success'] = True if results: EXIT_DICT['changed'] = True EXIT_DICT['num_deleted'] = len(results) module.exit_json(**EXIT_DICT) def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires): """ Dispatch from here to work with metadata or file objects """ cf = pyrax.cloudfiles if cf is None: module.fail_json(msg='Failed to instantiate client. This ' 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') if typ == "file": if method == 'put': upload(module, cf, container, src, dest, meta, expires) elif method == 'get': download(module, cf, container, src, dest, structure) elif method == 'delete': delete(module, cf, container, src, dest) else: if method == 'get': get_meta(module, cf, container, src, dest) if method == 'put': put_meta(module, cf, container, src, dest, meta, clear_meta) if method == 'delete': delete_meta(module, cf, container, src, dest, meta) def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( container=dict(required=True), src=dict(), dest=dict(), method=dict(default='get', choices=['put', 'get', 'delete']), type=dict(default='file', choices=['file', 'meta']), meta=dict(type='dict', default=dict()), clear_meta=dict(default=False, type='bool'), structure=dict(default=True, type='bool'), expires=dict(type='int'), ) ) module = AnsibleModule( argument_spec=argument_spec, required_together=rax_required_together() ) if not HAS_PYRAX: module.fail_json(msg='pyrax is required for this module') container = module.params.get('container') src = module.params.get('src') dest = module.params.get('dest') method = module.params.get('method') typ = module.params.get('type') meta = module.params.get('meta') clear_meta = module.params.get('clear_meta') structure = module.params.get('structure') expires = module.params.get('expires') if clear_meta and not typ == 'meta': module.fail_json(msg='clear_meta can only be used when setting metadata') setup_rax_module(module, pyrax) cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires) from ansible.module_utils.basic import * from ansible.module_utils.rax import * if __name__ == '__main__': main()
""" Download objects from Cloud Files to a local path specified by "dest". Optionally disable maintaining a directory structure by by passing a false value to "structure". """ # Looking for an explicit destination if not dest: module.fail_json(msg='dest is a required argument when ' 'downloading from Cloud Files') # Attempt to fetch the container by name c = _get_container(module, cf, container) # Accept a single object name or a comma-separated list of objs # If not specified, get the entire container if src: objs = src.split(',') objs = map(str.strip, objs) else: objs = c.get_object_names() dest = os.path.abspath(os.path.expanduser(dest)) is_dir = os.path.isdir(dest) if not is_dir: module.fail_json(msg='dest must be a directory') results = [] for obj in objs: try: c.download_object(obj, dest, structure=structure) except Exception as e: module.fail_json(msg=e.message) else: results.append(obj) len_results = len(results) len_objs = len(objs) EXIT_DICT['container'] = c.name EXIT_DICT['requested_downloaded'] = results if results: EXIT_DICT['changed'] = True if len_results == len_objs: EXIT_DICT['success'] = True EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest) else: EXIT_DICT['msg'] = "Error: only %s of %s objects were " \ "downloaded" % (len_results, len_objs) module.exit_json(**EXIT_DICT)
query.go
package conditions import ( "errors" "fmt" "strings" "time" "github.com/grafana/grafana/pkg/plugins" "github.com/grafana/grafana/pkg/tsdb/prometheus" gocontext "context" "github.com/grafana/grafana-plugin-sdk-go/data" "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/components/null" "github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/services/alerting" "github.com/grafana/grafana/pkg/util/errutil" ) func init() { alerting.RegisterCondition("query", func(model *simplejson.Json, index int) (alerting.Condition, error) { return newQueryCondition(model, index) }) } // QueryCondition is responsible for issue and query, reduce the // timeseries into single values and evaluate if they are firing or not. type QueryCondition struct { Index int Query AlertQuery Reducer *queryReducer Evaluator AlertEvaluator Operator string } // AlertQuery contains information about what datasource a query // should be sent to and the query object. type AlertQuery struct { Model *simplejson.Json DatasourceID int64 From string To string } // Eval evaluates the `QueryCondition`. func (c *QueryCondition) Eval(context *alerting.EvalContext, requestHandler plugins.DataRequestHandler) (*alerting.ConditionResult, error) { timeRange := plugins.NewDataTimeRange(c.Query.From, c.Query.To) seriesList, err := c.executeQuery(context, timeRange, requestHandler) if err != nil { return nil, err } emptySeriesCount := 0 evalMatchCount := 0 var matches []*alerting.EvalMatch for _, series := range seriesList { reducedValue := c.Reducer.Reduce(series) evalMatch := c.Evaluator.Eval(reducedValue) if !reducedValue.Valid { emptySeriesCount++ } if context.IsTestRun { context.Logs = append(context.Logs, &alerting.ResultLogEntry{ Message: fmt.Sprintf("Condition[%d]: Eval: %v, Metric: %s, Value: %s", c.Index, evalMatch, series.Name, reducedValue), }) } if evalMatch { evalMatchCount++ matches = append(matches, &alerting.EvalMatch{ Metric: series.Name, Value: reducedValue, Tags: series.Tags, }) } } // handle no series special case if len(seriesList) == 0 { // eval condition for null value evalMatch := c.Evaluator.Eval(null.FloatFromPtr(nil)) if context.IsTestRun { context.Logs = append(context.Logs, &alerting.ResultLogEntry{ Message: fmt.Sprintf("Condition: Eval: %v, Query Returned No Series (reduced to null/no value)", evalMatch), }) } if evalMatch { evalMatchCount++ matches = append(matches, &alerting.EvalMatch{Metric: "NoData", Value: null.FloatFromPtr(nil)}) } } return &alerting.ConditionResult{ Firing: evalMatchCount > 0, NoDataFound: emptySeriesCount == len(seriesList), Operator: c.Operator, EvalMatches: matches, }, nil } func (c *QueryCondition) executeQuery(context *alerting.EvalContext, timeRange plugins.DataTimeRange, requestHandler plugins.DataRequestHandler) (plugins.DataTimeSeriesSlice, error) { getDsInfo := &models.GetDataSourceQuery{ Id: c.Query.DatasourceID, OrgId: context.Rule.OrgID, } if err := bus.Dispatch(getDsInfo); err != nil { return nil, fmt.Errorf("could not find datasource: %w", err) } err := context.RequestValidator.Validate(getDsInfo.Result.Url, nil) if err != nil { return nil, fmt.Errorf("access denied: %w", err) } req := c.getRequestForAlertRule(getDsInfo.Result, timeRange, context.IsDebug) result := make(plugins.DataTimeSeriesSlice, 0) if context.IsDebug { data := simplejson.New() if req.TimeRange != nil { data.Set("from", req.TimeRange.GetFromAsMsEpoch()) data.Set("to", req.TimeRange.GetToAsMsEpoch()) } type queryDto struct 
			RefID         string           `json:"refId"`
			Model         *simplejson.Json `json:"model"`
			Datasource    *simplejson.Json `json:"datasource"`
			MaxDataPoints int64            `json:"maxDataPoints"`
			IntervalMS    int64            `json:"intervalMs"`
		}

		queries := []*queryDto{}
		for _, q := range req.Queries {
			queries = append(queries, &queryDto{
				RefID: q.RefID,
				Model: q.Model,
				Datasource: simplejson.NewFromAny(map[string]interface{}{
					"id":   q.DataSource.Id,
					"name": q.DataSource.Name,
				}),
				MaxDataPoints: q.MaxDataPoints,
				IntervalMS:    q.IntervalMS,
			})
		}

		data.Set("queries", queries)

		context.Logs = append(context.Logs, &alerting.ResultLogEntry{
			Message: fmt.Sprintf("Condition[%d]: Query", c.Index),
			Data:    data,
		})
	}

	resp, err := requestHandler.HandleRequest(context.Ctx, getDsInfo.Result, req)
	if err != nil {
		return nil, toCustomError(err)
	}

	for _, v := range resp.Results {
		if v.Error != nil {
			return nil, fmt.Errorf("request handler response error %v", v)
		}

		// Only fall back to dataframes if there are dataframes but no series on the result.
		useDataframes := v.Dataframes != nil && (v.Series == nil || len(v.Series) == 0)

		if useDataframes { // convert the dataframes to plugins.DataTimeSeries
			frames, err := v.Dataframes.Decoded()
			if err != nil {
				return nil, errutil.Wrap("request handler failed to unmarshal arrow dataframes from bytes", err)
			}

			for _, frame := range frames {
				ss, err := FrameToSeriesSlice(frame)
				if err != nil {
					return nil, errutil.Wrapf(err, `request handler failed to convert dataframe "%v" to plugins.DataTimeSeriesSlice`, frame.Name)
				}
				result = append(result, ss...)
			}
		} else {
			result = append(result, v.Series...)
		}

		queryResultData := map[string]interface{}{}

		if context.IsTestRun {
			queryResultData["series"] = result
		}

		if context.IsDebug && v.Meta != nil {
			queryResultData["meta"] = v.Meta
		}

		if context.IsTestRun || context.IsDebug {
			if useDataframes {
				queryResultData["fromDataframe"] = true
			}
			context.Logs = append(context.Logs, &alerting.ResultLogEntry{
				Message: fmt.Sprintf("Condition[%d]: Query Result", c.Index),
				Data:    simplejson.NewFromAny(queryResultData),
			})
		}
	}

	return result, nil
}

func (c *QueryCondition) getRequestForAlertRule(datasource *models.DataSource, timeRange plugins.DataTimeRange,
	debug bool) plugins.DataQuery {
	queryModel := c.Query.Model
	req := plugins.DataQuery{
		TimeRange: &timeRange,
		Queries: []plugins.DataSubQuery{
			{
				RefID:      "A",
				Model:      queryModel,
				DataSource: datasource,
				QueryType:  queryModel.Get("queryType").MustString(""),
			},
		},
		Headers: map[string]string{
			"FromAlert": "true",
		},
		Debug: debug,
	}
	return req
}

func newQueryCondition(model *simplejson.Json, index int) (*QueryCondition, error) {
	condition := QueryCondition{}
	condition.Index = index

	queryJSON := model.Get("query")
	condition.Query.Model = queryJSON.Get("model")

	// params[1] and params[2] carry the query's relative time range, e.g. "5m" and "now".
	condition.Query.From = queryJSON.Get("params").MustArray()[1].(string)
	condition.Query.To = queryJSON.Get("params").MustArray()[2].(string)

	if err := validateFromValue(condition.Query.From); err != nil {
		return nil, err
	}

	if err := validateToValue(condition.Query.To); err != nil {
		return nil, err
	}

	condition.Query.DatasourceID = queryJSON.Get("datasourceId").MustInt64()

	reducerJSON := model.Get("reducer")
	condition.Reducer = newSimpleReducer(reducerJSON.Get("type").MustString())

	evaluatorJSON := model.Get("evaluator")
	evaluator, err := NewAlertEvaluator(evaluatorJSON)
	if err != nil {
		return nil, fmt.Errorf("error in condition %v: %v", index, err)
	}
	condition.Evaluator = evaluator

	operatorJSON := model.Get("operator")
	operator := operatorJSON.Get("type").MustString("and")
	condition.Operator = operator

	return &condition, nil
}
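// For reference, a minimal sketch of the condition JSON that newQueryCondition parses.
// Only the field names and the fact that params[1]/params[2] hold the time range follow
// from the parsing code above; the concrete values (and the evaluator params) are
// hypothetical:
//
//	{
//	  "query": {
//	    "params": ["A", "5m", "now"],
//	    "datasourceId": 1,
//	    "model": { "refId": "A", "expr": "up" }
//	  },
//	  "reducer":   { "type": "avg" },
//	  "evaluator": { "type": "gt", "params": [100] },
//	  "operator":  { "type": "and" }
//	}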
func validateFromValue(from string) error {
	fromRaw := strings.Replace(from, "now-", "", 1)

	_, err := time.ParseDuration("-" + fromRaw)
	return err
}

func validateToValue(to string) error {
	if to == "now" {
		return nil
	} else if strings.HasPrefix(to, "now-") {
		withoutNow := strings.Replace(to, "now-", "", 1)

		_, err := time.ParseDuration("-" + withoutNow)
		if err == nil {
			return nil
		}
	}

	_, err := time.ParseDuration(to)
	return err
}

// FrameToSeriesSlice converts a frame that is a valid time series as per data.TimeSeriesSchema()
// to a DataTimeSeriesSlice.
func FrameToSeriesSlice(frame *data.Frame) (plugins.DataTimeSeriesSlice, error) {
	tsSchema := frame.TimeSeriesSchema()
	if tsSchema.Type == data.TimeSeriesTypeNot {
		// If there are no fields, or only a time field, create an empty plugins.DataTimeSeriesSlice
		// with a single time series in order to trigger "no data" in alerting.
		if len(frame.Fields) == 0 || (len(frame.Fields) == 1 && frame.Fields[0].Type().Time()) {
			return plugins.DataTimeSeriesSlice{{
				Name:   frame.Name,
				Points: make(plugins.DataTimeSeriesPoints, 0),
			}}, nil
		}
		return nil, fmt.Errorf("input frame is not recognized as a time series")
	}

	seriesCount := len(tsSchema.ValueIndices)
	seriesSlice := make(plugins.DataTimeSeriesSlice, 0, seriesCount)

	timeField := frame.Fields[tsSchema.TimeIndex]
	timeNullFloatSlice := make([]null.Float, timeField.Len())

	for i := 0; i < timeField.Len(); i++ { // build a slice of the time values as epoch ms in null floats
		tStamp, err := timeField.FloatAt(i)
		if err != nil {
			return nil, err
		}
		timeNullFloatSlice[i] = null.FloatFrom(tStamp)
	}

	for _, fieldIdx := range tsSchema.ValueIndices { // create a TimeSeries for each value Field
		field := frame.Fields[fieldIdx]
		ts := plugins.DataTimeSeries{
			Points: make(plugins.DataTimeSeriesPoints, field.Len()),
		}

		if len(field.Labels) > 0 {
			ts.Tags = field.Labels.Copy()
		}

		switch {
		case field.Config != nil && field.Config.DisplayName != "":
			ts.Name = field.Config.DisplayName
		case field.Config != nil && field.Config.DisplayNameFromDS != "":
			ts.Name = field.Config.DisplayNameFromDS
		case len(field.Labels) > 0:
			// Tags are appended to the name so they are eventually included in EvalMatch's Metric property
			// for display in notifications.
			ts.Name = fmt.Sprintf("%v {%v}", field.Name, field.Labels.String())
		default:
			ts.Name = field.Name
		}

		for rowIdx := 0; rowIdx < field.Len(); rowIdx++ { // for each value in the field, make a TimePoint
			val, err := field.FloatAt(rowIdx)
			if err != nil {
				return nil, errutil.Wrapf(err,
					"failed to convert frame to DataTimeSeriesSlice, can not convert value %v to float", field.At(rowIdx))
			}
			ts.Points[rowIdx] = plugins.DataTimePoint{
				null.FloatFrom(val), timeNullFloatSlice[rowIdx],
			}
		}

		seriesSlice = append(seriesSlice, ts)
	}

	return seriesSlice, nil
}
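// frameToSeriesSliceExample is an illustrative, hedged sketch (not part of the original
// package) of the kind of wide frame FrameToSeriesSlice accepts: one time field plus one
// or more numeric value fields, optionally labelled. The frame name, field names, labels,
// and values below are hypothetical.
func frameToSeriesSliceExample() (plugins.DataTimeSeriesSlice, error) {
	now := time.Now()
	frame := data.NewFrame("http_requests",
		// the time field supplies the timestamps; FloatAt converts them to epoch ms above
		data.NewField("time", nil, []time.Time{now.Add(-time.Minute), now}),
		// each value field becomes one DataTimeSeries; its labels end up as Tags
		data.NewField("value", data.Labels{"handler": "/api"}, []float64{3, 5}),
	)
	return FrameToSeriesSlice(frame)
}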
func toCustomError(err error) error {
	// context deadline exceeded: the alert query timed out
	if errors.Is(err, gocontext.DeadlineExceeded) {
		return fmt.Errorf("alert execution exceeded the timeout")
	}

	// Prometheus API errors are converted to their dedicated error type
	if prometheus.IsAPIError(err) {
		return prometheus.ConvertAPIError(err)
	}

	// generic fallback
	return fmt.Errorf("request handler error: %w", err)
}