Dataset columns:
  file_name  string (length 3 – 137)
  prefix     string (length 0 – 918k)
  suffix     string (length 0 – 962k)
  middle     string (length 0 – 812k)
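Each row stores one source file split for fill-in-the-middle training: concatenating prefix + middle + suffix reproduces the file. A minimal sketch of reassembling a row in plain Python (the dict keys follow the column names above; the sample row itself is made up):

# Reassemble one FIM example into the original source text.
# `row` is assumed to be a dict keyed by the columns listed above.
def reassemble(row: dict) -> str:
    return row["prefix"] + row["middle"] + row["suffix"]

row = {
    "file_name": "example.py",
    "prefix": "def add(a, b):\n    ",
    "middle": "return a + b",
    "suffix": "\n\nprint(add(1, 2))\n",
}
assert reassemble(row) == "def add(a, b):\n    return a + b\n\nprint(add(1, 2))\n"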
test_global_modules_cfg.py
import FWCore.ParameterSet.Config as cms

nEvtLumi = 4
nEvtRun = 2*nEvtLumi
nRuns = 64
nStreams = 4
nEvt = nRuns*nEvtRun

process = cms.Process("TESTGLOBALMODULES")

import FWCore.Framework.test.cmsExceptionsFatalOption_cff
process.options = cms.untracked.PSet(
    numberOfStreams = cms.untracked.uint32(nStreams),
    numberOfThreads = cms.untracked.uint32(nStreams),
    numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(1)
)

process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(nEvt)
)

process.source = cms.Source("EmptySource",
    timeBetweenEvents = cms.untracked.uint64(1000),
    firstTime = cms.untracked.uint64(1000000),
    numberEventsInRun = cms.untracked.uint32(nEvtRun),
    numberEventsInLuminosityBlock = cms.untracked.uint32(nEvtLumi)
)

process.StreamIntProd = cms.EDProducer("edmtest::global::StreamIntProducer",
    transitions = cms.int32(int(nEvt+nStreams*(2*(nEvt/nEvtRun)+2*(nEvt/nEvtLumi)+2))),
    cachevalue = cms.int32(1)
)

process.RunIntProd = cms.EDProducer("edmtest::global::RunIntProducer",
    transitions = cms.int32(int(2*(nEvt/nEvtRun))),
    cachevalue = cms.int32(nEvtRun)
)

process.LumiIntProd = cms.EDProducer("edmtest::global::LumiIntProducer",
    transitions = cms.int32(int(2*(nEvt/nEvtLumi))),
    cachevalue = cms.int32(nEvtLumi)
)

process.RunSumIntProd = cms.EDProducer("edmtest::global::RunSummaryIntProducer",
    transitions = cms.int32(int(nStreams*(nEvt/nEvtRun)+2*(nEvt/nEvtRun))),
    cachevalue = cms.int32(nEvtRun)
)

process.LumiSumIntProd = cms.EDProducer("edmtest::global::LumiSummaryIntProducer",
    transitions = cms.int32(int(nStreams*(nEvt/nEvtLumi)+2*(nEvt/nEvtLumi))),
    cachevalue = cms.int32(nEvtLumi)
)

process.ProcessBlockIntProd = cms.EDProducer("edmtest::global::ProcessBlockIntProducer",
    transitions = cms.int32(int(nEvt + 2)),
    consumesBeginProcessBlock = cms.InputTag("TestBeginProcessBlockProd", "begin"),
    consumesEndProcessBlock = cms.InputTag("TestEndProcessBlockProd", "end")
)

process.TestBeginProcessBlockProd = cms.EDProducer("edmtest::global::TestBeginProcessBlockProducer",
    transitions = cms.int32(int(nEvt + 1)),
    consumesBeginProcessBlock = cms.InputTag("")
)

process.TestBeginProcessBlockProdRead = cms.EDProducer("edmtest::global::TestBeginProcessBlockProducer",
    transitions = cms.int32(int(nEvt + 1)),
    consumesBeginProcessBlock = cms.InputTag("TestBeginProcessBlockProd", "begin")
)

process.TestEndProcessBlockProd = cms.EDProducer("edmtest::global::TestEndProcessBlockProducer",
    transitions = cms.int32(int(nEvt + 1)),
    consumesEndProcessBlock = cms.InputTag("")
)

process.TestEndProcessBlockProdRead = cms.EDProducer("edmtest::global::TestEndProcessBlockProducer",
    transitions = cms.int32(int(nEvt + 1)),
    consumesEndProcessBlock = cms.InputTag("TestEndProcessBlockProd", "end")
)

process.TestBeginRunProd = cms.EDProducer("edmtest::global::TestBeginRunProducer",
    transitions = cms.int32(int(nEvt/nEvtRun))
)

process.TestEndRunProd = cms.EDProducer("edmtest::global::TestEndRunProducer",
    transitions = cms.int32(int(nEvt/nEvtRun))
)

process.TestBeginLumiBlockProd = cms.EDProducer("edmtest::global::TestBeginLumiBlockProducer",
    transitions = cms.int32(int(nEvt/nEvtLumi))
)

process.TestEndLumiBlockProd = cms.EDProducer("edmtest::global::TestEndLumiBlockProducer",
    transitions = cms.int32(int(nEvt/nEvtLumi))
)

process.StreamIntAn = cms.EDAnalyzer("edmtest::global::StreamIntAnalyzer",
    transitions = cms.int32(int(nEvt+nStreams*(2*(nEvt/nEvtRun)+2*(nEvt/nEvtLumi)+2))),
    cachevalue = cms.int32(1)
)

process.RunIntAn = cms.EDAnalyzer("edmtest::global::RunIntAnalyzer",
    transitions = cms.int32(int(nEvt+2*(nEvt/nEvtRun))),
    cachevalue = cms.int32(nEvtRun)
)

process.LumiIntAn = cms.EDAnalyzer("edmtest::global::LumiIntAnalyzer",
    transitions = cms.int32(int(nEvt+2*(nEvt/nEvtLumi))),
    cachevalue = cms.int32(nEvtLumi),
    # needed to avoid deleting TestAccumulator1
    moduleLabel = cms.InputTag("TestAccumulator1")
)

process.RunSumIntAn = cms.EDAnalyzer("edmtest::global::RunSummaryIntAnalyzer",
    transitions = cms.int32(int(nEvt+nStreams*((nEvt/nEvtRun)+1)+2*(nEvt/nEvtRun))),
    cachevalue = cms.int32(nEvtRun)
)

process.LumiSumIntAn = cms.EDAnalyzer("edmtest::global::LumiSummaryIntAnalyzer",
    transitions = cms.int32(int(nEvt+nStreams*((nEvt/nEvtLumi)+1)+2*(nEvt/nEvtLumi))),
    cachevalue = cms.int32(nEvtLumi)
)

process.ProcessBlockIntAn = cms.EDAnalyzer("edmtest::global::ProcessBlockIntAnalyzer",
    transitions = cms.int32(652),
    consumesBeginProcessBlock = cms.InputTag("TestBeginProcessBlockProd", "begin"),
    consumesEndProcessBlock = cms.InputTag("TestEndProcessBlockProd", "end")
)

process.StreamIntFil = cms.EDFilter("edmtest::global::StreamIntFilter",
    transitions = cms.int32(int(nEvt+nStreams*(2*(nEvt/nEvtRun)+2*(nEvt/nEvtLumi)+2))),
    cachevalue = cms.int32(1)
)

process.RunIntFil = cms.EDFilter("edmtest::global::RunIntFilter",
    transitions = cms.int32(int(nEvt+2*(nEvt/nEvtRun))),
    cachevalue = cms.int32(nEvtRun)
)

process.LumiIntFil = cms.EDFilter("edmtest::global::LumiIntFilter",
    transitions = cms.int32(int(nEvt+2*(nEvt/nEvtLumi))),
    cachevalue = cms.int32(nEvtLumi)
)

process.RunSumIntFil = cms.EDFilter("edmtest::global::RunSummaryIntFilter",
    transitions = cms.int32(int(nEvt+nStreams*((nEvt/nEvtRun)+1)+2*(nEvt/nEvtRun))),
    cachevalue = cms.int32(nEvtRun)
)

process.LumiSumIntFil = cms.EDFilter("edmtest::global::LumiSummaryIntFilter",
    transitions = cms.int32(int(nEvt+nStreams*((nEvt/nEvtLumi)+1)+2*(nEvt/nEvtLumi))),
    cachevalue = cms.int32(nEvtLumi)
)

process.ProcessBlockIntFil = cms.EDFilter("edmtest::global::ProcessBlockIntFilter",
    transitions = cms.int32(int(nEvt + 2)),
    consumesBeginProcessBlock = cms.InputTag("TestBeginProcessBlockFil", "begin"),
    consumesEndProcessBlock = cms.InputTag("TestEndProcessBlockFil", "end")
)

process.TestBeginProcessBlockFil = cms.EDFilter("edmtest::global::TestBeginProcessBlockFilter",
    transitions = cms.int32(int(nEvt + 1)),
    consumesBeginProcessBlock = cms.InputTag("")
)

process.TestBeginProcessBlockFilRead = cms.EDFilter("edmtest::global::TestBeginProcessBlockFilter",
process.TestEndProcessBlockFil = cms.EDFilter("edmtest::global::TestEndProcessBlockFilter",
    transitions = cms.int32(int(nEvt + 1)),
    consumesEndProcessBlock = cms.InputTag("")
)

process.TestEndProcessBlockFilRead = cms.EDFilter("edmtest::global::TestEndProcessBlockFilter",
    transitions = cms.int32(int(nEvt + 1)),
    consumesEndProcessBlock = cms.InputTag("TestEndProcessBlockFil", "end")
)

process.TestBeginRunFil = cms.EDFilter("edmtest::global::TestBeginRunFilter",
    transitions = cms.int32(int(nEvt/nEvtRun))
)

process.TestEndRunFil = cms.EDFilter("edmtest::global::TestEndRunFilter",
    transitions = cms.int32(int(nEvt/nEvtRun))
)

process.TestBeginLumiBlockFil = cms.EDFilter("edmtest::global::TestBeginLumiBlockFilter",
    transitions = cms.int32(int(nEvt/nEvtLumi))
)

process.TestEndLumiBlockFil = cms.EDFilter("edmtest::global::TestEndLumiBlockFilter",
    transitions = cms.int32(int(nEvt/nEvtLumi))
)

process.TestAccumulator1 = cms.EDProducer("edmtest::global::TestAccumulator",
    expectedCount = cms.uint32(512)
)

process.TestAccumulator2 = cms.EDProducer("edmtest::global::TestAccumulator",
    expectedCount = cms.uint32(35)
)

process.testFilterModule = cms.EDFilter("TestFilterModule",
    acceptValue = cms.untracked.int32(5),
    onlyOne = cms.untracked.bool(False)
)

process.task = cms.Task(process.TestAccumulator1)

process.p = cms.Path(process.StreamIntProd +
                     process.RunIntProd +
                     process.LumiIntProd +
                     process.RunSumIntProd +
                     process.LumiSumIntProd +
                     process.ProcessBlockIntProd +
                     process.TestBeginProcessBlockProdRead +
                     process.TestBeginProcessBlockProd +
                     process.TestEndProcessBlockProdRead +
                     process.TestEndProcessBlockProd +
                     process.TestBeginRunProd +
                     process.TestEndRunProd +
                     process.TestBeginLumiBlockProd +
                     process.TestEndLumiBlockProd +
                     process.StreamIntAn +
                     process.RunIntAn +
                     process.LumiIntAn +
                     process.RunSumIntAn +
                     process.LumiSumIntAn +
                     process.ProcessBlockIntAn +
                     process.StreamIntFil +
                     process.RunIntFil +
                     process.LumiIntFil +
                     process.RunSumIntFil +
                     process.LumiSumIntFil +
                     process.ProcessBlockIntFil +
                     process.TestBeginProcessBlockFilRead +
                     process.TestBeginProcessBlockFil +
                     process.TestEndProcessBlockFilRead +
                     process.TestEndProcessBlockFil +
                     process.TestBeginRunFil +
                     process.TestEndRunFil +
                     process.TestBeginLumiBlockFil +
                     process.TestEndLumiBlockFil +
                     process.testFilterModule +
                     process.TestAccumulator2,
                     process.task)
    transitions = cms.int32(int(nEvt + 1)),
    consumesBeginProcessBlock = cms.InputTag("TestBeginProcessBlockFil", "begin")
)
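The expected transition counts in this config are pure arithmetic over the four constants at its top. A standalone sanity check of that arithmetic (plain Python, outside CMSSW; the interpretation in the comments follows the structure of the expressions, not an official spec):

nEvtLumi = 4
nEvtRun = 2 * nEvtLumi        # 8 events per run
nRuns = 64
nStreams = 4
nEvt = nRuns * nEvtRun        # 512 events in total

# StreamIntProducer: every event, plus per-stream begin/end transitions
# for each run, each lumi, and the stream itself.
assert nEvt + nStreams * (2 * (nEvt // nEvtRun) + 2 * (nEvt // nEvtLumi) + 2) == 2056

# TestAccumulator1's expectedCount matches the total number of events.
assert nEvt == 512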
dump.rs
use std::{
    collections::HashSet,
    fs::{create_dir_all, File},
    io::{BufRead, BufReader, Write},
    path::{Path, PathBuf},
};

use heed::{EnvOpenOptions, RoTxn};
use serde::{Deserialize, Serialize};
use uuid::Uuid;

use super::{Result, State, UpdateStore};
use crate::index_controller::{
    index_actor::IndexActorHandle, update_actor::store::update_uuid_to_file_path, Enqueued,
    UpdateStatus,
};

#[derive(Serialize, Deserialize)]
struct UpdateEntry {
    uuid: Uuid,
    update: UpdateStatus,
}

impl UpdateStore {
    pub fn dump(
        &self,
        uuids: &HashSet<Uuid>,
        path: PathBuf,
        handle: impl IndexActorHandle,
    ) -> Result<()> {
        let state_lock = self.state.write();
        state_lock.swap(State::Dumping);

        // txn must *always* be acquired after the state lock, or it will deadlock.
        let txn = self.env.write_txn()?;

        let dump_path = path.join("updates");
        create_dir_all(&dump_path)?;

        self.dump_updates(&txn, uuids, &dump_path)?;

        let fut = dump_indexes(uuids, handle, &path);
        tokio::runtime::Handle::current().block_on(fut)?;

        state_lock.swap(State::Idle);

        Ok(())
    }

    fn dump_updates(
        &self,
        txn: &RoTxn,
        uuids: &HashSet<Uuid>,
        path: impl AsRef<Path>,
    ) -> Result<()> {
        let dump_data_path = path.as_ref().join("data.jsonl");
        let mut dump_data_file = File::create(dump_data_path)?;

        let update_files_path = path.as_ref().join(super::UPDATE_DIR);
        create_dir_all(&update_files_path)?;

        self.dump_pending(txn, uuids, &mut dump_data_file, &path)?;
        self.dump_completed(txn, uuids, &mut dump_data_file)?;

        Ok(())
    }

    fn
    (
        &self,
        txn: &RoTxn,
        uuids: &HashSet<Uuid>,
        mut file: &mut File,
        dst_path: impl AsRef<Path>,
    ) -> Result<()> {
        let pendings = self.pending_queue.iter(txn)?.lazily_decode_data();

        for pending in pendings {
            let ((_, uuid, _), data) = pending?;
            if uuids.contains(&uuid) {
                let update = data.decode()?;

                if let Some(ref update_uuid) = update.content {
                    let src = super::update_uuid_to_file_path(&self.path, *update_uuid);
                    let dst = super::update_uuid_to_file_path(&dst_path, *update_uuid);
                    std::fs::copy(src, dst)?;
                }

                let update_json = UpdateEntry {
                    uuid,
                    update: update.into(),
                };

                serde_json::to_writer(&mut file, &update_json)?;
                file.write_all(b"\n")?;
            }
        }

        Ok(())
    }

    fn dump_completed(
        &self,
        txn: &RoTxn,
        uuids: &HashSet<Uuid>,
        mut file: &mut File,
    ) -> Result<()> {
        let updates = self.updates.iter(txn)?.lazily_decode_data();

        for update in updates {
            let ((uuid, _), data) = update?;
            if uuids.contains(&uuid) {
                let update = data.decode()?;

                let update_json = UpdateEntry { uuid, update };

                serde_json::to_writer(&mut file, &update_json)?;
                file.write_all(b"\n")?;
            }
        }

        Ok(())
    }

    pub fn load_dump(
        src: impl AsRef<Path>,
        dst: impl AsRef<Path>,
        db_size: usize,
    ) -> anyhow::Result<()> {
        let dst_update_path = dst.as_ref().join("updates/");
        create_dir_all(&dst_update_path)?;

        let mut options = EnvOpenOptions::new();
        options.map_size(db_size as usize);
        let (store, _) = UpdateStore::new(options, &dst_update_path)?;

        let src_update_path = src.as_ref().join("updates");
        let update_data = File::open(&src_update_path.join("data.jsonl"))?;
        let mut update_data = BufReader::new(update_data);

        std::fs::create_dir_all(dst_update_path.join("update_files/"))?;

        let mut wtxn = store.env.write_txn()?;
        let mut line = String::new();
        loop {
            match update_data.read_line(&mut line) {
                Ok(0) => break,
                Ok(_) => {
                    let UpdateEntry { uuid, update } = serde_json::from_str(&line)?;
                    store.register_raw_updates(&mut wtxn, &update, uuid)?;

                    // Copy the associated update file if it exists.
                    if let UpdateStatus::Enqueued(Enqueued {
                        content: Some(uuid),
                        ..
                    }) = update
                    {
                        let src = update_uuid_to_file_path(&src_update_path, uuid);
                        let dst = update_uuid_to_file_path(&dst_update_path, uuid);
                        std::fs::copy(src, dst)?;
                    }
                }
                _ => break,
            }

            line.clear();
        }

        wtxn.commit()?;

        Ok(())
    }
}

async fn dump_indexes(
    uuids: &HashSet<Uuid>,
    handle: impl IndexActorHandle,
    path: impl AsRef<Path>,
) -> Result<()> {
    for uuid in uuids {
        handle.dump(*uuid, path.as_ref().to_owned()).await?;
    }

    Ok(())
}
dump_pending
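The missing method name is dump_pending. Both it and dump_completed serialize one UpdateEntry per line with serde_json, so the resulting data.jsonl is ordinary JSON Lines; a throwaway Python reader (the path is illustrative, matching where dump() writes inside the dump directory):

import json

with open("dump/updates/data.jsonl") as f:
    for line in f:
        entry = json.loads(line)  # serde emits {"uuid": ..., "update": ...}
        print(entry["uuid"], entry["update"])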
gee_main.go
/*
 * @Author: aleimu
 * @Date: 2020-11-23 21:53:48
 * @Description: file content
 */

package main

import (
    "go-7days/gee"
    "net/http"
    "time"
)

func main() {
    r := gee.Default()
    r.LoadHTMLGlob("templates/*")
    r.Static("/assets", "./static")

    r.GET("/", func(c *gee.Context) {
        c.Template(http.StatusOK, "css.tmpl", nil)
    })
    r.GET("/date", func(c *gee.Context) {
        c.Template(http.StatusOK, "custom_func.tmpl", gee.H{
            "title": "gee",
            "now":   time.Date(2019, 8, 17, 0, 0, 0, 0, time.UTC),
        })
    })
    r.GET("/hello", func(c *gee.Context) {
        // expect /hello?name=geektutu
        c.String(http.StatusOK, "hello %s, you're at %s\n", c.Query("name"), c.Path)
    })
    r.GET("/hello/:name", func(c *gee.Context) {
        // expect /hello/geektutu
        c.String(http.StatusOK, "hello %s, you're at %s\n", c.Param("name"), c.Path)
    })
    r.GET("/assets/*filepath", func(c *gee.Context) {
        c.JSON(http.StatusOK, gee.H{"filepath": c.Param("filepath")})
    })
    r.POST("/login", func(c *gee.Context) {
        c.JSON(http.StatusOK, gee.H{
            "username": c.PostForm("username"),
            "password": c.PostForm("password"),
        })
    })
    r.GET("/index", func(c *gee.Context) {
        c.HTML(http.StatusOK, "<h1>Index Page</h1>")
    })

    v1 := r.Group("/v1")
    {
        v1.GET("/", func(c *gee.Context) {
            c.HTML(http.StatusOK, "<h1>Hello Gee</h1>")
        })
        v1.GET("/hello", func(c *gee.Context) {
            // expect /hello?name=geektutu
            c.String(http.StatusOK, "hello %s, you're at %s\n", c.Query("name"), c.Path)
        })
        v1.GET("/a/*/b/*", func(c *gee.Context) {
            c.HTML(http.StatusOK, "<h1>Hello Gee</h1>")
        })
    }

    v2 := r.Group("/v2")
    {
        v2.GET("/hello/:name", func(c *gee.Context) {
            // expect /hello/geektutu
            c.String(http.StatusOK, "hello %s, you're at %s\n", c.Param("name"), c.Path)
        })
        v2.POST("/login", func(c *gee.Context) {
            c.JSON(http.StatusOK, gee.H{
                "username": c.PostForm("username"),
                "password": c.PostForm("password"),
            })
        })
    }

    r.Static("/logs", "/static")
    r.Run(":3002")
}
main.go
package main

import (

    "github.com/xurwxj/ctils/mail"
    "github.com/xurwxj/ctils/oss/aws"
    "github.com/xurwxj/ctils/sms"
    "github.com/xurwxj/viper"
)

func main() {
    testAWSUpload()
}

func testAWSUpload() {
    initConfig()
    bn, es, err := aws.ChunkUpload("default", "ds/0b24e9e6c987ca31c849a34e234753f8", "", "./LICENSE", "license.txt")
    fmt.Println(bn)
    fmt.Println(es)
    fmt.Println(err)
}

func testAWSURL() {
    initConfig()
    s, err := aws.GetTempDownURLFileName("tawsdown.dental3dcloud.com", "dentalFull/0b24e9e6c987ca31c849a34e234753f8", 864000)
    fmt.Println(s)
    fmt.Println(err)
}

func testSMSInvalid() {
    initConfig()
    smss, err := sms.FetchSMSSendResult("15942397109")
    fmt.Println(smss)
    fmt.Println(err)
}

func testMailInvalid() {
    initConfig()
    emails, err := mail.FetchInvalidEmails()
    fmt.Println(emails)
    fmt.Println(err)
}

func testChunkUpload() {
    initConfig()
    bn, endoint, err := aws.ChunkUpload("cnhz", "test/jkjkjsl", "pub", "../../../git.shining3d.com/cloud/algorithm/tmp/2021-01-08_003_111_谭彩红.zip", "2021-01-08_003_111_谭彩红.zip")
    // bn, endoint, err := ali.ChunkUpload("cnhz", "test/jkjkjsl", "pub", "../../../git.shining3d.com/cloud/algorithm/tmp/2021-01-08_003_111_谭彩红.zip")
    fmt.Println("bn: ", bn)
    fmt.Println("endoint: ", endoint)
    fmt.Println("err: ", err)
}

func initConfig() {
    viper.AddConfigPath(".")
    viper.SetConfigName(".server")
    viper.AutomaticEnv() // read in environment variables that match

    // If a config file is found, read it in.
    if err := viper.ReadInConfig(); err == nil {
        fmt.Println("Using config file:", viper.ConfigFileUsed())
        fmt.Println("ReadInConfig keys: ", viper.AllKeys())
    } else {
        fmt.Println("viper ReadInConfig set err: ", err)
        logConfig := `
    {
        "log": {
            "output": "file",
            "path": "logs",
            "file": "service.log",
            "level": "info",
            "max": 10,
            "maxAge": 30,
            "localtime": true
        },
        "version": ""
    }
    `
        if err := viper.ReadConfig(bytes.NewReader([]byte(logConfig))); err == nil {
            fmt.Println("ReadConfig keys: ", viper.AllKeys())
        } else {
            fmt.Println("viper ReadConfig set err: ", err)
        }
    }
}

    "bytes"
    "fmt"
move-scalar.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

pub fn main()
{
    let y: isize = 42;
    let mut x: isize;
    x = y;
    assert_eq!(x, 42);
}
search_in_rotated_sorted_array.py
'''
Suppose an array sorted in ascending order has been rotated at some pivot
unknown to you beforehand.
(e.g. [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).

Search for a given target value. If it exists in the array, return its index;
otherwise return -1.

You may assume no duplicates exist in the array.
Your algorithm's runtime complexity must be on the order of O(log n).

Example 1:
    Input: nums = [4,5,6,7,0,1,2], target = 0
    Output: 4

Example 2:
    Input: nums = [4,5,6,7,0,1,2], target = 3
    Output: -1

Source: LeetCode
Link: https://leetcode-cn.com/problems/search-in-rotated-sorted-array
'''
from typing import List


class Solution:
    def search(self, nums: List[int], target: int) -> int:
        l = 0
        r = len(nums) - 1
        while l <= r:
            mid = (l + r) >> 1
            if target == nums[mid]:
                return mid
                    l = mid + 1
            else:
                if target > nums[mid] and target <= nums[r]:
                    l = mid + 1
                else:
                    r = mid - 1
        return -1
            if nums[l] <= nums[mid]:
                if target >= nums[l] and target < nums[mid]:
                    r = mid - 1
                else:
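Stitched back together (prefix + middle + suffix), the method decides at each step which half of the rotated array is sorted and whether the target lies inside it, giving O(log n). A quick check against the examples from the problem statement:

s = Solution()
assert s.search([4, 5, 6, 7, 0, 1, 2], 0) == 4    # Example 1
assert s.search([4, 5, 6, 7, 0, 1, 2], 3) == -1   # Example 2
assert s.search([1], 0) == -1                     # single-element edge case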
FeatureFactory.go
package togglr

type FeatureFactory (func(data interface{}) (Feature, bool))

var factories = make([]FeatureFactory, 0, 2)

// Add a new FeatureFactory
func
(factory FeatureFactory) {
    factories = append(factories, factory)
}
InjectFeatureFactory
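InjectFeatureFactory completes the declaration: it appends a factory to the package-level factories slice, a plain registration pattern. The same idea in Python for comparison (all names here are hypothetical, not part of the togglr package):

from typing import Any, Callable, Optional, Tuple

# Mirrors Go's `func(data interface{}) (Feature, bool)`.
FeatureFactory = Callable[[Any], Tuple[Optional[Any], bool]]

factories: list = []

def inject_feature_factory(factory: FeatureFactory) -> None:
    """Register a factory so callers can later try each one on incoming data."""
    factories.append(factory)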
dataloader.py
import os
import random

import numpy as np
import pandas as pd
import tensorflow as tf

from augment import Augment

AUTO = tf.data.experimental.AUTOTUNE


def set_dataset(task, data_path):
    trainset = pd.read_csv(
        os.path.join(data_path, 'imagenet_trainset.csv')).values.tolist()
    trainset = [[os.path.join(data_path, t[0]), t[1]] for t in trainset]

    if task == 'lincls':
        valset = pd.read_csv(
            os.path.join(data_path, 'imagenet_valset.csv')).values.tolist()
        valset = [[os.path.join(data_path, t[0]), t[1]] for t in valset]
        return np.array(trainset, dtype='object'), np.array(valset, dtype='object')

    return np.array(trainset, dtype='object')


class DataLoader:
    def __init__(self, args, mode, datalist, batch_size, num_workers=1, shuffle=True):
        self.args = args
        self.mode = mode
        self.datalist = datalist
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.shuffle = shuffle

        self.dataloader = self._dataloader()

    def __len__(self):
        return len(self.datalist)

    def fetch_dataset(self, path, y=None):
        x = tf.io.read_file(path)
        if y is not None:
            return tf.data.Dataset.from_tensors((x, y))
        return tf.data.Dataset.from_tensors(x)

    def augmentation(self, img, shape):
        augset = Augment(self.args, self.mode)
        if self.args.task in ['v1', 'v2']:
            img_list = []
            for _ in range(2):  # query, key
                aug_img = tf.identity(img)
                if self.args.task == 'v1':
                    aug_img = augset._augmentv1(aug_img, shape)  # moco v1
                else:
                    radius = np.random.choice([3, 5])
                    aug_img = augset._augmentv2(aug_img, shape, (radius, radius))  # moco v2
                img_list.append(aug_img)
            return img_list
        else:
            return augset._augment_lincls(img, shape)

    def dataset_parser(self, value, label=None):
        shape = tf.image.extract_jpeg_shape(value)
        img = tf.io.decode_jpeg(value, channels=3)
        if label is None:
            # moco
            query, key = self.augmentation(img, shape)
            inputs = {'query': query, 'key': key}
            labels = tf.zeros([])
        else:
            # lincls
            inputs = self.augmentation(img, shape)
            labels = tf.one_hot(label, self.args.classes)
        return (inputs, labels)

    def
    (self, value, labels):
        if self.num_workers > 1:
            pre_shuffle = [(i, value['key'][i]) for i in range(self.batch_size)]
            random.shuffle(pre_shuffle)
            shuffle_idx = []
            value_temp = []
            for vv in pre_shuffle:
                shuffle_idx.append(vv[0])
                value_temp.append(tf.expand_dims(vv[1], axis=0))
            value['key'] = tf.concat(value_temp, axis=0)
            unshuffle_idx = np.array(shuffle_idx).argsort().tolist()
            value.update({'unshuffle': unshuffle_idx})
        return (value, labels)

    def _dataloader(self):
        self.imglist = self.datalist[:, 0].tolist()
        if self.args.task in ['v1', 'v2']:
            dataset = tf.data.Dataset.from_tensor_slices(self.imglist)
        else:
            self.labellist = self.datalist[:, 1].tolist()
            dataset = tf.data.Dataset.from_tensor_slices((self.imglist, self.labellist))

        dataset = dataset.repeat()
        if self.shuffle:
            dataset = dataset.shuffle(len(self.datalist))

        dataset = dataset.interleave(self.fetch_dataset, num_parallel_calls=AUTO)
        dataset = dataset.map(self.dataset_parser, num_parallel_calls=AUTO)
        dataset = dataset.batch(self.batch_size)
        dataset = dataset.prefetch(AUTO)
        if self.args.shuffle_bn and self.args.task in ['v1', 'v2']:
            # only moco
            dataset = dataset.map(self.shuffle_BN, num_parallel_calls=AUTO)
        return dataset
shuffle_BN
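The missing method name is shuffle_BN: it permutes the key batch (MoCo's shuffled batch norm) and stores the 'unshuffle' indices so the original order can be restored after the key encoder runs. The restoration trick is simply that the argsort of a permutation is its inverse; a small standalone NumPy check, separate from the loader:

import numpy as np

batch = np.array(['a', 'b', 'c', 'd'])
shuffle_idx = np.array([2, 0, 3, 1])   # a random permutation
shuffled = batch[shuffle_idx]          # order the key encoder sees

unshuffle_idx = shuffle_idx.argsort()  # inverse permutation
assert (shuffled[unshuffle_idx] == batch).all()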
PersonRole276.tsx
import * as React from "react";
import { IEmojiProps } from "../../styled";

const SvgPersonRole276 = (props: IEmojiProps) => (
  <svg viewBox="0 0 72 72" width="1em" height="1em" {...props}>
    <path fill="#9B9B9A" d="M50.903 27.532c0 1.225-.147 2.416-.424 3.556-1.597 6.568-7.517 11.444-14.576 11.444s-12.979-4.876-14.576-11.443a15.035 15.035 0 01-.424-3.557c0-8.284 6.716-15 15-15 8.284 0 15 6.716 15 15z" />
    <path fill="#D0CFCE" d="M31.932 12.938s-6.519 2.24-9.184 7.384c-1.16 2.114-3.006 3.506-1.42 10.767s10.56 11.047 10.56 11.047a15.023 15.023 0 0011.138-11.048c.278-1.14.424-2.33.424-3.556 0-7.086-4.912-13.024-11.518-14.594zM16.764 58.776s-2-13.727 10-13.727c3.192 2.128 5.927 3.598 9 3.592h-.125c3.074.006 5.508-2.164 8.7-4.292 15.57 0 10.7 14.427 10.7 14.427" />
    <path fill="#9B9B9A" d="M28.142 52.903h15.561v5.824H28.142z" />
    <path fill="#9B9B9A" d="M44.437 45.676c-3.17 2.113-4.875 3-8.937 3.589 13.062.411 13.062 5.516 13.824 9.516h5.113s2-13.105-10-13.105z" />
    <g>
      <path d="M33.836 22.579s-3.655 5.391-8.463 8.015l-.646.471a17.689 17.689 0 01-.188-4.095l-.62 1a9.452 9.452 0 015.672-4.598M42.024 23.372a9.455 9.455 0 015.673 4.598l-.56-.912c.024.38.037.763.037 1.15 0 2.114-.37 4.12-1.034 5.922l.261-.698c-3.14 1.57-2.894-9.158-2.894-9.158" />
    </g>
    <g>
        d="M46.356 33.45c.016-.007.03-.01.045-.018l-.261.698c.081-.22.144-.454.216-.68-3.09 1.441-2.849-9.176-2.849-9.176l-1.483-.902-8.189-.793s-3.654 5.39-8.462 8.015l-.646.47a17.689 17.689 0 01-.23-2.855c0-.207.014-.41.02-.614-.044.454-.079.916-.079 1.29 0 7.827 5.077 14.173 11.339 14.173 5.945 0 10.813-5.723 11.291-13a16.93 16.93 0 01-.712 3.392z"
      />
    </g>
    <g>
      <path d="M34.487 58.719a2 2 0 00-1.934-2.508h.007a2.001 2.001 0 00-1.935 2.508M41.19 58.719a2 2 0 00-1.935-2.508h.007a2.001 2.001 0 00-1.935 2.508" />
      <path fill="none" stroke="#000" strokeLinecap="round" strokeLinejoin="round" strokeMiterlimit={10} strokeWidth={2} d="M43.523 57.796v-4.808H28.262v4.808" />
      <path d="M41.93 26.965a2 2 0 11-4.001-.002 2 2 0 014.001.002M33.93 26.965a2 2 0 11-4.001-.002 2 2 0 014.001.002M35.93 36.967c-1.152 0-2.304-.286-3.447-.858a1 1 0 11.894-1.789c1.718.86 3.388.86 5.106 0a1 1 0 01.894 1.79c-1.143.571-2.295.857-3.447.857z" />
      <path fill="none" stroke="#000" strokeMiterlimit={10} strokeWidth={2} d="M46.235 34.247a17.148 17.148 0 001.034-5.921c0-.388-.013-.771-.037-1.15M24.634 27.087c-.028.409-.043.821-.043 1.239 0 7.827 5.077 14.173 11.34 14.173" />
      <path fill="none" stroke="#000" strokeLinecap="round" strokeLinejoin="round" strokeMiterlimit={10} strokeWidth={2} d="M16.87 57.818s-2-13 10-13c3.192 2.128 5.927 3.598 9 3.592h-.125c3.074.006 5.808-1.464 9-3.592 12 0 10 13 10 13M50.902 27.532c0 1.225-.147 2.416-.424 3.556-1.597 6.568-7.517 11.444-14.576 11.444s-12.979-4.876-14.576-11.443a15.04 15.04 0 01-.424-3.557c0-8.284 6.716-15 15-15s15 6.716 15 15z" />
      <path fill="none" stroke="#000" strokeLinecap="round" strokeLinejoin="round" strokeMiterlimit={10} strokeWidth={2} d="M47.792 28.087a9.455 9.455 0 00-5.673-4.598c-2.12-.621-4.13-.903-6.217-.844-2.087-.059-4.096.222-6.216.844a9.452 9.452 0 00-5.673 4.597M53.864 25.467v4.771M17.751 25.467v4.771" />
      <path fill="none" stroke="#000" strokeLinecap="round" strokeMiterlimit={10} strokeWidth={2} d="M46.148 34.215c-2.551 5.133-9.592 5.139-9.592 5.139" />
      <path fill="none" stroke="#000" strokeLinecap="round" strokeLinejoin="round" strokeMiterlimit={10} strokeWidth={2} d="M43.83 57.796v-4.808H28.57v4.808" />
      <path fill="none" stroke="#000" strokeLinecap="round" strokeMiterlimit={10} strokeWidth={2} d="M43.602 24.39s-.246 10.73 2.894 9.16M33.93 22.696s-3.655 5.391-8.462 8.015" />
      <path fill="none" stroke="#000" strokeMiterlimit={10} strokeWidth={2} d="M46.235 34.247a17.148 17.148 0 001.034-5.921c0-.388-.013-.771-.037-1.15M24.634 27.087c-.028.409-.043.821-.043 1.239 0 7.827 5.077 14.173 11.34 14.173" />
      <path fill="none" stroke="#000" strokeLinecap="round" strokeLinejoin="round" strokeMiterlimit={10} strokeWidth={2} d="M47.792 28.087a9.455 9.455 0 00-5.673-4.598c-2.12-.621-4.13-.903-6.217-.844-2.087-.059-4.096.222-6.216.844a9.452 9.452 0 00-5.673 4.597" />
      <path fill="none" stroke="#000" strokeLinecap="round" strokeMiterlimit={10} strokeWidth={2} d="M43.602 24.39s-.246 10.73 2.894 9.16M33.93 22.696s-3.655 5.391-8.462 8.015" />
    </g>
  </svg>
);

export default SvgPersonRole276;
<path fill="#a57939"
nsSFTP.go
/*
Copyright (C) 2017 Verizon. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nsSFTP

import (
    "github.com/lavaorg/lrtx/config"
    "github.com/lavaorg/lrtx/luaext/gluamapper"
    "github.com/lavaorg/lrtx/mlog"
    "github.com/lavaorg/lua"
    "github.com/pkg/sftp"
    "golang.org/x/crypto/ssh"
)

const (
    SFTP_CONN_TYPE = "SFTPConnection"
)

var (
    NsSFTPConnectionLimit, _ = config.GetInt("NS_SFTP_CONNECTION_LIMIT", 3)
)

type NsSFTPModule struct {
    Limit int
}

func
() *NsSFTPModule {
    return &NsSFTPModule{Limit: NsSFTPConnectionLimit}
}

func (nsSFTP *NsSFTPModule) Loader(L *lua.LState) int {
    api := map[string]lua.LGFunction{
        "connect": nsSFTP.connect,
    }
    t := L.NewTable()
    L.SetFuncs(t, api)
    L.Push(t)
    return 1
}

func (nsSFTP *NsSFTPModule) connect(L *lua.LState) int {
    if nsSFTP.Limit == 0 {
        return nsSFTP.error(L, "connection limit exceeded", nil, "")
    }
    var destination Destination
    if err := gluamapper.Map(L.CheckTable(1), &destination); err != nil {
        L.Push(lua.LNil)
        L.Push(lua.LString(err.Error()))
        return nsSFTP.error(L, err.Error(), nil, "connect")
    }
    config := &ssh.ClientConfig{
        User:            destination.User,
        HostKeyCallback: ssh.InsecureIgnoreHostKey(),
        Auth: []ssh.AuthMethod{
            ssh.Password(destination.Password),
        },
    }
    config.SetDefaults()
    sshConn, err := ssh.Dial("tcp", destination.HostPort, config)
    if err != nil {
        return nsSFTP.error(L, err.Error(), nil, "connect")
    }
    client, err := sftp.NewClient(sshConn)
    if err != nil {
        return nsSFTP.error(L, err.Error(), nil, "connect")
    }
    nsSFTP.Limit--

    mt := L.NewTypeMetatable(SFTP_CONN_TYPE)
    methods := map[string]lua.LGFunction{
        "store":      nsSFTP.store,
        "mkdir":      nsSFTP.mkdir,
        "disconnect": nsSFTP.disconnect,
    }
    L.SetField(mt, "__index", L.SetFuncs(L.NewTable(), methods))

    connection := L.NewUserData()
    connection.Value = &Clients{SSH: sshConn, SFTP: client}
    L.SetMetatable(connection, L.GetTypeMetatable(SFTP_CONN_TYPE))
    Connect.Incr()
    L.Push(connection)
    mlog.Debug("SSH connection established to: %s", destination.HostPort)
    return 1
}

func (nsSFTP *NsSFTPModule) mkdir(L *lua.LState) int {
    conn := L.CheckUserData(1)
    if clients, ok := conn.Value.(*Clients); ok {
        path := L.CheckString(2)
        if err := clients.SFTP.Mkdir(path); err != nil {
            return nsSFTP.error(L, err.Error(), nil, "mkdir")
        }
        Mkdir.Incr()
        mlog.Debug("Directory %s created", path)
        return 0
    }
    return nsSFTP.error(L, "unknown connection handle", nil, "mkdir")
}

func (nsSFTP *NsSFTPModule) store(L *lua.LState) int {
    timer := NsSFTP.NewTimer("SFTPStoreTimer")
    conn := L.CheckUserData(1)
    if clients, ok := conn.Value.(*Clients); ok {
        filename := L.CheckString(2)
        data := L.CheckString(3)
        mlog.Debug("Creating file: %s", filename)
        file, err := clients.SFTP.Create(filename)
        if err != nil {
            return nsSFTP.error(L, err.Error(), timer, "store")
        }
        defer file.Close()
        mlog.Debug("Writing data: %s", data)
        if _, err := file.Write([]byte(data)); err != nil {
            return nsSFTP.error(L, err.Error(), timer, "store")
        }
        mlog.Debug("Data written")
        Store.Incr()
        return 0
    }
    return nsSFTP.error(L, "unknown connection handle", nil, "store")
}

func (nsSFTP *NsSFTPModule) disconnect(L *lua.LState) int {
    conn := L.CheckUserData(1)
    if connection, ok := conn.Value.(*Clients); ok {
        if err := connection.SSH.Close(); err != nil {
            return nsSFTP.error(L, err.Error(), nil, "disconnect")
        }
        if err := connection.SFTP.Close(); err != nil {
            return nsSFTP.error(L, err.Error(), nil, "disconnect")
        }
        nsSFTP.Limit++
        Disconnect.Incr()
        mlog.Debug("Disconnected from server: %v", connection.SSH.RemoteAddr())
        return 0
    }
    return nsSFTP.error(L, "unknown connection handle", nil, "disconnect")
}
NewNsSFTPModule
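The missing constructor name is NewNsSFTPModule. For comparison, the connect/mkdir/store/disconnect flow the module exposes to Lua looks like this in Python with paramiko (hostname and credentials are hypothetical; note that AutoAddPolicy, like the Go code's InsecureIgnoreHostKey, skips host-key verification and is unsafe outside tests):

import paramiko

# "connect": open the SSH transport, then an SFTP session on top of it.
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())  # ~ InsecureIgnoreHostKey
ssh.connect("sftp.example.com", port=22, username="user", password="secret")
sftp = ssh.open_sftp()

sftp.mkdir("/upload")                           # "mkdir"
with sftp.open("/upload/hello.txt", "w") as f:  # "store": create + write
    f.write(b"hello")

sftp.close()                                    # "disconnect"
ssh.close()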
whitespace-trimming.rs
// ignore-tidy-linelength

fn main() {
    //~^ ERROR mismatched types
}
let _: () = 42;
ConnectionIndicator.js
/* global APP, $, config */
/* jshint -W101 */
import JitsiPopover from "../util/JitsiPopover";
import VideoLayout from "./VideoLayout";

/**
 * Constructs new connection indicator.
 * @param videoContainer the video container associated with the indicator.
 * @constructor
 */
function ConnectionIndicator(videoContainer, id) {
    this.videoContainer = videoContainer;
    this.bandwidth = null;
    this.packetLoss = null;
    this.bitrate = null;
    this.showMoreValue = false;
    this.resolution = null;
    this.isResolutionHD = null;
    this.transport = [];
    this.popover = null;
    this.id = id;
    this.create();
}

/**
 * Values for the connection quality
 * @type {{98: string, 81: string, 64: string, 47: string, 30: string, 0: string}}
 */
ConnectionIndicator.connectionQualityValues = {
    98: "18px", // full
    81: "15px", // 4 bars
    64: "11px", // 3 bars
    47: "7px",  // 2 bars
    30: "3px",  // 1 bar
    0: "0px"    // empty
};

ConnectionIndicator.getIP = function (value) {
    return value.substring(0, value.lastIndexOf(":"));
};

ConnectionIndicator.getPort = function (value) {
    return value.substring(value.lastIndexOf(":") + 1, value.length);
};

ConnectionIndicator.getStringFromArray = function (array) {
    var res = "";
    for (var i = 0; i < array.length; i++) {
        res += (i === 0 ? "" : ", ") + array[i];
    }
    return res;
};

/**
 * Generates the html content.
 * @returns {string} the html content.
 */
ConnectionIndicator.prototype.generateText = function () {
    var downloadBitrate, uploadBitrate, packetLoss, i;
    var translate = APP.translation.translateString;

    if (this.bitrate === null) {
        downloadBitrate = "N/A";
        uploadBitrate = "N/A";
    } else {
        downloadBitrate =
            this.bitrate.download ? this.bitrate.download + " Kbps" : "N/A";
        uploadBitrate =
            this.bitrate.upload ? this.bitrate.upload + " Kbps" : "N/A";
    }

    if (this.packetLoss === null) {
        packetLoss = "N/A";
    } else {
        packetLoss = "<span class='jitsipopover_green'>&darr;</span>" +
            (this.packetLoss.download !== null ?
                this.packetLoss.download : "N/A") +
            "% <span class='jitsipopover_orange'>&uarr;</span>" +
            (this.packetLoss.upload !== null ? this.packetLoss.upload : "N/A") +
            "%";
    }

    // GENERATE RESOLUTIONS STRING
    let resolutions = this.resolution || {};
    let resolutionStr = Object.keys(resolutions).map(function (ssrc) {
        let {width, height} = resolutions[ssrc];
        return `${width}x${height}`;
    }).join(', ') || 'N/A';

    var result = "<table style='width:100%'>" +
        "<tr>" +
        "<td><span class='jitsipopover_blue' data-i18n='connectionindicator.bitrate'>" +
        translate("connectionindicator.bitrate") + "</span></td>" +
        "<td><span class='jitsipopover_green'>&darr;</span>" + downloadBitrate +
        " <span class='jitsipopover_orange'>&uarr;</span>" +
        uploadBitrate + "</td>" +
        "</tr><tr>" +
        "<td><span class='jitsipopover_blue' data-i18n='connectionindicator.packetloss'>" +
        translate("connectionindicator.packetloss") + "</span></td>" +
        "<td>" + packetLoss + "</td>" +
        "</tr><tr>" +
        "<td><span class='jitsipopover_blue' data-i18n='connectionindicator.resolution'>" +
        translate("connectionindicator.resolution") + "</span></td>" +
        "<td>" + resolutionStr + "</td></tr></table>";

    if (this.videoContainer.videoSpanId == "localVideoContainer") {
        result += "<div class=\"jitsipopover_showmore\" " +
            "onclick = \"APP.UI.connectionIndicatorShowMore('" +
            // FIXME: we do not know local id when this text is generated
            //this.id + "')\" data-i18n='connectionindicator." +
            "local')\" data-i18n='connectionindicator." +
            (this.showMoreValue ? "less" : "more") + "'>" +
            translate("connectionindicator." +
                (this.showMoreValue ? "less" : "more")) + "</div><br />";
    }

    if (this.showMoreValue) {
        var downloadBandwidth, uploadBandwidth, transport;
        if (this.bandwidth === null) {
            downloadBandwidth = "N/A";
            uploadBandwidth = "N/A";
        } else {
            downloadBandwidth = this.bandwidth.download ?
                this.bandwidth.download + " Kbps" : "N/A";
            uploadBandwidth = this.bandwidth.upload ?
                this.bandwidth.upload + " Kbps" : "N/A";
        }

        if (!this.transport || this.transport.length === 0) {
            transport = "<tr>" +
                "<td><span class='jitsipopover_blue' " +
                "data-i18n='connectionindicator.address'>" +
                translate("connectionindicator.address") + "</span></td>" +
                "<td> N/A</td></tr>";
        } else {
            var data = {remoteIP: [], localIP: [], remotePort: [], localPort: []};
            for (i = 0; i < this.transport.length; i++) {
                var ip = ConnectionIndicator.getIP(this.transport[i].ip);
                var port = ConnectionIndicator.getPort(this.transport[i].ip);
                var localIP =
                    ConnectionIndicator.getIP(this.transport[i].localip);
                var localPort =
                    ConnectionIndicator.getPort(this.transport[i].localip);
                if (data.remoteIP.indexOf(ip) == -1) {
                    data.remoteIP.push(ip);
                }
                if (data.remotePort.indexOf(port) == -1) {
                    data.remotePort.push(port);
                }
                if (data.localIP.indexOf(localIP) == -1) {
                    data.localIP.push(localIP);
                }
                if (data.localPort.indexOf(localPort) == -1) {
                    data.localPort.push(localPort);
                }
            }

            var local_address_key = "connectionindicator.localaddress";
            var remote_address_key = "connectionindicator.remoteaddress";
            var localTransport =
                "<tr><td><span class='jitsipopover_blue' data-i18n='" +
                local_address_key + "' data-i18n-options='" +
                JSON.stringify({count: data.localIP.length}) + "'>" +
                translate(local_address_key, {count: data.localIP.length}) +
                "</span></td><td> " +
                ConnectionIndicator.getStringFromArray(data.localIP) +
                "</td></tr>";
            transport =
                "<tr><td><span class='jitsipopover_blue' data-i18n='" +
                remote_address_key + "' data-i18n-options='" +
                JSON.stringify({count: data.remoteIP.length}) + "'>" +
                translate(remote_address_key, {count: data.remoteIP.length}) +
                "</span></td><td> " +
                ConnectionIndicator.getStringFromArray(data.remoteIP) +
                "</td></tr>";

            var key_remote = "connectionindicator.remoteport",
                key_local = "connectionindicator.localport";

            transport += "<tr>" +
                "<td>" +
                "<span class='jitsipopover_blue' data-i18n='" + key_remote +
                "' data-i18n-options='" +
                JSON.stringify({count: this.transport.length}) + "'>" +
                translate(key_remote, {count: this.transport.length}) +
                "</span></td><td>";
            localTransport += "<tr>" +
                "<td>" +
                "<span class='jitsipopover_blue' data-i18n='" + key_local +
                "' data-i18n-options='" +
                JSON.stringify({count: this.transport.length}) + "'>" +
                translate(key_local, {count: this.transport.length}) +
                "</span></td><td>";

            transport +=
                ConnectionIndicator.getStringFromArray(data.remotePort);
            localTransport +=
                ConnectionIndicator.getStringFromArray(data.localPort);
            transport += "</td></tr>";
            transport += localTransport + "</td></tr>";
            transport += "<tr>" +
                "<td><span class='jitsipopover_blue' data-i18n='connectionindicator.transport'>" +
                translate("connectionindicator.transport") + "</span></td>" +
                "<td>" + this.transport[0].type + "</td></tr>";
        }

        result += "<table style='width:100%'>" +
            "<tr>" +
            "<td>" +
            "<span class='jitsipopover_blue' data-i18n='connectionindicator.bandwidth'>" +
            translate("connectionindicator.bandwidth") + "</span>" +
            "</td><td>" +
            "<span class='jitsipopover_green'>&darr;</span>" +
            downloadBandwidth +
            " <span class='jitsipopover_orange'>&uarr;</span>" +
            uploadBandwidth + "</td></tr>";

        result += transport + "</table>";
    }

    return result;
};

/**
 * Shows or hides the additional information.
 */
ConnectionIndicator.prototype.showMore = function () {
    this.showMoreValue = !this.showMoreValue;
    this.updatePopoverData();
};

function
(classes, iconClass) {
    var icon = document.createElement("span");
    for (var i in classes) {
        icon.classList.add(classes[i]);
    }
    icon.appendChild(
        document.createElement("i")).classList.add(iconClass);
    return icon;
}

/**
 * Creates the indicator
 */
ConnectionIndicator.prototype.create = function () {
    this.connectionIndicatorContainer = document.createElement("div");
    this.connectionIndicatorContainer.className = "connectionindicator";
    this.connectionIndicatorContainer.style.display = "none";
    this.videoContainer.container.appendChild(
        this.connectionIndicatorContainer);
    this.popover = new JitsiPopover(
        $("#" + this.videoContainer.videoSpanId + " > .connectionindicator"),
        {content: "<div class=\"connection_info\" data-i18n='connectionindicator.na'>" +
            APP.translation.translateString("connectionindicator.na") + "</div>",
        skin: "black"});

    // override popover show method to make sure we will update the content
    // before showing the popover
    var origShowFunc = this.popover.show;
    this.popover.show = function () {
        // update content by forcing it, to finish even if popover
        // is not visible
        this.updatePopoverData(true);
        // call the original show, passing its actual this
        origShowFunc.call(this.popover);
    }.bind(this);

    this.emptyIcon = this.connectionIndicatorContainer.appendChild(
        createIcon(["connection", "connection_empty"], "icon-connection"));
    this.fullIcon = this.connectionIndicatorContainer.appendChild(
        createIcon(["connection", "connection_full"], "icon-connection"));
    this.interruptedIndicator = this.connectionIndicatorContainer.appendChild(
        createIcon(["connection", "connection_lost"], "icon-connection-lost"));
    $(this.interruptedIndicator).hide();
};

/**
 * Removes the indicator
 */
ConnectionIndicator.prototype.remove = function () {
    if (this.connectionIndicatorContainer.parentNode) {
        this.connectionIndicatorContainer.parentNode.removeChild(
            this.connectionIndicatorContainer);
    }
    this.popover.forceHide();
};

/**
 * Updates the UI which displays warning about user's connectivity problems.
 *
 * @param {boolean} isActive true if the connection is working fine or false if
 * the user is having connectivity issues.
 */
ConnectionIndicator.prototype.updateConnectionStatusIndicator =
    function (isActive) {
        this.isConnectionActive = isActive;
        if (this.isConnectionActive) {
            $(this.interruptedIndicator).hide();
            $(this.emptyIcon).show();
            $(this.fullIcon).show();
        } else {
            $(this.interruptedIndicator).show();
            $(this.emptyIcon).hide();
            $(this.fullIcon).hide();
            this.updateConnectionQuality(0 /* zero bars */);
        }
    };

/**
 * Updates the data of the indicator
 * @param percent the percent of connection quality
 * @param object the statistics data.
 */
ConnectionIndicator.prototype.updateConnectionQuality =
    function (percent, object) {
        if (percent === null) {
            this.connectionIndicatorContainer.style.display = "none";
            this.popover.forceHide();
            return;
        } else {
            if (this.connectionIndicatorContainer.style.display == "none") {
                this.connectionIndicatorContainer.style.display = "block";
            }
        }
        if (object) {
            this.bandwidth = object.bandwidth;
            this.bitrate = object.bitrate;
            this.packetLoss = object.packetLoss;
            this.transport = object.transport;
            if (object.resolution) {
                this.resolution = object.resolution;
            }
        }
        for (var quality in ConnectionIndicator.connectionQualityValues) {
            if (percent >= quality) {
                this.fullIcon.style.width =
                    ConnectionIndicator.connectionQualityValues[quality];
            }
        }
        if (object && typeof object.isResolutionHD === 'boolean') {
            this.isResolutionHD = object.isResolutionHD;
        }
        this.updateResolutionIndicator();
        this.updatePopoverData();
    };

/**
 * Updates the resolution
 * @param resolution the new resolution
 */
ConnectionIndicator.prototype.updateResolution = function (resolution) {
    this.resolution = resolution;
    this.updateResolutionIndicator();
    this.updatePopoverData();
};

/**
 * Updates the content of the popover if its visible
 * @param force to work even if popover is not visible
 */
ConnectionIndicator.prototype.updatePopoverData = function (force) {
    // generate content, translate it and add it to document only if
    // popover is visible or we force to do so.
    if (this.popover.popoverShown || force) {
        this.popover.updateContent(
            `<div class="connection_info">${this.generateText()}</div>`
        );
        APP.translation.translateElement($(".connection_info"));
    }
};

/**
 * Hides the popover
 */
ConnectionIndicator.prototype.hide = function () {
    this.popover.forceHide();
};

/**
 * Hides the indicator
 */
ConnectionIndicator.prototype.hideIndicator = function () {
    this.connectionIndicatorContainer.style.display = "none";
    if (this.popover)
        this.popover.forceHide();
};

/**
 * Updates the resolution indicator.
 */
ConnectionIndicator.prototype.updateResolutionIndicator = function () {
    if (this.id !== null && VideoLayout.isCurrentlyOnLarge(this.id)) {
        let showResolutionLabel = false;

        if (this.isResolutionHD !== null)
            showResolutionLabel = this.isResolutionHD;
        else if (this.resolution !== null) {
            let resolutions = this.resolution || {};
            Object.keys(resolutions).map(function (ssrc) {
                const { height } = resolutions[ssrc];
                if (height >= config.minHDHeight)
                    showResolutionLabel = true;
            });
        }

        VideoLayout.updateResolutionLabel(showResolutionLabel);
    }
};

export default ConnectionIndicator;
createIcon
fuzz_task_test.py
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """fuzz_task tests.""" # pylint: disable=protected-access from builtins import object from builtins import range import datetime import mock import os import parameterized import shutil import tempfile import time import unittest from pyfakefs import fake_filesystem_unittest import six from base import utils from bot import testcase_manager from bot.fuzzers import engine from bot.fuzzers.libFuzzer import engine as libfuzzer_engine from bot.tasks import fuzz_task from bot.untrusted_runner import file_host from build_management import build_manager from chrome import crash_uploader from crash_analysis.stack_parsing import stack_analyzer from datastore import data_handler from datastore import data_types from datastore import ndb from google_cloud_utils import big_query from metrics import monitor from metrics import monitoring_metrics from system import environment from tests.test_libs import helpers from tests.test_libs import test_utils from tests.test_libs import untrusted_runner_helpers class TrackFuzzerRunResultTest(unittest.TestCase): """Test _track_fuzzer_run_result.""" def setUp(self): monitor.metrics_store().reset_for_testing() def test_fuzzer_run_result(self): """Ensure _track_fuzzer_run_result set the right metrics.""" fuzz_task._track_fuzzer_run_result('name', 10, 100, 2) fuzz_task._track_fuzzer_run_result('name', 100, 200, 2) fuzz_task._track_fuzzer_run_result('name', 1000, 2000, 2) fuzz_task._track_fuzzer_run_result('name', 1000, 500, 0) fuzz_task._track_fuzzer_run_result('name', 0, 1000, -1) fuzz_task._track_fuzzer_run_result('name', 0, 0, 2) self.assertEqual( 4, monitoring_metrics.FUZZER_RETURN_CODE_COUNT.get({ 'fuzzer': 'name', 'return_code': 2 })) self.assertEqual( 1, monitoring_metrics.FUZZER_RETURN_CODE_COUNT.get({ 'fuzzer': 'name', 'return_code': 0 })) self.assertEqual( 1, monitoring_metrics.FUZZER_RETURN_CODE_COUNT.get({ 'fuzzer': 'name', 'return_code': -1 })) testcase_count_ratio = ( monitoring_metrics.FUZZER_TESTCASE_COUNT_RATIO.get({ 'fuzzer': 'name' })) self.assertEqual(3.1, testcase_count_ratio.sum) self.assertEqual(5, testcase_count_ratio.count) expected_buckets = [0 for _ in range(22)] expected_buckets[1] = 1 expected_buckets[3] = 1 expected_buckets[11] = 2 expected_buckets[21] = 1 self.assertListEqual(expected_buckets, testcase_count_ratio.buckets) class TrackBuildRunResultTest(unittest.TestCase): """Test _track_build_run_result.""" def setUp(self): monitor.metrics_store().reset_for_testing() def test_build_run_result(self): """Ensure _track_build_run_result set the right metrics.""" fuzz_task._track_build_run_result('name', 10000, True) fuzz_task._track_build_run_result('name', 10001, True) fuzz_task._track_build_run_result('name', 10002, False) self.assertEqual( 2, monitoring_metrics.JOB_BAD_BUILD_COUNT.get({ 'job': 'name', 'bad_build': True })) self.assertEqual( 1, monitoring_metrics.JOB_BAD_BUILD_COUNT.get({ 'job': 'name', 'bad_build': False })) class TrackTestcaseRunResultTest(unittest.TestCase): 
"""Test _track_testcase_run_result.""" def setUp(self): monitor.metrics_store().reset_for_testing() def test_testcase_run_result(self): """Ensure _track_testcase_run_result sets the right metrics.""" fuzz_task._track_testcase_run_result('fuzzer', 'job', 2, 5) fuzz_task._track_testcase_run_result('fuzzer', 'job', 5, 10) self.assertEqual(7, monitoring_metrics.JOB_NEW_CRASH_COUNT.get({ 'job': 'job' })) self.assertEqual( 15, monitoring_metrics.JOB_KNOWN_CRASH_COUNT.get({ 'job': 'job' })) self.assertEqual( 7, monitoring_metrics.FUZZER_NEW_CRASH_COUNT.get({ 'fuzzer': 'fuzzer' })) self.assertEqual( 15, monitoring_metrics.FUZZER_KNOWN_CRASH_COUNT.get({ 'fuzzer': 'fuzzer' })) class TruncateFuzzerOutputTest(unittest.TestCase): """Truncate fuzzer output tests.""" def test_no_truncation(self): """No truncation.""" self.assertEqual('aaaa', fuzz_task.truncate_fuzzer_output('aaaa', 10)) def test_truncation(self): """Truncate.""" self.assertEqual( '123456\n...truncated...\n54321', fuzz_task.truncate_fuzzer_output( '123456xxxxxxxxxxxxxxxxxxxxxxxxxxx54321', 28)) def test_error(self): """Error if limit is too low.""" with self.assertRaises(AssertionError): self.assertEqual( '', fuzz_task.truncate_fuzzer_output('123456xxxxxx54321', 10)) class TrackFuzzTimeTest(unittest.TestCase): """Test _TrackFuzzTime.""" def setUp(self): monitor.metrics_store().reset_for_testing() def _test(self, timeout): """Test helper.""" time_module = helpers.MockTime() with fuzz_task._TrackFuzzTime('fuzzer', 'job', time_module) as tracker: time_module.advance(5) tracker.timeout = timeout fuzzer_total_time = monitoring_metrics.FUZZER_TOTAL_FUZZ_TIME.get({ 'fuzzer': 'fuzzer', 'timeout': timeout }) self.assertEqual(5, fuzzer_total_time) def test_success(self): """Test report metrics.""" self._test(False) def test_timeout(self): """Test timeout.""" self._test(True) class GetFuzzerMetadataFromOutputTest(unittest.TestCase): """Test get_fuzzer_metadata_from_output.""" def test_no_metadata(self): """Tests no metadata in output.""" data = 'abc\ndef\n123123' self.assertDictEqual(fuzz_task.get_fuzzer_metadata_from_output(data), {}) data = '' self.assertDictEqual(fuzz_task.get_fuzzer_metadata_from_output(data), {}) def test_metadata(self): """Tests parsing of metadata.""" data = ('abc\n' 'def\n' 'metadata:invalid: invalid\n' 'metadat::invalid: invalid\n' 'metadata::foo: bar\n' '123123\n' 'metadata::blah: 1\n' 'metadata::test:abcd\n' 'metadata::test2: def\n') self.assertDictEqual( fuzz_task.get_fuzzer_metadata_from_output(data), { 'blah': '1', 'test': 'abcd', 'test2': 'def', 'foo': 'bar' }) class GetRegressionTest(unittest.TestCase): """Test get_regression.""" def setUp(self): helpers.patch(self, ['build_management.build_manager.is_custom_binary']) def test_one_time_crasher(self): """Test when one_time_crasher_flag is True.""" self.mock.is_custom_binary.return_value = False self.assertEqual('NA', fuzz_task.get_regression(True)) def test_custom_binary(self): """Test for custom binary.""" self.mock.is_custom_binary.return_value = True self.assertEqual('NA', fuzz_task.get_regression(False)) def test_reproducible_non_custom_binary(self): """Test for reproducible non-custom binary.""" self.mock.is_custom_binary.return_value = False self.assertEqual('', fuzz_task.get_regression(False)) class GetFixedOrMinimizedKeyTest(unittest.TestCase): """Test get_fixed_or_minimized_key.""" def test_one_time_crasher(self): """Test when one_time_crasher_flag is True.""" self.assertEqual('NA', fuzz_task.get_fixed_or_minimized_key(True)) def test_reproducible(self): 
"""Test for reproducible.""" self.assertEqual('', fuzz_task.get_fixed_or_minimized_key(False)) class CrashInitTest(fake_filesystem_unittest.TestCase): """Test Crash.__init__.""" def setUp(self): helpers.patch(self, [ 'chrome.crash_uploader.FileMetadataInfo', 'bot.tasks.setup.archive_testcase_and_dependencies_in_gcs', 'crash_analysis.stack_parsing.stack_analyzer.get_crash_data', 'bot.testcase_manager.get_additional_command_line_flags', 'bot.testcase_manager.get_command_line_for_application', 'base.utils.get_crash_stacktrace_output', 'crash_analysis.crash_analyzer.ignore_stacktrace', 'crash_analysis.crash_analyzer.is_security_issue', ]) helpers.patch_environ(self) test_utils.set_up_pyfakefs(self) self.mock.get_command_line_for_application.return_value = 'cmd' dummy_state = stack_analyzer.StackAnalyzerState() dummy_state.crash_type = 'type' dummy_state.crash_address = 'address' dummy_state.crash_state = 'state' dummy_state.crash_stacktrace = 'orig_trace' dummy_state.frames = ['frame 1', 'frame 2'] self.mock.get_crash_data.return_value = dummy_state self.mock.get_crash_stacktrace_output.return_value = 'trace' self.mock.archive_testcase_and_dependencies_in_gcs.return_value = ( 'fuzzed_key', True, 'absolute_path', 'archive_filename') environment.set_value('FILTER_FUNCTIONAL_BUGS', False) with open('/stack_file_path', 'w') as f: f.write('unsym') def test_error(self): """Test failing to reading stacktrace file.""" crash = fuzz_task.Crash.from_testcase_manager_crash( testcase_manager.Crash('dir/path-http-name', 123, 11, ['res'], 'ges', '/no_stack_file')) self.assertIsNone(crash) def _test_crash(self, should_be_ignored, security_flag): """Test crash.""" self.mock.get_command_line_for_application.reset_mock() self.mock.get_crash_data.reset_mock() self.mock.get_crash_stacktrace_output.reset_mock() self.mock.is_security_issue.reset_mock() self.mock.ignore_stacktrace.reset_mock() self.mock.is_security_issue.return_value = security_flag self.mock.ignore_stacktrace.return_value = should_be_ignored crash = fuzz_task.Crash.from_testcase_manager_crash( testcase_manager.Crash('dir/path-http-name', 123, 11, ['res'], 'ges', '/stack_file_path')) self.assertEqual('dir/path-http-name', crash.file_path) self.assertEqual(123, crash.crash_time) self.assertEqual(11, crash.return_code) self.assertListEqual(['res'], crash.resource_list) self.assertEqual('ges', crash.gestures) self.assertEqual('path-http-name', crash.filename) self.assertTrue(crash.http_flag) self.assertEqual('cmd', crash.application_command_line) self.mock.get_command_line_for_application.assert_called_once_with( 'dir/path-http-name', needs_http=True) self.assertEqual('unsym', crash.unsymbolized_crash_stacktrace) self.assertEqual('type', crash.crash_type) self.assertEqual('address', crash.crash_address) self.assertEqual('state', crash.crash_state) self.assertListEqual(['frame 1', 'frame 2'], crash.crash_frames) self.mock.get_crash_data.assert_called_once_with('unsym') self.assertEqual('trace', crash.crash_stacktrace) self.mock.get_crash_stacktrace_output.assert_called_once_with( 'cmd', 'orig_trace', 'unsym') self.assertEqual(security_flag, crash.security_flag) self.mock.is_security_issue.assert_called_once_with('unsym', 'type', 'address') self.assertEqual('type,state,%s' % security_flag, crash.key) self.assertEqual(should_be_ignored, crash.should_be_ignored) self.mock.ignore_stacktrace.assert_called_once_with('orig_trace') self.assertFalse(hasattr(crash, 'fuzzed_key')) return crash def _test_validity_and_get_functional_crash(self): """Test validity of 
different crashes and return functional crash.""" security_crash = self._test_crash( should_be_ignored=False, security_flag=True) self.assertIsNone(security_crash.get_error()) self.assertTrue(security_crash.is_valid()) ignored_crash = self._test_crash(should_be_ignored=True, security_flag=True) self.assertIn('False crash', ignored_crash.get_error()) self.assertFalse(ignored_crash.is_valid()) functional_crash = self._test_crash( should_be_ignored=False, security_flag=False) return functional_crash def test_valid_functional_bug(self): """Test valid because of functional bug.""" functional_crash = self._test_validity_and_get_functional_crash() self.assertIsNone(functional_crash.get_error()) self.assertTrue(functional_crash.is_valid()) def test_invalid_functional_bug(self): """Test invalid because of functional bug.""" environment.set_value('FILTER_FUNCTIONAL_BUGS', True) functional_crash = self._test_validity_and_get_functional_crash() self.assertIn('Functional crash', functional_crash.get_error()) self.assertFalse(functional_crash.is_valid()) def test_hydrate_fuzzed_key(self): """Test hydrating fuzzed_key.""" crash = self._test_crash(should_be_ignored=False, security_flag=True) self.assertFalse(crash.is_archived()) self.assertIsNone(crash.get_error()) self.assertTrue(crash.is_valid()) crash.archive_testcase_in_blobstore() self.assertTrue(crash.is_archived()) self.assertIsNone(crash.get_error()) self.assertTrue(crash.is_valid()) self.assertEqual('fuzzed_key', crash.fuzzed_key) self.assertTrue(crash.archived) self.assertEqual('absolute_path', crash.absolute_path) self.assertEqual('archive_filename', crash.archive_filename) def test_hydrate_fuzzed_key_failure(self): """Test fail to hydrate fuzzed_key.""" self.mock.archive_testcase_and_dependencies_in_gcs.return_value = (None, False, None, None) crash = self._test_crash(should_be_ignored=False, security_flag=True) self.assertFalse(crash.is_archived()) self.assertIsNone(crash.get_error()) self.assertTrue(crash.is_valid()) crash.archive_testcase_in_blobstore() self.assertTrue(crash.is_archived()) self.assertIn('Unable to store testcase in blobstore', crash.get_error()) self.assertFalse(crash.is_valid()) self.assertIsNone(crash.fuzzed_key) self.assertFalse(crash.archived) self.assertIsNone(crash.absolute_path) self.assertIsNone(crash.archive_filename) def test_args_from_testcase_manager(self): """Test args from testcase_manager.Crash.""" testcase_manager_crash = testcase_manager.Crash('path', 0, 0, [], [], '/stack_file_path') self.mock.get_additional_command_line_flags.return_value = 'minimized' environment.set_value('APP_ARGS', 'app') crash = fuzz_task.Crash.from_testcase_manager_crash(testcase_manager_crash) self.assertEqual('app minimized', crash.arguments) class CrashGroupTest(unittest.TestCase): """Test CrashGroup.""" def setUp(self): helpers.patch(self, [ 'bot.tasks.fuzz_task.find_main_crash', 'datastore.data_handler.find_testcase', 'datastore.data_handler.get_project_name', ]) self.mock.get_project_name.return_value = 'some_project' self.crashes = [self._make_crash('g1'), self._make_crash('g2')] self.context = mock.MagicMock( test_timeout=99, fuzzer_name='test', fuzz_target=None) self.reproducible_testcase = self._make_testcase( project_name='some_project', bug_information='', one_time_crasher_flag=False) self.unreproducible_testcase = self._make_testcase( project_name='some_project', bug_information='', one_time_crasher_flag=True) def _make_crash(self, gestures): crash = mock.MagicMock( crash_type='type', crash_state='state', 
security_flag=True, file_path='file_path', http_flag=True, gestures=gestures) return crash def _make_testcase(self, project_name, bug_information, one_time_crasher_flag, timestamp=datetime.datetime.now()): """Make testcase.""" testcase = data_types.Testcase() testcase.timestamp = timestamp testcase.one_time_crasher_flag = one_time_crasher_flag testcase.bug_information = bug_information testcase.project_name = project_name return testcase def test_no_existing_testcase(self): """is_new=True and should_create_testcase=True when there's no existing testcase.""" self.mock.find_testcase.return_value = None self.mock.find_main_crash.return_value = self.crashes[0], True group = fuzz_task.CrashGroup(self.crashes, self.context) self.assertTrue(group.should_create_testcase()) self.mock.find_main_crash.assert_called_once_with( self.crashes, 'test', 'test', self.context.test_timeout) self.assertIsNone(group.existing_testcase) self.assertEqual(self.crashes[0], group.main_crash) self.assertTrue(group.is_new()) def test_has_existing_reproducible_testcase(self): """should_create_testcase=False when there's an existing reproducible testcase.""" self.mock.find_testcase.return_value = self.reproducible_testcase self.mock.find_main_crash.return_value = (self.crashes[0], True) group = fuzz_task.CrashGroup(self.crashes, self.context) self.assertEqual(self.crashes[0].gestures, group.main_crash.gestures) self.mock.find_main_crash.assert_called_once_with( self.crashes, 'test', 'test', self.context.test_timeout) self.assertFalse(group.is_new()) self.assertFalse(group.should_create_testcase()) self.assertTrue(group.has_existing_reproducible_testcase()) def test_reproducible_crash(self): """should_create_testcase=True when the group is reproducible.""" self.mock.find_testcase.return_value = self.unreproducible_testcase self.mock.find_main_crash.return_value = (self.crashes[0], False) group = fuzz_task.CrashGroup(self.crashes, self.context) self.assertEqual(self.crashes[0].gestures, group.main_crash.gestures) self.mock.find_main_crash.assert_called_once_with( self.crashes, 'test', 'test', self.context.test_timeout) self.assertFalse(group.is_new()) self.assertTrue(group.should_create_testcase()) self.assertFalse(group.has_existing_reproducible_testcase()) self.assertFalse(group.one_time_crasher_flag) def test_has_existing_unreproducible_testcase(self): """should_create_testcase=False when the unreproducible testcase already exists.""" self.mock.find_testcase.return_value = self.unreproducible_testcase self.mock.find_main_crash.return_value = (self.crashes[0], True) group = fuzz_task.CrashGroup(self.crashes, self.context) self.assertFalse(group.should_create_testcase()) self.assertEqual(self.crashes[0].gestures, group.main_crash.gestures) self.mock.find_main_crash.assert_called_once_with( self.crashes, 'test', 'test', self.context.test_timeout) self.assertFalse(group.is_new()) self.assertFalse(group.has_existing_reproducible_testcase()) self.assertTrue(group.one_time_crasher_flag) class FindMainCrashTest(unittest.TestCase): """Test find_main_crash.""" def setUp(self): helpers.patch(self, [ 'bot.testcase_manager.test_for_reproducibility', ]) self.crashes = [ self._make_crash('g1'), self._make_crash('g2'), self._make_crash('g3'), self._make_crash('g4') ] self.reproducible_crashes = [] # pylint: disable=unused-argument def test_for_repro(fuzzer_name, full_fuzzer_name, file_path, state, security_flag, test_timeout, http_flag, gestures, arguments=None): """Mock test_for_reproducibility.""" for c in self.reproducible_crashes: 
if c.gestures == gestures: return True return False self.mock.test_for_reproducibility.side_effect = test_for_repro def _make_crash(self, gestures): crash = mock.MagicMock( file_path='file_path', crash_state='state', security_flag=True, test_timeout=999, gestures=gestures) return crash def test_reproducible_crash(self): """Find that the 2nd crash is reproducible.""" for c in self.crashes: c.is_valid.return_value = True self.crashes[0].is_valid.return_value = False self.reproducible_crashes = [self.crashes[2]] self.assertEqual((self.crashes[2], False), fuzz_task.find_main_crash(self.crashes, 'test', 'test', 99)) self.crashes[0].archive_testcase_in_blobstore.assert_called_once_with() self.crashes[1].archive_testcase_in_blobstore.assert_called_once_with() self.crashes[2].archive_testcase_in_blobstore.assert_called_once_with() self.crashes[3].archive_testcase_in_blobstore.assert_not_called() # Calls for self.crashes[1] and self.crashes[2]. self.assertEqual(2, self.mock.test_for_reproducibility.call_count) def test_unreproducible_crash(self): """No reproducible crash. Find the first valid one.""" for c in self.crashes: c.is_valid.return_value = True self.crashes[0].is_valid.return_value = False self.reproducible_crashes = [] self.assertEqual((self.crashes[1], True), fuzz_task.find_main_crash(self.crashes, 'test', 'test', 99)) for c in self.crashes: c.archive_testcase_in_blobstore.assert_called_once_with() # Calls for every crash except self.crashes[0] because it's invalid. self.assertEqual( len(self.crashes) - 1, self.mock.test_for_reproducibility.call_count) def test_no_valid_crash(self): """No valid crash.""" for c in self.crashes: c.is_valid.return_value = False self.reproducible_crashes = [] self.assertEqual((None, None), fuzz_task.find_main_crash(self.crashes, 'test', 'test', 99)) for c in self.crashes: c.archive_testcase_in_blobstore.assert_called_once_with() self.assertEqual(0, self.mock.test_for_reproducibility.call_count) @test_utils.with_cloud_emulators('datastore') class ProcessCrashesTest(fake_filesystem_unittest.TestCase): """Test process_crashes.""" def setUp(self): helpers.patch(self, [ 'chrome.crash_uploader.get_symbolized_stack_bytes', 'bot.tasks.task_creation.create_tasks', 'bot.tasks.setup.archive_testcase_and_dependencies_in_gcs', 'crash_analysis.stack_parsing.stack_analyzer.get_crash_data', 'build_management.revisions.get_real_revision', 'bot.testcase_manager.get_command_line_for_application', 'bot.testcase_manager.test_for_reproducibility', 'base.utils.get_crash_stacktrace_output', 'crash_analysis.crash_analyzer.ignore_stacktrace', 'crash_analysis.crash_analyzer.is_security_issue', 'datastore.data_handler.get_issue_tracker_name', 'datastore.data_handler.get_project_name', 'google.appengine.api.app_identity.get_application_id', 'google_cloud_utils.big_query.Client.insert', 'google_cloud_utils.big_query.get_api_client', 'time.sleep', 'time.time' ]) test_utils.set_up_pyfakefs(self) self.mock.time.return_value = 987 self.mock.get_issue_tracker_name.return_value = 'some_issue_tracker' self.mock.get_project_name.return_value = 'some_project' self.mock.archive_testcase_and_dependencies_in_gcs.return_value = ( 'fuzzed_key', True, 'absolute_path', 'archive_filename') def _make_crash(self, trace, state='state'): """Make crash.""" self.mock.get_real_revision.return_value = 'this.is.fake.ver' self.mock.get_command_line_for_application.return_value = 'cmd' dummy_state = stack_analyzer.StackAnalyzerState() dummy_state.crash_type = 'type' dummy_state.crash_address = 'address' 
    dummy_state.crash_state = state
    dummy_state.crash_stacktrace = 'orig_trace'
    dummy_state.crash_frames = ['frame 1', 'frame 2']
    self.mock.get_crash_data.return_value = dummy_state
    self.mock.get_symbolized_stack_bytes.return_value = 'f00df00d'
    self.mock.get_crash_stacktrace_output.return_value = trace
    self.mock.is_security_issue.return_value = True
    self.mock.ignore_stacktrace.return_value = False

    with open('/stack_file_path', 'w') as f:
      f.write('unsym')

    crash = fuzz_task.Crash.from_testcase_manager_crash(
        testcase_manager.Crash('dir/path-http-name', 123, 11, ['res'], ['ges'],
                               '/stack_file_path'))
    return crash

  def test_existing_unreproducible_testcase(self):
    """Test existing unreproducible testcase."""
    crashes = [self._make_crash('c1'), self._make_crash('c2')]
    self.mock.test_for_reproducibility.return_value = False

    existing_testcase = data_types.Testcase()
    existing_testcase.crash_stacktrace = 'existing'
    existing_testcase.crash_type = crashes[0].crash_type
    existing_testcase.crash_state = crashes[0].crash_state
    existing_testcase.security_flag = crashes[0].security_flag
    existing_testcase.one_time_crasher_flag = True
    existing_testcase.job_type = 'existing_job'
    existing_testcase.timestamp = datetime.datetime.now()
    existing_testcase.project_name = 'some_project'
    existing_testcase.put()

    variant = data_types.TestcaseVariant()
    variant.status = data_types.TestcaseVariantStatus.UNREPRODUCIBLE
    variant.job_type = 'job'
    variant.testcase_id = existing_testcase.key.id()
    variant.put()

    new_crash_count, known_crash_count, groups = fuzz_task.process_crashes(
        crashes=crashes,
        context=fuzz_task.Context(
            project_name='some_project',
            bot_name='bot',
            job_type='job',
            fuzz_target=data_types.FuzzTarget(engine='engine', binary='binary'),
            redzone=111,
            disable_ubsan=True,
            platform_id='platform',
            crash_revision=1234,
            fuzzer_name='fuzzer',
            window_argument='win_args',
            fuzzer_metadata={},
            testcases_metadata={},
            timeout_multiplier=1,
            test_timeout=2,
            thread_wait_timeout=3,
            data_directory='/data'))
    self.assertEqual(0, new_crash_count)
    self.assertEqual(2, known_crash_count)

    self.assertEqual(1, len(groups))
    self.assertEqual(2, len(groups[0].crashes))
    self.assertFalse(groups[0].is_new())
    self.assertEqual(crashes[0].crash_type, groups[0].main_crash.crash_type)
    self.assertEqual(crashes[0].crash_state, groups[0].main_crash.crash_state)
    self.assertEqual(crashes[0].security_flag,
                     groups[0].main_crash.security_flag)

    testcases = list(data_types.Testcase.query())
    self.assertEqual(1, len(testcases))
    self.assertEqual('existing', testcases[0].crash_stacktrace)

    variant = data_handler.get_testcase_variant(existing_testcase.key.id(),
                                                'job')
    self.assertEqual(data_types.TestcaseVariantStatus.FLAKY, variant.status)
    self.assertEqual('fuzzed_key', variant.reproducer_key)
    self.assertEqual(1234, variant.revision)
    self.assertEqual('type', variant.crash_type)
    self.assertEqual('state', variant.crash_state)
    self.assertEqual(True, variant.security_flag)
    self.assertEqual(True, variant.is_similar)

  @parameterized.parameterized.expand(['some_project', 'chromium'])
  def test_create_many_groups(self, project_name):
    """Test creating many groups."""
    self.mock.get_project_name.return_value = project_name
    self.mock.insert.return_value = {'insertErrors': [{'index': 0}]}

    # TODO(metzman): Add a separate test for strategies.
    r2_stacktrace = ('r2\ncf::fuzzing_strategies: value_profile\n')

    crashes = [
        self._make_crash('r1', state='reproducible1'),
        self._make_crash(r2_stacktrace, state='reproducible1'),
        self._make_crash('r3', state='reproducible1'),
        self._make_crash('r4', state='reproducible2'),
        self._make_crash('u1', state='unreproducible1'),
        self._make_crash('u2', state='unreproducible2'),
        self._make_crash('u3', state='unreproducible2'),
        self._make_crash('u4', state='unreproducible3')
    ]

    self.mock.test_for_reproducibility.side_effect = [
        False,  # For r1: returns False, so r1 is demoted.
        True,  # For r2: returns True, so r2 becomes primary for its group.
        True,  # For r4.
        False,  # For u1.
        False,  # For u2.
        False,  # For u3.
        False  # For u4.
    ]

    new_crash_count, known_crash_count, groups = fuzz_task.process_crashes(
        crashes=crashes,
        context=fuzz_task.Context(
            project_name=project_name,
            bot_name='bot',
            job_type='job',
            fuzz_target=data_types.FuzzTarget(engine='engine', binary='binary'),
            redzone=111,
            disable_ubsan=False,
            platform_id='platform',
            crash_revision=1234,
            fuzzer_name='fuzzer',
            window_argument='win_args',
            fuzzer_metadata={},
            testcases_metadata={},
            timeout_multiplier=1,
            test_timeout=2,
            thread_wait_timeout=3,
            data_directory='/data'))
    self.assertEqual(5, new_crash_count)
    self.assertEqual(3, known_crash_count)

    self.assertEqual(5, len(groups))
    self.assertEqual([
        'reproducible1', 'reproducible2', 'unreproducible1', 'unreproducible2',
        'unreproducible3'
    ], [group.main_crash.crash_state for group in groups])
    self.assertEqual([True, True, True, True, True],
                     [group.is_new() for group in groups])
    self.assertEqual([3, 1, 1, 2, 1], [len(group.crashes) for group in groups])

    testcases = list(data_types.Testcase.query())
    self.assertEqual(5, len(testcases))
    self.assertSetEqual(
        set([r2_stacktrace, 'r4', 'u1', 'u2', 'u4']),
        set(t.crash_stacktrace for t in testcases))

    self.assertSetEqual(
        set([
            '{"fuzzing_strategies": ["value_profile"]}', None, None, None, None
        ]), set(t.additional_metadata for t in testcases))

    # r2 is a reproducible crash, so r3 (in the same group) is never archived.
    # Therefore the archive call (mocked here as
    # archive_testcase_and_dependencies_in_gcs) is made `len(crashes) - 1`
    # times.
    self.assertEqual(
        len(crashes) - 1,
        self.mock.archive_testcase_and_dependencies_in_gcs.call_count)

    # Check only the desired testcases were saved.
actual_crash_infos = [group.main_crash.crash_info for group in groups] if project_name != 'chromium': expected_crash_infos = [None] * len(actual_crash_infos) else: expected_saved_crash_info = crash_uploader.CrashReportInfo( product='Chrome_' + environment.platform().lower().capitalize(), version='this.is.fake.ver', serialized_crash_stack_frames='f00df00d') expected_crash_infos = [ expected_saved_crash_info, # r2 is main crash for group r1,r2,r3 expected_saved_crash_info, # r4 is main crash for its own group None, # u1 is not reproducible None, # u2, u3 are not reproducible None, # u4 is not reproducible ] self.assertEqual(len(expected_crash_infos), len(actual_crash_infos)) for expected, actual in zip(expected_crash_infos, actual_crash_infos): if not expected: self.assertIsNone(actual) continue self.assertEqual(expected.product, actual.product) self.assertEqual(expected.version, actual.version) self.assertEqual(expected.serialized_crash_stack_frames, actual.serialized_crash_stack_frames) def _make_big_query_json(crash, reproducible_flag, new_flag, testcase_id): return { 'crash_type': crash.crash_type, 'crash_state': crash.crash_state, 'created_at': 987, 'platform': 'platform', 'crash_time_in_ms': int(crash.crash_time * 1000), 'parent_fuzzer_name': 'engine', 'fuzzer_name': 'engine_binary', 'job_type': 'job', 'security_flag': crash.security_flag, 'reproducible_flag': reproducible_flag, 'revision': '1234', 'new_flag': new_flag, 'project': project_name, 'testcase_id': testcase_id } def _get_testcase_id(crash): rows = list( data_types.Testcase.query( data_types.Testcase.crash_type == crash.crash_type, data_types.Testcase.crash_state == crash.crash_state, data_types.Testcase.security_flag == crash.security_flag)) if not rows: return None return str(rows[0].key.id()) # Calls to write 5 groups of crashes to BigQuery. 
self.assertEqual(5, self.mock.insert.call_count) self.mock.insert.assert_has_calls([ mock.call(mock.ANY, [ big_query.Insert( _make_big_query_json(crashes[0], True, False, None), '%s:bot:987:0' % crashes[0].key), big_query.Insert( _make_big_query_json(crashes[1], True, True, _get_testcase_id(crashes[1])), '%s:bot:987:1' % crashes[0].key), big_query.Insert( _make_big_query_json(crashes[2], True, False, None), '%s:bot:987:2' % crashes[0].key) ]), mock.call(mock.ANY, [ big_query.Insert( _make_big_query_json(crashes[3], True, True, _get_testcase_id(crashes[3])), '%s:bot:987:0' % crashes[3].key) ]), mock.call(mock.ANY, [ big_query.Insert( _make_big_query_json(crashes[4], False, True, _get_testcase_id(crashes[4])), '%s:bot:987:0' % crashes[4].key) ]), mock.call(mock.ANY, [ big_query.Insert( _make_big_query_json(crashes[5], False, True, _get_testcase_id(crashes[5])), '%s:bot:987:0' % crashes[5].key), big_query.Insert( _make_big_query_json(crashes[6], False, False, None), '%s:bot:987:1' % crashes[5].key) ]), mock.call(mock.ANY, [ big_query.Insert( _make_big_query_json(crashes[7], False, True, _get_testcase_id(crashes[7])), '%s:bot:987:0' % crashes[7].key) ]), ]) class WriteCrashToBigQueryTest(unittest.TestCase): """Test write_crash_to_big_query.""" def setUp(self): self.client = mock.Mock(spec_set=big_query.Client) helpers.patch(self, [ 'system.environment.get_value', 'datastore.data_handler.get_project_name', 'google_cloud_utils.big_query.Client', 'time.time', ]) monitor.metrics_store().reset_for_testing() self.mock.get_project_name.return_value = 'some_project' self.mock.get_value.return_value = 'bot' self.mock.Client.return_value = self.client self.mock.time.return_value = 99 self.crashes = [ self._make_crash('c1'), self._make_crash('c2'), self._make_crash('c3') ] newly_created_testcase = mock.MagicMock() newly_created_testcase.key.id.return_value = 't' self.group = mock.MagicMock( crashes=self.crashes, main_crash=self.crashes[0], one_time_crasher_flag=False, newly_created_testcase=newly_created_testcase) self.group.is_new.return_value = True def _create_context(self, job_type, platform_id): return fuzz_task.Context( project_name='some_project', bot_name='bot', job_type=job_type, fuzz_target=data_types.FuzzTarget(engine='engine', binary='binary'), redzone=32, disable_ubsan=False, platform_id=platform_id, crash_revision=1234, fuzzer_name='engine', window_argument='windows_args', fuzzer_metadata={}, testcases_metadata={}, timeout_multiplier=1.0, test_timeout=5, thread_wait_timeout=6, data_directory='data') def _make_crash(self, state): crash = mock.Mock( crash_type='type', crash_state=state, crash_time=111, security_flag=True, key='key') return crash def _json(self, job, platform, state, new_flag, testcase_id): return { 'crash_type': 'type', 'crash_state': state, 'created_at': 99, 'platform': platform, 'crash_time_in_ms': 111000, 'parent_fuzzer_name': 'engine', 'fuzzer_name': 'engine_binary', 'job_type': job, 'security_flag': True, 'reproducible_flag': True, 'revision': '1234', 'new_flag': new_flag, 'project': 'some_project', 'testcase_id': testcase_id } def test_all_succeed(self): """Test writing succeeds.""" self.client.insert.return_value = {} context = self._create_context('job', 'linux') fuzz_task.write_crashes_to_big_query(self.group, context) success_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({ 'success': True }) failure_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({ 'success': False }) self.assertEqual(3, success_count) self.assertEqual(0, failure_count) 
    self.mock.Client.assert_called_once_with(
        dataset_id='main', table_id='crashes$19700101')
    self.client.insert.assert_called_once_with([
        big_query.Insert(
            self._json('job', 'linux', 'c1', True, 't'), 'key:bot:99:0'),
        big_query.Insert(
            self._json('job', 'linux', 'c2', False, None), 'key:bot:99:1'),
        big_query.Insert(
            self._json('job', 'linux', 'c3', False, None), 'key:bot:99:2')
    ])

  def test_succeed(self):
    """Test writing when one row fails to insert."""
    self.client.insert.return_value = {'insertErrors': [{'index': 1}]}
    context = self._create_context('job', 'linux')
    fuzz_task.write_crashes_to_big_query(self.group, context)

    success_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({
        'success': True
    })
    failure_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({
        'success': False
    })

    self.assertEqual(2, success_count)
    self.assertEqual(1, failure_count)

    self.mock.Client.assert_called_once_with(
        dataset_id='main', table_id='crashes$19700101')
    self.client.insert.assert_called_once_with([
        big_query.Insert(
            self._json('job', 'linux', 'c1', True, 't'), 'key:bot:99:0'),
        big_query.Insert(
            self._json('job', 'linux', 'c2', False, None), 'key:bot:99:1'),
        big_query.Insert(
            self._json('job', 'linux', 'c3', False, None), 'key:bot:99:2')
    ])

  def test_chromeos_platform(self):
    """Test ChromeOS platform is written in stats."""
    self.client.insert.return_value = {'insertErrors': [{'index': 1}]}
    context = self._create_context('job_chromeos', 'linux')
    fuzz_task.write_crashes_to_big_query(self.group, context)

    success_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({
        'success': True
    })
    failure_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({
        'success': False
    })

    self.assertEqual(2, success_count)
    self.assertEqual(1, failure_count)

    self.mock.Client.assert_called_once_with(
        dataset_id='main', table_id='crashes$19700101')
    self.client.insert.assert_called_once_with([
        big_query.Insert(
            self._json('job_chromeos', 'chrome', 'c1', True, 't'),
            'key:bot:99:0'),
        big_query.Insert(
            self._json('job_chromeos', 'chrome', 'c2', False, None),
            'key:bot:99:1'),
        big_query.Insert(
            self._json('job_chromeos', 'chrome', 'c3', False, None),
            'key:bot:99:2')
    ])
    self.client.insert.side_effect = Exception('error')
    context = self._create_context('job', 'linux')
    fuzz_task.write_crashes_to_big_query(self.group, context)

    success_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({
        'success': True
    })
    failure_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({
        'success': False
    })

    self.assertEqual(0, success_count)
    self.assertEqual(3, failure_count)


# Inherit from unittest.TestCase (not object), otherwise the test runner never
# collects this class and self.assertEqual does not exist.
class ConvertGroupsToCrashesTest(unittest.TestCase):
  """Test convert_groups_to_crashes."""

  def test_convert(self):
    """Test converting."""
    groups = [
        mock.Mock(
            crashes=[mock.Mock(), mock.Mock()],
            main_crash=mock.Mock(
                crash_type='t1', crash_state='s1', security_flag=True)),
        mock.Mock(
            crashes=[mock.Mock()],
            main_crash=mock.Mock(
                crash_type='t2', crash_state='s2', security_flag=False)),
    ]
    groups[0].is_new.return_value = False
    groups[1].is_new.return_value = True

    self.assertEqual([
        {
            'is_new': False,
            'count': 2,
            'crash_type': 't1',
            'crash_state': 's1',
            'security_flag': True
        },
        {
            'is_new': True,
            'count': 1,
            'crash_type': 't2',
            'crash_state': 's2',
            'security_flag': False
        },
    ], fuzz_task.convert_groups_to_crashes(groups))


class TestCorpusSync(fake_filesystem_unittest.TestCase):
  """Test corpus sync."""

  def setUp(self):
    helpers.patch(self, [
        'fuzzing.corpus_manager.FuzzTargetCorpus.rsync_to_disk',
        'fuzzing.corpus_manager.FuzzTargetCorpus.upload_files',
        'google_cloud_utils.storage.last_updated',
    ])

    helpers.patch_environ(self)

    os.environ['FAIL_RETRIES'] = '1'
    os.environ['CORPUS_BUCKET'] = 'bucket'

    self.mock.rsync_to_disk.return_value = True
    test_utils.set_up_pyfakefs(self)

    self.fs.create_dir('/dir')
    self.fs.create_dir('/dir1')

  def _write_corpus_files(self, *args, **kwargs):  # pylint: disable=unused-argument
    self.fs.create_file('/dir/a')
    self.fs.create_file('/dir/b')
    return True

  def test_sync(self):
    """Test corpus sync."""
    corpus = fuzz_task.GcsCorpus('parent', 'child', '/dir', '/dir1')

    self.mock.rsync_to_disk.side_effect = self._write_corpus_files
    self.assertTrue(corpus.sync_from_gcs())
    self.assertTrue(os.path.exists('/dir1/.child_sync'))
    self.assertEqual(('/dir',), self.mock.rsync_to_disk.call_args[0][1:])
    self.fs.create_file('/dir/c')
    self.assertListEqual(['/dir/c'], corpus.get_new_files())

    corpus.upload_files(corpus.get_new_files())
    self.assertEqual((['/dir/c'],), self.mock.upload_files.call_args[0][1:])

    self.assertListEqual([], corpus.get_new_files())

  def test_no_sync(self):
    """Test that no sync happens when the bundle has not been updated since
    the last sync."""
    corpus = fuzz_task.GcsCorpus('parent', 'child', '/dir', '/dir1')

    utils.write_data_to_file(time.time(), '/dir1/.child_sync')
    self.mock.last_updated.return_value = (
        datetime.datetime.utcnow() - datetime.timedelta(days=1))
    self.assertTrue(corpus.sync_from_gcs())
    self.assertEqual(0, self.mock.rsync_to_disk.call_count)

  def test_sync_with_failed_last_update(self):
    """Test corpus sync when fetching the last update info from GCS fails."""
    corpus = fuzz_task.GcsCorpus('parent', 'child', '/dir', '/dir1')

    utils.write_data_to_file(time.time(), '/dir1/.child_sync')
    self.mock.last_updated.return_value = None
    self.assertTrue(corpus.sync_from_gcs())
    self.assertEqual(1, self.mock.rsync_to_disk.call_count)


@test_utils.with_cloud_emulators('datastore')
class RecordFuzzTargetTest(unittest.TestCase):
  """Tests for record_fuzz_target."""

  def setUp(self):
    helpers.patch_environ(self)
    helpers.patch(self, [
        'base.utils.is_oss_fuzz',
        'base.utils.utcnow',
    ])
    self.mock.is_oss_fuzz.return_value = False
    self.mock.utcnow.return_value = datetime.datetime(2018, 1, 1)

  def test_record_fuzz_target(self):
    """Test that
record_fuzz_target works.""" fuzz_task.record_fuzz_target('libFuzzer', 'child', 'job') fuzz_target = ndb.Key(data_types.FuzzTarget, 'libFuzzer_child').get() self.assertDictEqual({ 'binary': 'child', 'engine': 'libFuzzer', 'project': 'test-project', }, fuzz_target.to_dict()) job_mapping = ndb.Key(data_types.FuzzTargetJob, 'libFuzzer_child/job').get() self.assertDictEqual({ 'fuzz_target_name': 'libFuzzer_child', 'job': 'job', 'engine': 'libFuzzer', 'last_run': datetime.datetime(2018, 1, 1, 0, 0), 'weight': 1.0, }, job_mapping.to_dict()) self.assertEqual('libFuzzer_child', fuzz_target.fully_qualified_name()) self.assertEqual('child', fuzz_target.project_qualified_name()) def test_record_fuzz_target_existing(self): """Test that record_fuzz_target works when updating an existing entity.""" data_types.FuzzTarget( binary='child', engine='libFuzzer', project='test-project').put() data_types.FuzzTargetJob( fuzz_target_name='libFuzzer_child', job='job', engine='libFuzzer', last_run=datetime.datetime(2017, 12, 31, 0, 0)).put() fuzz_task.record_fuzz_target('libFuzzer', 'child', 'job') fuzz_target = ndb.Key(data_types.FuzzTarget, 'libFuzzer_child').get() self.assertDictEqual({ 'binary': 'child', 'engine': 'libFuzzer', 'project': 'test-project', }, fuzz_target.to_dict()) job_mapping = ndb.Key(data_types.FuzzTargetJob, 'libFuzzer_child/job').get() self.assertDictEqual({ 'fuzz_target_name': 'libFuzzer_child', 'job': 'job', 'engine': 'libFuzzer', 'last_run': datetime.datetime(2018, 1, 1, 0, 0), 'weight': 1.0, }, job_mapping.to_dict()) self.assertEqual('libFuzzer_child', fuzz_target.fully_qualified_name()) self.assertEqual('child', fuzz_target.project_qualified_name()) def test_record_fuzz_target_no_binary_name(self): """Test recording fuzz target with no binary.""" # Passing None to binary_name is an error. We shouldn't create any # FuzzTargets as a result. 
fuzz_task.record_fuzz_target('libFuzzer', None, 'job') fuzz_target = ndb.Key(data_types.FuzzTarget, 'libFuzzer_child').get() self.assertIsNone(fuzz_target) job_mapping = ndb.Key(data_types.FuzzTargetJob, 'libFuzzer_child/job').get() self.assertIsNone(job_mapping) @parameterized.parameterized.expand(['child', 'proj_child']) def test_record_fuzz_target_ossfuzz(self, binary_name): """Test that record_fuzz_target works with OSS-Fuzz projects.""" self.mock.is_oss_fuzz.return_value = True data_types.Job(name='job', environment_string='PROJECT_NAME = proj\n').put() fuzz_task.record_fuzz_target('libFuzzer', binary_name, 'job') fuzz_target = ndb.Key(data_types.FuzzTarget, 'libFuzzer_proj_child').get() self.assertDictEqual({ 'binary': binary_name, 'engine': 'libFuzzer', 'project': 'proj', }, fuzz_target.to_dict()) job_mapping = ndb.Key(data_types.FuzzTargetJob, 'libFuzzer_proj_child/job').get() self.assertDictEqual({ 'fuzz_target_name': 'libFuzzer_proj_child', 'job': 'job', 'engine': 'libFuzzer', 'last_run': datetime.datetime(2018, 1, 1, 0, 0), 'weight': 1.0, }, job_mapping.to_dict()) self.assertEqual('libFuzzer_proj_child', fuzz_target.fully_qualified_name()) self.assertEqual('proj_child', fuzz_target.project_qualified_name()) @test_utils.with_cloud_emulators('datastore') class DoEngineFuzzingTest(fake_filesystem_unittest.TestCase): """do_engine_fuzzing tests.""" def setUp(self): helpers.patch_environ(self) helpers.patch(self, [ 'bot.fuzzers.engine_common.current_timestamp', 'bot.tasks.fuzz_task.GcsCorpus.sync_from_gcs', 'bot.tasks.fuzz_task.GcsCorpus.upload_files', 'build_management.revisions.get_component_list', 'bot.testcase_manager.upload_log', 'bot.testcase_manager.upload_testcase', 'metrics.fuzzer_stats.upload_stats', ]) test_utils.set_up_pyfakefs(self) os.environ['JOB_NAME'] = 'libfuzzer_asan_test' os.environ['FUZZ_INPUTS'] = '/fuzz-inputs' os.environ['FUZZ_INPUTS_DISK'] = '/fuzz-inputs-disk' os.environ['BUILD_DIR'] = '/build_dir' os.environ['MAX_TESTCASES'] = '2' os.environ['AUTOMATIC_LABELS'] = 'auto_label,auto_label1' os.environ['AUTOMATIC_COMPONENTS'] = 'auto_component,auto_component1' self.fs.create_file('/build_dir/test_target') self.fs.create_file( '/build_dir/test_target.labels', contents='label1\nlabel2') self.fs.create_file( '/build_dir/test_target.owners', contents='[email protected]') self.fs.create_file( '/build_dir/test_target.components', contents='component1\ncomponent2') self.fs.create_file('/input') self.mock.sync_from_gcs.return_value = True self.mock.upload_files.return_value = True self.mock.get_component_list.return_value = [{ 'component': 'component', 'link_text': 'rev', }] self.mock.current_timestamp.return_value = 0.0 def test_basic(self): """Test basic fuzzing session.""" session = fuzz_task.FuzzingSession('libFuzzer', 'libfuzzer_asan_test', 60) session.testcase_directory = os.environ['FUZZ_INPUTS'] session.data_directory = '/data_dir' os.environ['FUZZ_TARGET'] = 'test_target' os.environ['APP_REVISION'] = '1' expected_crashes = [engine.Crash('/input', 'stack', ['args'], 1.0)] engine_impl = mock.Mock() engine_impl.name = 'libFuzzer' engine_impl.prepare.return_value = engine.FuzzOptions( '/corpus', ['arg'], { 'strategy_1': 1, 'strategy_2': 50, }) engine_impl.fuzz.side_effect = lambda *_: engine.FuzzResult( 'logs', ['cmd'], expected_crashes, {'stat': 1}, 42.0) crashes, fuzzer_metadata = session.do_engine_fuzzing(engine_impl) self.assertDictEqual({ 'fuzzer_binary_name': 'test_target', 'issue_components': 'component1,component2,auto_component,auto_component1', 
'issue_labels': 'label1,label2,auto_label,auto_label1', 'issue_owners': '[email protected]', }, fuzzer_metadata) log_time = datetime.datetime(1970, 1, 1, 0, 0) log_call = mock.call( 'Component revisions (build r1):\n' 'component: rev\n\n' 'Return code: 1\n\n' 'Command: cmd\nBot: None\nTime ran: 42.0\n\n' 'logs\n' 'cf::fuzzing_strategies: strategy_1:1,strategy_2:50', log_time) self.mock.upload_log.assert_has_calls([log_call, log_call]) self.mock.upload_testcase.assert_has_calls([ mock.call('/input', log_time), mock.call('/input', log_time), ]) self.assertEqual(2, len(crashes)) for i in range(2): self.assertEqual('/input', crashes[i].file_path) self.assertEqual(1, crashes[i].return_code) self.assertEqual('stack', crashes[i].unsymbolized_crash_stacktrace) self.assertEqual(1.0, crashes[i].crash_time) self.assertEqual('args', crashes[i].arguments) for i in range(2): upload_args = self.mock.upload_stats.call_args_list[i][0][0] testcase_run = upload_args[0] self.assertDictEqual({ 'build_revision': 1, 'command': ['cmd'], 'fuzzer': u'libFuzzer_test_target', 'job': 'libfuzzer_asan_test', 'kind': 'TestcaseRun', 'stat': 1, 'strategy_strategy_1': 1, 'strategy_strategy_2': 50, 'timestamp': 0.0, }, testcase_run.data) class UntrustedRunEngineFuzzerTest( untrusted_runner_helpers.UntrustedRunnerIntegrationTest): """Engine fuzzing tests for untrusted.""" def setUp(self): """Set up.""" super(UntrustedRunEngineFuzzerTest, self).setUp() environment.set_value('JOB_NAME', 'libfuzzer_asan_job') job = data_types.Job( name='libfuzzer_asan_job', environment_string=( 'RELEASE_BUILD_BUCKET_PATH = ' 'gs://clusterfuzz-test-data/test_libfuzzer_builds/' 'test-libfuzzer-build-([0-9]+).zip\n' 'REVISION_VARS_URL = https://commondatastorage.googleapis.com/' 'clusterfuzz-test-data/test_libfuzzer_builds/' 'test-libfuzzer-build-%s.srcmap.json\n')) job.put() self.temp_dir = tempfile.mkdtemp(dir=environment.get_value('FUZZ_INPUTS')) environment.set_value('USE_MINIJAIL', False) def tearDown(self): super(UntrustedRunEngineFuzzerTest, self).tearDown() shutil.rmtree(self.temp_dir, ignore_errors=True) def test_run_engine_fuzzer(self): """Test running engine fuzzer.""" self._setup_env(job_type='libfuzzer_asan_job') environment.set_value('FUZZ_TEST_TIMEOUT', 3600) build_manager.setup_build() corpus_directory = os.path.join(self.temp_dir, 'corpus') testcase_directory = os.path.join(self.temp_dir, 'artifacts') os.makedirs(file_host.rebase_to_worker_root(corpus_directory)) os.makedirs(file_host.rebase_to_worker_root(testcase_directory)) result, fuzzer_metadata = fuzz_task.run_engine_fuzzer( libfuzzer_engine.LibFuzzerEngine(), 'test_fuzzer', corpus_directory, testcase_directory) self.assertIn( 'ERROR: AddressSanitizer: SEGV on unknown address 0x000000000000', result.logs) self.assertEqual(1, len(result.crashes)) self.assertTrue(result.crashes[0].input_path.startswith( os.environ['ROOT_DIR'])) self.assertTrue(os.path.exists(result.crashes[0].input_path)) self.assertIsInstance(result.stats.get('number_of_executed_units'), int) self.assertIsInstance(result.stats.get('oom_count'), int) self.assertIsInstance( result.stats.get('strategy_selection_method'), six.string_types) self.assertDictEqual({'fuzzer_binary_name': 'test_fuzzer'}, fuzzer_metadata) class AddIssueMetadataFromEnvironmentTest(unittest.TestCase): """Tests for _add_issue_metadata_from_environment.""" def setUp(self): helpers.patch_environ(self) def test_add_no_existing(self): """Test adding issue metadata when there are none existing.""" os.environ['AUTOMATIC_LABELS'] = 'auto_label' 
os.environ['AUTOMATIC_LABELS_1'] = 'auto_label1' os.environ['AUTOMATIC_COMPONENTS'] = 'auto_component' os.environ['AUTOMATIC_COMPONENTS_1'] = 'auto_component1' metadata = {} fuzz_task._add_issue_metadata_from_environment(metadata) self.assertDictEqual({ 'issue_components': 'auto_component,auto_component1', 'issue_labels': 'auto_label,auto_label1', }, metadata) def test_add_append(self): """Test adding issue metadata when there are already existing metadata.""" os.environ['AUTOMATIC_LABELS'] = 'auto_label' os.environ['AUTOMATIC_LABELS_1'] = 'auto_label1' os.environ['AUTOMATIC_COMPONENTS'] = 'auto_component' os.environ['AUTOMATIC_COMPONENTS_1'] = 'auto_component1' metadata = { 'issue_components': 'existing_component', 'issue_labels': 'existing_label' } fuzz_task._add_issue_metadata_from_environment(metadata) self.assertDictEqual({ 'issue_components': 'existing_component,auto_component,auto_component1', 'issue_labels': 'existing_label,auto_label,auto_label1', }, metadata) def test_add_numeric(self): """Tests adding a numeric label.""" os.environ['AUTOMATIC_LABELS'] = '123' metadata = {} fuzz_task._add_issue_metadata_from_environment(metadata) self.assertDictEqual({ 'issue_labels': '123', }, metadata)
  def test_exception(self):
    """Test writing when the client raises an exception."""
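# A note on the BigQuery row ids asserted in the tests above (e.g.
# 'key:bot:99:0'): they follow the pattern
# <main crash key>:<bot name>:<created_at>:<row index within the group>.
# BigQuery streaming inserts treat such per-row insert ids as best-effort
# de-duplication keys, which is presumably why each big_query.Insert carries
# one; this reading is inferred from the tests, not stated by the code under
# test.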
main.go
package main

import (
	"annotation/handler"
	"annotation/utils/cache"
	"annotation/utils/db"
	"annotation/utils/logging"
	"annotation/utils/setting"
	"fmt"
	"net/http"
)

// Note: the opening brace must sit on the same line as `func main()`; Go's
// automatic semicolon insertion rejects a brace on its own line.
func main() {
	// Package initialization. Each package's `init` function could do this
	// implicitly, but `init` functions run in dependency order, which is often
	// not obvious. Renaming `init` to `Setup` lets main call them explicitly,
	// in exactly the order they are needed.
	setting.Setup()
	db.Setup()
	logging.Setup()
	cache.Setup()

	router := handler.InitRouter()

	logging.Info("[server] running on ", setting.ServerSetting.HttpPort)

	s := &http.Server{
		Addr:           fmt.Sprintf(":%d", setting.ServerSetting.HttpPort),
		Handler:        router,
		ReadTimeout:    setting.ServerSetting.ReadTimeout,
		WriteTimeout:   setting.ServerSetting.WriteTimeout,
		MaxHeaderBytes: 1 << 20,
	}

	// ListenAndServe always returns a non-nil error; log it rather than
	// discarding it silently.
	if err := s.ListenAndServe(); err != nil {
		logging.Info("[server] stopped: ", err)
	}
}
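// For reference, a minimal sketch of what one of these Setup packages might
// look like. This is illustrative only: the real `setting` package is not
// shown in this file, so the fields and config source below are assumptions.
//
//	package setting
//
//	import "time"
//
//	type Server struct {
//		HttpPort     int
//		ReadTimeout  time.Duration
//		WriteTimeout time.Duration
//	}
//
//	var ServerSetting = &Server{}
//
//	// Setup replaces an implicit init(): the caller decides when it runs.
//	func Setup() {
//		// Load configuration (file, env, flags) and populate ServerSetting.
//	}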
interface.js
import axios from 'axios'

// Actions
const INIT_INTERFACE_DATA = 'yapi/interface/INIT_INTERFACE_DATA';
const FETCH_INTERFACE_DATA = 'yapi/interface/FETCH_INTERFACE_DATA';
const FETCH_INTERFACE_LIST = 'yapi/interface/FETCH_INTERFACE_LIST';
const DELETE_INTERFACE_DATA = 'yapi/interface/DELETE_INTERFACE_DATA';
const DELETE_INTERFACE_CAT_DATA = 'yapi/interface/DELETE_INTERFACE_CAT_DATA';
const UPDATE_INTERFACE_DATA = 'yapi/interface/UPDATE_INTERFACE_DATA';
const CHANGE_EDIT_STATUS = 'yapi/interface/CHANGE_EDIT_STATUS';
// const SAVE_INTERFACE_PROJECT_ID = 'yapi/interface/SAVE_INTERFACE_PROJECT_ID';
// const GET_INTERFACE_GROUP_LIST = 'yapi/interface/GET_INTERFACE_GROUP_LIST';

// Reducer
const initialState = {
  curdata: {},
  list: [],
  editStatus: false // tracks whether the edit page has unsaved edits
}

export default (state = initialState, action) => {
  switch (action.type) {
    case INIT_INTERFACE_DATA:
      return initialState
    case UPDATE_INTERFACE_DATA:
      return {
        ...state,
        curdata: Object.assign({}, state.curdata, action.updata)
      }
    case FETCH_INTERFACE_DATA:
      return {
        ...state,
        curdata: action.payload.data
      }
    case FETCH_INTERFACE_LIST:
      return {
        ...state,
        list: action.payload.data
      }
    case CHANGE_EDIT_STATUS: {
      return {
        ...state,
        editStatus: action.status
      };
    }
    default:
      return state
  }
}

// Tracks whether the edit page has unsaved edits.
export function changeEditStatus(status) {
  return {
    type: CHANGE_EDIT_STATUS,
    status
  }
}

export function initInterface() {
  return {
    type: INIT_INTERFACE_DATA
  }
}

export function updateInterfaceData(updata) {
  return {
    type: UPDATE_INTERFACE_DATA,
    updata: updata,
    payload: true
  }
}

export async function deleteInterfaceData(id) {
  let result = await axios.post('/api/interface/del', {
    id: id
  })
  return {
    type: DELETE_INTERFACE_DATA,
    payload: result
  }
}

export async function deleteInterfaceCatData(id) {
  let result = await axios.post('/api/interface/del_cat', {
    catid: id
  })
  return {
    type: DELETE_INTERFACE_CAT_DATA,
    payload: result
  }
}

// Action Creators
export async function fetchInterfaceData(interfaceId) {
  let result = await axios.get('/api/interface/get?id=' + interfaceId);
  return {
    type: FETCH_INTERFACE_DATA,
    payload: result.data
  }
}

export async function fetchInterfaceList(projectId) {
  let result = await axios.get('/api/interface/list_menu?project_id=' + projectId);
  return {
}
type: FETCH_INTERFACE_LIST, payload: result.data }
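// Illustrative usage of the action creators above (a sketch; `store` is
// assumed to be a Redux store created elsewhere and is not part of this
// module). The async creators resolve to plain action objects, so callers
// await them before dispatching:
//
//   const listAction = await fetchInterfaceList(projectId);
//   store.dispatch(listAction); // reducer puts listAction.payload.data in state.list
//
//   store.dispatch(changeEditStatus(true)); // mark the edit page as having edits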
actor_critic_agent.py
# # Copyright (c) 2017 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from typing import Union import numpy as np import scipy.signal from rl_coach.agents.policy_optimization_agent import PolicyOptimizationAgent, PolicyGradientRescaler
from rl_coach.architectures.tensorflow_components.heads.v_head import VHeadParameters from rl_coach.architectures.tensorflow_components.middlewares.fc_middleware import FCMiddlewareParameters from rl_coach.base_parameters import AlgorithmParameters, NetworkParameters, \ AgentParameters from rl_coach.logger import screen from rl_coach.memories.episodic.single_episode_buffer import SingleEpisodeBufferParameters from rl_coach.spaces import DiscreteActionSpace from rl_coach.utils import last_sample from rl_coach.architectures.tensorflow_components.embedders.embedder import InputEmbedderParameters class ActorCriticAlgorithmParameters(AlgorithmParameters): def __init__(self): super().__init__() self.policy_gradient_rescaler = PolicyGradientRescaler.A_VALUE self.apply_gradients_every_x_episodes = 5 self.beta_entropy = 0 self.num_steps_between_gradient_updates = 5000 # this is called t_max in all the papers self.gae_lambda = 0.96 self.estimate_state_value_using_gae = False class ActorCriticNetworkParameters(NetworkParameters): def __init__(self): super().__init__() self.input_embedders_parameters = {'observation': InputEmbedderParameters()} self.middleware_parameters = FCMiddlewareParameters() self.heads_parameters = [VHeadParameters(), PolicyHeadParameters()] self.loss_weights = [0.5, 1.0] self.rescale_gradient_from_head_by_factor = [1, 1] self.optimizer_type = 'Adam' self.clip_gradients = 40.0 self.async_training = True class ActorCriticAgentParameters(AgentParameters): def __init__(self): super().__init__(algorithm=ActorCriticAlgorithmParameters(), exploration=None, #TODO this should be different for continuous (ContinuousEntropyExploration) # and discrete (CategoricalExploration) action spaces. memory=SingleEpisodeBufferParameters(), networks={"main": ActorCriticNetworkParameters()}) @property def path(self): return 'rl_coach.agents.actor_critic_agent:ActorCriticAgent' # Actor Critic - https://arxiv.org/abs/1602.01783 class ActorCriticAgent(PolicyOptimizationAgent): def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None): super().__init__(agent_parameters, parent) self.last_gradient_update_step_idx = 0 self.action_advantages = self.register_signal('Advantages') self.state_values = self.register_signal('Values') self.value_loss = self.register_signal('Value Loss') self.policy_loss = self.register_signal('Policy Loss') # Discounting function used to calculate discounted returns. def discount(self, x, gamma): return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1] def get_general_advantage_estimation_values(self, rewards, values): # values contain n+1 elements (t ... t+n+1), rewards contain n elements (t ... t + n) bootstrap_extended_rewards = np.array(rewards.tolist() + [values[-1]]) # Approximation based calculation of GAE (mathematically correct only when Tmax = inf, # although in practice works even in much smaller Tmax values, e.g. 
20) deltas = rewards + self.ap.algorithm.discount * values[1:] - values[:-1] gae = self.discount(deltas, self.ap.algorithm.discount * self.ap.algorithm.gae_lambda) if self.ap.algorithm.estimate_state_value_using_gae: discounted_returns = np.expand_dims(gae + values[:-1], -1) else: discounted_returns = np.expand_dims(np.array(self.discount(bootstrap_extended_rewards, self.ap.algorithm.discount)), 1)[:-1] return gae, discounted_returns def learn_from_batch(self, batch): # batch contains a list of episodes to learn from network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys() # get the values for the current states result = self.networks['main'].online_network.predict(batch.states(network_keys)) current_state_values = result[0] self.state_values.add_sample(current_state_values) # the targets for the state value estimator num_transitions = batch.size state_value_head_targets = np.zeros((num_transitions, 1)) # estimate the advantage function action_advantages = np.zeros((num_transitions, 1)) if self.policy_gradient_rescaler == PolicyGradientRescaler.A_VALUE: if batch.game_overs()[-1]: R = 0 else: R = self.networks['main'].online_network.predict(last_sample(batch.next_states(network_keys)))[0] for i in reversed(range(num_transitions)): R = batch.rewards()[i] + self.ap.algorithm.discount * R state_value_head_targets[i] = R action_advantages[i] = R - current_state_values[i] elif self.policy_gradient_rescaler == PolicyGradientRescaler.GAE: # get bootstraps bootstrapped_value = self.networks['main'].online_network.predict(last_sample(batch.next_states(network_keys)))[0] values = np.append(current_state_values, bootstrapped_value) if batch.game_overs()[-1]: values[-1] = 0 # get general discounted returns table gae_values, state_value_head_targets = self.get_general_advantage_estimation_values(batch.rewards(), values) action_advantages = np.vstack(gae_values) else: screen.warning("WARNING: The requested policy gradient rescaler is not available") action_advantages = action_advantages.squeeze(axis=-1) actions = batch.actions() if not isinstance(self.spaces.action, DiscreteActionSpace) and len(actions.shape) < 2: actions = np.expand_dims(actions, -1) # train result = self.networks['main'].online_network.accumulate_gradients({**batch.states(network_keys), 'output_1_0': actions}, [state_value_head_targets, action_advantages]) # logging total_loss, losses, unclipped_grads = result[:3] self.action_advantages.add_sample(action_advantages) self.unclipped_grads.add_sample(unclipped_grads) self.value_loss.add_sample(losses[0]) self.policy_loss.add_sample(losses[1]) return total_loss, losses, unclipped_grads def get_prediction(self, states): tf_input_state = self.prepare_batch_for_inference(states, "main") return self.networks['main'].online_network.predict(tf_input_state)[1:] # index 0 is the state value
from rl_coach.architectures.tensorflow_components.heads.policy_head import PolicyHeadParameters
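# Worked example of the discount() filter defined above (illustrative, not
# part of the original file). scipy.signal.lfilter([1], [1, -gamma], x[::-1])
# computes the recurrence y[t] = x[t] + gamma * y[t+1] over the reversed
# array, so discount(x, gamma)[t] equals the discounted return
# G_t = sum_k gamma**k * x[t+k]. For gamma = 0.5:
#
#   discount(np.array([1., 1., 1.]), 0.5)
#   # -> array([1.75, 1.5, 1.0]), since 1 + 0.5 * (1 + 0.5 * 1) = 1.75
#
# In get_general_advantage_estimation_values, `deltas` are the one-step TD
# errors delta_t = r_t + gamma * V(s_{t+1}) - V(s_t), and
# discount(deltas, gamma * lambda) yields the GAE advantage
# A_t = sum_k (gamma * lambda)**k * delta_{t+k}.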
process.rs
// // Sysinfo // // Copyright (c) 2018 Guillaume Gomez // use crate::{DiskUsage, Pid, ProcessExt, Signal}; use std::ffi::OsString; use std::fmt::{self, Debug}; use std::mem::{size_of, zeroed, MaybeUninit}; use std::ops::Deref; use std::os::windows::ffi::OsStringExt; use std::os::windows::process::CommandExt; use std::path::{Path, PathBuf}; use std::process; use std::ptr::null_mut; use std::str; use libc::{c_void, memcpy}; use ntapi::ntpebteb::PEB; use ntapi::ntwow64::{PEB32, PRTL_USER_PROCESS_PARAMETERS32, RTL_USER_PROCESS_PARAMETERS32}; use once_cell::sync::Lazy; use ntapi::ntpsapi::{ NtQueryInformationProcess, ProcessBasicInformation, ProcessCommandLineInformation, ProcessWow64Information, PROCESSINFOCLASS, PROCESS_BASIC_INFORMATION, }; use ntapi::ntrtl::{RtlGetVersion, PRTL_USER_PROCESS_PARAMETERS, RTL_USER_PROCESS_PARAMETERS}; use winapi::shared::basetsd::SIZE_T; use winapi::shared::minwindef::{DWORD, FALSE, FILETIME, LPVOID, MAX_PATH, TRUE, ULONG}; use winapi::shared::ntdef::{NT_SUCCESS, UNICODE_STRING}; use winapi::shared::ntstatus::{ STATUS_BUFFER_OVERFLOW, STATUS_BUFFER_TOO_SMALL, STATUS_INFO_LENGTH_MISMATCH, }; use winapi::um::handleapi::CloseHandle; use winapi::um::memoryapi::{ReadProcessMemory, VirtualQueryEx}; use winapi::um::processthreadsapi::{GetProcessTimes, GetSystemTimes, OpenProcess}; use winapi::um::psapi::{ EnumProcessModulesEx, GetModuleBaseNameW, GetModuleFileNameExW, GetProcessMemoryInfo, LIST_MODULES_ALL, PROCESS_MEMORY_COUNTERS, PROCESS_MEMORY_COUNTERS_EX, }; use winapi::um::sysinfoapi::GetSystemTimeAsFileTime; use winapi::um::winbase::{GetProcessIoCounters, CREATE_NO_WINDOW}; use winapi::um::winnt::{ HANDLE, IO_COUNTERS, MEMORY_BASIC_INFORMATION, PROCESS_QUERY_INFORMATION, PROCESS_VM_READ, RTL_OSVERSIONINFOEXW, ULARGE_INTEGER, }; /// Enum describing the different status of a process. #[derive(Clone, Copy, Debug)] pub enum ProcessStatus { /// Currently runnable. Run, } impl ProcessStatus { /// Used to display `ProcessStatus`. pub fn as_str(&self) -> &str { match *self { ProcessStatus::Run => "Runnable", } } } impl fmt::Display for ProcessStatus { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str(self.as_str()) } } fn get_process_handler(pid: Pid) -> Option<HANDLE> { if pid == 0 { return None; } let options = PROCESS_QUERY_INFORMATION | PROCESS_VM_READ; let process_handler = unsafe { OpenProcess(options, FALSE, pid as DWORD) }; if process_handler.is_null() { None } else { Some(process_handler) } } #[derive(Clone)] struct PtrWrapper<T: Clone>(T); impl<T: Clone> Deref for PtrWrapper<T> { type Target = T; fn deref(&self) -> &Self::Target { &self.0 } } unsafe impl<T: Clone> Send for PtrWrapper<T> {} unsafe impl<T: Clone> Sync for PtrWrapper<T> {} /// Struct containing a process' information. 
pub struct Process { name: String, cmd: Vec<String>, exe: PathBuf, pid: Pid, environ: Vec<String>, cwd: PathBuf, root: PathBuf, pub(crate) memory: u64, pub(crate) virtual_memory: u64, parent: Option<Pid>, status: ProcessStatus, handle: PtrWrapper<HANDLE>, cpu_calc_values: CPUsageCalculationValues, start_time: u64, cpu_usage: f32, pub(crate) updated: bool, old_read_bytes: u64, old_written_bytes: u64, read_bytes: u64, written_bytes: u64, } struct CPUsageCalculationValues { old_process_sys_cpu: u64, old_process_user_cpu: u64, old_system_sys_cpu: u64, old_system_user_cpu: u64, } impl CPUsageCalculationValues { fn new() -> Self { CPUsageCalculationValues { old_process_sys_cpu: 0, old_process_user_cpu: 0, old_system_sys_cpu: 0, old_system_user_cpu: 0, } } } static WINDOWS_8_1_OR_NEWER: Lazy<bool> = Lazy::new(|| { let mut version_info: RTL_OSVERSIONINFOEXW = unsafe { MaybeUninit::zeroed().assume_init() }; version_info.dwOSVersionInfoSize = std::mem::size_of::<RTL_OSVERSIONINFOEXW>() as u32; if !NT_SUCCESS(unsafe { RtlGetVersion(&mut version_info as *mut RTL_OSVERSIONINFOEXW as *mut _) }) { return true; } // Windows 8.1 is 6.3 version_info.dwMajorVersion > 6 || version_info.dwMajorVersion == 6 && version_info.dwMinorVersion >= 3 }); unsafe fn get_process_name(process_handler: HANDLE, h_mod: *mut c_void) -> String { let mut process_name = [0u16; MAX_PATH + 1]; GetModuleBaseNameW( process_handler, h_mod as _, process_name.as_mut_ptr(), MAX_PATH as DWORD + 1, ); null_terminated_wchar_to_string(&process_name) } unsafe fn get_h_mod(process_handler: HANDLE, h_mod: &mut *mut c_void) -> bool { let mut cb_needed = 0; EnumProcessModulesEx( process_handler, h_mod as *mut *mut c_void as _, size_of::<DWORD>() as DWORD, &mut cb_needed, LIST_MODULES_ALL, ) != 0 } unsafe fn get_exe(process_handler: HANDLE, h_mod: *mut c_void) -> PathBuf { let mut exe_buf = [0u16; MAX_PATH + 1]; GetModuleFileNameExW( process_handler, h_mod as _, exe_buf.as_mut_ptr(), MAX_PATH as DWORD + 1, ); PathBuf::from(null_terminated_wchar_to_string(&exe_buf)) } impl Process { #[allow(clippy::uninit_assumed_init)] pub(crate) fn new_from_pid(pid: Pid) -> Option<Process>
pub(crate) fn new_full( pid: Pid, parent: Option<Pid>, memory: u64, virtual_memory: u64, name: String, ) -> Process { if let Some(handle) = get_process_handler(pid) { let mut h_mod = null_mut(); unsafe { get_h_mod(handle, &mut h_mod) }; let exe = unsafe { get_exe(handle, h_mod) }; let mut root = exe.clone(); root.pop(); Process { handle: PtrWrapper(handle), name, pid, parent, cmd: get_cmd_line(handle), environ: get_proc_env(handle), exe, cwd: get_cwd(handle), root, status: ProcessStatus::Run, memory, virtual_memory, cpu_usage: 0., cpu_calc_values: CPUsageCalculationValues::new(), start_time: unsafe { get_start_time(handle) }, updated: true, old_read_bytes: 0, old_written_bytes: 0, read_bytes: 0, written_bytes: 0, } } else { Process { handle: PtrWrapper(null_mut()), name, pid, parent, cmd: Vec::new(), environ: Vec::new(), exe: get_executable_path(pid), cwd: PathBuf::new(), root: PathBuf::new(), status: ProcessStatus::Run, memory, virtual_memory, cpu_usage: 0., cpu_calc_values: CPUsageCalculationValues::new(), start_time: 0, updated: true, old_read_bytes: 0, old_written_bytes: 0, read_bytes: 0, written_bytes: 0, } } } fn new_with_handle(pid: Pid, parent: Option<Pid>, process_handler: HANDLE) -> Process { let mut h_mod = null_mut(); unsafe { let name = if get_h_mod(process_handler, &mut h_mod) { get_process_name(process_handler, h_mod) } else { String::new() }; let exe = get_exe(process_handler, h_mod); let mut root = exe.clone(); root.pop(); Process { handle: PtrWrapper(process_handler), name, pid, parent, cmd: get_cmd_line(process_handler), environ: get_proc_env(process_handler), exe, cwd: get_cwd(process_handler), root, status: ProcessStatus::Run, memory: 0, virtual_memory: 0, cpu_usage: 0., cpu_calc_values: CPUsageCalculationValues::new(), start_time: get_start_time(process_handler), updated: true, old_read_bytes: 0, old_written_bytes: 0, read_bytes: 0, written_bytes: 0, } } } } // TODO: it's possible to get environment variables like it's done in // https://github.com/processhacker/processhacker // // They have a very nice function called PhGetProcessEnvironment. Just very complicated as it // seems... 
impl ProcessExt for Process { fn new(pid: Pid, parent: Option<Pid>, _: u64) -> Process { if let Some(process_handler) = get_process_handler(pid) { Process::new_with_handle(pid, parent, process_handler) } else { Process { handle: PtrWrapper(null_mut()), name: String::new(), pid, parent, cmd: Vec::new(), environ: Vec::new(), exe: get_executable_path(pid), cwd: PathBuf::new(), root: PathBuf::new(), status: ProcessStatus::Run, memory: 0, virtual_memory: 0, cpu_usage: 0., cpu_calc_values: CPUsageCalculationValues::new(), start_time: 0, updated: true, old_read_bytes: 0, old_written_bytes: 0, read_bytes: 0, written_bytes: 0, } } } fn kill(&self, _signal: Signal) -> bool { let mut kill = process::Command::new("taskkill.exe"); kill.arg("/PID").arg(self.pid().to_string()).arg("/F"); kill.creation_flags(CREATE_NO_WINDOW); match kill.output() { Ok(o) => o.status.success(), Err(_) => false, } } fn name(&self) -> &str { &self.name } fn cmd(&self) -> &[String] { &self.cmd } fn exe(&self) -> &Path { self.exe.as_path() } fn pid(&self) -> Pid { self.pid } fn environ(&self) -> &[String] { &self.environ } fn cwd(&self) -> &Path { self.cwd.as_path() } fn root(&self) -> &Path { self.root.as_path() } fn memory(&self) -> u64 { self.memory } fn virtual_memory(&self) -> u64 { self.virtual_memory } fn parent(&self) -> Option<Pid> { self.parent } fn status(&self) -> ProcessStatus { self.status } fn start_time(&self) -> u64 { self.start_time } fn cpu_usage(&self) -> f32 { self.cpu_usage } fn disk_usage(&self) -> DiskUsage { DiskUsage { written_bytes: self.written_bytes - self.old_written_bytes, total_written_bytes: self.written_bytes, read_bytes: self.read_bytes - self.old_read_bytes, total_read_bytes: self.read_bytes, } } } impl Drop for Process { fn drop(&mut self) { unsafe { if self.handle.is_null() { return; } CloseHandle(*self.handle); } } } unsafe fn get_start_time(handle: HANDLE) -> u64 { let mut fstart: FILETIME = zeroed(); let mut x = zeroed(); GetProcessTimes( handle, &mut fstart as *mut FILETIME, &mut x as *mut FILETIME, &mut x as *mut FILETIME, &mut x as *mut FILETIME, ); let tmp = (fstart.dwHighDateTime as u64) << 32 | (fstart.dwLowDateTime as u64); tmp / 10_000_000 - 11_644_473_600 } unsafe fn ph_query_process_variable_size( process_handle: HANDLE, process_information_class: PROCESSINFOCLASS, ) -> Option<Vec<u16>> { let mut return_length = MaybeUninit::<ULONG>::uninit(); let mut status = NtQueryInformationProcess( process_handle, process_information_class, std::ptr::null_mut(), 0, return_length.as_mut_ptr() as *mut _, ); if status != STATUS_BUFFER_OVERFLOW && status != STATUS_BUFFER_TOO_SMALL && status != STATUS_INFO_LENGTH_MISMATCH { return None; } let mut return_length = return_length.assume_init(); let buf_len = (return_length as usize) / 2; let mut buffer: Vec<u16> = Vec::with_capacity(buf_len + 1); buffer.set_len(buf_len); status = NtQueryInformationProcess( process_handle, process_information_class, buffer.as_mut_ptr() as *mut _, return_length, &mut return_length as *mut _, ); if !NT_SUCCESS(status) { return None; } buffer.push(0); Some(buffer) } unsafe fn get_cmdline_from_buffer(buffer: *const u16) -> Vec<String> { // Get argc and argv from the command line let mut argc = MaybeUninit::<i32>::uninit(); let argv_p = winapi::um::shellapi::CommandLineToArgvW(buffer, argc.as_mut_ptr()); if argv_p.is_null() { return Vec::new(); } let argc = argc.assume_init(); let argv = std::slice::from_raw_parts(argv_p, argc as usize); let mut res = Vec::new(); for arg in argv { let len = libc::wcslen(*arg); let 
str_slice = std::slice::from_raw_parts(*arg, len); res.push(String::from_utf16_lossy(str_slice)); } winapi::um::winbase::LocalFree(argv_p as *mut _); res } unsafe fn get_region_size(handle: HANDLE, ptr: LPVOID) -> Result<usize, &'static str> { let mut meminfo = MaybeUninit::<MEMORY_BASIC_INFORMATION>::uninit(); if VirtualQueryEx( handle, ptr, meminfo.as_mut_ptr() as *mut _, size_of::<MEMORY_BASIC_INFORMATION>(), ) == 0 { return Err("Unable to read process memory information"); } let meminfo = meminfo.assume_init(); Ok((meminfo.RegionSize as isize - ptr.offset_from(meminfo.BaseAddress)) as usize) } enum ProcessDataKind { CMDLINE, CWD, ENVIRON, } unsafe fn get_process_data( handle: HANDLE, kind: ProcessDataKind, ) -> Result<Vec<u16>, &'static str> { if !cfg!(target_pointer_width = "64") { return Err("Non 64 bit targets are not supported"); } // First check if target process is running in wow64 compatibility emulator let mut pwow32info = MaybeUninit::<LPVOID>::uninit(); let result = NtQueryInformationProcess( handle, ProcessWow64Information, pwow32info.as_mut_ptr() as *mut _, size_of::<LPVOID>() as u32, null_mut(), ); if !NT_SUCCESS(result) { return Err("Unable to check WOW64 information about the process"); } let pwow32info = pwow32info.assume_init(); let (ptr, size) = if pwow32info.is_null() { // target is a 64 bit process let mut pbasicinfo = MaybeUninit::<PROCESS_BASIC_INFORMATION>::uninit(); let result = NtQueryInformationProcess( handle, ProcessBasicInformation, pbasicinfo.as_mut_ptr() as *mut _, size_of::<PROCESS_BASIC_INFORMATION>() as u32, null_mut(), ); if !NT_SUCCESS(result) { return Err("Unable to get basic process information"); } let pinfo = pbasicinfo.assume_init(); let mut peb = MaybeUninit::<PEB>::uninit(); if ReadProcessMemory( handle, pinfo.PebBaseAddress as *mut _, peb.as_mut_ptr() as *mut _, size_of::<PEB>() as SIZE_T, std::ptr::null_mut(), ) != TRUE { return Err("Unable to read process PEB"); } let peb = peb.assume_init(); let mut proc_params = MaybeUninit::<RTL_USER_PROCESS_PARAMETERS>::uninit(); if ReadProcessMemory( handle, peb.ProcessParameters as *mut PRTL_USER_PROCESS_PARAMETERS as *mut _, proc_params.as_mut_ptr() as *mut _, size_of::<RTL_USER_PROCESS_PARAMETERS>() as SIZE_T, std::ptr::null_mut(), ) != TRUE { return Err("Unable to read process parameters"); } let proc_params = proc_params.assume_init(); match kind { ProcessDataKind::CMDLINE => { let ptr = proc_params.CommandLine.Buffer; let size = proc_params.CommandLine.Length; (ptr as LPVOID, size as usize) } ProcessDataKind::CWD => { let ptr = proc_params.CurrentDirectory.DosPath.Buffer; let size = proc_params.CurrentDirectory.DosPath.Length; (ptr as LPVOID, size as usize) } ProcessDataKind::ENVIRON => { let ptr = proc_params.Environment; let size = get_region_size(handle, ptr)?; (ptr as LPVOID, size as usize) } } } else { // target is a 32 bit process in wow64 mode let mut peb32 = MaybeUninit::<PEB32>::uninit(); if ReadProcessMemory( handle, pwow32info, peb32.as_mut_ptr() as *mut _, size_of::<PEB32>() as SIZE_T, std::ptr::null_mut(), ) != TRUE { return Err("Unable to read PEB32"); } let peb32 = peb32.assume_init(); let mut proc_params = MaybeUninit::<RTL_USER_PROCESS_PARAMETERS32>::uninit(); if ReadProcessMemory( handle, peb32.ProcessParameters as *mut PRTL_USER_PROCESS_PARAMETERS32 as *mut _, proc_params.as_mut_ptr() as *mut _, size_of::<RTL_USER_PROCESS_PARAMETERS32>() as SIZE_T, std::ptr::null_mut(), ) != TRUE { return Err("Unable to read 32 bit process parameters"); } let proc_params = 
proc_params.assume_init(); match kind { ProcessDataKind::CMDLINE => { let ptr = proc_params.CommandLine.Buffer; let size = proc_params.CommandLine.Length; (ptr as LPVOID, size as usize) } ProcessDataKind::CWD => { let ptr = proc_params.CurrentDirectory.DosPath.Buffer; let size = proc_params.CurrentDirectory.DosPath.Length; (ptr as LPVOID, size as usize) } ProcessDataKind::ENVIRON => { let ptr = proc_params.Environment; let size = get_region_size(handle, ptr as LPVOID)?; (ptr as LPVOID, size as usize) } } }; let mut buffer: Vec<u16> = Vec::with_capacity(size / 2 + 1); buffer.set_len(size / 2); if ReadProcessMemory( handle, ptr as *mut _, buffer.as_mut_ptr() as *mut _, size, std::ptr::null_mut(), ) != TRUE { return Err("Unable to read process data"); } Ok(buffer) } fn get_cwd(handle: HANDLE) -> PathBuf { unsafe { match get_process_data(handle, ProcessDataKind::CWD) { Ok(buffer) => PathBuf::from(null_terminated_wchar_to_string(buffer.as_slice())), Err(_e) => { sysinfo_debug!("get_cwd failed to get data: {}", _e); PathBuf::new() } } } } unsafe fn null_terminated_wchar_to_string(slice: &[u16]) -> String { match slice.iter().position(|&x| x == 0) { Some(pos) => OsString::from_wide(&slice[..pos]) .to_string_lossy() .into_owned(), None => OsString::from_wide(slice).to_string_lossy().into_owned(), } } fn get_cmd_line_old(handle: HANDLE) -> Vec<String> { unsafe { match get_process_data(handle, ProcessDataKind::CMDLINE) { Ok(buffer) => get_cmdline_from_buffer(buffer.as_ptr()), Err(_e) => { sysinfo_debug!("get_cmd_line_old failed to get data: {}", _e); Vec::new() } } } } #[allow(clippy::cast_ptr_alignment)] fn get_cmd_line_new(handle: HANDLE) -> Vec<String> { unsafe { if let Some(buffer) = ph_query_process_variable_size(handle, ProcessCommandLineInformation) { let buffer = (*(buffer.as_ptr() as *const UNICODE_STRING)).Buffer; get_cmdline_from_buffer(buffer) } else { vec![] } } } fn get_cmd_line(handle: HANDLE) -> Vec<String> { if *WINDOWS_8_1_OR_NEWER { get_cmd_line_new(handle) } else { get_cmd_line_old(handle) } } fn get_proc_env(handle: HANDLE) -> Vec<String> { unsafe { match get_process_data(handle, ProcessDataKind::ENVIRON) { Ok(buffer) => { let equals = "=".encode_utf16().next().unwrap(); let raw_env = buffer; let mut result = Vec::new(); let mut begin = 0; while let Some(offset) = raw_env[begin..].iter().position(|&c| c == 0) { let end = begin + offset; if raw_env[begin..end].iter().any(|&c| c == equals) { result.push( OsString::from_wide(&raw_env[begin..end]) .to_string_lossy() .into_owned(), ); begin = end + 1; } else { break; } } result } Err(_e) => { sysinfo_debug!("get_proc_env failed to get data: {}", _e); Vec::new() } } } } pub(crate) fn get_executable_path(_pid: Pid) -> PathBuf { /*let where_req = format!("ProcessId={}", pid); if let Some(ret) = run_wmi(&["process", "where", &where_req, "get", "ExecutablePath"]) { for line in ret.lines() { if line.is_empty() || line == "ExecutablePath" { continue } return line.to_owned(); } }*/ PathBuf::new() } pub(crate) fn get_system_computation_time() -> ULARGE_INTEGER { unsafe { let mut now: ULARGE_INTEGER = std::mem::zeroed(); let mut ftime: FILETIME = zeroed(); GetSystemTimeAsFileTime(&mut ftime); memcpy( &mut now as *mut ULARGE_INTEGER as *mut c_void, &mut ftime as *mut FILETIME as *mut c_void, size_of::<FILETIME>(), ); now } } #[inline] fn check_sub(a: u64, b: u64) -> u64 { if a < b { a } else { a - b } } /// Before changing this function, you must consider the following: /// https://github.com/GuillaumeGomez/sysinfo/issues/459 pub(crate) fn 
compute_cpu_usage(p: &mut Process, nb_processors: u64, _now: ULARGE_INTEGER) { unsafe { let mut ftime: FILETIME = zeroed(); let mut fsys: FILETIME = zeroed(); let mut fuser: FILETIME = zeroed(); let mut fglobal_idle_time: FILETIME = zeroed(); let mut fglobal_kernel_time: FILETIME = zeroed(); // notice that it includes idle time let mut fglobal_user_time: FILETIME = zeroed(); GetProcessTimes( *p.handle, &mut ftime as *mut FILETIME, &mut ftime as *mut FILETIME, &mut fsys as *mut FILETIME, &mut fuser as *mut FILETIME, ); GetSystemTimes( &mut fglobal_idle_time as *mut FILETIME, &mut fglobal_kernel_time as *mut FILETIME, &mut fglobal_user_time as *mut FILETIME, ); let mut sys: ULARGE_INTEGER = std::mem::zeroed(); memcpy( &mut sys as *mut ULARGE_INTEGER as *mut c_void, &mut fsys as *mut FILETIME as *mut c_void, size_of::<FILETIME>(), ); let mut user: ULARGE_INTEGER = std::mem::zeroed(); memcpy( &mut user as *mut ULARGE_INTEGER as *mut c_void, &mut fuser as *mut FILETIME as *mut c_void, size_of::<FILETIME>(), ); let mut global_kernel_time: ULARGE_INTEGER = std::mem::zeroed(); memcpy( &mut global_kernel_time as *mut ULARGE_INTEGER as *mut c_void, &mut fglobal_kernel_time as *mut FILETIME as *mut c_void, size_of::<FILETIME>(), ); let mut global_user_time: ULARGE_INTEGER = std::mem::zeroed(); memcpy( &mut global_user_time as *mut ULARGE_INTEGER as *mut c_void, &mut fglobal_user_time as *mut FILETIME as *mut c_void, size_of::<FILETIME>(), ); let sys = *sys.QuadPart(); let user = *user.QuadPart(); let global_kernel_time = *global_kernel_time.QuadPart(); let global_user_time = *global_user_time.QuadPart(); let delta_global_kernel_time = check_sub(global_kernel_time, p.cpu_calc_values.old_system_sys_cpu); let delta_global_user_time = check_sub(global_user_time, p.cpu_calc_values.old_system_user_cpu); let delta_user_time = check_sub(user, p.cpu_calc_values.old_process_user_cpu); let delta_sys_time = check_sub(sys, p.cpu_calc_values.old_process_sys_cpu); let denominator = (delta_global_user_time + delta_global_kernel_time) as f64; p.cpu_usage = 100.0 * ((delta_user_time + delta_sys_time) as f64 / if denominator == 0.0 { p.cpu_usage = 0.0; return; } else { denominator }) as f32 * nb_processors as f32; p.cpu_calc_values.old_process_user_cpu = user; p.cpu_calc_values.old_process_sys_cpu = sys; p.cpu_calc_values.old_system_user_cpu = global_user_time; p.cpu_calc_values.old_system_sys_cpu = global_kernel_time; } } pub fn get_handle(p: &Process) -> HANDLE { *p.handle } pub fn update_disk_usage(p: &mut Process) { let mut counters = MaybeUninit::<IO_COUNTERS>::uninit(); let ret = unsafe { GetProcessIoCounters(*p.handle, counters.as_mut_ptr()) }; if ret == 0 { sysinfo_debug!("GetProcessIoCounters call failed on process {}", p.pid()); } else { let counters = unsafe { counters.assume_init() }; p.old_read_bytes = p.read_bytes; p.old_written_bytes = p.written_bytes; p.read_bytes = counters.ReadTransferCount; p.written_bytes = counters.WriteTransferCount; } } pub fn update_memory(p: &mut Process) { unsafe { let mut pmc: PROCESS_MEMORY_COUNTERS_EX = zeroed(); if GetProcessMemoryInfo( *p.handle, &mut pmc as *mut PROCESS_MEMORY_COUNTERS_EX as *mut c_void as *mut PROCESS_MEMORY_COUNTERS, size_of::<PROCESS_MEMORY_COUNTERS_EX>() as DWORD, ) != 0 { p.memory = (pmc.WorkingSetSize as u64) / 1_000; p.virtual_memory = (pmc.PrivateUsage as u64) / 1_000; } } }
{ let process_handler = unsafe { OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, pid as _) }; if process_handler.is_null() { return None; } let mut info: PROCESS_BASIC_INFORMATION = unsafe { MaybeUninit::uninit().assume_init() }; if unsafe { NtQueryInformationProcess( process_handler, ProcessBasicInformation, &mut info as *mut _ as *mut _, size_of::<PROCESS_BASIC_INFORMATION>() as _, null_mut(), ) } != 0 { unsafe { CloseHandle(process_handler) }; return None; } Some(Process::new_with_handle( pid, if info.InheritedFromUniqueProcessId as usize != 0 { Some(info.InheritedFromUniqueProcessId as usize) } else { None }, process_handler, )) }
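Note: compute_cpu_usage above boils down to a delta ratio: (process user + kernel time since the last sample) over (global user + kernel time since the last sample), scaled by the processor count, with check_sub absorbing counters that move backwards. A minimal Python sketch of that arithmetic, with made-up FILETIME tick values (illustration only, not part of sysinfo):

def check_sub(a, b):
    # mirrors the Rust helper: if the counter went backwards, keep `a`
    return a if a < b else a - b

def cpu_usage(proc_now, proc_old, glob_now, glob_old, nb_processors):
    # proc_* and glob_* are (user_time, kernel_time) samples
    d_proc = check_sub(proc_now[0], proc_old[0]) + check_sub(proc_now[1], proc_old[1])
    d_glob = check_sub(glob_now[0], glob_old[0]) + check_sub(glob_now[1], glob_old[1])
    if d_glob == 0:
        return 0.0
    return 100.0 * d_proc / d_glob * nb_processors

print(cpu_usage((500, 300), (400, 250), (9000, 5000), (8000, 4500), 4))  # 40.0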
host.go
package command import ( "github.com/urfave/cli" "fmt" "github.com/home-assistant/hassio-cli/command/helpers" "os" ) // CmdHost All host endpoints for hass.io func CmdHost(c *cli.Context)
{ const HassioBasePath = "host" action := "" endpoint := "" serverOverride := "" get := false DebugEnabled := c.GlobalBool("debug") helpers.DebugEnabled = DebugEnabled Options := c.String("options") RawJSON := c.Bool("rawjson") Filter := c.String("filter") if c.NArg() > 0 { action = c.Args()[0] } switch action { case "reboot", // POST "update", "shutdown": endpoint = action default: fmt.Fprintf(os.Stderr, "No valid action detected.\n") os.Exit(3) } if DebugEnabled { fmt.Fprintf(os.Stdout, "DEBUG [CmdHost]: action->'%s', endpoint='%s', serverOverride->'%s', GET->'%t', options->'%s', rawjson->'%t', filter->'%s'\n", action, endpoint, serverOverride, get, Options, RawJSON, Filter ) } if endpoint != "" { helpers.ExecCommand(HassioBasePath, endpoint, serverOverride, get, Options, Filter, RawJSON) } }
test_all_reduce_group.py
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from collections import OrderedDict import numpy as np import oneflow as flow from test_util import GenArgList import unittest import os def do_test(test_case, mirrored): flow.clear_default_session() flow.config.gpu_device_num(2) func_config = flow.FunctionConfig() if mirrored: func_config.default_logical_view(flow.scope.mirrored_view()) else: func_config.default_logical_view(flow.scope.consistent_view()) @flow.global_function(type="train", function_config=func_config) def Foo(): w = flow.get_variable("w", (10,), initializer=flow.constant_initializer(1)) lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [5]) flow.optimizer.SGD(lr_scheduler, momentum=0).minimize(w) return w r1 = Foo().get().numpy() test_case.assertTrue(np.all(r1 == 1.0)) r2 = Foo().get().numpy() test_case.assertTrue(np.all(r2 == 0.5)) @flow.unittest.skip_unless_1n2d() class TestAllReduceGroup(flow.unittest.TestCase): @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_variable_as_loss_on_two_device(test_case): arg_dict = OrderedDict() arg_dict["mirrored"] = [True, False] for arg in GenArgList(arg_dict): do_test(test_case, *arg) if __name__ == "__main__": unittest.main()
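For reference, GenArgList (imported from test_util, which is not shown here) is assumed to expand an OrderedDict of option lists into the cartesian product of argument tuples; a minimal equivalent sketch under that assumption:

import itertools
from collections import OrderedDict

def gen_arg_list(arg_dict):
    # one argument list per combination, preserving key order
    return [list(combo) for combo in itertools.product(*arg_dict.values())]

arg_dict = OrderedDict()
arg_dict["mirrored"] = [True, False]
print(gen_arg_list(arg_dict))  # [[True], [False]] -> do_test(test_case, True), do_test(test_case, False)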
store.rs
//! //! The `contract storage store` instruction. //! use std::fmt; use serde::Deserialize; use serde::Serialize; use crate::instructions::Instruction; /// /// The `contract storage store` instruction. /// /// Stores the value of `size` from the evaluation stack in the contract storage. /// #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct StorageStore { /// The size of the stored value (number of fields). pub size: usize, } impl StorageStore { /// /// A shortcut constructor. /// pub fn new(size: usize) -> Self { Self { size } } /// /// If the instruction is for the debug mode only. /// pub fn is_debug(&self) -> bool { false } } impl Into<Instruction> for StorageStore { fn into(self) -> Instruction
} impl fmt::Display for StorageStore { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "storage_store {}", self.size) } }
{ Instruction::StorageStore(self) }
template.go
package template import ( "os" "path/filepath" "github.com/goliatone/go-envset/pkg/config" "github.com/goliatone/go-envset/pkg/envset" "github.com/urfave/cli/v2" ) //GetCommand exports template command func GetCommand(cnf *config.Config) *cli.Command
{ return &cli.Command{ //TODO: This actually should load a template file and resolve it using the context. //Default template should generate envset.example Name: "template", Usage: "make a template file from an environment", Description: "create a new template or update file to document the variables in your environment", Flags: []cli.Flag{ &cli.BoolFlag{Name: "print", Usage: "only print the contents to stdout, don't write file"}, &cli.StringFlag{Name: "filename", Usage: "template file `name`", Value: cnf.Template.File}, &cli.StringFlag{Name: "filepath", Usage: "template file `path`", Value: cnf.Template.Path}, &cli.StringFlag{Name: "env-file", Value: cnf.Filename, Usage: "load environment from `FILE`"}, &cli.BoolFlag{Name: "overwrite", Usage: "overwrite file, this will delete any changes"}, }, Action: func(c *cli.Context) error { print := c.Bool("print") filename := c.String("env-file") template := c.String("filename") dir := c.String("filepath") overwrite := c.Bool("overwrite") dir, err := filepath.Abs(dir) if err != nil { return err } if _, err = os.Stat(dir); os.IsNotExist(err) { if err = os.MkdirAll(dir, os.ModePerm); err != nil { return err } } //TODO: This should take a template file which we use to run against our thing template = filepath.Join(dir, template) return envset.DocumentTemplate(filename, template, overwrite, print) }, } }
github.py
# Copyright 2018 BMW Car IT GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import github3 import requests from zubbi.scraper.exceptions import CheckoutError
LOGGER = logging.getLogger(__name__) GRAPHQL_BLAME_QUERY = """ query($owner:String!, $repo:String!, $path:String!) { repository(owner: $owner, name: $repo) { defaultBranchRef { target { ... on Commit { blame(path: $path){ ranges { startingLine endingLine commit { committer { date } } } } } } } } } """ class GitHubRepository(Repository): def __init__(self, repo_name, gh_con): self.repo_name = repo_name self.gh_con = gh_con self._repo = self._get_repo_object() def check_out_file(self, file_path): try: LOGGER.debug("Checking out '%s'", file_path) remote_file_content = self._repo.file_contents(file_path) if remote_file_content.size == 0: raise CheckoutError(file_path, "File is empty.") return remote_file_content.decoded.decode("utf-8") except github3.exceptions.NotFoundError: raise CheckoutError(file_path, "File not found.") def list_directory(self, directory_path): try: LOGGER.debug("Listing contents of '%s' directory", directory_path) # We are only interested in the filenames as the content is None # anyways. If necessary, we could add the filetype (file, directory, # symlink, ...) to the result (value.type) and use this function # in a recursive way. However, this makes only sense if we don't know # the file structures for jobs and roles which is not the case # for this use case. remote_directory = self._repo.directory_contents( directory_path, return_as=dict ) return remote_directory except github3.exceptions.NotFoundError: raise CheckoutError(directory_path, "Directory not found.") except github3.exceptions.UnprocessableResponseBody: raise CheckoutError(directory_path, "Path is not a directory") def last_changed(self, path): LOGGER.debug("Getting last changes for '%s'", path) # We are only interested in the first (newest) commit commits = self._repo.commits(path=path, per_page=1) commit = next(commits) # Get the full commit object git_commit = self._repo.git_commit(sha=commit.sha) last_changed = git_commit.committer["date"] return last_changed def blame(self, path): LOGGER.debug("Getting blame info for '%s'", path) # TODO (fschmidt): When blame is available in GitHub's V4 API, we should # switch to this. Until then, we could use the GraphQL to retrieve the # necessary information. I'd like to think that the response from GitHub # will look the same, so we need to do the parsing and mapping of the # response anyway. 
owner, repo = self.repo_name.split("/", 1) variables = {"owner": owner, "repo": repo, "path": path} token = self.gh_con._get_installation_key(self.repo_name) headers = {"Authorization": "bearer {}".format(token)} response = requests.post( self.gh_con.graphql_url, json={"query": GRAPHQL_BLAME_QUERY, "variables": variables}, headers=headers, ) if response.status_code != 200: return [] blame_json = response.json() # Catch error from GraphQL API errors = blame_json.get("errors") if errors: LOGGER.warning( "Could not get blame info for %s in '%s'", path, self.repo_name ) for error in errors: LOGGER.warning(error["message"]) return [] # Flatten the result (the path follows GRAPHQL_BLAME_QUERY: repository -> defaultBranchRef -> target) flat_blame = [] for blame in blame_json["data"]["repository"]["defaultBranchRef"]["target"]["blame"]["ranges"]: flat_blame.append( { "start": blame["startingLine"], "end": blame["endingLine"], "date": blame["commit"]["committer"]["date"], } ) return flat_blame def _get_repo_object(self): try: owner, repo_name = self.repo_name.split("/") except ValueError: LOGGER.error("Invalid repo name '%s'", self.repo_name) return gh_client = self.gh_con.create_github_client(self.repo_name) if gh_client is None: return repo = gh_client.repository(owner=owner, repository=repo_name) return repo def url_for_file(self, file_path, highlight_start=None, highlight_end=None): file_url = self._repo.file_contents(file_path).html_url if highlight_start is not None: file_url = "{}#L{}".format(file_url, highlight_start) if highlight_end is not None: file_url = "{}-L{}".format(file_url, highlight_end) return file_url def url_for_directory(self, directory_path): return urljoin(self.url, "tree/master", directory_path) @property def url(self): return self._repo.html_url @property def private(self): return self._repo.private @property def name(self): return self.repo_name
from zubbi.scraper.repos import Repository from zubbi.utils import urljoin
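To make the blame flattening concrete, here is a standalone sketch that applies the same reshaping to a fabricated response (the payload below is shaped like a GRAPHQL_BLAME_QUERY result, not real GitHub output):

blame_json = {
    "data": {"repository": {"defaultBranchRef": {"target": {"blame": {"ranges": [
        {"startingLine": 1, "endingLine": 4,
         "commit": {"committer": {"date": "2018-06-01T12:00:00Z"}}},
        {"startingLine": 5, "endingLine": 9,
         "commit": {"committer": {"date": "2018-07-15T08:30:00Z"}}},
    ]}}}}}
}

flat_blame = [
    {"start": r["startingLine"], "end": r["endingLine"],
     "date": r["commit"]["committer"]["date"]}
    for r in blame_json["data"]["repository"]["defaultBranchRef"]["target"]["blame"]["ranges"]
]
print(flat_blame)  # [{'start': 1, 'end': 4, 'date': ...}, {'start': 5, 'end': 9, 'date': ...}]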
wsgi.py
"""
For more information on this file, see https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application from dj_static import Cling os.environ.setdefault("DJANGO_SETTINGS_MODULE", "main.settings") application = Cling(get_wsgi_application()) # pylint: disable=invalid-name
WSGI config for bandcamp app. It exposes the WSGI callable as a module-level variable named ``application``.
pipe.py
import os from . import pre, csr import imageio from tqdm import tqdm import numpy as np from skimage import morphology import pandas as pd from .image_stats import image_summary from skimage.feature import shape_index from concurrent.futures import ThreadPoolExecutor, as_completed import multiprocessing as mp CPU_COUNT = int(os.environ.get('CPU_COUNT', mp.cpu_count())) def _get_scale(image, md_path_or_scale): """Get a valid scale from an image and a metadata path or scale. Parameters ---------- image : np.ndarray The input image. md_path_or_scale : float or image filename The path to the file containing the metadata, or the scale. Returns ------- scale : float """ scale = None try: scale = float(md_path_or_scale) except (TypeError, ValueError): pass if md_path_or_scale is not None and scale is None: md_path = md_path_or_scale.split(sep='/') meta = image.meta for key in md_path: meta = meta[key] scale = float(meta) else: if scale is None: scale = 1 # measurements will be in pixel units return scale
brightness_offset, crop_radius, smooth_method): image = imageio.imread(filename, format=image_format) scale = _get_scale(image, scale_metadata_path) if crop_radius > 0: c = crop_radius image = image[c:-c, c:-c] pixel_threshold_radius = int(np.ceil(threshold_radius / scale)) pixel_smoothing_radius = smooth_radius * pixel_threshold_radius thresholded = pre.threshold(image, sigma=pixel_smoothing_radius, radius=pixel_threshold_radius, offset=brightness_offset, smooth_method=smooth_method) quality = shape_index(image, sigma=pixel_smoothing_radius, mode='reflect') skeleton = morphology.skeletonize(thresholded) * quality framedata = csr.summarise(skeleton, spacing=scale) framedata['squiggle'] = np.log2(framedata['branch-distance'] / framedata['euclidean-distance']) framedata['scale'] = scale framedata.rename(columns={'mean pixel value': 'mean shape index'}, inplace=True) framedata['filename'] = filename return image, thresholded, skeleton, framedata def process_images(filenames, image_format, threshold_radius, smooth_radius, brightness_offset, scale_metadata_path, crop_radius=0, smooth_method='Gaussian', num_threads=CPU_COUNT): """Full pipeline from images to skeleton stats with local median threshold. Parameters ---------- filenames : list of string The list of input filenames. image_format : string The format of the files. 'auto' is automatically determined by the imageio library. See imageio documentation for valid image formats. threshold_radius : float The radius for median thresholding. smooth_radius : float in [0, 1] The value of sigma with which to Gaussian-smooth the image, **relative to `threshold_radius`**. brightness_offset : float The standard brightness value with which to threshold is the local median, `m(x, y)`. Use this value to offset from there: the threshold used will be `m(x, y) + brightness_offset`. scale_metadata_path : string The path in the image dictionary to find the metadata on pixel scale, separated by forward slashes ('/'). crop_radius : int, optional Crop `crop_radius` pixels from each margin of the image before processing. smooth_method : {'Gaussian', 'TV', 'NL'}, optional Which method to use for smoothing. num_threads : int, optional How many threads to use for computation. This should generally be set to the number of CPU cores available to you. Returns ------- results : generator The pipeline yields individual image results in the form of a tuple of ``(filename, image, thresholded_image, skeleton, data_frame)``. Finally, after all the images have been processed, the pipeline yields a DataFrame containing all the collated branch-level results. 
""" image_format = None if image_format == 'auto' else image_format results = [] image_results = [] with ThreadPoolExecutor(max_workers=num_threads) as ex: future_data = {ex.submit(process_single_image, filename, image_format, scale_metadata_path, threshold_radius, smooth_radius, brightness_offset, crop_radius, smooth_method): filename for filename in filenames} for completed_data in tqdm(as_completed(future_data)): image, thresholded, skeleton, framedata = completed_data.result() filename = future_data[completed_data] results.append(framedata) image_stats = image_summary(skeleton, spacing=framedata['scale'][0]) image_stats['filename'] = filename image_stats['branch density'] = (framedata.shape[0] / image_stats['area']) j2j = framedata[framedata['branch-type'] == 2] image_stats['mean J2J branch distance'] = ( j2j['branch-distance'].mean()) image_results.append(image_stats) yield filename, image, thresholded, skeleton, framedata yield pd.concat(results), pd.concat(image_results)
def process_single_image(filename, image_format, scale_metadata_path, threshold_radius, smooth_radius,
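A quick numeric check of the unit handling in process_single_image: threshold_radius arrives in physical units and is converted to pixels via the metadata scale, while smooth_radius is expressed relative to the pixel threshold radius. The values below are illustrative only:

import numpy as np

threshold_radius = 4e-8   # physical units, e.g. metres (assumed)
scale = 1e-8              # physical units per pixel, from _get_scale
smooth_radius = 0.1       # fraction of the pixel threshold radius

pixel_threshold_radius = int(np.ceil(threshold_radius / scale))   # 4 px
pixel_smoothing_radius = smooth_radius * pixel_threshold_radius   # 0.4 px
print(pixel_threshold_radius, pixel_smoothing_radius)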
__init__.py
""" Author: Kris Swanson, [email protected] Tested with Python 3.6.1 on WIN10 """ import socket import struct import time import sys import multiprocessing import datetime import glob import json from crypto import Crypto as cryp from syslog import syslog from nodemanager import NodeManager as nm from localsearch import SearchLocal as sl def logMonitor_Rx(password,params): """ fn listens for messages and updates message log. """ print("Starting Rx Process...\n") with open('network_cfg.json','r') as nwc: nw=json.load(nwc) LOGMSG_GRP = nw['LOGMSG_GRP'] LOGMSG_PORT = nw['LOGMSG_PORT'] SCH_GRP = nw['SCH_GRP'] SCH_PORT = nw['SCH_PORT'] sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(('', LOGMSG_PORT)) # use LOGMSG_GRP instead of '' to listen only # to LOGMSG_GRP, not all groups on LOGMSG_PORT mreq = struct.pack("4sl", socket.inet_aton(LOGMSG_GRP), socket.INADDR_ANY) sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) filter_tag='%(node_num)s:%(role)s:%(cluster_id)s:%(localnode)s' % params print(filter_tag)
quick_search_tag='LogQSearch:::' write_mem_tag='!WRITECACHE!' zero_disk_tag='!DELETEDISKCACHE!' zero_mem_tag='!DELETEMEMCACHE!' ts_start=time.time() log_name='msglog_'+str(ts_start)+'.json' schjob=[] while True: try: search=False rx_msg=sock.recv(2048) dcdmsg=rx_msg.decode("utf-8") dcdmsg=bytes(dcdmsg,'ascii') dcdmsg=cryp.DecryptMsg(dcdmsg,password) if quick_search_tag in dcdmsg: search=True print('quick search!') sl.searchMem(search_list,dcdmsg,password,'off') if filter_tag not in dcdmsg and search==False: jlm=json.loads(dcdmsg) search_list.append({"source_time":jlm["source_time"],'sending_node':jlm['sending_node'],'sending_hostname':jlm['sending_hostname'],"cluster":params["cluster_id"],'orig_message':jlm['orig_message'],'orig_addr':jlm['orig_addr']}) i+=1 if i % 10 == 0: with open ('msglog_temp.json','w') as log: json.dump(search_list,log) continue if i % 105 == 0: ts_start=time.time() log_name='msglog_'+str(ts_start)+'.json' with open (log_name,'w') as log: json.dump(search_list,log) search_list=[] continue else: continue except: print('Rx Process Exception') pass def logMonitor_Tx(msg, params,password, nw): LOGMSG_GRP = nw['LOGMSG_GRP'] LOGMSG_PORT = nw['LOGMSG_PORT'] sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32) print("Starting Tx process...\n") localnode=params['localnode'] role=params['role'] node=params['localnode'] cluster=params['cluster_id'] hostname=(socket.gethostname()) jobs=[] z = multiprocessing.Process(target=nm.infHBeat,args=(params,nw)) jobs.append(z) z.daemon = True z.start() n = multiprocessing.Process(target=nm.messageTagGen,args=(nw,)) jobs.append(n) n.daemon = True n.start() if role == 'RxTx': p = multiprocessing.Process(target=logMonitor_Rx,args=(password,params,)) jobs.append(p) p.daemon = True p.start() ds =multiprocessing.Process(target=sl.deepSearch) jobs.append(ds) ds.daemon = True ds.start() q = multiprocessing.Process(target=syslog) jobs.append(q) q.daemon = True q.start() lenfr=0 send_throttle=2 lfr=[0,0] while True: lfr[0]=lfr[1] if max(lfr) > 100: with open ('syslog.log','w') as f: f.close() lfr=[0,0] time.sleep(send_throttle) try: with open ('droplist.json','r') as dlj: drop_tag=json.load(dlj) drop_tag=str(drop_tag) except : print('possible JSONDecodeError') drop_tag='[]' pass while True: with open('syslog.log','r') as f: fr=f.readlines() lfr[1]=len(fr) if lfr[1] > lfr[0]: msg='' for i in fr[lfr[0]:lfr[1]]: msg=i.rstrip() parse_msg=json.loads(msg) ts = time.time() msg={'source_time':ts,'sending_node':localnode,'sending_hostname':hostname,'orig_message':parse_msg['log_message'],'orig_addr':parse_msg['orig_addr'],'drop_tag':drop_tag} msg=json.dumps(msg) msg=bytes(msg, "ascii") msg=cryp.EncryptMsg(msg,password) try: sock.sendto(msg, (LOGMSG_GRP, LOGMSG_PORT)) except OSError: msg = ("Attempting to send %s log messages from overran Tx buffer" % str(len(fr))) msg=localnode+'@'+hostname+"# "+'"'+msg+'"'+drop_tag msg=bytes(msg, "ascii") msg=cryp.EncryptMsg(msg,password) sock.sendto(msg, (LOGMSG_GRP, LOGMSG_PORT)) pass if lfr[0] == lfr[1]: pass else: pass break sys.exit() """ main fn to pull user info and kick off logMonitor_Tx fn. logMonitor_Tx kicks off heartbeat and Rx functions. """ def main(): params, nw =nm.localParams() with open('pwf','r') as p: password=p.read() password=password.rstrip() jobs = [] msg=None r = multiprocessing.Process(target=logMonitor_Tx, args=(msg,params,password,nw)) jobs.append(r) r.start() if __name__ == '__main__': main()
ts = 0 i=0 dcdmsg='' search_list=[]
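The receive side above hinges on a standard IPv4 multicast join. A self-contained sketch of that setup, with placeholder group/port values standing in for the ones read from network_cfg.json:

import socket
import struct

LOGMSG_GRP = "224.1.1.1"   # placeholder multicast group
LOGMSG_PORT = 5007         # placeholder port

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("", LOGMSG_PORT))  # '' receives from any group bound to this port
mreq = struct.pack("4sl", socket.inet_aton(LOGMSG_GRP), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
# rx_msg = sock.recv(2048)  # would block until a datagram arrives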
test_users.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_serialization import jsonutils from six.moves import http_client from keystone.common.policies import user as up from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _CommonUserTests(object): """Common default functionality for all users.""" def test_user_can_get_their_own_user_reference(self): with self.test_client() as c: r = c.get('/v3/users/%s' % self.user_id, headers=self.headers) self.assertEqual(self.user_id, r.json['user']['id']) class _SystemUserTests(object): """Common default functionality for all system users.""" def test_user_can_get_other_users(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) with self.test_client() as c: r = c.get('/v3/users/%s' % user['id'], headers=self.headers) self.assertEqual(user['id'], r.json['user']['id']) def test_user_cannot_get_non_existent_user_not_found(self): with self.test_client() as c: c.get( '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http_client.NOT_FOUND ) def test_user_can_list_users(self): expected_user_ids = [] for _ in range(3): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) expected_user_ids.append(user['id']) with self.test_client() as c: r = c.get('/v3/users', headers=self.headers) returned_user_ids = [] for user in r.json['users']: returned_user_ids.append(user['id']) for user_id in expected_user_ids: self.assertIn(user_id, returned_user_ids) class _SystemMemberAndReaderUserTests(object): """Common functionality for system readers and system members.""" def test_user_cannot_create_users(self): create = { 'user': { 'name': uuid.uuid4().hex, 'domain': CONF.identity.default_domain_id } } with self.test_client() as c: c.post( '/v3/users', json=create, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_update_users(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) with self.test_client() as c: update = {'user': {'email': uuid.uuid4().hex}} c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_update_non_existent_user_forbidden(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_delete_users(self): user = PROVIDERS.identity_api.create_user( 
unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) with self.test_client() as c: c.delete( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_delete_non_existent_user_forbidden(self): with self.test_client() as c: c.delete( '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) class _DomainUserTests(object): """Common default functionality for all domain users.""" def test_user_can_get_user_within_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with self.test_client() as c: r = c.get('/v3/users/%s' % user['id'], headers=self.headers) self.assertEqual(user['id'], r.json['user']['id']) def test_user_cannot_get_user_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: c.get( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_can_list_users_within_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with self.test_client() as c: r = c.get('/v3/users', headers=self.headers) self.assertEqual(2, len(r.json['users'])) user_ids = [] for user in r.json['users']: user_ids.append(user['id']) self.assertIn(self.user_id, user_ids) self.assertIn(user['id'], user_ids) def test_user_cannot_list_users_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: r = c.get('/v3/users', headers=self.headers) user_ids = [] for u in r.json['users']: user_ids.append(u['id']) self.assertNotIn(user['id'], user_ids) class _DomainMemberAndReaderUserTests(object): """Functionality for all domain members and domain readers.""" def test_user_cannot_create_users_within_domain(self): create = { 'user': { 'domain_id': self.domain_id, 'name': uuid.uuid4().hex } } with self.test_client() as c: c.post( '/v3/users', json=create, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_create_users_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) create = { 'user': { 'domain_id': domain['id'], 'name': uuid.uuid4().hex } } with self.test_client() as c: c.post( '/v3/users', json=create, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_update_users_within_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) update = {'user': {'email': uuid.uuid4().hex}}
with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_update_users_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_update_non_existent_user_forbidden(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_delete_users_within_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.delete( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_delete_users_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: c.delete( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_delete_non_existent_user_forbidden(self): with self.test_client() as c: c.delete( '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) class _ProjectUserTests(object): """Common test cases for all project users.""" def test_user_cannot_get_users_within_their_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.get( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_get_users_in_other_domains(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: c.get( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_get_non_existent_user_forbidden(self): with self.test_client() as c: c.get( '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_list_users_within_domain(self): with self.test_client() as c: c.get( '/v3/users?domain_id=%s' % self.domain_id, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_list_users_in_other_domains(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: c.get( '/v3/users?domain_id=%s' % domain['id'], headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_create_users_within_domain(self): create = { 'user': { 'domain_id': self.domain_id, 'name': uuid.uuid4().hex } } with self.test_client() as c: c.post( '/v3/users', json=create, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def 
test_user_cannot_create_users_in_other_domains(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) create = { 'user': { 'domain_id': domain['id'], 'name': uuid.uuid4().hex } } with self.test_client() as c: c.post( '/v3/users', json=create, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_update_users_within_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_update_users_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_update_non_existent_user_forbidden(self): update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % uuid.uuid4().hex, json=update, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_delete_users_within_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.delete( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_delete_users_in_other_domains(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: c.delete( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_delete_non_existent_user_forbidden(self): with self.test_client() as c: c.delete( '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) class SystemReaderTests(base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _SystemUserTests, _SystemMemberAndReaderUserTests): def setUp(self): super(SystemReaderTests, self).setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user( system_reader )['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests(base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _SystemUserTests, _SystemMemberAndReaderUserTests): def setUp(self): super(SystemMemberTests, self).setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user( system_member )['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests(base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _SystemUserTests): def setUp(self): super(SystemAdminTests, self).setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_users(self): create = { 'user': { 'name': uuid.uuid4().hex, 'domain': CONF.identity.default_domain_id } } with self.test_client() as c: c.post('/v3/users', json=create, headers=self.headers) def test_user_can_update_users(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers ) def test_user_cannot_update_non_existent_user_not_found(self): update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % uuid.uuid4().hex, json=update, headers=self.headers, expected_status_code=http_client.NOT_FOUND ) def test_user_can_delete_users(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) with self.test_client() as c: c.delete('/v3/users/%s' % user['id'], headers=self.headers) def test_user_cannot_delete_non_existent_user_not_found(self): with self.test_client() as c: c.delete( '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http_client.NOT_FOUND ) class DomainReaderTests(base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _DomainUserTests, _DomainMemberAndReaderUserTests): def setUp(self): super(DomainReaderTests, self).setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, 
unit.new_domain_ref() ) self.domain_id = domain['id'] domain_reader = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_reader)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, domain_id=self.domain_id ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_reader['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class DomainMemberTests(base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _DomainUserTests, _DomainMemberAndReaderUserTests): def setUp(self): super(DomainMemberTests, self).setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_user = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, domain_id=self.domain_id ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_user['password'], domain_id=self.domain_id ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class DomainAdminTests(base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _DomainUserTests): def setUp(self): super(DomainAdminTests, self).setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def _override_policy(self): # TODO(lbragstad): Remove this once the deprecated policies in # keystone.common.policies.users have been removed. This is only # here to make sure we test the new policies instead of the deprecated # ones. Oslo.policy will apply a logical OR to deprecated policies with # new policies to maintain compatibility and give operators a chance to # update permissions or update policies without breaking users. 
This # will cause these specific tests to fail since we're trying to correct # this broken behavior with better scope checking. with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:get_user': up.SYSTEM_READER_OR_DOMAIN_READER_OR_USER, 'identity:list_users': up.SYSTEM_READER_OR_DOMAIN_READER, 'identity:create_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, 'identity:update_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, 'identity:delete_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN } f.write(jsonutils.dumps(overridden_policies)) def test_user_can_create_users_within_domain(self): create = { 'user': { 'domain_id': self.domain_id, 'name': uuid.uuid4().hex } } with self.test_client() as c: c.post('/v3/users', json=create, headers=self.headers) def test_user_cannot_create_users_within_domain_hyphened_domain_id(self): # Finally, show that we can create a new user without any surprises. # But if we specify a 'domain-id' instead of a 'domain_id', we get a # Forbidden response because we fail a policy check before # normalization occurs. domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) create = { 'user': { 'domain-id': domain['id'], 'name': uuid.uuid4().hex } } with self.test_client() as c: c.post( '/v3/users', json=create, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_create_users_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) create = { 'user': { 'domain_id': domain['id'], 'name': uuid.uuid4().hex } } with self.test_client() as c: c.post( '/v3/users', json=create, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_can_update_users_within_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers ) def test_user_can_update_users_within_domain_hyphened_domain_id(self): # If we try updating the user's 'domain_id' by specifying a # 'domain-id', then it'll be stored into extras rather than normalized, # and the user's actual 'domain_id' is not affected. 
domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) update = {'user': {'domain-id': domain['id']}} with self.test_client() as c: r = c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers ) self.assertEqual(domain['id'], r.json['user']['domain-id']) self.assertEqual(self.domain_id, r.json['user']['domain_id']) def test_user_cannot_update_users_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_update_non_existent_user_forbidden(self): update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % uuid.uuid4().hex, json=update, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_can_delete_users_within_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.delete( '/v3/users/%s' % user['id'], headers=self.headers ) def test_user_cannot_delete_users_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: c.delete( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http_client.FORBIDDEN ) def test_user_cannot_delete_non_existent_user_forbidden(self): with self.test_client() as c: c.delete( '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http_client.FORBIDDEN ) class ProjectReaderTests(base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _ProjectUserTests): def setUp(self): super(ProjectReaderTests, self).setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] project = unit.new_project_ref(domain_id=self.domain_id) project = PROVIDERS.resource_api.create_project(project['id'], project) self.project_id = project['id'] project_reader = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(project_reader)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, project_id=self.project_id ) auth = self.build_authentication_request( user_id=self.user_id, password=project_reader['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectMemberTests(base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _ProjectUserTests): def setUp(self): super(ProjectMemberTests, self).setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] project = unit.new_project_ref(domain_id=self.domain_id) project = PROVIDERS.resource_api.create_project(project['id'], project) self.project_id = project['id'] project_member = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(project_member)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id ) auth = self.build_authentication_request( user_id=self.user_id, password=project_member['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectAdminTests(base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _ProjectUserTests): def setUp(self): super(ProjectAdminTests, self).setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def _override_policy(self): # TODO(lbragstad): Remove this once the deprecated policies in # keystone.common.policies.users have been removed. This is only # here to make sure we test the new policies instead of the deprecated # ones. Oslo.policy will OR deprecated policies with new policies to # maintain compatibility and give operators a chance to update # permissions or update policies without breaking users. This will # cause these specific tests to fail since we're trying to correct this # broken behavior with better scope checking. with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:get_user': up.SYSTEM_READER_OR_DOMAIN_READER_OR_USER, 'identity:list_users': up.SYSTEM_READER_OR_DOMAIN_READER, 'identity:create_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, 'identity:update_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, 'identity:delete_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN } f.write(jsonutils.dumps(overridden_policies))
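The _override_policy helpers above exist because oslo.policy ORs deprecated rules with their replacements; pinning explicit rules in a policy file sidesteps that during the deprecation window. A standalone sketch of the same idea using only the standard library (the rule strings are placeholders, not keystone's real defaults):

import json
import tempfile

overridden_policies = {
    'identity:get_user': 'role:reader',    # placeholder rule
    'identity:create_user': 'role:admin',  # placeholder rule
}
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump(overridden_policies, f)
    policy_file_name = f.name  # point oslo.policy at this file
print(policy_file_name)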
main_test.py
# Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import random from main import main PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] TABLE_NAME_FORMAT = 'hello-world-hb-test-{}' TABLE_NAME_RANGE = 10000 def
(capsys): table_name = TABLE_NAME_FORMAT.format( random.randrange(TABLE_NAME_RANGE)) main( PROJECT, BIGTABLE_INSTANCE, table_name) out, _ = capsys.readouterr() assert 'Creating the {} table.'.format(table_name) in out assert 'Writing some greetings to the table.' in out assert 'Getting a single greeting by row key.' in out assert 'Hello World!' in out assert 'Scanning for all greetings' in out assert 'Hello Cloud Bigtable!' in out assert 'Deleting the {} table.'.format(table_name) in out
test_main
avatarSize.component.tsx
import React from 'react'; import { StyleSheet } from 'react-native'; import { Avatar, Layout, } from '@ui-kitten/components'; export const AvatarSizeShowcase = () => ( <Layout style={styles.container}> <Avatar style={styles.avatar} size='tiny' source={require('../../assets/icon.png')}/> <Avatar style={styles.avatar} size='small' source={require('../../assets/icon.png')}/> <Avatar style={styles.avatar} size='medium' source={require('../../assets/icon.png')}/>
</Layout> ); const styles = StyleSheet.create({ container: { flexDirection: 'row', alignItems: 'center', flexWrap: 'wrap', padding: 8, }, avatar: { margin: 8, }, });
<Avatar style={styles.avatar} size='large' source={require('../../assets/icon.png')}/> <Avatar style={styles.avatar} size='giant' source={require('../../assets/icon.png')}/>
bitcoin_es_CO.ts
<TS language="es_CO" version="2.1"> <context> <name>AddressBookPage</name> <message> <source>Right-click to edit address or label</source> <translation>Click derecho para editar la dirección o etiqueta</translation> </message> <message> <source>Create a new address</source> <translation>Crear una nueva dirección</translation> </message> <message> <source>&amp;New</source> <translation>&amp;Nuevo</translation> </message> <message> <source>Copy the currently selected address to the system clipboard</source> <translation>Copiar la dirección actualmente seleccionada al sistema de portapapeles</translation> </message> <message> <source>&amp;Copy</source> <translation>&amp;Copiar</translation> </message> <message> <source>C&amp;lose</source> <translation>C&amp;errar</translation> </message> <message> <source>Delete the currently selected address from the list</source> <translation>Borrar la dirección actualmente seleccionada de la lista</translation> </message> <message> <source>Export the data in the current tab to a file</source> <translation> Exportar los datos en la pestaña actual a un archivo</translation> </message> <message> <source>&amp;Export</source> <translation>&amp;Exportar</translation> </message> <message> <source>&amp;Delete</source> <translation>&amp;Borrar</translation> </message> <message> <source>Choose the address to send coins to</source> <translation>Elija la dirección para enviar las monedas </translation> </message> <message> <source>Choose the address to receive coins with</source> <translation>Elige la dirección para recibir las monedas </translation> </message> <message> <source>C&amp;hoose</source> <translation>Escoger</translation> </message> <message> <source>Sending addresses</source> <translation>Enviando dirección</translation> </message> <message> <source>Receiving addresses</source> <translation>Recibiendo dirección</translation> </message> <message> <source>These are your Elspero addresses for sending payments. Always check the amount and the receiving address before sending coins.</source> <translation>Estas son sus direcciones de Elspero para enviar pagos. Siempre verifique el monto y la dirección de recepción antes de enviar monedas.</translation> </message> <message> <source>These are your Elspero addresses for receiving payments. It is recommended to use a new receiving address for each transaction.</source> <translation>Estas son sus direcciones de Elspero para recibir pagos. 
Se recomienda utilizar una nueva dirección de recepción para cada transacción.</translation> </message> <message> <source>&amp;Copy Address</source> <translation>Copiar dirección</translation> </message> <message> <source>Copy &amp;Label</source> <translation>Copiar y etiquetar</translation> </message> <message> <source>&amp;Edit</source> <translation>Editar</translation> </message> <message> <source>Export Address List</source> <translation>Exportar la lista de direcciones</translation> </message> <message> <source>Comma separated file (*.csv)</source> <translation>Archivo separado por comas (* .csv)</translation> </message> <message> <source>Exporting Failed</source> <translation>Exportación fallida</translation> </message> </context> <context> <name>AddressTableModel</name> <message> <source>Label</source> <translation>Etiqueta</translation> </message> <message> <source>Address</source> <translation>Dirección</translation> </message> <message> <source>(no label)</source> <translation>(no etiqueta)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <source>Passphrase Dialog</source> <translation>Diálogo de contraseña</translation> </message> <message> <source>Enter passphrase</source> <translation>Poner contraseña</translation> </message> <message> <source>New passphrase</source> <translation>Nueva contraseña</translation> </message> <message> <source>Repeat new passphrase</source> <translation>Repetir nueva contraseña</translation> </message> <message> <source>Show password</source> <translation>Mostrar contraseña</translation> </message> <message> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;ten or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>Ingrese la nueva frase de contraseña a la billetera.&lt;br/&gt; Por favor utilice una frase de contraseña&lt;b&gt;diez o más caracteres aleatorios &lt;/b&gt; o &lt;b&gt;ocho o más palabras&lt;/b&gt;</translation> </message> <message> <source>Encrypt wallet</source> <translation>Encriptar la billetera</translation> </message> <message> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>Esta operación necesita su contraseña de billetera para desbloquearla.</translation> </message> <message> <source>Unlock wallet</source> <translation>Desbloquear la billetera</translation> </message> <message> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>Esta operación necesita su contraseña de billetera para descifrarla.</translation> </message> <message> <source>Decrypt wallet</source> <translation>Descifrar la billetera</translation> </message> <message> <source>Change passphrase</source> <translation>Cambiar frase de contraseña</translation> </message> <message> <source>Enter the old passphrase and new passphrase to the wallet.</source> <translation>Ingrese la anterior y la nueva frase de contraseña en la billetera.</translation> </message> <message> <source>Confirm wallet encryption</source> <translation>Confirmar el cifrado de la billetera</translation> </message> <message> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR LITECOINS&lt;/b&gt;!</source> <translation>Advertencia: si encriptas tu billetera y pierdes tu contraseña &lt;b&gt; PIERDES TODOS TUS LITECOINS &lt;/b&gt; !</translation> </message> <message> <source>Are you sure you wish to encrypt your wallet?</source> 
<translation>¿Estás seguro de que deseas encriptar tu billetera?</translation> </message> <message> <source>Wallet encrypted</source> <translation>Billetera encriptada</translation> </message> <message> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>IMPORTANTE: todas las copias de seguridad anteriores que haya realizado de su archivo de billetera se deben reemplazar con el archivo de monedero cifrado recién generado. Por razones de seguridad, las copias de seguridad anteriores del archivo monedero sin encriptar serán inútiles tan pronto como comience a usar el nuevo monedero cifrado.</translation> </message> <message> <source>Wallet encryption failed</source> <translation>El cifrado de Wallet falló</translation> </message> <message> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>El cifrado de Wallet falló debido a un error interno. Su billetera no estaba encriptada.</translation> </message> <message> <source>The supplied passphrases do not match.</source> <translation>Las frases de contraseña suministradas no coinciden.</translation> </message> <message> <source>Wallet unlock failed</source> <translation>El desbloqueo de la billetera falló</translation> </message> <message> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>La frase de contraseña ingresada para el descifrado de la billetera fue incorrecta.</translation> </message> <message> <source>Wallet decryption failed</source> <translation>El descifrado de la billetera falló</translation> </message> <message> <source>Wallet passphrase was successfully changed.</source> <translation>La frase de contraseña de la billetera se cambió con éxito.</translation> </message> <message> <source>Warning: The Caps Lock key is on!</source> <translation>Advertencia: ¡la tecla Bloq Mayús está activada!</translation> </message> </context> <context> <name>BanTableModel</name> <message> <source>IP/Netmask</source> <translation>IP / Máscara de red</translation> </message> <message> <source>Banned Until</source> <translation>Prohibido hasta</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <source>Sign &amp;message...</source> <translation>Firma y mensaje ...</translation> </message> <message> <source>Synchronizing with network...</source> <translation>Sincronizando con la red...</translation> </message> <message> <source>&amp;Overview</source> <translation>&amp;Visión de conjunto</translation> </message> <message> <source>Node</source> <translation>Nodo</translation> </message> <message> <source>Show general overview of wallet</source> <translation>Mostrar vista general de la billetera</translation> </message> <message> <source>&amp;Transactions</source> <translation>&amp;Transacciones</translation> </message> <message> <source>Browse transaction history</source> <translation>Examinar el historial de transacciones</translation> </message> <message> <source>E&amp;xit</source> <translation>S&amp;alir</translation> </message> <message> <source>Quit application</source> <translation>Salir de la aplicación</translation> </message> <message> <source>About &amp;Qt</source> <translation>Acerca de &amp;Qt</translation> </message> <message> <source>Show information about 
Qt</source> <translation>Mostrar información sobre Qt</translation> </message> <message> <source>&amp;Options...</source> <translation>&amp;Opciones</translation> </message> <message> <source>&amp;Encrypt Wallet...</source> <translation>&amp;Billetera Encriptada</translation> </message> <message> <source>&amp;Backup Wallet...</source> <translation>&amp;Billetera Copia de seguridad...</translation> </message> <message> <source>&amp;Change Passphrase...</source> <translation>&amp;Cambiar contraseña...</translation> </message> <message> <source>&amp;Sending addresses...</source> <translation>&amp;Enviando Direcciones...</translation> </message> <message> <source>&amp;Receiving addresses...</source> <translation>&amp;Recibiendo Direcciones...</translation> </message> <message> <source>Open &amp;URI...</source> <translation>Abrir &amp;URL...</translation> </message> <message> <source>Click to disable network activity.</source> <translation>Haga clic para deshabilitar la actividad de la red.</translation> </message> <message> <source>Network activity disabled.</source> <translation>Actividad de red deshabilitada.</translation> </message> <message> <source>Click to enable network activity again.</source> <translation>Haga clic para habilitar nuevamente la actividad de la red.</translation> </message> <message> <source>Reindexing blocks on disk...</source> <translation>Reindexando bloques en el disco ...</translation> </message> <message> <source>Send coins to a Elspero address</source> <translation>Enviando monedas a una dirección de Elspero</translation> </message> <message> <source>Backup wallet to another location</source> <translation>Monedero de respaldo a otra ubicación</translation> </message> <message> <source>Change the passphrase used for wallet encryption</source> <translation>Cambiar la contraseña usando la encriptación de la billetera</translation> </message> <message> <source>&amp;Debug window</source> <translation>&amp;Ventana desarrollador</translation> </message> <message> <source>Open debugging and diagnostic console</source> <translation>Abrir consola de diagnóstico y desarrollo</translation> </message> <message> <source>&amp;Verify message...</source> <translation>&amp;Verificar Mensaje...</translation> </message> <message> <source>Elspero</source> <translation>Elspero</translation> </message> <message> <source>Wallet</source> <translation>Billetera</translation> </message> <message> <source>&amp;Send</source> <translation>&amp;Enviar</translation> </message> <message> <source>&amp;Receive</source> <translation>&amp;Recibir</translation> </message> <message> <source>&amp;Show / Hide</source> <translation>&amp;Mostrar / Ocultar</translation> </message> <message> <source>Show or hide the main Window</source> <translation>Mostrar u ocultar la Ventana Principal</translation> </message> <message> <source>Encrypt the private keys that belong to your wallet</source> <translation>Encripta las claves privadas que pertenecen a tu billetera</translation> </message> <message> <source>Sign messages with your Elspero addresses to prove you own them</source> <translation>Firme mensajes con sus direcciones de Elspero para demostrar que los posee</translation> </message> <message> <source>Verify messages to ensure they were signed with specified Elspero addresses</source> <translation>Verifique los mensajes para asegurarse de que fueron firmados con las direcciones de Elspero especificadas</translation> </message> <message> <source>&amp;File</source> <translation>&amp;Archivo</translation> 
</message> <message> <source>&amp;Settings</source> <translation>&amp;Configuraciones</translation> </message> <message> <source>&amp;Help</source> <translation>&amp;Ayuda</translation> </message> <message> <source>Tabs toolbar</source> <translation>Barra de herramientas de pestañas</translation> </message> <message> <source>Request payments (generates QR codes and elspero: URIs)</source> <translation>Solicitar pagos (genera códigos QR y elspero: URIs)</translation> </message> <message> <source>Show the list of used sending addresses and labels</source> <translation>Mostrar la lista de direcciones y etiquetas de envío usadas</translation> </message> <message> <source>Show the list of used receiving addresses and labels</source> <translation>Mostrar la lista de direcciones y etiquetas de recepción usadas</translation> </message> <message> <source>Open a elspero: URI or payment request</source> <translation>Abra un elspero: URI o solicitud de pago</translation> </message> <message> <source>&amp;Command-line options</source> <translation>Y opciones de línea de comando</translation> </message> <message> <source>Indexing blocks on disk...</source> <translation>Bloques de indexación en el disco ...</translation> </message> <message> <source>Processing blocks on disk...</source> <translation>Procesamiento de bloques en el disco ...</translation> </message> <message> <source>Transactions after this will not yet be visible.</source> <translation>Las transacciones posteriores a esto aún no estarán visibles.</translation> </message> <message> <source>Error</source> <translation>Error</translation> </message> <message> <source>Warning</source> <translation>Advertencia</translation> </message> <message> <source>Information</source> <translation>Información</translation> </message> <message> <source>Up to date</source> <translation>A hoy</translation> </message> <message> <source>Connecting to peers...</source> <translation>Conectando con sus pares ...</translation> </message> <message> <source>Catching up...</source> <translation>Alcanzando...</translation> </message> <message> <source>Sent transaction</source> <translation>Transacción enviada</translation> </message> <message> <source>Incoming transaction</source> <translation>Transacción entrante</translation> </message> <message> <source>HD key generation is &lt;b&gt;enabled&lt;/b&gt;</source> <translation>La generación de la clave HD está &lt;b&gt; activada &lt;/ b&gt;</translation> </message> <message> <source>HD key generation is &lt;b&gt;disabled&lt;/b&gt;</source> <translation>La generación de la clave HD está &lt;b&gt; desactivada &lt;/ b&gt;</translation> </message> <message> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>La billetera está &lt;b&gt; encriptada &lt;/ b&gt; y actualmente &lt;b&gt; desbloqueada &lt;/ b&gt;</translation> </message> <message> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>La billetera está &lt;b&gt; encriptada &lt;/ b&gt; y actualmente está &lt;b&gt; bloqueada &lt;/ b&gt;</translation> </message> <message> <source>A fatal error occurred. Elspero can no longer continue safely and will quit.</source> <translation>Se produjo un error fatal. 
Elspero ya no puede continuar de manera segura y no continuará</translation> </message> </context> <context> <name>CoinControlDialog</name> <message> <source>Coin Selection</source> <translation>Selección de monedas</translation> </message> <message> <source>Quantity:</source> <translation>Cantidad:</translation> </message> <message> <source>Bytes:</source> <translation>Bytes:</translation> </message> <message> <source>Amount:</source> <translation>Cantidad:</translation> </message> <message> <source>Fee:</source> <translation>Comisión:</translation> </message> <message> <source>After Fee:</source> <translation>Después de comisión:</translation> </message> <message> <source>Change:</source> <translation>Cambio:</translation> </message> <message> <source>Tree mode</source> <translation>Modo árbol</translation> </message> <message> <source>List mode</source> <translation>Modo lista</translation> </message> <message> <source>Amount</source> <translation>Cantidad</translation> </message> <message> <source>Received with label</source> <translation>Recibido con etiqueta</translation> </message> <message> <source>Received with address</source> <translation>Recibido con dirección</translation> </message> <message> <source>Date</source> <translation>Fecha</translation> </message> <message> <source>Confirmations</source> <translation>Confirmaciones</translation> </message> <message> <source>Confirmed</source> <translation>Confirmado</translation> </message> <message> <source>Copy address</source> <translation>Copiar dirección</translation> </message> <message> <source>Copy label</source> <translation>Copiar etiqueta</translation> </message> <message> <source>Copy amount</source> <translation>Copiar cantidad</translation> </message> <message> <source>Copy transaction ID</source> <translation>Copiar ID de la transacción</translation> </message> <message> <source>Lock unspent</source> <translation>Bloquear no utilizado</translation> </message> <message> <source>Unlock unspent</source> <translation>Desbloquear no utilizado</translation> </message> <message> <source>Copy quantity</source> <translation>Cantidad de copia</translation> </message> <message> <source>Copy fee</source> <translation>Tarifa de copia</translation> </message> <message> <source>Copy after fee</source> <translation>Copiar después de la tarifa</translation> </message> <message> <source>Copy bytes</source> <translation>Copiar bytes</translation> </message> <message> <source>Copy change</source> <translation>Copiar cambio</translation> </message> <message> <source>yes</source> <translation>si</translation> </message> <message> <source>no</source> <translation>no</translation> </message> <message> <source>(no label)</source> <translation>(no etiqueta)</translation> </message> <message> <source>(change)</source> <translation>(cambio)</translation> </message> </context> <context> <name>EditAddressDialog</name> <message> <source>Edit Address</source> <translation>Editar dirección</translation> </message> <message> <source>&amp;Label</source> <translation>Y etiqueta</translation> </message> <message> <source>The label associated with this address list entry</source> <translation>La etiqueta asociada a esta entrada está en la lista de direcciones</translation> </message> <message> <source>The address associated with this address list entry. This can only be modified for sending addresses.</source> <translation>La dirección asociada con esta entrada está en la lista de direcciones. 
Esto solo se puede modificar para enviar direcciones.</translation> </message> <message> <source>&amp;Address</source> <translation>Y dirección</translation> </message> <message> <source>New receiving address</source> <translation>Nueva dirección de recepción</translation> </message> <message> <source>New sending address</source> <translation>Nueva dirección de envío</translation> </message> <message> <source>Edit receiving address</source> <translation>Editar dirección de recepción</translation> </message> <message> <source>Edit sending address</source> <translation>Editar dirección de envío</translation> </message> <message> <source>Could not unlock wallet.</source> <translation>No se pudo desbloquear la billetera.</translation> </message> <message> <source>New key generation failed.</source> <translation>Nueva generación de claves fallida.</translation> </message> </context> <context> <name>FreespaceChecker</name> <message> <source>A new data directory will be created.</source> <translation>Se creará un nuevo directorio de datos.</translation> </message> <message> <source>name</source> <translation>nombre</translation> </message> <message> <source>Path already exists, and is not a directory.</source> <translation>La ruta ya existe, y no es un directorio ...</translation> </message> <message> <source>Cannot create data directory here.</source> <translation>No se puede crear el directorio de datos aquí.</translation> </message> </context> <context> <name>HelpMessageDialog</name> <message> <source>version</source> <translation>versión</translation> </message> <message> <source>(%1-bit)</source> <translation>(%1-bit)</translation> </message> <message> <source>About %1</source> <translation>Alrededor de %1</translation> </message> <message> <source>Command-line options</source> <translation>Opciones de línea de comando</translation> </message> <message> <source>Usage:</source> <translation>Uso:</translation> </message> <message> <source>command-line options</source> <translation>opciones de línea de comando</translation> </message> <message> <source>UI Options:</source> <translation>Opciones de UI:</translation> </message> <message> <source>Set language, for example "de_DE" (default: system locale)</source> <translation>Establecer el idioma, por ejemplo "de_DE" (predeterminado: configuración regional del sistema)</translation> </message> <message> <source>Start minimized</source> <translation>Iniciar minimizado</translation> </message> <message> <source>Set SSL root certificates for payment request (default: -system-)</source> <translation>Establecer certificados raíz SSL para solicitud de pago (predeterminado: -sistema-)</translation> </message> <message> <source>Reset all settings changed in the GUI</source> <translation>Restablecer todas las configuraciones modificadas en la GUI</translation> </message> </context> <context> <name>Intro</name> <message> <source>Welcome</source> <translation>bienvenido</translation> </message> <message> <source>Welcome to %1.</source> <translation>Bienvenido al %1</translation> </message> <message> <source>If you have chosen to limit block chain storage (pruning), the historical data must still be downloaded and processed, but will be deleted afterward to keep your disk usage low.</source> <translation>Si ha elegido limitar el almacenamiento de la cadena de bloques (pruning), los datos históricos todavía se deben descargar y procesar, pero se eliminarán posteriormente para mantener el uso del disco bajo.</translation> </message> <message> <source>Use the 
default data directory</source> <translation>Use el directorio de datos predeterminado</translation> </message> <message> <source>Use a custom data directory:</source> <translation>Use un directorio de datos personalizado:</translation> </message> <message> <source>Elspero</source> <translation>Elspero</translation> </message> <message> <source>The wallet will also be stored in this directory.</source> <translation>La billetera también se almacenará en este directorio.</translation> </message> <message> <source>Error</source> <translation>Error</translation> </message> </context> <context> <name>ModalOverlay</name> <message> <source>Form</source> <translation>Formar</translation> </message> <message> <source>Recent transactions may not yet be visible, and therefore your wallet's balance might be incorrect. This information will be correct once your wallet has finished synchronizing with the elspero network, as detailed below.</source> <translation>Es posible que las transacciones recientes aún no estén visibles y, por lo tanto, el saldo de su billetera podría ser incorrecto. Esta información será correcta una vez que su billetera haya terminado de sincronizarse con la red elspero, como se detalla a continuación.</translation> </message> <message> <source>Attempting to spend elsperos that are affected by not-yet-displayed transactions will not be accepted by the network.</source> <translation>La red no aceptará intentar gastar elsperos que se vean afectados por transacciones aún no mostradas</translation> </message> <message> <source>Number of blocks left</source> <translation>Cantidad de bloques restantes</translation> </message> <message> <source>Unknown...</source> <translation>Desconocido...</translation> </message> <message> <source>Last block time</source> <translation>Hora del último bloque</translation> </message> <message> <source>Progress</source> <translation>Progreso</translation> </message> <message> <source>Progress increase per hour</source> <translation>Aumento de progreso por hora</translation> </message> <message> <source>calculating...</source> <translation>calculando...</translation> </message> <message> <source>Estimated time left until synced</source> <translation>Tiempo estimado restante hasta sincronización</translation> </message> <message> <source>Hide</source> <translation>Esconder</translation> </message> </context> <context> <name>OpenURIDialog</name> <message> <source>Open URI</source> <translation>URI abierto</translation> </message> <message> <source>Open payment request from URI or file</source> <translation>Abrir solicitud de pago de URI o archivo</translation> </message> <message> <source>URI:</source> <translation>URI:</translation> </message> <message> <source>Select payment request file</source> <translation>Seleccionar archivo de solicitud de pago</translation> </message> <message> <source>Select payment request file to open</source> <translation>Seleccione el archivo de solicitud de pago para abrir</translation> </message> </context> <context> <name>OptionsDialog</name> <message> <source>Options</source> <translation>Opciones</translation> </message> <message> <source>&amp;Main</source> <translation>&amp;Principal</translation> </message> <message> <source>Size of &amp;database cache</source> <translation>Tamaño de la memoria caché de la base de datos</translation> </message> <message> <source>MB</source> <translation>MB</translation> </message> <message> <source>Number of script &amp;verification threads</source> <translation>Cantidad de secuencias 
de comandos y verificación</translation> </message> <message> <source>IP address of the proxy (e.g. IPv4: 127.0.0.1 / IPv6: ::1)</source> <translation>Dirección IP del proxy (por ejemplo, IPv4: 127.0.0.1 / IPv6: :: 1)</translation> </message> <message> <source>Shows if the supplied default SOCKS5 proxy is used to reach peers via this network type.</source> <translation>Muestra si el proxy SOCKS5 suministrado se utiliza para llegar a los pares a través de este tipo de red.</translation> </message> <message> <source>Use separate SOCKS&amp;5 proxy to reach peers via Tor hidden services:</source> <translation>Use SOCKS&amp;5 y proxy por separado para llegar a sus compañeros a través de los servicios ocultos de Tor:</translation> </message> <message> <source>Hide the icon from the system tray.</source> <translation>Ocultar el icono de la bandeja del sistema.</translation> </message> <message> <source>&amp;Hide tray icon</source> <translation>Ocultar icono de bandeja</translation> </message> <message> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Exit in the menu.</source> <translation>Minimice en lugar de salir de la aplicación cuando la ventana esté cerrada. Cuando esta opción está habilitada, la aplicación se cerrará solo después de seleccionar Salir en el menú.</translation> </message> <message> <source>Active command-line options that override above options:</source> <translation>Opciones de línea de comando activas que anulan las opciones anteriores:</translation> </message> <message> <source>Open Configuration File</source> <translation>Abrir archivo de configuración</translation> </message> <message> <source>Reset all client options to default.</source> <translation>Restablecer todas las opciones del cliente a los valores predeterminados.</translation> </message> <message> <source>&amp;Reset Options</source> <translation>Y Restablecer opciones</translation> </message> <message> <source>&amp;Network</source> <translation>&amp;Red</translation> </message> <message> <source>(0 = auto, &lt;0 = leave that many cores free)</source> <translation>(0 = auto, &lt;0 = deja muchos núcleos gratis)</translation> </message> <message> <source>W&amp;allet</source> <translation>Billetera</translation> </message> <message> <source>Expert</source> <translation>Experto</translation> </message> <message> <source>Enable coin &amp;control features</source> <translation>Habilite las funciones de moneda y control</translation> </message> <message> <source>If you disable the spending of unconfirmed change, the change from a transaction cannot be used until that transaction has at least one confirmation. This also affects how your balance is computed.</source> <translation>Si deshabilita el gasto de un cambio no confirmado, el cambio de una transacción no se puede usar hasta que esa transacción tenga al menos una confirmación. Esto también afecta cómo se calcula su saldo.</translation> </message> <message> <source>&amp;Spend unconfirmed change</source> <translation>&amp; Gastar cambio no confirmado</translation> </message> <message> <source>Automatically open the Elspero client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation>Abra automáticamente el puerto cliente de Elspero en el enrutador. 
Esto solo funciona cuando su enrutador admite UPnP y está habilitado.</translation> </message> <message> <source>Map port using &amp;UPnP</source> <translation>Puerto de mapa usando &amp; UPnP</translation> </message> <message> <source>Accept connections from outside.</source> <translation>Acepta conexiones desde afuera.</translation> </message> <message> <source>Allow incomin&amp;g connections</source> <translation>Permitir conexiones entrantes</translation> </message> <message> <source>Connect to the Elspero network through a SOCKS5 proxy.</source> <translation>Conéctese a la red de Elspero a través de un proxy SOCKS5.</translation> </message> <message> <source>&amp;Connect through SOCKS5 proxy (default proxy):</source> <translation>Conectar a través del proxy SOCKS5 (proxy predeterminado):</translation> </message> <message> <source>Proxy &amp;IP:</source> <translation>Proxy &amp;IP:</translation> </message> <message> <source>&amp;Port:</source> <translation>Puerto:</translation> </message> <message> <source>Port of the proxy (e.g. 9050)</source> <translation>Puerto del proxy (por ejemplo, 9050)</translation> </message> <message> <source>Used for reaching peers via:</source> <translation>Utilizado para llegar a los compañeros a través de:</translation> </message> <message> <source>IPv4</source> <translation>IPv4</translation> </message> <message> <source>IPv6</source> <translation>IPv6</translation> </message> <message> <source>Tor</source> <translation>Tor</translation> </message> <message> <source>Connect to the Elspero network through a separate SOCKS5 proxy for Tor hidden services.</source> <translation>Conéctese a la red de Elspero a través de un proxy SOCKS5 separado para los servicios Tor ocultos.</translation> </message> <message> <source>&amp;Window</source> <translation>Ventana</translation> </message> <message> <source>Show only a tray icon after minimizing the window.</source> <translation>Mostrar solo un icono de bandeja después de minimizar la ventana.</translation> </message> <message> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>Minimice la bandeja en lugar de la barra de tareas</translation> </message> <message> <source>M&amp;inimize on close</source> <translation>Minimice al cerrar</translation> </message> <message> <source>&amp;Display</source> <translation>Monitor</translation> </message> <message> <source>User Interface &amp;language:</source> <translation>Interfaz de usuario e idioma:</translation> </message> <message> <source>&amp;Unit to show amounts in:</source> <translation> Unidad para mostrar montos en:</translation> </message> <message> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>Elija la unidad de subdivisión predeterminada para mostrar en la interfaz y al enviar monedas.</translation> </message> <message> <source>Whether to show coin control features or not.</source> <translation>Ya sea para mostrar las funciones de control de monedas o no.</translation> </message> <message> <source>&amp;Third party transaction URLs</source> <translation>URLs de transacciones de terceros</translation> </message> <message> <source>&amp;OK</source> <translation>&amp;OK</translation> </message> <message> <source>&amp;Cancel</source> <translation>Cancelar</translation> </message> <message> <source>default</source> <translation>defecto</translation> </message> <message> <source>none</source> <translation>ninguno </translation> </message> <message> <source>Confirm options 
reset</source> <translation>Confirmar restablecimiento de opciones</translation> </message> <message> <source>Client restart required to activate changes.</source> <translation>Se requiere el reinicio del cliente para activar los cambios.</translation> </message> <message> <source>Client will be shut down. Do you want to proceed?</source> <translation>El cliente se cerrará. ¿Quieres proceder?</translation> </message> <message> <source>Configuration options</source> <translation>Opciones de configuración</translation> </message> <message> <source>The configuration file is used to specify advanced user options which override GUI settings. Additionally, any command-line options will override this configuration file.</source> <translation>El archivo de configuración se utiliza para especificar opciones de usuario avanzadas que anulan la configuración de la GUI. Además, cualquier opción de línea de comandos anulará este archivo de configuración.</translation> </message> <message> <source>Error</source> <translation>Error</translation> </message> <message> <source>The configuration file could not be opened.</source> <translation>El archivo de configuración no se pudo abrir.</translation> </message> <message> <source>This change would require a client restart.</source> <translation>Este cambio requeriría un reinicio del cliente.</translation> </message> <message> <source>The supplied proxy address is invalid.</source> <translation>La dirección proxy suministrada no es válida.</translation> </message> </context> <context> <name>OverviewPage</name> <message> <source>Form</source> <translation>Formulario</translation> </message> <message> <source>The displayed information may be out of date. Your wallet automatically synchronizes with the Elspero network after a connection is established, but this process has not completed yet.</source> <translation>La información mostrada puede estar desactualizada. 
Su billetera se sincroniza automáticamente con la red de Elspero después de establecer una conexión, pero este proceso aún no se ha completado.</translation> </message> <message> <source>Watch-only:</source> <translation>Ver-solo:</translation> </message> <message> <source>Available:</source> <translation>Disponible</translation> </message> <message> <source>Your current spendable balance</source> <translation>Su saldo disponible actual</translation> </message> <message> <source>Pending:</source> <translation>Pendiente:</translation> </message> <message> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the spendable balance</source> <translation>Total de transacciones que aún no se han confirmado y aún no cuentan para el saldo disponible</translation> </message> <message> <source>Immature:</source> <translation>Inmaduro:</translation> </message> <message> <source>Mined balance that has not yet matured</source> <translation>Balance minero que aún no ha madurado</translation> </message> <message> <source>Balances</source> <translation>Balances</translation> </message> <message> <source>Total:</source> <translation>Total:</translation> </message> <message> <source>Your current total balance</source> <translation>Su saldo total actual</translation> </message> <message> <source>Your current balance in watch-only addresses</source> <translation>Tu saldo actual en solo ver direcciones</translation> </message> <message> <source>Recent transactions</source> <translation>Transacciones recientes</translation> </message> <message> <source>Unconfirmed transactions to watch-only addresses</source> <translation>Transacciones no confirmadas para ver solo direcciones</translation> </message> <message> <source>Mined balance in watch-only addresses that has not yet matured</source> <translation>Balance minero ver solo direcciones que aún no ha madurado</translation> </message> <message> <source>Current total balance in watch-only addresses</source> <translation>Saldo total actual en direcciones de solo reloj</translation> </message> </context> <context> <name>PaymentServer</name> <message> <source>Payment request error</source> <translation>Error de solicitud de pago</translation> </message> <message> <source>Cannot start elspero: click-to-pay handler</source> <translation>No se puede iniciar Elspero: controlador de clic para pagar</translation> </message> <message> <source>URI handling</source> <translation>Manejo de URI</translation> </message> <message> <source>URI cannot be parsed! This can be caused by an invalid Elspero address or malformed URI parameters.</source> <translation>¡URI no puede ser analizado! Esto puede deberse a una dirección de Elspero no válida o a parámetros de URI mal formados.</translation> </message> <message> <source>Payment request file handling</source> <translation>Manejo de archivos de solicitud de pago</translation> </message> <message> <source>Payment request file cannot be read! This can be caused by an invalid payment request file.</source> <translation>¡El archivo de solicitud de pago no se puede leer! 
Esto puede deberse a un archivo de solicitud de pago no válido.</translation> </message> <message> <source>Payment request rejected</source> <translation>Solicitud de pago rechazada</translation> </message> <message> <source>Payment request network doesn't match client network.</source> <translation>La red de solicitud de pago no coincide con la red del cliente.</translation> </message> <message> <source>Payment request expired.</source> <translation>Solicitud de pago caducada.</translation> </message> <message> <source>Payment request is not initialized.</source> <translation>La solicitud de pago no está inicializada.</translation> </message> <message> <source>Unverified payment requests to custom payment scripts are unsupported.</source> <translation>Las solicitudes de pago no verificadas para los scripts de pago personalizados no son compatibles.</translation> </message> <message> <source>Invalid payment request.</source> <translation>Solicitud de pago inválida.</translation> </message> <message> <source>Payment request cannot be parsed!</source> <translation>¡La solicitud de pago no se puede analizar!</translation> </message> <message> <source>Network request error</source> <translation>Error de solicitud de red</translation> </message> <message> <source>Payment acknowledged</source> <translation>Pago reconocido</translation> </message> </context> <context> <name>PeerTableModel</name> <message> <source>User Agent</source> <translation>Agente de usuario</translation> </message> <message> <source>Node/Service</source> <translation>Nodo / Servicio</translation> </message> <message> <source>NodeId</source> <translation>NodeId</translation> </message> <message> <source>Sent</source> <translation>Expedido</translation> </message> <message> <source>Received</source> <translation>Recibido</translation> </message> </context> <context> <name>QObject</name> <message> <source>Amount</source> <translation>Cantidad</translation> </message> <message> <source>%1 d</source> <translation>%1 d</translation> </message> <message> <source>%1 h</source> <translation>%1 h</translation> </message> <message> <source>%1 m</source> <translation>%1 m</translation> </message> <message> <source>%1 s</source> <translation>%1 s</translation> </message> <message> <source>None</source> <translation>Ninguno</translation> </message> <message> <source>N/A</source> <translation>N/D</translation> </message> <message> <source>%1 ms</source> <translation>%1 ms</translation> </message> <message numerus="yes"> <source>%n second(s)</source> <translation><numerusform>%n segundo</numerusform><numerusform>%n segundos</numerusform></translation> </message> <message numerus="yes"> <source>%n minute(s)</source> <translation><numerusform>%n minuto</numerusform><numerusform>%n minutos</numerusform></translation> </message> <message numerus="yes"> <source>%n hour(s)</source> <translation><numerusform>%n hora</numerusform><numerusform>%n horas</numerusform></translation> </message> <message numerus="yes"> <source>%n day(s)</source> <translation><numerusform>%n día</numerusform><numerusform>%n días</numerusform></translation> </message> <message numerus="yes"> <source>%n week(s)</source> <translation><numerusform>%n semana</numerusform><numerusform>%n semanas</numerusform></translation> </message> <message> <source>%1 and %2</source> <translation>%1 y %2</translation> </message> <message numerus="yes"> <source>%n year(s)</source> <translation><numerusform>%n año</numerusform><numerusform>%n años</numerusform></translation> </message> 
<message> <source>%1 B</source> <translation>%1 B</translation> </message> <message> <source>%1 KB</source> <translation>%1 KB</translation> </message> <message> <source>%1 MB</source> <translation>%1 MB</translation> </message> <message> <source>%1 GB</source> <translation>%1 GB</translation> </message> <message> <source>unknown</source> <translation>desconocido</translation> </message> </context> <context> <name>QObject::QObject</name> <message> <source>Error: %1</source> <translation>Error: %1</translation> </message> </context> <context> <name>QRImageWidget</name> <message> <source>&amp;Save Image...</source> <translation>Guardar imagen...</translation> </message> <message> <source>&amp;Copy Image</source> <translation>Copiar imagen</translation> </message> <message> <source>Save QR Code</source> <translation>Guardar código QR</translation> </message> <message> <source>PNG Image (*.png)</source> <translation>Imagen PNG (*.png)</translation> </message> </context> <context> <name>RPCConsole</name> <message> <source>N/A</source> <translation>N/D</translation> </message> <message> <source>Client version</source> <translation>Versión cliente</translation> </message> <message> <source>&amp;Information</source> <translation>Información</translation> </message> <message> <source>Debug window</source> <translation>Ventana de depuración</translation> </message> <message> <source>General</source> <translation>General</translation> </message> <message> <source>Using BerkeleyDB version</source> <translation>Usando la versión BerkeleyDB</translation> </message> <message> <source>Datadir</source> <translation>Datadir</translation> </message> <message> <source>Startup time</source> <translation>Tiempo de inicio</translation> </message> <message> <source>Network</source> <translation>Red</translation> </message> <message> <source>Name</source> <translation>Nombre</translation> </message> <message> <source>Number of connections</source> <translation>Número de conexiones </translation> </message> <message> <source>Block chain</source> <translation>Cadena de bloques</translation> </message> <message> <source>Current number of blocks</source> <translation>Número actual de bloques</translation> </message> <message> <source>Memory Pool</source> <translation>Grupo de memoria</translation>
<translation>Número actual de transacciones</translation> </message> <message> <source>Memory usage</source> <translation>Uso de memoria</translation> </message> <message> <source>&amp;Reset</source> <translation>Reiniciar</translation> </message> <message> <source>Received</source> <translation>Recibido</translation> </message> <message> <source>Sent</source> <translation>Expedido</translation> </message> <message> <source>&amp;Peers</source> <translation>Pares</translation> </message> <message> <source>Banned peers</source> <translation>Pares prohibidos</translation> </message> <message> <source>Select a peer to view detailed information.</source> <translation>Seleccione un par para ver información detallada.</translation> </message> <message> <source>Whitelisted</source> <translation>Incluido en la lista blanca</translation> </message> <message> <source>Direction</source> <translation>Dirección</translation> </message> <message> <source>Version</source> <translation>Versión</translation> </message> <message> <source>Starting Block</source> <translation>Bloque de inicio</translation> </message> <message> <source>Synced Headers</source> <translation>Encabezados sincronizados</translation> </message> <message> <source>Synced Blocks</source> <translation>Bloques sincronizados</translation> </message> <message> <source>User Agent</source> <translation>Agente de usuario</translation> </message> <message> <source>Decrease font size</source> <translation>Disminuir tamaño de letra</translation> </message> <message> <source>Increase font size</source> <translation>Aumenta el tamaño de la fuente</translation> </message> <message> <source>Services</source> <translation>Servicios</translation> </message> <message> <source>Ban Score</source> <translation>Puntuación Ban</translation> </message> <message> <source>Connection Time</source> <translation>Tiempo de conexión</translation> </message> <message> <source>Last Send</source> <translation>Último envío</translation> </message> <message> <source>Last Receive</source> <translation>Última recepción</translation> </message> <message> <source>Ping Time</source> <translation>Tiempo Ping</translation> </message> <message> <source>The duration of a currently outstanding ping.</source> <translation>La duración de un ping actualmente pendiente.</translation> </message> <message> <source>Ping Wait</source> <translation>Ping espera</translation> </message> <message> <source>Min Ping</source> <translation>Min Ping</translation> </message> <message> <source>Time Offset</source> <translation>Desplazamiento de tiempo</translation> </message> <message> <source>Last block time</source> <translation>Hora del último bloque</translation> </message> <message> <source>&amp;Open</source> <translation>Abierto</translation> </message> <message> <source>&amp;Console</source> <translation>Consola</translation> </message> <message> <source>&amp;Network Traffic</source> <translation>Tráfico de red</translation> </message> <message> <source>Totals</source> <translation>Totales </translation> </message> <message> <source>In:</source> <translation>En:</translation> </message> <message> <source>Out:</source> <translation>Fuera:</translation> </message> <message> <source>Debug log file</source> <translation>Archivo de registro de depuración</translation> </message> <message> <source>Clear console</source> <translation>Consola limpia</translation> </message> <message> <source>1 &amp;hour</source> <translation>1 hora</translation> </message> <message> <source>1 &amp;day</source> 
<translation>1 día</translation> </message> <message> <source>1 &amp;week</source> <translation>1 semana</translation> </message> <message> <source>1 &amp;year</source> <translation>1 año</translation> </message> <message> <source>&amp;Disconnect</source> <translation>Desconectar</translation> </message> <message> <source>Ban for</source> <translation>Prohibición de</translation> </message> <message> <source>WARNING: Scammers have been active, telling users to type commands here, stealing their wallet contents. Do not use this console without fully understanding the ramifications of a command.</source> <translation>ADVERTENCIA: los estafadores han estado activos, pidiendo a los usuarios que escriban comandos aquí, robando el contenido de su billetera. No use esta consola sin entender completamente las ramificaciones de un comando</translation> </message> <message> <source>Network activity disabled</source> <translation>Actividad de red deshabilitada</translation> </message> <message> <source>never</source> <translation>nunca </translation> </message> <message> <source>Inbound</source> <translation>Entrante</translation> </message> <message> <source>Outbound</source> <translation>Salida</translation> </message> <message> <source>Yes</source> <translation>Si </translation> </message> <message> <source>No</source> <translation>No</translation> </message> <message> <source>Unknown</source> <translation>Desconocido</translation> </message> </context> <context> <name>ReceiveCoinsDialog</name> <message> <source>&amp;Amount:</source> <translation>Cantidad</translation> </message> <message> <source>&amp;Label:</source> <translation>Etiqueta:</translation> </message> <message> <source>&amp;Message:</source> <translation>Mensaje:</translation> </message> <message> <source>An optional message to attach to the payment request, which will be displayed when the request is opened. Note: The message will not be sent with the payment over the Elspero network.</source> <translation>Un mensaje opcional para adjuntar a la solicitud de pago, que se mostrará cuando se abra la solicitud. Nota: El mensaje no se enviará con el pago a través de la red de Elspero.</translation> </message> <message> <source>An optional label to associate with the new receiving address.</source> <translation>Una etiqueta opcional para asociar con la nueva dirección de recepción</translation> </message> <message> <source>Use this form to request payments. All fields are &lt;b&gt;optional&lt;/b&gt;.</source> <translation>Use este formulario para solicitar pagos. Todos los campos son &lt;b&gt; opcionales &lt;/ b&gt;.</translation> </message> <message> <source>An optional amount to request. Leave this empty or zero to not request a specific amount.</source> <translation>Un monto opcional para solicitar. Deje esto vacío o en cero para no solicitar una cantidad específica.</translation> </message> <message> <source>Clear all fields of the form.</source> <translation>Borre todos los campos del formulario.</translation> </message> <message> <source>Clear</source> <translation>Aclarar</translation> </message> <message> <source>Requested payments history</source> <translation>Historial de pagos solicitado</translation> </message> <message> <source>&amp;Request payment</source> <translation>Solicitar pago</translation> </message> <message> <source>Bech32 addresses (BIP-173) are cheaper to spend from and offer better protection against typos. 
When unchecked a P2SH wrapped SegWit address will be created, compatible with older wallets.</source> <translation>Las direcciones Bech32 (BIP-173) son más baratas para gastar y ofrecen una mejor protección contra los errores tipográficos. Cuando no se selecciona, se creará una dirección SegWit envuelta en P2SH, compatible con monederos más antiguos.</translation> </message> <message> <source>Generate Bech32 address</source> <translation>Generar dirección Bech32</translation> </message> <message> <source>Show the selected request (does the same as double clicking an entry)</source> <translation>Mostrar la solicitud seleccionada (hace lo mismo que hacer doble clic en una entrada)</translation> </message> <message> <source>Show</source> <translation>Mostrar</translation> </message> <message> <source>Remove the selected entries from the list</source> <translation>Eliminar las entradas seleccionadas de la lista</translation> </message> <message> <source>Remove</source> <translation>Eliminar</translation> </message> <message> <source>Copy URI</source> <translation>Copiar URI</translation> </message> <message> <source>Copy label</source> <translation>Copiar etiqueta</translation> </message> <message> <source>Copy message</source> <translation>Copiar mensaje</translation> </message> <message> <source>Copy amount</source> <translation>Copiar cantidad</translation> </message> </context> <context> <name>ReceiveRequestDialog</name> <message> <source>QR Code</source> <translation>Código QR</translation> </message> <message> <source>Copy &amp;URI</source> <translation>Copiar URI</translation> </message> <message> <source>Copy &amp;Address</source> <translation>Copiar dirección</translation> </message> <message> <source>&amp;Save Image...</source> <translation>Guardar imagen...</translation> </message> <message> <source>Address</source> <translation>Dirección</translation> </message> <message> <source>Amount</source> <translation>Cantidad</translation> </message> <message> <source>Label</source> <translation>Etiqueta</translation> </message> </context> <context> <name>RecentRequestsTableModel</name> <message> <source>Date</source> <translation>Fecha</translation> </message> <message> <source>Label</source> <translation>Etiqueta</translation> </message> <message> <source>(no label)</source> <translation>(no etiqueta)</translation> </message> </context> <context> <name>SendCoinsDialog</name> <message> <source>Quantity:</source> <translation>Cantidad:</translation> </message> <message> <source>Bytes:</source> <translation>Bytes:</translation> </message> <message> <source>Amount:</source> <translation>Cantidad:</translation> </message> <message> <source>Fee:</source> <translation>Comisión:</translation> </message> <message> <source>After Fee:</source> <translation>Después de comisión:</translation> </message> <message> <source>Change:</source> <translation>Cambio:</translation> </message> <message> <source>Hide</source> <translation>Esconder</translation> </message> <message> <source>Clear all fields of the form.</source> <translation>Borre todos los campos del formulario.</translation> </message> <message> <source>Copy quantity</source> <translation>Cantidad de copia</translation> </message> <message> <source>Copy amount</source> <translation>Copiar cantidad</translation> </message> <message> <source>Copy fee</source> <translation> Tarifa de copia</translation> </message> <message> <source>Copy after fee</source> <translation>Copiar después de la tarifa</translation> </message> <message> <source>Copy 
bytes</source> <translation>Copiar bytes</translation> </message> <message> <source>Copy change</source> <translation>Copiar cambio</translation> </message> <message> <source>Payment request expired.</source> <translation>Solicitud de pago caducada.</translation> </message> <message> <source>(no label)</source> <translation>(no etiqueta)</translation> </message> </context> <context> <name>SendCoinsEntry</name> <message> <source>&amp;Label:</source> <translation>Etiqueta:</translation> </message> </context> <context> <name>SendConfirmationDialog</name> <message> <source>Yes</source> <translation>Si </translation> </message> </context> <context> <name>ShutdownWindow</name> </context> <context> <name>SignVerifyMessageDialog</name> </context> <context> <name>SplashScreen</name> </context> <context> <name>TrafficGraphWidget</name> </context> <context> <name>TransactionDesc</name> <message> <source>Date</source> <translation>Fecha</translation> </message> <message> <source>unknown</source> <translation>desconocido</translation> </message> <message> <source>Amount</source> <translation>Cantidad</translation> </message> </context> <context> <name>TransactionDescDialog</name> </context> <context> <name>TransactionTableModel</name> <message> <source>Date</source> <translation>Fecha</translation> </message> <message> <source>Label</source> <translation>Etiqueta</translation> </message> <message> <source>(no label)</source> <translation>(no etiqueta)</translation> </message> </context> <context> <name>TransactionView</name> <message> <source>Copy address</source> <translation>Copiar dirección</translation> </message> <message> <source>Copy label</source> <translation>Copiar etiqueta</translation> </message> <message> <source>Copy amount</source> <translation>Copiar cantidad</translation> </message> <message> <source>Copy transaction ID</source> <translation>Copiar ID de la transacción</translation> </message> <message> <source>Comma separated file (*.csv)</source> <translation>Archivo separado por comas (* .csv)</translation> </message> <message> <source>Confirmed</source> <translation>Confirmado</translation> </message> <message> <source>Date</source> <translation>Fecha</translation> </message> <message> <source>Label</source> <translation>Etiqueta</translation> </message> <message> <source>Address</source> <translation>Dirección</translation> </message> <message> <source>Exporting Failed</source> <translation>Exportación fallida</translation> </message> </context> <context> <name>UnitDisplayStatusBarControl</name> </context> <context> <name>WalletFrame</name> </context> <context> <name>WalletModel</name> </context> <context> <name>WalletView</name> <message> <source>Export the data in the current tab to a file</source> <translation> Exportar los datos en la pestaña actual a un archivo</translation> </message> </context> <context> <name>bitcoin-core</name> <message> <source>Elspero Core</source> <translation>Elspero Core</translation> </message> <message> <source>Information</source> <translation>Información</translation> </message> <message> <source>Warning</source> <translation>Peligro.</translation> </message> <message> <source>Insufficient funds</source> <translation>Fondos Insuficientes</translation> </message> <message> <source>Loading wallet...</source> <translation>Cargando billetera...</translation> </message> <message> <source>Rescanning...</source> <translation>Reescaneando</translation> </message> <message> <source>Done loading</source> <translation>Listo Cargando</translation> </message> 
<message> <source>Error</source> <translation>Error</translation> </message> </context> </TS>
ApduDataExtNetworkParameterResponse.go
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package model import ( "github.com/apache/plc4x/plc4go/internal/plc4go/spi/utils" ) // Code generated by code-generation. DO NOT EDIT. // The data-structure of this message type ApduDataExtNetworkParameterResponse struct { *ApduDataExt // Arguments. Length uint8 } // The corresponding interface type IApduDataExtNetworkParameterResponse interface { // GetLengthInBytes returns the length in bytes GetLengthInBytes() uint16 // GetLengthInBits returns the length in bits GetLengthInBits() uint16 // Serialize serializes this type Serialize(writeBuffer utils.WriteBuffer) error } /////////////////////////////////////////////////////////// // Accessors for discriminator values. /////////////////////////////////////////////////////////// func (m *ApduDataExtNetworkParameterResponse) ExtApciType() uint8 { return 0x1B } func (m *ApduDataExtNetworkParameterResponse) GetExtApciType() uint8 { return 0x1B } func (m *ApduDataExtNetworkParameterResponse) InitializeParent(parent *ApduDataExt) {} /////////////////////////////////////////////////////////// // Accessors for property fields. /////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////// // Accessors for virtual fields. /////////////////////////////////////////////////////////// // NewApduDataExtNetworkParameterResponse factory function for ApduDataExtNetworkParameterResponse func NewApduDataExtNetworkParameterResponse(length uint8) *ApduDataExt { child := &ApduDataExtNetworkParameterResponse{ ApduDataExt: NewApduDataExt(length), } child.Child = child return child.ApduDataExt } func CastApduDataExtNetworkParameterResponse(structType interface{}) *ApduDataExtNetworkParameterResponse { castFunc := func(typ interface{}) *ApduDataExtNetworkParameterResponse { if casted, ok := typ.(ApduDataExtNetworkParameterResponse); ok
{ return &casted }
if casted, ok := typ.(*ApduDataExtNetworkParameterResponse); ok { return casted } if casted, ok := typ.(ApduDataExt); ok { return CastApduDataExtNetworkParameterResponse(casted.Child) } if casted, ok := typ.(*ApduDataExt); ok { return CastApduDataExtNetworkParameterResponse(casted.Child) } return nil } return castFunc(structType) } func (m *ApduDataExtNetworkParameterResponse) GetTypeName() string { return "ApduDataExtNetworkParameterResponse" } func (m *ApduDataExtNetworkParameterResponse) GetLengthInBits() uint16 { return m.GetLengthInBitsConditional(false) } func (m *ApduDataExtNetworkParameterResponse) GetLengthInBitsConditional(lastItem bool) uint16 { lengthInBits := uint16(m.GetParentLengthInBits()) return lengthInBits } func (m *ApduDataExtNetworkParameterResponse) GetLengthInBytes() uint16 { return m.GetLengthInBits() / 8 } func ApduDataExtNetworkParameterResponseParse(readBuffer utils.ReadBuffer, length uint8) (*ApduDataExt, error) { if pullErr := readBuffer.PullContext("ApduDataExtNetworkParameterResponse"); pullErr != nil { return nil, pullErr } if closeErr := readBuffer.CloseContext("ApduDataExtNetworkParameterResponse"); closeErr != nil { return nil, closeErr } // Create a partially initialized instance _child := &ApduDataExtNetworkParameterResponse{ ApduDataExt: &ApduDataExt{}, } _child.ApduDataExt.Child = _child return _child.ApduDataExt, nil } func (m *ApduDataExtNetworkParameterResponse) Serialize(writeBuffer utils.WriteBuffer) error { ser := func() error { if pushErr := writeBuffer.PushContext("ApduDataExtNetworkParameterResponse"); pushErr != nil { return pushErr } if popErr := writeBuffer.PopContext("ApduDataExtNetworkParameterResponse"); popErr != nil { return popErr } return nil } return m.SerializeParent(writeBuffer, m, ser) } func (m *ApduDataExtNetworkParameterResponse) String() string { if m == nil { return "<nil>" } buffer := utils.NewBoxedWriteBufferWithOptions(true, true) m.Serialize(buffer) return buffer.GetBox().String() }
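// Usage sketch (illustrative, not part of the generated code above; assumes an
// "fmt" import). It round-trips the message through the same boxed write buffer
// that String() uses:
//
//   apdu := NewApduDataExtNetworkParameterResponse(1)
//   buffer := utils.NewBoxedWriteBufferWithOptions(true, true)
//   if err := CastApduDataExtNetworkParameterResponse(apdu).Serialize(buffer); err == nil {
//       fmt.Println(buffer.GetBox().String())
//   }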
evaluation-all.js
import {TYPE, URL} from "../../config" import 'whatwg-fetch' const _URL = URL.EVALUATION.EVALUATION_ALL const _TYPE = TYPE.EVALUATION.EVALUATION_ALL export function
getUserEvaluation
(calendar, semester) { return function (dispatch) { fetch(_URL.GET_USER_EVALUATION(calendar, semester), { credentials: 'same-origin' }).then(function (response) { return response.json() }).then(function (json) { dispatch({ type: _TYPE.SET_USER_EVALUATION, payload: json }) }) } } export function UpdateUserEvaluation(user_evaluation) { return function (dispatch) { dispatch({ type: _TYPE.SET_USER_EVALUATION, payload: user_evaluation }) } }
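// Usage sketch (illustrative; assumes a redux-thunk store wired to these action
// creators, and the argument values are placeholders):
//
//   store.dispatch(getUserEvaluation(2021, 1))
//   store.dispatch(UpdateUserEvaluation(editedEvaluation))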
templates.rs
//! Run with //! //! ```not_rust //! cargo run --example templates //! ```
use askama::Template;
use axum::{prelude::*, response::IntoResponse}; use http::{Response, StatusCode}; use std::net::SocketAddr; #[tokio::main] async fn main() { tracing_subscriber::fmt::init(); // build our application with some routes let app = route("/greet/:name", get(greet)); // run it let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); tracing::debug!("listening on {}", addr); hyper::Server::bind(&addr) .serve(app.into_make_service()) .await .unwrap(); } async fn greet(params: extract::UrlParamsMap) -> impl IntoResponse { let name = params .get("name") .expect("`name` will be there if route was matched") .to_string(); let template = HelloTemplate { name }; HtmlTemplate(template) } #[derive(Template)] #[template(path = "hello.html")] struct HelloTemplate { name: String, } struct HtmlTemplate<T>(T); impl<T> IntoResponse for HtmlTemplate<T> where T: Template, { fn into_response(self) -> http::Response<Body> { match self.0.render() { Ok(html) => response::Html(html).into_response(), Err(err) => Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) .body(Body::from(format!( "Failed to render template. Error: {}", err ))) .unwrap(), } } }
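// A matching template is expected at `templates/hello.html`, since askama resolves
// `#[template(path = "...")]` relative to the crate's `templates/` directory; a
// minimal body (illustrative) could be:
//
// ```not_rust
// <h1>Hello, {{ name }}!</h1>
// ```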
node.rs
//! Node, and related, feature set //! use libc::{c_char, c_void}; use std::cell::RefCell; use std::collections::{HashMap, HashSet}; use std::error::Error; use std::ffi::{CStr, CString}; use std::hash::{Hash, Hasher}; use std::ptr; use std::rc::Rc; use std::str; use crate::bindings::*; use crate::c_helpers::*; use crate::tree::namespace::Namespace; use crate::tree::nodetype::NodeType; use crate::tree::{Document, DocumentRef, DocumentWeak}; use crate::xpath::Context; /// Guard treshold for enforcing runtime mutability checks for Nodes pub static mut NODE_RC_MAX_GUARD: usize = 2; /// Set the guard value for the max Rc "strong count" allowed for mutable use of a Node /// Default is 2 pub fn set_node_rc_guard(value: usize) { unsafe { NODE_RC_MAX_GUARD = value; } } type NodeRef = Rc<RefCell<_Node>>; #[derive(Debug)] struct _Node { /// libxml's xmlNodePtr node_ptr: xmlNodePtr, /// Reference to parent `Document` document: DocumentWeak, /// Bookkeep removal from a parent unlinked: bool, } /// An xml node #[derive(Clone, Debug)] pub struct Node(NodeRef); impl Hash for Node { /// Generates a hash value from the `node_ptr` value. fn hash<H: Hasher>(&self, state: &mut H) { self.node_ptr().hash(state); } } impl PartialEq for Node { /// Two nodes are considered equal, if they point to the same xmlNode. fn eq(&self, other: &Node) -> bool { self.node_ptr() == other.node_ptr() } } impl Eq for Node {} impl Drop for _Node { /// Free node if it isn't bound in some document /// Warning: xmlFreeNode is RECURSIVE into the node's children, so this may lead to segfaults if used carelessly fn drop(&mut self) { if self.unlinked { let node_ptr = self.node_ptr; if !node_ptr.is_null() { unsafe { xmlFreeNode(node_ptr); } } } } } impl Node { /// Create a new node, bound to a given document. pub fn new(name: &str, ns: Option<Namespace>, doc: &Document) -> Result<Self, ()> { // We will only allow to work with document-bound nodes for now, to avoid the problems of memory management. let c_name = CString::new(name).unwrap(); let ns_ptr = match ns { None => ptr::null_mut(), Some(ns) => ns.ns_ptr(), }; unsafe { let node = xmlNewDocRawNode( doc.doc_ptr(), ns_ptr, c_name.as_bytes().as_ptr(), ptr::null(), ); if node.is_null() { Err(()) } else { Ok(Node::wrap_new(node, &doc.0)) } } } /// Immutably borrows the underlying libxml2 `xmlNodePtr` pointer pub fn node_ptr(&self) -> xmlNodePtr { self.0.borrow().node_ptr } /// Mutably borrows the underlying libxml2 `xmlNodePtr` pointer /// Also protects against mutability conflicts at runtime. pub fn node_ptr_mut(&mut self) -> Result<xmlNodePtr, String> { let weak_count = Rc::weak_count(&self.0); let strong_count = Rc::strong_count(&self.0); // The basic idea would be to use `Rc::get_mut` to guard against multiple borrows. // However, our approach to bookkeeping nodes implies there is *always* a second Rc reference // in the document.nodes Hash. So rather than use `get_mut` directly, the // correct check would be to have a weak count of 0 and a strong count <=2 (one for self, one for .nodes) let guard_ok = unsafe { weak_count == 0 && strong_count <= NODE_RC_MAX_GUARD }; if guard_ok { Ok(self.0.borrow_mut().node_ptr) } else { Err(format!( "Can not mutably reference a shared Node {:?}! 
Rc: weak count: {:?}; strong count: {:?}", self.get_name(), weak_count, strong_count, )) } } /// Wrap a libxml node ptr with a Node fn _wrap(node_ptr: xmlNodePtr, unlinked: bool, document: &DocumentRef) -> Node { // If already seen, return saved Node if let Some(node) = document.borrow().get_node(node_ptr) { return node.clone(); } // If newly encountered pointer, wrap let node = _Node { node_ptr, document: Rc::downgrade(&document), unlinked, }; let wrapped_node = Node(Rc::new(RefCell::new(node))); document .borrow_mut() .insert_node(node_ptr, wrapped_node.clone()); wrapped_node } /// Wrap a node already linked to a `document` tree pub(crate) fn wrap(node_ptr: xmlNodePtr, document: &DocumentRef) -> Node { Node::_wrap(node_ptr, false, document) } /// Wrap, a node owned by, but not yet linked to, a `document` pub(crate) fn wrap_new(node_ptr: xmlNodePtr, document: &DocumentRef) -> Node { Node::_wrap(node_ptr, true, document) } /// Create a new text node, bound to a given document pub fn new_text(content: &str, doc: &Document) -> Result<Self, ()> { // We will only allow to work with document-bound nodes for now, to avoid the problems of memory management. let c_content = CString::new(content).unwrap(); unsafe { let node = xmlNewDocText(doc.doc_ptr(), c_content.as_bytes().as_ptr()); if node.is_null() { Err(()) } else { Ok(Node::wrap_new(node, &doc.0)) } } } /// Create a mock node, used for a placeholder argument pub fn mock(doc: &Document) -> Self { Node::new("mock", None, &doc).unwrap() } /// Create a mock node, used for a placeholder argument pub fn null() -> Self { Node(Rc::new(RefCell::new(_Node { node_ptr: ptr::null_mut(), document: Rc::downgrade(&Document::null_ref()), unlinked: true, }))) } /// `libc::c_void` isn't hashable and cannot be made hashable pub fn to_hashable(&self) -> usize { self.node_ptr() as usize } pub(crate) fn get_docref(&self) -> DocumentWeak { self.0.borrow().document.clone() } /// Returns the next sibling if it exists pub fn get_next_sibling(&self) -> Option<Node> { let ptr = xmlNextSibling(self.node_ptr()); self.ptr_as_option(ptr) } /// Returns the previous sibling if it exists pub fn get_prev_sibling(&self) -> Option<Node> { let ptr = xmlPrevSibling(self.node_ptr()); self.ptr_as_option(ptr) } /// Returns the first child if it exists pub fn get_first_child(&self) -> Option<Node> { let ptr = xmlGetFirstChild(self.node_ptr()); self.ptr_as_option(ptr) } /// Returns the first element child if it exists pub fn get_first_element_child(&self) -> Option<Node> { match self.get_first_child() { None => None, Some(child) => { let mut current_node = child; while !current_node.is_element_node() { if let Some(sibling) = current_node.get_next_sibling() { current_node = sibling; } else { break; } } if current_node.is_element_node() { Some(current_node) } else { None } } } } /// Returns the last child if it exists pub fn get_last_child(&self) -> Option<Node> { let ptr = unsafe { xmlGetLastChild(self.node_ptr()) }; self.ptr_as_option(ptr) } /// Returns all child nodes of the given node as a vector pub fn get_child_nodes(&self) -> Vec<Node> { let mut children = Vec::new(); if let Some(first_child) = self.get_first_child() { children.push(first_child); while let Some(sibling) = children.last().unwrap().get_next_sibling() { children.push(sibling) } } children } /// Returns all child elements of the given node as a vector pub fn get_child_elements(&self) -> Vec<Node> { self .get_child_nodes() .into_iter() .filter(|n| n.get_type() == Some(NodeType::ElementNode)) .collect::<Vec<Node>>() } 
/// Returns the parent if it exists pub fn get_parent(&self) -> Option<Node> { let ptr = xmlGetParent(self.node_ptr()); self.ptr_as_option(ptr) } /// Get the node type pub fn get_type(&self) -> Option<NodeType> { NodeType::from_int(xmlGetNodeType(self.node_ptr())) } /// Add a previous sibling pub fn add_prev_sibling(&mut self, new_sibling: &mut Node) -> Result<(), Box<dyn Error>> { new_sibling.set_linked(); unsafe { if xmlAddPrevSibling(self.node_ptr_mut()?, new_sibling.node_ptr_mut()?).is_null() { Err(From::from("add_prev_sibling returned NULL")) } else { Ok(()) } } } /// Add a next sibling pub fn add_next_sibling(&mut self, new_sibling: &mut Node) -> Result<(), Box<dyn Error>> { new_sibling.set_linked(); unsafe { if xmlAddNextSibling(self.node_ptr_mut()?, new_sibling.node_ptr_mut()?).is_null() { Err(From::from("add_next_sibling returned NULL")) } else { Ok(()) } } } /// Returns true iff it is a text node pub fn is_text_node(&self) -> bool { self.get_type() == Some(NodeType::TextNode) } /// Checks if the given node is an Element pub fn is_element_node(&self) -> bool { self.get_type() == Some(NodeType::ElementNode) } /// Checks if the underlying libxml2 pointer is `NULL` pub fn is_null(&self) -> bool { self.node_ptr().is_null() } /// Returns the name of the node (empty string if name pointer is `NULL`) pub fn get_name(&self) -> String { let name_ptr = xmlNodeGetName(self.node_ptr()); if name_ptr.is_null() { return String::new(); } //empty string let c_string = unsafe { CStr::from_ptr(name_ptr) }; c_string.to_string_lossy().into_owned() } /// Sets the name of this `Node` pub fn set_name(&mut self, name: &str) -> Result<(), Box<dyn Error>> { let c_name = CString::new(name).unwrap(); unsafe { xmlNodeSetName(self.node_ptr_mut()?, c_name.as_bytes().as_ptr()) } Ok(()) } /// Returns the content of the node /// (assumes UTF-8 XML document) pub fn get_content(&self) -> String { let content_ptr = unsafe { xmlNodeGetContent(self.node_ptr()) }; if content_ptr.is_null() { //empty string when none return String::new(); } let c_string = unsafe { CStr::from_ptr(content_ptr as *const c_char) }; let rust_utf8 = c_string.to_string_lossy().into_owned(); unsafe { libc::free(content_ptr as *mut c_void); } rust_utf8 } /// Sets the text content of this `Node` pub fn set_content(&mut self, content: &str) -> Result<(), Box<dyn Error>> { let c_content = CString::new(content).unwrap(); unsafe { xmlNodeSetContent(self.node_ptr_mut()?, c_content.as_bytes().as_ptr()) } Ok(()) } /// Returns the value of property `name` pub fn get_property(&self, name: &str) -> Option<String> { let c_name = CString::new(name).unwrap(); let value_ptr = unsafe { xmlGetProp(self.node_ptr(), c_name.as_bytes().as_ptr()) }; if value_ptr.is_null() { return None; } let c_value_string = unsafe { CStr::from_ptr(value_ptr as *const c_char) }; let prop_str = c_value_string.to_string_lossy().into_owned(); // A safe way to free the memory is using libc::free -- I have experienced that xmlFree from libxml2 is not reliable unsafe { libc::free(value_ptr as *mut c_void); } Some(prop_str) } /// Returns the value of property `name` in namespace `ns` pub fn get_property_ns(&self, name: &str, ns: &str) -> Option<String> { let c_name = CString::new(name).unwrap(); let c_ns = CString::new(ns).unwrap(); let value_ptr = unsafe { xmlGetNsProp( self.node_ptr(), c_name.as_bytes().as_ptr(), c_ns.as_bytes().as_ptr(), ) }; if value_ptr.is_null() { return None; } let c_value_string = unsafe { CStr::from_ptr(value_ptr as *const c_char) }; let prop_str = 
c_value_string.to_string_lossy().into_owned(); unsafe { libc::free(value_ptr as *mut c_void); } Some(prop_str) } /// Return an attribute as a `Node` struct of type AttributeNode pub fn get_property_node(&self, name: &str) -> Option<Node> { let c_name = CString::new(name).unwrap(); unsafe { let attr_node = xmlHasProp(self.node_ptr(), c_name.as_bytes().as_ptr()); self.ptr_as_option(attr_node as xmlNodePtr) } } /// Sets the value of property `name` to `value` pub fn set_property(&mut self, name: &str, value: &str) -> Result<(), Box<dyn Error>> { let c_name = CString::new(name).unwrap(); let c_value = CString::new(value).unwrap(); unsafe { xmlSetProp( self.node_ptr_mut()?, c_name.as_bytes().as_ptr(), c_value.as_bytes().as_ptr(), ) }; Ok(()) } /// Sets a namespaced attribute pub fn set_property_ns( &mut self, name: &str, value: &str, ns: &Namespace, ) -> Result<(), Box<dyn Error>> { let c_name = CString::new(name).unwrap(); let c_value = CString::new(value).unwrap(); unsafe { xmlSetNsProp( self.node_ptr_mut()?, ns.ns_ptr(), c_name.as_bytes().as_ptr(), c_value.as_bytes().as_ptr(), ) }; Ok(()) } /// Removes the property of given `name` pub fn remove_property(&mut self, name: &str) -> Result<(), Box<dyn Error>> { let c_name = CString::new(name).unwrap(); unsafe { let attr_node = xmlHasProp(self.node_ptr_mut()?, c_name.as_bytes().as_ptr()); if !attr_node.is_null() { let remove_prop_status = xmlRemoveProp(attr_node); if remove_prop_status == 0 { Ok(()) } else { // Propagate libxml2 failure to remove Err(From::from(format!( "libxml2 failed to remove property with status: {:?}", remove_prop_status ))) } } else { // silently no-op if asked to remove a property which is not present Ok(()) } } } /// Alias for get_property pub fn get_attribute(&self, name: &str) -> Option<String> { self.get_property(name) } /// Alias for get_property_ns pub fn get_attribute_ns(&self, name: &str, ns: &str) -> Option<String>
{ self.get_property_ns(name, ns) }
/// Alias for get_property_node pub fn get_attribute_node(&self, name: &str) -> Option<Node> { self.get_property_node(name) } /// Alias for set_property pub fn set_attribute(&mut self, name: &str, value: &str) -> Result<(), Box<dyn Error>> { self.set_property(name, value) } /// Alias for set_property_ns pub fn set_attribute_ns( &mut self, name: &str, value: &str, ns: &Namespace, ) -> Result<(), Box<dyn Error>> { self.set_property_ns(name, value, ns) } /// Alias for remove_property pub fn remove_attribute(&mut self, name: &str) -> Result<(), Box<dyn Error>> { self.remove_property(name) } /// Get a copy of the attributes of this node pub fn get_properties(&self) -> HashMap<String, String> { let mut attributes = HashMap::new(); let mut attr_names = Vec::new(); unsafe { let mut current_prop = xmlGetFirstProperty(self.node_ptr()); while !current_prop.is_null() { let name_ptr = xmlAttrName(current_prop); let c_name_string = CStr::from_ptr(name_ptr); let name = c_name_string.to_string_lossy().into_owned(); attr_names.push(name); current_prop = xmlNextPropertySibling(current_prop); } } for name in attr_names { let value = self.get_property(&name).unwrap_or_default(); attributes.insert(name, value); } attributes } /// Alias for `get_properties` pub fn get_attributes(&self) -> HashMap<String, String> { self.get_properties() } /// Gets the active namespace associated of this node pub fn get_namespace(&self) -> Option<Namespace> { let ns_ptr = xmlNodeNs(self.node_ptr()); if ns_ptr.is_null() { None } else { Some(Namespace { ns_ptr }) } } /// Gets a list of namespaces associated with this node pub fn get_namespaces(&self, doc: &Document) -> Vec<Namespace> { let list_ptr_raw = unsafe { xmlGetNsList(doc.doc_ptr(), self.node_ptr()) }; if list_ptr_raw.is_null() { Vec::new() } else { let mut namespaces = Vec::new(); let mut ptr_iter = list_ptr_raw as *mut xmlNsPtr; unsafe { while !ptr_iter.is_null() && !(*ptr_iter).is_null() { namespaces.push(Namespace { ns_ptr: *ptr_iter }); ptr_iter = ptr_iter.add(1); } /* TODO: valgrind suggests this technique isn't sufficiently fluent: ==114895== Conditional jump or move depends on uninitialised value(s) ==114895== at 0x4E9962F: xmlFreeNs (in /usr/lib/x86_64-linux-gnu/libxml2.so.2.9.4) ==114895== by 0x195CE8: libxml::tree::Node::get_namespaces (tree.rs:723) ==114895== by 0x12E7B6: base_tests::can_work_with_namespaces (base_tests.rs:537) DG: I could not improve on this state without creating memory leaks after ~1 hour, so I am marking it as future work. */ /* TODO: How do we properly deallocate here? 
The approach below reliably segfaults tree_tests on 1 thread */ // println!("\n-- xmlfreens on : {:?}", list_ptr_raw); // xmlFreeNs(list_ptr_raw as xmlNsPtr); } namespaces } } /// Get a list of namespaces declared with this node pub fn get_namespace_declarations(&self) -> Vec<Namespace> { if self.get_type() != Some(NodeType::ElementNode) { // only element nodes can have declarations return Vec::new(); } let mut namespaces = Vec::new(); let mut ns_ptr = xmlNodeNsDeclarations(self.node_ptr()); while !ns_ptr.is_null() { if !xmlNsPrefix(ns_ptr).is_null() || !xmlNsHref(ns_ptr).is_null() { namespaces.push(Namespace { ns_ptr }); } ns_ptr = xmlNextNsSibling(ns_ptr); } namespaces } /// Sets a `Namespace` for the node pub fn set_namespace(&mut self, namespace: &Namespace) -> Result<(), Box<dyn Error>> { unsafe { xmlSetNs(self.node_ptr_mut()?, namespace.ns_ptr()); } Ok(()) } /// Looks up the prefix of a namespace from its URI, based around a given `Node` pub fn lookup_namespace_prefix(&self, href: &str) -> Option<String> { if href.is_empty() { return None; } let c_href = CString::new(href).unwrap(); unsafe { let ptr_mut = self.node_ptr(); let ns_ptr = xmlSearchNsByHref(xmlGetDoc(ptr_mut), ptr_mut, c_href.as_bytes().as_ptr()); if !ns_ptr.is_null() { let ns = Namespace { ns_ptr }; let ns_prefix = ns.get_prefix(); Some(ns_prefix) } else { None } } } /// Looks up the URI of a namespace from its prefix, based around a given `Node` pub fn lookup_namespace_uri(&self, prefix: &str) -> Option<String> { if prefix.is_empty() { return None; } let c_prefix = CString::new(prefix).unwrap(); unsafe { let ns_ptr = xmlSearchNs( xmlGetDoc(self.node_ptr()), self.node_ptr(), c_prefix.as_bytes().as_ptr(), ); if !ns_ptr.is_null() { let ns = Namespace { ns_ptr }; let ns_href = ns.get_href(); if !ns_href.is_empty() { Some(ns_href) } else { None } } else { None } } } // TODO: Clear a future Document namespaces vec /// Removes the namespaces of this `Node` and its children!
pub fn recursively_remove_namespaces(&mut self) -> Result<(), Box<dyn Error>> { xmlNodeRecursivelyRemoveNs(self.node_ptr_mut()?); Ok(()) } /// Get a set of class names from this node's attributes pub fn get_class_names(&self) -> HashSet<String> { let mut set = HashSet::new(); if let Some(value) = self.get_property("class") { for n in value.split(' ') { set.insert(n.to_owned()); } } set } /// Creates a new `Node` as child to the self `Node` pub fn add_child(&mut self, child: &mut Node) -> Result<(), String> { child.set_linked(); unsafe { let new_child_ptr = xmlAddChild(self.node_ptr_mut()?, child.node_ptr_mut()?); if new_child_ptr.is_null() { Err("add_child encountered NULL pointer".to_string()) } else { Ok(()) } } } /// Creates a new `Node` as child to the self `Node` pub fn new_child(&mut self, ns: Option<Namespace>, name: &str) -> Result<Node, Box<dyn Error>> { let c_name = CString::new(name).unwrap(); let ns_ptr = match ns { None => ptr::null_mut(), Some(mut ns) => ns.ns_ptr_mut(), }; unsafe { let new_ptr = xmlNewChild( self.node_ptr_mut()?, ns_ptr, c_name.as_bytes().as_ptr(), ptr::null(), ); Ok(Node::wrap(new_ptr, &self.get_docref().upgrade().unwrap())) } } /// Adds a new text child, to this `Node` pub fn add_text_child( &mut self, ns: Option<Namespace>, name: &str, content: &str, ) -> Result<Node, Box<dyn Error>> { let c_name = CString::new(name).unwrap(); let c_content = CString::new(content).unwrap(); let ns_ptr = match ns { None => ptr::null_mut(), Some(mut ns) => ns.ns_ptr_mut(), }; unsafe { let new_ptr = xmlNewTextChild( self.node_ptr_mut()?, ns_ptr, c_name.as_bytes().as_ptr(), c_content.as_bytes().as_ptr(), ); Ok(Node::wrap(new_ptr, &self.get_docref().upgrade().unwrap())) } } /// Append text to this `Node` pub fn append_text(&mut self, content: &str) -> Result<(), Box<dyn Error>> { let c_len = content.len() as i32; if c_len > 0 { let c_content = CString::new(content).unwrap(); unsafe { xmlNodeAddContentLen(self.node_ptr_mut()?, c_content.as_bytes().as_ptr(), c_len); } } Ok(()) } /// Unbinds the Node from its siblings and Parent, but not from the Document it belongs to. /// If the node is not inserted into the DOM afterwards, it will be lost after the program terminates. /// From a low level view, the unbound node is stripped /// from the context it is and inserted into a (hidden) document-fragment. 
pub fn unlink_node(&mut self) { let node_type = self.get_type(); if node_type != Some(NodeType::DocumentNode) && node_type != Some(NodeType::DocumentFragNode) && !self.is_unlinked() { // only unlink nodes that are currently marked as linked self.set_unlinked(); unsafe { xmlUnlinkNode(self.node_ptr()); } } } /// Alias for `unlink_node` pub fn unlink(&mut self) { self.unlink_node() } /// Alias for `unlink_node` pub fn unbind_node(&mut self) { self.unlink_node() } /// Alias for `unlink_node` pub fn unbind(&mut self) { self.unlink_node() } /// Checks if node is marked as unlinked pub fn is_unlinked(&self) -> bool { self.0.borrow().unlinked } fn ptr_as_option(&self, node_ptr: xmlNodePtr) -> Option<Node> { if node_ptr.is_null() { None } else { let doc_ref = self.get_docref().upgrade().unwrap(); let new_node = Node::wrap(node_ptr, &doc_ref); Some(new_node) } } /// internal helper to ensure the node is marked as linked/imported/adopted in the main document tree pub(crate) fn set_linked(&self) { self.0.borrow_mut().unlinked = false; } /// internal helper to ensure the node is marked as unlinked/removed from the main document tree pub(crate) fn set_unlinked(&self) { self.0.borrow_mut().unlinked = true; self .get_docref() .upgrade() .unwrap() .borrow_mut() .forget_node(self.node_ptr()); } /// find nodes via xpath, at a specified node or the document root pub fn findnodes(&self, xpath: &str) -> Result<Vec<Node>, ()> { let mut context = Context::from_node(&self)?; context.findnodes(xpath, Some(self)) } /// replace `self`'s `old` child node with a `new` node in the same position /// borrowed from Perl's XML::LibXML pub fn replace_child_node( &mut self, mut new: Node, mut old: Node, ) -> Result<Node, Box<dyn Error>> { // if newNode == oldNode or self == newNode then do nothing, just return the old node. if new == old || self == &new { // nothing to do here, already in place Ok(old) } else if self.get_type() == Some(NodeType::ElementNode) { if let Some(old_parent) = old.get_parent() { if &old_parent == self { // unlink new to be available for insertion new.unlink(); // mid-child case old.add_next_sibling(&mut new)?; old.unlink(); Ok(old) } else { Err(From::from(format!( "Old node was not a child of {:?} parent. Registered parent is {:?} instead.", self.get_name(), old_parent.get_name() ))) } } else { Err(From::from(format!( "Old node was not a child of {:?} parent. No registered parent exists.", self.get_name() ))) } } else { Err(From::from( "Can only call replace_child_node on a NodeType::ElementNode parent.", )) } } }
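// Usage sketch (illustrative; assumes the crate's `Document::new()` constructor,
// which lives outside this file):
//
//   let doc = Document::new().unwrap();
//   let mut root = Node::new("root", None, &doc).unwrap();
//   root.set_attribute("id", "main").unwrap();
//   let mut child = root.new_child(None, "child").unwrap();
//   child.append_text("hello").unwrap();
//   assert_eq!(root.get_child_elements().len(), 1);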
day12.rs
use std::convert::TryInto; use std::io; use std::io::prelude::*; pub fn solve(input: impl BufRead, part: u8) -> io::Result<()> { let instrs = parse(input); let solution = match part { 1 => part_1(&instrs), 2 => part_2(&instrs), _ => unimplemented!(), }; println!("{}", solution); Ok(()) } fn parse(mut input: impl BufRead) -> Vec<Instr> { let mut input_str = String::new(); input.read_to_string(&mut input_str).unwrap(); input_str.lines().map(|l| Instr::parse(l)).collect() } #[derive(Debug, Clone)] enum Dir { North, East, South, West, } impl Dir { fn to_angle(&self) -> usize { match self { Dir::North => 270, Dir::East => 0, Dir::South => 90, Dir::West => 180, } } fn from_angle(angle: usize) -> Self { let angle = angle % 360; match angle { 270 => Dir::North, 0 => Dir::East, 90 => Dir::South, 180 => Dir::West, _ => unreachable!(), } } } #[derive(Debug)] enum Instr { Turn(usize), Forward(usize), Move(Dir, usize), } impl Instr { fn parse(str: &str) -> Self { let (i, n) = str.split_at(1); let n = n.parse::<usize>().unwrap(); match i.as_bytes()[0] { b'N' => Instr::Move(Dir::North, n), b'E' => Instr::Move(Dir::East, n), b'S' => Instr::Move(Dir::South, n), b'W' => Instr::Move(Dir::West, n), b'R' => Instr::Turn(n), b'L' => Instr::Turn(360 - n), b'F' => Instr::Forward(n), _ => unreachable!(), } } } #[derive(Debug)] struct Position { x: isize, y: isize, w_x: isize, w_y: isize, orientation: Dir, } impl Position { fn new() -> Self { Self { x: 0, y: 0, w_x: 10, w_y: 1, orientation: Dir::East, } }
fn move_towards(&mut self, dir: &Dir, dist: &usize) {
let dist = *dist as isize; match *dir { Dir::North => self.y += dist, Dir::East => self.x += dist, Dir::South => self.y -= dist, Dir::West => self.x -= dist, } } fn step(&mut self, instr: &Instr) { match instr { Instr::Move(dir, n) => self.move_towards(dir, n), Instr::Forward(n) => { let orientation = self.orientation.clone(); self.move_towards(&orientation, n); } Instr::Turn(n) => self.orientation = Dir::from_angle(n + self.orientation.to_angle()), } } fn move_waypoint(&mut self, dir: &Dir, dist: &usize) { let dist = *dist as isize; match *dir { Dir::North => self.w_y += dist, Dir::East => self.w_x += dist, Dir::South => self.w_y -= dist, Dir::West => self.w_x -= dist, } } fn step_waypoint(&mut self, instr: &Instr) { match instr { Instr::Move(dir, n) => self.move_waypoint(dir, n), Instr::Forward(n) => { let n = *n as isize; self.x += n * self.w_x; self.y += n * self.w_y; } Instr::Turn(n) => match n { 0 => {} 90 => { let w_x = self.w_x; self.w_x = self.w_y; self.w_y = -w_x; } 180 => { self.w_x = -self.w_x; self.w_y = -self.w_y; } 270 => { let w_x = self.w_x; self.w_x = -self.w_y; self.w_y = w_x; } _ => unreachable!(), }, } } fn manhattan(&self) -> usize { (isize::abs(self.x) + isize::abs(self.y)) .try_into() .unwrap() } } fn part_1(instrs: &[Instr]) -> usize { let mut position = Position::new(); for instr in instrs { position.step(instr); } position.manhattan() } fn part_2(instrs: &[Instr]) -> usize { let mut position = Position::new(); for instr in instrs { position.step_waypoint(instr); } position.manhattan() } #[cfg(test)] mod tests { use super::*; const EXAMPLE: &str = "F10 N3 F7 R90 F11"; #[test] fn part_1_example() { let instrs = parse(io::Cursor::new(EXAMPLE)); assert_eq!(part_1(&instrs), 25); } #[test] fn part_2_example() { let instrs = parse(io::Cursor::new(EXAMPLE)); assert_eq!(part_2(&instrs), 286); } }
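// Note on the `Instr::Turn` arms in `step_waypoint`: `Instr::parse` normalizes left
// turns to clockwise ones (`L n` becomes `Turn(360 - n)`), and one clockwise quarter
// turn maps the waypoint (x, y) to (y, -x); the 180 and 270 arms are that rotation
// applied twice and three times.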
b_my_solution.py
# B. Balanced tree # ID of the successful submission: 66593272 class Node: def __init__(self, value, left=None, right=None): self.value = value self.right = right self.left = left def height(root): if root is None:
return 0 return max(height(root.left), height(root.right)) + 1 def solution(root): if root is None: return True left_height = height(root.left) right_height = height(root.right) return (abs(left_height - right_height) <= 1 and solution(root.left) and solution(root.right)) def test(): node1 = Node(1) node2 = Node(-5) node3 = Node(3, node1, node2) node4 = Node(10) node5 = Node(2, node3, node4) assert solution(node5)
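# Note: height() is recomputed at every node, so the check is O(n^2) on a
# degenerate tree; acceptable for this exercise. A minimal entry point (illustrative):

if __name__ == '__main__':
    test()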
SearchRequest.py
from AbstractRequest import AbstractRequest class SearchRequest(AbstractRequest): name = "" sequence = "" sequence_option = "" sequence_length = "" n_terminus_id = "" c_terminus_id = "" target_group_id = "" target_object_id = "" synthesis_type = "" kingdom_id = "" bond_id = "" unusual_amino_acid_id = "" author_id = "" journal_id = "" article_year = "" article_title = "" complexity = "monomer" target_species_id = "" non_standart_experimental_conditions = "false" hemolytic_and_cytotoxic_activities = "false" def query_type(self):
return "search"
def get_parameters(self): return { "complexity": self.complexity, "name": self.name, "sequence": self.sequence, "sequence_option": self.sequence_option, "sequence_length": self.sequence_length, "n_terminus_id": self.n_terminus_id, "c_terminus_id": self.c_terminus_id, "target_group_id": self.target_group_id, "target_object_id": self.target_object_id, "synthesis_type": self.synthesis_type, "kingdom_id": self.kingdom_id, "bond_id": self.bond_id, "unusual_amino_acid_id": self.unusual_amino_acid_id, "author_id": self.author_id, "journal_id": self.journal_id, "article_year": self.article_year, "article_title": self.article_title, "target_species_id": self.target_species_id, "non_standart_experimental_conditions": self.non_standart_experimental_conditions, "hemolytic_and_cytotoxic_activities": self.hemolytic_and_cytotoxic_activities }
return "search"
room.py
import uuid import auth from api.socket.constants import GAME_NS from app import app, sio from models import PlayerRoom from models.role import Role from state.game import game_state from utils import logger @sio.on("Room.Info.InviteCode.Refresh", namespace=GAME_NS) @auth.login_required(app, sio) async def refresh_invite_code(sid: str):
pr: PlayerRoom = game_state.get(sid) if pr.role != Role.DM: logger.warning(f"{pr.player.name} attempted to refresh the invitation code.") return pr.room.invitation_code = uuid.uuid4() pr.room.save() await sio.emit( "Room.Info.InvitationCode.Set", str(pr.room.invitation_code), room=sid, namespace=GAME_NS, )
@sio.on("Room.Info.Players.Kick", namespace=GAME_NS) @auth.login_required(app, sio) async def kick_player(sid: str, player_id: int): pr: PlayerRoom = game_state.get(sid) if pr.role != Role.DM: logger.warning(f"{pr.player.name} attempted to refresh the invitation code.") return pr = PlayerRoom.get_or_none(player=player_id, room=pr.room) if pr: for psid in game_state.get_sids(player=pr.player, room=pr.room): await sio.disconnect(psid, namespace=GAME_NS) pr.delete_instance(True) @sio.on("Room.Delete", namespace=GAME_NS) @auth.login_required(app, sio) async def delete_session(sid: str): pr: PlayerRoom = game_state.get(sid) if pr.role != Role.DM: logger.warning(f"{pr.player.name} attempted to REMOVE A SESSION.") return pr.room.delete_instance(True) @sio.on("Room.Info.Set.Locked", namespace=GAME_NS) @auth.login_required(app, sio) async def set_locked_game_state(sid: str, is_locked: bool): pr: PlayerRoom = game_state.get(sid) if pr.role != Role.DM: logger.warning(f"{pr.player.name} attempted to set the locked game_state.") return pr.room.is_locked = is_locked pr.room.save() for psid, player in game_state.get_users(room=pr.room): if player != pr.room.creator: await sio.disconnect(psid, namespace=GAME_NS)
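# Client-side usage sketch (illustrative; assumes a python-socketio client emitting
# the events handled above, with GAME_NS as the namespace string):
#
#   await client.emit("Room.Info.InviteCode.Refresh", namespace=GAME_NS)
#   await client.emit("Room.Info.Players.Kick", player_id, namespace=GAME_NS)
#   await client.emit("Room.Info.Set.Locked", True, namespace=GAME_NS)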
db.go
package lsmtree import ( "fmt" ) // Options is used to configure how the database will behave. type Options struct { // MaxWALSegmentSize (in bytes) is the largest a single WAL segment file will grow to before a // new segment is started. This does not include the last transaction to be appended to a single // WAL segment. If the last transaction puts the segment over this limit then it will still be // appended (resulting in a large segment) but then a new segment will be created for subsequent // transactions. // Default is 8kb.
MaxWALSegmentSize uint64 // MaxValueChunkSize (in bytes) is the largest a single value file will grow to before a new // file is created. This does not include the last value appended to the value file.
// Default is 32kb. MaxValueChunkSize uint64 // WALDirectory is the folder where WAL segment files will be stored. // Default is db/wal. WALDirectory string // DataDirectory is the folder where heap and value files will be stored. // Default is db/data. DataDirectory string // Number of pending writes that can be queued up concurrently before transaction commits will // be blocked. PendingWritesBuffer int } // DB is the root object for the database. You can open/create your DB by calling Open(). type DB struct { wal *walManager values *valueManager writeChannel chan interface{} stopWriteChannel chan chan error } // Open will open or create the database using the provided configuration. func Open(options Options) (*DB, error) { // TODO (elliotcourant) Add options validation. // Try to setup the WAL manager. wal, err := newWalManager(options.WALDirectory, options.MaxWALSegmentSize) if err != nil { return nil, err } db := &DB{ wal: wal, values: nil, writeChannel: make(chan interface{}, options.PendingWritesBuffer), // TODO (elliotcourant) make this channel some sort of cancelFuture object. stopWriteChannel: make(chan chan error, 1), // Make this a single byte for now. } // Start the background writer to accept transaction commits. go db.backgroundWriter() return db, nil } // DefaultOptions just provides a basic configuration which can be passed to open a database. func DefaultOptions() Options { return Options{ MaxWALSegmentSize: 1024 /* 1kb */ * 8, /* 8kb */ MaxValueChunkSize: 1024 /* 1kb */ * 32, /* 32kb */ DataDirectory: "db/data", WALDirectory: "db/wal", PendingWritesBuffer: 8, } } // Close will close any open files and stop any background writes. Any writes that have not been // returned successfully will not have been written to the database. func (db *DB) Close() error { // Create a channel that we can use to wait for the response from the background writer. writeChannelFuture := make(chan error, 0) // Stop the background writer by sending the channel to it. db.stopWriteChannel <- writeChannelFuture // Wait to get a response from the background writer. if err := <-writeChannelFuture; err != nil { return err } // TODO (elliotcourant) Add timeout logic here if the background writer takes too long to exit. return nil } func (db *DB) backgroundWriter() { for { select { case txn := <-db.writeChannel: fmt.Println(txn) case stopResult := <-db.stopWriteChannel: // If we receive anything on the stopWriteChannel then just exit this method. stopResult <- nil return } } }
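// Usage sketch (illustrative; only Open, DefaultOptions and Close appear in this file):
//
//   db, err := Open(DefaultOptions())
//   if err != nil {
//       return err
//   }
//   defer db.Close()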
mod.rs
//! Datalog program. //! //! The client constructs a `struct Program` that describes Datalog relations and rules and //! calls `Program::run()` to instantiate the program. The method returns an error or an //! instance of `RunningProgram` that can be used to interact with the program at runtime. //! Interactions include starting, committing or rolling back a transaction and modifying input //! relations. The engine invokes user-provided callbacks as records are added or removed from //! relations. `RunningProgram::stop()` terminates the Datalog program destroying all its state. //! If not invoked manually (which allows for manual error handling), `RunningProgram::stop` //! will be called when the program object leaves scope. // TODO: namespace cleanup // TODO: single input relation pub mod arrange; pub mod config; mod timestamp; mod update; mod worker; pub use arrange::diff_distinct; pub use config::{Config, ProfilingConfig}; pub use timestamp::{TSNested, TupleTS, TS}; pub use update::Update; use crate::{ ddval::*, profile::*, record::Mutator, render::{ arrange_by::{ArrangeBy, ArrangementKind}, RenderContext, }, }; use arrange::{ antijoin_arranged, Arrangement as DataflowArrangement, ArrangementFlavor, Arrangements, }; use config::SelfProfilingRig; use crossbeam_channel::{Receiver, Sender}; use fnv::{FnvHashMap, FnvHashSet}; use std::{ any::Any, borrow::Cow, cmp, collections::{hash_map, BTreeSet}, fmt::{self, Debug, Formatter}, iter::{self, Cycle, Skip}, ops::Range, sync::{ atomic::{AtomicBool, Ordering}, Arc, Mutex, }, thread::JoinHandle, }; use timestamp::ToTupleTS; use triomphe::Arc as ThinArc; use worker::DDlogWorker; use differential_dataflow::lattice::Lattice; use differential_dataflow::operators::arrange::arrangement::Arranged; use differential_dataflow::operators::arrange::*; use differential_dataflow::operators::*; use differential_dataflow::trace::implementations::ord::OrdKeySpine as DefaultKeyTrace; use differential_dataflow::trace::implementations::ord::OrdValSpine as DefaultValTrace; use differential_dataflow::trace::wrappers::enter::TraceEnter; use differential_dataflow::trace::{BatchReader, Cursor, TraceReader}; use differential_dataflow::Collection; use dogsdogsdogs::{ altneu::AltNeu, calculus::{Differentiate, Integrate}, operators::lookup_map, }; use timely::communication::{initialize::WorkerGuards, Allocator}; use timely::dataflow::scopes::*; use timely::order::TotalOrder; use timely::progress::{timestamp::Refines, PathSummary, Timestamp}; use timely::worker::Worker; type ValTrace<S> = DefaultValTrace<DDValue, DDValue, S, Weight, u32>; type KeyTrace<S> = DefaultKeyTrace<DDValue, S, Weight, u32>; type TValAgent<S> = TraceAgent<ValTrace<S>>; type TKeyAgent<S> = TraceAgent<KeyTrace<S>>; type TValEnter<P, T> = TraceEnter<TValAgent<P>, T>; type TKeyEnter<P, T> = TraceEnter<TKeyAgent<P>, T>; /// Diff associated with records in differential dataflow pub type Weight = i32; /// Message buffer for profiling messages const PROF_MSG_BUF_SIZE: usize = 10_000; /// Result type returned by this library pub type Response<X> = Result<X, String>; /// Unique identifier of a DDlog relation. // TODO: Newtype this for type-safety pub type RelId = usize; /// Unique identifier of an index. // TODO: Newtype this for type-safety pub type IdxId = usize; /// Unique identifier of an arranged relation. /// The first element of the tuple identifies relation; the second is the index /// of arrangement for the given relation. 
// TODO: Newtype this for type-safety pub type ArrId = (RelId, usize); /// Function type used to map the content of a relation /// (see `XFormCollection::Map`). pub type MapFunc = fn(DDValue) -> DDValue; /// Function type used to extract join key from a relation /// (see `XFormCollection::StreamJoin`). pub type KeyFunc = fn(&DDValue) -> Option<DDValue>; /// (see `XFormCollection::FlatMap`). pub type FlatMapFunc = fn(DDValue) -> Option<Box<dyn Iterator<Item = DDValue>>>; /// Function type used to filter a relation /// (see `XForm*::Filter`). pub type FilterFunc = fn(&DDValue) -> bool; /// Function type used to simultaneously filter and map a relation /// (see `XFormCollection::FilterMap`). pub type FilterMapFunc = fn(DDValue) -> Option<DDValue>; /// Function type used to inspect a relation /// (see `XFormCollection::InspectFunc`) pub type InspectFunc = fn(&DDValue, TupleTS, Weight) -> (); /// Function type used to arrange a relation into key-value pairs /// (see `XFormArrangement::Join`, `XFormArrangement::Antijoin`). pub type ArrangeFunc = fn(DDValue) -> Option<(DDValue, DDValue)>; /// Function type used to assemble the result of a join into a value. /// Takes join key and a pair of values from the two joined relations /// (see `XFormArrangement::Join`). pub type JoinFunc = fn(&DDValue, &DDValue, &DDValue) -> Option<DDValue>; /// Similar to JoinFunc, but only takes values from the two joined /// relations, and not the key (`XFormArrangement::StreamJoin`). pub type ValJoinFunc = fn(&DDValue, &DDValue) -> Option<DDValue>; /// Function type used to assemble the result of a semijoin into a value. /// Takes join key and value (see `XFormArrangement::Semijoin`). pub type SemijoinFunc = fn(&DDValue, &DDValue, &()) -> Option<DDValue>; /// Similar to SemijoinFunc, but only takes one value. /// (see `XFormCollection::StreamSemijoin`). pub type StreamSemijoinFunc = fn(&DDValue) -> Option<DDValue>; /// Aggregation function: aggregates multiple values into a single value. pub type AggFunc = fn(&DDValue, &[(&DDValue, Weight)]) -> Option<DDValue>; // TODO: add validating constructor for Program: // - relation id's are unique // - rules only refer to previously declared relations or relations in the local scc // - input relations do not occur in LHS of rules // - all references to arrangements are valid /// A Datalog program is a vector of nodes representing /// individual non-recursive relations and strongly connected components /// comprised of one or more mutually recursive relations. /// * `delayed_rels` - delayed relations used in the program. /// * `init_data` - initial relation contents. #[derive(Clone)] pub struct Program { pub nodes: Vec<ProgNode>, pub delayed_rels: Vec<DelayedRelation>, pub init_data: Vec<(RelId, DDValue)>, } type TransformerMap<'a> = FnvHashMap<RelId, Collection<Child<'a, Worker<Allocator>, TS>, DDValue, Weight>>; /// Represents a dataflow fragment implemented outside of DDlog directly in differential-dataflow. /// /// Takes the set of already constructed collections and modifies this /// set, adding new collections. Note that the transformer can only be applied in the top scope /// (`Child<'a, Worker<Allocator>, TS>`), as we currently don't have a way to ensure that the /// transformer is monotonic and thus it may not converge if used in a nested scope. 
pub type TransformerFuncRes = Box<dyn for<'a> Fn(&mut TransformerMap<'a>)>; /// A function returning a dataflow fragment implemented in differential-dataflow pub type TransformerFunc = fn() -> TransformerFuncRes; /// Program node is either an individual non-recursive relation, a transformer application or /// a vector of one or more mutually recursive relations. #[derive(Clone)] pub enum ProgNode { Rel { rel: Relation }, Apply { tfun: TransformerFunc }, Scc { rels: Vec<RecursiveRelation> }, } /// Relation computed in a nested scope as a fixed point. /// /// The `distinct` flag indicates that the `distinct` operator should be applied /// to the relation before closing the loop to enforce convergence of the fixed /// point computation. #[derive(Clone)] pub struct RecursiveRelation { pub rel: Relation, pub distinct: bool, } pub trait RelationCallback: Fn(RelId, &DDValue, Weight) + Send + Sync { fn clone_boxed(&self) -> Box<dyn RelationCallback>; } impl<T> RelationCallback for T where T: Fn(RelId, &DDValue, Weight) + Clone + Send + Sync + ?Sized + 'static, { fn clone_boxed(&self) -> Box<dyn RelationCallback> { Box::new(self.clone()) } } impl Clone for Box<dyn RelationCallback> { fn clone(&self) -> Self { self.clone_boxed() } } /// Caching mode for input relations only /// /// `NoCache` - don't cache the contents of the relation. /// `CacheSet` - cache relation as a set. Duplicate inserts are /// ignored (for relations without a key) or fail (for relations /// with key). /// `CacheMultiset` - cache relation as a generalized multiset with /// integer weights. #[derive(Clone)] pub enum CachingMode { Stream, Set, Multiset, } /// Datalog relation. /// /// defines a set of rules and a set of arrangements with which this relation is used in /// rules. The set of rules can be empty (if this is a ground relation); the set of arrangements /// can also be empty if the relation is not used in the RHS of any rules. #[derive(Clone)] pub struct Relation { /// Relation name; does not have to be unique pub name: Cow<'static, str>, /// `true` if this is an input relation. Input relations are populated by the client /// of the library via `RunningProgram::insert()`, `RunningProgram::delete()` and `RunningProgram::apply_updates()` methods. pub input: bool, /// Apply distinct_total() to this relation after concatenating all its rules pub distinct: bool, /// Caching mode (for input relations only). pub caching_mode: CachingMode, /// If `key_func` is present, this indicates that the relation is indexed with a unique /// key computed by key_func pub key_func: Option<fn(&DDValue) -> DDValue>, /// Unique relation id pub id: RelId, /// Rules that define the content of the relation. /// Input relations cannot have rules. /// Rules can only refer to relations introduced earlier in the program as well as relations in the same strongly connected /// component. pub rules: Vec<Rule>, /// Arrangements of the relation used to compute other relations. Index in this vector /// along with relation id uniquely identifies the arrangement (see `ArrId`). pub arrangements: Vec<Arrangement>, /// Callback invoked when an element is added or removed from relation. pub change_cb: Option<Arc<dyn RelationCallback + 'static>>, } impl Relation { pub fn name(&self) -> &str { &*self.name } } /// `DelayedRelation` refers to the contents of a given base relation from /// `delay` epochs ago. /// /// The use of delayed relations in rules comes with an additional constraint. 
/// A delayed relation produces outputs ahead of time, e.g., at time `ts` it /// can yield values labeled `ts + delay`. In DDlog we don't want to see these /// values until we explicitly advance the epoch to `ts + delay`. We apply the /// `consolidate` operator before `probe`, which guarantees that any /// output can only be produced once DD knows that it should not expect any more /// changes for the given timstamp. So as long as each output relation depends on /// at least one regular (not delayed) relation, we shouldn't observe any values /// generated ahead of time. It is up to the compiler to enforce this /// constraint. #[derive(Clone)] pub struct DelayedRelation { /// Unique id of this delayed relation. Delayed relation and regular relations ids live in the /// same name space and therefore cannot clash. pub id: RelId, /// Id of the base relation that this DelayedRelation is a delayed version of. pub rel_id: RelId, /// The number of epochs to delay by. Must be greater than 0. pub delay: TS, // /// We don't have a use case for this, and this is not exposed through the DDlog syntax (since // /// delayed relations are currently only used with streams), but we could in principle have // /// shared arrangements of delayed relations. // pub arrangements: Vec<Arrangement>, } /// A Datalog relation or rule can depend on other relations and their /// arrangements. #[derive(Copy, PartialEq, Eq, Hash, Debug, Clone)] pub enum Dep { Rel(RelId), Arr(ArrId), } impl Dep { pub fn relid(&self) -> RelId { match self { Dep::Rel(relid) => *relid, Dep::Arr((relid, _)) => *relid, } } } /// Transformations, such as maps, flatmaps, filters, joins, etc. are the building blocks of /// DDlog rules. /// /// Different kinds of transformations can be applied only to flat collections, /// only to arranged collections, or both. We therefore use separate types to represent /// collection and arrangement transformations. /// /// Note that differential sometimes allows the same kind of transformation to be applied to both /// collections and arrangements; however the former is implemented on top of the latter and incurs /// the additional cost of arranging the collection. We only support the arranged version of these /// transformations, forcing the user to explicitly arrange the collection if necessary (or, as much /// as possible, keep the data arranged throughout the chain of transformations). /// /// `XFormArrangement` - arrangement transformation. #[derive(Clone)] pub enum XFormArrangement { /// FlatMap arrangement into a collection FlatMap { description: Cow<'static, str>, fmfun: FlatMapFunc, /// Transformation to apply to resulting collection. /// `None` terminates the chain of transformations. next: Box<Option<XFormCollection>>, }, FilterMap { description: Cow<'static, str>, fmfun: FilterMapFunc, /// Transformation to apply to resulting collection. /// `None` terminates the chain of transformations. next: Box<Option<XFormCollection>>, }, /// Aggregate Aggregate { description: Cow<'static, str>, /// Filter arrangement before grouping ffun: Option<FilterFunc>, /// Aggregation to apply to each group. aggfun: AggFunc, /// Apply transformation to the resulting collection. next: Box<Option<XFormCollection>>, }, /// Join Join { description: Cow<'static, str>, /// Filter arrangement before joining ffun: Option<FilterFunc>, /// Arrangement to join with. arrangement: ArrId, /// Function used to put together ouput value. jfun: JoinFunc, /// Join returns a collection: apply `next` transformation to it. 
next: Box<Option<XFormCollection>>, }, /// Semijoin Semijoin { description: Cow<'static, str>, /// Filter arrangement before joining ffun: Option<FilterFunc>, /// Arrangement to semijoin with. arrangement: ArrId, /// Function used to put together ouput value. jfun: SemijoinFunc, /// Join returns a collection: apply `next` transformation to it. next: Box<Option<XFormCollection>>, }, /// Return a subset of values that correspond to keys not present in `arrangement`. Antijoin { description: Cow<'static, str>, /// Filter arrangement before joining ffun: Option<FilterFunc>, /// Arrangement to antijoin with arrangement: ArrId, /// Antijoin returns a collection: apply `next` transformation to it. next: Box<Option<XFormCollection>>, }, /// Streaming join: join arrangement with a collection. /// This outputs a collection obtained by matching each value /// in the input collection against the arrangement without /// arranging the collection first. StreamJoin { description: Cow<'static, str>, /// Filter arrangement before join. ffun: Option<FilterFunc>, /// Relation to join with. rel: RelId, /// Extract join key from the _collection_. kfun: KeyFunc, /// Function used to put together ouput value. The first argument comes /// from the arrangement, the second from the collection. jfun: ValJoinFunc, /// Join returns a collection: apply `next` transformation to it. next: Box<Option<XFormCollection>>, }, /// Streaming semijoin. StreamSemijoin { description: Cow<'static, str>, /// Filter arrangement before join. ffun: Option<FilterFunc>, /// Relation to join with. rel: RelId, /// Extract join key from the relation. kfun: KeyFunc, /// Function used to put together ouput value. jfun: StreamSemijoinFunc, /// Join returns a collection: apply `next` transformation to it. next: Box<Option<XFormCollection>>, }, } impl XFormArrangement { pub fn description(&self) -> &str { match self { XFormArrangement::FlatMap { description, .. } => &description, XFormArrangement::FilterMap { description, .. } => &description, XFormArrangement::Aggregate { description, .. } => &description, XFormArrangement::Join { description, .. } => &description, XFormArrangement::Semijoin { description, .. } => &description, XFormArrangement::Antijoin { description, .. } => &description, XFormArrangement::StreamJoin { description, .. } => &description, XFormArrangement::StreamSemijoin { description, .. } => &description, } } pub(super) fn dependencies(&self) -> FnvHashSet<Dep> { match self { XFormArrangement::FlatMap { next, .. } => match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }, XFormArrangement::FilterMap { next, .. } => match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }, XFormArrangement::Aggregate { next, .. } => match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }, XFormArrangement::Join { arrangement, next, .. } => { let mut deps = match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }; deps.insert(Dep::Arr(*arrangement)); deps } XFormArrangement::Semijoin { arrangement, next, .. } => { let mut deps = match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }; deps.insert(Dep::Arr(*arrangement)); deps } XFormArrangement::Antijoin { arrangement, next, .. } => { let mut deps = match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }; deps.insert(Dep::Arr(*arrangement)); deps } XFormArrangement::StreamJoin { rel, next, .. 
} => { let mut deps = match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }; deps.insert(Dep::Rel(*rel)); deps } XFormArrangement::StreamSemijoin { rel, next, .. } => { let mut deps = match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }; deps.insert(Dep::Rel(*rel)); deps } } } } /// `XFormCollection` - collection transformation. #[derive(Clone)] pub enum XFormCollection { /// Arrange the collection, apply `next` transformation to the resulting collection. Arrange { description: Cow<'static, str>, afun: ArrangeFunc, next: Box<XFormArrangement>, }, /// The `Differentiate` operator subtracts the previous value /// of the collection from its current value, `C' = C - (C_-1)`. /// Can be used to transform a stream into a relation that stores /// the new values in the stream for each timestamp. Differentiate { description: Cow<'static, str>, next: Box<Option<XFormCollection>>, }, /// Apply `mfun` to each element in the collection Map { description: Cow<'static, str>, mfun: MapFunc, next: Box<Option<XFormCollection>>, }, /// FlatMap FlatMap { description: Cow<'static, str>, fmfun: FlatMapFunc, next: Box<Option<XFormCollection>>, }, /// Filter collection Filter { description: Cow<'static, str>, ffun: FilterFunc, next: Box<Option<XFormCollection>>, }, /// Map and filter FilterMap { description: Cow<'static, str>, fmfun: FilterMapFunc, next: Box<Option<XFormCollection>>, }, /// Inspector Inspect { description: Cow<'static, str>, ifun: InspectFunc, next: Box<Option<XFormCollection>>, }, /// Streaming join: join collection with an arrangement. /// This outputs a collection obtained by matching each value /// in the input collection against the arrangement without /// arranging the collection first. StreamJoin { description: Cow<'static, str>, /// Function to arrange collection into key/value pairs. afun: ArrangeFunc, /// Arrangement to join with. arrangement: ArrId, /// Function used to put together ouput values (the first argument /// comes from the collection, the second -- from the arrangement). jfun: ValJoinFunc, /// Join returns a collection: apply `next` transformation to it. next: Box<Option<XFormCollection>>, }, /// Streaming semijoin. StreamSemijoin { description: Cow<'static, str>, /// Function to arrange collection into key/value pairs. afun: ArrangeFunc, /// Arrangement to join with. arrangement: ArrId, /// Function used to put together ouput values (the first argument /// comes from the collection, the second -- from the arrangement). jfun: StreamSemijoinFunc, /// Join returns a collection: apply `next` transformation to it. next: Box<Option<XFormCollection>>, }, /// Applies `xform` to the stream (i.e., to changes to the collection in /// the last timestamp) and produces the result while discarding any /// intermediate arrangements used to construct the result. /// Example: `xform` may arrange and aggregate the collection. This /// will output the aggregate of values added at the current timestamp /// after each transaction. Since the arrangement is instantly cleared, /// the old value of the aggregate will not get retracted during the next /// transaction. /// /// Stream xforms are currently only supported in the top-level contents. /// /// This transformation is implemented using the "calculus" feature of DD: /// it constructs an `AltNeu` scope, moves the collection into it using the /// `calculus::differentiate` operator, applies `xform` and extracts the /// result using `calculus:integrate`. 
/// NOTE: This is an experimental feature. We currently don't /// have real use cases for it (stream joins are already more efficiently /// implemented using `lookup_map`, stream aggregation does not sound like /// a very useful feature to me, stream antijoins might be the killer app /// here), and the implementation is ugly. /// It might go away if we don't find what it's good for. StreamXForm { description: Cow<'static, str>, xform: Box<Option<XFormCollection>>, next: Box<Option<XFormCollection>>, }, } impl XFormCollection { pub fn description(&self) -> &str { match self { XFormCollection::Arrange { description, .. } => &description, XFormCollection::Differentiate { description, .. } => &description, XFormCollection::Map { description, .. } => &description, XFormCollection::FlatMap { description, .. } => &description, XFormCollection::Filter { description, .. } => &description, XFormCollection::FilterMap { description, .. } => &description, XFormCollection::Inspect { description, .. } => &description, XFormCollection::StreamJoin { description, .. } => &description, XFormCollection::StreamSemijoin { description, .. } => &description, XFormCollection::StreamXForm { description, .. } => &description, } } pub fn dependencies(&self) -> FnvHashSet<Dep> { match self { XFormCollection::Arrange { next, .. } => next.dependencies(), XFormCollection::Differentiate { next, .. } => match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }, XFormCollection::Map { next, .. } => match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }, XFormCollection::FlatMap { next, .. } => match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }, XFormCollection::Filter { next, .. } => match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }, XFormCollection::FilterMap { next, .. } => match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }, XFormCollection::Inspect { next, .. } => match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }, XFormCollection::StreamJoin { arrangement, next, .. } => { let mut deps = match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }; deps.insert(Dep::Arr(*arrangement)); deps } XFormCollection::StreamSemijoin { arrangement, next, .. } => { let mut deps = match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }; deps.insert(Dep::Arr(*arrangement)); deps } XFormCollection::StreamXForm { xform, next, .. } => { let deps1 = match **xform { None => FnvHashSet::default(), Some(ref x) => x.dependencies(), }; let deps2 = match **next { None => FnvHashSet::default(), Some(ref n) => n.dependencies(), }; deps1.union(&deps2).cloned().collect() } } } } /// Datalog rule (more precisely, the body of a rule) starts with a collection /// or arrangement and applies a chain of transformations to it. #[derive(Clone)] pub enum Rule { CollectionRule { description: Cow<'static, str>, rel: RelId, xform: Option<XFormCollection>, }, ArrangementRule { description: Cow<'static, str>, arr: ArrId, xform: XFormArrangement, }, } impl Rule { pub fn description(&self) -> &str { match self { Rule::CollectionRule { description, .. } => description.as_ref(), Rule::ArrangementRule { description, .. } => description.as_ref(), } } fn dependencies(&self) -> FnvHashSet<Dep> { match self { Rule::CollectionRule { rel, xform, .. 
} => { let mut deps = match xform { None => FnvHashSet::default(), Some(ref x) => x.dependencies(), }; deps.insert(Dep::Rel(*rel)); deps } Rule::ArrangementRule { arr, xform, .. } => { let mut deps = xform.dependencies(); deps.insert(Dep::Arr(*arr)); deps } } } } /// Describes arrangement of a relation. #[derive(Clone)] pub enum Arrangement { /// Arrange into (key,value) pairs Map { /// Arrangement name; does not have to be unique name: Cow<'static, str>, /// Function used to produce arrangement. afun: ArrangeFunc, /// The arrangement can be queried using `RunningProgram::query_arrangement` /// and `RunningProgram::dump_arrangement`. queryable: bool, }, /// Arrange into a set of values Set { /// Arrangement name; does not have to be unique name: Cow<'static, str>, /// Function used to produce arrangement. fmfun: FilterMapFunc, /// Apply distinct_total() before arranging filtered collection. /// This is necessary if the arrangement is to be used in an antijoin. distinct: bool, }, } impl Arrangement { fn name(&self) -> &str { match self { Arrangement::Map { name, .. } => name, Arrangement::Set { name, .. } => name, } } fn queryable(&self) -> bool { match *self { Arrangement::Map { queryable, .. } => queryable, Arrangement::Set { .. } => false, } } fn build_arrangement_root<S>( &self, render_context: &RenderContext, collection: &Collection<S, DDValue, Weight>, ) -> DataflowArrangement<S, Weight, TValAgent<S::Timestamp>, TKeyAgent<S::Timestamp>> where S: Scope, Collection<S, DDValue, Weight>: ThresholdTotal<S, DDValue, Weight>, S::Timestamp: Lattice + Ord + TotalOrder, { let kind = match *self { Arrangement::Map { afun, .. } => ArrangementKind::Map { value_function: afun, }, Arrangement::Set { fmfun, distinct, .. } => { // TODO: We don't currently produce a `None` as the key extraction // function, but doing so will simplify the dataflow graph // in instances where a function isn't needed ArrangementKind::Set { key_function: Some(fmfun), distinct, } } }; ArrangeBy { kind, target_relation: self.name().into(), } .render_root(render_context, collection) } fn build_arrangement<S>( &self, render_context: &RenderContext, collection: &Collection<S, DDValue, Weight>, ) -> DataflowArrangement<S, Weight, TValAgent<S::Timestamp>, TKeyAgent<S::Timestamp>> where S: Scope, S::Timestamp: Lattice + Ord, { let kind = match *self { Arrangement::Map { afun, .. } => ArrangementKind::Map { value_function: afun, }, Arrangement::Set { fmfun, distinct, .. } => { // TODO: We don't currently produce a `None` as the key extraction // function, but doing so will simplify the dataflow graph // in instances where a function isn't needed ArrangementKind::Set { key_function: Some(fmfun), distinct, } } }; ArrangeBy { kind, target_relation: self.name().into(), } .render(render_context, collection) } } /// Set relation content. pub type ValSet = FnvHashSet<DDValue>; /// Multiset relation content. pub type ValMSet = DeltaSet; /// Indexed relation content. pub type IndexedValSet = FnvHashMap<DDValue, DDValue>; /// Relation delta pub type DeltaSet = FnvHashMap<DDValue, isize>; /// Runtime representation of a datalog program. /// /// The program will be automatically stopped when the object goes out /// of scope. Errors occurring as part of that operation are silently /// ignored. If you want to handle such errors, call `stop` manually. pub struct RunningProgram { /// Producer sides of channels used to send commands to workers. /// We use async channels to avoid deadlocks when workers are blocked /// in `step_or_park`.
senders: Vec<Sender<Msg>>, /// Channels to receive replies from worker threads. We could use a single /// channel with multiple senders, but use many channels instead to avoid /// deadlocks when one of the workers has died, but `recv` blocks instead /// of failing, since the channel is still considered alive. reply_recv: Vec<Receiver<Reply>>, relations: FnvHashMap<RelId, RelationInstance>, worker_guards: Option<WorkerGuards<Result<(), String>>>, transaction_in_progress: bool, need_to_flush: bool, timestamp: TS, /// CPU profiling enabled (can be expensive). profile_cpu: Option<ThinArc<AtomicBool>>, /// Consume timely_events and output them to CSV file. Can be expensive. profile_timely: Option<ThinArc<AtomicBool>>, /// Profiling thread. prof_thread_handle: Option<JoinHandle<()>>, /// Profiling statistics. pub profile: Option<ThinArc<Mutex<Profile>>>, worker_round_robbin: Skip<Cycle<Range<usize>>>, } // Right now this Debug implementation is more or less a short cut. // Ideally we would want to implement Debug for `RelationInstance`, but // that quickly gets very cumbersome. impl Debug for RunningProgram { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("RunningProgram") .field("senders", &self.senders) .field("reply_recv", &self.reply_recv) .field( "relations", &(&self.relations as *const FnvHashMap<RelId, RelationInstance>), ) .field("transaction_in_progress", &self.transaction_in_progress) .field("need_to_flush", &self.need_to_flush) .field("profile_cpu", &self.profile_cpu) .field("profile_timely", &self.profile_timely) .field("prof_thread_handle", &self.prof_thread_handle) .field("profile", &self.profile) .finish() } } /// Runtime representation of relation enum RelationInstance { Stream { /// Changes since start of transaction. delta: DeltaSet, }, Multiset { /// Multiset of all elements in the relation. elements: ValMSet, /// Changes since start of transaction. delta: DeltaSet, }, Flat { /// Set of all elements in the relation. Used to enforce set semantics for input relations /// (repeated inserts and deletes are ignored). elements: ValSet, /// Changes since start of transaction. delta: DeltaSet, }, Indexed { key_func: fn(&DDValue) -> DDValue, /// Set of all elements in the relation indexed by key. Used to enforce set semantics, /// uniqueness of keys, and to query input relations by key. elements: IndexedValSet, /// Changes since start of transaction. Only maintained for input relations and is used to /// enforce set semantics. delta: DeltaSet, }, } impl RelationInstance { pub fn delta(&self) -> &DeltaSet { match self { RelationInstance::Stream { delta } => delta, RelationInstance::Multiset { delta, .. } => delta, RelationInstance::Flat { delta, .. } => delta, RelationInstance::Indexed { delta, .. } => delta, } } pub fn delta_mut(&mut self) -> &mut DeltaSet { match self { RelationInstance::Stream { delta } => delta, RelationInstance::Multiset { delta, .. } => delta, RelationInstance::Flat { delta, .. } => delta, RelationInstance::Indexed { delta, .. } => delta, } } } /// Messages sent to timely worker threads. #[derive(Debug, Clone)] enum Msg { /// Update input relation. Update { /// The batch of updates. updates: Vec<Update<DDValue>>, /// The timestamp these updates belong to. timestamp: TS, }, /// Propagate changes through the pipeline. Flush { /// The timestamp to advance to. advance_to: TS, }, /// Query arrangement. If the second argument is `None`, returns /// all values in the collection; otherwise returns values associated /// with the specified key. 
Query(ArrId, Option<DDValue>), /// Stop worker. Stop, } /// Reply messages from timely worker threads. #[derive(Debug)] enum Reply { /// Acknowledge flush completion. FlushAck, /// Result of a query. QueryRes(Option<BTreeSet<DDValue>>), } impl Program { /// Initialize the program with the given configuration pub fn run(&self, config: Config) -> Result<RunningProgram, String> { // Setup channels to communicate with the dataflow. // We use async channels to avoid deadlocks when workers are parked in // `step_or_park`. This has the downside of introducing an unbounded buffer // that is only guaranteed to be fully flushed when the transaction commits. let (request_send, request_recv): (Vec<_>, Vec<_>) = (0..config.num_timely_workers) .map(|_| crossbeam_channel::unbounded::<Msg>()) .unzip(); let request_recv = Arc::from(request_recv); // Channels for responses from worker threads. let (reply_send, reply_recv): (Vec<_>, Vec<_>) = (0..config.num_timely_workers) .map(|_| crossbeam_channel::unbounded::<Reply>()) .unzip(); let reply_send = Arc::from(reply_send); let profiling_rig = SelfProfilingRig::new(&config); // Clone the program so that it can be moved into the timely computation let program = Arc::new(self.clone()); let timely_config = config.timely_config()?; let worker_config = config.clone(); let profiling_data = profiling_rig.profiling_data.clone(); let (builders, others) = timely_config .communication .try_build() .map_err(|err| format!("failed to build timely communication config: {}", err))?; // Start up timely computation. // Note: We use `execute_from()` instead of `timely::execute()` because // `execute()` automatically sets log hooks that connect to // `TIMELY_WORKER_LOG_ADDR`, meaning that no matter what we do // our dataflow will always attempt to connect to that address // if it's present in the env, causing things like ddshow/#7. 
// See https://github.com/Kixiron/ddshow/issues/7 let worker_guards = timely::execute::execute_from( builders, others, timely_config.worker, move |worker: &mut Worker<Allocator>| -> Result<_, String> { let logger = worker.log_register().get("timely"); let worker = DDlogWorker::new( worker, worker_config.clone(), program.clone(), profiling_data.clone(), Arc::clone(&request_recv), Arc::clone(&reply_send), logger, ); worker.run().map_err(|e| { eprintln!("Worker thread failed: {}", e); e }) }, ) .map_err(|err| format!("Failed to start timely computation: {:?}", err))?; let mut rels = FnvHashMap::default(); for relid in self.input_relations() { let rel = self.get_relation(relid); if rel.input { match rel.caching_mode { CachingMode::Stream => { rels.insert( relid, RelationInstance::Stream { delta: FnvHashMap::default(), }, ); } CachingMode::Multiset => { rels.insert( relid, RelationInstance::Multiset { elements: FnvHashMap::default(), delta: FnvHashMap::default(), }, ); } CachingMode::Set => match rel.key_func { None => { rels.insert( relid, RelationInstance::Flat { elements: FnvHashSet::default(), delta: FnvHashMap::default(), }, ); } Some(f) => { rels.insert( relid, RelationInstance::Indexed { key_func: f, elements: FnvHashMap::default(), delta: FnvHashMap::default(), }, ); } }, } } } let running_program = RunningProgram { senders: request_send, reply_recv, relations: rels, worker_guards: Some(worker_guards), transaction_in_progress: false, need_to_flush: false, timestamp: 1, profile_cpu: profiling_rig.profile_cpu, profile_timely: profiling_rig.profile_timely, prof_thread_handle: profiling_rig.profile_thread, profile: profiling_rig.profile, worker_round_robbin: (0..config.num_timely_workers).cycle().skip(0), }; // Wait for the initial transaction to complete. running_program.await_flush_ack()?; Ok(running_program) } /// This thread function is always invoked whether or not profiling is on. If it isn't, the /// thread will block on the channel read as no message will ever arrive. fn prof_thread_func(channel: Receiver<ProfMsg>, profile: ThinArc<Mutex<Profile>>) { loop { match channel.recv() { Ok(message) => { profile.lock().unwrap().update(&message); } _ => return, } } } fn get_delayed_relation(&self, relid: RelId) -> Option<&DelayedRelation> { for drel in &self.delayed_rels { if drel.id == relid { return Some(&drel); } } None } fn get_delayed_relation_name(&self, relid: RelId) -> Option<String> { self.get_delayed_relation(relid) .map(|drel| format!("{}|-{}", self.get_relation(drel.rel_id).name, drel.delay)) } /* Lookup relation by id */ fn get_relation(&self, relid: RelId) -> &Relation { for node in &self.nodes { match node { ProgNode::Rel { rel: r } => { if r.id == relid { return r; } } ProgNode::Apply { .. } => {} ProgNode::Scc { rels: rs } => { for r in rs { if r.rel.id == relid { return &r.rel; } } } } } panic!("get_relation({}): relation not found", relid) } /* indices of program nodes that use arrangement */ fn arrangement_used_by_nodes(&self, arrid: ArrId) -> impl Iterator<Item = usize> + '_ { self.nodes.iter().enumerate().filter_map(move |(i, n)| { if Self::node_uses_arrangement(n, arrid) { Some(i) } else { None } }) } fn node_uses_arrangement(n: &ProgNode, arrid: ArrId) -> bool { match n { ProgNode::Rel { rel } => Self::rel_uses_arrangement(rel, arrid), ProgNode::Apply { ..
} => false, ProgNode::Scc { rels } => rels .iter() .any(|rel| Self::rel_uses_arrangement(&rel.rel, arrid)), } } fn rel_uses_arrangement(r: &Relation, arrid: ArrId) -> bool { r.rules .iter() .any(|rule| Self::rule_uses_arrangement(rule, arrid)) } fn rule_uses_arrangement(r: &Rule, arrid: ArrId) -> bool { r.dependencies().contains(&Dep::Arr(arrid)) } /// Returns all input relations of the program fn input_relations(&self) -> impl Iterator<Item = RelId> + '_ { self.nodes.iter().filter_map(|node| match node { ProgNode::Rel { rel: r } => { if r.input { Some(r.id) } else { None } } ProgNode::Apply { .. } => None, ProgNode::Scc { rels: rs } => { for r in rs { assert!(!r.rel.input, "input relation ({}) in Scc", r.rel.name); } None } }) } /// Return all relations required to compute rels, excluding recursive dependencies on rels fn dependencies<'a, R>(rels: R) -> FnvHashSet<Dep> where R: Iterator<Item = &'a Relation> + Clone + 'a, { let mut result = FnvHashSet::default(); for rel in rels.clone() { for rule in &rel.rules { result = result.union(&rule.dependencies()).cloned().collect(); } } result .into_iter() .filter(|d| rels.clone().all(|r| r.id != d.relid())) .collect() } /// TODO: Allow this to return an error, so we can replace `expect`'s below /// with proper error handling. // TODO: Much of this logic would be vastly simplified if we used a // combination of traits and `Vec<XFormCollection>`s (as opposed to // what we do now with a linked list of them) fn xform_collection<'a, S, T, Lookup>( col: Collection<S, DDValue, Weight>, xform: &Option<XFormCollection>, arrangements: &Arrangements<'a, S, T>, lookup_collection: Lookup, ) -> Collection<S, DDValue, Weight> where S: Scope, S::Timestamp: Lattice + Refines<T> + ToTupleTS, T: Lattice + Timestamp, Lookup: Fn(RelId) -> Option<Collection<S, DDValue, Weight>>, { match xform { None => col, Some(ref x) => Self::xform_collection_ref(&col, x, arrangements, lookup_collection), } } fn xform_collection_ref<'a, S, T, Lookup>( col: &Collection<S, DDValue, Weight>, xform: &XFormCollection, arrangements: &Arrangements<'a, S, T>, lookup_collection: Lookup, ) -> Collection<S, DDValue, Weight> where S: Scope, S::Timestamp: Lattice + Refines<T> + ToTupleTS, T: Lattice + Timestamp, Lookup: Fn(RelId) -> Option<Collection<S, DDValue, Weight>>, { match *xform { XFormCollection::Arrange { ref description, afun, ref next, } => { let arr = with_prof_context(&description, || col.flat_map(afun).arrange_by_key()); Self::xform_arrangement(&arr, &*next, arrangements, lookup_collection) } XFormCollection::Differentiate { ref description, ref next, } => { #[allow(clippy::unnecessary_cast)] let one = <dyn Any>::downcast_ref::<<S::Timestamp as Timestamp>::Summary>(&(1 as TS)) .expect("Differentiate operator used in recursive context"); let diff = with_prof_context(&description, || { col.concat( &col.delay(move |t| one.results_in(t).expect("Integer overflow in Differentiate: maximal number of transactions exceeded")).negate()) }); Self::xform_collection(diff, &*next, arrangements, lookup_collection) } XFormCollection::Map { ref description, mfun, ref next, } => { let mapped = with_prof_context(&description, || col.map(mfun)); Self::xform_collection(mapped, &*next, arrangements, lookup_collection) } XFormCollection::FlatMap { ref description, fmfun, ref next, } => { let flattened = with_prof_context(&description, || { col.flat_map(move |x| fmfun(x).into_iter().flatten()) }); Self::xform_collection(flattened, &*next, arrangements, lookup_collection) } XFormCollection::Filter { ref 
description, ffun, ref next, } => { let filtered = with_prof_context(&description, || col.filter(ffun)); Self::xform_collection(filtered, &*next, arrangements, lookup_collection) } XFormCollection::FilterMap { ref description, fmfun, ref next, } => { let flattened = with_prof_context(&description, || col.flat_map(fmfun)); Self::xform_collection(flattened, &*next, arrangements, lookup_collection) } XFormCollection::Inspect { ref description, ifun, ref next, } => { let inspect = with_prof_context(&description, || { col.inspect(move |(v, ts, w)| ifun(v, ts.to_tuple_ts(), *w)) }); Self::xform_collection(inspect, &*next, arrangements, lookup_collection) } XFormCollection::StreamJoin { ref description, afun, arrangement, jfun, ref next, } => { let join = with_prof_context(&description, || { // arrange input collection let collection_with_keys = col.flat_map(afun); let arr = match arrangements.lookup_arr(arrangement) { ArrangementFlavor::Local(DataflowArrangement::Map(arranged)) => arranged, ArrangementFlavor::Local(DataflowArrangement::Set(_)) => { panic!("StreamJoin: not a map arrangement {:?}", arrangement) } _ => panic!("StreamJoin in nested scope: {}", description), }; lookup_map( &collection_with_keys, arr, |(k, _), key| *key = k.clone(), move |v1, w1, v2, w2| (jfun(&v1.1, v2), w1 * w2), ().into_ddvalue(), ().into_ddvalue(), ().into_ddvalue(), ) // Filter out `None`'s. // FIXME: We wouldn't need this if `lookup_map` allowed `output_func` // to return `Option`. .flat_map(|v| v) }); Self::xform_collection(join, &*next, arrangements, lookup_collection) } XFormCollection::StreamSemijoin { ref description, afun, arrangement, jfun, ref next, } => { let join = with_prof_context(&description, || { // arrange input collection let collection_with_keys = col.flat_map(afun); let arr = match arrangements.lookup_arr(arrangement) { ArrangementFlavor::Local(DataflowArrangement::Set(arranged)) => arranged, ArrangementFlavor::Local(DataflowArrangement::Map(_)) => { panic!("StreamSemijoin: not a set arrangement {:?}", arrangement) } _ => panic!("StreamSemijoin in nested scope: {}", description), }; lookup_map( &collection_with_keys, arr, |(k, _), key| *key = k.clone(), move |v1, w1, _, w2| (jfun(&v1.1), w1 * w2), ().into_ddvalue(), ().into_ddvalue(), ().into_ddvalue(), ) // Filter out `None`'s. // FIXME: We wouldn't need this if `lookup_map` allowed `output_func` // to return `Option`. 
.flat_map(|v| v) }); Self::xform_collection(join, &*next, arrangements, lookup_collection) } XFormCollection::StreamXForm { ref description, ref xform, ref next, } => { let xformed = col.scope() .scoped::<AltNeu<S::Timestamp>, _, _>(description, |inner| { let d_col = with_prof_context( format!("differentiate stream before: {}", description).as_ref(), || col.differentiate(inner), ); fn dummy_lookup_collection<S: Scope>( _: RelId, ) -> Option<Collection<S, DDValue, Weight>> { None } // We must call the streamless variant within the nested scope // otherwise we force rustc to instantiate an infinitely long type // since the function calls itself (a potentially infinite number of times), // each requiring further nesting of the scopes (and their types) let xformed = Self::streamless_xform_collection::< Child<S, AltNeu<S::Timestamp>>, S::Timestamp, _, >( d_col, &*xform, &Arrangements { arrangements: &FnvHashMap::default(), }, dummy_lookup_collection, ); with_prof_context( format!("integrate stream after: {}", description).as_ref(), || xformed.integrate(), ) }); Self::xform_collection(xformed, &*next, arrangements, lookup_collection) } } } fn streamless_xform_collection<'a, S, T, Lookup>( col: Collection<S, DDValue, Weight>, xform: &Option<XFormCollection>, arrangements: &Arrangements<'a, S, T>, lookup_collection: Lookup, ) -> Collection<S, DDValue, Weight> where S: Scope, S::Timestamp: Lattice + Refines<T> + ToTupleTS, T: Lattice + Timestamp, Lookup: Fn(RelId) -> Option<Collection<S, DDValue, Weight>>, { match xform { None => col, Some(ref x) => { Self::streamless_xform_collection_ref(&col, x, arrangements, lookup_collection) } } } fn streamless_xform_collection_ref<'a, S, T, Lookup>( col: &Collection<S, DDValue, Weight>, xform: &XFormCollection, arrangements: &Arrangements<'a, S, T>, lookup_collection: Lookup, ) -> Collection<S, DDValue, Weight> where S: Scope, S::Timestamp: Lattice + Refines<T> + ToTupleTS, T: Lattice + Timestamp, Lookup: Fn(RelId) -> Option<Collection<S, DDValue, Weight>>, { match *xform { XFormCollection::Arrange { ref description, afun, ref next, } => { let arr = with_prof_context(&description, || col.flat_map(afun).arrange_by_key()); Self::xform_arrangement(&arr, &*next, arrangements, lookup_collection) } XFormCollection::Differentiate { ref description, ref next, } => { #[allow(clippy::unnecessary_cast)] let one = <dyn Any>::downcast_ref::<<S::Timestamp as Timestamp>::Summary>(&(1 as TS)) .expect("Differentiate operator used in recursive context"); let diff = with_prof_context(&description, || { col.concat( &col.delay(move |t| one.results_in(t).expect("Integer overflow in Differentiate: maximal number of transactions exceeded")).negate()) }); Self::streamless_xform_collection(diff, &*next, arrangements, lookup_collection) } XFormCollection::Map { ref description, mfun, ref next, } => { let mapped = with_prof_context(&description, || col.map(mfun)); Self::streamless_xform_collection(mapped, &*next, arrangements, lookup_collection) } XFormCollection::FlatMap { ref description, fmfun, ref next, } => { let flattened = with_prof_context(&description, || { col.flat_map(move |x| fmfun(x).into_iter().flatten()) }); Self::streamless_xform_collection( flattened, &*next, arrangements, lookup_collection, ) } XFormCollection::Filter { ref description, ffun, ref next, } => { let filtered = with_prof_context(&description, || col.filter(ffun)); Self::streamless_xform_collection(filtered, &*next, arrangements, lookup_collection) } XFormCollection::FilterMap { ref description, fmfun, ref 
next, } => { let flattened = with_prof_context(&description, || col.flat_map(fmfun)); Self::streamless_xform_collection( flattened, &*next, arrangements, lookup_collection, ) } XFormCollection::Inspect { ref description, ifun, ref next, } => { let inspect = with_prof_context(&description, || { col.inspect(move |(v, ts, w)| ifun(v, ts.to_tuple_ts(), *w)) }); Self::streamless_xform_collection(inspect, &*next, arrangements, lookup_collection) } XFormCollection::StreamJoin { ref description, afun, arrangement, jfun, ref next, } => { let join = with_prof_context(&description, || { // arrange input collection let collection_with_keys = col.flat_map(afun); let arr = match arrangements.lookup_arr(arrangement) { ArrangementFlavor::Local(DataflowArrangement::Map(arranged)) => arranged, ArrangementFlavor::Local(DataflowArrangement::Set(_)) => { panic!("StreamJoin: not a map arrangement {:?}", arrangement) } _ => panic!("StreamJoin in nested scope: {}", description), }; lookup_map( &collection_with_keys, arr, |(k, _), key| *key = k.clone(), move |v1, w1, v2, w2| (jfun(&v1.1, v2), w1 * w2), ().into_ddvalue(), ().into_ddvalue(), ().into_ddvalue(), ) // Filter out `None`'s. // FIXME: We wouldn't need this if `lookup_map` allowed `output_func` // to return `Option`. .flat_map(|v| v) }); Self::streamless_xform_collection(join, &*next, arrangements, lookup_collection) } XFormCollection::StreamSemijoin { ref description, afun, arrangement, jfun, ref next, } => { let join = with_prof_context(&description, || { // arrange input collection let collection_with_keys = col.flat_map(afun); let arr = match arrangements.lookup_arr(arrangement) { ArrangementFlavor::Local(DataflowArrangement::Set(arranged)) => arranged, ArrangementFlavor::Local(DataflowArrangement::Map(_)) => { panic!("StreamSemijoin: not a set arrangement {:?}", arrangement) } _ => panic!("StreamSemijoin in nested scope: {}", description), }; lookup_map( &collection_with_keys, arr, |(k, _), key| *key = k.clone(), move |v1, w1, _, w2| (jfun(&v1.1), w1 * w2), ().into_ddvalue(), ().into_ddvalue(), ().into_ddvalue(), ) // Filter out `None`'s. // FIXME: We wouldn't need this if `lookup_map` allowed `output_func` // to return `Option`. .flat_map(|v| v) }); Self::streamless_xform_collection(join, &*next, arrangements, lookup_collection) } XFormCollection::StreamXForm { ref description, .. 
} => { panic!("StreamXForm in nested scope: {}", description); } } } fn xform_arrangement<'a, S, T, TR, LC>( arr: &Arranged<S, TR>, xform: &XFormArrangement, arrangements: &Arrangements<'a, S, T>, lookup_collection: LC, ) -> Collection<S, DDValue, Weight> where S: Scope, S::Timestamp: Lattice + Refines<T> + ToTupleTS, T: Lattice + Timestamp, TR: TraceReader<Key = DDValue, Val = DDValue, Time = S::Timestamp, R = Weight> + Clone + 'static, TR::Batch: BatchReader<DDValue, DDValue, S::Timestamp, Weight>, TR::Cursor: Cursor<DDValue, DDValue, S::Timestamp, Weight>, LC: Fn(RelId) -> Option<Collection<S, DDValue, Weight>>, { match *xform { XFormArrangement::FlatMap { ref description, fmfun, ref next, } => with_prof_context(&description, || { Self::streamless_xform_collection( arr.flat_map_ref(move |_, v| match fmfun(v.clone()) { Some(iter) => iter, None => Box::new(None.into_iter()), }), &*next, arrangements, lookup_collection, ) }), XFormArrangement::FilterMap { ref description, fmfun, ref next, } => with_prof_context(&description, || { Self::streamless_xform_collection( arr.flat_map_ref(move |_, v| fmfun(v.clone())), &*next, arrangements, lookup_collection, ) }), XFormArrangement::Aggregate { ref description, ffun, aggfun, ref next, } => { let col = with_prof_context(&description, || { ffun.map_or_else( || { arr.reduce(move |key, src, dst| { if let Some(x) = aggfun(key, src) { dst.push((x, 1)); }; }) .map(|(_, v)| v) }, |f| { arr.filter(move |_, v| f(v)) .reduce(move |key, src, dst| { if let Some(x) = aggfun(key, src) { dst.push((x, 1)); }; }) .map(|(_, v)| v) }, ) }); Self::streamless_xform_collection(col, &*next, arrangements, lookup_collection) } XFormArrangement::Join { ref description, ffun, arrangement, jfun, ref next, } => match arrangements.lookup_arr(arrangement) { ArrangementFlavor::Local(DataflowArrangement::Map(arranged)) => { let col = with_prof_context(&description, || { ffun.map_or_else( || arr.join_core(&arranged, jfun), |f| arr.filter(move |_, v| f(v)).join_core(&arranged, jfun), ) }); Self::streamless_xform_collection(col, &*next, arrangements, lookup_collection) } ArrangementFlavor::Foreign(DataflowArrangement::Map(arranged)) => { let col = with_prof_context(&description, || { ffun.map_or_else( || arr.join_core(&arranged, jfun), |f| arr.filter(move |_, v| f(v)).join_core(&arranged, jfun), ) }); Self::streamless_xform_collection(col, &*next, arrangements, lookup_collection) } _ => panic!("Join: not a map arrangement {:?}", arrangement), }, XFormArrangement::Semijoin { ref description, ffun, arrangement, jfun, ref next, } => match arrangements.lookup_arr(arrangement) { ArrangementFlavor::Local(DataflowArrangement::Set(arranged)) => { let col = with_prof_context(&description, || { ffun.map_or_else( || arr.join_core(&arranged, jfun), |f| arr.filter(move |_, v| f(v)).join_core(&arranged, jfun), ) }); Self::streamless_xform_collection(col, &*next, arrangements, lookup_collection) } ArrangementFlavor::Foreign(DataflowArrangement::Set(arranged)) => { let col = with_prof_context(&description, || { ffun.map_or_else( || arr.join_core(&arranged, jfun), |f| arr.filter(move |_, v| f(v)).join_core(&arranged, jfun), ) }); Self::streamless_xform_collection(col, &*next, arrangements, lookup_collection) } _ => panic!("Semijoin: not a set arrangement {:?}", arrangement), }, XFormArrangement::Antijoin { ref description, ffun, arrangement, ref next, } => match arrangements.lookup_arr(arrangement) { ArrangementFlavor::Local(DataflowArrangement::Set(arranged)) => { let col = 
with_prof_context(&description, || { ffun.map_or_else( || antijoin_arranged(&arr, &arranged).map(|(_, v)| v), |f| { antijoin_arranged(&arr.filter(move |_, v| f(v)), &arranged) .map(|(_, v)| v) }, ) }); Self::streamless_xform_collection(col, &*next, arrangements, lookup_collection) } ArrangementFlavor::Foreign(DataflowArrangement::Set(arranged)) => { let col = with_prof_context(&description, || { ffun.map_or_else( || antijoin_arranged(&arr, &arranged).map(|(_, v)| v), |f| { antijoin_arranged(&arr.filter(move |_, v| f(v)), &arranged) .map(|(_, v)| v) }, ) }); Self::streamless_xform_collection(col, &*next, arrangements, lookup_collection) } _ => panic!("Antijoin: not a set arrangement {:?}", arrangement), }, XFormArrangement::StreamJoin { ref description, ffun, rel, kfun, jfun, ref next, } => { let col = with_prof_context(&description, || { // Map `rel` into `(key, value)` pairs, filtering out // records where `kfun` returns `None`. // FIXME: The key will need to be cloned below. To avoid // this overhead, we need a version of `lookup_map` that // allows key function to return `Option`. let kfun = kfun; let jfun = jfun; let collection_with_keys = lookup_collection(rel) .unwrap_or_else(|| panic!("xform_arrangement: unknown relation {:?}", rel)) .flat_map(move |v| kfun(&v).map(|k| (k, v))); // Filter the arrangement if `ffun` is supplied. let join = ffun.map_or_else( || { lookup_map( &collection_with_keys, arr.clone(), |(k, _), key| *key = k.clone(), move |v1, w1, v2, w2| (jfun(v2, &v1.1), w1 * w2), ().into_ddvalue(), ().into_ddvalue(), ().into_ddvalue(), ) }, |f| { lookup_map( &collection_with_keys, arr.filter(move |_, v| f(v)), |(k, _), key| *key = k.clone(), move |v1, w1, v2, w2| (jfun(v2, &v1.1), w1 * w2), ().into_ddvalue(), ().into_ddvalue(), ().into_ddvalue(), ) }, ); // Filter out `None`'s. // FIXME: We wouldn't need this if `lookup_map` allowed `output_func` // to return `Option`. join.flat_map(|v| v) }); Self::streamless_xform_collection(col, &*next, arrangements, lookup_collection) } XFormArrangement::StreamSemijoin { ref description, ffun, rel, kfun, jfun, ref next, } => { let col = with_prof_context(&description, || { // Extract join key from `rel`, filtering out // records where `kfun` returns `None`. // FIXME: The key will need to be cloned below. To avoid // this overhead, we need a version of `lookup_map` that // allows key function to return `Option`. let kfun = kfun; let jfun = jfun; let collection_keys = lookup_collection(rel) .unwrap_or_else(|| panic!("xform_arrangement: unknown relation {:?}", rel)) .flat_map(move |v| kfun(&v)); // Filter the arrangement if `ffun` is supplied. let join = ffun.map_or_else( || { lookup_map( &collection_keys, arr.clone(), |k, key| *key = k.clone(), move |_, w1, v2, w2| (jfun(v2), w1 * w2), ().into_ddvalue(), ().into_ddvalue(), ().into_ddvalue(), ) }, |f| { lookup_map( &collection_keys, arr.filter(move |_, v| f(v)), |k, key| *key = k.clone(), move |_, w1, v2, w2| (jfun(v2), w1 * w2), ().into_ddvalue(), ().into_ddvalue(), ().into_ddvalue(), ) }, ); // Filter out `None`'s. // FIXME: We wouldn't need this if `lookup_map` allowed `output_func` // to return `Option`.
join.flat_map(|v| v) }); Self::streamless_xform_collection(col, &*next, arrangements, lookup_collection) } } } /// Compile right-hand-side of a rule to a collection fn mk_rule<'a, S, T, F>( &self, rule: &Rule, lookup_collection: F, arrangements: Arrangements<'a, S, T>, ) -> Collection<S, DDValue, Weight> where S: Scope, S::Timestamp: Lattice + Refines<T> + ToTupleTS, T: Lattice + Timestamp, F: Fn(RelId) -> Option<Collection<S, DDValue, Weight>>, { match rule { Rule::CollectionRule { rel, xform: None, .. } => { let collection = lookup_collection(*rel) .unwrap_or_else(|| panic!("mk_rule: unknown relation {:?}", rel)); let rel_name = &self .get_delayed_relation_name(*rel) .unwrap_or_else(|| self.get_relation(*rel).name.to_string()); with_prof_context(format!("{} clone", rel_name).as_ref(), || { collection.map(|x| x) }) } Rule::CollectionRule { rel, xform: Some(x), .. } => Self::xform_collection_ref( &lookup_collection(*rel) .unwrap_or_else(|| panic!("mk_rule: unknown relation {:?}", rel)), x, &arrangements, &lookup_collection, ), Rule::ArrangementRule { arr, xform, .. } => match arrangements.lookup_arr(*arr) { ArrangementFlavor::Local(DataflowArrangement::Map(arranged)) => { Self::xform_arrangement(&arranged, xform, &arrangements, &lookup_collection) } ArrangementFlavor::Foreign(DataflowArrangement::Map(arranged)) => { Self::xform_arrangement(&arranged, xform, &arrangements, &lookup_collection) } _ => panic!("Rule starts with a set arrangement {:?}", *arr), }, } } } /// Interface to a running datalog computation // This should not panic, so that the client has a chance to recover from failures // TODO: error messages impl RunningProgram { /// Controls forwarding of `TimelyEvent::Schedule` event to the CPU profiling thread. /// /// `enable = true` - enables forwarding. This can be expensive in large dataflows. /// `enable = false` - disables forwarding. pub fn enable_cpu_profiling(&self, enable: bool) { if let Some(profile_cpu) = self.profile_cpu.as_ref() { profile_cpu.store(enable, Ordering::SeqCst); } // TODO: Log warning if self profiling is disabled } pub fn enable_timely_profiling(&self, enable: bool) { if let Some(profile_timely) = self.profile_timely.as_ref() { profile_timely.store(enable, Ordering::SeqCst); } // TODO: Log warning if self profiling is disabled } /// Terminate program, killing all worker threads. pub fn stop(&mut self) -> Response<()> { if self.worker_guards.is_none() { // Already stopped. return Ok(()); } self.flush() .and_then(|_| self.broadcast(Msg::Stop)) .and_then(|_| { self.worker_guards.take().map_or(Ok(()), |worker_guards| { worker_guards .join() .into_iter() .filter_map(Result::err) .next() .map_or(Ok(()), Err) }) })?; Ok(()) } /// Start a transaction. Does not return a transaction handle, as there /// can be at most one transaction in progress at any given time. Fails /// if there is already a transaction in progress. pub fn transaction_start(&mut self) -> Response<()> { if self.transaction_in_progress { return Err("transaction already in progress".to_string()); } self.transaction_in_progress = true; Ok(()) } /// Commit a transaction. pub fn transaction_commit(&mut self) -> Response<()> { if !self.transaction_in_progress { return Err("transaction_commit: no transaction in progress".to_string()); } self.flush()?; self.delta_cleanup(); self.transaction_in_progress = false; Ok(()) } /// Rollback the transaction, undoing all changes. 
pub fn transaction_rollback(&mut self) -> Response<()> { if !self.transaction_in_progress { return Err("transaction_rollback: no transaction in progress".to_string()); } self.flush().and_then(|_| self.delta_undo()).map(|_| { self.transaction_in_progress = false; }) } /// Insert one record into input relation. Relations have set semantics, i.e., /// adding an existing record is a no-op. pub fn insert(&mut self, relid: RelId, v: DDValue) -> Response<()> { self.apply_updates(iter::once(Update::Insert { relid, v }), |_| Ok(())) } /// Insert one record into input relation or replace existing record with the same key. pub fn insert_or_update(&mut self, relid: RelId, v: DDValue) -> Response<()> { self.apply_updates(iter::once(Update::InsertOrUpdate { relid, v }), |_| Ok(())) } /// Remove a record if it exists in the relation. pub fn delete_value(&mut self, relid: RelId, v: DDValue) -> Response<()> { self.apply_updates(iter::once(Update::DeleteValue { relid, v }), |_| Ok(())) } /// Remove a key if it exists in the relation. pub fn
(&mut self, relid: RelId, k: DDValue) -> Response<()> { self.apply_updates(iter::once(Update::DeleteKey { relid, k }), |_| Ok(())) } /// Modify a key if it exists in the relation. pub fn modify_key( &mut self, relid: RelId, k: DDValue, m: Arc<dyn Mutator<DDValue> + Send + Sync>, ) -> Response<()> { self.apply_updates(iter::once(Update::Modify { relid, k, m }), |_| Ok(())) } /// Applies a single update. fn apply_update( &mut self, update: Update<DDValue>, filtered_updates: &mut Vec<Update<DDValue>>, ) -> Response<()> { let rel = self .relations .get_mut(&update.relid()) .ok_or_else(|| format!("apply_update: unknown input relation {}", update.relid()))?; match rel { RelationInstance::Stream { delta } => { Self::stream_update(delta, update, filtered_updates) } RelationInstance::Multiset { elements, delta } => { Self::mset_update(elements, delta, update, filtered_updates) } RelationInstance::Flat { elements, delta } => { Self::set_update(elements, delta, update, filtered_updates) } RelationInstance::Indexed { key_func, elements, delta, } => Self::indexed_set_update(*key_func, elements, delta, update, filtered_updates), } } /// Apply multiple insert and delete operations in one batch. /// Updates can only be applied to input relations (see `struct Relation`). pub fn apply_updates<I, F>(&mut self, updates: I, inspect: F) -> Response<()> where I: Iterator<Item = Update<DDValue>>, F: Fn(&Update<DDValue>) -> Response<()>, { if !self.transaction_in_progress { return Err("apply_updates: no transaction in progress".to_string()); } // Remove no-op updates to maintain set semantics let mut filtered_updates = Vec::new(); for update in updates { inspect(&update)?; self.apply_update(update, &mut filtered_updates)?; } if filtered_updates.is_empty() { return Ok(()); } let mut worker_round_robbin = self.worker_round_robbin.clone(); let chunk_size = cmp::max(filtered_updates.len() / self.senders.len(), 5000); filtered_updates .chunks(chunk_size) .map(|chunk| Msg::Update { updates: chunk.to_vec(), timestamp: self.timestamp, }) .zip(&mut worker_round_robbin) .try_for_each(|(update, worker_idx)| self.send(worker_idx, update))?; let next = worker_round_robbin.next().unwrap_or(0); self.worker_round_robbin = (0..self.senders.len()).cycle().skip(next); self.need_to_flush = true; Ok(()) } /// Deletes all values in an input table pub fn clear_relation(&mut self, relid: RelId) -> Response<()> { if !self.transaction_in_progress { return Err("clear_relation: no transaction in progress".to_string()); } let updates = { let rel = self .relations .get_mut(&relid) .ok_or_else(|| format!("clear_relation: unknown input relation {}", relid))?; match rel { RelationInstance::Stream { .. } => { return Err("clear_relation: operation not supported for streams".to_string()) } RelationInstance::Multiset { elements, .. } => { let mut updates: Vec<Update<DDValue>> = Vec::with_capacity(elements.len()); Self::delta_undo_updates(relid, elements, &mut updates); updates } RelationInstance::Flat { elements, .. } => { let mut updates: Vec<Update<DDValue>> = Vec::with_capacity(elements.len()); for v in elements.iter() { updates.push(Update::DeleteValue { relid, v: v.clone(), }); } updates } RelationInstance::Indexed { elements, .. } => { let mut updates: Vec<Update<DDValue>> = Vec::with_capacity(elements.len()); for k in elements.keys() { updates.push(Update::DeleteKey { relid, k: k.clone(), }); } updates } } }; self.apply_updates(updates.into_iter(), |_| Ok(())) } /// Returns all values in the arrangement with the specified key. 
pub fn query_arrangement(&mut self, arrid: ArrId, k: DDValue) -> Response<BTreeSet<DDValue>> { self._query_arrangement(arrid, Some(k)) } /// Returns the entire content of an arrangement. pub fn dump_arrangement(&mut self, arrid: ArrId) -> Response<BTreeSet<DDValue>> { self._query_arrangement(arrid, None) } fn _query_arrangement( &mut self, arrid: ArrId, k: Option<DDValue>, ) -> Response<BTreeSet<DDValue>> { // Send query and receive replies from all workers. If a key is specified, then at most // one worker will send a non-empty reply. self.broadcast(Msg::Query(arrid, k))?; let mut res: BTreeSet<DDValue> = BTreeSet::new(); let mut unknown = false; for (worker_index, chan) in self.reply_recv.iter().enumerate() { let reply = chan.recv().map_err(|e| { format!( "query_arrangement: failed to receive reply from worker {}: {:?}", worker_index, e ) })?; match reply { Reply::QueryRes(Some(mut vals)) => { if !vals.is_empty() { if res.is_empty() { std::mem::swap(&mut res, &mut vals); } else { res.append(&mut vals); } } } Reply::QueryRes(None) => { unknown = true; } repl => { return Err(format!( "query_arrangement: unexpected reply from worker {}: {:?}", worker_index, repl )); } } } if unknown { Err(format!("query_arrangement: unknown index: {:?}", arrid)) } else { Ok(res) } } /// increment the counter associated with value `x` in the delta-set /// `delta(x) == false` => remove entry (equivalent to delta(x):=0) /// `x not in delta => `delta(x) := true` /// `delta(x) == true` => error fn delta_inc(ds: &mut DeltaSet, x: &DDValue) { let entry = ds.entry(x.clone()); match entry { hash_map::Entry::Occupied(mut oe) => { // debug_assert!(!*oe.get()); let v = oe.get_mut(); if *v == -1 { oe.remove_entry(); } else { *v += 1; } } hash_map::Entry::Vacant(ve) => { ve.insert(1); } } } /// reverse of delta_inc fn delta_dec(ds: &mut DeltaSet, key: &DDValue) { let entry = ds.entry(key.clone()); match entry { hash_map::Entry::Occupied(mut oe) => { //debug_assert!(*oe.get()); let v = oe.get_mut(); if *v == 1 { oe.remove_entry(); } else { *v -= 1; } } hash_map::Entry::Vacant(ve) => { ve.insert(-1); } } } /// Update delta set of an input stream relation before performing an update. /// `ds` is delta since start of transaction. /// `x` is the value being inserted or deleted. /// `insert` indicates type of update (`true` for insert, `false` for delete) fn stream_update( ds: &mut DeltaSet, update: Update<DDValue>, updates: &mut Vec<Update<DDValue>>, ) -> Response<()> { match &update { Update::Insert { v, .. } => { Self::delta_inc(ds, v); } Update::DeleteValue { v, .. } => { Self::delta_dec(ds, v); } Update::InsertOrUpdate { relid, .. } => { return Err(format!( "Cannot perform insert_or_update operation on relation {} that does not have a primary key", relid, )); } Update::DeleteKey { relid, .. } => { return Err(format!( "Cannot delete by key from relation {} that does not have a primary key", relid, )); } Update::Modify { relid, .. } => { return Err(format!( "Cannot modify record in relation {} that does not have a primary key", relid, )); } }; updates.push(update); Ok(()) } /// Update value and delta multisets of an input multiset relation before performing an update. /// `s` is the current content of the relation. /// `ds` is delta since start of transaction. /// `x` is the value being inserted or deleted. /// `insert` indicates type of update (`true` for insert, `false` for delete). /// Returns `true` if the update modifies the relation, i.e., it's not a no-op. 
fn mset_update( s: &mut ValMSet, ds: &mut DeltaSet, upd: Update<DDValue>, updates: &mut Vec<Update<DDValue>>, ) -> Response<()> { match &upd { Update::Insert { v, .. } => { Self::delta_inc(s, v); Self::delta_inc(ds, v); } Update::DeleteValue { v, .. } => { Self::delta_dec(s, v); Self::delta_dec(ds, v); } Update::InsertOrUpdate { relid, .. } => { return Err(format!( "Cannot perform insert_or_update operation on relation {} that does not have a primary key", relid )); } Update::DeleteKey { relid, .. } => { return Err(format!( "Cannot delete by key from relation {} that does not have a primary key", relid )); } Update::Modify { relid, .. } => { return Err(format!( "Cannot modify record in relation {} that does not have a primary key", relid )); } }; updates.push(upd); Ok(()) } /// Update value set and delta set of an input relation before performing an update. /// `s` is the current content of the relation. /// `ds` is delta since start of transaction. /// `x` is the value being inserted or deleted. /// `insert` indicates type of update (`true` for insert, `false` for delete). /// Returns `true` if the update modifies the relation, i.e., it's not a no-op. fn set_update( s: &mut ValSet, ds: &mut DeltaSet, upd: Update<DDValue>, updates: &mut Vec<Update<DDValue>>, ) -> Response<()> { let ok = match &upd { Update::Insert { v, .. } => { let new = s.insert(v.clone()); if new { Self::delta_inc(ds, v); } new } Update::DeleteValue { v, .. } => { let present = s.remove(&v); if present { Self::delta_dec(ds, v); } present } Update::InsertOrUpdate { relid, .. } => { return Err(format!( "Cannot perform insert_or_update operation on relation {} that does not have a primary key", relid, )); } Update::DeleteKey { relid, .. } => { return Err(format!( "Cannot delete by key from relation {} that does not have a primary key", relid, )); } Update::Modify { relid, .. } => { return Err(format!( "Cannot modify record in relation {} that does not have a primary key", relid, )); } }; if ok { updates.push(upd); } Ok(()) } /// insert: /// key exists in `s`: /// - error /// key not in `s`: /// - s.insert(x) /// - ds(x)++; /// delete: /// key not in `s` /// - return error /// key in `s` with value `v`: /// - s.delete(key) /// - ds(v)-- fn indexed_set_update( key_func: fn(&DDValue) -> DDValue, s: &mut IndexedValSet, ds: &mut DeltaSet, upd: Update<DDValue>, updates: &mut Vec<Update<DDValue>>, ) -> Response<()> { match upd { Update::Insert { relid, v } => match s.entry(key_func(&v)) { hash_map::Entry::Occupied(_) => Err(format!( "Insert: duplicate key '{:?}' in value '{:?}'", key_func(&v), v )), hash_map::Entry::Vacant(ve) => { ve.insert(v.clone()); Self::delta_inc(ds, &v); updates.push(Update::Insert { relid, v }); Ok(()) } }, Update::InsertOrUpdate { relid, v } => match s.entry(key_func(&v)) { hash_map::Entry::Occupied(mut oe) => { // Delete old value. let old = oe.get().clone(); Self::delta_dec(ds, oe.get()); updates.push(Update::DeleteValue { relid, v: old }); // Insert new value. Self::delta_inc(ds, &v); updates.push(Update::Insert { relid, v: v.clone(), }); // Update store *oe.get_mut() = v; Ok(()) } hash_map::Entry::Vacant(ve) => { ve.insert(v.clone()); Self::delta_inc(ds, &v); updates.push(Update::Insert { relid, v }); Ok(()) } }, Update::DeleteValue { relid, v } => match s.entry(key_func(&v)) { hash_map::Entry::Occupied(oe) => { if *oe.get() != v { Err(format!("DeleteValue: key exists but with a different value. 
Value specified: '{:?}'; existing value: '{:?}'", v, oe.get())) } else { Self::delta_dec(ds, oe.get()); oe.remove_entry(); updates.push(Update::DeleteValue { relid, v }); Ok(()) } } hash_map::Entry::Vacant(_) => { Err(format!("DeleteValue: key not found '{:?}'", key_func(&v))) } }, Update::DeleteKey { relid, k } => match s.entry(k.clone()) { hash_map::Entry::Occupied(oe) => { let old = oe.get().clone(); Self::delta_dec(ds, oe.get()); oe.remove_entry(); updates.push(Update::DeleteValue { relid, v: old }); Ok(()) } hash_map::Entry::Vacant(_) => Err(format!("DeleteKey: key not found '{:?}'", k)), }, Update::Modify { relid, k, m } => match s.entry(k.clone()) { hash_map::Entry::Occupied(mut oe) => { let new = oe.get_mut(); let old: DDValue = (*new).clone(); m.mutate(new)?; Self::delta_dec(ds, &old); updates.push(Update::DeleteValue { relid, v: old }); Self::delta_inc(ds, &new); updates.push(Update::Insert { relid, v: new.clone(), }); Ok(()) } hash_map::Entry::Vacant(_) => Err(format!("Modify: key not found '{:?}'", k)), }, } } /// Returns a reference to indexed input relation content. /// If called in the middle of a transaction, returns state snapshot including changes /// made by the current transaction. pub fn get_input_relation_index(&self, relid: RelId) -> Response<&IndexedValSet> { match self.relations.get(&relid) { None => Err(format!("unknown relation {}", relid)), Some(RelationInstance::Indexed { elements, .. }) => Ok(elements), Some(_) => Err(format!("not an indexed relation {}", relid)), } } /// Returns a reference to a flat input relation content. /// If called in the middle of a transaction, returns state snapshot including changes /// made by the current transaction. pub fn get_input_relation_data(&self, relid: RelId) -> Response<&ValSet> { match self.relations.get(&relid) { None => Err(format!("unknown relation {}", relid)), Some(RelationInstance::Flat { elements, .. }) => Ok(elements), Some(_) => Err(format!("not a flat relation {}", relid)), } } /// Returns a reference to an input multiset content. /// If called in the middle of a transaction, returns state snapshot including changes /// made by the current transaction. pub fn get_input_multiset_data(&self, relid: RelId) -> Response<&ValMSet> { match self.relations.get(&relid) { None => Err(format!("unknown relation {}", relid)), Some(RelationInstance::Multiset { elements, .. }) => Ok(elements), Some(_) => Err(format!("not a flat relation {}", relid)), } } /* /// Returns a reference to delta accumulated by the current transaction pub fn relation_delta(&mut self, relid: RelId) -> Response<&DeltaSet<V>> { if !self.transaction_in_progress { return resp_from_error!("no transaction in progress"); }; self.flush().and_then(move |_| { match self.relations.get_mut(&relid) { None => resp_from_error!("unknown relation"), Some(rel) => Ok(&rel.delta) } }) } */ /// Send message to a worker thread. fn send(&self, worker_index: usize, msg: Msg) -> Response<()> { match self.senders[worker_index].send(msg) { Ok(()) => { // Worker may be blocked in `step_or_park`. Unpark it to ensure // the message is received. self.worker_guards.as_ref().unwrap().guards()[worker_index] .thread() .unpark(); Ok(()) } Err(_) => Err(format!( "failed to communicate with timely dataflow thread {}", worker_index )), } } /// Broadcast message to all worker threads. fn broadcast(&self, msg: Msg) -> Response<()> { for worker_index in 0..self.senders.len() { self.send(worker_index, msg.clone())?; } Ok(()) } /// Clear delta sets of all input relations on transaction commit. 
fn delta_cleanup(&mut self) { for rel in self.relations.values_mut() { rel.delta_mut().clear(); } } fn delta_undo_updates(relid: RelId, ds: &DeltaSet, updates: &mut Vec<Update<DDValue>>) { // first delete, then insert to avoid duplicate key // errors in `apply_updates()` for (k, w) in ds { if *w >= 0 { for _ in 0..*w { updates.push(Update::DeleteValue { relid, v: k.clone(), }); } } } for (k, w) in ds { if *w < 0 { for _ in 0..(-*w) { updates.push(Update::Insert { relid, v: k.clone(), }); } } } } /// Reverse all changes recorded in delta sets to rollback the transaction. fn delta_undo(&mut self) -> Response<()> { let mut updates = Vec::with_capacity(self.relations.len()); for (relid, rel) in &self.relations { Self::delta_undo_updates(*relid, rel.delta(), &mut updates); } // println!("updates: {:?}", updates); self.apply_updates(updates.into_iter(), |_| Ok(())) .and_then(|_| self.flush()) .map(|_| { /* validation: all deltas must be empty */ for rel in self.relations.values() { //println!("delta: {:?}", *d); debug_assert!(rel.delta().is_empty()); } }) } /// Propagates all changes through the dataflow pipeline. fn flush(&mut self) -> Response<()> { if !self.need_to_flush { return Ok(()); } self.broadcast(Msg::Flush { advance_to: self.timestamp + 1, }) .and_then(|()| { self.timestamp += 1; self.need_to_flush = false; self.await_flush_ack() }) } /// Wait for all workers to complete the `Flush` command. This guarantees /// that all outputs have been produced and we have successfully committed /// the current transaction. fn await_flush_ack(&self) -> Response<()> { for (worker_index, receiver) in self.reply_recv.iter().enumerate() { match receiver.recv() { Err(_) => { return Err(format!( "failed to receive flush ack message from worker {}", worker_index )) } Ok(Reply::FlushAck) => (), Ok(msg) => { return Err(format!( "received unexpected reply to flush request from worker {}: {:?}", worker_index, msg, )) } } } Ok(()) } } impl Drop for RunningProgram { fn drop(&mut self) { let _ = self.stop(); } }
delete_key
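The transaction API in the file above is easiest to see end to end. Below is a minimal usage sketch, not part of the original file: `program`, `config`, `val`, and `REL_ID` are hypothetical placeholders, and the crate's own types (`Program`, `Config`, `RelId`, `DDValue`) are assumed to be in scope.

// Minimal sketch: drive one transaction through a RunningProgram.
fn example(program: &Program, config: Config, val: DDValue) -> Result<(), String> {
    const REL_ID: RelId = 0; // hypothetical input relation id

    // `run` spawns the timely workers and waits for the initial flush.
    let mut running = program.run(config)?;

    // At most one transaction may be open at a time.
    running.transaction_start()?;

    // Input relations have set semantics: re-inserting an existing record is a no-op.
    running.insert(REL_ID, val)?;

    // Commit flushes updates through the dataflow and clears the per-relation
    // delta sets; `transaction_rollback` would undo them instead.
    running.transaction_commit()?;

    // Calling `stop` explicitly lets us observe errors that `Drop` silently ignores.
    running.stop()
}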
par_iter.rs
// Copyright (c) 2021 The Lutino Projects // // Use of this source code is governed by an MIT-style // license that can be found in the LICENSE file or at // https://opensource.org/licenses/MIT. use nh_job_system::iter::ParallelIterator; use nh_job_system::traits::{Folder, Producer, UnindexedConsumer, UnindexedProducer}; use nh_job_system::JobSystem; use crate::iter::indexed::{IndexedIter, TrustedRandomAccess}; pub struct ParallelQueryIter<'env, T: TrustedRandomAccess> { iter: IndexedIter<T>, env: &'env JobSystem, } impl<'env, T: TrustedRandomAccess> ParallelQueryIter<'env, T> { pub fn new(iter: T, env: &'env JobSystem) -> Self { Self { iter: IndexedIter::new(iter), env, } } } impl<'env, T> Producer for ParallelQueryIter<'env, T> where T: TrustedRandomAccess + Send + Sync, { type IntoIter = IndexedIter<T>; type Item = <IndexedIter<T> as Iterator>::Item; fn into_iter(self) -> Self::IntoIter { self.iter } fn split_at(self, index: usize) -> (Self, Self) { let (left, right) = TrustedRandomAccess::split_at(self.iter, index); ( ParallelQueryIter { iter: left, env: self.env, }, ParallelQueryIter { iter: right, env: self.env, }, ) } } impl<'env, T> UnindexedProducer for ParallelQueryIter<'env, T> where T: TrustedRandomAccess + Send + Sync, { type Item = <IndexedIter<T> as Iterator>::Item; fn
(self) -> (Self, Option<Self>) { let len = ExactSizeIterator::len(&self.iter); let index = len / 2; let (left, right) = TrustedRandomAccess::split_at(self.iter, index); ( ParallelQueryIter { iter: right, env: self.env, }, if ExactSizeIterator::len(&left) > 0 { Some(ParallelQueryIter { iter: left, env: self.env, }) } else { None }, ) } fn fold_with<F>(self, folder: F) -> F where F: Folder<Self::Item>, { folder.consume_iter(self.iter) } } // impl<T> IntoIndexableProducer for Par<T> { // type Item = T; // type Producer = Par<T>; // fn len(&self) -> usize; // fn into_producer(self) -> Self::Producer; // } impl<'env, T> ParallelIterator<'env> for ParallelQueryIter<'env, T> where T: TrustedRandomAccess + Send + Sync, <T as TrustedRandomAccess>::Item: Send, { type Item = T::Item; fn drive_unindexed<C>(self, consumer: C) -> C::Result where C: UnindexedConsumer<Self::Item>, { self.env.bridge_unindexed(self, consumer) } #[inline(always)] fn get_env(&self) -> &'env JobSystem { self.env } } // impl<T> IndexedParallelIterator for Par<T> // where // T: TrustedRandomAccess + Send + Sync, // <T as TrustedRandomAccess>::Item: Send, // { // fn len(&self) -> usize { ExactSizeIterator::len(&self.iter) } // fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result { bridge(self, // consumer) } // fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output { // callback.callback(self) // } // }
split
conn_other.go
// +build !darwin package dbus import ( "bytes" "errors" "os/exec" ) func
() (*Conn, error) { cmd := exec.Command("dbus-launch") b, err := cmd.CombinedOutput() if err != nil { return nil, err } i := bytes.IndexByte(b, '=') j := bytes.IndexByte(b, '\n') if i == -1 || j == -1 { return nil, errors.New("dbus: couldn't determine address of session bus") } return Dial(string(b[i+1 : j])) }
sessionBusPlatform
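// Illustrative sketch (separate from conn_other.go): the function above slices
// dbus-launch output between the first '=' and the first '\n' to obtain the
// session bus address. The sample output below is hypothetical; with it, Dial
// would receive "unix:abstract=/tmp/dbus-demo,guid=abc".
func exampleAddressSlice() string {
	b := []byte("DBUS_SESSION_BUS_ADDRESS=unix:abstract=/tmp/dbus-demo,guid=abc\nDBUS_SESSION_BUS_PID=4242\n")
	i := bytes.IndexByte(b, '=')
	j := bytes.IndexByte(b, '\n')
	return string(b[i+1 : j]) // unix:abstract=/tmp/dbus-demo,guid=abc
}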
FlagIconTG.tsx
import * as React from "react"; interface Props extends React.ComponentProps<"svg"> { size?: number; width?: number; height?: number; } const FlagIconTG = ({ size = 15, width = 21, height = 15, ...props }: Props) => { if (size !== height) { width = width * (size / height); height = height * (size / height); } return ( <svg width={width} height={height} viewBox="0 0 21 15" {...props}> <defs> <linearGradient id="prefix__a" x1="50%" x2="50%" y1="0%" y2="100%"> <stop offset="0%" stopColor="#FFF" /> <stop offset="100%" stopColor="#F0F0F0" /> </linearGradient> <linearGradient id="prefix__b" x1="50%" x2="50%" y1="0%" y2="100%"> <stop offset="0%" stopColor="#FFD44D" /> <stop offset="100%" stopColor="#FFCD2F" /> </linearGradient> <linearGradient id="prefix__c" x1="50%" x2="50%" y1="0%" y2="100%"> <stop offset="0%" stopColor="#159A74" /> <stop offset="100%" stopColor="#0C6A4F" /> </linearGradient> <linearGradient id="prefix__d" x1="50%" x2="50%" y1="0%" y2="100%"> <stop offset="0%" stopColor="#ED1F45" /> <stop offset="100%" stopColor="#D01739" /> </linearGradient> </defs> <g fill="none" fillRule="evenodd"> <path fill="url(#prefix__a)" d="M0 0h21v15H0z" /> <path fill="url(#prefix__b)" d="M0 0h21v15H0z" />
/> <path fill="url(#prefix__d)" d="M0 0h9v9H0z" /> <path fill="url(#prefix__a)" d="M4.5 5.67L2.737 6.927l.65-2.065-1.74-1.29 2.165-.019L4.5 1.5l.688 2.053 2.165.02-1.74 1.289.65 2.065z" /> </g> </svg> ); }; export default FlagIconTG;
<path fill="url(#prefix__c)" d="M9 0h12v3H9V0zm0 6h12v3H9V6zm-9 6h21v3H0v-3z"
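// Usage sketch (illustrative wrapper, not part of the file above): when `size`
// differs from `height`, both dimensions are rescaled by size/height, which
// preserves the 21:15 viewBox ratio; size={30} therefore renders a 42x30 flag.
const FlagRowExample = () => (
  <>
    <FlagIconTG /> {/* default 21x15 */}
    <FlagIconTG size={30} /> {/* rescaled to 42x30 */}
  </>
);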
status.rs
#[doc = "Reader of register STATUS"] pub type R = crate::R<u32, super::STATUS>; #[doc = "Writer for register STATUS"] pub type W = crate::W<u32, super::STATUS>; #[doc = "Register STATUS `reset()`'s with value 0"] impl crate::ResetValue for super::STATUS { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `full`"] pub type FULL_R = crate::R<bool, bool>; #[doc = "Write proxy for field `full`"] pub struct FULL_W<'a> { w: &'a mut W, } impl<'a> FULL_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01); self.w } } #[doc = "Reader of field `have`"] pub type HAVE_R = crate::R<bool, bool>; #[doc = "Write proxy for field `have`"] pub struct HAVE_W<'a> { w: &'a mut W, } impl<'a> HAVE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W
} impl R { #[doc = "Bit 0 - `0` if more data can fit into the IN FIFO."] #[inline(always)] pub fn full(&self) -> FULL_R { FULL_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - `1` if data can be read from the OUT FIFO."] #[inline(always)] pub fn have(&self) -> HAVE_R { HAVE_R::new(((self.bits >> 1) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - `0` if more data can fit into the IN FIFO."] #[inline(always)] pub fn full(&mut self) -> FULL_W { FULL_W { w: self } } #[doc = "Bit 1 - `1` if data can be read from the OUT FIFO."] #[inline(always)] pub fn have(&mut self) -> HAVE_W { HAVE_W { w: self } } }
{ self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1); self.w }
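// Usage sketch, assuming the usual svd2rust access pattern; `periph` is a
// hypothetical handle that owns this STATUS register:
//
//     let r = periph.status.read();
//     let in_fifo_has_room = !r.full().bit(); // bit 0: 0 => room in IN FIFO
//     let out_fifo_has_data = r.have().bit(); // bit 1: 1 => data in OUT FIFO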
app-routing.module.ts
import { NgModule } from "@angular/core"; import { RouterModule } from "@angular/router"; import { WelcomeComponent } from "./home/welcome.component"; import { PageNotFoundComponent } from "./page-not-found.component"; import { AuthGuard } from "./user/auth-guard.service"; import { SelectiveStrategy } from "./selective-strategy.service";
RouterModule.forRoot( [ { path: "welcome", component: WelcomeComponent }, { path: "products", canActivate: [AuthGuard], data: { preload: true }, loadChildren: "app/products/product.module#ProductModule" }, { path: "", redirectTo: "welcome", pathMatch: "full" }, { path: "**", component: PageNotFoundComponent } ], { enableTracing: true, preloadingStrategy: SelectiveStrategy } ) ], exports: [RouterModule], providers: [SelectiveStrategy] }) export class AppRoutingModule {}
@NgModule({ imports: [
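// A minimal sketch of what SelectiveStrategy might look like; the real service
// is imported from ./selective-strategy.service above and may differ. The idea:
// only routes flagged with data: { preload: true } (like "products") preload.
import { Injectable } from "@angular/core";
import { PreloadingStrategy, Route } from "@angular/router";
import { Observable, of } from "rxjs";

@Injectable()
export class SelectiveStrategySketch implements PreloadingStrategy {
  preload(route: Route, load: () => Observable<any>): Observable<any> {
    return route.data && route.data["preload"] ? load() : of(null);
  }
}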
M3-2.py
somme = 0
n = 5 # any value i = 1 while i <= n: somme = somme + i i = i + 1 print("The sum of the first", n, "integers is:", somme)
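# Illustrative check: the loop above computes the triangular number
# n * (n + 1) // 2; for n = 5 both forms give 15.
n = 5
assert sum(range(1, n + 1)) == n * (n + 1) // 2 == 15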
config.rs
// Copyright (c) 2018-2022 The MobileCoin Foundation #![deny(missing_docs)] //! Configuration parameters for mobilecoind use clap::Parser; use displaydoc::Display; use mc_attest_verifier::{MrSignerVerifier, Verifier, DEBUG_ENCLAVE}; use mc_common::{logger::Logger, ResponderId}; use mc_connection::{ConnectionManager, HardcodedCredentialsProvider, ThickClient}; use mc_consensus_scp::QuorumSet; use mc_fog_report_connection::GrpcFogReportConnection; use mc_fog_report_validation::FogResolver; use mc_mobilecoind_api::MobilecoindUri; use mc_sgx_css::Signature; use mc_transaction_core::TokenId; use mc_util_parse::{load_css_file, parse_duration_in_seconds}; use mc_util_uri::{ConnectionUri, ConsensusClientUri, FogUri}; #[cfg(feature = "ip-check")] use reqwest::{ blocking::Client, header::{HeaderMap, HeaderValue, CONTENT_TYPE}, };
#[derive(Debug, Parser)] #[clap(name = "mobilecoind", about = "The MobileCoin client daemon.")] pub struct Config { /// Path to ledger db (lmdb). #[clap( long, default_value = "/tmp/ledgerdb", parse(from_os_str), env = "MC_LEDGER_DB" )] pub ledger_db: PathBuf, /// Path to existing ledger db that contains the origin block, used when /// initializing new ledger dbs. #[clap(long, env = "MC_LEDGER_DB_BOOTSTRAP")] pub ledger_db_bootstrap: Option<String>, /// Path to watcher db (lmdb). #[clap(long, parse(from_os_str), env = "MC_WATCHER_DB")] pub watcher_db: Option<PathBuf>, /// Peers config. #[clap(flatten)] pub peers_config: PeersConfig, /// Quorum set for ledger syncing. By default, the quorum set would include /// all peers. /// /// The quorum set is represented in JSON. For example: /// {"threshold":1,"members":[{"type":"Node","args":"node2.test.mobilecoin. /// com:443"},{"type":"Node","args":"node3.test.mobilecoin.com:443"}]} #[clap(long, parse(try_from_str = parse_quorum_set_from_json), env = "MC_QUORUM_SET")] quorum_set: Option<QuorumSet<ResponderId>>, /// URLs to use for transaction data. /// /// For example: https://s3-us-west-1.amazonaws.com/mobilecoin.chain/node1.test.mobilecoin.com/ #[clap( long = "tx-source-url", required_unless_present = "offline", use_value_delimiter = true, env = "MC_TX_SOURCE_URL" )] pub tx_source_urls: Option<Vec<String>>, /// How many seconds to wait between polling. #[clap(long, default_value = "5", parse(try_from_str = parse_duration_in_seconds), env = "MC_POLL_INTERVAL")] pub poll_interval: Duration, // Mobilecoind specific arguments /// Path to mobilecoind database used to store transactions and accounts. #[clap(long, parse(from_os_str), env = "MC_MOBILECOIND_DB")] pub mobilecoind_db: Option<PathBuf>, /// URI to listen on and serve requests from. #[clap(long, env = "MC_LISTEN_URI")] pub listen_uri: Option<MobilecoindUri>, /// Number of worker threads to use for view key scanning. /// Defaults to number of logical CPU cores. #[clap(long, env = "MC_NUM_WORKERS")] pub num_workers: Option<usize>, /// Offline mode. #[clap(long, env = "MC_OFFLINE")] pub offline: bool, /// Fog ingest enclave CSS file (needed in order to enable sending /// transactions to fog recipients). #[clap(long, parse(try_from_str = load_css_file), env = "MC_FOG_INGEST_ENCLAVE_CSS")] pub fog_ingest_enclave_css: Option<Signature>, /// Automatically migrate the ledger db into the most recent version. #[clap(long, env = "MC_LEDGER_DB_MIGRATE")] pub ledger_db_migrate: bool, /// Token id #[clap(long, env = "MC_TOKEN_ID", default_value = "0")] pub token_id: TokenId, } fn parse_quorum_set_from_json(src: &str) -> Result<QuorumSet<ResponderId>, String> { let quorum_set: QuorumSet<ResponderId> = serde_json::from_str(src) .map_err(|err| format!("Error parsing quorum set {}: {:?}", src, err))?; if !quorum_set.is_valid() { return Err(format!("Invalid quorum set: {:?}", quorum_set)); } Ok(quorum_set) } /// Error type. #[derive(Display, Debug)] pub enum ConfigError { /// Error parsing json {0} Json(serde_json::Error), /// Error handling reqwest {0} Reqwest(reqwest::Error), /// Invalid country InvalidCountry, /// Data missing in the response {0} DataMissing(String), } impl From<serde_json::Error> for ConfigError { fn from(e: serde_json::Error) -> Self { Self::Json(e) } } impl From<reqwest::Error> for ConfigError { fn from(e: reqwest::Error) -> Self { Self::Reqwest(e) } } impl Config { /// Parse the quorum set. /// Panics on error. 
pub fn quorum_set(&self) -> QuorumSet<ResponderId> { // If we have an explicit quorum set, use that. if let Some(quorum_set) = &self.quorum_set { return quorum_set.clone(); } // Otherwise create a quorum set that includes all of the peers we know about. let node_ids = self.peers_config.responder_ids(); QuorumSet::new_with_node_ids(node_ids.len() as u32, node_ids) } /// Get the attestation verifier used to verify fog reports when sending to /// fog recipients pub fn get_fog_ingest_verifier(&self) -> Option<Verifier> { self.fog_ingest_enclave_css.as_ref().map(|signature| { let mr_signer_verifier = { let mut mr_signer_verifier = MrSignerVerifier::new( signature.mrsigner().into(), signature.product_id(), signature.version(), ); mr_signer_verifier.allow_hardening_advisories(&["INTEL-SA-00334"]); mr_signer_verifier }; let mut verifier = Verifier::default(); verifier.debug(DEBUG_ENCLAVE).mr_signer(mr_signer_verifier); verifier }) } /// Get the function which creates FogResolver given a list of recipient /// addresses The string error should be mapped by invoker of this /// factory to Error::FogError pub fn get_fog_resolver_factory( &self, logger: Logger, ) -> Arc<dyn Fn(&[FogUri]) -> Result<FogResolver, String> + Send + Sync> { let env = Arc::new( grpcio::EnvBuilder::new() .name_prefix("FogPubkeyResolver-RPC".to_string()) .build(), ); let conn = GrpcFogReportConnection::new(env, logger); let verifier = self.get_fog_ingest_verifier(); Arc::new(move |fog_uris| -> Result<FogResolver, String> { if fog_uris.is_empty() { Ok(Default::default()) } else if let Some(verifier) = verifier.as_ref() { let report_responses = conn .fetch_fog_reports(fog_uris.iter().cloned()) .map_err(|err| format!("Failed fetching fog reports: {}", err))?; Ok(FogResolver::new(report_responses, verifier) .map_err(|err| format!("Invalid fog url: {}", err))?) } else { Err( "Some recipients have fog, but no fog ingest report verifier was configured" .to_string(), ) } }) } /// Ensure local IP address is valid. /// /// Uses ipinfo.io for getting details about IP address. /// /// Note, both of these services are free tier and rate-limited. A longer /// term solution would be to filter on the consensus server. #[cfg(feature = "ip-check")] pub fn validate_host(&self) -> Result<(), ConfigError> { let client = Client::builder().gzip(true).use_rustls_tls().build()?; let mut json_headers = HeaderMap::new(); json_headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); let response = client .get("https://ipinfo.io/json/") .headers(json_headers) .send()? .error_for_status()?; let data = response.text()?; let data_json: serde_json::Value = serde_json::from_str(&data)?; let data_missing_err = Err(ConfigError::DataMissing(data_json.to_string())); let country: &str = match data_json["country"].as_str() { Some(c) => c, None => return data_missing_err, }; let region: &str = match data_json["region"].as_str() { Some(r) => r, None => return data_missing_err, }; let err = Err(ConfigError::InvalidCountry); match country { "IR" | "SY" | "CU" | "KP" => err, "UA" => match region { "Crimea" => err, _ => Ok(()), }, _ => Ok(()), } } /// Ensure local IP address is valid /// /// This does nothing when ip-check is disabled. #[cfg(not(feature = "ip-check"))] pub fn validate_host(&self) -> Result<(), ConfigError> { Ok(()) } } /// Wrapper for configuring and parsing peer URIs. #[derive(Clone, Debug, Parser)] pub struct PeersConfig { /// Validator nodes to connect to. 
/// Sample usages: /// --peer mc://foo:123 --peer mc://bar:456 /// --peer mc://foo:123,mc://bar:456 /// env MC_PEER=mc://foo:123,mc://bar:456 #[clap( long = "peer", required_unless_present = "offline", env = "MC_PEER", use_value_delimiter = true )] pub peers: Option<Vec<ConsensusClientUri>>, } impl PeersConfig { /// Parse the peer URIs as ResponderIds. pub fn responder_ids(&self) -> Vec<ResponderId> { self.peers .as_ref() .unwrap() .iter() .map(|peer| { peer.responder_id().unwrap_or_else(|err| { panic!("Could not get responder_id from peer URI {}: {}", peer, err) }) }) .collect() } /// Instantiate a client for each of the peer URIs. pub fn create_peers( &self, verifier: Verifier, grpc_env: Arc<grpcio::Environment>, logger: Logger, ) -> Vec<ThickClient<HardcodedCredentialsProvider>> { self.peers .as_ref() .unwrap() .iter() .map(|client_uri| { ThickClient::new( client_uri.clone(), verifier.clone(), grpc_env.clone(), HardcodedCredentialsProvider::from(client_uri), logger.clone(), ) .expect("Could not create thick client.") }) .collect() } /// Instantiate a ConnectionManager for all the peers. pub fn create_peer_manager( &self, verifier: Verifier, logger: &Logger, ) -> ConnectionManager<ThickClient<HardcodedCredentialsProvider>> { let grpc_env = Arc::new( grpcio::EnvBuilder::new() .cq_count(1) .name_prefix("peer") .build(), ); let peers = self.create_peers(verifier, grpc_env, logger.clone()); ConnectionManager::new(peers, logger.clone()) } }
use std::{path::PathBuf, sync::Arc, time::Duration}; /// Configuration parameters for mobilecoind
install.go
/* Copyright 2019 The pdfcpu Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package font provides support for TrueType fonts. package font import ( "bytes" "encoding/binary" "encoding/gob" "fmt" "io" "os" "path/filepath" "reflect" "sort" "strings" "unicode/utf16" "github.com/pirogom/pdfcpu/pkg/log" "github.com/pkg/errors" ) const ( sfntVersionTrueType = "\x00\x01\x00\x00" sfntVersionTrueTypeApple = "true" sfntVersionCFF = "OTTO" ttfHeadMagicNumber = 0x5F0F3CF5 ttcTag = "ttcf" ) type ttf struct { PostscriptName string // name: NameID 6 Protected bool // OS/2: fsType UnitsPerEm int // head: unitsPerEm Ascent int // OS/2: sTypoAscender Descent int // OS/2: sTypoDescender CapHeight int // OS/2: sCapHeight FirstChar uint16 // OS/2: fsFirstCharIndex LastChar uint16 // OS/2: fsLastCharIndex UnicodeRange [4]uint32 // OS/2: Unicode Character Range LLx, LLy, URx, URy float64 // head: xMin, yMin, xMax, yMax (fontbox) ItalicAngle float64 // post: italicAngle FixedPitch bool // post: isFixedPitch Bold bool // OS/2: usWeightClass == 7 HorMetricsCount int // hhea: numOfLongHorMetrics GlyphCount int // maxp: numGlyphs GlyphWidths []int // hmtx: fd.HorMetricsCount.advanceWidth Chars map[uint32]uint16 // cmap: Unicode character to glyph index ToUnicode map[uint16]uint32 // map glyph index to unicode character Planes map[int]bool // used Unicode planes FontFile []byte } func (fd ttf) String() string { return fmt.Sprintf(` PostscriptName = %s Protected = %t UnitsPerEm = %d Ascent = %d Descent = %d CapHeight = %d FirstChar = %d LastChar = %d FontBoundingBox = (%.2f, %.2f, %.2f, %.2f) ItalicAngle = %.2f FixedPitch = %t Bold = %t HorMetricsCount = %d GlyphCount = %d`, fd.PostscriptName, fd.Protected, fd.UnitsPerEm, fd.Ascent, fd.Descent, fd.CapHeight, fd.FirstChar, fd.LastChar, fd.LLx, fd.LLy, fd.URx, fd.URy, fd.ItalicAngle, fd.FixedPitch, fd.Bold, fd.HorMetricsCount, fd.GlyphCount, ) } func (fd ttf) toPDFGlyphSpace(i int) int { return i * 1000 / fd.UnitsPerEm } type myUint32 []uint32 func (f myUint32) Len() int { return len(f) } func (f myUint32) Less(i, j int) bool { return f[i] < f[j] } func (f myUint32) Swap(i, j int) { f[i], f[j] = f[j], f[i] } func (fd ttf) PrintChars() string { var min = uint16(0xFFFF) var max uint16 var sb strings.Builder sb.WriteByte(0x0a) keys := make(myUint32, 0, len(fd.Chars)) for k := range fd.Chars { keys = append(keys, k) } sort.Sort(keys) for _, c := range keys { g := fd.Chars[c] if g > max { max = g } if g < min { min = g } sb.WriteString(fmt.Sprintf("#%x -> #%x(%d)\n", c, g, g)) } fmt.Printf("using glyphs[%08x,%08x] [%d,%d]\n", min, max, min, max) fmt.Printf("using glyphs #%x - #%x (%d-%d)\n", min, max, min, max) return sb.String() } type table struct { chksum uint32 off uint32 size uint32 padded uint32 data []byte } func (t table) uint16(off int) uint16 { return binary.BigEndian.Uint16(t.data[off:]) } func (t table) int16(off int) int16 { return int16(t.uint16(off)) } func (t table) uint32(off int) uint32 { return binary.BigEndian.Uint32(t.data[off:]) } func (t table) fixed32(off int) float64 { 
return float64(t.uint32(off)) / 65536.0 } func (t table) parseFontHeaderTable(fd *ttf) error { // table "head" magic := t.uint32(12) if magic != ttfHeadMagicNumber { return fmt.Errorf("parseHead: wrong magic number") } unitsPerEm := t.uint16(18) //fmt.Printf("unitsPerEm: %d\n", unitsPerEm) fd.UnitsPerEm = int(unitsPerEm) llx := t.int16(36) //fmt.Printf("llx: %d\n", llx) fd.LLx = float64(fd.toPDFGlyphSpace(int(llx))) lly := t.int16(38) //fmt.Printf("lly: %d\n", lly) fd.LLy = float64(fd.toPDFGlyphSpace(int(lly))) urx := t.int16(40) //fmt.Printf("urx: %d\n", urx) fd.URx = float64(fd.toPDFGlyphSpace(int(urx))) ury := t.int16(42) //fmt.Printf("ury: %d\n", ury) fd.URy = float64(fd.toPDFGlyphSpace(int(ury))) return nil } func uint16ToBigEndianBytes(i uint16) []byte { b := make([]byte, 2) binary.BigEndian.PutUint16(b, i) return b } func uint32ToBigEndianBytes(i uint32) []byte { b := make([]byte, 4) binary.BigEndian.PutUint32(b, i) return b } func utf16BEToString(bb []byte) string { buf := make([]uint16, len(bb)/2) for i := 0; i < len(buf); i++ { buf[i] = binary.BigEndian.Uint16(bb[2*i:]) } return string(utf16.Decode(buf)) } func (t table) parsePostScriptTable(fd *ttf) error { // table "post" italicAngle := t.fixed32(4) //fmt.Printf("italicAngle: %2.2f\n", italicAngle) fd.ItalicAngle = italicAngle isFixedPitch := t.uint16(16) //fmt.Printf("isFixedPitch: %t\n", isFixedPitch != 0) fd.FixedPitch = isFixedPitch != 0 return nil } // func printUnicodeRange(off int, r uint32) { // for i := 0; i < 64; i++ { // if r&1 > 0 { // fmt.Printf("bit %d: on\n", off+i) // } // r >>= 1 // } // } func (t table) parseWindowsMetricsTable(fd *ttf) error { // table "OS/2" version := t.uint16(0) fsType := t.uint16(8) fd.Protected = fsType&2 > 0 //fmt.Printf("protected: %t\n", fd.Protected) uniCodeRange1 := t.uint32(42) //fmt.Printf("uniCodeRange1: %032b\n", uniCodeRange1) fd.UnicodeRange[0] = uniCodeRange1 uniCodeRange2 := t.uint32(46) //fmt.Printf("uniCodeRange2: %032b\n", uniCodeRange2) fd.UnicodeRange[1] = uniCodeRange2 uniCodeRange3 := t.uint32(50) //fmt.Printf("uniCodeRange3: %032b\n", uniCodeRange3) fd.UnicodeRange[2] = uniCodeRange3 uniCodeRange4 := t.uint32(54) //fmt.Printf("uniCodeRange4: %032b\n", uniCodeRange4) fd.UnicodeRange[3] = uniCodeRange4 // printUnicodeRange(0, uniCodeRange1) // printUnicodeRange(32, uniCodeRange2) // printUnicodeRange(64, uniCodeRange3) // printUnicodeRange(96, uniCodeRange4) sTypoAscender := t.int16(68) fd.Ascent = fd.toPDFGlyphSpace(int(sTypoAscender)) sTypoDescender := t.int16(70) fd.Descent = fd.toPDFGlyphSpace(int(sTypoDescender)) // sCapHeight: This field was defined in version 2 of the OS/2 table. 
sCapHeight := int16(0) if version >= 2 { sCapHeight = t.int16(88) } fd.CapHeight = fd.toPDFGlyphSpace(int(sCapHeight)) fsSelection := t.uint16(62) fd.Bold = fsSelection&0x40 > 0 fsFirstCharIndex := t.uint16(64) fd.FirstChar = fsFirstCharIndex fsLastCharIndex := t.uint16(66) fd.LastChar = fsLastCharIndex return nil } func (t table) parseNamingTable(fd *ttf) error { // table "name" count := int(t.uint16(2)) stringOffset := t.uint16(4) var nameID uint16 baseOff := 6 for i := 0; i < count; i++ { recOff := baseOff + i*12 pf := t.uint16(recOff) enc := t.uint16(recOff + 2) lang := t.uint16(recOff + 4) nameID = t.uint16(recOff + 6) l := t.uint16(recOff + 8) o := t.uint16(recOff + 10) soff := stringOffset + o s := t.data[soff : soff+l] if nameID == 6 { if pf == 3 && enc == 1 && lang == 0x0409 { fd.PostscriptName = utf16BEToString(s) return nil } if pf == 1 && enc == 0 && lang == 0 { fd.PostscriptName = string(s) return nil } } } return errors.New("pdfcpu: unable to identify postscript name") } func (t table) parseHorizontalHeaderTable(fd *ttf) error { // table "hhea" ascent := t.int16(4) //fmt.Printf("ascent: %d\n", ascent) if fd.Ascent == 0 { fd.Ascent = fd.toPDFGlyphSpace(int(ascent)) } descent := t.int16(6) //fmt.Printf("descent: %d\n", descent) if fd.Descent == 0 { fd.Descent = fd.toPDFGlyphSpace(int(descent)) } lineGap := t.int16(8) //fmt.Printf("lineGap: %d\n", lineGap) if fd.CapHeight == 0 { fd.CapHeight = fd.toPDFGlyphSpace(int(lineGap)) } //advanceWidthMax := t.uint16(10) //fmt.Printf("advanceWidthMax: %d\n", advanceWidthMax) //minLeftSideBearing := t.int16(12) //fmt.Printf("minLeftSideBearing: %d\n", minLeftSideBearing) //minRightSideBearing := t.int16(14) //fmt.Printf("minRightSideBearing: %d\n", minRightSideBearing) //xMaxExtent := t.int16(16) //fmt.Printf("xMaxExtent: %d\n", xMaxExtent) numOfLongHorMetrics := t.uint16(34) //fmt.Printf("numOfLongHorMetrics: %d\n", numOfLongHorMetrics) fd.HorMetricsCount = int(numOfLongHorMetrics) return nil } func (t table) parseMaximumProfile(fd *ttf) error { // table "maxp" numGlyphs := t.uint16(4) fd.GlyphCount = int(numGlyphs) return nil } func (t table) parseHorizontalMetricsTable(fd *ttf) error { // table "hmtx" fd.GlyphWidths = make([]int, fd.GlyphCount) for i := 0; i < int(fd.HorMetricsCount); i++ { fd.GlyphWidths[i] = fd.toPDFGlyphSpace(int(t.uint16(i * 4))) } for i := fd.HorMetricsCount; i < fd.GlyphCount; i++ { fd.GlyphWidths[i] = fd.GlyphWidths[fd.HorMetricsCount-1] } return nil } func (t table) parseCMapFormat4(fd *ttf) error { fd.Planes[0] = true segCount := int(t.uint16(6) / 2) endOff := 14 startOff := endOff + 2*segCount + 2 deltaOff := startOff + 2*segCount rangeOff := deltaOff + 2*segCount count := 0 for i := 0; i < segCount; i++ { sc := t.uint16(startOff + i*2) startCode := uint32(sc) if fd.FirstChar == 0 { fd.FirstChar = sc } ec := t.uint16(endOff + i*2) endCode := uint32(ec) if fd.LastChar == 0 { fd.LastChar = ec } idDelta := uint32(t.uint16(deltaOff + i*2)) idRangeOff := int(t.uint16(rangeOff + i*2)) var v uint16 for c, j := startCode, 0; c <= endCode && c != 0xFFFF; c++ { if idRangeOff > 0 { v = t.uint16(rangeOff + i*2 + idRangeOff + j*2) } else { v = uint16(c + idDelta) } if gi := v; gi > 0 { fd.Chars[c] = gi fd.ToUnicode[gi] = c count++ } j++ } } return nil } func (t table) parseCMapFormat12(fd *ttf) error { numGroups := int(t.uint32(12)) off := 16 count := 0 var ( lowestStartCode uint32 prevCode uint32 ) for i := 0; i < numGroups; i++ { base := off + i*12 startCode := t.uint32(base) if lowestStartCode == 0 { lowestStartCode = 
startCode fd.Planes[int(lowestStartCode/0x10000)] = true } if startCode/0x10000 != prevCode/0x10000 { fd.Planes[int(startCode/0x10000)] = true } endCode := t.uint32(base + 4) if startCode != endCode { if startCode/0x10000 != endCode/0x10000 { fd.Planes[int(endCode/0x10000)] = true } } prevCode = endCode startGlyphID := uint16(t.uint32(base + 8)) for c, gi := startCode, startGlyphID; c <= endCode; c++ { fd.Chars[c] = gi fd.ToUnicode[gi] = c gi++ count++ } } return nil } func (t table) parseCharToGlyphMappingTable(fd *ttf) error { // table "cmap" fd.Chars = map[uint32]uint16{} fd.ToUnicode = map[uint16]uint32{} fd.Planes = map[int]bool{} tableCount := t.uint16(2) baseOff := 4 var pf, enc, f uint16 m := map[string]table{} for i := 0; i < int(tableCount); i++ { off := baseOff + i*8 pf = t.uint16(off) enc = t.uint16(off + 2) o := t.uint32(off + 4) f = t.uint16(int(o)) l := uint32(t.uint16(int(o) + 2)) if f >= 8 { l = t.uint32(int(o) + 4) } b := t.data[o : o+l] t1 := table{off: o, size: uint32(l), data: b} k := fmt.Sprintf("p%02d.e%02d.f%02d", pf, enc, f) m[k] = t1 } if t, ok := m["p00.e10.f12"]; ok { return t.parseCMapFormat12(fd) } if t, ok := m["p00.e04.f12"]; ok { return t.parseCMapFormat12(fd) } if t, ok := m["p03.e10.f12"]; ok { return t.parseCMapFormat12(fd) } if t, ok := m["p00.e03.f04"]; ok { return t.parseCMapFormat4(fd) } if t, ok := m["p03.e01.f04"]; ok { return t.parseCMapFormat4(fd) } return fmt.Errorf("pdfcpu: unsupported cmap table") } func calcTableChecksum(tag string, b []byte) uint32 { sum := uint32(0) c := (len(b) + 3) / 4 for i := 0; i < c; i++ { if tag == "head" && i == 2 { continue } sum += binary.BigEndian.Uint32(b[i*4:]) } return sum } func getNext32BitAlignedLength(i uint32) uint32 { if i%4 > 0 { return i + (4 - i%4) } return i } func headerAndTables(fn string, r io.ReaderAt, baseOff int64) ([]byte, map[string]*table, error) { header := make([]byte, 12) n, err := r.ReadAt(header, baseOff) if err != nil { return nil, nil, err } if n != 12 { return nil, nil, fmt.Errorf("pdfcpu: corrupt ttf file: %s", fn) } st := string(header[:4]) if st == sfntVersionCFF { return nil, nil, fmt.Errorf("pdfcpu: %s is based on OpenType CFF and unsupported at the moment :(", fn) } if st != sfntVersionTrueType && st != sfntVersionTrueTypeApple { return nil, nil, fmt.Errorf("pdfcpu: unrecognized font format: %s", fn) } c := int(binary.BigEndian.Uint16(header[4:])) b := make([]byte, c*16) n, err = r.ReadAt(b, baseOff+12) if err != nil { return nil, nil, err } if n != c*16 { return nil, nil, fmt.Errorf("pdfcpu: corrupt ttf file: %s", fn) } byteCount := uint32(12) tables := map[string]*table{} for j := 0; j < c; j++ { off := j * 16 b1 := b[off : off+16] tag := string(b1[:4]) chk := binary.BigEndian.Uint32(b1[4:]) o := binary.BigEndian.Uint32(b1[8:]) l := binary.BigEndian.Uint32(b1[12:]) ll := getNext32BitAlignedLength(l) byteCount += ll t := make([]byte, ll) n, err = r.ReadAt(t, int64(o)) if err != nil { return nil, nil, err } if n != int(ll) { return nil, nil, fmt.Errorf("pdfcpu: corrupt table: %s", tag) } sum := calcTableChecksum(tag, t) if sum != chk { fmt.Printf("pdfcpu: fixing table<%s> checksum error; want:%d got:%d\n", tag, chk, sum) chk = sum } tables[tag] = &table{chksum: chk, off: o, size: l, padded: ll, data: t} } return header, tables, nil } func parse(tags map[string]*table, tag string, fd *ttf) error { t, found := tags[tag] if !found { // OS/2 is optional for True Type fonts. 
if tag == "OS/2" { return nil } return fmt.Errorf("pdfcpu: tag: %s unavailable", tag) } if t.data == nil { return fmt.Errorf("pdfcpu: tag: %s no data", tag) } var err error switch tag { case "head": err = t.parseFontHeaderTable(fd) case "OS/2": err = t.parseWindowsMetricsTable(fd) case "post": err = t.parsePostScriptTable(fd) case "name": err = t.parseNamingTable(fd) case "hhea": err = t.parseHorizontalHeaderTable(fd) case "maxp": err = t.parseMaximumProfile(fd) case "hmtx": err = t.parseHorizontalMetricsTable(fd) case "cmap": err = t.parseCharToGlyphMappingTable(fd) } return err } func writeGob(fileName string, fd ttf) error { f, err := os.Create(fileName) if err != nil { return err } defer f.Close() enc := gob.NewEncoder(f) return enc.Encode(fd) } func readGob(fileName string, fd *ttf) error { f, err := os.Open(fileName) if err != nil { return err } defer f.Close() dec := gob.NewDecoder(f) return dec.Decode(fd) } func installTrueTypeRep(fontDir, fontName string, header []byte, tables map[string]*table) error { fd := ttf{} for _, v := range []string{"head", "OS/2", "post", "name", "hhea", "maxp", "hmtx", "cmap"} { if err := parse(tables, v, &fd); err != nil { return err } } bb, err := createTTF(header, tables) if err != nil { return err } fd.FontFile = bb log.CLI.Println(fd.PostscriptName) gobName := filepath.Join(fontDir, fd.PostscriptName+".gob") // Write the populated ttf struct as gob. if err := writeGob(gobName, fd); err != nil { return err } // Read gob and double check integrity. fdNew := ttf{} if err := readGob(gobName, &fdNew); err != nil { return err } if !reflect.DeepEqual(fd, fdNew) { return errors.Errorf("pdfcpu: %s can't be installed", fontName) } return nil } // InstallTrueTypeCollection saves an internal representation of all fonts // contained in a TrueType collection to the pdfcpu config dir. func InstallTrueTypeCollection(fontDir, fn string) error { f, err := os.Open(fn) if err != nil { return err } defer f.Close() b := make([]byte, 12) n, err := f.Read(b) if err != nil { return err } if n != 12 { return fmt.Errorf("pdfcpu: corrupt ttc file: %s", fn) } if string(b[:4]) != ttcTag { return fmt.Errorf("pdfcpu: corrupt ttc file: %s", fn) } c := int(binary.BigEndian.Uint32(b[8:])) b = make([]byte, c*4) n, err = f.ReadAt(b, 12) if err != nil { return err } if n != c*4 { return fmt.Errorf("pdfcpu: corrupt ttc file: %s", fn) } // Process contained fonts. for i := 0; i < c; i++ { off := int64(binary.BigEndian.Uint32(b[i*4:])) header, tables, err := headerAndTables(fn, f, off) if err != nil { return err } if err := installTrueTypeRep(fontDir, fn, header, tables); err != nil { return err } } return nil } // InstallTrueTypeFont saves an internal representation of TrueType font fontName to the pdfcpu config dir. func InstallTrueTypeFont(fontDir, fontName string) error { f, err := os.Open(fontName) if err != nil { return err } defer f.Close() header, tables, err := headerAndTables(fontName, f, 0) if err != nil { return err } return installTrueTypeRep(fontDir, fontName, header, tables) } func ttfTables(tableCount int, bb []byte) (map[string]*table, error) { tables := map[string]*table{} b := bb[12:] for j := 0; j < tableCount; j++ { off := j * 16 b1 := b[off : off+16] tag := string(b1[:4]) chksum := binary.BigEndian.Uint32(b1[4:]) o := binary.BigEndian.Uint32(b1[8:]) l := binary.BigEndian.Uint32(b1[12:]) ll := getNext32BitAlignedLength(l) t := append([]byte(nil), bb[o:o+ll]...) 
tables[tag] = &table{chksum: chksum, off: o, size: l, padded: ll, data: t} } return tables, nil } func glyfOffset(loca *table, gid, indexToLocFormat int) int { if indexToLocFormat == 0 { // short offsets return 2 * int(loca.uint16(2*gid)) } // 1 .. long offsets return int(loca.uint32(4 * gid)) } func writeGlyfOffset(buf *bytes.Buffer, off, indexToLocFormat int) { var bb []byte if indexToLocFormat == 0 { // 0 .. short offsets bb = uint16ToBigEndianBytes(uint16(off / 2)) } else { // 1 .. long offsets bb = uint32ToBigEndianBytes(uint32(off)) } buf.Write(bb) } func pad(bb []byte) []byte { i := len(bb) % 4 if i == 0 { return bb } for j := 0; j < 4-i; j++ { bb = append(bb, 0x00) } return bb } func glyphOffsets(gid int, locaFull, glyfsFull *table, numGlyphs, indexToLocFormat int) (int, int) { offFrom := glyfOffset(locaFull, gid, indexToLocFormat) var offThru int if gid == numGlyphs { offThru = int(glyfsFull.padded) } else { offThru = glyfOffset(locaFull, gid+1, indexToLocFormat) } return offFrom, offThru } func resolveCompoundGlyph(fontName string, bb []byte, usedGIDs map[uint16]bool, locaFull, glyfsFull *table, numGlyphs, indexToLocFormat int) error { last := false for off := 10; !last; { flags := binary.BigEndian.Uint16(bb[off:]) last = flags&0x20 == 0 wordArgs := flags&0x01 > 0 gid := binary.BigEndian.Uint16(bb[off+2:]) // Position behind arguments. off += 6 if wordArgs { off += 2 } // Position behind transform. if flags&0x08 > 0 { off += 2 } else if flags&0x40 > 0 { off += 4 } else if flags&0x80 > 0 { off += 8 } if _, ok := usedGIDs[gid]; ok { // duplicate continue } offFrom, offThru := glyphOffsets(int(gid), locaFull, glyfsFull, numGlyphs, indexToLocFormat) if offThru < offFrom { return errors.Errorf("pdfcpu: illegal glyfOffset for font: %s", fontName) } if offFrom == offThru { // not available continue } usedGIDs[gid] = true cbb := glyfsFull.data[offFrom:offThru] if cbb[0]&0x80 == 0 { // simple continue } if err := resolveCompoundGlyph(fontName, cbb, usedGIDs, locaFull, glyfsFull, numGlyphs, indexToLocFormat); err != nil { return err } } return nil } func resolveCompoundGlyphs(fontName string, usedGIDs map[uint16]bool, locaFull, glyfsFull *table, numGlyphs, indexToLocFormat int) error { // Copy the GIDs up front: resolveCompoundGlyph mutates usedGIDs while we iterate. gids := make([]uint16, 0, len(usedGIDs)) for k := range usedGIDs { gids = append(gids, k) } for _, gid := range gids { offFrom, offThru := glyphOffsets(int(gid), locaFull, glyfsFull, numGlyphs, indexToLocFormat) if offThru < offFrom { return errors.Errorf("pdfcpu: illegal glyfOffset for font: %s", fontName) } if offFrom == offThru { continue } bb := glyfsFull.data[offFrom:offThru] if bb[0]&0x80 == 0 { // simple continue } if err := resolveCompoundGlyph(fontName, bb, usedGIDs, locaFull, glyfsFull, numGlyphs, indexToLocFormat); err != nil { return err } } return nil } func glyfAndLoca(fontName string, tables map[string]*table, usedGIDs map[uint16]bool) error { head, ok := tables["head"] if !ok { return errors.Errorf("pdfcpu: missing \"head\" table for font: %s", fontName) } maxp, ok := tables["maxp"] if !ok { return errors.Errorf("pdfcpu: missing \"maxp\" table for font: %s", fontName) } glyfsFull, ok := tables["glyf"] if !ok { return errors.Errorf("pdfcpu: missing \"glyf\" table for font: %s", fontName) } locaFull, ok := tables["loca"] if !ok { return errors.Errorf("pdfcpu: missing \"loca\" table for font: %s", fontName) } indexToLocFormat := int(head.uint16(50)) // 0 .. short offsets // 1 .. 
long offsets numGlyphs := int(maxp.uint16(4)) if err := resolveCompoundGlyphs(fontName, usedGIDs, locaFull, glyfsFull, numGlyphs, indexToLocFormat); err != nil { return err } gids := make([]int, 0, len(usedGIDs)+1) gids = append(gids, 0) for gid := range usedGIDs { gids = append(gids, int(gid)) } sort.Ints(gids) glyfBytes := []byte{} var buf bytes.Buffer off := 0 firstPendingGID := 0 for _, gid := range gids { offFrom, offThru := glyphOffsets(gid, locaFull, glyfsFull, numGlyphs, indexToLocFormat) if offThru < offFrom { return errors.Errorf("pdfcpu: illegal glyfOffset for font: %s", fontName) } if offThru != offFrom { // We have a glyph outline. for i := 0; i < gid-firstPendingGID; i++ { writeGlyfOffset(&buf, off, indexToLocFormat) } glyfBytes = append(glyfBytes, glyfsFull.data[offFrom:offThru]...) writeGlyfOffset(&buf, off, indexToLocFormat) off += offThru - offFrom firstPendingGID = gid + 1 } } for i := 0; i <= numGlyphs-firstPendingGID; i++ { writeGlyfOffset(&buf, off, indexToLocFormat) } bb := buf.Bytes() locaFull.size = uint32(len(bb)) locaFull.data = pad(bb) locaFull.padded = uint32(len(locaFull.data)) glyfsFull.size = uint32(len(glyfBytes)) glyfsFull.data = pad(glyfBytes) glyfsFull.padded = uint32(len(glyfsFull.data)) return nil } func createTTF(header []byte, tables map[string]*table) ([]byte, error)
// Subset creates a new font file based on usedGIDs. func Subset(fontName string, usedGIDs map[uint16]bool) ([]byte, error) { bb, err := Read(fontName) if err != nil { return nil, err } header := bb[:12] tableCount := int(binary.BigEndian.Uint16(header[4:])) tables, err := ttfTables(tableCount, bb) if err != nil { return nil, err } if err := glyfAndLoca(fontName, tables, usedGIDs); err != nil { return nil, err } return createTTF(header, tables) }
{ tags := []string{} for t := range tables { tags = append(tags, t) } sort.Strings(tags) buf := bytes.NewBuffer(header) off := uint32(len(header) + len(tables)*16) o := off for _, tag := range tags { t := tables[tag] if _, err := buf.WriteString(tag); err != nil { return nil, err } if tag == "loca" || tag == "glyf" { t.chksum = calcTableChecksum(tag, t.data) } if _, err := buf.Write(uint32ToBigEndianBytes(t.chksum)); err != nil { return nil, err } t.off = o if _, err := buf.Write(uint32ToBigEndianBytes(t.off)); err != nil { return nil, err } if _, err := buf.Write(uint32ToBigEndianBytes(t.size)); err != nil { return nil, err } o += t.padded } for _, tag := range tags { t := tables[tag] n, err := buf.Write(t.data) if err != nil { return nil, err } if n != len(t.data) || n != int(t.padded) { return nil, errors.Errorf("pdfcpu: unable to write %s data\n", tag) } } return buf.Bytes(), nil }
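// Illustrative usage sketch (hypothetical font name and glyph IDs): Subset
// rewrites the "glyf" and "loca" tables so that only glyph 0 plus the
// requested GIDs keep their outlines, and returns the rebuilt font file bytes.
func subsetExample() ([]byte, error) {
	usedGIDs := map[uint16]bool{41: true, 42: true, 43: true}
	return Subset("Roboto-Regular", usedGIDs)
}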
DeleteOutlined.tsx
import type { DIconProps } from '../Icon'; import { DeleteOutlined as AntIcon } from '@ant-design/icons-svg'; import { DIcon } from '../Icon'; export function
(props: Omit<DIconProps, 'dIcon'>) { return <DIcon {...props} dIcon={AntIcon} />; }
DeleteOutlined
noRepeatArray.js
/** * @file Array deduplication * * @author Marx */ /** * Remove duplicate values from an array * * @param {Array} ele The array to deduplicate * @return {Array} The deduplicated array */ function getNoRepeatArr(ele) { var newArr = [
r i = 0, len = ele.length; i < len; i++) { var notRepeat = true; for (var j = 0; j < i; j++) { if (ele[j] === ele[i]) { notRepeat = false; } } if (notRepeat) { newArr.push(ele[i]); } } return newArr; }
]; for (va
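// Usage example: first occurrences are kept, in order.
// getNoRepeatArr([1, 2, 2, 3, 1]) -> [1, 2, 3]
console.log(getNoRepeatArr([1, 2, 2, 3, 1]));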
key.go
package rundeck // KeyMeta is the metadata associated with a resource in the Rundeck key store. type KeyMeta struct { XMLName string `xml:"resource"` Name string `xml:"name,attr,omitempty"` Path string `xml:"path,attr,omitempty"` ResourceType string `xml:"type,attr,omitempty"` URL string `xml:"url,attr,omitempty"` ContentType string `xml:"resource-meta>Rundeck-content-type"` ContentSize string `xml:"resource-meta>Rundeck-content-size"` ContentMask string `xml:"resource-meta>Rundeck-content-mask"` KeyType string `xml:"resource-meta>Rundeck-key-type"` LastModifiedByUserName string `xml:"resource-meta>Rundeck-auth-modified-username"` CreatedByUserName string `xml:"resource-meta>Rundeck-auth-created-username"` CreatedTimestamp string `xml:"resource-meta>Rundeck-content-creation-time"` LastModifiedTimestamp string `xml:"resource-meta>Rundeck-content-modify-time"` } type keyMetaListContents struct { Keys []KeyMeta `xml:"contents>resource"` } // GetKeyMeta returns the metadata for the key at the given keystore path. func (c *Client) GetKeyMeta(path string) (*KeyMeta, error) { k := &KeyMeta{} err := c.get([]string{"storage", "keys", path}, nil, k)
return k, err } // GetKeysInDirMeta returns the metadata for the keys and subdirectories within // the directory at the given keystore path. func (c *Client) GetKeysInDirMeta(path string) ([]KeyMeta, error) { r := &keyMetaListContents{} err := c.get([]string{"storage", "keys", path}, nil, r) if err != nil { return nil, err } return r.Keys, nil } // GetKeyContent retrieves and returns the content of the key at the given keystore path. // Private keys are write-only, so they cannot be retrieved via this interface. func (c *Client) GetKeyContent(path string) (string, error) { return c.rawGet([]string{"storage", "keys", path}, nil, "application/pgp-keys") } // CreatePublicKey stores a new public key at the given keystore path. func (c *Client) CreatePublicKey(path string, content string) error { return c.createOrReplacePublicKey("POST", path, "application/pgp-keys", content) } // ReplacePublicKey overwrites the public key at the given keystore path. func (c *Client) ReplacePublicKey(path string, content string) error { return c.createOrReplacePublicKey("PUT", path, "application/pgp-keys", content) } // CreatePrivateKey stores a new private key at the given keystore path. func (c *Client) CreatePrivateKey(path string, content string) error { return c.createOrReplacePublicKey("POST", path, "application/octet-stream", content) } // ReplacePrivateKey overwrites the private key at the given keystore path. func (c *Client) ReplacePrivateKey(path string, content string) error { return c.createOrReplacePublicKey("PUT", path, "application/octet-stream", content) } // createOrReplacePublicKey issues the raw storage request; despite its name it // is used for both public and private key content types. func (c *Client) createOrReplacePublicKey(method string, path string, contentType string, content string) error { req := &request{ Method: method, PathParts: []string{"storage", "keys", path}, Headers: map[string]string{ "Content-Type": contentType, }, BodyBytes: []byte(content), } _, err := c.rawRequest(req) return err } // DeleteKey removes the key at the given keystore path. func (c *Client) DeleteKey(path string) error { return c.delete([]string{"storage", "keys", path}) }
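// Illustrative usage sketch; the path and key material are hypothetical.
// Private keys are write-only: they can be created or replaced, but their
// content can never be read back through this API.
func exampleKeyRoundTrip(c *Client) (string, error) {
	const path = "project/deploy.pub"
	if err := c.CreatePublicKey(path, "ssh-rsa AAAAB3... demo"); err != nil {
		return "", err
	}
	return c.GetKeyContent(path)
}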
compose-component-path.ts
import * as path from 'path'; import format from 'string-format'; import BitId from '../../bit-id/bit-id'; import { DEFAULT_COMPONENTS_DIR_PATH, DEFAULT_DEPENDENCIES_DIR_PATH } from '../../constants'; import { PathLinuxRelative, PathOsBased } from '../path'; import { parseScope } from './parse-scope'; /** * the following place-holders are permitted: * name - component name, including the namespace, e.g. 'ui/button'. * scopeId - full scope-id, including the owner, e.g. 'teambit.compilation'. * scope - scope name only, e.g. 'compilation'. * owner - owner name in bit.dev, e.g. 'teambit'. */
let defaultDir = componentsDefaultDirectory; const { scope, owner } = parseScope(bitId.scope); // Prevent case where for example {scope}/{name} becomes /my-comp (in case the scope is empty) if (componentsDefaultDirectory.includes('{scope}/') && !bitId.scope) { defaultDir = componentsDefaultDirectory.replace('{scope}/', ''); } if (componentsDefaultDirectory.includes('{scopeId}/') && !bitId.scope) { defaultDir = componentsDefaultDirectory.replace('{scopeId}/', ''); } if (componentsDefaultDirectory.includes('{owner}.') && !owner) { defaultDir = componentsDefaultDirectory.replace('{owner}.', ''); } const result = format(defaultDir, { name: bitId.name, scope, owner, scopeId: bitId.scope }); return result; } export function composeDependencyPath( bitId: BitId, dependenciesDir: string = DEFAULT_DEPENDENCIES_DIR_PATH ): PathOsBased { return path.join(dependenciesDir, bitId.toFullPath()); } export function composeDependencyPathForIsolated( bitId: BitId, // @ts-ignore AUTO-ADDED-AFTER-MIGRATION-PLEASE-FIX! dependenciesDir?: string = DEFAULT_DEPENDENCIES_DIR_PATH ): PathOsBased { const getIdPath = () => { try { return bitId.toFullPath(); } catch (err) { return bitId.name; } }; return path.join(dependenciesDir, getIdPath()); }
export function composeComponentPath( bitId: BitId, componentsDefaultDirectory: string = DEFAULT_COMPONENTS_DIR_PATH ): PathLinuxRelative {
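// Illustrative expansions for a hypothetical component 'ui/button' in scope
// 'teambit.compilation' (owner 'teambit', scope name 'compilation'):
//   '{name}'                 -> 'ui/button'
//   '{scope}/{name}'         -> 'compilation/ui/button'
//   '{scopeId}/{name}'       -> 'teambit.compilation/ui/button'
//   '{owner}.{scope}/{name}' -> 'teambit.compilation/ui/button'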
issue-3874.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // compile-pass #![allow(dead_code)] // pretty-expanded FIXME #23616 enum PureCounter { PureCounterVariant(usize) } fn each<F>(thing: PureCounter, blk: F) where F: FnOnce(&usize) { let PureCounter::PureCounterVariant(ref x) = thing;
} pub fn main() {}
blk(x);
getTrigger.go
// *** WARNING: this file was generated by the Pulumi SDK Generator. *** // *** Do not edit by hand unless you're certain you know what you are doing! *** package v20190801 import ( "github.com/pulumi/pulumi/sdk/v2/go/pulumi" ) func
(ctx *pulumi.Context, args *LookupTriggerArgs, opts ...pulumi.InvokeOption) (*LookupTriggerResult, error) { var rv LookupTriggerResult err := ctx.Invoke("azure-nextgen:databoxedge/v20190801:getTrigger", args, &rv, opts...) if err != nil { return nil, err } return &rv, nil } type LookupTriggerArgs struct { // The device name. DeviceName string `pulumi:"deviceName"` // The trigger name. Name string `pulumi:"name"` // The resource group name. ResourceGroupName string `pulumi:"resourceGroupName"` } // Trigger details. type LookupTriggerResult struct { // Trigger Kind. Kind string `pulumi:"kind"` // The object name. Name string `pulumi:"name"` // The hierarchical type of the object. Type string `pulumi:"type"` }
LookupTrigger
sandbox_safe_system_state.rs
use std::{collections::BTreeMap, convert::TryFrom, convert::TryInto}; use ic_base_types::{CanisterId, NumBytes, NumSeconds, PrincipalId}; use ic_cycles_account_manager::{CyclesAccountManager, CyclesAccountManagerError}; use ic_interfaces::execution_environment::{HypervisorError, HypervisorResult}; use ic_nns_constants::CYCLES_MINTING_CANISTER_ID; use ic_registry_subnet_type::SubnetType; use ic_replicated_state::{ canister_state::DEFAULT_QUEUE_CAPACITY, CanisterStatus, StateError, SystemState, }; use ic_types::{ messages::{CallContextId, CallbackId, Request}, methods::Callback, nominal_cycles::NominalCycles, ComputeAllocation, Cycles, MemoryAllocation, }; use serde::{Deserialize, Serialize}; use crate::CERTIFIED_DATA_MAX_LENGTH; /// The information that canisters can see about their own status. #[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub enum CanisterStatusView { Running, Stopping, Stopped, } impl CanisterStatusView { pub fn from_full_status(full_status: &CanisterStatus) -> Self { match full_status { CanisterStatus::Running { .. } => Self::Running, CanisterStatus::Stopping { .. } => Self::Stopping, CanisterStatus::Stopped => Self::Stopped, } } } #[derive(Debug, Clone, Serialize, Deserialize)] pub enum CallbackUpdate { Register(CallbackId, Callback), Unregister(CallbackId), } /// Tracks changes to the system state that the canister has requested. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SystemStateChanges { pub(super) new_certified_data: Option<Vec<u8>>, pub(super) callback_updates: Vec<CallbackUpdate>, cycles_balance_change: i128, cycles_consumed: Cycles, call_context_balance_taken: BTreeMap<CallContextId, Cycles>, request_slots_used: BTreeMap<CanisterId, usize>, requests: Vec<Request>, } impl Default for SystemStateChanges { fn default() -> Self { Self { new_certified_data: None, callback_updates: vec![], cycles_balance_change: 0, cycles_consumed: Cycles::from(0), call_context_balance_taken: BTreeMap::new(), request_slots_used: BTreeMap::new(), requests: vec![], } } } impl SystemStateChanges { /// Checks that no cycles were created during the execution of this message /// (unless the canister is the cycles minting canister). fn cycle_change_is_valid(&self, is_cmc_canister: bool) -> bool { let mut universal_cycle_change = 0; universal_cycle_change += self.cycles_balance_change; for call_context_balance_taken in self.call_context_balance_taken.values() { universal_cycle_change = universal_cycle_change.saturating_sub( call_context_balance_taken .get() .try_into() .unwrap_or(i128::MAX), // saturate overflowing conversion ); } for req in self.requests.iter() { universal_cycle_change = universal_cycle_change // saturate overflowing conversion .saturating_add(req.payment.get().try_into().unwrap_or(i128::MAX)); } if is_cmc_canister { true } else { // Check that no cycles were created. universal_cycle_change <= 0 } } /// Verify that the changes to the system state are sound and apply them to /// the system state if they are. /// /// # Panic /// /// This will panic if the changes are invalid. That could indicate that a /// canister has broken out of wasmtime. pub fn apply_changes(self, system_state: &mut SystemState) { // Verify total cycle change is not positive and update cycles balance. 
assert!(self.cycle_change_is_valid(system_state.canister_id == CYCLES_MINTING_CANISTER_ID)); if self.cycles_balance_change >= 0 { system_state.cycles_balance += Cycles::from(self.cycles_balance_change as u128); } else { let new_balance = system_state .cycles_balance .get() .checked_sub(-self.cycles_balance_change as u128) .unwrap(); system_state.cycles_balance = Cycles::from(new_balance); } // Observe consumed cycles. system_state .canister_metrics .consumed_cycles_since_replica_started += NominalCycles::from_cycles(self.cycles_consumed); // Verify we don't accept more cycles than are available from each call // context and update each call context balance if !self.call_context_balance_taken.is_empty() { let call_context_manager = system_state.call_context_manager_mut().unwrap(); for (context_id, amount_taken) in &self.call_context_balance_taken { let call_context = call_context_manager .call_context_mut(*context_id) .expect("Canister accepted cycles from invalid call context"); call_context .withdraw_cycles(*amount_taken) .expect("Canister accepted more cycles than available from call context"); } } // Push outgoing messages. for msg in &self.requests { system_state .push_output_request(msg.clone()) .expect("Unable to send new request"); } // Verify new certified data isn't too long and set it. if let Some(certified_data) = self.new_certified_data.as_ref() { assert!(certified_data.len() <= CERTIFIED_DATA_MAX_LENGTH as usize); system_state.certified_data = certified_data.clone(); } // Verify callback ids and register new callbacks. for update in self.callback_updates { match update { CallbackUpdate::Register(expected_id, callback) => { let id = system_state .call_context_manager_mut() .unwrap() .register_callback(callback); assert_eq!(id, expected_id); } CallbackUpdate::Unregister(callback_id) => { let _callback = system_state .call_context_manager_mut() .unwrap() .unregister_callback(callback_id) .expect("Tried to unregister callback with an id that isn't in use"); } } } } } /// A version of the `SystemState` that can be used in a sandboxed process. /// Changes are separately tracked so that we can verify the changes are valid /// before applying them to the actual system state. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SandboxSafeSystemState { /// Only public for tests #[doc(hidden)] pub system_state_changes: SystemStateChanges, pub(super) canister_id: CanisterId, pub(super) controller: PrincipalId, pub(super) status: CanisterStatusView, pub(super) subnet_type: SubnetType, freeze_threshold: NumSeconds, memory_allocation: MemoryAllocation, initial_cycles_balance: Cycles, call_context_balances: BTreeMap<CallContextId, Cycles>, cycles_account_manager: CyclesAccountManager, // None indicates that we are in a context where the canister cannot // register callbacks (e.g. running the `start` method when installing a // canister.) next_callback_id: Option<u64>, available_request_slots: BTreeMap<CanisterId, usize>, } impl SandboxSafeSystemState { /// Only public for use in tests. 
#[doc(hidden)] #[allow(clippy::too_many_arguments)] pub fn new_internal( canister_id: CanisterId, controller: PrincipalId, status: CanisterStatusView, freeze_threshold: NumSeconds, memory_allocation: MemoryAllocation, initial_cycles_balance: Cycles, call_context_balances: BTreeMap<CallContextId, Cycles>, cycles_account_manager: CyclesAccountManager, next_callback_id: Option<u64>, available_request_slots: BTreeMap<CanisterId, usize>, ) -> Self { Self { canister_id, controller, status, subnet_type: cycles_account_manager.subnet_type(), freeze_threshold, memory_allocation, system_state_changes: SystemStateChanges::default(), initial_cycles_balance, call_context_balances, cycles_account_manager, next_callback_id, available_request_slots, } } pub fn new(system_state: &SystemState, cycles_account_manager: CyclesAccountManager) -> Self { let call_context_balances = match system_state.call_context_manager() { Some(call_context_manager) => call_context_manager .call_contexts() .iter() .map(|(id, context)| (*id, context.available_cycles())) .collect(), None => BTreeMap::new(), }; let available_request_slots = system_state.available_output_request_slots(); Self::new_internal( system_state.canister_id, *system_state.controller(), CanisterStatusView::from_full_status(&system_state.status), system_state.freeze_threshold, system_state.memory_allocation, system_state.cycles_balance, call_context_balances, cycles_account_manager, system_state .call_context_manager() .map(|c| c.next_callback_id()), available_request_slots, ) } pub fn canister_id(&self) -> CanisterId
pub fn changes(self) -> SystemStateChanges { self.system_state_changes } pub fn take_changes(&mut self) -> SystemStateChanges { std::mem::take(&mut self.system_state_changes) } /// Only public for use in tests. #[doc(hidden)] pub fn register_callback(&mut self, callback: Callback) -> HypervisorResult<CallbackId> { match &mut self.next_callback_id { Some(next_callback_id) => { *next_callback_id += 1; let id = CallbackId::from(*next_callback_id); self.system_state_changes .callback_updates .push(CallbackUpdate::Register(id, callback)); Ok(id) } None => Err(HypervisorError::ContractViolation( "Tried to register a callback in a context where it isn't allowed.".to_string(), )), } } pub(super) fn unregister_callback(&mut self, id: CallbackId) { self.system_state_changes .callback_updates .push(CallbackUpdate::Unregister(id)) } pub(super) fn cycles_balance(&self) -> Cycles { let cycle_change = self.system_state_changes.cycles_balance_change; if cycle_change >= 0 { Cycles::from( self.initial_cycles_balance .get() .checked_add(cycle_change as u128) .unwrap(), ) } else { Cycles::from( self.initial_cycles_balance .get() .checked_sub(-cycle_change as u128) .unwrap(), ) } } pub(super) fn msg_cycles_available(&self, call_context_id: CallContextId) -> Cycles { let initial_available = *self .call_context_balances .get(&call_context_id) .unwrap_or(&Cycles::from(0)); let already_taken = *self .system_state_changes .call_context_balance_taken .get(&call_context_id) .unwrap_or(&Cycles::from(0)); initial_available - already_taken } fn update_balance_change(&mut self, new_balance: Cycles) { let new_change = i128::try_from(new_balance.get()) .unwrap() .checked_sub(i128::try_from(self.initial_cycles_balance.get()).unwrap()) .unwrap(); self.system_state_changes.cycles_balance_change = new_change; } /// Same as [`update_balance_change`], but asserts the balance has decreased /// and marks the difference as cycles consumed (i.e. burned and not /// transferred). fn update_balance_change_consuming(&mut self, new_balance: Cycles) { let new_change = i128::try_from(new_balance.get()) .unwrap() .checked_sub(i128::try_from(self.initial_cycles_balance.get()).unwrap()) .unwrap(); // Assert that the balance has decreased. assert!(new_change <= self.system_state_changes.cycles_balance_change); let consumed = Cycles::from((self.system_state_changes.cycles_balance_change - new_change) as u128); self.system_state_changes.cycles_consumed += consumed; self.system_state_changes.cycles_balance_change = new_change; } pub(super) fn mint_cycles(&mut self, amount_to_mint: Cycles) -> HypervisorResult<()> { let mut new_balance = self.cycles_balance(); let result = self .cycles_account_manager .mint_cycles(self.canister_id, &mut new_balance, amount_to_mint) .map_err(|CyclesAccountManagerError::ContractViolation(msg)| { HypervisorError::ContractViolation(msg) }); self.update_balance_change(new_balance); result } pub(super) fn refund_cycles(&mut self, cycles: Cycles) { let mut new_balance = self.cycles_balance(); self.cycles_account_manager .add_cycles(&mut new_balance, cycles); self.update_balance_change(new_balance); } pub(super) fn msg_cycles_accept( &mut self, call_context_id: CallContextId, amount_to_accept: Cycles, ) -> Cycles { let mut new_balance = self.cycles_balance(); // Scale amount that can be accepted by what is actually available on // the call context. 
let amount_available = Cycles::from( self.call_context_balances .get(&call_context_id) .unwrap() .get() .checked_sub( self.system_state_changes .call_context_balance_taken .get(&call_context_id) .unwrap_or(&Cycles::from(0)) .get(), ) .unwrap(), ); let amount_to_accept = std::cmp::min(amount_available, amount_to_accept); // Withdraw and accept the cycles *self .system_state_changes .call_context_balance_taken .entry(call_context_id) .or_insert_with(|| Cycles::from(0)) += amount_to_accept; self.cycles_account_manager .add_cycles(&mut new_balance, amount_to_accept); self.update_balance_change(new_balance); amount_to_accept } pub(super) fn canister_cycles_withdraw( &mut self, canister_current_memory_usage: NumBytes, compute_allocation: ComputeAllocation, amount: Cycles, ) -> HypervisorResult<()> { let mut new_balance = self.cycles_balance(); let result = self .cycles_account_manager .withdraw_cycles_for_transfer( self.canister_id, self.freeze_threshold, self.memory_allocation, canister_current_memory_usage, compute_allocation, &mut new_balance, amount, ) .map_err(HypervisorError::InsufficientCyclesBalance); self.update_balance_change(new_balance); result } /// Only public for use in tests #[doc(hidden)] pub fn push_output_request( &mut self, canister_current_memory_usage: NumBytes, compute_allocation: ComputeAllocation, msg: Request, ) -> Result<(), (StateError, Request)> { let mut new_balance = self.cycles_balance(); if let Err(err) = self.cycles_account_manager.withdraw_request_cycles( self.canister_id, &mut new_balance, self.freeze_threshold, self.memory_allocation, canister_current_memory_usage, compute_allocation, &msg, ) { return Err((StateError::CanisterOutOfCycles(err), msg)); } let initial_available_slots = self .available_request_slots .get(&msg.receiver) .unwrap_or(&DEFAULT_QUEUE_CAPACITY); let used_slots = self .system_state_changes .request_slots_used .entry(msg.receiver) .or_insert(0); if *used_slots >= *initial_available_slots { return Err(( StateError::QueueFull { capacity: DEFAULT_QUEUE_CAPACITY, }, msg, )); } self.system_state_changes.requests.push(msg); *used_slots += 1; self.update_balance_change_consuming(new_balance); Ok(()) } }
{ self.canister_id }
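The cycles accounting in this row is worth pausing on. Below is a minimal Python sketch of the bookkeeping (illustrative names only, not the actual IC types or API): the sandboxed state tracks the initial balance plus a signed delta, and accepting cycles from a call context is clamped to what that context has not already handed over.

```python
# Illustrative sketch of the balance bookkeeping above (not the IC types):
# the state keeps the initial balance plus a signed delta, and accepting
# cycles from a call context is clamped to what has not already been taken.
class SandboxBalance:
    def __init__(self, initial_balance, call_context_balances):
        self.initial = initial_balance       # cycles at the start of execution
        self.change = 0                      # signed delta accumulated so far
        self.ctx_balances = dict(call_context_balances)
        self.ctx_taken = {}                  # call_context_id -> cycles taken

    def balance(self):
        # Mirrors cycles_balance(): initial balance plus the signed change.
        return self.initial + self.change

    def msg_cycles_accept(self, ctx_id, requested):
        # Mirrors msg_cycles_accept(): scale the request down to what the
        # call context actually still has, then record it as taken.
        available = self.ctx_balances.get(ctx_id, 0) - self.ctx_taken.get(ctx_id, 0)
        accepted = min(available, requested)
        self.ctx_taken[ctx_id] = self.ctx_taken.get(ctx_id, 0) + accepted
        self.change += accepted              # accepted cycles raise the balance
        return accepted

sb = SandboxBalance(1_000, {7: 50})
assert sb.msg_cycles_accept(7, 80) == 50   # clamped to the context's balance
assert sb.balance() == 1_050
```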
response.rs
use std::collections::HashMap; #[derive(Debug, Clone)] pub struct Alert { pub source: String, pub event_id: String, pub trigger_id: String, pub subject: String, pub entity: String, pub groups: Vec<String>, pub alert_start_time: String, pub alert_age: String, pub alert_status: String, pub priority: String, pub tags: HashMap<String, String>, }
pub type AlertList = Vec<Alert>; // 'description': entry['description'], // 'alert_time': alert_age, // 'lastchange_utc': last_change, // 'ack': ack_status, // 'sendto': ",".join(send_to_list), // 'subject': subject, // 'message': message,
parse_gemspec.go
package ruby import ( "bufio" "encoding/json" "fmt" "io" "regexp" "strings" "github.com/anchore/syft/internal" "github.com/mitchellh/mapstructure" "github.com/anchore/syft/syft/pkg" "github.com/anchore/syft/syft/pkg/cataloger/common" ) // integrity check var _ common.ParserFn = parseGemFileLockEntries type postProcessor func(string) []string // match example: Al\u003Ex ---> 003E var unicodePattern = regexp.MustCompile(`\\u(?P<unicode>[0-9A-F]{4})`) var patterns = map[string]*regexp.Regexp{ // match example: name = "railties".freeze ---> railties "name": regexp.MustCompile(`.*\.name\s*=\s*["']{1}(?P<name>.*)["']{1} *`), // match example: version = "1.0.4".freeze ---> 1.0.4 "version": regexp.MustCompile(`.*\.version\s*=\s*["']{1}(?P<version>.*)["']{1} *`), // match example: // homepage = "https://github.com/anchore/syft".freeze ---> https://github.com/anchore/syft "homepage": regexp.MustCompile(`.*\.homepage\s*=\s*["']{1}(?P<homepage>.*)["']{1} *`), // match example: files = ["exe/bundle".freeze, "exe/bundler".freeze] ---> "exe/bundle".freeze, "exe/bundler".freeze "files": regexp.MustCompile(`.*\.files\s*=\s*\[(?P<files>.*)\] *`), // match example: authors = ["Andr\u00E9 Arko".freeze, "Samuel Giddins".freeze, "Colby Swandale".freeze, // "Hiroshi Shibata".freeze, "David Rodr\u00EDguez".freeze, "Grey Baker".freeze...] "authors": regexp.MustCompile(`.*\.authors\s*=\s*\[(?P<authors>.*)\] *`), // match example: licenses = ["MIT".freeze] ----> "MIT".freeze "licenses": regexp.MustCompile(`.*\.licenses\s*=\s*\[(?P<licenses>.*)\] *`), } var postProcessors = map[string]postProcessor{ "files": processList, "authors": processList,
func processList(s string) []string { var results []string for _, item := range strings.Split(s, ",") { results = append(results, strings.Trim(item, "\" ")) } return results } func parseGemSpecEntries(_ string, reader io.Reader) ([]pkg.Package, error) { var pkgs []pkg.Package var fields = make(map[string]interface{}) scanner := bufio.NewScanner(reader) for scanner.Scan() { line := scanner.Text() sanitizedLine := strings.TrimSpace(line) sanitizedLine = strings.ReplaceAll(sanitizedLine, ".freeze", "") sanitizedLine = renderUtf8(sanitizedLine) if sanitizedLine == "" { continue } for field, pattern := range patterns { matchMap := internal.MatchNamedCaptureGroups(pattern, sanitizedLine) if value := matchMap[field]; value != "" { if postProcessor := postProcessors[field]; postProcessor != nil { fields[field] = postProcessor(value) } else { fields[field] = value } // TODO: note that a line could actually match on multiple patterns, though this is unlikely break } } } if fields["name"] != "" && fields["version"] != "" { var metadata pkg.GemMetadata if err := mapstructure.Decode(fields, &metadata); err != nil { return nil, fmt.Errorf("unable to decode gem metadata: %w", err) } pkgs = append(pkgs, pkg.Package{ Name: metadata.Name, Version: metadata.Version, Licenses: metadata.Licenses, Language: pkg.Ruby, Type: pkg.GemPkg, MetadataType: pkg.GemMetadataType, Metadata: metadata, }) } return pkgs, nil } // renderUtf8 takes any escaped string sub-sections from the ruby string and replaces those sections with the UTF8 runes. func renderUtf8(s string) string { fullReplacement := unicodePattern.ReplaceAllStringFunc(s, func(unicodeSection string) string { var replacement string // note: the json parser already has support for interpreting hex-representations of unicode escaped strings as unicode runes. // we can do this ourselves with strconv.Atoi, or leverage the existing json package. if err := json.Unmarshal([]byte(`"`+unicodeSection+`"`), &replacement); err != nil { return unicodeSection } return replacement }) return fullReplacement }
"licenses": processList, }
instructions.rs
#![allow(non_snake_case)] use crate::cdsl::instructions::{ AllInstructions, InstructionBuilder as Inst, InstructionGroup, InstructionGroupBuilder, }; use crate::cdsl::operands::Operand; use crate::cdsl::types::ValueType; use crate::cdsl::typevar::{Interval, TypeSetBuilder, TypeVar}; use crate::shared::entities::EntityRefs; use crate::shared::formats::Formats; use crate::shared::immediates::Immediates; use crate::shared::types; #[allow(clippy::many_single_char_names)] pub(crate) fn define( mut all_instructions: &mut AllInstructions, formats: &Formats, immediates: &Immediates, entities: &EntityRefs, ) -> InstructionGroup
{ let mut ig = InstructionGroupBuilder::new(&mut all_instructions); let iflags: &TypeVar = &ValueType::Special(types::Flag::IFlags.into()).into(); let iWord = &TypeVar::new( "iWord", "A scalar integer machine word", TypeSetBuilder::new().ints(32..64).build(), ); let nlo = &Operand::new("nlo", iWord).with_doc("Low part of numerator"); let nhi = &Operand::new("nhi", iWord).with_doc("High part of numerator"); let d = &Operand::new("d", iWord).with_doc("Denominator"); let q = &Operand::new("q", iWord).with_doc("Quotient"); let r = &Operand::new("r", iWord).with_doc("Remainder"); ig.push( Inst::new( "x86_udivmodx", r#" Extended unsigned division. Concatenate the bits in `nhi` and `nlo` to form the numerator. Interpret the bits as an unsigned number and divide by the unsigned denominator `d`. Trap when `d` is zero or if the quotient is larger than the range of the output. Return both quotient and remainder. "#, &formats.ternary, ) .operands_in(vec![nlo, nhi, d]) .operands_out(vec![q, r]) .can_trap(true), ); ig.push( Inst::new( "x86_sdivmodx", r#" Extended signed division. Concatenate the bits in `nhi` and `nlo` to form the numerator. Interpret the bits as a signed number and divide by the signed denominator `d`. Trap when `d` is zero or if the quotient is outside the range of the output. Return both quotient and remainder. "#, &formats.ternary, ) .operands_in(vec![nlo, nhi, d]) .operands_out(vec![q, r]) .can_trap(true), ); let argL = &Operand::new("argL", iWord); let argR = &Operand::new("argR", iWord); let resLo = &Operand::new("resLo", iWord); let resHi = &Operand::new("resHi", iWord); ig.push( Inst::new( "x86_umulx", r#" Unsigned integer multiplication, producing a double-length result. Polymorphic over all scalar integer types, but does not support vector types. "#, &formats.binary, ) .operands_in(vec![argL, argR]) .operands_out(vec![resLo, resHi]), ); ig.push( Inst::new( "x86_smulx", r#" Signed integer multiplication, producing a double-length result. Polymorphic over all scalar integer types, but does not support vector types. "#, &formats.binary, ) .operands_in(vec![argL, argR]) .operands_out(vec![resLo, resHi]), ); let Float = &TypeVar::new( "Float", "A scalar or vector floating point number", TypeSetBuilder::new() .floats(Interval::All) .simd_lanes(Interval::All) .build(), ); let IntTo = &TypeVar::new( "IntTo", "An integer type with the same number of lanes", TypeSetBuilder::new() .ints(32..64) .simd_lanes(Interval::All) .build(), ); let x = &Operand::new("x", Float); let a = &Operand::new("a", IntTo); ig.push( Inst::new( "x86_cvtt2si", r#" Convert with truncation floating point to signed integer. The source floating point operand is converted to a signed integer by rounding towards zero. If the result can't be represented in the output type, returns the smallest signed value the output type can represent. This instruction does not trap. "#, &formats.unary, ) .operands_in(vec![x]) .operands_out(vec![a]), ); let x = &Operand::new("x", Float); let a = &Operand::new("a", Float); let y = &Operand::new("y", Float); ig.push( Inst::new( "x86_fmin", r#" Floating point minimum with x86 semantics. This is equivalent to the C ternary operator `x < y ? x : y` which differs from `fmin` when either operand is NaN or when comparing +0.0 to -0.0. When the two operands don't compare as LT, `y` is returned unchanged, even if it is a signalling NaN. "#, &formats.binary, ) .operands_in(vec![x, y]) .operands_out(vec![a]), ); ig.push( Inst::new( "x86_fmax", r#" Floating point maximum with x86 semantics. 
This is equivalent to the C ternary operator `x > y ? x : y` which differs from `fmax` when either operand is NaN or when comparing +0.0 to -0.0. When the two operands don't compare as GT, `y` is returned unchanged, even if it is a signalling NaN. "#, &formats.binary, ) .operands_in(vec![x, y]) .operands_out(vec![a]), ); let x = &Operand::new("x", iWord); ig.push( Inst::new( "x86_push", r#" Pushes a value onto the stack. Decrements the stack pointer and stores the specified value on to the top. This is polymorphic in i32 and i64. However, it is only implemented for i64 in 64-bit mode, and only for i32 in 32-bit mode. "#, &formats.unary, ) .operands_in(vec![x]) .other_side_effects(true) .can_store(true), ); ig.push( Inst::new( "x86_pop", r#" Pops a value from the stack. Loads a value from the top of the stack and then increments the stack pointer. This is polymorphic in i32 and i64. However, it is only implemented for i64 in 64-bit mode, and only for i32 in 32-bit mode. "#, &formats.nullary, ) .operands_out(vec![x]) .other_side_effects(true) .can_load(true), ); let y = &Operand::new("y", iWord); let rflags = &Operand::new("rflags", iflags); ig.push( Inst::new( "x86_bsr", r#" Bit Scan Reverse -- returns the bit-index of the most significant 1 in the word. Result is undefined if the argument is zero. However, it sets the Z flag depending on the argument, so it is at least easy to detect and handle that case. This is polymorphic in i32 and i64. It is implemented for both i64 and i32 in 64-bit mode, and only for i32 in 32-bit mode. "#, &formats.unary, ) .operands_in(vec![x]) .operands_out(vec![y, rflags]), ); ig.push( Inst::new( "x86_bsf", r#" Bit Scan Forwards -- returns the bit-index of the least significant 1 in the word. Is otherwise identical to 'bsr', just above. "#, &formats.unary, ) .operands_in(vec![x]) .operands_out(vec![y, rflags]), ); const CFI_LABEL_SIZE_BITS: u16 = 64; // if changing this, also change the size of `block_label_imm` below let block_label = &TypeVar::new( "block_label", "A CFI block label", TypeSetBuilder::new() .ints(CFI_LABEL_SIZE_BITS..CFI_LABEL_SIZE_BITS) .build(), ); let cfi_label = &Operand::new("cfi_label", block_label); let block1_label = &Operand::new("block1_label", block_label); let block_label_imm = &Operand::new("i", &immediates.imm64) .with_doc( "An immediate representing the block label"); ig.push( Inst::new( "set_cfi_label", r#" This sets the CFI label register (`r14`) to the label given as an argument to this instruction. "#, &formats.unary_imm, ) .operands_in(vec![block_label_imm]), ); ig.push( Inst::new( "condbr_get_new_cfi_label", r#" Get the new cfi label which should be `cmov`d into `r14` if the branch condition is true. The insutruction assumes that the flags have been set for test r14, r14 If `r14` is currently zero (we're currently on the right path), then this instruction return `block_label`. If `r14` is currently nonzero (we're currently on the wrong path), then this instruction will return r14. The full sequence is: ``` ---- this | mov tmp, r14 inst | cmovz tmp, block1_label ---- < set flags for branch condition > cmovx r14, tmp jx ``` where "this inst" takes `block_label` as input and returns `tmp`. "#, &formats.unary, ) .operands_in(vec![block1_label]) .operands_out(vec![block1_label]) ); ig.push( Inst::new( "conditionally_set_cfi_label", r#" This sets the CFI label register (`r14`) to the label given as an argument to this instruction, but only if the CFI label register was currently 0 (indicating we're on the right path). 
"#, &formats.unary, ) .operands_in(vec![cfi_label]), ); ig.push( Inst::new( "cfi_check_that_label_is_equal_to", r#" This inserts the proper checks that the CFI label register (`r14`) is equal to the given value. If it is not equal, it zeroes the heap pointer via cmov. Following this instruction, the CFI label register (`r14`) will be zero if the check passed, and nonzero if it did not. "#, &formats.unary, ) .operands_in(vec![cfi_label]), ); ig.push( Inst::new( "cfi_sub", r#" cfi_check but doesn't zero anything, just sets the CFI label register (`r14`) to zero if appropriate. "#, &formats.unary, ) .operands_in(vec![cfi_label]), ); let reg = &Operand::new("reg", block_label); let reg1 = &Operand::new("reg1", block_label); let reg2 = &Operand::new("reg2", block_label); ig.push( Inst::new( "cfi_reg_set", r#" Takes two registers. First register must be zero. Sets the first regiter to second reg iff the CFI label register (`r14`) is not zero. "#, &formats.binary, ) .operands_in(vec![reg1, reg2]) .operands_out(vec![reg]), ); let uimm8 = &immediates.uimm8; let TxN = &TypeVar::new( "TxN", "A SIMD vector type", TypeSetBuilder::new() .ints(Interval::All) .floats(Interval::All) .bools(Interval::All) .simd_lanes(Interval::All) .includes_scalars(false) .build(), ); let a = &Operand::new("a", TxN).with_doc("A vector value (i.e. held in an XMM register)"); let b = &Operand::new("b", TxN).with_doc("A vector value (i.e. held in an XMM register)"); let i = &Operand::new("i", uimm8,).with_doc( "An ordering operand controlling the copying of data from the source to the destination; see PSHUFD in Intel manual for details"); ig.push( Inst::new( "x86_pshufd", r#" Packed Shuffle Doublewords -- copies data from either memory or lanes in an extended register and re-orders the data according to the passed immediate byte. "#, &formats.extract_lane, ) .operands_in(vec![a, i]) // TODO allow copying from memory here (need more permissive type than TxN) .operands_out(vec![a]), ); ig.push( Inst::new( "x86_pshufb", r#" Packed Shuffle Bytes -- re-orders data in an extended register using a shuffle mask from either memory or another extended register "#, &formats.binary, ) .operands_in(vec![a, b]) // TODO allow re-ordering from memory here (need more permissive type than TxN) .operands_out(vec![a]), ); let Idx = &Operand::new("Idx", uimm8).with_doc("Lane index"); let x = &Operand::new("x", TxN); let a = &Operand::new("a", &TxN.lane_of()); ig.push( Inst::new( "x86_pextr", r#" Extract lane ``Idx`` from ``x``. The lane index, ``Idx``, is an immediate value, not an SSA value. It must indicate a valid lane index for the type of ``x``. "#, &formats.extract_lane, ) .operands_in(vec![x, Idx]) .operands_out(vec![a]), ); let IBxN = &TypeVar::new( "IBxN", "A SIMD vector type containing only booleans and integers", TypeSetBuilder::new() .ints(Interval::All) .bools(Interval::All) .simd_lanes(Interval::All) .includes_scalars(false) .build(), ); let x = &Operand::new("x", IBxN); let y = &Operand::new("y", &IBxN.lane_of()).with_doc("New lane value"); let a = &Operand::new("a", IBxN); ig.push( Inst::new( "x86_pinsr", r#" Insert ``y`` into ``x`` at lane ``Idx``. The lane index, ``Idx``, is an immediate value, not an SSA value. It must indicate a valid lane index for the type of ``x``. 
"#, &formats.insert_lane, ) .operands_in(vec![x, Idx, y]) .operands_out(vec![a]), ); let FxN = &TypeVar::new( "FxN", "A SIMD vector type containing floats", TypeSetBuilder::new() .floats(Interval::All) .simd_lanes(Interval::All) .includes_scalars(false) .build(), ); let x = &Operand::new("x", FxN); let y = &Operand::new("y", &FxN.lane_of()).with_doc("New lane value"); let a = &Operand::new("a", FxN); ig.push( Inst::new( "x86_insertps", r#" Insert a lane of ``y`` into ``x`` at using ``Idx`` to encode both which lane the value is extracted from and which it is inserted to. This is similar to x86_pinsr but inserts floats, which are already stored in an XMM register. "#, &formats.insert_lane, ) .operands_in(vec![x, Idx, y]) .operands_out(vec![a]), ); let x = &Operand::new("x", FxN); let y = &Operand::new("y", FxN); let a = &Operand::new("a", FxN); ig.push( Inst::new( "x86_movsd", r#" Move the low 64 bits of the float vector ``y`` to the low 64 bits of float vector ``x`` "#, &formats.binary, ) .operands_in(vec![x, y]) .operands_out(vec![a]), ); ig.push( Inst::new( "x86_movlhps", r#" Move the low 64 bits of the float vector ``y`` to the high 64 bits of float vector ``x`` "#, &formats.binary, ) .operands_in(vec![x, y]) .operands_out(vec![a]), ); let IxN = &TypeVar::new( "IxN", "A SIMD vector type containing integers", TypeSetBuilder::new() .ints(Interval::All) .simd_lanes(Interval::All) .includes_scalars(false) .build(), ); let I64x2 = &TypeVar::new( "I64x2", "A SIMD vector type containing one large integer (the upper lane is concatenated with \ the lower lane to form the integer)", TypeSetBuilder::new() .ints(64..64) .simd_lanes(2..2) .includes_scalars(false) .build(), ); let x = &Operand::new("x", IxN).with_doc("Vector value to shift"); let y = &Operand::new("y", I64x2).with_doc("Number of bits to shift"); let a = &Operand::new("a", IxN); ig.push( Inst::new( "x86_psll", r#" Shift Packed Data Left Logical -- This implements the behavior of the shared instruction ``ishl`` but alters the shift operand to live in an XMM register as expected by the PSLL* family of instructions. "#, &formats.binary, ) .operands_in(vec![x, y]) .operands_out(vec![a]), ); ig.push( Inst::new( "x86_psrl", r#" Shift Packed Data Right Logical -- This implements the behavior of the shared instruction ``ushr`` but alters the shift operand to live in an XMM register as expected by the PSRL* family of instructions. "#, &formats.binary, ) .operands_in(vec![x, y]) .operands_out(vec![a]), ); ig.push( Inst::new( "x86_psra", r#" Shift Packed Data Right Arithmetic -- This implements the behavior of the shared instruction ``sshr`` but alters the shift operand to live in an XMM register as expected by the PSRA* family of instructions. "#, &formats.binary, ) .operands_in(vec![x, y]) .operands_out(vec![a]), ); let x = &Operand::new("x", TxN); let y = &Operand::new("y", TxN); let f = &Operand::new("f", iflags); ig.push( Inst::new( "x86_ptest", r#" Logical Compare -- PTEST will set the ZF flag if all bits in the result are 0 of the bitwise AND of the first source operand (first operand) and the second source operand (second operand). PTEST sets the CF flag if all bits in the result are 0 of the bitwise AND of the second source operand (second operand) and the logical NOT of the destination operand (first operand). 
"#, &formats.binary, ) .operands_in(vec![x, y]) .operands_out(vec![f]), ); let x = &Operand::new("x", IxN); let y = &Operand::new("y", IxN); let a = &Operand::new("a", IxN); ig.push( Inst::new( "x86_pmaxs", r#" Maximum of Packed Signed Integers -- Compare signed integers in the first and second operand and return the maximum values. "#, &formats.binary, ) .operands_in(vec![x, y]) .operands_out(vec![a]), ); ig.push( Inst::new( "x86_pmaxu", r#" Maximum of Packed Unsigned Integers -- Compare unsigned integers in the first and second operand and return the maximum values. "#, &formats.binary, ) .operands_in(vec![x, y]) .operands_out(vec![a]), ); ig.push( Inst::new( "x86_pmins", r#" Minimum of Packed Signed Integers -- Compare signed integers in the first and second operand and return the minimum values. "#, &formats.binary, ) .operands_in(vec![x, y]) .operands_out(vec![a]), ); ig.push( Inst::new( "x86_pminu", r#" Minimum of Packed Unsigned Integers -- Compare unsigned integers in the first and second operand and return the minimum values. "#, &formats.binary, ) .operands_in(vec![x, y]) .operands_out(vec![a]), ); let i64_t = &TypeVar::new( "i64_t", "A scalar 64bit integer", TypeSetBuilder::new().ints(64..64).build(), ); let GV = &Operand::new("GV", &entities.global_value); let addr = &Operand::new("addr", i64_t); ig.push( Inst::new( "x86_elf_tls_get_addr", r#" Elf tls get addr -- This implements the GD TLS model for ELF. The clobber output should not be used. "#, &formats.unary_global_value, ) // This is a bit overly broad to mark as clobbering *all* the registers, because it should // only preserve caller-saved registers. There's no way to indicate this to register // allocation yet, though, so mark as clobbering all registers instead. .clobbers_all_regs(true) .operands_in(vec![GV]) .operands_out(vec![addr]), ); ig.push( Inst::new( "x86_macho_tls_get_addr", r#" Mach-O tls get addr -- This implements TLS access for Mach-O. The clobber output should not be used. "#, &formats.unary_global_value, ) // See above comment for x86_elf_tls_get_addr. .clobbers_all_regs(true) .operands_in(vec![GV]) .operands_out(vec![addr]), ); ig.build() }
transform.py
#!/usr/bin/env python # -*- coding: utf8 -*- # ============================================================================ # Copyright (c) 2013-2020 nexB Inc. http://www.nexb.com/ - All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals from collections import Counter from collections import OrderedDict import io import json import attr from attributecode import CRITICAL from attributecode import Error from attributecode import saneyaml from attributecode.util import csv from attributecode.util import python2 from attributecode.util import replace_tab_with_spaces if python2: # pragma: nocover from itertools import izip_longest as zip_longest # NOQA else: # pragma: nocover from itertools import zip_longest # NOQA def transform_csv_to_csv(location, output, transformer): """ Read a CSV file at `location` and write a new CSV file at `output`. Apply transformations using the `transformer` Transformer. Return a list of Error objects. """ if not transformer: raise ValueError('Cannot transform without Transformer') rows = read_csv_rows(location) errors = [] data = iter(rows) names = next(rows) field_names = strip_trailing_fields_csv(names) dupes = check_duplicate_fields(field_names) if dupes: msg = u'Duplicated field name: %(name)s' for name in dupes: errors.append(Error(CRITICAL, msg % locals())) return errors # Convert to dicts new_data = [OrderedDict(zip_longest(field_names, item)) for item in data] field_names, updated_data, errors = transform_data(new_data, transformer) if errors: return errors else: write_csv(output, updated_data, field_names) return [] def transform_json_to_json(location, output, transformer): """ Read a JSON file at `location` and write a new JSON file at `output`. Apply transformations using the `transformer` Transformer. Return a list of Error objects. """ if not transformer: raise ValueError('Cannot transform without Transformer') items = read_json(location) data = strip_trailing_fields_json(items) new_data = normalize_dict_data(data) field_names, updated_data, errors = transform_data(new_data, transformer) if errors: return errors else: write_json(output, updated_data) return [] def strip_trailing_fields_csv(names): """ Strip trailing spaces for field names #456 """ field_names = [] for name in names: field_names.append(name.strip()) return field_names def strip_trailing_fields_json(items): """ Strip trailing spaces for field name #456 """ data = [] od = OrderedDict() for item in items: for field in item: stripped_field_name = field.strip() od[stripped_field_name] = item[field] data.append(od) return data def normalize_dict_data(data): """ Check if the input data is from scancode-toolkit and, if so, normalize it to a plain dictionary. Return the normalized data as a list.
""" try: # Check if this is a JSON output from scancode-toolkit if(data["headers"][0]["tool_name"] == "scancode-toolkit"): #only takes data inside "files" new_data = data["files"] except: new_data = data if not isinstance(new_data, list): new_data = [new_data] return new_data def transform_data(data, transformer): """ Read a dictionary and apply transformations using the `transformer` Transformer. Return a tuple of: ([field names...], [transformed ordered dict...], [Error objects..]) """ if not transformer: return data renamed_field_data = transformer.apply_renamings(data) field_names = renamed_field_data[0].keys() if transformer.field_filters: renamed_field_data = list(transformer.filter_fields(renamed_field_data)) field_names = [c for c in field_names if c in transformer.field_filters] if transformer.exclude_fields: renamed_field_data = list(transformer.filter_excluded(renamed_field_data)) field_names = [c for c in field_names if c not in transformer.exclude_fields] errors = transformer.check_required_fields(renamed_field_data) if errors: return field_names, data, errors return field_names, renamed_field_data, errors tranformer_config_help = ''' A transform configuration file is used to describe which transformations and validations to apply to a source CSV file. This is a simple text file using YAML format, using the same format as an .ABOUT file. The attributes that can be set in a configuration file are: * field_renamings: An optional map of source CSV or JSON field name to target CSV/JSON new field name that is used to rename CSV fields. For instance with this configuration the fields "Directory/Location" will be renamed to "about_resource" and "foo" to "bar": field_renamings: about_resource : 'Directory/Location' bar : foo The renaming is always applied first before other transforms and checks. All other field names referenced below are these that exist AFTER the renamings have been applied to the existing field names. * required_fields: An optional list of required field names that must have a value, beyond the standard fields names. If a source CSV/JSON does not have such a field or a row is missing a value for a required field, an error is reported. For instance with this configuration an error will be reported if the fields "name" and "version" are missing or if any row does not have a value set for these fields: required_fields: - name - version * field_filters: An optional list of field names that should be kept in the transformed CSV/JSON. If this list is provided, all the fields from the source CSV/JSON that should be kept in the target CSV/JSON must be listed regardless of either standard or required fields. If this list is not provided, all source CSV/JSON fields are kept in the transformed target CSV/JSON. For instance with this configuration the target CSV/JSON will only contains the "name" and "version" fields and no other field: field_filters: - name - version * exclude_fields: An optional list of field names that should be excluded in the transformed CSV/JSON. If this list is provided, all the fields from the source CSV/JSON that should be excluded in the target CSV/JSON must be listed. Excluding standard or required fields will cause an error. If this list is not provided, all source CSV/JSON fields are kept in the transformed target CSV/JSON. 
For instance with this configuration the target CSV/JSON will not contain the "type" and "temp" fields: exclude_fields: - type - temp ''' @attr.attributes class Transformer(object): __doc__ = tranformer_config_help field_renamings = attr.attrib(default=attr.Factory(dict)) required_fields = attr.attrib(default=attr.Factory(list)) field_filters = attr.attrib(default=attr.Factory(list)) exclude_fields = attr.attrib(default=attr.Factory(list)) # a list of all the standard fields from AboutCode toolkit standard_fields = attr.attrib(default=attr.Factory(list), init=False) # a list of the subset of standard fields that are essential and MUST be # present for AboutCode toolkit to work essential_fields = attr.attrib(default=attr.Factory(list), init=False) # called by attr after the __init__() def __attrs_post_init__(self, *args, **kwargs): from attributecode.model import About about = About() self.essential_fields = list(about.required_fields) self.standard_fields = [f.name for f in about.all_fields()] @classmethod def default(cls): """ Return a default Transformer with built-in transforms. """ return cls( field_renamings={}, required_fields=[], field_filters=[], exclude_fields=[], ) @classmethod def from_file(cls, location): """ Load and return a Transformer instance from a YAML configuration file at `location`. """ with io.open(location, encoding='utf-8') as conf: data = saneyaml.load(replace_tab_with_spaces(conf.read())) return cls( field_renamings=data.get('field_renamings', {}), required_fields=data.get('required_fields', []), field_filters=data.get('field_filters', []), exclude_fields=data.get('exclude_fields', []), ) def check_required_fields(self, data): """ Return a list of Error objects for a `data` list of ordered dicts where a dict is missing a value for a required field name. """ errors = [] required = set(self.essential_fields + self.required_fields) if not required: return [] for rn, item in enumerate(data): missings = [rk for rk in required if not item.get(rk)] if not missings: continue missings = ', '.join(missings) msg = 'Row {rn} is missing required values for fields: {missings}' errors.append(Error(CRITICAL, msg.format(**locals()))) return errors def apply_renamings(self, data): """ Return a transformed `data` list of dicts where fields are renamed based on this Transformer configuration. """ renamings = self.field_renamings if not renamings: return data renamings = {n: rn for n, rn in renamings.items()} renamed_list = [] for row in data: renamed = OrderedDict() for key in row: matched = False for renamed_key in renamings: if key == renamings[renamed_key]: renamed[renamed_key] = row[key] matched = True if not matched: renamed[key] = row[key] renamed_list.append(renamed) return renamed_list """ def clean_fields(self, field_names): Apply standard cleanups to a list of fields and return these. if not field_names: return field_names return [c.strip().lower() for c in field_names] """ def filter_fields(self, data): """ Yield transformed dicts from a `data` list of dicts keeping only fields with a name in the `field_filters` of this Transformer. Return the data unchanged if no `field_filters` exists. """ #field_filters = set(self.clean_fields(self.field_filters)) field_filters = set(self.field_filters) for entry in data: items = ((k, v) for k, v in entry.items() if k in field_filters) yield OrderedDict(items) def filter_excluded(self, data): """ Yield transformed dicts from a `data` list of dicts excluding fields with names in the `exclude_fields` of this Transformer.
Return the data unchanged if no `exclude_fields` exists. """ #exclude_fields = set(self.clean_fields(self.exclude_fields)) exclude_fields = set(self.exclude_fields) for entry in data: items = ((k, v) for k, v in entry.items() if k not in exclude_fields) yield OrderedDict(items) def check_duplicate_fields(field_names): """ Check that there are no duplicates in the `field_names` list of field name strings, ignoring case. Return a list of unique duplicated field names. """ counted = Counter(c.lower() for c in field_names)
def read_csv_rows(location): """ Yield rows (as a list of values) from a CSV file at `location`. """ with io.open(location, encoding='utf-8', errors='replace') as csvfile: reader = csv.reader(csvfile) for row in reader: yield row def read_json(location): """ Return the data loaded from a JSON file at `location`, preserving key order. """ with io.open(location, encoding='utf-8', errors='replace') as jsonfile: data = json.load(jsonfile, object_pairs_hook=OrderedDict) return data def write_csv(location, data, field_names): # NOQA """ Write the `data` list of ordered dicts to a CSV file at `location` using the `field_names`. """ with io.open(location, 'w', encoding='utf-8', newline='\n') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=field_names) writer.writeheader() writer.writerows(data) def write_json(location, data): """ Write the `data` list of ordered dicts to a JSON file at `location`. """ with open(location, 'w') as jsonfile: json.dump(data, jsonfile, indent=3)
return [field for field, count in sorted(counted.items()) if count > 1]
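Reassembling the prefix and the middle above gives the complete duplicate check. A short Python restatement with a usage example showing that the comparison is case-insensitive and each duplicate is reported once:

```python
# check_duplicate_fields as defined above: count lowercased names and
# return, sorted, the ones seen more than once.
from collections import Counter

def check_duplicate_fields(field_names):
    counted = Counter(c.lower() for c in field_names)
    return [field for field, count in sorted(counted.items()) if count > 1]

assert check_duplicate_fields(['Name', 'name', 'version']) == ['name']
```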
Order.ts
import { Entity, PrimaryGeneratedColumn, CreateDateColumn, UpdateDateColumn, OneToOne, JoinColumn, OneToMany, Column, } from 'typeorm'; import Customer from '@modules/customers/infra/typeorm/entities/Customer'; import OrdersProducts from '@modules/orders/infra/typeorm/entities/OrdersProducts'; @Entity('orders') class Order { @PrimaryGeneratedColumn('uuid') id: string; @Column() customer_id: string;
@JoinColumn({ name: 'customer_id' }) customer: Customer; @OneToMany(() => OrdersProducts, productsToOrder => productsToOrder.order, { cascade: true, eager: true, }) order_products: OrdersProducts[]; @CreateDateColumn() created_at: Date; @UpdateDateColumn() updated_at: Date; } export default Order;
@OneToOne(() => Customer, { eager: true })
collecting_reporter.py
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/master/COPYING from pylint.reporters.base_reporter import BaseReporter class
(BaseReporter): """collects messages""" name = "collector" def __init__(self): BaseReporter.__init__(self) self.messages = [] def handle_message(self, msg): self.messages.append(msg) _display = None
CollectingReporter
test_vmss_manager.py
import unittest import time import os from datetime import datetime, timedelta from unittest.mock import patch from spaceone.core.unittest.result import print_data from spaceone.core.unittest.runner import RichTestRunner from spaceone.core import config from spaceone.core.transaction import Transaction from spaceone.core import utils from spaceone.inventory.error import * from spaceone.inventory.connector.virtual_machine_scale_set import VmScaleSetConnector from spaceone.inventory.manager.virtual_machine_scale_set_manager import VmScaleSetManager class TestVMScaleSetManager(unittest.TestCase): @classmethod def setUpClass(cls): config.init_conf(package='spaceone.inventory') config_path = os.environ.get('TEST_CONFIG') test_config = utils.load_yaml_from_file(config_path) cls.schema = 'azure_client_secret' cls.azure_credentials = test_config.get('AZURE_CREDENTIALS', {}) cls.vmss_connector = VmScaleSetConnector(transaction=Transaction(), config={}, secret_data=cls.azure_credentials) cls.vmss_manager = VmScaleSetManager(Transaction()) super().setUpClass() @classmethod def tearDownClass(cls) -> None: super().tearDownClass() def
(self, *args): secret_data = self.azure_credentials params = {'options': {}, 'secret_data': secret_data, 'filter': {}} vm_scale_sets = self.vmss_manager.collect_power_state(params) for vm_scale_set in vm_scale_sets: print(vm_scale_set.to_primitive()) if __name__ == "__main__": unittest.main(testRunner=RichTestRunner)
test_collect_cloud_service
benchmark_test.go
package day20 import ( "testing" "github.com/nlowe/aoc2021/challenge" ) func
(b *testing.B) { for i := 0; i < b.N; i++ { _ = partA(challenge.FromFile()) } } func BenchmarkB(b *testing.B) { for i := 0; i < b.N; i++ { _ = partB(challenge.FromFile()) } }
BenchmarkA
test_anosim.py
# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- import io from functools import partial from unittest import TestCase, main import numpy as np import pandas as pd from pandas.util.testing import assert_series_equal from skbio import DistanceMatrix from skbio.stats.distance import anosim class TestANOSIM(TestCase): """All results were verified with R (vegan::anosim).""" def setUp(self): # Distance matrices with and without ties in the ranks, with 2 groups # of equal size.
def test_ties(self): # Ensure we get the same results if we rerun the method using the same # inputs. Also ensure we get the same results if we run the method # using a grouping vector or a data frame with equivalent groupings. exp = pd.Series(index=self.exp_index, data=['ANOSIM', 'R', 4, 2, 0.25, 0.671, 999], name='ANOSIM results') for _ in range(2): np.random.seed(0) obs = anosim(self.dm_ties, self.grouping_equal) self.assert_series_equal(obs, exp) for _ in range(2): np.random.seed(0) obs = anosim(self.dm_ties, self.df, column='Group') self.assert_series_equal(obs, exp) def test_no_ties(self): exp = pd.Series(index=self.exp_index, data=['ANOSIM', 'R', 4, 2, 0.625, 0.332, 999], name='ANOSIM results') np.random.seed(0) obs = anosim(self.dm_no_ties, self.grouping_equal) self.assert_series_equal(obs, exp) def test_no_permutations(self): exp = pd.Series(index=self.exp_index, data=['ANOSIM', 'R', 4, 2, 0.625, np.nan, 0], name='ANOSIM results') obs = anosim(self.dm_no_ties, self.grouping_equal, permutations=0) self.assert_series_equal(obs, exp) def test_unequal_group_sizes(self): exp = pd.Series(index=self.exp_index, data=['ANOSIM', 'R', 6, 3, -0.363636, 0.878, 999], name='ANOSIM results') np.random.seed(0) obs = anosim(self.dm_unequal, self.grouping_unequal) self.assert_series_equal(obs, exp) np.random.seed(0) obs = anosim(self.dm_unequal, self.grouping_unequal_relabeled) self.assert_series_equal(obs, exp) if __name__ == '__main__': main()
dm_ids = ['s1', 's2', 's3', 's4'] self.grouping_equal = ['Control', 'Control', 'Fast', 'Fast'] self.df = pd.read_csv( io.StringIO('ID,Group\ns2,Control\ns3,Fast\ns4,Fast\ns5,Control\n' 's1,Control'), index_col=0) self.dm_ties = DistanceMatrix([[0, 1, 1, 4], [1, 0, 3, 2], [1, 3, 0, 3], [4, 2, 3, 0]], dm_ids) self.dm_no_ties = DistanceMatrix([[0, 1, 5, 4], [1, 0, 3, 2], [5, 3, 0, 3], [4, 2, 3, 0]], dm_ids) # Test with 3 groups of unequal size. This data also generates a # negative R statistic. self.grouping_unequal = ['Control', 'Treatment1', 'Treatment2', 'Treatment1', 'Control', 'Control'] # Equivalent grouping but with different labels -- groups should be # assigned different integer labels but results should be the same. self.grouping_unequal_relabeled = ['z', 42, 'abc', 42, 'z', 'z'] self.dm_unequal = DistanceMatrix( [[0.0, 1.0, 0.1, 0.5678, 1.0, 1.0], [1.0, 0.0, 0.002, 0.42, 0.998, 0.0], [0.1, 0.002, 0.0, 1.0, 0.123, 1.0], [0.5678, 0.42, 1.0, 0.0, 0.123, 0.43], [1.0, 0.998, 0.123, 0.123, 0.0, 0.5], [1.0, 0.0, 1.0, 0.43, 0.5, 0.0]], ['s1', 's2', 's3', 's4', 's5', 's6']) # Expected series index is the same across all tests. self.exp_index = ['method name', 'test statistic name', 'sample size', 'number of groups', 'test statistic', 'p-value', 'number of permutations'] # Stricter series equality testing than the default. self.assert_series_equal = partial(assert_series_equal, check_index_type=True, check_series_type=True)
variable.rs
use crate::SwapContainer::SwapContainer; type Ty = i64; pub struct Variable{ desc: String, domain: SwapContainer, } impl Variable{ pub fn getDomain(&self) -> Vec<Ty>
pub fn remove(&mut self, v: Ty) -> usize { 1 } }
{ Vec::new() }
shop.rs
use std::{collections::HashMap, fmt::Display}; use super::equipment::{Equipment, Shield, Sword}; use crate::character::Character; use crate::event::Event; use crate::game::Game; use crate::log; pub enum Error { NotEnoughGold, ItemNotAvailable, } /// Print the list of available items and their price. pub fn list(game: &Game) { let items = available_items(&game.player) .into_iter() .map(|(_, item)| item) .collect::<Vec<Box<dyn Shoppable>>>(); log::shop_list(game, items); } /// Buy an item and add it to the game. pub fn buy(game: &mut Game, item: &str) -> Result<(), Error> { let player = &mut game.player; let mut items = available_items(player) .into_iter() .collect::<HashMap<String, Box<dyn Shoppable>>>(); if let Some(item) = items.remove(item) { item.buy(game)?; Ok(()) } else { Err(Error::ItemNotAvailable) } } /// Build a list of items currently available at the shop fn available_items(player: &Character) -> Vec<(String, Box<dyn Shoppable>)> { let mut items = Vec::<(String, Box<dyn Shoppable>)>::new(); let level = available_level(player); let sword = Sword::new(level); if sword.is_upgrade_from(&player.sword.as_ref()) { items.push(("sword".to_string(), Box::new(sword))); } let shield = Shield::new(level); if shield.is_upgrade_from(&player.shield.as_ref()) { items.push(("shield".to_string(), Box::new(shield))); } let potion = super::Potion::new(level); items.push(("potion".to_string(), Box::new(potion))); let remedy = super::Remedy::new(); items.push(("remedy".to_string(), Box::new(remedy))); let escape = super::Escape::new(); items.push(("escape".to_string(), Box::new(escape))); items } /// The offered items/equipment have levels e.g. potion[1], sword[5], etc. /// they become available for purchase only when the player reaches that level fn available_level(player: &Character) -> i32 { // allow level 1 or level 5n std::cmp::max(1, (player.level / 5) * 5) } pub trait Shoppable: Display { fn cost(&self) -> i32; fn buy(&self, game: &mut Game) -> Result<(), Error> { if game.gold < self.cost() { return Err(Error::NotEnoughGold); } game.gold -= self.cost(); self.add_to(game); Event::emit( game, Event::ItemBought { item: self.to_string(), }, ); Ok(()) } fn add_to(&self, game: &mut Game); } impl Shoppable for Sword { fn cost(&self) -> i32 { self.level() * 500 } fn add_to(&self, game: &mut Game) { game.player.sword = Some(self.clone()) } } impl Shoppable for Shield { fn cost(&self) -> i32 { self.level() * 500 } fn add_to(&self, game: &mut Game) { game.player.shield = Some(self.clone()) } } impl Shoppable for super::Potion { fn cost(&self) -> i32 { self.level * 200 } fn add_to(&self, game: &mut Game) { game.add_item("potion", Box::new(self.clone())); } } impl Shoppable for super::Escape { fn cost(&self) -> i32 { 1000 } fn add_to(&self, game: &mut Game) { game.add_item("escape", Box::new(self.clone()));
fn cost(&self) -> i32 { 400 } fn add_to(&self, game: &mut Game) { game.add_item("remedy", Box::new(self.clone())); } }
} } impl Shoppable for super::Remedy {
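The available_level rule in this row ("level 1 or level 5n") reduces to one line of integer arithmetic. A quick Python model with a few worked values (a sketch only, outside the game's Rust types):

```python
# Sketch of shop.rs's available_level above: items unlock at level 1 and
# then at every multiple of 5 of the player's level.
def available_level(player_level: int) -> int:
    return max(1, (player_level // 5) * 5)

assert [available_level(l) for l in (1, 4, 5, 9, 12)] == [1, 1, 5, 5, 10]
```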
send_to_dory.go
/* ---------------------------------------------------------------------------- Copyright 2021 Dave Peterson <[email protected]> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ---------------------------------------------------------------------------- This is an example Go program that uses the doryclient library to send a message to Dory. */ package main import ( "flag" "fmt" "io/ioutil" "math" "net" "os" "path/filepath" "time" "github.com/dspeterson/dory/client/go/doryclient" ) type socketType string const ( unixDgram socketType = "unix-dgram" unixStream socketType = "unix-stream" ) type msgType string const ( anyPartition msgType = "AnyPartition" partitionKey msgType = "PartitionKey" ) type cmdLineArgs struct { // type of socket to use for communication with Dory sockType socketType // path for Dory's socket file sockPath string // type of message to send (see Dory documentation) mType msgType // If mType above specifies partitionKey, use this as the partition key. pKey uint32 // Kafka topic topic string // Kafka message key (can be empty) key string // Kafka message body value string } // Process and return command line args. Return nil if args are invalid. func getArgs() *cmdLineArgs { ret := &cmdLineArgs{} sockType := flag.String("socket-type", "unix-dgram", "unix-dgram or unix-stream") sockPath := flag.String("socket-path", "", "path to Dory's socket file") mType := flag.String("msg-type", "AnyPartition", "AnyPartition or PartitionKey") pKey := flag.Uint64("partition-key", 0, "partition key to use when sending PartitionKey message") topic := flag.String("topic", "", "Kafka topic") key := flag.String("key", "", "Kafka message key (can be empty)") value := flag.String("value", "", "Kafka message body") flag.Parse() if *sockType != string(unixDgram) && *sockType != string(unixStream) { _, _ = fmt.Fprintf(os.Stderr, "-socket-type must specify %v or %v\n", unixDgram, unixStream) return nil } if *sockPath == "" { _, _ = fmt.Fprintf(os.Stderr, "-socket-path argument is missing\n") return nil } if *mType != string(anyPartition) && *mType != string(partitionKey) { _, _ = fmt.Fprintf(os.Stderr, "-msg-type must specify %v or %v\n", anyPartition, partitionKey) return nil } if *pKey > math.MaxUint32
if *topic == "" { _, _ = fmt.Fprintf(os.Stderr, "-topic argument is missing\n") return nil } ret.sockType = socketType(*sockType) ret.sockPath = *sockPath ret.mType = msgType(*mType) ret.pKey = uint32(*pKey) ret.topic = *topic ret.key = *key ret.value = *value return ret } func getEpochMilliseconds() uint64 { return uint64(time.Now().UnixNano() / int64(time.Millisecond)) } // Send message to Dory as specified by args. Return true on success or false // on failure. func sendMsg(args *cmdLineArgs) bool { var sockType string switch args.sockType { case unixDgram: sockType = "unixgram" // UNIX domain datagram socket case unixStream: sockType = "unix" // UNIX domain stream socket default: _, _ = fmt.Fprintf(os.Stderr, "internal error: unknown socket type [%v]\n", args.sockType) return false } topic := []byte(args.topic) // Kafka topic msgKey := []byte(args.key) // Kafka message key (may be empty) msgValue := []byte(args.value) // Kafka message body // Create temporary directory for client socket file. dir, err := ioutil.TempDir("", "dory_go_client") if err != nil { _, _ = fmt.Fprintf(os.Stderr, "failed to create temp directory for client socket file: %v\n", err) return false } defer func() { cleanupErr := os.RemoveAll(dir) if cleanupErr != nil { _, _ = fmt.Fprintf(os.Stderr, "failed to remove temp directory for client socket file: %v\n", err) } }() // Set up socket communication with Dory. clientPath := filepath.Join(dir, "client.sock") clientAddr := net.UnixAddr{ Name: clientPath, Net: sockType, } serverAddr := net.UnixAddr{ Name: args.sockPath, Net: sockType, } conn, err := net.DialUnix(sockType, &clientAddr, &serverAddr) if err != nil { _, _ = fmt.Fprintf(os.Stderr, "failed to create %v socket: %v\n", sockType, err) return false } now := getEpochMilliseconds() var msg []byte switch args.mType { case anyPartition: msg, err = doryclient.CreateAnyPartitionMsg(topic, now, msgKey, msgValue) if err != nil { _, _ = fmt.Fprintf(os.Stderr, "failed to create AnyPartition message: %v\n", err) return false } case partitionKey: msg, err = doryclient.CreatePartitionKeyMsg(args.pKey, topic, now, msgKey, msgValue) if err != nil { _, _ = fmt.Fprintf(os.Stderr, "failed to create PartitionKey message: %v\n", err) return false } default: _, _ = fmt.Fprintf(os.Stderr, "internal error: unknown message type [%v]\n", args.mType) return false } _, err = conn.Write(msg) // send to Dory if err != nil { _, _ = fmt.Fprintf(os.Stderr, "failed to send %v %v message: %v\n", args.mType, args.sockType, err) return false } return true } func main() { exitCode := 0 defer func() { os.Exit(exitCode) }() args := getArgs() if args == nil { // invalid command line args exitCode = 1 return } // Send message to Dory as specified by command line args. ok := sendMsg(args) if !ok { exitCode = 1 } }
{ _, _ = fmt.Fprintf(os.Stderr, "-partition-key value %v is too large: max is %v\n", *pKey, math.MaxUint32) return nil }
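Two small details of this row translate cleanly: the millisecond timestamp attached to each message and the check that the uint64 flag value fits in the uint32 partition key. A Python sketch of both (not the doryclient API; names are illustrative):

```python
# Sketch of two details from send_to_dory.go above: the millisecond
# epoch timestamp the program passes to the message constructors, and
# the validation that a uint64 flag value fits in a uint32 partition key.
import time

UINT32_MAX = 2**32 - 1  # math.MaxUint32 in the Go code

def epoch_milliseconds() -> int:
    return time.time_ns() // 1_000_000

def validate_partition_key(p_key: int) -> int:
    if p_key > UINT32_MAX:
        raise ValueError(f"partition key {p_key} is too large: max is {UINT32_MAX}")
    return p_key
```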
i18n.js
import Vue from 'vue' import VueI18n from 'vue-i18n' Vue.use(VueI18n); function
() { const locales = require.context('./locales', true, /[A-Za-z0-9-_,\s]+\.json$/i); const messages = {}; locales.keys().forEach(key => { const matched = key.match(/([A-Za-z0-9-_]+)\./i); if (matched && matched.length > 1) { const locale = matched[1]; messages[locale] = locales(key) } }); return messages } export default new VueI18n({ locale: 'zh', fallbackLocale: 'en', silentFallbackWarn: true, formatFallbackMessages: true, // dateTimeFormats: { // 'en': { // short: { // month: 'short', day: 'numeric', // weekday: 'short', hour: 'numeric', minute: 'numeric' // }, // long: { // year: 'numeric', month: 'short', day: 'numeric', // hour: 'numeric', minute: 'numeric' // } // }, // 'zh': { // short: { // month: 'short', day: 'numeric', // weekday: 'short', hour: 'numeric', minute: 'numeric' // }, // long: { // year: 'numeric', month: 'short', day: 'numeric', // hour: 'numeric', minute: 'numeric' // } // }, // 'ja': { // short: { // month: 'short', day: 'numeric', // weekday: 'short', hour: 'numeric', minute: 'numeric' // }, // long: { // year: 'numeric', month: 'short', day: 'numeric', // hour: 'numeric', minute: 'numeric' // } // } // }, messages: loadLocaleMessages() })
loadLocaleMessages
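The locale-loading row above derives each locale key from a ./locales/&lt;locale&gt;.json filename with a regex. A rough Python equivalent of that extraction (an approximation of the JavaScript behavior, not a drop-in port):

```python
# Rough Python equivalent of the locale-key extraction in
# loadLocaleMessages above: pull the locale name out of each
# ./locales/<locale>.json key, mirroring the JavaScript regex.
import re
from typing import Optional

LOCALE_RE = re.compile(r'([A-Za-z0-9-_]+)\.', re.IGNORECASE)

def locale_from_key(key: str) -> Optional[str]:
    matched = LOCALE_RE.search(key)
    return matched.group(1) if matched else None

assert locale_from_key('./zh.json') == 'zh'
assert locale_from_key('./en-US.json') == 'en-US'
```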
webpackModuleError.ts
import { readFileSync } from 'fs' import * as path from 'path' import { webpack } from 'next/dist/compiled/webpack/webpack' import { getBabelError } from './parseBabel' import { getCssError } from './parseCss'
function getFileData( compilation: webpack.compilation.Compilation, m: any ): [string, string | null] { let resolved: string let ctx: string | null = compilation.compiler?.context ?? compilation.context ?? null if (ctx !== null && typeof m.resource === 'string') { const res = path.relative(ctx, m.resource).replace(/\\/g, path.posix.sep) resolved = res.startsWith('.') ? res : `.${path.posix.sep}${res}` } else { const requestShortener = compilation.requestShortener if (typeof m?.readableIdentifier === 'function') { resolved = m.readableIdentifier(requestShortener) } else { resolved = m.request ?? m.userRequest } } if (resolved) { let content: string | null = null try { content = readFileSync( ctx ? path.resolve(ctx, resolved) : resolved, 'utf8' ) } catch {} return [resolved, content] } return ['<unknown>', null] } export async function getModuleBuildError( compilation: webpack.compilation.Compilation, input: any ): Promise<SimpleWebpackError | false> { if ( !( typeof input === 'object' && (input?.name === 'ModuleBuildError' || input?.name === 'ModuleNotFoundError') && Boolean(input.module) && input.error instanceof Error ) ) { return false } const err: Error = input.error const [sourceFilename, sourceContent] = getFileData(compilation, input.module) const notFoundError = await getNotFoundError( compilation, input, sourceFilename ) if (notFoundError !== false) { return notFoundError } const babel = getBabelError(sourceFilename, err) if (babel !== false) { return babel } const css = getCssError(sourceFilename, err) if (css !== false) { return css } const scss = getScssError(sourceFilename, sourceContent, err) if (scss !== false) { return scss } return false }
import { getScssError } from './parseScss' import { getNotFoundError } from './parseNotFoundError' import { SimpleWebpackError } from './simpleWebpackError'
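The getFileData helper in this row normalizes a module path relative to the compiler context before reading the source. A rough Python model of that path handling (POSIX paths assumed in the assert; names are illustrative):

```python
# Sketch of getFileData's path normalization above: resolve the module
# path relative to the compiler context, normalize separators to POSIX
# style, and ensure the result starts with './'.
import os
import posixpath

def to_relative(ctx: str, resource: str) -> str:
    res = os.path.relpath(resource, ctx).replace('\\', posixpath.sep)
    return res if res.startswith('.') else f'.{posixpath.sep}{res}'

assert to_relative('/proj', '/proj/pages/index.tsx') == './pages/index.tsx'
```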
collectdplugin.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # This file is part of Karesansui Core. # # Copyright (C) 2009-2012 HDE, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # import os import re import sys import glob from karesansui.lib.dict_op import DictOp from karesansui.lib.parser.base.xml_like_conf_parser import xmlLikeConfParser as Parser from karesansui.lib.utils import preprint_r, r_chgrp, r_chmod from karesansui.lib.const import VENDOR_SYSCONF_DIR, \ COLLECTD_DATA_DIR, KARESANSUI_GROUP """ Define Variables for This Parser """ PARSER_COLLECTD_PLUGIN_DIR = "%s/collectd.d" % VENDOR_SYSCONF_DIR class collectdpluginParser: _module = "collectdplugin" def __init__(self): self.dop = DictOp() self.dop.addconf(self._module,{}) self.parser = Parser() self.parser.set_delim("[ \t]+") self.parser.set_new_delim("\t") self.parser.set_comment("#") self.base_parser_name = self.parser.__class__.__name__ from karesansui.lib.parser.collectd import collectdParser collectdp = collectdParser() self.parser.set_opt_uni(collectdp.opt_uni) self.parser.set_opt_multi(collectdp.opt_multi) self.parser.set_opt_sect(collectdp.opt_sect) pass def set_footer(self, footer=""): self.parser.set_footer(footer) def source_file(self): retval = [] glob_str = "%s/*.conf" % (PARSER_COLLECTD_PLUGIN_DIR,) for _afile in glob.glob(glob_str): retval.append(_afile) return retval def read_conf(self,extra_args=None): retval = {} for _afile in self.source_file(): plugin_name = re.sub(r"\.conf$","",os.path.basename(_afile)) try: extra_args['include'] if not re.search(extra_args['include'],plugin_name): continue except: pass self.parser.set_source_file([_afile]) conf_arr = self.parser.read_conf() try: self.dop.set(self._module,[plugin_name],conf_arr[_afile]['value']) except: pass self.dop.set(self._module,['@BASE_PARSER'],self.base_parser_name) #self.dop.preprint_r(self._module) return self.dop.getconf(self._module) def _pre_write_conf(self,conf_arr={}): # Change permissions so that the kss group can read and write the data.
def write_conf(self,conf_arr={},extra_args=None,dryrun=False): retval = True conf_arr = self._pre_write_conf(conf_arr) for plugin_name,_v in conf_arr.items(): _afile = "%s/%s.conf" % (PARSER_COLLECTD_PLUGIN_DIR,plugin_name,) try: _v['action'] if _v['action'] == "delete": if os.path.exists(_afile): os.unlink(_afile) continue except: pass #continue try: _v['value'] self.dop.addconf("parser",{}) self.dop.set("parser",[_afile],_v['value']) #self.dop.preprint_r("parser") arr = self.dop.getconf("parser") self.parser.write_conf(arr,dryrun=dryrun) except: pass return retval """ """ if __name__ == '__main__': """Testing """ parser = collectdpluginParser() # Read the current configuration dop = DictOp() dop.addconf("dum",parser.read_conf()) new_plugin_name = "takuma" ########################################################## # Add Uni options (options that can only be set in one place) ########################################################## # Add 'Foo foo' (via the add method, list-format value mode) dop.add("dum",[new_plugin_name,"Foo"],["foo",[["comment foo1","comment foo2"],"comment foo3"]]) # Add 'Bar bar' (via the cdp_set method, string-format value mode) dop.cdp_set("dum",[new_plugin_name,"Bar"],"bar",multiple_file=True) dop.cdp_set_pre_comment("dum",[new_plugin_name,"Bar"],["","comment bar1","comment bar2"],multiple_file=True) dop.cdp_set_post_comment("dum",[new_plugin_name,"Bar"],"comment bar3",multiple_file=True) ########################################################## # Add Multi options (options that can be set multiple times) ########################################################## # Add 'LoadPlugin target_hoge' dop.cdp_set("dum",[new_plugin_name,"LoadPlugin","target_hoge"],"target_hoge",multiple_file=True,is_opt_multi=True) dop.cdp_set_pre_comment("dum",[new_plugin_name,"LoadPlugin","target_hoge"],["","Dis is target_hoge"],multiple_file=True) ########################################################## # Add Sect options (<bracket> directive options) ########################################################## # Add the following: # <Plugin "foobar"> # <View "hoge"> # SubOpt1 gege # post # </View> # Option2 false # Option1 true # </Plugin> dop.cdp_set("dum",[new_plugin_name,"Plugin","foobar","Option1"],"true",multiple_file=True) dop.cdp_set("dum",[new_plugin_name,"Plugin","foobar","Option2"],"false",multiple_file=True) dop.cdp_set_pre_comment("dum",[new_plugin_name,"Plugin","foobar","Option2"],"pre comment",multiple_file=True) dop.cdp_set_post_comment("dum",[new_plugin_name,"Plugin","foobar","Option2"],"post comment",multiple_file=True) dop.cdp_set("dum",[new_plugin_name,"Plugin","foobar","View","hoge","SubOpt1"],"gege",multiple_file=True) dop.cdp_set_post_comment("dum",[new_plugin_name,"Plugin","foobar","View","hoge","SubOpt1"],"post",multiple_file=True) print(dop.get("dum",["filter","@ORDERS"],multiple_file=True)) # For parsers that read multiple files, set is_parent_parser=True # Move '<Plugin foobar>' to the front key = [new_plugin_name,"Plugin","foobar"] dop.insert_order("dum",key,0,is_parent_parser=True) # Move 'LoadPlugin target_hoge' to the front => '<Plugin foobar>' becomes second key = [new_plugin_name,"LoadPlugin","target_hoge"] dop.insert_order("dum",key,0,is_parent_parser=True) # Move 'Foo foo' to the front => 'LoadPlugin target_hoge' becomes second key = [new_plugin_name,"Foo"] dop.insert_order("dum",key,0,is_parent_parser=True) # works correctly #dop.cdp_comment("dum",["python","Plugin","python","Import"],multiple_file=True) #dop.cdp_comment("dum",["python","Plugin","python","Module","notification"],multiple_file=True) #dop.cdp_comment("dum",["python","Plugin","python","Module","notification","CountupDBPath"],multiple_file=True)
#dop.cdp_set("dum",["python","Plugin","python","Module","notification","@ORDERS"],[['Environ'],['CountupDBPath']],multiple_file=True,is_opt_multi=True) # work completely, too. #dop.cdp_comment("dum",["python","Plugin","python","ModulePath"],multiple_file=True) # work completely, too. (but this is overwritten by _pre_write_conf() method) #dop.cdp_set("dum",["python","Plugin","python","@ORDERS"],[['ModulePath'],['Encoding']],multiple_file=True,is_opt_multi=True) #sys.exit() # 配列確認 conf = dop.getconf("dum") preprint_r(conf) parser.write_conf(conf,dryrun=True)
        if os.path.exists(COLLECTD_DATA_DIR):
            if os.getuid() == 0:
                r_chgrp(COLLECTD_DATA_DIR,KARESANSUI_GROUP)
                r_chmod(COLLECTD_DATA_DIR,"g+rwx")
                r_chmod(COLLECTD_DATA_DIR,"o-rwx")

        dop = DictOp()
        dop.addconf("__",conf_arr)

        if dop.isset("__",["python"]) is True:
            dop.cdp_unset("__",["python","Plugin","python","@ORDERS"],multiple_file=True)
            orders = []
            orders.append(['Encoding'])
            orders.append(['LogTraces'])
            orders.append(['Interactive'])
            orders.append(['ModulePath'])
            orders.append(['Import'])
            orders.append(['Module'])
            dop.cdp_set("__",["python","Plugin","python","@ORDERS"],orders,is_opt_multi=True,multiple_file=True)

        return dop.getconf("__")
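
# ---------------------------------------------------------------------------
# Usage sketch (editorial addition, not part of the original module): a
# minimal read/modify/dryrun cycle with collectdpluginParser. The import
# path and the plugin/option names ("interface", "Interval") are assumptions
# for illustration only.
#
#   from karesansui.lib.dict_op import DictOp
#   from karesansui.lib.parser.collectdplugin import collectdpluginParser
#
#   parser = collectdpluginParser()
#   dop = DictOp()
#   # read only the plugin files whose basename matches "interface"
#   dop.addconf("conf",parser.read_conf(extra_args={"include":"^interface$"}))
#   # set 'Interval 10' inside <Plugin interface>
#   dop.cdp_set("conf",["interface","Plugin","interface","Interval"],"10",multiple_file=True)
#   # dryrun=True prints the would-be file contents instead of writing them
#   parser.write_conf(dop.getconf("conf"),dryrun=True)
# ---------------------------------------------------------------------------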
deployment_trigger_policy.rs
// Generated from definition com.github.openshift.api.apps.v1.DeploymentTriggerPolicy /// DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment. #[derive(Clone, Debug, Default, PartialEq)] pub struct DeploymentTriggerPolicy { /// ImageChangeParams represents the parameters for the ImageChange trigger. pub image_change_params: Option<crate::api::apps::v1::DeploymentTriggerImageChangeParams>, /// Type of the trigger pub type_: Option<String>, } impl<'de> serde::Deserialize<'de> for DeploymentTriggerPolicy { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> { #[allow(non_camel_case_types)] enum Field { Key_image_change_params, Key_type_, Other, } impl<'de> serde::Deserialize<'de> for Field { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> { struct Visitor; impl<'de> serde::de::Visitor<'de> for Visitor { type Value = Field; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("field identifier") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error
} deserializer.deserialize_identifier(Visitor) } } struct Visitor; impl<'de> serde::de::Visitor<'de> for Visitor { type Value = DeploymentTriggerPolicy; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("DeploymentTriggerPolicy") } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> { let mut value_image_change_params: Option<crate::api::apps::v1::DeploymentTriggerImageChangeParams> = None; let mut value_type_: Option<String> = None; while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? { match key { Field::Key_image_change_params => value_image_change_params = serde::de::MapAccess::next_value(&mut map)?, Field::Key_type_ => value_type_ = serde::de::MapAccess::next_value(&mut map)?, Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; }, } } Ok(DeploymentTriggerPolicy { image_change_params: value_image_change_params, type_: value_type_, }) } } deserializer.deserialize_struct( "DeploymentTriggerPolicy", &[ "imageChangeParams", "type", ], Visitor, ) } } impl serde::Serialize for DeploymentTriggerPolicy { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer { let mut state = serializer.serialize_struct( "DeploymentTriggerPolicy", self.image_change_params.as_ref().map_or(0, |_| 1) + self.type_.as_ref().map_or(0, |_| 1), )?; if let Some(value) = &self.image_change_params { serde::ser::SerializeStruct::serialize_field(&mut state, "imageChangeParams", value)?; } if let Some(value) = &self.type_ { serde::ser::SerializeStruct::serialize_field(&mut state, "type", value)?; } serde::ser::SerializeStruct::end(state) } }
{ Ok(match v { "imageChangeParams" => Field::Key_image_change_params, "type" => Field::Key_type_, _ => Field::Other, }) }
certificates_shell.py
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os.path

from magnumclient.common import cliutils as utils
from magnumclient.i18n import _

DEPRECATION_MESSAGE = (
    'WARNING: The bay parameter is deprecated and will be removed in a future '
    'release.\nUse the cluster parameter to avoid seeing this message.')


def _show_cert(certificate):
def _get_target_uuid(cs, args):
    target = None
    if args.cluster:
        target = cs.clusters.get(args.cluster)
    elif args.bay:
        print(DEPRECATION_MESSAGE)
        target = cs.bays.get(args.bay)
    else:
        raise utils.MissingArgs(['--cluster or --bay'])
    return target.uuid


@utils.arg('--bay',
           required=False,
           metavar='<bay>',
           help=_('ID or name of the bay.'))
@utils.arg('positional_cluster',
           metavar='<cluster>',
           nargs='?',
           default=None,
           help=_('ID or name of the cluster.'))
@utils.arg('--cluster',
           metavar='<cluster>',
           default=None,
           help=(_('ID or name of the cluster. %s') %
                 utils.CLUSTER_DEPRECATION_HELP))
@utils.deprecated(utils.MAGNUM_CLIENT_DEPRECATION_WARNING)
def do_ca_show(cs, args):
    """Show details about the CA certificate for a bay or cluster."""
    utils.validate_cluster_args(args.positional_cluster, args.cluster)
    args.cluster = args.positional_cluster or args.cluster

    opts = {
        'cluster_uuid': _get_target_uuid(cs, args)
    }

    cert = cs.certificates.get(**opts)
    _show_cert(cert)


@utils.arg('--csr',
           metavar='<csr>',
           help=_('File path of the csr file to send to Magnum'
                  ' to get signed.'))
@utils.arg('--bay',
           required=False,
           metavar='<bay>',
           help=_('ID or name of the bay.'))
@utils.arg('--cluster',
           required=False,
           metavar='<cluster>',
           help=_('ID or name of the cluster.'))
@utils.deprecated(utils.MAGNUM_CLIENT_DEPRECATION_WARNING)
def do_ca_sign(cs, args):
    """Sign a CSR with the CA certificate of a bay or cluster."""
    opts = {
        'cluster_uuid': _get_target_uuid(cs, args)
    }

    if args.csr is None or not os.path.isfile(args.csr):
        print('A CSR must be provided.')
        return

    with open(args.csr, 'r') as f:
        opts['csr'] = f.read()

    cert = cs.certificates.create(**opts)
    _show_cert(cert)


@utils.arg('--cluster',
           required=True,
           metavar='<cluster>',
           help=_('ID or name of the cluster.'))
@utils.deprecated(utils.MAGNUM_CLIENT_DEPRECATION_WARNING)
def do_ca_rotate(cs, args):
    """Rotate the CA certificate for a bay or cluster to revoke access."""
    cluster = cs.clusters.get(args.cluster)
    opts = {
        'cluster_uuid': cluster.uuid
    }

    cs.certificates.rotate_ca(**opts)
print(certificate.pem)
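
# Editorial addition: a sketch (all names hypothetical) of the bay -> cluster
# fallback these handlers share via _get_target_uuid, using stand-in stubs
# instead of a real Magnum client:
#
#   from types import SimpleNamespace
#
#   class _FakeManager(object):
#       def get(self, name):
#           return SimpleNamespace(uuid='uuid-of-%s' % name)
#
#   cs = SimpleNamespace(clusters=_FakeManager(), bays=_FakeManager())
#   args = SimpleNamespace(cluster=None, bay='legacy-bay')
#   # prints DEPRECATION_MESSAGE, then resolves the uuid through cs.bays
#   print(_get_target_uuid(cs, args))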
test.py
# a 10x11 grid (10 rows, 11 columns) of zeros
array = [[0 for col in range(11)] for row in range(10)]
print(len(array))  # prints 10: len() of a nested list counts the rows
utils.js
import "core-js/modules/es.array.concat"; import { hasOwnProperty, isObject } from './../../helpers/object'; import { arrayEach } from './../../helpers/array'; /** * Create separated id for borders for each cell. * * @param {Number} row Visual row index. * @param {Number} col Visual column index. * @returns {String} */ export function createId(row, col) { return "border_row".concat(row, "col").concat(col); } /** * Create default single border for each position (top/right/bottom/left). * * @returns {Object} `{{width: number, color: string}}` */ export function createDefaultCustomBorder() { return { width: 1, color: '#000' }; } /** * Create default object for empty border. * * @returns {Object} `{{hide: boolean}}` */ export function createSingleEmptyBorder() { return { hide: true }; } /** * Create default Handsontable border object. * * @returns {Object} `{{width: number, color: string, cornerVisible: boolean}}` */ export function createDefaultHtBorder() { return { width: 1, color: '#000', cornerVisible: false }; } /** * Prepare empty border for each cell with all custom borders hidden. * * @param {Number} row Visual row index. * @param {Number} col Visual column index. * @returns {Object} `{{id: *, border: *, row: *, col: *, top: {hide: boolean}, right: {hide: boolean}, bottom: {hide: boolean}, left: {hide: boolean}}}` */ export function createEmptyBorders(row, col) { return { id: createId(row, col), border: createDefaultHtBorder(), row: row, col: col, top: createSingleEmptyBorder(), right: createSingleEmptyBorder(), bottom: createSingleEmptyBorder(), left: createSingleEmptyBorder() }; } export function
(defaultBorder, customBorder) { if (hasOwnProperty(customBorder, 'border')) { defaultBorder.border = customBorder.border; } if (hasOwnProperty(customBorder, 'top')) { if (customBorder.top) { if (!isObject(customBorder.top)) { customBorder.top = createDefaultCustomBorder(); } defaultBorder.top = customBorder.top; } else { customBorder.top = createSingleEmptyBorder(); defaultBorder.top = customBorder.top; } } if (hasOwnProperty(customBorder, 'right')) { if (customBorder.right) { if (!isObject(customBorder.right)) { customBorder.right = createDefaultCustomBorder(); } defaultBorder.right = customBorder.right; } else { customBorder.right = createSingleEmptyBorder(); defaultBorder.right = customBorder.right; } } if (hasOwnProperty(customBorder, 'bottom')) { if (customBorder.bottom) { if (!isObject(customBorder.bottom)) { customBorder.bottom = createDefaultCustomBorder(); } defaultBorder.bottom = customBorder.bottom; } else { customBorder.bottom = createSingleEmptyBorder(); defaultBorder.bottom = customBorder.bottom; } } if (hasOwnProperty(customBorder, 'left')) { if (customBorder.left) { if (!isObject(customBorder.left)) { customBorder.left = createDefaultCustomBorder(); } defaultBorder.left = customBorder.left; } else { customBorder.left = createSingleEmptyBorder(); defaultBorder.left = customBorder.left; } } return defaultBorder; } /** * Check if selection has border. * * @param hot * @param direction */ export function checkSelectionBorders(hot, direction) { var atLeastOneHasBorder = false; arrayEach(hot.getSelectedRange(), function (range) { range.forAll(function (r, c) { var metaBorders = hot.getCellMeta(r, c).borders; if (metaBorders) { if (direction) { if (!hasOwnProperty(metaBorders[direction], 'hide') || metaBorders[direction].hide === false) { atLeastOneHasBorder = true; return false; // breaks forAll } } else { atLeastOneHasBorder = true; return false; // breaks forAll } } }); }); return atLeastOneHasBorder; } /** * Mark label in contextMenu as selected. * * @param label * @returns {string} */ export function markSelected(label) { return "<span class=\"selected\">".concat(String.fromCharCode(10003), "</span>").concat(label); // workaround for https://github.com/handsontable/handsontable/issues/1946 }
extendDefaultBorder
template.rs
use crate::authors; use crate::config::TemplateConfig; use crate::emoji; use crate::include_exclude::*; use crate::projectname::ProjectName; use anyhow::{Context, Result}; use console::style; use heck::{CamelCase, KebabCase, SnakeCase}; use indicatif::ProgressBar; use liquid_core::{Filter, FilterReflection, Object, ParseFilter, Runtime, Value, ValueView}; use std::fs; use std::path::Path; use std::{collections::HashMap, env}; use walkdir::{DirEntry, WalkDir}; fn engine() -> liquid::Parser { liquid::ParserBuilder::with_stdlib() .filter(KebabCaseFilterParser) .filter(PascalCaseFilterParser) .filter(SnakeCaseFilterParser) .build() .expect("can't fail due to no partials support") } #[derive(Clone, ParseFilter, FilterReflection)] #[filter( name = "kebab_case", description = "Change text to kebab-case.", parsed(KebabCaseFilter) )] pub(crate) struct KebabCaseFilterParser; #[derive(Debug, Default, liquid_derive::Display_filter)] #[name = "kebab_case"] struct KebabCaseFilter; impl Filter for KebabCaseFilter { fn evaluate( &self, input: &dyn ValueView, _runtime: &Runtime, ) -> Result<liquid::model::Value, liquid_core::error::Error> { let input = input .as_scalar() .ok_or_else(|| liquid_core::error::Error::with_msg("String expected"))?; let input = input.into_string().to_string().to_kebab_case(); Ok(liquid::model::Value::scalar(input)) } } #[derive(Clone, liquid_derive::ParseFilter, liquid_derive::FilterReflection)] #[filter( name = "pascal_case", description = "Change text to PascalCase.", parsed(PascalCaseFilter) )] pub(crate) struct PascalCaseFilterParser; #[derive(Debug, Default, liquid_derive::Display_filter)] #[name = "pascal_case"] struct PascalCaseFilter; impl Filter for PascalCaseFilter { fn evaluate( &self, input: &dyn ValueView, _runtime: &Runtime, ) -> Result<liquid::model::Value, liquid_core::error::Error> { let input = input .as_scalar() .ok_or_else(|| liquid_core::error::Error::with_msg("String expected"))?; let input = input.into_string().to_camel_case(); Ok(liquid::model::Value::scalar(input)) } } #[derive(Clone, liquid_derive::ParseFilter, liquid_derive::FilterReflection)] #[filter( name = "snake_case", description = "Change text to snake_case.", parsed(SnakeCaseFilter) )] pub(crate) struct SnakeCaseFilterParser; #[derive(Debug, Default, liquid_derive::Display_filter)] #[name = "snake_case"] struct SnakeCaseFilter; impl Filter for SnakeCaseFilter { fn evaluate( &self, input: &dyn ValueView, _runtime: &Runtime<'_>, ) -> Result<liquid::model::Value, liquid_core::error::Error> { let input = input .as_scalar() .ok_or_else(|| liquid_core::error::Error::with_msg("String expected"))?; let input = input.into_string().to_snake_case(); Ok(input.to_value()) } }
pub(crate) fn substitute( name: &ProjectName, template_values: &HashMap<String, toml::Value>, force: bool, ) -> Result<Object> { let project_name = if force { name.raw() } else { name.kebab_case() }; let authors = authors::get_authors()?; let os_arch = format!("{}-{}", env::consts::OS, env::consts::ARCH); let mut liquid_object = Object::new(); liquid_object.insert("project-name".into(), Value::Scalar(project_name.into())); liquid_object.insert("crate_name".into(), Value::Scalar(name.snake_case().into())); liquid_object.insert("authors".into(), Value::Scalar(authors.into())); liquid_object.insert("os-arch".into(), Value::Scalar(os_arch.into())); template_values.iter().try_for_each(|(k, v)| { let value = match v { toml::Value::String(content) => Value::Scalar(content.clone().into()), toml::Value::Boolean(content) => Value::Scalar((*content).into()), _ => anyhow::bail!(format!( "{} {}", emoji::ERROR, style("Unsupported value type. Only Strings and Booleans are supported.") .bold() .red(), )), }; liquid_object.insert(k.clone().into(), value); Ok(()) })?; Ok(liquid_object) } pub(crate) fn walk_dir( project_dir: &Path, template: Object, template_config: Option<TemplateConfig>, pbar: ProgressBar, ) -> Result<()> { fn is_dir(entry: &DirEntry) -> bool { entry.file_type().is_dir() } fn is_git_metadata(entry: &DirEntry) -> bool { entry .path() .components() .any(|c| c == std::path::Component::Normal(".git".as_ref())) } let engine = engine(); let matcher = template_config.map_or_else( || Ok(Matcher::default()), |config| Matcher::new(config, project_dir), )?; for entry in WalkDir::new(project_dir) { let entry = entry?; if is_dir(&entry) || is_git_metadata(&entry) { continue; } let filename = entry.path(); let relative_path = filename.strip_prefix(project_dir)?; pbar.set_message(&filename.display().to_string()); if matcher.should_include(relative_path) { let new_contents = engine .clone() .parse_file(filename)? .render(&template) .with_context(|| { format!( "{} {} `{}`", emoji::ERROR, style("Error replacing placeholders").bold().red(), style(filename.display()).bold() ) })?; fs::write(filename, new_contents).with_context(|| { format!( "{} {} `{}`", emoji::ERROR, style("Error writing").bold().red(), style(filename.display()).bold() ) })?; } } pbar.finish_and_clear(); Ok(()) }
test_core.py
from dataclasses import dataclass from enum import Enum from typing import List, Optional, Dict import textwrap from fastclasses_json.api import dataclass_json from fastclasses_json import core def test_to_dict_source(): @dataclass class A: x: int assert core._to_dict_source(A) == textwrap.dedent( """\ def to_dict(self): result = {} result['x'] = self.x return result """ ) def test_from_dict_source(): @dataclass class A: x: int assert core._from_dict_source(A) == textwrap.dedent( """\ def from_dict(cls, o, *, infer_missing): args = {} args['x'] = o.get('x') return cls(**args) """ ) def test_from_dict_source__optional(): @dataclass class A: x: Optional[int] assert core._from_dict_source(A) == textwrap.dedent( """\ def from_dict(cls, o, *, infer_missing): args = {} args['x'] = o.get('x') return cls(**args) """ ) def test_from_dict_source__default(): @dataclass class
: x: int = 1 assert core._from_dict_source(A) == textwrap.dedent( """\ def from_dict(cls, o, *, infer_missing): args = {} if 'x' in o: args['x'] = o.get('x') return cls(**args) """ ) def test_from_dict_source__list_nested(): @dataclass_json @dataclass class A: a: str @dataclass_json @dataclass class B: a: List[A] assert core._from_dict_source(B) == textwrap.dedent( """\ def from_dict(cls, o, *, infer_missing): args = {} value = o.get('a') if value is not None: value = [A._fastclasses_json_from_dict(__0) for __0 in value] args['a'] = value return cls(**args) """ ) def test_from_dict_source__enum(): from enum import Enum class A(Enum): X = 'ex' Y = 'why' @dataclass_json @dataclass class B: a: A assert core._from_dict_source(B) == textwrap.dedent( """\ def from_dict(cls, o, *, infer_missing): args = {} value = o.get('a') if value is not None: value = A(value) args['a'] = value return cls(**args) """ ) def test_expr_builder__list_enum(): class A(Enum): X = 'ex' Y = 'why' t = List[A] builder = core.expr_builder(t) assert builder('XXX') == '[A(__0) for __0 in XXX]' def test_expr_builder__list_list_enum(): class A(Enum): X = 'ex' Y = 'why' t = List[List[A]] builder = core.expr_builder(t) assert builder('XXX') == '[[A(__1) for __1 in __0] for __0 in XXX]' def test_expr_builder__list_dataclass(): @dataclass class A: X = 'ex' Y = 'why' t = List[A] builder = core.expr_builder(t) assert builder('XXX') == \ '[A._fastclasses_json_from_dict(__0) for __0 in XXX]' def test_expr_builder__optional_enum(): class A(Enum): X = 'ex' Y = 'why' t = Optional[A] builder = core.expr_builder(t) assert builder('XXX') == 'A(__0) if (__0:=(XXX)) is not None else None' def test_expr_builder__dict_enum(): class A(Enum): X = 'ex' Y = 'why' t = Dict[str, A] builder = core.expr_builder(t) assert builder('XXX') == '{__k0: A(__v0) for __k0,__v0 in (XXX).items()}' def test_references_types__enum(): class A(Enum): X = 'ex' Y = 'why' @dataclass class XX: a: A assert core.referenced_types(XX) == {'A': A}
A
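
# Editorial addition: a behavioral companion to the source-generation
# assertions above, assuming the public API attaches from_dict()/to_dict()
# as the generated source shows. The class and field names are hypothetical.
def test_round_trip_sketch():
    @dataclass_json
    @dataclass
    class Point:
        x: int
        y: Optional[int] = None

    p = Point.from_dict({'x': 1})
    assert p == Point(x=1, y=None)
    assert isinstance(p.to_dict(), dict)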
crackfortran.py
#!/usr/bin/env python """ crackfortran --- read fortran (77,90) code and extract declaration information. Copyright 1999-2004 Pearu Peterson all rights reserved, Pearu Peterson <[email protected]> Permission to use, modify, and distribute this software is given under the terms of the NumPy License. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. $Date: 2005/09/27 07:13:49 $ Pearu Peterson Usage of crackfortran: ====================== Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename> -m <module name for f77 routines>,--ignore-contains Functions: crackfortran, crack2fortran The following Fortran statements/constructions are supported (or will be if needed): block data,byte,call,character,common,complex,contains,data, dimension,double complex,double precision,end,external,function, implicit,integer,intent,interface,intrinsic, logical,module,optional,parameter,private,public, program,real,(sequence?),subroutine,type,use,virtual, include,pythonmodule Note: 'virtual' is mapped to 'dimension'. Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug). Note: code after 'contains' will be ignored until its scope ends. Note: 'common' statement is extended: dimensions are moved to variable definitions Note: f2py directive: <commentchar>f2py<line> is read as <line> Note: pythonmodule is introduced to represent Python module Usage: `postlist=crackfortran(files,funcs)` `postlist` contains declaration information read from the list of files `files`. `crack2fortran(postlist)` returns a fortran code to be saved to pyf-file `postlist` has the following structure: *** it is a list of dictionaries containing `blocks': B = {'block','body','vars','parent_block'[,'name','prefix','args','result', 'implicit','externals','interfaced','common','sortvars', 'commonvars','note']} B['block'] = 'interface' | 'function' | 'subroutine' | 'module' | 'program' | 'block data' | 'type' | 'pythonmodule' B['body'] --- list containing `subblocks' with the same structure as `blocks' B['parent_block'] --- dictionary of a parent block: C['body'][<index>]['parent_block'] is C B['vars'] --- dictionary of variable definitions B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first) B['name'] --- name of the block (not if B['block']=='interface') B['prefix'] --- prefix string (only if B['block']=='function') B['args'] --- list of argument names if B['block']== 'function' | 'subroutine' B['result'] --- name of the return value (only if B['block']=='function') B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None B['externals'] --- list of variables being external B['interfaced'] --- list of variables being external and defined B['common'] --- dictionary of common blocks (list of objects) B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions) B['from'] --- string showing the 'parents' of the current block B['use'] --- dictionary of modules used in current block: {<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}} B['note'] --- list of LaTeX comments on the block B['f2pyenhancements'] --- optional dictionary {'threadsafe':'','fortranname':<name>, 'callstatement':<C-expr>|<multi-line block>, 'callprotoargument':<C-expr-list>, 'usercode':<multi-line block>|<list of multi-line blocks>, 'pymethoddef:<multi-line block>' } B['entry'] --- dictionary {entryname:argslist,..} B['varnames'] --- list of variable names given in the order of reading the Fortran code, 
useful for derived types.
    B['saved_interface'] --- a string of the scanned routine signature, defining the explicit interface
*** Variable definition is a dictionary
    D = B['vars'][<variable name>] =
    {'typespec'[,'attrspec','kindselector','charselector','=','typename']}
    D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
                    'double precision' | 'integer' | 'logical' | 'real' | 'type'
    D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
                      'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
                      'optional','required', etc)
    K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
        'complex' | 'integer' | 'logical' | 'real' )
    C = D['charselector'] = {['*','len','kind']}
        (only if D['typespec']=='character')
    D['='] --- initialization expression string
    D['typename'] --- name of the type if D['typespec']=='type'
    D['dimension'] --- list of dimension bounds
    D['intent'] --- list of intent specifications
    D['depend'] --- list of variable names on which the current variable depends
    D['check'] --- list of C-expressions; if a C-expr returns zero, an exception is raised
    D['note'] --- list of LaTeX comments on the variable

*** Meaning of kind/char selectors (a few examples):
    D['typespec']*K['*']
    D['typespec'](kind=K['kind'])
    character*C['*']
    character(len=C['len'],kind=C['kind'])
    (see also fortran type declaration statement formats below)

Fortran 90 type declaration statement format (F77 is a subset of F90)
====================================================================
(Main source: IBM XL Fortran 5.1 Language Reference Manual)
type declaration = <typespec> [[<attrspec>]::] <entitydecl>
<typespec> = byte                          |
             character[<charselector>]    |
             complex[<kindselector>]      |
             double complex               |
             double precision             |
             integer[<kindselector>]      |
             logical[<kindselector>]      |
             real[<kindselector>]         |
             type(<typename>)
<charselector> = * <charlen>                   |
                 ([len=]<len>[,[kind=]<kind>]) |
                 (kind=<kind>[,len=<len>])
<kindselector> = * <intlen>      |
                 ([kind=]<kind>)
<attrspec> = comma separated list of attributes.
             Only the following attributes are used in
             building up the interface:
                external
                (parameter --- affects '=' key)
                optional
                intent
             Other attributes are ignored.
<intentspec> = in | out | inout
<arrayspec> = comma separated list of dimension bounds.
<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
                      [/<init_expr>/ | =<init_expr>] [,<entitydecl>]

In addition, the following attributes are used: check,depend,note

TODO:
    * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
      -> 'real x(2)')
      The above may be solved by creating an appropriate preprocessor program,
      for example.

"""
from __future__ import division, absolute_import, print_function

import sys
import string
import fileinput
import re
import os
import copy
import platform

from . import __version__

# The environment provided by auxfuncs.py is needed for some calls to eval.
# As the needed functions cannot be determined by static inspection of the
# code, it is safest to use import * pending a major refactoring of f2py.
from .auxfuncs import *

f2py_version = __version__.version

# Global flags:
strictf77 = 1              # Ignore `!' comments unless line[0]=='!'
sourcecodeform = 'fix'     # 'fix','free'
quiet = 0                  # Be verbose if 0 (Obsolete: not used any more)
verbose = 1                # Be quiet if 0, extra verbose if > 1.
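
# Illustrative example (editorial addition, not used by the code): a
# variable-definition dictionary D, as described in the module docstring
# above, for a declaration like `real*8 a(n)` with `intent(in)`. The exact
# keys produced depend on the analyzed source; values here are illustrative:
#
#   D = {'typespec': 'real',
#        'kindselector': {'*': '8'},
#        'dimension': ['n'],
#        'intent': ['in'],
#        'depend': ['n']}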
tabchar = 4 * ' ' pyffilename = '' f77modulename = '' skipemptyends = 0 # for old F77 programs without 'program' statement ignorecontains = 1 dolowercase = 1 debug = [] # Global variables beginpattern = '' currentfilename = '' expectbegin = 1 f90modulevars = {} filepositiontext = '' gotnextfile = 1 groupcache = None groupcounter = 0 grouplist = {groupcounter: []} groupname = '' include_paths = [] neededmodule = -1 onlyfuncs = [] previous_context = None skipblocksuntil = -1 skipfuncs = [] skipfunctions = [] usermodules = [] def reset_global_f2py_vars(): global groupcounter, grouplist, neededmodule, expectbegin global skipblocksuntil, usermodules, f90modulevars, gotnextfile global filepositiontext, currentfilename, skipfunctions, skipfuncs global onlyfuncs, include_paths, previous_context global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename global f77modulename, skipemptyends, ignorecontains, dolowercase, debug # flags strictf77 = 1 sourcecodeform = 'fix' quiet = 0 verbose = 1 tabchar = 4 * ' ' pyffilename = '' f77modulename = '' skipemptyends = 0 ignorecontains = 1 dolowercase = 1 debug = [] # variables groupcounter = 0 grouplist = {groupcounter: []} neededmodule = -1 expectbegin = 1 skipblocksuntil = -1 usermodules = [] f90modulevars = {} gotnextfile = 1 filepositiontext = '' currentfilename = '' skipfunctions = [] skipfuncs = [] onlyfuncs = [] include_paths = [] previous_context = None def outmess(line, flag=1): global filepositiontext if not verbose: return if not quiet: if flag: sys.stdout.write(filepositiontext) sys.stdout.write(line) re._MAXCACHE = 50 defaultimplicitrules = {} for c in "abcdefghopqrstuvwxyz$_": defaultimplicitrules[c] = {'typespec': 'real'} for c in "ijklmn": defaultimplicitrules[c] = {'typespec': 'integer'} del c badnames = {} invbadnames = {} for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while', 'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union', 'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch', 'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto', 'len', 'rank', 'shape', 'index', 'slen', 'size', '_i', 'max', 'min', 'flen', 'fshape', 'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout', 'type', 'default']: badnames[n] = n + '_bn' invbadnames[n + '_bn'] = n def rmbadname1(name): if name in badnames: errmess('rmbadname1: Replacing "%s" with "%s".\n' % (name, badnames[name])) return badnames[name] return name def rmbadname(names): return [rmbadname1(_m) for _m in names] def undo_rmbadname1(name): if name in invbadnames: errmess('undo_rmbadname1: Replacing "%s" with "%s".\n' % (name, invbadnames[name])) return invbadnames[name] return name def undo_rmbadname(names): return [undo_rmbadname1(_m) for _m in names] def getextension(name): i = name.rfind('.') if i == -1: return '' if '\\' in name[i:]: return '' if '/' in name[i:]: return '' return name[i + 1:] is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z', re.I).match _has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-', re.I).search _has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-', re.I).search _has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-', re.I).search _free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match def is_free_format(file): """Check if file is in free format Fortran.""" # f90 allows both fixed and free format, assuming fixed unless # signs of free format are detected. 
result = 0 f = open(file, 'r') line = f.readline() n = 15 # the number of non-comment lines to scan for hints if _has_f_header(line): n = 0 elif _has_f90_header(line): n = 0 result = 1 while n > 0 and line: if line[0] != '!' and line.strip(): n -= 1 if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&': result = 1 break line = f.readline() f.close() return result # Read fortran (77,90) code def readfortrancode(ffile, dowithline=show, istop=1): """ Read fortran codes from files and 1) Get rid of comments, line continuations, and empty lines; lower cases. 2) Call dowithline(line) on every line. 3) Recursively call itself when statement \"include '<filename>'\" is met. """ global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77 global beginpattern, quiet, verbose, dolowercase, include_paths if not istop: saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ beginpattern, quiet, verbose, dolowercase if ffile == []: return localdolowercase = dolowercase cont = 0 finalline = '' ll = '' commentline = re.compile( r'(?P<line>([^"]*["][^"]*["][^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!\'"]*))!{1}(?P<rest>.*)') includeline = re.compile( r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I) cont1 = re.compile(r'(?P<line>.*)&\s*\Z') cont2 = re.compile(r'(\s*&|)(?P<line>.*)') mline_mark = re.compile(r".*?'''") if istop: dowithline('', -1) ll, l1 = '', '' spacedigits = [' '] + [str(_m) for _m in range(10)] filepositiontext = '' fin = fileinput.FileInput(ffile) while True: l = fin.readline() if not l: break if fin.isfirstline(): filepositiontext = '' currentfilename = fin.filename() gotnextfile = 1 l1 = l strictf77 = 0 sourcecodeform = 'fix' ext = os.path.splitext(currentfilename)[1] if is_f_file(currentfilename) and \ not (_has_f90_header(l) or _has_fix_header(l)): strictf77 = 1 elif is_free_format(currentfilename) and not _has_fix_header(l): sourcecodeform = 'free' if strictf77: beginpattern = beginpattern77 else: beginpattern = beginpattern90 outmess('\tReading file %s (format:%s%s)\n' % (repr(currentfilename), sourcecodeform, strictf77 and ',strict' or '')) l = l.expandtabs().replace('\xa0', ' ') # Get rid of newline characters while not l == '': if l[-1] not in "\n\r\f": break l = l[:-1] if not strictf77: r = commentline.match(l) if r: l = r.group('line') + ' ' # Strip comments starting with `!' rl = r.group('rest') if rl[:4].lower() == 'f2py': # f2py directive l = l + 4 * ' ' r = commentline.match(rl[4:]) if r: l = l + r.group('line') else: l = l + rl[4:] if l.strip() == '': # Skip empty line cont = 0 continue if sourcecodeform == 'fix': if l[0] in ['*', 'c', '!', 'C', '#']: if l[1:5].lower() == 'f2py': # f2py directive l = ' ' + l[5:] else: # Skip comment line cont = 0 continue elif strictf77: if len(l) > 72: l = l[:72] if not (l[0] in spacedigits): raise Exception('readfortrancode: Found non-(space,digit) char ' 'in the first column.\n\tAre you sure that ' 'this code is in fix form?\n\tline=%s' % repr(l)) if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '): # Continuation of a previous line ll = ll + l[6:] finalline = '' origfinalline = '' else: if not strictf77: # F90 continuation r = cont1.match(l) if r: l = r.group('line') # Continuation follows .. if cont: ll = ll + cont2.match(l).group('line') finalline = '' origfinalline = '' else: # clean up line beginning from possible digits. 
l = ' ' + l[5:] if localdolowercase: finalline = ll.lower() else: finalline = ll origfinalline = ll ll = l cont = (r is not None) else: # clean up line beginning from possible digits. l = ' ' + l[5:] if localdolowercase: finalline = ll.lower() else: finalline = ll origfinalline = ll ll = l elif sourcecodeform == 'free': if not cont and ext == '.pyf' and mline_mark.match(l): l = l + '\n' while True: lc = fin.readline() if not lc: errmess( 'Unexpected end of file when reading multiline\n') break l = l + lc if mline_mark.match(lc): break l = l.rstrip() r = cont1.match(l) if r: l = r.group('line') # Continuation follows .. if cont: ll = ll + cont2.match(l).group('line') finalline = '' origfinalline = '' else: if localdolowercase: finalline = ll.lower() else: finalline = ll origfinalline = ll ll = l cont = (r is not None) else: raise ValueError( "Flag sourcecodeform must be either 'fix' or 'free': %s" % repr(sourcecodeform)) filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( fin.filelineno() - 1, currentfilename, l1) m = includeline.match(origfinalline) if m: fn = m.group('name') if os.path.isfile(fn): readfortrancode(fn, dowithline=dowithline, istop=0) else: include_dirs = [ os.path.dirname(currentfilename)] + include_paths foundfile = 0 for inc_dir in include_dirs: fn1 = os.path.join(inc_dir, fn) if os.path.isfile(fn1): foundfile = 1 readfortrancode(fn1, dowithline=dowithline, istop=0) break if not foundfile: outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % ( repr(fn), os.pathsep.join(include_dirs))) else: dowithline(finalline) l1 = ll if localdolowercase: finalline = ll.lower() else: finalline = ll origfinalline = ll filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( fin.filelineno() - 1, currentfilename, l1) m = includeline.match(origfinalline) if m: fn = m.group('name') if os.path.isfile(fn): readfortrancode(fn, dowithline=dowithline, istop=0) else: include_dirs = [os.path.dirname(currentfilename)] + include_paths foundfile = 0 for inc_dir in include_dirs: fn1 = os.path.join(inc_dir, fn) if os.path.isfile(fn1): foundfile = 1 readfortrancode(fn1, dowithline=dowithline, istop=0) break if not foundfile: outmess('readfortrancode: could not find include file %s in %s. 
Ignoring.\n' % ( repr(fn), os.pathsep.join(include_dirs))) else: dowithline(finalline) filepositiontext = '' fin.close() if istop: dowithline('', 1) else: gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ beginpattern, quiet, verbose, dolowercase = saveglobals # Crack line beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))' + \ r'\s*(?P<this>(\b(%s)\b))' + \ r'\s*(?P<after>%s)\s*\Z' ## fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte' typespattern = re.compile( beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type' typespattern4implicit = re.compile(beforethisafter % ( '', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I) # functionpattern = re.compile(beforethisafter % ( r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin' subroutinepattern = re.compile(beforethisafter % ( r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin' # modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin' # groupbegins77 = r'program|block\s*data' beginpattern77 = re.compile( beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin' groupbegins90 = groupbegins77 + \ r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()' beginpattern90 = re.compile( beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin' groupends = r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface' endpattern = re.compile( beforethisafter % ('', groupends, groupends, r'[\w\s]*'), re.I), 'end' # endifs='end\s*(if|do|where|select|while|forall)' endifs = r'(end\s*(if|do|where|select|while|forall))|(module\s*procedure)' endifpattern = re.compile( beforethisafter % (r'[\w]*?', endifs, endifs, r'[\w\s]*'), re.I), 'endif' # implicitpattern = re.compile( beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit' dimensionpattern = re.compile(beforethisafter % ( '', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension' externalpattern = re.compile( beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external' optionalpattern = re.compile( beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional' requiredpattern = re.compile( beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required' publicpattern = re.compile( beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public' privatepattern = re.compile( beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private' intrisicpattern = re.compile( beforethisafter % ('', 'intrisic', 'intrisic', '.*'), re.I), 'intrisic' intentpattern = re.compile(beforethisafter % ( '', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent' parameterpattern = re.compile( beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter' datapattern = re.compile( beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data' callpattern = re.compile( beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call' entrypattern = re.compile( beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry' callfunpattern = re.compile( beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun' commonpattern = re.compile( beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common' usepattern = re.compile( beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use' containspattern = re.compile( 
beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains' formatpattern = re.compile( beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format' # Non-fortran and f2py-specific statements f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements' multilinepattern = re.compile( r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline' ## def _simplifyargs(argsline): a = [] for n in markoutercomma(argsline).split('@,@'): for r in '(),': n = n.replace(r, '_') a.append(n) return ','.join(a) crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*', re.I) def crackline(line, reset=0): """ reset=-1 --- initialize reset=0 --- crack the line reset=1 --- final check if mismatch of blocks occurred Cracked data is saved in grouplist[0]. """ global beginpattern, groupcounter, groupname, groupcache, grouplist global filepositiontext, currentfilename, neededmodule, expectbegin global skipblocksuntil, skipemptyends, previous_context, gotnextfile if ';' in line and not (f2pyenhancementspattern[0].match(line) or multilinepattern[0].match(line)): for l in line.split(';'): # XXX: non-zero reset values need testing assert reset == 0, repr(reset) crackline(l, reset) return if reset < 0: groupcounter = 0 groupname = {groupcounter: ''} groupcache = {groupcounter: {}} grouplist = {groupcounter: []} groupcache[groupcounter]['body'] = [] groupcache[groupcounter]['vars'] = {} groupcache[groupcounter]['block'] = '' groupcache[groupcounter]['name'] = '' neededmodule = -1 skipblocksuntil = -1 return if reset > 0: fl = 0 if f77modulename and neededmodule == groupcounter: fl = 2 while groupcounter > fl: outmess('crackline: groupcounter=%s groupname=%s\n' % (repr(groupcounter), repr(groupname))) outmess( 'crackline: Mismatch of blocks encountered. 
Trying to fix it by assuming "end" statement.\n') grouplist[groupcounter - 1].append(groupcache[groupcounter]) grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] del grouplist[groupcounter] groupcounter = groupcounter - 1 if f77modulename and neededmodule == groupcounter: grouplist[groupcounter - 1].append(groupcache[groupcounter]) grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] del grouplist[groupcounter] groupcounter = groupcounter - 1 # end interface grouplist[groupcounter - 1].append(groupcache[groupcounter]) grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] del grouplist[groupcounter] groupcounter = groupcounter - 1 # end module neededmodule = -1 return if line == '': return flag = 0 for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern, requiredpattern, parameterpattern, datapattern, publicpattern, privatepattern, intrisicpattern, endifpattern, endpattern, formatpattern, beginpattern, functionpattern, subroutinepattern, implicitpattern, typespattern, commonpattern, callpattern, usepattern, containspattern, entrypattern, f2pyenhancementspattern, multilinepattern ]: m = pat[0].match(line) if m: break flag = flag + 1 if not m: re_1 = crackline_re_1 if 0 <= skipblocksuntil <= groupcounter: return if 'externals' in groupcache[groupcounter]: for name in groupcache[groupcounter]['externals']: if name in invbadnames: name = invbadnames[name] if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']: continue m1 = re.match( r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I) if m1: m2 = re_1.match(m1.group('before')) a = _simplifyargs(m1.group('args')) if m2: line = 'callfun %s(%s) result (%s)' % ( name, a, m2.group('result')) else: line = 'callfun %s(%s)' % (name, a) m = callfunpattern[0].match(line) if not m: outmess( 'crackline: could not resolve function call for line=%s.\n' % repr(line)) return analyzeline(m, 'callfun', line) return if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')): previous_context = None outmess('crackline:%d: No pattern for line\n' % (groupcounter)) return elif pat[1] == 'end': if 0 <= skipblocksuntil < groupcounter: groupcounter = groupcounter - 1 if skipblocksuntil <= groupcounter: return if groupcounter <= 0: raise Exception('crackline: groupcounter(=%s) is nonpositive. ' 'Check the blocks.' 
% (groupcounter)) m1 = beginpattern[0].match((line)) if (m1) and (not m1.group('this') == groupname[groupcounter]): raise Exception('crackline: End group %s does not match with ' 'previous Begin group %s\n\t%s' % (repr(m1.group('this')), repr(groupname[groupcounter]), filepositiontext) ) if skipblocksuntil == groupcounter: skipblocksuntil = -1 grouplist[groupcounter - 1].append(groupcache[groupcounter]) grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] del grouplist[groupcounter] groupcounter = groupcounter - 1 if not skipemptyends: expectbegin = 1 elif pat[1] == 'begin': if 0 <= skipblocksuntil <= groupcounter: groupcounter = groupcounter + 1 return gotnextfile = 0 analyzeline(m, pat[1], line) expectbegin = 0 elif pat[1] == 'endif': pass elif pat[1] == 'contains': if ignorecontains: return if 0 <= skipblocksuntil <= groupcounter: return skipblocksuntil = groupcounter else: if 0 <= skipblocksuntil <= groupcounter: return analyzeline(m, pat[1], line) def markouterparen(line): l = '' f = 0 for c in line: if c == '(': f = f + 1 if f == 1: l = l + '@(@' continue elif c == ')': f = f - 1 if f == 0: l = l + '@)@' continue l = l + c return l def markoutercomma(line, comma=','): l = '' f = 0 cc = '' for c in line: if (not cc or cc == ')') and c == '(': f = f + 1 cc = ')' elif not cc and c == '\'' and (not l or l[-1] != '\\'): f = f + 1 cc = '\'' elif c == cc: f = f - 1 if f == 0: cc = '' elif c == comma and f == 0: l = l + '@' + comma + '@' continue l = l + c assert not f, repr((f, line, l, cc)) return l def unmarkouterparen(line): r = line.replace('@(@', '(').replace('@)@', ')') return r def appenddecl(decl, decl2, force=1): if not decl: decl = {} if not decl2: return decl if decl is decl2: return decl for k in list(decl2.keys()): if k == 'typespec': if force or k not in decl: decl[k] = decl2[k] elif k == 'attrspec': for l in decl2[k]: decl = setattrspec(decl, l, force) elif k == 'kindselector': decl = setkindselector(decl, decl2[k], force) elif k == 'charselector': decl = setcharselector(decl, decl2[k], force) elif k in ['=', 'typename']: if force or k not in decl: decl[k] = decl2[k] elif k == 'note': pass elif k in ['intent', 'check', 'dimension', 'optional', 'required']: errmess('appenddecl: "%s" not implemented.\n' % k) else: raise Exception('appenddecl: Unknown variable definition key:' + str(k)) return decl selectpattern = re.compile( r'\s*(?P<this>(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I) nameargspattern = re.compile( r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z', re.I) callnameargspattern = re.compile( r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I) real16pattern = re.compile( r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)') real8pattern = re.compile( r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))') _intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I) def _is_intent_callback(vdecl): for a in vdecl.get('attrspec', []): if _intentcallbackpattern.match(a): return 1 return 0 def _resolvenameargspattern(line): line = markouterparen(line) m1 = nameargspattern.match(line) if m1: return m1.group('name'), m1.group('args'), m1.group('result'), m1.group('bind') m1 = callnameargspattern.match(line) if m1: return m1.group('name'), m1.group('args'), None, None return None, [], None, None def analyzeline(m, case, line): global groupcounter, groupname, groupcache, grouplist, 
filepositiontext global currentfilename, f77modulename, neededinterface, neededmodule global expectbegin, gotnextfile, previous_context block = m.group('this') if case != 'multiline': previous_context = None if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \ and not skipemptyends and groupcounter < 1: newname = os.path.basename(currentfilename).split('.')[0] outmess( 'analyzeline: no group yet. Creating program group with name "%s".\n' % newname) gotnextfile = 0 groupcounter = groupcounter + 1 groupname[groupcounter] = 'program' groupcache[groupcounter] = {} grouplist[groupcounter] = [] groupcache[groupcounter]['body'] = [] groupcache[groupcounter]['vars'] = {} groupcache[groupcounter]['block'] = 'program' groupcache[groupcounter]['name'] = newname groupcache[groupcounter]['from'] = 'fromsky' expectbegin = 0 if case in ['begin', 'call', 'callfun']: # Crack line => block,name,args,result block = block.lower() if re.match(r'block\s*data', block, re.I): block = 'block data' if re.match(r'python\s*module', block, re.I): block = 'python module' name, args, result, bind = _resolvenameargspattern(m.group('after')) if name is None: if block == 'block data': name = '_BLOCK_DATA_' else: name = '' if block not in ['interface', 'block data']: outmess('analyzeline: No name/args pattern found for line.\n') previous_context = (block, name, groupcounter) if args: args = rmbadname([x.strip() for x in markoutercomma(args).split('@,@')]) else: args = [] if '' in args: while '' in args: args.remove('') outmess( 'analyzeline: argument list is malformed (missing argument).\n') # end of crack line => block,name,args,result needmodule = 0 needinterface = 0 if case in ['call', 'callfun']: needinterface = 1 if 'args' not in groupcache[groupcounter]: return if name not in groupcache[groupcounter]['args']: return for it in grouplist[groupcounter]: if it['name'] == name: return if name in groupcache[groupcounter]['interfaced']: return block = {'call': 'subroutine', 'callfun': 'function'}[case] if f77modulename and neededmodule == -1 and groupcounter <= 1: neededmodule = groupcounter + 2 needmodule = 1 if block != 'interface': needinterface = 1 # Create new block(s) groupcounter = groupcounter + 1 groupcache[groupcounter] = {} grouplist[groupcounter] = [] if needmodule: if verbose > 1: outmess('analyzeline: Creating module block %s\n' % repr(f77modulename), 0) groupname[groupcounter] = 'module' groupcache[groupcounter]['block'] = 'python module' groupcache[groupcounter]['name'] = f77modulename groupcache[groupcounter]['from'] = '' groupcache[groupcounter]['body'] = [] groupcache[groupcounter]['externals'] = [] groupcache[groupcounter]['interfaced'] = [] groupcache[groupcounter]['vars'] = {} groupcounter = groupcounter + 1 groupcache[groupcounter] = {} grouplist[groupcounter] = [] if needinterface: if verbose > 1: outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % ( groupcounter), 0) groupname[groupcounter] = 'interface' groupcache[groupcounter]['block'] = 'interface' groupcache[groupcounter]['name'] = 'unknown_interface' groupcache[groupcounter]['from'] = '%s:%s' % ( groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) groupcache[groupcounter]['body'] = [] groupcache[groupcounter]['externals'] = [] groupcache[groupcounter]['interfaced'] = [] groupcache[groupcounter]['vars'] = {} groupcounter = groupcounter + 1 groupcache[groupcounter] = {} grouplist[groupcounter] = [] groupname[groupcounter] = block groupcache[groupcounter]['block'] = block if not 
name: name = 'unknown_' + block groupcache[groupcounter]['prefix'] = m.group('before') groupcache[groupcounter]['name'] = rmbadname1(name) groupcache[groupcounter]['result'] = result if groupcounter == 1: groupcache[groupcounter]['from'] = currentfilename else: if f77modulename and groupcounter == 3: groupcache[groupcounter]['from'] = '%s:%s' % ( groupcache[groupcounter - 1]['from'], currentfilename) else: groupcache[groupcounter]['from'] = '%s:%s' % ( groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) for k in list(groupcache[groupcounter].keys()): if not groupcache[groupcounter][k]: del groupcache[groupcounter][k] groupcache[groupcounter]['args'] = args groupcache[groupcounter]['body'] = [] groupcache[groupcounter]['externals'] = [] groupcache[groupcounter]['interfaced'] = [] groupcache[groupcounter]['vars'] = {} groupcache[groupcounter]['entry'] = {} # end of creation if block == 'type': groupcache[groupcounter]['varnames'] = [] if case in ['call', 'callfun']: # set parents variables if name not in groupcache[groupcounter - 2]['externals']: groupcache[groupcounter - 2]['externals'].append(name) groupcache[groupcounter]['vars'] = copy.deepcopy( groupcache[groupcounter - 2]['vars']) try: del groupcache[groupcounter]['vars'][name][ groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] except: pass if block in ['function', 'subroutine']: # set global attributes try: groupcache[groupcounter]['vars'][name] = appenddecl( groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars']['']) except: pass if case == 'callfun': # return type if result and result in groupcache[groupcounter]['vars']: if not name == result: groupcache[groupcounter]['vars'][name] = appenddecl( groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result]) # if groupcounter>1: # name is interfaced try: groupcache[groupcounter - 2]['interfaced'].append(name) except: pass if block == 'function': t = typespattern[0].match(m.group('before') + ' ' + name) if t: typespec, selector, attr, edecl = cracktypespec0( t.group('this'), t.group('after')) updatevars(typespec, selector, attr, edecl) if case in ['call', 'callfun']: grouplist[groupcounter - 1].append(groupcache[groupcounter]) grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] del grouplist[groupcounter] groupcounter = groupcounter - 1 # end routine grouplist[groupcounter - 1].append(groupcache[groupcounter]) grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] del grouplist[groupcounter] groupcounter = groupcounter - 1 # end interface elif case == 'entry': name, args, result, bind = _resolvenameargspattern(m.group('after')) if name is not None: if args: args = rmbadname([x.strip() for x in markoutercomma(args).split('@,@')]) else: args = [] assert result is None, repr(result) groupcache[groupcounter]['entry'][name] = args previous_context = ('entry', name, groupcounter) elif case == 'type': typespec, selector, attr, edecl = cracktypespec0( block, m.group('after')) last_name = updatevars(typespec, selector, attr, edecl) if last_name is not None: previous_context = ('variable', last_name, groupcounter) elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrisic']: edecl = groupcache[groupcounter]['vars'] ll = m.group('after').strip() i = ll.find('::') if i < 0 and case == 'intent': i = markouterparen(ll).find('@)@') - 2 ll = ll[:i + 1] + '::' + ll[i + 1:] i = ll.find('::') if ll[i:] == '::' and 'args' in groupcache[groupcounter]: 
outmess('All arguments will have attribute %s%s\n' % (m.group('this'), ll[:i])) ll = ll + ','.join(groupcache[groupcounter]['args']) if i < 0: i = 0 pl = '' else: pl = ll[:i].strip() ll = ll[i + 2:] ch = markoutercomma(pl).split('@,@') if len(ch) > 1: pl = ch[0] outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % ( ','.join(ch[1:]))) last_name = None for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: m1 = namepattern.match(e) if not m1: if case in ['public', 'private']: k = '' else: print(m.groupdict()) outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % ( case, repr(e))) continue else: k = rmbadname1(m1.group('name')) if k not in edecl: edecl[k] = {} if case == 'dimension': ap = case + m1.group('after') if case == 'intent': ap = m.group('this') + pl if _intentcallbackpattern.match(ap): if k not in groupcache[groupcounter]['args']: if groupcounter > 1: if '__user__' not in groupcache[groupcounter - 2]['name']: outmess( 'analyzeline: missing __user__ module (could be nothing)\n') # fixes ticket 1693 if k != groupcache[groupcounter]['name']: outmess('analyzeline: appending intent(callback) %s' ' to %s arguments\n' % (k, groupcache[groupcounter]['name'])) groupcache[groupcounter]['args'].append(k) else: errmess( 'analyzeline: intent(callback) %s is ignored' % (k)) else: errmess('analyzeline: intent(callback) %s is already' ' in argument list' % (k)) if case in ['optional', 'required', 'public', 'external', 'private', 'intrisic']: ap = case if 'attrspec' in edecl[k]: edecl[k]['attrspec'].append(ap) else: edecl[k]['attrspec'] = [ap] if case == 'external': if groupcache[groupcounter]['block'] == 'program': outmess('analyzeline: ignoring program arguments\n') continue if k not in groupcache[groupcounter]['args']: continue if 'externals' not in groupcache[groupcounter]: groupcache[groupcounter]['externals'] = [] groupcache[groupcounter]['externals'].append(k) last_name = k groupcache[groupcounter]['vars'] = edecl if last_name is not None: previous_context = ('variable', last_name, groupcounter) elif case == 'parameter': edecl = groupcache[groupcounter]['vars'] ll = m.group('after').strip()[1:-1] last_name = None for e in markoutercomma(ll).split('@,@'): try: k, initexpr = [x.strip() for x in e.split('=')] except: outmess( 'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll)) continue params = get_parameters(edecl) k = rmbadname1(k) if k not in edecl: edecl[k] = {} if '=' in edecl[k] and (not edecl[k]['='] == initexpr): outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % ( k, edecl[k]['='], initexpr)) t = determineexprtype(initexpr, params) if t: if t.get('typespec') == 'real': tt = list(initexpr) for m in real16pattern.finditer(initexpr): tt[m.start():m.end()] = list( initexpr[m.start():m.end()].lower().replace('d', 'e')) initexpr = ''.join(tt) elif t.get('typespec') == 'complex': initexpr = initexpr[1:].lower().replace('d', 'e').\ replace(',', '+1j*(') try: v = eval(initexpr, {}, params) except (SyntaxError, NameError, TypeError) as msg: errmess('analyzeline: Failed to evaluate %r. 
Ignoring: %s\n' % (initexpr, msg)) continue edecl[k]['='] = repr(v) if 'attrspec' in edecl[k]: edecl[k]['attrspec'].append('parameter') else: edecl[k]['attrspec'] = ['parameter'] last_name = k groupcache[groupcounter]['vars'] = edecl if last_name is not None: previous_context = ('variable', last_name, groupcounter) elif case == 'implicit': if m.group('after').strip().lower() == 'none': groupcache[groupcounter]['implicit'] = None elif m.group('after'): if 'implicit' in groupcache[groupcounter]: impl = groupcache[groupcounter]['implicit'] else: impl = {} if impl is None: outmess( 'analyzeline: Overwriting earlier "implicit none" statement.\n') impl = {} for e in markoutercomma(m.group('after')).split('@,@'): decl = {} m1 = re.match( r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z', e, re.I) if not m1: outmess( 'analyzeline: could not extract info of implicit statement part "%s"\n' % (e)) continue m2 = typespattern4implicit.match(m1.group('this')) if not m2: outmess( 'analyzeline: could not extract types pattern of implicit statement part "%s"\n' % (e)) continue typespec, selector, attr, edecl = cracktypespec0( m2.group('this'), m2.group('after')) kindselect, charselect, typename = cracktypespec( typespec, selector) decl['typespec'] = typespec decl['kindselector'] = kindselect decl['charselector'] = charselect decl['typename'] = typename for k in list(decl.keys()): if not decl[k]: del decl[k] for r in markoutercomma(m1.group('after')).split('@,@'): if '-' in r: try: begc, endc = [x.strip() for x in r.split('-')] except: outmess( 'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n' % r) continue else: begc = endc = r.strip() if not len(begc) == len(endc) == 1: outmess( 'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n' % r) continue for o in range(ord(begc), ord(endc) + 1): impl[chr(o)] = decl groupcache[groupcounter]['implicit'] = impl elif case == 'data': ll = [] dl = '' il = '' f = 0 fc = 1 inp = 0 for c in m.group('after'): if not inp: if c == "'": fc = not fc if c == '/' and fc: f = f + 1 continue if c == '(': inp = inp + 1 elif c == ')': inp = inp - 1 if f == 0: dl = dl + c elif f == 1: il = il + c elif f == 2: dl = dl.strip() if dl.startswith(','): dl = dl[1:].strip() ll.append([dl, il]) dl = c il = '' f = 0 if f == 2: dl = dl.strip() if dl.startswith(','): dl = dl[1:].strip() ll.append([dl, il]) vars = {} if 'vars' in groupcache[groupcounter]: vars = groupcache[groupcounter]['vars'] last_name = None for l in ll: l = [x.strip() for x in l] if l[0][0] == ',': l[0] = l[0][1:] if l[0][0] == '(': outmess( 'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % l[0]) continue i = 0 j = 0 llen = len(l[1]) for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]): if v[0] == '(': outmess( 'analyzeline: implied-DO list "%s" is not supported. Skipping.\n' % v) # XXX: subsequent init expressions may get wrong values. # Ignoring since data statements are irrelevant for # wrapping. 
continue fc = 0 while (i < llen) and (fc or not l[1][i] == ','): if l[1][i] == "'": fc = not fc i = i + 1 i = i + 1 if v not in vars: vars[v] = {} if '=' in vars[v] and not vars[v]['='] == l[1][j:i - 1]: outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n' % ( v, vars[v]['='], l[1][j:i - 1])) vars[v]['='] = l[1][j:i - 1] j = i last_name = v groupcache[groupcounter]['vars'] = vars if last_name is not None: previous_context = ('variable', last_name, groupcounter) elif case == 'common': line = m.group('after').strip() if not line[0] == '/': line = '//' + line cl = [] f = 0 bn = '' ol = '' for c in line: if c == '/': f = f + 1 continue if f >= 3: bn = bn.strip() if not bn: bn = '_BLNK_' cl.append([bn, ol]) f = f - 2 bn = '' ol = '' if f % 2: bn = bn + c else: ol = ol + c bn = bn.strip() if not bn: bn = '_BLNK_' cl.append([bn, ol]) commonkey = {} if 'common' in groupcache[groupcounter]: commonkey = groupcache[groupcounter]['common'] for c in cl: if c[0] not in commonkey: commonkey[c[0]] = [] for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]: if i: commonkey[c[0]].append(i) groupcache[groupcounter]['common'] = commonkey previous_context = ('common', bn, groupcounter) elif case == 'use': m1 = re.match( r'\A\s*(?P<name>\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I) if m1: mm = m1.groupdict() if 'use' not in groupcache[groupcounter]: groupcache[groupcounter]['use'] = {} name = m1.group('name') groupcache[groupcounter]['use'][name] = {} isonly = 0 if 'list' in mm and mm['list'] is not None: if 'notonly' in mm and mm['notonly'] is None: isonly = 1 groupcache[groupcounter]['use'][name]['only'] = isonly ll = [x.strip() for x in mm['list'].split(',')] rl = {} for l in ll: if '=' in l: m2 = re.match( r'\A\s*(?P<local>\b[\w]+\b)\s*=\s*>\s*(?P<use>\b[\w]+\b)\s*\Z', l, re.I) if m2: rl[m2.group('local').strip()] = m2.group( 'use').strip() else: outmess( 'analyzeline: Not local=>use pattern found in %s\n' % repr(l)) else: rl[l] = l groupcache[groupcounter]['use'][name]['map'] = rl else: pass else: print(m.groupdict()) outmess('analyzeline: Could not crack the use statement.\n') elif case in ['f2pyenhancements']: if 'f2pyenhancements' not in groupcache[groupcounter]: groupcache[groupcounter]['f2pyenhancements'] = {} d = groupcache[groupcounter]['f2pyenhancements'] if m.group('this') == 'usercode' and 'usercode' in d: if isinstance(d['usercode'], str): d['usercode'] = [d['usercode']] d['usercode'].append(m.group('after')) else: d[m.group('this')] = m.group('after') elif case == 'multiline': if previous_context is None: if verbose: outmess('analyzeline: No context for multiline block.\n') return gc = groupcounter appendmultiline(groupcache[gc], previous_context[:2], m.group('this')) else: if verbose > 1: print(m.groupdict()) outmess('analyzeline: No code implemented for line.\n') def appendmultiline(group, context_name, ml): if 'f2pymultilines' not in group: group['f2pymultilines'] = {} d = group['f2pymultilines'] if context_name not in d: d[context_name] = [] d[context_name].append(ml) return def cracktypespec0(typespec, ll): selector = None attr = None if re.match(r'double\s*complex', typespec, re.I): typespec = 'double complex' elif re.match(r'double\s*precision', typespec, re.I): typespec = 'double precision' else: typespec = typespec.strip().lower() m1 = selectpattern.match(markouterparen(ll)) if not m1: outmess( 'cracktypespec0: no kind/char_selector pattern found for line.\n') return d = m1.groupdict() for k in 
list(d.keys()): d[k] = unmarkouterparen(d[k]) if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']: selector = d['this'] ll = d['after'] i = ll.find('::') if i >= 0: attr = ll[:i].strip() ll = ll[i + 2:] return typespec, selector, attr, ll ##### namepattern = re.compile(r'\s*(?P<name>\b[\w]+\b)\s*(?P<after>.*)\s*\Z', re.I) kindselector = re.compile( r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|[*]\s*(?P<kind2>.*?))\s*\Z', re.I) charselector = re.compile( r'\s*(\((?P<lenkind>.*)\)|[*]\s*(?P<charlen>.*))\s*\Z', re.I) lenkindpattern = re.compile( r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z', re.I) lenarraypattern = re.compile( r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*[*]\s*(?P<len>.*?)|([*]\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I) def removespaces(expr): expr = expr.strip() if len(expr) <= 1: return expr expr2 = expr[0] for i in range(1, len(expr) - 1): if (expr[i] == ' ' and ((expr[i + 1] in "()[]{}=+-/* ") or (expr[i - 1] in "()[]{}=+-/* "))): continue expr2 = expr2 + expr[i] expr2 = expr2 + expr[-1] return expr2 def markinnerspaces(line): l = '' f = 0 cc = '\'' cb = '' for c in line: if cb == '\\' and c in ['\\', '\'', '"']: l = l + c cb = c continue if f == 0 and c in ['\'', '"']: cc = c if c == cc: f = f + 1 elif c == cc:
f = f - 1
elif c == ' ' and f == 1: l = l + '@_@' continue l = l + c cb = c return l def updatevars(typespec, selector, attrspec, entitydecl): global groupcache, groupcounter last_name = None kindselect, charselect, typename = cracktypespec(typespec, selector) if attrspec: attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')] l = [] c = re.compile(r'(?P<start>[a-zA-Z]+)') for a in attrspec: if not a: continue m = c.match(a) if m: s = m.group('start').lower() a = s + a[len(s):] l.append(a) attrspec = l el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')] el1 = [] for e in el: for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]: if e1: el1.append(e1.replace('@_@', ' ')) for e in el1: m = namepattern.match(e) if not m: outmess( 'updatevars: no name pattern found for entity=%s. Skipping.\n' % (repr(e))) continue ename = rmbadname1(m.group('name')) edecl = {} if ename in groupcache[groupcounter]['vars']: edecl = groupcache[groupcounter]['vars'][ename].copy() not_has_typespec = 'typespec' not in edecl if not_has_typespec: edecl['typespec'] = typespec elif typespec and (not typespec == edecl['typespec']): outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % ( ename, edecl['typespec'], typespec)) if 'kindselector' not in edecl: edecl['kindselector'] = copy.copy(kindselect) elif kindselect: for k in list(kindselect.keys()): if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]): outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( k, ename, edecl['kindselector'][k], kindselect[k])) else: edecl['kindselector'][k] = copy.copy(kindselect[k]) if 'charselector' not in edecl and charselect: if not_has_typespec: edecl['charselector'] = charselect else: errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' % (ename, charselect)) elif charselect: for k in list(charselect.keys()): if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]): outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % ( k, ename, edecl['charselector'][k], charselect[k])) else: edecl['charselector'][k] = copy.copy(charselect[k]) if 'typename' not in edecl: edecl['typename'] = typename elif typename and (not edecl['typename'] == typename): outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". 
Ignoring.\n' % ( ename, edecl['typename'], typename)) if 'attrspec' not in edecl: edecl['attrspec'] = copy.copy(attrspec) elif attrspec: for a in attrspec: if a not in edecl['attrspec']: edecl['attrspec'].append(a) else: edecl['typespec'] = copy.copy(typespec) edecl['kindselector'] = copy.copy(kindselect) edecl['charselector'] = copy.copy(charselect) edecl['typename'] = typename edecl['attrspec'] = copy.copy(attrspec) if m.group('after'): m1 = lenarraypattern.match(markouterparen(m.group('after'))) if m1: d1 = m1.groupdict() for lk in ['len', 'array', 'init']: if d1[lk + '2'] is not None: d1[lk] = d1[lk + '2'] del d1[lk + '2'] for k in list(d1.keys()): if d1[k] is not None: d1[k] = unmarkouterparen(d1[k]) else: del d1[k] if 'len' in d1 and 'array' in d1: if d1['len'] == '': d1['len'] = d1['array'] del d1['array'] else: d1['array'] = d1['array'] + ',' + d1['len'] del d1['len'] errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % ( typespec, e, typespec, ename, d1['array'])) if 'array' in d1: dm = 'dimension(%s)' % d1['array'] if 'attrspec' not in edecl or (not edecl['attrspec']): edecl['attrspec'] = [dm] else: edecl['attrspec'].append(dm) for dm1 in edecl['attrspec']: if dm1[:9] == 'dimension' and dm1 != dm: del edecl['attrspec'][-1] errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' % (ename, dm1, dm)) break if 'len' in d1: if typespec in ['complex', 'integer', 'logical', 'real']: if ('kindselector' not in edecl) or (not edecl['kindselector']): edecl['kindselector'] = {} edecl['kindselector']['*'] = d1['len'] elif typespec == 'character': if ('charselector' not in edecl) or (not edecl['charselector']): edecl['charselector'] = {} if 'len' in edecl['charselector']: del edecl['charselector']['len'] edecl['charselector']['*'] = d1['len'] if 'init' in d1: if '=' in edecl and (not edecl['='] == d1['init']): outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % ( ename, edecl['='], d1['init'])) else: edecl['='] = d1['init'] else: outmess('updatevars: could not crack entity declaration "%s". 
Ignoring.\n' % ( ename + m.group('after'))) for k in list(edecl.keys()): if not edecl[k]: del edecl[k] groupcache[groupcounter]['vars'][ename] = edecl if 'varnames' in groupcache[groupcounter]: groupcache[groupcounter]['varnames'].append(ename) last_name = ename return last_name def cracktypespec(typespec, selector): kindselect = None charselect = None typename = None if selector: if typespec in ['complex', 'integer', 'logical', 'real']: kindselect = kindselector.match(selector) if not kindselect: outmess( 'cracktypespec: no kindselector pattern found for %s\n' % (repr(selector))) return kindselect = kindselect.groupdict() kindselect['*'] = kindselect['kind2'] del kindselect['kind2'] for k in list(kindselect.keys()): if not kindselect[k]: del kindselect[k] for k, i in list(kindselect.items()): kindselect[k] = rmbadname1(i) elif typespec == 'character': charselect = charselector.match(selector) if not charselect: outmess( 'cracktypespec: no charselector pattern found for %s\n' % (repr(selector))) return charselect = charselect.groupdict() charselect['*'] = charselect['charlen'] del charselect['charlen'] if charselect['lenkind']: lenkind = lenkindpattern.match( markoutercomma(charselect['lenkind'])) lenkind = lenkind.groupdict() for lk in ['len', 'kind']: if lenkind[lk + '2']: lenkind[lk] = lenkind[lk + '2'] charselect[lk] = lenkind[lk] del lenkind[lk + '2'] del charselect['lenkind'] for k in list(charselect.keys()): if not charselect[k]: del charselect[k] for k, i in list(charselect.items()): charselect[k] = rmbadname1(i) elif typespec == 'type': typename = re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I) if typename: typename = typename.group('name') else: outmess('cracktypespec: no typename found in %s\n' % (repr(typespec + selector))) else: outmess('cracktypespec: no selector used for %s\n' % (repr(selector))) return kindselect, charselect, typename ###### def setattrspec(decl, attr, force=0): if not decl: decl = {} if not attr: return decl if 'attrspec' not in decl: decl['attrspec'] = [attr] return decl if force: decl['attrspec'].append(attr) if attr in decl['attrspec']: return decl if attr == 'static' and 'automatic' not in decl['attrspec']: decl['attrspec'].append(attr) elif attr == 'automatic' and 'static' not in decl['attrspec']: decl['attrspec'].append(attr) elif attr == 'public' and 'private' not in decl['attrspec']: decl['attrspec'].append(attr) elif attr == 'private' and 'public' not in decl['attrspec']: decl['attrspec'].append(attr) else: decl['attrspec'].append(attr) return decl def setkindselector(decl, sel, force=0): if not decl: decl = {} if not sel: return decl if 'kindselector' not in decl: decl['kindselector'] = sel return decl for k in list(sel.keys()): if force or k not in decl['kindselector']: decl['kindselector'][k] = sel[k] return decl def setcharselector(decl, sel, force=0): if not decl: decl = {} if not sel: return decl if 'charselector' not in decl: decl['charselector'] = sel return decl for k in list(sel.keys()): if force or k not in decl['charselector']: decl['charselector'][k] = sel[k] return decl def getblockname(block, unknown='unknown'): if 'name' in block: return block['name'] return unknown # post processing def setmesstext(block): global filepositiontext try: filepositiontext = 'In: %s:%s\n' % (block['from'], block['name']) except: pass def get_usedict(block): usedict = {} if 'parent_block' in block: usedict = get_usedict(block['parent_block']) if 'use' in block: usedict.update(block['use']) return usedict def get_useparameters(block, 
param_map=None): global f90modulevars if param_map is None: param_map = {} usedict = get_usedict(block) if not usedict: return param_map for usename, mapping in list(usedict.items()): usename = usename.lower() if usename not in f90modulevars: outmess('get_useparameters: no module %s info used by %s\n' % (usename, block.get('name'))) continue mvars = f90modulevars[usename] params = get_parameters(mvars) if not params: continue # XXX: apply mapping if mapping: errmess('get_useparameters: mapping for %s not impl.' % (mapping)) for k, v in list(params.items()): if k in param_map: outmess('get_useparameters: overriding parameter %s with' ' value from module %s' % (repr(k), repr(usename))) param_map[k] = v return param_map def postcrack2(block, tab='', param_map=None): global f90modulevars if not f90modulevars: return block if isinstance(block, list): ret = [] for g in block: g = postcrack2(g, tab=tab + '\t', param_map=param_map) ret.append(g) return ret setmesstext(block) outmess('%sBlock: %s\n' % (tab, block['name']), 0) if param_map is None: param_map = get_useparameters(block) if param_map is not None and 'vars' in block: vars = block['vars'] for n in list(vars.keys()): var = vars[n] if 'kindselector' in var: kind = var['kindselector'] if 'kind' in kind: val = kind['kind'] if val in param_map: kind['kind'] = param_map[val] new_body = [] for b in block['body']: b = postcrack2(b, tab=tab + '\t', param_map=param_map) new_body.append(b) block['body'] = new_body return block def postcrack(block, args=None, tab=''): """ TODO: function return values determine expression types if in argument list """ global usermodules, onlyfunctions if isinstance(block, list): gret = [] uret = [] for g in block: setmesstext(g) g = postcrack(g, tab=tab + '\t') # sort user routines to appear first if 'name' in g and '__user__' in g['name']: uret.append(g) else: gret.append(g) return uret + gret setmesstext(block) if not isinstance(block, dict) and 'block' not in block: raise Exception('postcrack: Expected block dictionary instead of ' + str(block)) if 'name' in block and not block['name'] == 'unknown_interface': outmess('%sBlock: %s\n' % (tab, block['name']), 0) block = analyzeargs(block) block = analyzecommon(block) block['vars'] = analyzevars(block) block['sortvars'] = sortvarnames(block['vars']) if 'args' in block and block['args']: args = block['args'] block['body'] = analyzebody(block, args, tab=tab) userisdefined = [] if 'use' in block: useblock = block['use'] for k in list(useblock.keys()): if '__user__' in k: userisdefined.append(k) else: useblock = {} name = '' if 'name' in block: name = block['name'] # and not userisdefined: # Build a __user__ module if 'externals' in block and block['externals']: interfaced = [] if 'interfaced' in block: interfaced = block['interfaced'] mvars = copy.copy(block['vars']) if name: mname = name + '__user__routines' else: mname = 'unknown__user__routines' if mname in userisdefined: i = 1 while '%s_%i' % (mname, i) in userisdefined: i = i + 1 mname = '%s_%i' % (mname, i) interface = {'block': 'interface', 'body': [], 'vars': {}, 'name': name + '_user_interface'} for e in block['externals']: if e in interfaced: edef = [] j = -1 for b in block['body']: j = j + 1 if b['block'] == 'interface': i = -1 for bb in b['body']: i = i + 1 if 'name' in bb and bb['name'] == e: edef = copy.copy(bb) del b['body'][i] break if edef: if not b['body']: del block['body'][j] del interfaced[interfaced.index(e)] break interface['body'].append(edef) else: if e in mvars and not isexternal(mvars[e]): 
interface['vars'][e] = mvars[e] if interface['vars'] or interface['body']: block['interfaced'] = interfaced mblock = {'block': 'python module', 'body': [ interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']} useblock[mname] = {} usermodules.append(mblock) if useblock: block['use'] = useblock return block def sortvarnames(vars): indep = [] dep = [] for v in list(vars.keys()): if 'depend' in vars[v] and vars[v]['depend']: dep.append(v) else: indep.append(v) n = len(dep) i = 0 while dep: # XXX: How to catch dependence cycles correctly? v = dep[0] fl = 0 for w in dep[1:]: if w in vars[v]['depend']: fl = 1 break if fl: dep = dep[1:] + [v] i = i + 1 if i > n: errmess('sortvarnames: failed to compute dependencies because' ' of cyclic dependencies between ' + ', '.join(dep) + '\n') indep = indep + dep break else: indep.append(v) dep = dep[1:] n = len(dep) i = 0 return indep def analyzecommon(block): if not hascommon(block): return block commonvars = [] for k in list(block['common'].keys()): comvars = [] for e in block['common'][k]: m = re.match( r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z', e, re.I) if m: dims = [] if m.group('dims'): dims = [x.strip() for x in markoutercomma(m.group('dims')).split('@,@')] n = m.group('name').strip() if n in block['vars']: if 'attrspec' in block['vars'][n]: block['vars'][n]['attrspec'].append( 'dimension(%s)' % (','.join(dims))) else: block['vars'][n]['attrspec'] = [ 'dimension(%s)' % (','.join(dims))] else: if dims: block['vars'][n] = { 'attrspec': ['dimension(%s)' % (','.join(dims))]} else: block['vars'][n] = {} if n not in commonvars: commonvars.append(n) else: n = e errmess( 'analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n' % (e, k)) comvars.append(n) block['common'][k] = comvars if 'commonvars' not in block: block['commonvars'] = commonvars else: block['commonvars'] = block['commonvars'] + commonvars return block def analyzebody(block, args, tab=''): global usermodules, skipfuncs, onlyfuncs, f90modulevars setmesstext(block) body = [] for b in block['body']: b['parent_block'] = block if b['block'] in ['function', 'subroutine']: if args is not None and b['name'] not in args: continue else: as_ = b['args'] if b['name'] in skipfuncs: continue if onlyfuncs and b['name'] not in onlyfuncs: continue b['saved_interface'] = crack2fortrangen( b, '\n' + ' ' * 6, as_interface=True) else: as_ = args b = postcrack(b, as_, tab=tab + '\t') if b['block'] == 'interface' and not b['body']: if 'f2pyenhancements' not in b: continue if b['block'].replace(' ', '') == 'pythonmodule': usermodules.append(b) else: if b['block'] == 'module': f90modulevars[b['name']] = b['vars'] body.append(b) return body def buildimplicitrules(block): setmesstext(block) implicitrules = defaultimplicitrules attrrules = {} if 'implicit' in block: if block['implicit'] is None: implicitrules = None if verbose > 1: outmess( 'buildimplicitrules: no implicit rules for routine %s.\n' % repr(block['name'])) else: for k in list(block['implicit'].keys()): if block['implicit'][k].get('typespec') not in ['static', 'automatic']: implicitrules[k] = block['implicit'][k] else: attrrules[k] = block['implicit'][k]['typespec'] return implicitrules, attrrules def myeval(e, g=None, l=None): r = eval(e, g, l) if type(r) in [type(0), type(0.0)]: return r raise ValueError('r=%r' % (r)) getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I) def getlincoef(e, xset): # e = a*x+b ; x in xset try: c = int(myeval(e, {}, {})) return 0, c, None except: pass if 
getlincoef_re_1.match(e): return 1, 0, e len_e = len(e) for x in xset: if len(x) > len_e: continue if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e): # skip function calls having x as an argument, e.g max(1, x) continue re_1 = re.compile(r'(?P<before>.*?)\b' + x + r'\b(?P<after>.*)', re.I) m = re_1.match(e) if m: try: m1 = re_1.match(e) while m1: ee = '%s(%s)%s' % ( m1.group('before'), 0, m1.group('after')) m1 = re_1.match(ee) b = myeval(ee, {}, {}) m1 = re_1.match(e) while m1: ee = '%s(%s)%s' % ( m1.group('before'), 1, m1.group('after')) m1 = re_1.match(ee) a = myeval(ee, {}, {}) - b m1 = re_1.match(e) while m1: ee = '%s(%s)%s' % ( m1.group('before'), 0.5, m1.group('after')) m1 = re_1.match(ee) c = myeval(ee, {}, {}) # computing another point to be sure that expression is linear m1 = re_1.match(e) while m1: ee = '%s(%s)%s' % ( m1.group('before'), 1.5, m1.group('after')) m1 = re_1.match(ee) c2 = myeval(ee, {}, {}) if (a * 0.5 + b == c and a * 1.5 + b == c2): return a, b, x except: pass break return None, None, None _varname_match = re.compile(r'\A[a-z]\w*\Z').match def getarrlen(dl, args, star='*'): edl = [] try: edl.append(myeval(dl[0], {}, {})) except: edl.append(dl[0]) try: edl.append(myeval(dl[1], {}, {})) except: edl.append(dl[1]) if isinstance(edl[0], int): p1 = 1 - edl[0] if p1 == 0: d = str(dl[1]) elif p1 < 0: d = '%s-%s' % (dl[1], -p1) else: d = '%s+%s' % (dl[1], p1) elif isinstance(edl[1], int): p1 = 1 + edl[1] if p1 == 0: d = '-(%s)' % (dl[0]) else: d = '%s-(%s)' % (p1, dl[0]) else: d = '%s-(%s)+1' % (dl[1], dl[0]) try: return repr(myeval(d, {}, {})), None, None except: pass d1, d2 = getlincoef(dl[0], args), getlincoef(dl[1], args) if None not in [d1[0], d2[0]]: if (d1[0], d2[0]) == (0, 0): return repr(d2[1] - d1[1] + 1), None, None b = d2[1] - d1[1] + 1 d1 = (d1[0], 0, d1[2]) d2 = (d2[0], b, d2[2]) if d1[0] == 0 and d2[2] in args: if b < 0: return '%s * %s - %s' % (d2[0], d2[2], -b), d2[2], '+%s)/(%s)' % (-b, d2[0]) elif b: return '%s * %s + %s' % (d2[0], d2[2], b), d2[2], '-%s)/(%s)' % (b, d2[0]) else: return '%s * %s' % (d2[0], d2[2]), d2[2], ')/(%s)' % (d2[0]) if d2[0] == 0 and d1[2] in args: if b < 0: return '%s * %s - %s' % (-d1[0], d1[2], -b), d1[2], '+%s)/(%s)' % (-b, -d1[0]) elif b: return '%s * %s + %s' % (-d1[0], d1[2], b), d1[2], '-%s)/(%s)' % (b, -d1[0]) else: return '%s * %s' % (-d1[0], d1[2]), d1[2], ')/(%s)' % (-d1[0]) if d1[2] == d2[2] and d1[2] in args: a = d2[0] - d1[0] if not a: return repr(b), None, None if b < 0: return '%s * %s - %s' % (a, d1[2], -b), d2[2], '+%s)/(%s)' % (-b, a) elif b: return '%s * %s + %s' % (a, d1[2], b), d2[2], '-%s)/(%s)' % (b, a) else: return '%s * %s' % (a, d1[2]), d2[2], ')/(%s)' % (a) if d1[0] == d2[0] == 1: c = str(d1[2]) if c not in args: if _varname_match(c): outmess('\tgetarrlen:variable "%s" undefined\n' % (c)) c = '(%s)' % c if b == 0: d = '%s-%s' % (d2[2], c) elif b < 0: d = '%s-%s-%s' % (d2[2], c, -b) else: d = '%s-%s+%s' % (d2[2], c, b) elif d1[0] == 0: c2 = str(d2[2]) if c2 not in args: if _varname_match(c2): outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) c2 = '(%s)' % c2 if d2[0] == 1: pass elif d2[0] == -1: c2 = '-%s' % c2 else: c2 = '%s*%s' % (d2[0], c2) if b == 0: d = c2 elif b < 0: d = '%s-%s' % (c2, -b) else: d = '%s+%s' % (c2, b) elif d2[0] == 0: c1 = str(d1[2]) if c1 not in args: if _varname_match(c1): outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) c1 = '(%s)' % c1 if d1[0] == 1: c1 = '-%s' % c1 elif d1[0] == -1: c1 = '+%s' % c1 elif d1[0] < 0: c1 = '+%s*%s' % (-d1[0], c1) else: c1 = '-%s*%s' % 
(d1[0], c1) if b == 0: d = c1 elif b < 0: d = '%s-%s' % (c1, -b) else: d = '%s+%s' % (c1, b) else: c1 = str(d1[2]) if c1 not in args: if _varname_match(c1): outmess('\tgetarrlen:variable "%s" undefined\n' % (c1)) c1 = '(%s)' % c1 if d1[0] == 1: c1 = '-%s' % c1 elif d1[0] == -1: c1 = '+%s' % c1 elif d1[0] < 0: c1 = '+%s*%s' % (-d1[0], c1) else: c1 = '-%s*%s' % (d1[0], c1) c2 = str(d2[2]) if c2 not in args: if _varname_match(c2): outmess('\tgetarrlen:variable "%s" undefined\n' % (c2)) c2 = '(%s)' % c2 if d2[0] == 1: pass elif d2[0] == -1: c2 = '-%s' % c2 else: c2 = '%s*%s' % (d2[0], c2) if b == 0: d = '%s%s' % (c2, c1) elif b < 0: d = '%s%s-%s' % (c2, c1, -b) else: d = '%s%s+%s' % (c2, c1, b) return d, None, None word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I) def _get_depend_dict(name, vars, deps): if name in vars: words = vars[name].get('depend', []) if '=' in vars[name] and not isstring(vars[name]): for word in word_pattern.findall(vars[name]['=']): if word not in words and word in vars: words.append(word) for word in words[:]: for w in deps.get(word, []) \ or _get_depend_dict(word, vars, deps): if w not in words: words.append(w) else: outmess('_get_depend_dict: no dependence info for %s\n' % (repr(name))) words = [] deps[name] = words return words def _calc_depend_dict(vars): names = list(vars.keys()) depend_dict = {} for n in names: _get_depend_dict(n, vars, depend_dict) return depend_dict def get_sorted_names(vars): """ """ depend_dict = _calc_depend_dict(vars) names = [] for name in list(depend_dict.keys()): if not depend_dict[name]: names.append(name) del depend_dict[name] while depend_dict: for name, lst in list(depend_dict.items()): new_lst = [n for n in lst if n in depend_dict] if not new_lst: names.append(name) del depend_dict[name] else: depend_dict[name] = new_lst return [name for name in names if name in vars] def _kind_func(string): # XXX: return something sensible. if string[0] in "'\"": string = string[1:-1] if real16pattern.match(string): return 8 elif real8pattern.match(string): return 4 return 'kind(' + string + ')' def _selected_int_kind_func(r): # XXX: This should be processor dependent m = 10 ** r if m <= 2 ** 8: return 1 if m <= 2 ** 16: return 2 if m <= 2 ** 32: return 4 if m <= 2 ** 63: return 8 if m <= 2 ** 128: return 16 return -1 def _selected_real_kind_func(p, r=0, radix=0): # XXX: This should be processor dependent # This is only good for 0 <= p <= 20 if p < 7: return 4 if p < 16: return 8 machine = platform.machine().lower() if machine.startswith('power') or machine.startswith('ppc64'): if p <= 20: return 16 else: if p < 19: return 10 elif p <= 20: return 16 return -1 def get_parameters(vars, global_params={}): params = copy.copy(global_params) g_params = copy.copy(global_params) for name, func in [('kind', _kind_func), ('selected_int_kind', _selected_int_kind_func), ('selected_real_kind', _selected_real_kind_func), ]: if name not in g_params: g_params[name] = func param_names = [] for n in get_sorted_names(vars): if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']: param_names.append(n) kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I) selected_int_kind_re = re.compile( r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I) selected_kind_re = re.compile( r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I) for n in param_names: if '=' in vars[n]: v = vars[n]['='] if islogical(vars[n]): v = v.lower() for repl in [ ('.false.', 'False'), ('.true.', 'True'), # TODO: test .eq., .neq., etc replacements. 
]: v = v.replace(*repl) v = kind_re.sub(r'kind("\1")', v) v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v) # We need to act according to the data. # The easy case is if the data has a kind-specifier, # then we may easily remove those specifiers. # However, it may be that the user uses other specifiers...(!) is_replaced = False if 'kindselector' in vars[n]: if 'kind' in vars[n]['kindselector']: orig_v_len = len(v) v = v.replace('_' + vars[n]['kindselector']['kind'], '') # Again, this will be true if even a single specifier # has been replaced, see comment above. is_replaced = len(v) < orig_v_len if not is_replaced: if not selected_kind_re.match(v): v_ = v.split('_') # In case there are additive parameters if len(v_) > 1: v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '') # Currently this will not work for complex numbers. # There is missing code for extracting a complex number, # which may be defined in either of these: # a) (Re, Im) # b) cmplx(Re, Im) # c) dcmplx(Re, Im) # d) cmplx(Re, Im, <prec>) if isdouble(vars[n]): tt = list(v) for m in real16pattern.finditer(v): tt[m.start():m.end()] = list( v[m.start():m.end()].lower().replace('d', 'e')) v = ''.join(tt) elif iscomplex(vars[n]): # FIXME complex numbers may also have exponents if v[0] == '(' and v[-1] == ')': # FIXME, unused l looks like potential bug l = markoutercomma(v[1:-1]).split('@,@') try: params[n] = eval(v, g_params, params) except Exception as msg: params[n] = v outmess('get_parameters: got "%s" on %s\n' % (msg, repr(v))) if isstring(vars[n]) and isinstance(params[n], int): params[n] = chr(params[n]) nl = n.lower() if nl != n: params[nl] = params[n] else: print(vars[n]) outmess( 'get_parameters:parameter %s does not have value?!\n' % (repr(n))) return params def _eval_length(length, params): if length in ['(:)', '(*)', '*']: return '(*)' return _eval_scalar(length, params) _is_kind_number = re.compile(r'\d+_').match def _eval_scalar(value, params): if _is_kind_number(value): value = value.split('_')[0] try: value = str(eval(value, {}, params)) except (NameError, SyntaxError): return value except Exception as msg: errmess('"%s" in evaluating %r ' '(available names: %s)\n' % (msg, value, list(params.keys()))) return value def analyzevars(block): global f90modulevars setmesstext(block) implicitrules, attrrules = buildimplicitrules(block) vars = copy.copy(block['vars']) if block['block'] == 'function' and block['name'] not in vars: vars[block['name']] = {} if '' in block['vars']: del vars[''] if 'attrspec' in block['vars']['']: gen = block['vars']['']['attrspec'] for n in list(vars.keys()): for k in ['public', 'private']: if k in gen: vars[n] = setattrspec(vars[n], k) svars = [] args = block['args'] for a in args: try: vars[a] svars.append(a) except KeyError: pass for n in list(vars.keys()): if n not in args: svars.append(n) params = get_parameters(vars, get_useparameters(block)) dep_matches = {} name_match = re.compile(r'\w[\w\d_$]*').match for v in list(vars.keys()): m = name_match(v) if m: n = v[m.start():m.end()] try: dep_matches[n] except KeyError: dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match for n in svars: if n[0] in list(attrrules.keys()): vars[n] = setattrspec(vars[n], attrrules[n[0]]) if 'typespec' not in vars[n]: if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']): if implicitrules: ln0 = n[0].lower() for k in list(implicitrules[ln0].keys()): if k == 'typespec' and implicitrules[ln0][k] == 'undefined': continue if k not in vars[n]: vars[n][k] = implicitrules[ln0][k] elif k 
== 'attrspec': for l in implicitrules[ln0][k]: vars[n] = setattrspec(vars[n], l) elif n in block['args']: outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % ( repr(n), block['name'])) if 'charselector' in vars[n]: if 'len' in vars[n]['charselector']: l = vars[n]['charselector']['len'] try: l = str(eval(l, {}, params)) except: pass vars[n]['charselector']['len'] = l if 'kindselector' in vars[n]: if 'kind' in vars[n]['kindselector']: l = vars[n]['kindselector']['kind'] try: l = str(eval(l, {}, params)) except: pass vars[n]['kindselector']['kind'] = l savelindims = {} if 'attrspec' in vars[n]: attr = vars[n]['attrspec'] attr.reverse() vars[n]['attrspec'] = [] dim, intent, depend, check, note = None, None, None, None, None for a in attr: if a[:9] == 'dimension': dim = (a[9:].strip())[1:-1] elif a[:6] == 'intent': intent = (a[6:].strip())[1:-1] elif a[:6] == 'depend': depend = (a[6:].strip())[1:-1] elif a[:5] == 'check': check = (a[5:].strip())[1:-1] elif a[:4] == 'note': note = (a[4:].strip())[1:-1] else: vars[n] = setattrspec(vars[n], a) if intent: if 'intent' not in vars[n]: vars[n]['intent'] = [] for c in [x.strip() for x in markoutercomma(intent).split('@,@')]: # Remove spaces so that 'in out' becomes 'inout' tmp = c.replace(' ', '') if tmp not in vars[n]['intent']: vars[n]['intent'].append(tmp) intent = None if note: note = note.replace('\\n\\n', '\n\n') note = note.replace('\\n ', '\n') if 'note' not in vars[n]: vars[n]['note'] = [note] else: vars[n]['note'].append(note) note = None if depend is not None: if 'depend' not in vars[n]: vars[n]['depend'] = [] for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]): if c not in vars[n]['depend']: vars[n]['depend'].append(c) depend = None if check is not None: if 'check' not in vars[n]: vars[n]['check'] = [] for c in [x.strip() for x in markoutercomma(check).split('@,@')]: if c not in vars[n]['check']: vars[n]['check'].append(c) check = None if dim and 'dimension' not in vars[n]: vars[n]['dimension'] = [] for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]): star = '*' if d == ':': star = ':' if d in params: d = str(params[d]) for p in list(params.keys()): re_1 = re.compile(r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I) m = re_1.match(d) while m: d = m.group('before') + \ str(params[p]) + m.group('after') m = re_1.match(d) if d == star: dl = [star] else: dl = markoutercomma(d, ':').split('@:@') if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*) dl = ['*'] d = '*' if len(dl) == 1 and not dl[0] == star: dl = ['1', dl[0]] if len(dl) == 2: d, v, di = getarrlen(dl, list(block['vars'].keys())) if d[:4] == '1 * ': d = d[4:] if di and di[-4:] == '/(1)': di = di[:-4] if v: savelindims[d] = v, di vars[n]['dimension'].append(d) if 'dimension' in vars[n]: if isintent_c(vars[n]): shape_macro = 'shape' else: shape_macro = 'shape' # 'fshape' if isstringarray(vars[n]): if 'charselector' in vars[n]: d = vars[n]['charselector'] if '*' in d: d = d['*'] errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n' % (d, n, ','.join(vars[n]['dimension']), n, ','.join(vars[n]['dimension'] + [d]))) vars[n]['dimension'].append(d) del vars[n]['charselector'] if 'intent' not in vars[n]: vars[n]['intent'] = [] if 'c' not in vars[n]['intent']: vars[n]['intent'].append('c') else: errmess( "analyzevars: charselector=%r unhandled." 
% (d)) if 'check' not in vars[n] and 'args' in block and n in block['args']: flag = 'depend' not in vars[n] if flag: vars[n]['depend'] = [] vars[n]['check'] = [] if 'dimension' in vars[n]: #/----< no check i = -1 ni = len(vars[n]['dimension']) for d in vars[n]['dimension']: ddeps = [] # dependecies of 'd' ad = '' pd = '' if d not in vars: if d in savelindims: pd, ad = '(', savelindims[d][1] d = savelindims[d][0] else: for r in block['args']: if r not in vars: continue if re.match(r'.*?\b' + r + r'\b', d, re.I): ddeps.append(r) if d in vars: if 'attrspec' in vars[d]: for aa in vars[d]['attrspec']: if aa[:6] == 'depend': ddeps += aa[6:].strip()[1:-1].split(',') if 'depend' in vars[d]: ddeps = ddeps + vars[d]['depend'] i = i + 1 if d in vars and ('depend' not in vars[d]) \ and ('=' not in vars[d]) and (d not in vars[n]['depend']) \ and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]): vars[d]['depend'] = [n] if ni > 1: vars[d]['='] = '%s%s(%s,%s)%s' % ( pd, shape_macro, n, i, ad) else: vars[d]['='] = '%slen(%s)%s' % (pd, n, ad) # /---< no check if 1 and 'check' not in vars[d]: if ni > 1: vars[d]['check'] = ['%s%s(%s,%i)%s==%s' % (pd, shape_macro, n, i, ad, d)] else: vars[d]['check'] = [ '%slen(%s)%s>=%s' % (pd, n, ad, d)] if 'attrspec' not in vars[d]: vars[d]['attrspec'] = ['optional'] if ('optional' not in vars[d]['attrspec']) and\ ('required' not in vars[d]['attrspec']): vars[d]['attrspec'].append('optional') elif d not in ['*', ':']: #/----< no check if flag: if d in vars: if n not in ddeps: vars[n]['depend'].append(d) else: vars[n]['depend'] = vars[n]['depend'] + ddeps elif isstring(vars[n]): length = '1' if 'charselector' in vars[n]: if '*' in vars[n]['charselector']: length = _eval_length(vars[n]['charselector']['*'], params) vars[n]['charselector']['*'] = length elif 'len' in vars[n]['charselector']: length = _eval_length(vars[n]['charselector']['len'], params) del vars[n]['charselector']['len'] vars[n]['charselector']['*'] = length if not vars[n]['check']: del vars[n]['check'] if flag and not vars[n]['depend']: del vars[n]['depend'] if '=' in vars[n]: if 'attrspec' not in vars[n]: vars[n]['attrspec'] = [] if ('optional' not in vars[n]['attrspec']) and \ ('required' not in vars[n]['attrspec']): vars[n]['attrspec'].append('optional') if 'depend' not in vars[n]: vars[n]['depend'] = [] for v, m in list(dep_matches.items()): if m(vars[n]['=']): vars[n]['depend'].append(v) if not vars[n]['depend']: del vars[n]['depend'] if isscalar(vars[n]): vars[n]['='] = _eval_scalar(vars[n]['='], params) for n in list(vars.keys()): if n == block['name']: # n is block name if 'note' in vars[n]: block['note'] = vars[n]['note'] if block['block'] == 'function': if 'result' in block and block['result'] in vars: vars[n] = appenddecl(vars[n], vars[block['result']]) if 'prefix' in block: pr = block['prefix'] ispure = 0 isrec = 1 pr1 = pr.replace('pure', '') ispure = (not pr == pr1) pr = pr1.replace('recursive', '') isrec = (not pr == pr1) m = typespattern[0].match(pr) if m: typespec, selector, attr, edecl = cracktypespec0( m.group('this'), m.group('after')) kindselect, charselect, typename = cracktypespec( typespec, selector) vars[n]['typespec'] = typespec if kindselect: if 'kind' in kindselect: try: kindselect['kind'] = eval( kindselect['kind'], {}, params) except: pass vars[n]['kindselector'] = kindselect if charselect: vars[n]['charselector'] = charselect if typename: vars[n]['typename'] = typename if ispure: vars[n] = setattrspec(vars[n], 'pure') if isrec: vars[n] = setattrspec(vars[n], 
'recursive') else: outmess( 'analyzevars: prefix (%s) was not used\n' % repr(block['prefix'])) if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']: if 'commonvars' in block: neededvars = copy.copy(block['args'] + block['commonvars']) else: neededvars = copy.copy(block['args']) for n in list(vars.keys()): if l_or(isintent_callback, isintent_aux)(vars[n]): neededvars.append(n) if 'entry' in block: neededvars.extend(list(block['entry'].keys())) for k in list(block['entry'].keys()): for n in block['entry'][k]: if n not in neededvars: neededvars.append(n) if block['block'] == 'function': if 'result' in block: neededvars.append(block['result']) else: neededvars.append(block['name']) if block['block'] in ['subroutine', 'function']: name = block['name'] if name in vars and 'intent' in vars[name]: block['intent'] = vars[name]['intent'] if block['block'] == 'type': neededvars.extend(list(vars.keys())) for n in list(vars.keys()): if n not in neededvars: del vars[n] return vars analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I) def expr2name(a, block, args=[]): orig_a = a a_is_expr = not analyzeargs_re_1.match(a) if a_is_expr: # `a` is an expression implicitrules, attrrules = buildimplicitrules(block) at = determineexprtype(a, block['vars'], implicitrules) na = 'e_' for c in a: c = c.lower() if c not in string.ascii_lowercase + string.digits: c = '_' na = na + c if na[-1] == '_': na = na + 'e' else: na = na + '_e' a = na while a in block['vars'] or a in block['args']: a = a + 'r' if a in args: k = 1 while a + str(k) in args: k = k + 1 a = a + str(k) if a_is_expr: block['vars'][a] = at else: if a not in block['vars']: if orig_a in block['vars']: block['vars'][a] = block['vars'][orig_a] else: block['vars'][a] = {} if 'externals' in block and orig_a in block['externals'] + block['interfaced']: block['vars'][a] = setattrspec(block['vars'][a], 'external') return a def analyzeargs(block): setmesstext(block) implicitrules, attrrules = buildimplicitrules(block) if 'args' not in block: block['args'] = [] args = [] for a in block['args']: a = expr2name(a, block, args) args.append(a) block['args'] = args if 'entry' in block: for k, args1 in list(block['entry'].items()): for a in args1: if a not in block['vars']: block['vars'][a] = {} for b in block['body']: if b['name'] in args: if 'externals' not in block: block['externals'] = [] if b['name'] not in block['externals']: block['externals'].append(b['name']) if 'result' in block and block['result'] not in block['vars']: block['vars'][block['result']] = {} return block determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z', re.I) determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>[\w]+)|)\Z', re.I) determineexprtype_re_3 = re.compile( r'\A[+-]?[\d.]+[\d+\-de.]*(_(?P<name>[\w]+)|)\Z', re.I) determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I) determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I) def _ensure_exprdict(r): if isinstance(r, int): return {'typespec': 'integer'} if isinstance(r, float): return {'typespec': 'real'} if isinstance(r, complex): return {'typespec': 'complex'} if isinstance(r, dict): return r raise AssertionError(repr(r)) def determineexprtype(expr, vars, rules={}): if expr in vars: return _ensure_exprdict(vars[expr]) expr = expr.strip() if determineexprtype_re_1.match(expr): return {'typespec': 'complex'} m = determineexprtype_re_2.match(expr) if m: if 'name' in m.groupdict() and m.group('name'): outmess( 'determineexprtype: selected kind types not supported (%s)\n' % 
repr(expr)) return {'typespec': 'integer'} m = determineexprtype_re_3.match(expr) if m: if 'name' in m.groupdict() and m.group('name'): outmess( 'determineexprtype: selected kind types not supported (%s)\n' % repr(expr)) return {'typespec': 'real'} for op in ['+', '-', '*', '/']: for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]: if e in vars: return _ensure_exprdict(vars[e]) t = {} if determineexprtype_re_4.match(expr): # in parenthesis t = determineexprtype(expr[1:-1], vars, rules) else: m = determineexprtype_re_5.match(expr) if m: rn = m.group('name') t = determineexprtype(m.group('name'), vars, rules) if t and 'attrspec' in t: del t['attrspec'] if not t: if rn[0] in rules: return _ensure_exprdict(rules[rn[0]]) if expr[0] in '\'"': return {'typespec': 'character', 'charselector': {'*': '*'}} if not t: outmess( 'determineexprtype: could not determine expressions (%s) type.\n' % (repr(expr))) return t ###### def crack2fortrangen(block, tab='\n', as_interface=False): global skipfuncs, onlyfuncs setmesstext(block) ret = '' if isinstance(block, list): for g in block: if g and g['block'] in ['function', 'subroutine']: if g['name'] in skipfuncs: continue if onlyfuncs and g['name'] not in onlyfuncs: continue ret = ret + crack2fortrangen(g, tab, as_interface=as_interface) return ret prefix = '' name = '' args = '' blocktype = block['block'] if blocktype == 'program': return '' argsl = [] if 'name' in block: name = block['name'] if 'args' in block: vars = block['vars'] for a in block['args']: a = expr2name(a, block, argsl) if not isintent_callback(vars[a]): argsl.append(a) if block['block'] == 'function' or argsl: args = '(%s)' % ','.join(argsl) f2pyenhancements = '' if 'f2pyenhancements' in block: for k in list(block['f2pyenhancements'].keys()): f2pyenhancements = '%s%s%s %s' % ( f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k]) intent_lst = block.get('intent', [])[:] if blocktype == 'function' and 'callback' in intent_lst: intent_lst.remove('callback') if intent_lst: f2pyenhancements = '%s%sintent(%s) %s' %\ (f2pyenhancements, tab + tabchar, ','.join(intent_lst), name) use = '' if 'use' in block: use = use2fortran(block['use'], tab + tabchar) common = '' if 'common' in block: common = common2fortran(block['common'], tab + tabchar) if name == 'unknown_interface': name = '' result = '' if 'result' in block: result = ' result (%s)' % block['result'] if block['result'] not in argsl: argsl.append(block['result']) body = crack2fortrangen(block['body'], tab + tabchar) vars = vars2fortran( block, block['vars'], argsl, tab + tabchar, as_interface=as_interface) mess = '' if 'from' in block and not as_interface: mess = '! 
in %s' % block['from'] if 'entry' in block: entry_stmts = '' for k, i in list(block['entry'].items()): entry_stmts = '%s%sentry %s(%s)' \ % (entry_stmts, tab + tabchar, k, ','.join(i)) body = body + entry_stmts if blocktype == 'block data' and name == '_BLOCK_DATA_': name = '' ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % ( tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name) return ret def common2fortran(common, tab=''): ret = '' for k in list(common.keys()): if k == '_BLNK_': ret = '%s%scommon %s' % (ret, tab, ','.join(common[k])) else: ret = '%s%scommon /%s/ %s' % (ret, tab, k, ','.join(common[k])) return ret def use2fortran(use, tab=''): ret = '' for m in list(use.keys()): ret = '%s%suse %s,' % (ret, tab, m) if use[m] == {}: if ret and ret[-1] == ',': ret = ret[:-1] continue if 'only' in use[m] and use[m]['only']: ret = '%s only:' % (ret) if 'map' in use[m] and use[m]['map']: c = ' ' for k in list(use[m]['map'].keys()): if k == use[m]['map'][k]: ret = '%s%s%s' % (ret, c, k) c = ',' else: ret = '%s%s%s=>%s' % (ret, c, k, use[m]['map'][k]) c = ',' if ret and ret[-1] == ',': ret = ret[:-1] return ret def true_intent_list(var): lst = var['intent'] ret = [] for intent in lst: try: c = eval('isintent_%s(var)' % intent) except NameError: c = 0 if c: ret.append(intent) return ret def vars2fortran(block, vars, args, tab='', as_interface=False): """ TODO: public sub ... """ setmesstext(block) ret = '' nout = [] for a in args: if a in block['vars']: nout.append(a) if 'commonvars' in block: for a in block['commonvars']: if a in vars: if a not in nout: nout.append(a) else: errmess( 'vars2fortran: Confused?!: "%s" is not defined in vars.\n' % a) if 'varnames' in block: nout.extend(block['varnames']) if not as_interface: for a in list(vars.keys()): if a not in nout: nout.append(a) for a in nout: if 'depend' in vars[a]: for d in vars[a]['depend']: if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: errmess( 'vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n' % (a, d)) if 'externals' in block and a in block['externals']: if isintent_callback(vars[a]): ret = '%s%sintent(callback) %s' % (ret, tab, a) ret = '%s%sexternal %s' % (ret, tab, a) if isoptional(vars[a]): ret = '%s%soptional %s' % (ret, tab, a) if a in vars and 'typespec' not in vars[a]: continue cont = 1 for b in block['body']: if a == b['name'] and b['block'] == 'function': cont = 0 break if cont: continue if a not in vars: show(vars) outmess('vars2fortran: No definition for argument "%s".\n' % a) continue if a == block['name'] and not block['block'] == 'function': continue if 'typespec' not in vars[a]: if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: if a in args: ret = '%s%sexternal %s' % (ret, tab, a) continue show(vars[a]) outmess('vars2fortran: No typespec for argument "%s".\n' % a) continue vardef = vars[a]['typespec'] if vardef == 'type' and 'typename' in vars[a]: vardef = '%s(%s)' % (vardef, vars[a]['typename']) selector = {} if 'kindselector' in vars[a]: selector = vars[a]['kindselector'] elif 'charselector' in vars[a]: selector = vars[a]['charselector'] if '*' in selector: if selector['*'] in ['*', ':']: vardef = '%s*(%s)' % (vardef, selector['*']) else: vardef = '%s*%s' % (vardef, selector['*']) else: if 'len' in selector: vardef = '%s(len=%s' % (vardef, selector['len']) if 'kind' in selector: vardef = '%s,kind=%s)' % (vardef, selector['kind']) else: vardef = '%s)' % (vardef) elif 'kind' in selector: vardef = 
'%s(kind=%s)' % (vardef, selector['kind']) c = ' ' if 'attrspec' in vars[a]: attr = [] for l in vars[a]['attrspec']: if l not in ['external']: attr.append(l) if attr: vardef = '%s, %s' % (vardef, ','.join(attr)) c = ',' if 'dimension' in vars[a]: vardef = '%s%sdimension(%s)' % ( vardef, c, ','.join(vars[a]['dimension'])) c = ',' if 'intent' in vars[a]: lst = true_intent_list(vars[a]) if lst: vardef = '%s%sintent(%s)' % (vardef, c, ','.join(lst)) c = ',' if 'check' in vars[a]: vardef = '%s%scheck(%s)' % (vardef, c, ','.join(vars[a]['check'])) c = ',' if 'depend' in vars[a]: vardef = '%s%sdepend(%s)' % ( vardef, c, ','.join(vars[a]['depend'])) c = ',' if '=' in vars[a]: v = vars[a]['='] if vars[a]['typespec'] in ['complex', 'double complex']: try: v = eval(v) v = '(%s,%s)' % (v.real, v.imag) except: pass vardef = '%s :: %s=%s' % (vardef, a, v) else: vardef = '%s :: %s' % (vardef, a) ret = '%s%s%s' % (ret, tab, vardef) return ret ###### def crackfortran(files): global usermodules outmess('Reading fortran codes...\n', 0) readfortrancode(files, crackline) outmess('Post-processing...\n', 0) usermodules = [] postlist = postcrack(grouplist[0]) outmess('Post-processing (stage 2)...\n', 0) postlist = postcrack2(postlist) return usermodules + postlist def crack2fortran(block): global f2py_version pyf = crack2fortrangen(block) + '\n' header = """! -*- f90 -*- ! Note: the context of this file is case sensitive. """ footer = """ ! This file was auto-generated with f2py (version:%s). ! See http://cens.ioc.ee/projects/f2py2e/ """ % (f2py_version) return header + pyf + footer if __name__ == "__main__": files = [] funcs = [] f = 1 f2 = 0 f3 = 0 showblocklist = 0 for l in sys.argv[1:]: if l == '': pass elif l[0] == ':': f = 0 elif l == '-quiet': quiet = 1 verbose = 0 elif l == '-verbose': verbose = 2 quiet = 0 elif l == '-fix': if strictf77: outmess( 'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0) skipemptyends = 1 sourcecodeform = 'fix' elif l == '-skipemptyends': skipemptyends = 1 elif l == '--ignore-contains': ignorecontains = 1 elif l == '-f77': strictf77 = 1 sourcecodeform = 'fix' elif l == '-f90': strictf77 = 0 sourcecodeform = 'free' skipemptyends = 1 elif l == '-h': f2 = 1 elif l == '-show': showblocklist = 1 elif l == '-m': f3 = 1 elif l[0] == '-': errmess('Unknown option %s\n' % repr(l)) elif f2: f2 = 0 pyffilename = l elif f3: f3 = 0 f77modulename = l elif f: try: open(l).close() files.append(l) except IOError as detail: errmess('IOError: %s\n' % str(detail)) else: funcs.append(l) if not strictf77 and f77modulename and not skipemptyends: outmess("""\ Warning: You have specified a module name for non-Fortran-77 code that should not need one (except if you are scanning F90 code for non-module blocks, but then you should use the flag -skipemptyends and also be sure that the files do not contain programs without a program statement). """, 0) postlist = crackfortran(files) if pyffilename: outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0) pyf = crack2fortran(postlist) f = open(pyffilename, 'w') f.write(pyf) f.close() if showblocklist: show(postlist)
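# A minimal usage sketch of the two entry points above: crackfortran() parses
# Fortran sources into nested block dictionaries, and crack2fortran() renders
# those blocks back out as a .pyf signature file.  The helper name
# _example_usage and the file names 'example.f90' / 'example.pyf' are
# placeholders for illustration, not part of the f2py API.
def _example_usage(source_files=('example.f90',), out='example.pyf'):
    # Parse the Fortran sources into block dictionaries.
    blocks = crackfortran(list(source_files))
    # Render the parsed blocks back as an f2py signature (.pyf) file.
    with open(out, 'w') as fh:
        fh.write(crack2fortran(blocks))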
index.d.ts
export * from './MappedAction';
coderepo.go
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package modfetch import ( "archive/zip" "bytes" "errors" "fmt" "io" "io/fs" "os" "path" "sort" "strings" "time" "github.com/jsonnetmod/jsonnetmod/internal/forked/cmd/go/internalpkg/modfetch/codehost" "golang.org/x/mod/modfile" "golang.org/x/mod/module" "golang.org/x/mod/semver" modzip "golang.org/x/mod/zip" ) // A codeRepo implements modfetch.Repo using an underlying codehost.Repo. type codeRepo struct { modPath string // code is the repository containing this module. code codehost.Repo // codeRoot is the import path at the root of code. codeRoot string // codeDir is the directory (relative to root) at which we expect to find the module. // If pathMajor is non-empty and codeRoot is not the full modPath, // then we look in both codeDir and codeDir/pathMajor[1:]. codeDir string // pathMajor is the suffix of modPath that indicates its major version, // or the empty string if modPath is at major version 0 or 1. // // pathMajor is typically of the form "/vN", but possibly ".vN", or // ".vN-unstable" for modules resolved using gopkg.in. pathMajor string // pathPrefix is the prefix of modPath that excludes pathMajor. // It is used only for logging. pathPrefix string // pseudoMajor is the major version prefix to require when generating // pseudo-versions for this module, derived from the module path. pseudoMajor // is empty if the module path does not include a version suffix (that is, // accepts either v0 or v1). pseudoMajor string } // newCodeRepo returns a Repo that reads the source code for the module with the // given path, from the repo stored in code, with the root of the repo // containing the path given by codeRoot. func newCodeRepo(code codehost.Repo, codeRoot, path string) (Repo, error) { if !hasPathPrefix(path, codeRoot) { return nil, fmt.Errorf("mismatched repo: found %s for %s", codeRoot, path) } pathPrefix, pathMajor, ok := module.SplitPathVersion(path) if !ok { return nil, fmt.Errorf("invalid module path %q", path) } if codeRoot == path { pathPrefix = path } pseudoMajor := module.PathMajorPrefix(pathMajor) // Compute codeDir = bar, the subdirectory within the repo // corresponding to the module root. // // At this point we might have: // path = github.com/rsc/foo/bar/v2 // codeRoot = github.com/rsc/foo // pathPrefix = github.com/rsc/foo/bar // pathMajor = /v2 // pseudoMajor = v2 // // which gives // codeDir = bar // // We know that pathPrefix is a prefix of path, and codeRoot is a prefix of // path, but codeRoot may or may not be a prefix of pathPrefix, because // codeRoot may be the entire path (in which case codeDir should be empty). // That occurs in two situations. // // One is when a go-import meta tag resolves the complete module path, // including the pathMajor suffix: // path = nanomsg.org/go/mangos/v2 // codeRoot = nanomsg.org/go/mangos/v2 // pathPrefix = nanomsg.org/go/mangos // pathMajor = /v2 // pseudoMajor = v2 // // The other is similar: for gopkg.in only, the major version is encoded // with a dot rather than a slash, and thus can't be in a subdirectory. 
// path = gopkg.in/yaml.v2 // codeRoot = gopkg.in/yaml.v2 // pathPrefix = gopkg.in/yaml // pathMajor = .v2 // pseudoMajor = v2 // codeDir := "" if codeRoot != path { if !hasPathPrefix(pathPrefix, codeRoot) { return nil, fmt.Errorf("repository rooted at %s cannot contain module %s", codeRoot, path) } codeDir = strings.Trim(pathPrefix[len(codeRoot):], "/") } r := &codeRepo{ modPath: path, code: code, codeRoot: codeRoot, codeDir: codeDir, pathPrefix: pathPrefix, pathMajor: pathMajor, pseudoMajor: pseudoMajor, } return r, nil } func (r *codeRepo) ModulePath() string { return r.modPath } func (r *codeRepo) Versions(prefix string) ([]string, error) { // Special case: gopkg.in/macaroon-bakery.v2-unstable // does not use the v2 tags (those are for macaroon-bakery.v2). // It has no possible tags at all. if strings.HasPrefix(r.modPath, "gopkg.in/") && strings.HasSuffix(r.modPath, "-unstable") { return nil, nil } p := prefix if r.codeDir != "" { p = r.codeDir + "/" + p } tags, err := r.code.Tags(p) if err != nil { return nil, &module.ModuleError{ Path: r.modPath, Err: err, } } var list, incompatible []string for _, tag := range tags { if !strings.HasPrefix(tag, p) { continue } v := tag if r.codeDir != "" { v = v[len(r.codeDir)+1:] } if v == "" || v != module.CanonicalVersion(v) || IsPseudoVersion(v) { continue } if err := module.CheckPathMajor(v, r.pathMajor); err != nil { if r.codeDir == "" && r.pathMajor == "" && semver.Major(v) > "v1" { incompatible = append(incompatible, v) } continue } list = append(list, v) } SortVersions(list) SortVersions(incompatible) return r.appendIncompatibleVersions(list, incompatible) } // appendIncompatibleVersions appends "+incompatible" versions to list if // appropriate, returning the final list. // // The incompatible list contains candidate versions without the '+incompatible' // prefix. // // Both list and incompatible must be sorted in semantic order. func (r *codeRepo) appendIncompatibleVersions(list, incompatible []string) ([]string, error) { if len(incompatible) == 0 || r.pathMajor != "" { // No +incompatible versions are possible, so no need to check them. return list, nil } versionHasGoMod := func(v string) (bool, error) { _, err := r.code.ReadFile(v, "go.mod", codehost.MaxGoMod) if err == nil { return true, nil } if !os.IsNotExist(err) { return false, &module.ModuleError{ Path: r.modPath, Err: err, } } return false, nil } if len(list) > 0 { ok, err := versionHasGoMod(list[len(list)-1]) if err != nil { return nil, err } if ok { // The latest compatible version has a go.mod file, so assume that all // subsequent versions do as well, and do not include any +incompatible // versions. Even if we are wrong, the author clearly intends module // consumers to be on the v0/v1 line instead of a higher +incompatible // version. (See https://golang.org/issue/34189.) // // We know of at least two examples where this behavior is desired // (github.com/russross/[email protected] and // github.com/libp2p/[email protected]), and (as of 2019-10-29) have no // concrete examples for which it is undesired. 
return list, nil } } var ( lastMajor string lastMajorHasGoMod bool ) for i, v := range incompatible { major := semver.Major(v) if major != lastMajor { rem := incompatible[i:] j := sort.Search(len(rem), func(j int) bool { return semver.Major(rem[j]) != major }) latestAtMajor := rem[j-1] var err error lastMajor = major lastMajorHasGoMod, err = versionHasGoMod(latestAtMajor) if err != nil { return nil, err } } if lastMajorHasGoMod { // The latest release of this major version has a go.mod file, so it is // not allowed as +incompatible. It would be confusing to include some // minor versions of this major version as +incompatible but require // semantic import versioning for others, so drop all +incompatible // versions for this major version. // // If we're wrong about a minor version in the middle, users will still be // able to 'go get' specific tags for that version explicitly — they just // won't appear in 'go list' or as the results for queries with inequality // bounds. continue } list = append(list, v+"+incompatible") } return list, nil } func (r *codeRepo) Stat(rev string) (*RevInfo, error) { if rev == "latest" { return r.Latest() } codeRev := r.revToRev(rev) info, err := r.code.Stat(codeRev) if err != nil { return nil, &module.ModuleError{ Path: r.modPath, Err: &module.InvalidVersionError{ Version: rev, Err: err, }, } } return r.convert(info, rev) } func (r *codeRepo) Latest() (*RevInfo, error) { info, err := r.code.Latest() if err != nil { return nil, err } return r.convert(info, "") } // convert converts a version as reported by the code host to a version as // interpreted by the module system. // // If statVers is a valid module version, it is used for the Version field. // Otherwise, the Version is derived from the passed-in info and recent tags. func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, error) { info2 := &RevInfo{ Name: info.Name, Short: info.Short, Time: info.Time, } // If this is a plain tag (no dir/ prefix) // and the module path is unversioned, // and if the underlying file tree has no go.mod, // then allow using the tag with a +incompatible suffix. var canUseIncompatible func() bool canUseIncompatible = func() bool { var ok bool if r.codeDir == "" && r.pathMajor == "" { _, errGoMod := r.code.ReadFile(info.Name, "go.mod", codehost.MaxGoMod) if errGoMod != nil { ok = true } } canUseIncompatible = func() bool { return ok } return ok } invalidf := func(format string, args ...interface{}) error { return &module.ModuleError{ Path: r.modPath, Err: &module.InvalidVersionError{ Version: info2.Version, Err: fmt.Errorf(format, args...), }, } } // checkGoMod verifies that the go.mod file for the module exists or does not // exist as required by info2.Version and the module path represented by r. checkGoMod := func() (*RevInfo, error) { // If r.codeDir is non-empty, then the go.mod file must exist: the module // author — not the module consumer, — gets to decide how to carve up the repo // into modules. // // Conversely, if the go.mod file exists, the module author — not the module // consumer — gets to determine the module's path // // r.findDir verifies both of these conditions. Execute it now so that // r.Stat will correctly return a notExistError if the go.mod location or // declared module path doesn't match. _, _, _, err := r.findDir(info2.Version) if err != nil { // TODO: It would be nice to return an error like "not a module". // Right now we return "missing go.mod", which is a little confusing. 
return nil, &module.ModuleError{ Path: r.modPath, Err: &module.InvalidVersionError{ Version: info2.Version, Err: notExistError{err: err}, }, } } // If the version is +incompatible, then the go.mod file must not exist: // +incompatible is not an ongoing opt-out from semantic import versioning. if strings.HasSuffix(info2.Version, "+incompatible") { if !canUseIncompatible() { if r.pathMajor != "" { return nil, invalidf("+incompatible suffix not allowed: module path includes a major version suffix, so major version must match") } else { return nil, invalidf("+incompatible suffix not allowed: module contains a go.mod file, so semantic import versioning is required") } } if err := module.CheckPathMajor(strings.TrimSuffix(info2.Version, "+incompatible"), r.pathMajor); err == nil { return nil, invalidf("+incompatible suffix not allowed: major version %s is compatible", semver.Major(info2.Version)) } } return info2, nil } // Determine version. // // If statVers is canonical, then the original call was repo.Stat(statVers). // Since the version is canonical, we must not resolve it to anything but // itself, possibly with a '+incompatible' annotation: we do not need to do // the work required to look for an arbitrary pseudo-version. if statVers != "" && statVers == module.CanonicalVersion(statVers) { info2.Version = statVers if IsPseudoVersion(info2.Version) { if err := r.validatePseudoVersion(info, info2.Version); err != nil { return nil, err } return checkGoMod() } if err := module.CheckPathMajor(info2.Version, r.pathMajor); err != nil { if canUseIncompatible() { info2.Version += "+incompatible" return checkGoMod() } else { if vErr, ok := err.(*module.InvalidVersionError); ok { // We're going to describe why the version is invalid in more detail, // so strip out the existing “invalid version” wrapper. err = vErr.Err } return nil, invalidf("module contains a go.mod file, so major version must be compatible: %v", err) } } return checkGoMod() } // statVers is empty or non-canonical, so we need to resolve it to a canonical // version or pseudo-version. // Derive or verify a version from a code repo tag. // Tag must have a prefix matching codeDir. tagPrefix := "" if r.codeDir != "" { tagPrefix = r.codeDir + "/" } isRetracted, err := r.retractedVersions() if err != nil { isRetracted = func(string) bool { return false } } // tagToVersion returns the version obtained by trimming tagPrefix from tag. // If the tag is invalid, retracted, or a pseudo-version, tagToVersion returns // an empty version. tagToVersion := func(tag string) (v string, tagIsCanonical bool) { if !strings.HasPrefix(tag, tagPrefix) { return "", false } trimmed := tag[len(tagPrefix):] // Tags that look like pseudo-versions would be confusing. Ignore them. if IsPseudoVersion(tag) { return "", false } v = semver.Canonical(trimmed) // Not module.Canonical: we don't want to pick up an explicit "+incompatible" suffix from the tag. if v == "" || !strings.HasPrefix(trimmed, v) { return "", false // Invalid or incomplete version (just vX or vX.Y). } if isRetracted(v) { return "", false } if v == trimmed { tagIsCanonical = true } if err := module.CheckPathMajor(v, r.pathMajor); err != nil { if canUseIncompatible() { return v + "+incompatible", tagIsCanonical } return "", false } return v, tagIsCanonical } // If the VCS gave us a valid version, use that. 
if v, tagIsCanonical := tagToVersion(info.Version); tagIsCanonical { info2.Version = v return checkGoMod() } // Look through the tags on the revision for either a usable canonical version // or an appropriate base for a pseudo-version. var pseudoBase string for _, pathTag := range info.Tags { v, tagIsCanonical := tagToVersion(pathTag) if tagIsCanonical { if statVers != "" && semver.Compare(v, statVers) == 0 { // The user requested a non-canonical version, but the tag for the // canonical equivalent refers to the same revision. Use it. info2.Version = v return checkGoMod() } else { // Save the highest canonical tag for the revision. If we don't find a // better match, we'll use it as the canonical version. // // NOTE: Do not replace this with semver.Max. Despite the name, // semver.Max *also* canonicalizes its arguments, which uses // semver.Canonical instead of module.CanonicalVersion and thereby // strips our "+incompatible" suffix. if semver.Compare(info2.Version, v) < 0 { info2.Version = v } } } else if v != "" && semver.Compare(v, statVers) == 0 { // The user explicitly requested something equivalent to this tag. We // can't use the version from the tag directly: since the tag is not // canonical, it could be ambiguous. For example, tags v0.0.1+a and // v0.0.1+b might both exist and refer to different revisions. // // The tag is otherwise valid for the module, so we can at least use it as // the base of an unambiguous pseudo-version. // // If multiple tags match, tagToVersion will canonicalize them to the same // base version. pseudoBase = v } } // If we found any canonical tag for the revision, return it. // Even if we found a good pseudo-version base, a canonical version is better. if info2.Version != "" { return checkGoMod() } // Find the highest tagged version in the revision's history, subject to // major version and +incompatible constraints. Use that version as the // pseudo-version base so that the pseudo-version sorts higher. Ignore // retracted versions. allowedMajor := func(major string) func(v string) bool { return func(v string) bool { return (major == "" || semver.Major(v) == major) && !isRetracted(v) } } if pseudoBase == "" { var tag string if r.pseudoMajor != "" || canUseIncompatible() { tag, _ = r.code.RecentTag(info.Name, tagPrefix, allowedMajor(r.pseudoMajor)) } else { // Allow either v1 or v0, but not incompatible higher versions. tag, _ = r.code.RecentTag(info.Name, tagPrefix, allowedMajor("v1")) if tag == "" { tag, _ = r.code.RecentTag(info.Name, tagPrefix, allowedMajor("v0")) } } pseudoBase, _ = tagToVersion(tag) // empty if the tag is invalid } info2.Version = PseudoVersion(r.pseudoMajor, pseudoBase, info.Time, info.Short) return checkGoMod() } // validatePseudoVersion checks that version has a major version compatible with // r.modPath and encodes a base version and commit metadata that agrees with // info. // // Note that verifying a nontrivial base version in particular may be somewhat // expensive: in order to do so, r.code.DescendsFrom will need to fetch at least // enough of the commit history to find a path between version and its base. // Fortunately, many pseudo-versions — such as those for untagged repositories — // have trivial bases! 
func (r *codeRepo) validatePseudoVersion(info *codehost.RevInfo, version string) (err error) { defer func() { if err != nil { if _, ok := err.(*module.ModuleError); !ok { if _, ok := err.(*module.InvalidVersionError); !ok { err = &module.InvalidVersionError{Version: version, Pseudo: true, Err: err} } err = &module.ModuleError{Path: r.modPath, Err: err} } } }() if err := module.CheckPathMajor(version, r.pathMajor); err != nil { return err } rev, err := PseudoVersionRev(version) if err != nil { return err } if rev != info.Short { switch { case strings.HasPrefix(rev, info.Short): return fmt.Errorf("revision is longer than canonical (%s)", info.Short) case strings.HasPrefix(info.Short, rev): return fmt.Errorf("revision is shorter than canonical (%s)", info.Short) default: return fmt.Errorf("does not match short name of revision (%s)", info.Short) } } t, err := PseudoVersionTime(version) if err != nil { return err } if !t.Equal(info.Time.Truncate(time.Second)) { return fmt.Errorf("does not match version-control timestamp (expected %s)", info.Time.UTC().Format(pseudoVersionTimestampFormat)) } tagPrefix := "" if r.codeDir != "" { tagPrefix = r.codeDir + "/" } // A pseudo-version should have a precedence just above its parent revisions, // and no higher. Otherwise, it would be possible for library authors to "pin" // dependency versions (and bypass the usual minimum version selection) by // naming an extremely high pseudo-version rather than an accurate one. // // Moreover, if we allow a pseudo-version to use any arbitrary pre-release // tag, we end up with infinitely many possible names for each commit. Each // name consumes resources in the module cache and proxies, so we want to // restrict them to a finite set under control of the module author. // // We address both of these issues by requiring the tag upon which the // pseudo-version is based to refer to some ancestor of the revision. We // prefer the highest such tag when constructing a new pseudo-version, but do // not enforce that property when resolving existing pseudo-versions: we don't // know when the parent tags were added, and the highest-tagged parent may not // have existed when the pseudo-version was first resolved. base, err := PseudoVersionBase(strings.TrimSuffix(version, "+incompatible")) if err != nil { return err } if base == "" { if r.pseudoMajor == "" && semver.Major(version) == "v1" { return fmt.Errorf("major version without preceding tag must be v0, not v1") } return nil } else { for _, tag := range info.Tags { versionOnly := strings.TrimPrefix(tag, tagPrefix) if versionOnly == base { // The base version is canonical, so if the version from the tag is // literally equal (not just equivalent), then the tag is canonical too. // // We allow pseudo-versions to be derived from non-canonical tags on the // same commit, so that tags like "v1.1.0+some-metadata" resolve as // close as possible to the canonical version ("v1.1.0") while still // enforcing a total ordering ("v1.1.1-0.[…]" with a unique suffix). // // However, canonical tags already have a total ordering, so there is no // reason not to use the canonical tag directly, and we know that the // canonical tag must already exist because the pseudo-version is // derived from it. In that case, referring to the revision by a // pseudo-version derived from its own canonical tag is just confusing. 
return fmt.Errorf("tag (%s) found on revision %s is already canonical, so should not be replaced with a pseudo-version derived from that tag", tag, rev) } } } tags, err := r.code.Tags(tagPrefix + base) if err != nil { return err } var lastTag string // Prefer to log some real tag rather than a canonically-equivalent base. ancestorFound := false for _, tag := range tags { versionOnly := strings.TrimPrefix(tag, tagPrefix) if semver.Compare(versionOnly, base) == 0 { lastTag = tag ancestorFound, err = r.code.DescendsFrom(info.Name, tag) if ancestorFound { break } } } if lastTag == "" { return fmt.Errorf("preceding tag (%s) not found", base) } if !ancestorFound { if err != nil { return err } rev, err := PseudoVersionRev(version) if err != nil { return fmt.Errorf("not a descendent of preceding tag (%s)", lastTag) } return fmt.Errorf("revision %s is not a descendent of preceding tag (%s)", rev, lastTag) } return nil } func (r *codeRepo) revToRev(rev string) string { if semver.IsValid(rev) { if IsPseudoVersion(rev) { r, _ := PseudoVersionRev(rev) return r } if semver.Build(rev) == "+incompatible" { rev = rev[:len(rev)-len("+incompatible")] } if r.codeDir == "" { return rev } return r.codeDir + "/" + rev } return rev } func (r *codeRepo) versionToRev(version string) (rev string, err error) { if !semver.IsValid(version) { return "", &module.ModuleError{ Path: r.modPath, Err: &module.InvalidVersionError{ Version: version, Err: errors.New("syntax error"), }, } } return r.revToRev(version), nil } // findDir locates the directory within the repo containing the module. // // If r.pathMajor is non-empty, this can be either r.codeDir or — if a go.mod // file exists — r.codeDir/r.pathMajor[1:]. func (r *codeRepo) findDir(version string) (rev, dir string, gomod []byte, err error) { rev, err = r.versionToRev(version) if err != nil { return "", "", nil, err } // Load info about go.mod but delay consideration // (except I/O error) until we rule out v2/go.mod. file1 := path.Join(r.codeDir, "go.mod") gomod1, err1 := r.code.ReadFile(rev, file1, codehost.MaxGoMod) if err1 != nil && !os.IsNotExist(err1) { return "", "", nil, fmt.Errorf("reading %s/%s at revision %s: %v", r.pathPrefix, file1, rev, err1) } mpath1 := modfile.ModulePath(gomod1) found1 := err1 == nil && (isMajor(mpath1, r.pathMajor) || r.canReplaceMismatchedVersionDueToBug(mpath1)) var file2 string if r.pathMajor != "" && r.codeRoot != r.modPath && !strings.HasPrefix(r.pathMajor, ".") { // Suppose pathMajor is "/v2". // Either go.mod should claim v2 and v2/go.mod should not exist, // or v2/go.mod should exist and claim v2. Not both. // Note that we don't check the full path, just the major suffix, // because of replacement modules. This might be a fork of // the real module, found at a different path, usable only in // a replace directive. 
dir2 := path.Join(r.codeDir, r.pathMajor[1:]) file2 = path.Join(dir2, "go.mod") gomod2, err2 := r.code.ReadFile(rev, file2, codehost.MaxGoMod) if err2 != nil && !os.IsNotExist(err2) { return "", "", nil, fmt.Errorf("reading %s/%s at revision %s: %v", r.pathPrefix, file2, rev, err2) } mpath2 := modfile.ModulePath(gomod2) found2 := err2 == nil && isMajor(mpath2, r.pathMajor) if found1 && found2 { return "", "", nil, fmt.Errorf("%s/%s and ...%s/go.mod both have ...%s module paths at revision %s", r.pathPrefix, file1, r.pathMajor, r.pathMajor, rev) } if found2 { return rev, dir2, gomod2, nil } if err2 == nil { if mpath2 == "" { return "", "", nil, fmt.Errorf("%s/%s is missing module path at revision %s", r.pathPrefix, file2, rev) } return "", "", nil, fmt.Errorf("%s/%s has non-...%s module path %q at revision %s", r.pathPrefix, file2, r.pathMajor, mpath2, rev) } } // Not v2/go.mod, so it's either go.mod or nothing. Which is it? if found1 { // Explicit go.mod with matching major version ok. return rev, r.codeDir, gomod1, nil } if err1 == nil { // Explicit go.mod with non-matching major version disallowed. suffix := "" if file2 != "" { suffix = fmt.Sprintf(" (and ...%s/go.mod does not exist)", r.pathMajor) } if mpath1 == "" { return "", "", nil, fmt.Errorf("%s is missing module path%s at revision %s", file1, suffix, rev) } if r.pathMajor != "" { // ".v1", ".v2" for gopkg.in return "", "", nil, fmt.Errorf("%s has non-...%s module path %q%s at revision %s", file1, r.pathMajor, mpath1, suffix, rev) } if _, _, ok := module.SplitPathVersion(mpath1); !ok { return "", "", nil, fmt.Errorf("%s has malformed module path %q%s at revision %s", file1, mpath1, suffix, rev) } return "", "", nil, fmt.Errorf("%s has post-%s module path %q%s at revision %s", file1, semver.Major(version), mpath1, suffix, rev) } if r.codeDir == "" && (r.pathMajor == "" || strings.HasPrefix(r.pathMajor, ".")) { // Implicit go.mod at root of repo OK for v0/v1 and for gopkg.in. return rev, "", nil, nil } // Implicit go.mod below root of repo or at v2+ disallowed. // Be clear about possibility of using either location for v2+. if file2 != "" { return "", "", nil, fmt.Errorf("missing %s/go.mod and ...%s/go.mod at revision %s", r.pathPrefix, r.pathMajor, rev) } return "", "", nil, fmt.Errorf("missing %s/go.mod at revision %s", r.pathPrefix, rev) } // isMajor reports whether the versions allowed for mpath are compatible with // the major version(s) implied by pathMajor, or false if mpath has an invalid // version suffix. func isMajor(mpath, pathMajor string) bool { if mpath == "" { // If we don't have a path, we don't know what version(s) it is compatible with. return false } _, mpathMajor, ok := module.SplitPathVersion(mpath) if !ok { // An invalid module path is not compatible with any version. return false } if pathMajor == "" { // All of the valid versions for a gopkg.in module that requires major // version v0 or v1 are compatible with the "v0 or v1" implied by an empty // pathMajor. switch module.PathMajorPrefix(mpathMajor) { case "", "v0", "v1": return true default: return false } } if mpathMajor == "" { // Even if pathMajor is ".v0" or ".v1", we can't be sure that a module // without a suffix is tagged appropriately. Besides, we don't expect clones // of non-gopkg.in modules to have gopkg.in paths, so a non-empty, // non-gopkg.in mpath is probably the wrong module for any such pathMajor // anyway. 
return false } // If both pathMajor and mpathMajor are non-empty, then we only care that they // have the same major-version validation rules. A clone fetched via a /v2 // path might replace a module with path gopkg.in/foo.v2-unstable, and that's // ok. return pathMajor[1:] == mpathMajor[1:] } // canReplaceMismatchedVersionDueToBug reports whether versions of r // could replace versions of mpath with otherwise-mismatched major versions // due to a historical bug in the Go command (golang.org/issue/34254). func (r *codeRepo) canReplaceMismatchedVersionDueToBug(mpath string) bool { // The bug caused us to erroneously accept unversioned paths as replacements // for versioned gopkg.in paths. unversioned := r.pathMajor == "" replacingGopkgIn := strings.HasPrefix(mpath, "gopkg.in/") return unversioned && replacingGopkgIn } func (r *codeRepo) GoMod(version string) (data []byte, err error) { if version != module.CanonicalVersion(version) { return nil, fmt.Errorf("version %s is not canonical", version) } if IsPseudoVersion(version) { // findDir ignores the metadata encoded in a pseudo-version, // only using the revision at the end. // Invoke Stat to verify the metadata explicitly so we don't return // a bogus file for an invalid version. _, err := r.Stat(version) if err != nil { return nil, err } } rev, dir, gomod, err := r.findDir(version) if err != nil { return nil, err } if gomod != nil { return gomod, nil } data, err = r.code.ReadFile(rev, path.Join(dir, "go.mod"), codehost.MaxGoMod) if err != nil { if os.IsNotExist(err) { return r.legacyGoMod(rev, dir), nil } return nil, err } return data, nil } func (r *codeRepo) legacyGoMod(rev, dir string) []byte { // We used to try to build a go.mod reflecting pre-existing // package management metadata files, but the conversion // was inherently imperfect (because those files don't have // exactly the same semantics as go.mod) and, when done // for dependencies in the middle of a build, impossible to // correct. So we stopped. // Return a fake go.mod that simply declares the module path. return []byte(fmt.Sprintf("module %s\n", modfile.AutoQuote(r.modPath))) } func (r *codeRepo) modPrefix(rev string) string { return r.modPath + "@" + rev } func (r *codeRepo) retractedVersions() (func(string) bool, error) { versions, err := r.Versions("") if err != nil { return nil, err } for i, v := range versions { if strings.HasSuffix(v, "+incompatible") { versions = versions[:i] break } } if len(versions) == 0 { return func(string) bool { return false }, nil } var highest string for i := len(versions) - 1; i >= 0; i-- { v := versions[i] if semver.Prerelease(v) == "" { highest = v break } } if highest == "" { highest = versions[len(versions)-1] } data, err := r.GoMod(highest) if err != nil { return nil, err } f, err := modfile.ParseLax("go.mod", data, nil) if err != nil { return nil, err } retractions := make([]modfile.VersionInterval, 0, len(f.Retract)) for _, r := range f.Retract { retractions = append(retractions, r.VersionInterval) } return func(v string) bool { for _, r := range retractions { if semver.Compare(r.Low, v) <= 0 && semver.Compare(v, r.High) <= 0 { return true } } return false }, nil } func (r *codeRepo) Zip(dst io.Writer, version string) error { if version != module.CanonicalVersion(version) { return fmt.Errorf("version %s is not canonical", version) } if IsPseudoVersion(version) { // findDir ignores the metadata encoded in a pseudo-version, // only using the revision at the end.
// Invoke Stat to verify the metadata explicitly so we don't return // a bogus file for an invalid version. _, err := r.Stat(version) if err != nil { return err } } rev, subdir, _, err := r.findDir(version) if err != nil { return err } dl, err := r.code.ReadZip(rev, subdir, codehost.MaxZipFile) if err != nil { return err } defer dl.Close() subdir = strings.Trim(subdir, "/") // Spool to local file. f, err := os.CreateTemp("", "go-codehost-") if err != nil { dl.Close() return err } defer os.Remove(f.Name()) defer f.Close() maxSize := int64(codehost.MaxZipFile) lr := &io.LimitedReader{R: dl, N: maxSize + 1} if _, err := io.Copy(f, lr); err != nil { dl.Close() return err } dl.Close() if lr.N <= 0 { return fmt.Errorf("downloaded zip file too large") } size := (maxSize + 1) - lr.N if _, err := f.Seek(0, 0); err != nil { return err } // Translate from zip file we have to zip file we want. zr, err := zip.NewReader(f, size) if err != nil { return err } var files []modzip.File if subdir != "" { subdir += "/" } haveLICENSE := false topPrefix := "" for _, zf := range zr.File { if topPrefix == "" { i := strings.Index(zf.Name, "/") if i < 0 { return fmt.Errorf("missing top-level directory prefix") } topPrefix = zf.Name[:i+1] } if !strings.HasPrefix(zf.Name, topPrefix) { return fmt.Errorf("zip file contains more than one top-level directory") } name := strings.TrimPrefix(zf.Name, topPrefix) if !strings.HasPrefix(name, subdir) { continue } name = strings.TrimPrefix(name, subdir) if name == "" || strings.HasSuffix(name, "/") { continue } files = append(files, zipFile{name: name, f: zf}) if name == "LICENSE" { haveLICENSE = true } } if !haveLICENSE && subdir != "" { data, err := r.code.ReadFile(rev, "LICENSE", codehost.MaxLICENSE) if err == nil { files = append(file
te(dst, module.Version{Path: r.modPath, Version: version}, files) } type zipFile struct { name string f *zip.File } func (f zipFile) Path() string { return f.name } func (f zipFile) Lstat() (fs.FileInfo, error) { return f.f.FileInfo(), nil } func (f zipFile) Open() (io.ReadCloser, error) { return f.f.Open() } type dataFile struct { name string data []byte } func (f dataFile) Path() string { return f.name } func (f dataFile) Lstat() (fs.FileInfo, error) { return dataFileInfo{f}, nil } func (f dataFile) Open() (io.ReadCloser, error) { return io.NopCloser(bytes.NewReader(f.data)), nil } type dataFileInfo struct { f dataFile } func (fi dataFileInfo) Name() string { return path.Base(fi.f.name) } func (fi dataFileInfo) Size() int64 { return int64(len(fi.f.data)) } func (fi dataFileInfo) Mode() fs.FileMode { return 0644 } func (fi dataFileInfo) ModTime() time.Time { return time.Time{} } func (fi dataFileInfo) IsDir() bool { return false } func (fi dataFileInfo) Sys() interface{} { return nil } // hasPathPrefix reports whether the path s begins with the // elements in prefix. func hasPathPrefix(s, prefix string) bool { switch { default: return false case len(s) == len(prefix): return s == prefix case len(s) > len(prefix): if prefix != "" && prefix[len(prefix)-1] == '/' { return strings.HasPrefix(s, prefix) } return s[len(prefix)] == '/' && s[:len(prefix)] == prefix } }
s, dataFile{name: "LICENSE", data: data}) } } return modzip.Crea
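# --- Illustrative sketch (not from the Go sources above) ---------------------
# How codeDir is derived from modPath and codeRoot, following the worked
# examples in newCodeRepo's comments. split_path_version is a simplified
# stand-in for golang.org/x/mod/module.SplitPathVersion: it only handles the
# common "/vN" suffix, not gopkg.in's ".vN" spelling.
import re

def split_path_version(path):
    m = re.search(r'/v[2-9][0-9]*$', path)
    return (path[:m.start()], path[m.start():]) if m else (path, '')

def code_dir(mod_path, code_root):
    path_prefix, _path_major = split_path_version(mod_path)
    if code_root == mod_path:
        return ''  # module sits at the repository root, so codeDir is empty
    if not path_prefix.startswith(code_root):
        raise ValueError('repository cannot contain module')
    return path_prefix[len(code_root):].strip('/')

# The worked examples from the comments above:
assert code_dir('github.com/rsc/foo/bar/v2', 'github.com/rsc/foo') == 'bar'
assert code_dir('nanomsg.org/go/mangos/v2', 'nanomsg.org/go/mangos/v2') == ''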
decoder.go
package vcard import ( "bufio" "errors" "io" "strconv" "strings" ) // A Decoder parses cards. type Decoder struct { r *bufio.Reader } // NewDecoder creates a new Decoder reading cards from an io.Reader. func NewDecoder(r io.Reader) *Decoder { return &Decoder{r: bufio.NewReader(r)} } func (dec *Decoder) readLine() (string, error) { l, err := dec.r.ReadString('\n') l = strings.TrimRight(l, "\r\n") if len(l) > 0 && err == io.EOF { return l, nil } else if err != nil { return l, err } for { next, err := dec.r.Peek(1) if err == io.EOF { break } else if err != nil { return l, err } if ch := next[0]; ch != ' ' && ch != '\t' { break } if _, err := dec.r.Discard(1); err != nil { return l, err } folded, err := dec.r.ReadString('\n') if err != nil { return l, err } l += strings.TrimRight(folded, "\r\n") } return l, nil } // Decode parses a single card. func (dec *Decoder) Decode() (Card, error) { card := make(Card) var hasBegin, hasEnd bool for { l, err := dec.readLine() if err == io.EOF { break } else if err != nil { return card, err } k, fields, err := parseLine(l) if err != nil { continue } f := fields[0] if !hasBegin { if k == "BEGIN" { if strings.ToUpper(f.Value) != "VCARD" { return card, errors.New("vcard: invalid BEGIN value") } hasBegin = true continue } else { return card, errors.New("vcard: no BEGIN field found") } } else if k == "END" { if strings.ToUpper(f.Value) != "VCARD" { return card, errors.New("vcard: invalid END value") } hasEnd = true break } card[k] = append(card[k], fields...) } if !hasEnd { if !hasBegin { return nil, io.EOF } return card, errors.New("vcard: no END field found") } return card, nil } const placeholder = "\uFFFD" const escapedComma = `\,` const comma = `,` func parseLine(l string) (key string, fields []*Field, err error)
func parseGroup(s string) (group, tail string) { i := strings.IndexAny(s, ".;:") if i < 0 || s[i] != '.' { return "", s } return s[:i], s[i+1:] } func parseKey(s string) (key string, params bool, tail string, err error) { i := strings.IndexAny(s, ";:") if i < 0 { err = errors.New("vcard: invalid property key") return } return strings.ToUpper(s[:i]), s[i] == ';', s[i+1:], nil } func parseParams(s string) (params Params, tail string, err error) { tail = s params = make(Params) for tail != "" { i := strings.IndexAny(tail, "=;:") if i < 0 { err = errors.New("vcard: malformed parameters") return } if tail[i] == ';' { tail = tail[i+1:] continue } k := strings.ToUpper(tail[:i]) var values []string var more bool values, more, tail, err = parseParamValues(tail[i+1:]) if err != nil { return } params[k] = append(params[k], values...) if !more { break } } return } func parseParamValues(s string) (values []string, more bool, tail string, err error) { if s == "" { return } quote := s[0] var vs string if quote == '"' { vs, tail, err = parseQuoted(s[1:], quote) if tail == "" || (tail[0] != ';' && tail[0] != ':') { err = errors.New("vcard: malformed quoted parameter value") return } more = tail[0] != ':' tail = tail[1:] } else { i := strings.IndexAny(s, ";:") if i < 0 { return } vs, more, tail = s[:i], s[i] != ':', s[i+1:] } values = strings.Split(vs, ",") for i, value := range values { values[i] = parseValue(value) } return } func parseQuoted(s string, quote byte) (value, tail string, err error) { tail = s var buf []rune for tail != "" { if tail[0] == quote { tail = tail[1:] break } var r rune r, _, tail, err = strconv.UnquoteChar(tail, quote) if err != nil { return } buf = append(buf, r) } value = string(buf) return } var valueParser = strings.NewReplacer("\\\\", "\\", "\\n", "\n", "\\,", ",") func parseValue(s string) string { return valueParser.Replace(s) }
{ fields = []*Field{} field := new(Field) field.Group, l = parseGroup(l) key, hasParams, l, err := parseKey(l) if err != nil { return } if hasParams { field.Params, l, err = parseParams(l) if err != nil { return } } v := strings.Replace(l, escapedComma, placeholder, -1) originalValue := parseValue(v) values := strings.Split(originalValue, ",") if len(values) > 1 { for _, value := range values { f := new(Field) value = strings.Replace(value, placeholder, comma, -1) f.Value = strings.TrimSpace(value) f.Group = field.Group f.Params = field.Params fields = append(fields, f) } } else { originalValue = strings.Replace(originalValue, placeholder, comma, -1) field.Value = originalValue fields = append(fields, field) } return }
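# --- Illustrative sketch (not part of the Go package above) ------------------
# The unfolding rule that readLine implements, per RFC 6350 section 3.2: a
# physical line beginning with a space or tab continues the previous logical
# line, with the single leading marker character removed.
def unfold(raw: str) -> list:
    lines = []
    for physical in raw.splitlines():
        if physical[:1] in (' ', '\t') and lines:
            lines[-1] += physical[1:]   # drop only the fold marker
        else:
            lines.append(physical)
    return lines

assert unfold('NOTE:hel\r\n lo\r\nFN:Ann') == ['NOTE:hello', 'FN:Ann']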
authentication_handlers.py
from django.utils import timezone from rest_framework.authtoken.models import Token class
: """ Handles variations in auth token """ @staticmethod def expired_token(auth_token): """ Checks expiry of auth token """ utc_now = timezone.now() expired = auth_token.created < utc_now - \ timezone.timedelta(hours=24) return expired @staticmethod def create_auth_token(user): """ Creates an auth token for a user """ token, created = Token.objects.get_or_create(user=user) if not created: token.created = timezone.now() token.save() return token
AuthTokenHandler
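# --- Illustrative usage sketch (hypothetical view code, not from the module) -
# expired_token treats tokens older than 24 hours as stale, and
# create_auth_token resets `created` on an existing token, effectively
# renewing it without changing the key. request.user is assumed here.
token = Token.objects.get(user=request.user)
if AuthTokenHandler.expired_token(token):
    token = AuthTokenHandler.create_auth_token(request.user)  # bumps token.created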
_index.js
"use strict"; let MotorFreqs = require('./MotorFreqs.js'); let LightSensorValues = require('./LightSensorValues.js'); let MusicActionFeedback = require('./MusicActionFeedback.js'); let MusicResult = require('./MusicResult.js'); let MusicAction = require('./MusicAction.js'); let MusicGoal = require('./MusicGoal.js'); let MusicActionGoal = require('./MusicActionGoal.js'); let MusicActionResult = require('./MusicActionResult.js');
LightSensorValues: LightSensorValues, MusicActionFeedback: MusicActionFeedback, MusicResult: MusicResult, MusicAction: MusicAction, MusicGoal: MusicGoal, MusicActionGoal: MusicActionGoal, MusicActionResult: MusicActionResult, MusicFeedback: MusicFeedback, };
let MusicFeedback = require('./MusicFeedback.js'); module.exports = { MotorFreqs: MotorFreqs,
output.js
var chalk = require('chalk'); // var wordlist = require('./wordlist.js'); var fs = require('fs'); var exec = require('child_process').exec; var frontLongSpace = ' ' ; var frontMiddleSpace = ' ' ; var frontShortSpace = ' ' ; function
() { this.outputAttention = function(str) { formatLineLog(str); } this.longFrontSpaceLog = function(str){ frontLongSpaceLog(str); } this.outputBaiduWordCard = function(wordInfo,read) { frontShortSpacelog('翻译:'); if (wordInfo.hasOwnProperty('trans_result')){ var transResult = wordInfo.trans_result; transResult.forEach(function(element){ if (element.hasOwnProperty('dst')){ frontShortSpacelog(element.dst); } },this); } } this.outputYoudaoWordCard = function(wordInfo,read) { if (wordInfo.query.length > 18) { frontShortSpacelog('翻译:'); frontShortSpacelog(wordInfo.translation); return; } console.log(frontLongSpace+chalk.bgMagenta(wordInfo.query)+':'+ chalk.magenta(wordInfo.translation)+'\n'); if (read) { var cmdStr1 = 'say' + ' ' + wordInfo.query; var cmdStr2 = 'say' + ' ' + wordInfo.translation; exec(cmdStr1+';'+cmdStr2); } if (wordInfo.hasOwnProperty('basic') && wordInfo.basic.hasOwnProperty('explains')){ var explains = wordInfo.basic.explains; explains.forEach(function(element){ frontMiddleSpacelog(element); },this); console.log(''); } if (wordInfo.hasOwnProperty('web')) { var webInfo = wordInfo.web; for (var key in webInfo) { if (webInfo.hasOwnProperty(key)) { var element = webInfo[key]; console.log(frontMiddleSpace+'关键词:'+chalk.red(element.key)); element.value.forEach(function(element) { console.log(frontLongSpace+element+''); }, this); } } } } this.outputGoogleWordCard = function(wordInfo,read) { var originWord = wordInfo[0][0][1]; var translatedWord = wordInfo[0][0][0]; if (originWord.length > 18) {// if the text is longer than 18 characters, fall back to the concise layout frontShortSpacelog('翻译:'); frontShortSpacelog(translatedWord); } else { console.log(frontLongSpace+chalk.bgMagenta(originWord)+':'+ chalk.magenta(translatedWord)+'\n'); if (read) { var cmdStr1 = 'say' + ' ' + originWord; var cmdStr2 = 'say' + ' ' + translatedWord; exec(cmdStr1+';'+cmdStr2); } } for (var i = 0; wordInfo[1] && i < wordInfo[1].length; i++) { console.log(frontMiddleSpace + wordInfo[1][i][0] + ': ' + wordInfo[1][i][1]); } } var frontLongSpaceLog = function(str) { console.log(frontLongSpace+str); } var frontMiddleSpacelog = function(str) { console.log(frontMiddleSpace+str); } var frontShortSpacelog = function(str) { console.log(frontShortSpace+str); } var formatLineLog = function(str) { var length = 49; if (str.length > length) { console.log('\n==='+str+'===\n'); } else { var num = (length-str.length * 2) * 0.5; var formatStr=''; for (var i=0;i<num;i++) { formatStr = formatStr + '='; } console.log('\n'+formatStr+str+formatStr+'\n'); } } } var exports = new Output(); module.exports = exports;
Output
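# --- Illustrative sketch (an assumption, not part of the module above) -------
# formatLineLog pads its banner assuming every character renders double-width,
# which is only correct for CJK text. A width-aware variant can consult
# unicodedata.east_asian_width so mixed ASCII/CJK titles stay centered.
import unicodedata

def banner(s: str, total: int = 49) -> str:
    width = sum(2 if unicodedata.east_asian_width(c) in ('F', 'W') else 1
                for c in s)
    pad = max((total - width) // 2, 3)
    return '=' * pad + s + '=' * pad

print(banner('翻译'))    # CJK: counted as display width 4, not length 2
print(banner('hello'))  # ASCII: counted as display width 5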
index.js
'use strict'; class RoutingDebug { constructor(davis, options) { this.davis = davis; this.options = options; this.dir = __dirname; this.intents = { startRoutingDebug: { usage: 'Debug the routing intent', phrases: [ 'Debug the routing intent', ], lifecycleEvents: [ 'ask', ], }, routingDebug: { usage: 'Debug the routing intent', phrases: [ ], lifecycleEvents: [ 'respond', ], }, }; this.hooks = { 'startRoutingDebug:ask': this.ask.bind(this), 'routingDebug:respond': this.debug.bind(this), }; } ask(exchange) { exchange .setContextProperty('targetIntent', 'routingDebug') .response('Debugging routing.').skipFollowUp(); } debug(exchange) { const templates = this.davis.pluginManager.responseBuilder.getTemplates(this);
} module.exports = RoutingDebug;
exchange.response(templates).skipFollowUp(); }
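# --- Illustrative sketch (an assumption about the davis plugin contract) -----
# The plugin above relies on a naming convention: each entry in an intent's
# lifecycleEvents expands into a '<intent>:<event>' key that must appear in
# this.hooks. A minimal dispatcher honoring that contract might look like:
intents = {
    'startRoutingDebug': {'lifecycleEvents': ['ask']},
    'routingDebug': {'lifecycleEvents': ['respond']},
}
hooks = {
    'startRoutingDebug:ask': lambda exchange: print('ask', exchange),
    'routingDebug:respond': lambda exchange: print('respond', exchange),
}

def run_intent(name, exchange):
    # fire every lifecycle event of the intent, in declaration order
    for event in intents[name]['lifecycleEvents']:
        hooks['%s:%s' % (name, event)](exchange)

run_intent('routingDebug', {'raw': 'debug the routing intent'})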
telemetry.py
""" Copyright (C) 2017-2021 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import telemetry.utils.isip as isip from telemetry.backend.backend import BackendRegistry from telemetry.utils.sender import TelemetrySender class SingletonMetaClass(type): def __init__(self, cls_name, super_classes, dic): self.__single_instance = None super().__init__(cls_name, super_classes, dic) def __call__(cls, *args, **kwargs): if cls.__single_instance is None: cls.__single_instance = super(SingletonMetaClass, cls).__call__(*args, **kwargs) return cls.__single_instance class Telemetry(metaclass=SingletonMetaClass): """ The main class to send telemetry data. It uses singleton pattern. The instance should be initialized with the application name, version and tracking id just once. Later the instance can be created without parameters. """ def __init__(self, app_name: str = None, app_version: str = None, tid: [None, str] = None, backend: [str, None] = 'ga'): if not hasattr(self, 'tid'): self.tid = None if app_name is not None: # temporary disable telemetry # self.consent = isip.isip_consent() == isip.ISIPConsent.APPROVED self.consent = False # override default tid if tid is not None: self.tid = tid self.backend = BackendRegistry.get_backend(backend)(self.tid, app_name, app_version) self.sender = TelemetrySender() else: # use already configured instance assert self.sender is not None, 'The first instantiation of the Telemetry should be done with the ' \ 'application name and version' def force_shutdown(self, timeout: float = 1.0): """ Stops currently running threads which may be hanging because of no Internet connection. :param timeout: maximum timeout time :return: None """ self.sender.force_shutdown(timeout) def send_event(self, event_category: str, event_action: str, event_label: str, event_value: int = 1, **kwargs): """ Send single event.
:param kwargs: additional parameters :return: None """ if self.consent: self.sender.send(self.backend, self.backend.build_event_message(event_category, event_action, event_label, event_value, **kwargs)) def start_session(self, **kwargs): """ Sends a message about starting of a new session. :param kwargs: additional parameters :return: None """ if self.consent: self.sender.send(self.backend, self.backend.build_session_start_message(**kwargs)) def end_session(self, **kwargs): """ Sends a message about ending of the current session. :param kwargs: additional parameters :return: None """ if self.consent: self.sender.send(self.backend, self.backend.build_session_end_message(**kwargs)) def send_error(self, error_msg: str, **kwargs): if self.consent: pass def send_stack_trace(self, stack_trace: str, **kwargs): if self.consent: pass
:param event_category: category of the event :param event_action: action of the event :param event_label: the label associated with the action :param event_value: the integer value corresponding to this label
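# --- Illustrative sketch (not part of the module above) ----------------------
# What SingletonMetaClass guarantees: the first instance of a class is cached,
# and later constructor calls return that same object without re-running
# __init__. This is why Telemetry can later be created without parameters.
class _Demo(metaclass=SingletonMetaClass):
    def __init__(self, name=None):
        self.name = name

a = _Demo('first')
b = _Demo('second')           # __call__ short-circuits; __init__ is not re-run
assert a is b and a.name == 'first'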
FemzipMapper.py
import logging import re import traceback from typing import Dict, List, Set, Tuple, Union import numpy as np from lasso.dyna.ArrayType import ArrayType from lasso.femzip.femzip_api import FemzipAPI, FemzipFileMetadata, VariableInfo from lasso.femzip.fz_config import (FemzipArrayType, FemzipVariableCategory, get_last_int_of_line) TRANSL_FEMZIP_ARRATYPE_TO_D3PLOT_ARRAYTYPE: Dict[Tuple[FemzipArrayType, FemzipVariableCategory], Set[str]] = { # GLOBAL (FemzipArrayType.global_data, FemzipVariableCategory.GLOBAL): { # ArrayType.global_timesteps, ArrayType.global_internal_energy, ArrayType.global_kinetic_energy, ArrayType.global_total_energy, ArrayType.global_velocity, }, # PART (FemzipArrayType.part_results, FemzipVariableCategory.PART): { ArrayType.part_hourglass_energy, ArrayType.part_internal_energy, ArrayType.part_kinetic_energy, ArrayType.part_mass, ArrayType.part_velocity, }, # NODE (FemzipArrayType.node_displacement, FemzipVariableCategory.NODE): { ArrayType.node_displacement }, (FemzipArrayType.node_accelerations, FemzipVariableCategory.NODE): { ArrayType.node_acceleration }, (FemzipArrayType.node_velocities, FemzipVariableCategory.NODE): { ArrayType.node_velocity }, (FemzipArrayType.node_temperatures, FemzipVariableCategory.NODE): { ArrayType.node_temperature }, (FemzipArrayType.node_heat_flux, FemzipVariableCategory.NODE): { ArrayType.node_heat_flux }, (FemzipArrayType.node_mass_scaling, FemzipVariableCategory.NODE): { ArrayType.node_mass_scaling }, (FemzipArrayType.node_temperature_gradient, FemzipVariableCategory.NODE): { ArrayType.node_temperature_gradient }, # BEAM (FemzipArrayType.beam_axial_force, FemzipVariableCategory.BEAM): { ArrayType.element_beam_axial_force }, (FemzipArrayType.beam_s_bending_moment, FemzipVariableCategory.BEAM): { ArrayType.element_beam_bending_moment }, (FemzipArrayType.beam_t_bending_moment, FemzipVariableCategory.BEAM): { ArrayType.element_beam_bending_moment }, (FemzipArrayType.beam_s_shear_resultant, FemzipVariableCategory.BEAM): { ArrayType.element_beam_shear_force }, (FemzipArrayType.beam_t_shear_resultant, FemzipVariableCategory.BEAM): { ArrayType.element_beam_shear_force }, (FemzipArrayType.beam_torsional_moment, FemzipVariableCategory.BEAM): { ArrayType.element_beam_torsion_moment }, (FemzipArrayType.beam_axial_stress, FemzipVariableCategory.BEAM): { ArrayType.element_beam_axial_stress }, (FemzipArrayType.beam_shear_stress_rs, FemzipVariableCategory.BEAM): { ArrayType.element_beam_shear_stress }, (FemzipArrayType.beam_shear_stress_tr, FemzipVariableCategory.BEAM): { ArrayType.element_beam_shear_stress }, (FemzipArrayType.beam_plastic_strain, FemzipVariableCategory.BEAM): { ArrayType.element_beam_plastic_strain }, (FemzipArrayType.beam_axial_strain, FemzipVariableCategory.BEAM): { ArrayType.element_beam_axial_strain }, # SHELL (FemzipArrayType.stress_x, FemzipVariableCategory.SHELL): { ArrayType.element_shell_stress }, (FemzipArrayType.stress_y, FemzipVariableCategory.SHELL): { ArrayType.element_shell_stress }, (FemzipArrayType.stress_z, FemzipVariableCategory.SHELL): { ArrayType.element_shell_stress }, (FemzipArrayType.stress_xy, FemzipVariableCategory.SHELL): { ArrayType.element_shell_stress }, (FemzipArrayType.stress_yz, FemzipVariableCategory.SHELL): { ArrayType.element_shell_stress }, (FemzipArrayType.stress_xz, FemzipVariableCategory.SHELL): { ArrayType.element_shell_stress }, (FemzipArrayType.eff_pstrain, FemzipVariableCategory.SHELL): { ArrayType.element_shell_effective_plastic_strain }, (FemzipArrayType.history_vars, 
FemzipVariableCategory.SHELL): { ArrayType.element_shell_history_vars }, (FemzipArrayType.bending_moment_mx, FemzipVariableCategory.SHELL): { ArrayType.element_shell_bending_moment }, (FemzipArrayType.bending_moment_my, FemzipVariableCategory.SHELL): { ArrayType.element_shell_bending_moment }, (FemzipArrayType.bending_moment_mxy, FemzipVariableCategory.SHELL): { ArrayType.element_shell_bending_moment }, (FemzipArrayType.shear_force_x, FemzipVariableCategory.SHELL): { ArrayType.element_shell_shear_force }, (FemzipArrayType.shear_force_y, FemzipVariableCategory.SHELL): { ArrayType.element_shell_shear_force }, (FemzipArrayType.normal_force_x, FemzipVariableCategory.SHELL): { ArrayType.element_shell_normal_force }, (FemzipArrayType.normal_force_y, FemzipVariableCategory.SHELL): { ArrayType.element_shell_normal_force }, (FemzipArrayType.normal_force_xy, FemzipVariableCategory.SHELL): { ArrayType.element_shell_normal_force }, (FemzipArrayType.thickness, FemzipVariableCategory.SHELL): { ArrayType.element_shell_thickness }, (FemzipArrayType.unknown_1, FemzipVariableCategory.SHELL): { ArrayType.element_shell_unknown_variables }, (FemzipArrayType.unknown_2, FemzipVariableCategory.SHELL): { ArrayType.element_shell_unknown_variables }, (FemzipArrayType.strain_inner_x, FemzipVariableCategory.SHELL): { ArrayType.element_shell_strain }, (FemzipArrayType.strain_inner_y, FemzipVariableCategory.SHELL): { ArrayType.element_shell_strain }, (FemzipArrayType.strain_inner_z, FemzipVariableCategory.SHELL): { ArrayType.element_shell_strain }, (FemzipArrayType.strain_inner_xy, FemzipVariableCategory.SHELL): { ArrayType.element_shell_strain }, (FemzipArrayType.strain_inner_yz, FemzipVariableCategory.SHELL): { ArrayType.element_shell_strain }, (FemzipArrayType.strain_inner_xz, FemzipVariableCategory.SHELL): { ArrayType.element_shell_strain }, (FemzipArrayType.strain_outer_x, FemzipVariableCategory.SHELL): { ArrayType.element_shell_strain }, (FemzipArrayType.strain_outer_y, FemzipVariableCategory.SHELL): { ArrayType.element_shell_strain }, (FemzipArrayType.strain_outer_z, FemzipVariableCategory.SHELL): { ArrayType.element_shell_strain }, (FemzipArrayType.strain_outer_xy, FemzipVariableCategory.SHELL): { ArrayType.element_shell_strain }, (FemzipArrayType.strain_outer_yz, FemzipVariableCategory.SHELL): { ArrayType.element_shell_strain }, (FemzipArrayType.strain_outer_xz, FemzipVariableCategory.SHELL): { ArrayType.element_shell_strain }, (FemzipArrayType.internal_energy, FemzipVariableCategory.SHELL): { ArrayType.element_shell_internal_energy }, # THICK SHELL ((FemzipArrayType.stress_x, FemzipVariableCategory.THICK_SHELL)): { ArrayType.element_tshell_stress }, ((FemzipArrayType.stress_y, FemzipVariableCategory.THICK_SHELL)): { ArrayType.element_tshell_stress }, ((FemzipArrayType.stress_z, FemzipVariableCategory.THICK_SHELL)): { ArrayType.element_tshell_stress }, ((FemzipArrayType.stress_xy, FemzipVariableCategory.THICK_SHELL)): { ArrayType.element_tshell_stress }, ((FemzipArrayType.stress_yz, FemzipVariableCategory.THICK_SHELL)): { ArrayType.element_tshell_stress }, ((FemzipArrayType.stress_xz, FemzipVariableCategory.THICK_SHELL)): { ArrayType.element_tshell_stress }, (FemzipArrayType.eff_pstrain, FemzipVariableCategory.THICK_SHELL): { ArrayType.element_tshell_effective_plastic_strain }, (FemzipArrayType.strain_outer_x, FemzipVariableCategory.THICK_SHELL): { ArrayType.element_tshell_strain }, (FemzipArrayType.strain_outer_y, FemzipVariableCategory.THICK_SHELL): { ArrayType.element_tshell_strain }, 
(FemzipArrayType.strain_outer_z, FemzipVariableCategory.THICK_SHELL): { ArrayType.element_tshell_strain }, (FemzipArrayType.strain_outer_xy, FemzipVariableCategory.THICK_SHELL): { ArrayType.element_tshell_strain }, (FemzipArrayType.strain_outer_yz, FemzipVariableCategory.THICK_SHELL): { ArrayType.element_tshell_strain }, (FemzipArrayType.strain_outer_xz, FemzipVariableCategory.THICK_SHELL): { ArrayType.element_tshell_strain }, (FemzipArrayType.strain_inner_x, FemzipVariableCategory.THICK_SHELL): { ArrayType.element_tshell_strain }, (FemzipArrayType.strain_inner_y, FemzipVariableCategory.THICK_SHELL): { ArrayType.element_tshell_strain }, (FemzipArrayType.strain_inner_z, FemzipVariableCategory.THICK_SHELL): { ArrayType.element_tshell_strain }, (FemzipArrayType.strain_inner_xy, FemzipVariableCategory.THICK_SHELL): { ArrayType.element_tshell_strain }, (FemzipArrayType.strain_inner_yz, FemzipVariableCategory.THICK_SHELL): { ArrayType.element_tshell_strain }, (FemzipArrayType.strain_inner_xz, FemzipVariableCategory.THICK_SHELL): { ArrayType.element_tshell_strain }, # SOLID (FemzipArrayType.stress_x, FemzipVariableCategory.SOLID): { ArrayType.element_solid_stress }, (FemzipArrayType.stress_y, FemzipVariableCategory.SOLID): { ArrayType.element_solid_stress }, (FemzipArrayType.stress_z, FemzipVariableCategory.SOLID): { ArrayType.element_solid_stress }, (FemzipArrayType.stress_xy, FemzipVariableCategory.SOLID): { ArrayType.element_solid_stress }, (FemzipArrayType.stress_yz, FemzipVariableCategory.SOLID): { ArrayType.element_solid_stress }, (FemzipArrayType.stress_xz, FemzipVariableCategory.SOLID): { ArrayType.element_solid_stress }, (FemzipArrayType.eff_pstrain, FemzipVariableCategory.SOLID): { ArrayType.element_solid_effective_plastic_strain }, (FemzipArrayType.history_vars, FemzipVariableCategory.SOLID): { ArrayType.element_solid_history_variables }, (FemzipArrayType.strain_x, FemzipVariableCategory.SOLID): { ArrayType.element_solid_strain }, (FemzipArrayType.strain_y, FemzipVariableCategory.SOLID): { ArrayType.element_solid_strain }, (FemzipArrayType.strain_z, FemzipVariableCategory.SOLID): { ArrayType.element_solid_strain }, (FemzipArrayType.strain_xy, FemzipVariableCategory.SOLID): { ArrayType.element_solid_strain }, (FemzipArrayType.strain_yz, FemzipVariableCategory.SOLID): { ArrayType.element_solid_strain }, (FemzipArrayType.strain_xz, FemzipVariableCategory.SOLID): { ArrayType.element_solid_strain }, # AIRBAG (FemzipArrayType.airbag_state_geom, FemzipVariableCategory.CPM_AIRBAG): { ArrayType.airbag_n_active_particles, ArrayType.airbag_bag_volume, }, # AIRBAG PARTICLES (FemzipArrayType.airbag_particle_gas_chamber_id, FemzipVariableCategory.CPM_INT_VAR): { ArrayType.airbag_particle_gas_id }, (FemzipArrayType.airbag_particle_chamber_id, FemzipVariableCategory.CPM_INT_VAR): { ArrayType.airbag_particle_chamber_id }, (FemzipArrayType.airbag_particle_leakage, FemzipVariableCategory.CPM_INT_VAR): { ArrayType.airbag_particle_leakage },
(FemzipArrayType.airbag_particle_mass, FemzipVariableCategory.CPM_FLOAT_VAR): { ArrayType.airbag_particle_mass }, (FemzipArrayType.airbag_particle_pos_x, FemzipVariableCategory.CPM_FLOAT_VAR): { ArrayType.airbag_particle_position }, (FemzipArrayType.airbag_particle_pos_y, FemzipVariableCategory.CPM_FLOAT_VAR): { ArrayType.airbag_particle_position }, (FemzipArrayType.airbag_particle_pos_z, FemzipVariableCategory.CPM_FLOAT_VAR): { ArrayType.airbag_particle_position }, (FemzipArrayType.airbag_particle_vel_x, FemzipVariableCategory.CPM_FLOAT_VAR): { ArrayType.airbag_particle_velocity }, (FemzipArrayType.airbag_particle_vel_y, FemzipVariableCategory.CPM_FLOAT_VAR): { ArrayType.airbag_particle_velocity }, (FemzipArrayType.airbag_particle_vel_z, FemzipVariableCategory.CPM_FLOAT_VAR): { ArrayType.airbag_particle_velocity }, (FemzipArrayType.airbag_particle_radius, FemzipVariableCategory.CPM_FLOAT_VAR): { ArrayType.airbag_particle_radius }, (FemzipArrayType.airbag_particle_spin_energy, FemzipVariableCategory.CPM_FLOAT_VAR): { ArrayType.airbag_particle_spin_energy }, (FemzipArrayType.airbag_particle_tran_energy, FemzipVariableCategory.CPM_FLOAT_VAR): { ArrayType.airbag_particle_translation_energy }, (FemzipArrayType.airbag_particle_neighbor_dist, FemzipVariableCategory.CPM_FLOAT_VAR): { ArrayType.airbag_particle_nearest_segment_distance }, } # indexes for various femzip arrays stress_index = { FemzipArrayType.stress_x.value: 0, FemzipArrayType.stress_y.value: 1, FemzipArrayType.stress_z.value: 2, FemzipArrayType.stress_xy.value: 3, FemzipArrayType.stress_yz.value: 4, FemzipArrayType.stress_xz.value: 5, FemzipArrayType.normal_force_x.value: 0, FemzipArrayType.normal_force_y.value: 1, FemzipArrayType.normal_force_xy.value: 2, FemzipArrayType.shear_force_x.value: 0, FemzipArrayType.shear_force_y.value: 1, FemzipArrayType.strain_inner_x.value: 0, FemzipArrayType.strain_inner_y.value: 1, FemzipArrayType.strain_inner_z.value: 2, FemzipArrayType.strain_inner_xy.value: 3, FemzipArrayType.strain_inner_yz.value: 4, FemzipArrayType.strain_inner_xz.value: 5, FemzipArrayType.strain_outer_x.value: 0, FemzipArrayType.strain_outer_y.value: 1, FemzipArrayType.strain_outer_z.value: 2, FemzipArrayType.strain_outer_xy.value: 3, FemzipArrayType.strain_outer_yz.value: 4, FemzipArrayType.strain_outer_xz.value: 5, FemzipArrayType.beam_s_shear_resultant.value: 0, FemzipArrayType.beam_t_shear_resultant.value: 1, FemzipArrayType.beam_s_bending_moment.value: 0, FemzipArrayType.beam_t_bending_moment.value: 1, FemzipArrayType.strain_x.value: 0, FemzipArrayType.strain_y.value: 1, FemzipArrayType.strain_z.value: 2, FemzipArrayType.strain_xy.value: 3, FemzipArrayType.strain_yz.value: 4, FemzipArrayType.strain_xz.value: 5, FemzipArrayType.beam_shear_stress_rs.value: 0, FemzipArrayType.beam_shear_stress_tr.value: 1, FemzipArrayType.airbag_particle_pos_x.value: 0, FemzipArrayType.airbag_particle_pos_y.value: 1, FemzipArrayType.airbag_particle_pos_z.value: 2, FemzipArrayType.airbag_particle_vel_x.value: 0, FemzipArrayType.airbag_particle_vel_y.value: 1, FemzipArrayType.airbag_particle_vel_z.value: 2, FemzipArrayType.bending_moment_mx.value: 0, FemzipArrayType.bending_moment_my.value: 1, FemzipArrayType.bending_moment_mxy.value: 2, FemzipArrayType.unknown_1.value: 0, FemzipArrayType.unknown_2.value: 1, } def femzip_to_d3plot( result_arrays: Dict[Tuple[int, str, FemzipVariableCategory], np.ndarray] ) -> Dict[str, np.ndarray]: """Map femzip arrays to d3plot arrays Parameters ---------- result_arrays: femzip arrays """ a = FemzipMapper() 
a.map(result_arrays) return a.d3plot_arrays class ArrayShapeInfo: n_layers: Union[int, None] = None n_vars: Union[int, None] = None n_entries: Union[int, None] = None n_timesteps: Union[int, None] = None def _set_attr(self, attr_name: str, value: Union[int, None]) -> None: self_attr_value = getattr(self, attr_name) if value is not None: if self_attr_value is None: setattr(self, attr_name, value) else: setattr(self, attr_name, max(self_attr_value, value)) def set_n_layers(self, n_layers: Union[int, None]) -> None: self._set_attr("n_layers", n_layers) def set_n_vars(self, n_vars: Union[int, None]) -> None: self._set_attr("n_vars", n_vars) def set_n_entries(self, n_entries: Union[int, None]) -> None: self._set_attr("n_entries", n_entries) def set_n_timesteps(self, n_timesteps: Union[int, None]) -> None: self._set_attr("n_timesteps", n_timesteps) def to_shape(self) -> Tuple[int, ...]: shape = [self.n_timesteps, self.n_entries] fortran_offset = 1 if self.n_layers is not None: shape.append(self.n_layers + fortran_offset) if self.n_vars is not None: shape.append(self.n_vars + fortran_offset) return tuple(shape) class D3plotArrayMapping: d3plot_array_type: str d3_layer_slice: Union[slice, int, None] = None d3_var_slice: Union[slice, int, None] = None fz_layer_slice: Union[slice, int, None] = None fz_var_slice: Union[slice, int, None] = None just_assign: bool = False def to_slice(self) -> Tuple[Union[int, slice], ...]: slices: List[Union[slice, int]] = [slice(None), slice(None)] if self.d3_layer_slice is not None: slices.append(self.d3_layer_slice) if self.d3_var_slice is not None: slices.append(self.d3_var_slice) return tuple(slices) class FemzipArrayInfo: full_name: str = "" short_name: str = "" index: int = -1 category: FemzipVariableCategory array_type: FemzipArrayType array: np.ndarray i_layer: Union[int, None] = None i_var: Union[int, None] = None mappings: List[D3plotArrayMapping] def __init__(self): self.mappings = [] def __str__(self) -> str: return f"""FemzipArrayInfo: full_name = {self.full_name} short_name = {self.short_name} index = {self.index} category = {self.category} array_type = {self.array_type} i_layer = {self.i_layer} i_var = {self.i_var}""" class FemzipMapper(): """Class for mapping femzip variable data to d3plots. Takes no arguments. """ # regex pattern for reading variables name_separation_pattern = re.compile(r"(^[^\(\n]+)(\([^\)]+\))*") FORTRAN_OFFSET: int = 1 _d3plot_arrays: Dict[str, np.ndarray] = {} def __init__(self): pass def map(self, result_arrays: Dict[Tuple[int, str, FemzipVariableCategory], np.ndarray]): """Map femzip data to d3plot arrays.

        Parameters
        ----------
        result_arrays:
            femzip variable data
        """
        self._d3plot_arrays = {}
        self._fz_array_slices = {}

        # convert to internal data structure
        array_infos = self._convert(result_arrays)

        # build the array shapes
        d3plot_array_shapes = self._build(array_infos)

        # init the numpy arrays
        self._d3plot_arrays = self._allocate_d3plot_arrays(d3plot_array_shapes)

        # add all the data to its right place
        self._map_arrays(array_infos, self._d3plot_arrays)

    def _convert(self,
                 result_arrays: Dict[Tuple[int, str, FemzipVariableCategory], np.ndarray]
                 ) -> List[FemzipArrayInfo]:
        """ Convert femzip result arrays into array infos

        Parameters
        ----------
        result_arrays: Dict[Tuple[int, str, FemzipVariableCategory], np.ndarray]
            result arrays from femzip

        Returns
        -------
        array_infos: List[FemzipArrayInfo]
            infos about femzip arrays
        """
        array_infos = []

        # convert
        for (fz_index, fz_name, fz_cat), array in result_arrays.items():
            femzip_array_info = FemzipArrayInfo()
            femzip_array_info.index = fz_index
            femzip_array_info.full_name = fz_name
            femzip_array_info.category = fz_cat
            femzip_array_info.array = array
            femzip_array_info.array_type = FemzipArrayType.from_string(fz_name)

            var_name, i_layer, i_stress, i_history = self._parse_femzip_name(
                fz_name, fz_cat)

            femzip_array_info.short_name = var_name
            femzip_array_info.i_layer = i_layer
            femzip_array_info.i_var = i_stress if i_stress is not None else i_history

            array_infos.append(femzip_array_info)

        return array_infos

    @staticmethod
    def _build(fz_arrays: List[FemzipArrayInfo]) -> Dict[str, Tuple[int, ...]]:
        """ Counts the occurrence of all variables in the result array
        such as the number of layers and stresses.

        Parameters
        ----------
        fz_arrays: List[FemzipArrayInfo]
            infos about femzip arrays

        Returns
        -------
        d3plot_array_shapes:
            shapes of the d3plot arrays required to be allocated

        Notes
        -----
            Some variables only have partial results written,
            e.g. stresses for only Sigma-x and Sigma-y, or only
            layers one to three.
""" shape_infos: Dict[str, ArrayShapeInfo] = {} name_count: Dict[Tuple[str, FemzipVariableCategory], int] = {} for arr_info in fz_arrays: # print(arr_info) d3_array_types = TRANSL_FEMZIP_ARRATYPE_TO_D3PLOT_ARRAYTYPE[( arr_info.array_type, arr_info.category)] # var_name = var_name.strip() for array_type in d3_array_types: # print(array_type) array_shape_info = shape_infos.get(array_type) or ArrayShapeInfo() # beam layer vars always have same name but # must be counted up as layers if (arr_info.full_name, arr_info.category) in name_count: count = name_count[(arr_info.full_name, arr_info.category)] i_layer = count + 1 name_count[(arr_info.full_name, arr_info.category)] = i_layer else: name_count[(arr_info.full_name, arr_info.category)] = 0 # update shape array_shape_info.set_n_timesteps(arr_info.array.shape[0]) array_shape_info.set_n_entries(arr_info.array.shape[1]) array_shape_info.set_n_layers(arr_info.i_layer) array_shape_info.set_n_vars(arr_info.i_var) shape_infos[array_type] = array_shape_info # where to put it mapping = D3plotArrayMapping() mapping.d3plot_array_type = array_type if arr_info.i_layer is not None: mapping.d3_layer_slice = arr_info.i_layer if arr_info.i_var is not None: mapping.d3_var_slice = arr_info.i_var # arrays to copy: # - node displacement, veloctiy, acceleration # - airbag integer vars (so we don't need to cast) if arr_info.array.ndim == 3 \ or arr_info.category == FemzipVariableCategory.CPM_INT_VAR: mapping.just_assign = True arr_info.mappings.append(mapping) # correct layers # if a field has the same name for multiple # layers such as beam axial stress, we needed # to count in order to determine if it had layers # now we need to correct i_layers from None to 0 for them name_count2 = {} for arr_info in fz_arrays: count = name_count[(arr_info.full_name, arr_info.category)] if count != 0 and arr_info.i_layer is None: count2 = name_count2.get((arr_info.full_name, arr_info.category), -1) count2 += 1 arr_info.i_layer = count2 name_count2[(arr_info.full_name, arr_info.category)] = count2 for mapping in arr_info.mappings: shape_info = shape_infos[mapping.d3plot_array_type] shape_info.set_n_layers(count) mapping.d3_layer_slice = count2 # all arrays which are simply copied (slice has len 2 and only one target) # get a just assign flag if (len(arr_info.mappings) == 2 and len(arr_info.mappings[0].to_slice()) == 2): arr_info.mappings[0].just_assign = True d3_array_types = TRANSL_FEMZIP_ARRATYPE_TO_D3PLOT_ARRAYTYPE[( arr_info.array_type, arr_info.category)] for array_type in d3_array_types: del shape_infos[array_type] return {name: info.to_shape() for name, info in shape_infos.items()} def _map_arrays(self, array_infos: List[FemzipArrayInfo], d3plot_arrays: Dict[str, np.ndarray]): """Allocate a femzip variable to its correct position in the d3plot array dictionary. Paramters --------- array_infos: List[FemzipArrayInfo] femzip variables stored in a dictionary d3plot_array: Dict[str, np.ndarray] d3plot arrays preallocated Notes ----- The keys are the femzip array name (unparsed) and the category of the variable as an enum. 
""" for arr_info in array_infos: if arr_info.category == FemzipVariableCategory.CPM_AIRBAG: d3plot_arrays[ArrayType.airbag_n_active_particles] = arr_info.array[:, :, 0].view( np.int32) d3plot_arrays[ArrayType.airbag_bag_volume] = arr_info.array[:, :, 1] else: for mapping in arr_info.mappings: if mapping.just_assign: d3plot_arrays[mapping.d3plot_array_type] = arr_info.array continue slices = mapping.to_slice() d3plot_array = d3plot_arrays[mapping.d3plot_array_type] # for femzip arrays with same name first var_index is missing if d3plot_array.ndim == 3 and len(slices) == 2 and arr_info.array.ndim == 2: slices = (*slices, 0) d3plot_array[slices] = arr_info.array def
(self, array_shapes: Dict[str, Tuple[int, ...]]) -> Dict[str, np.ndarray]:
        """Initialize all the d3plot arrays.

        Parameters
        ----------
        array_shapes: Dict[str, Tuple[int, ...]]
            array shapes required to be allocated

        Returns
        -------
        d3plot_arrays: Dict[str, np.ndarray]
            d3plot arrays preallocated
        """
        d3plot_arrays = {}
        for key, shape in array_shapes.items():
            d3plot_arrays[key] = np.empty(shape, dtype=np.float32)
        return d3plot_arrays

    @property
    def d3plot_arrays(self):
        """Returns the mapped d3plot arrays.
        """
        return self._d3plot_arrays

    def _parse_femzip_name(self,
                           fz_name: str,
                           var_type: FemzipVariableCategory) -> Tuple[str,
                                                                      Union[int, None],
                                                                      Union[int, None],
                                                                      Union[int, None]]:
        """Parses the femzip variable names.

        Parameters
        ----------
        fz_name:
            cryptic femzip variable name we need to parse
        var_type:
            the category of this variable, e.g. shells, parts, global etc.

        Returns
        -------
        var_name:
            femzip variable name without integration and layer info
        i_layer:
            layer index
        i_stress:
            stress index
        i_history:
            history variable index
        """
        matches = self.name_separation_pattern.findall(fz_name)

        if not len(matches) == 1:
            err_msg = "Could not match femzip array name: {0}"
            raise ValueError(err_msg.format(fz_name))
        if not len(matches[0]) == 2:
            err_msg = "Could not match femzip array name: {0}"
            raise ValueError(err_msg.format(fz_name))

        (first_grp, second_grp) = matches[0]

        var_name, extra_value = get_last_int_of_line(first_grp)
        var_name = var_name.strip()

        # the slice 1:-1 leaves out the brackets '(' and ')'
        _, i_layer = get_last_int_of_line(
            second_grp[1:-1])
        if i_layer is not None:
            i_layer -= self.FORTRAN_OFFSET

        i_history: Union[int, None] = None
        if var_type != FemzipVariableCategory.PART and \
           var_type != FemzipVariableCategory.GLOBAL:
            i_history = extra_value
        if i_history:
            i_history -= self.FORTRAN_OFFSET

        # set var name to the unformatted femzip array type name
        if "Epsilon" in var_name:
            var_name = fz_name.strip()
            if "inner" in var_name:
                i_layer = 0
            elif "outer" in var_name:
                i_layer = 1
            else:
                # solid strain
                i_layer = 0

        i_stress: Union[int, None] = stress_index.get(var_name, None)

        return var_name, i_layer, i_stress, i_history


def filter_femzip_variables(file_metadata: FemzipFileMetadata,
                            d3plot_array_filter: Union[Set[str], None]) -> FemzipFileMetadata:
    """ Filters variable infos regarding d3plot array types

    Parameters
    ----------
    file_metadata: FemzipFileMetadata
        metadata of femzip file including contained variables
    d3plot_array_filter: Union[Set[str], None]
        array types to filter for if wanted

    Returns
    -------
    file_metadata: FemzipFileMetadata
        filtered array according to array types
    """

    # find out which arrays we need
    vars_to_copy: List[int] = list()
    for i_var in range(file_metadata.number_of_variables):
        try:
            var_info: VariableInfo = file_metadata.variable_infos[i_var]
            var_type: int = var_info.var_type
            var_index: int = var_info.var_index
            var_name: str = var_info.name.decode("utf-8")

            logging.debug(f"{var_type}, {var_index}, {var_name.strip()}")

            if var_type == FemzipVariableCategory.GEOMETRY.value:
                continue

            # find out which array from name
            try:
                fz_array_type = FemzipArrayType.from_string(var_name)
            except ValueError:
                warn_msg = ("Warning: lasso-python does not support femzip result"
                            " field '{0}' category type '{1}'.")
                logging.warning(warn_msg.format(var_name.strip(), var_type))
                continue

            # check if we asked for the array
            matching_array_types = TRANSL_FEMZIP_ARRATYPE_TO_D3PLOT_ARRAYTYPE[(
                fz_array_type, FemzipVariableCategory(var_type))]
            if d3plot_array_filter is not None:
                if not
matching_array_types.intersection(d3plot_array_filter):
                    continue

            vars_to_copy.append(i_var)
        except Exception:
            trb_msg = traceback.format_exc()
            err_msg = "An error occurred while preprocessing femzip variable information: {0}"
            logging.warning(err_msg.format(trb_msg))

    # copy filtered data
    filtered_file_metadata = FemzipFileMetadata()
    FemzipAPI.copy_struct(file_metadata, filtered_file_metadata)
    filtered_file_metadata.number_of_variables = len(vars_to_copy)

    FilteredVariableInfoArrayType = len(vars_to_copy) * VariableInfo
    filtered_info_array_data = FilteredVariableInfoArrayType()
    for i_var, src_i_var in enumerate(vars_to_copy):
        FemzipAPI.copy_struct(
            file_metadata.variable_infos[src_i_var],
            filtered_info_array_data[i_var])
    filtered_file_metadata.variable_infos = filtered_info_array_data

    return filtered_file_metadata
_allocate_d3plot_arrays
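A minimal sketch of the slicing idea behind `D3plotArrayMapping`, using plain numpy so it runs without the femzip bindings. The dimensions and fill values here are hypothetical, not taken from a real d3plot:

```python
import numpy as np

# Hypothetical dimensions: 3 timesteps, 4 shell elements,
# 2 layers and 6 stress components per element.
n_timesteps, n_entries, n_layers, n_vars = 3, 4, 2, 6

# Preallocate the target array, as _allocate_d3plot_arrays does.
d3plot_array = np.empty((n_timesteps, n_entries, n_layers, n_vars), dtype=np.float32)

# femzip delivers one flat (n_timesteps, n_entries) array per
# (layer, component) combination; slot each one into place.
for i_layer in range(n_layers):
    for i_var in range(n_vars):
        fz_array = np.full((n_timesteps, n_entries), i_layer * 10 + i_var, np.float32)
        # equivalent of D3plotArrayMapping.to_slice() with
        # d3_layer_slice=i_layer and d3_var_slice=i_var
        slices = (slice(None), slice(None), i_layer, i_var)
        d3plot_array[slices] = fz_array

print(d3plot_array[0, 0])  # one element's (n_layers, n_vars) block
```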
node.go
package filesystem import ( "io" "os" "path" "gopkg.in/src-d/go-billy.v3" "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/filemode" "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" ) var ignore = map[string]bool{ ".git": true, } // The node represents a file or a directory in a billy.Filesystem. It // implements the interface noder.Noder of merkletrie package. // // This implementation implements a "standard" hash method being able to be // compared with any other noder.Noder implementation inside of go-git. type node struct { fs billy.Filesystem submodules map[string]plumbing.Hash path string hash []byte children []noder.Noder isDir bool } // NewRootNode returns the root node based on a given billy.Filesystem. // // In order to provide the submodule hash status, a map[string]plumbing.Hash // should be provided where the key is the path of the submodule and the commit // of the submodule HEAD func NewRootNode( fs billy.Filesystem, submodules map[string]plumbing.Hash, ) noder.Noder { return &node{fs: fs, submodules: submodules, isDir: true} } // Hash the hash of a filesystem is the result of concatenating the computed // plumbing.Hash of the file as a Blob and its plumbing.FileMode; that way the // difftree algorithm will detect changes in the contents of files and also in // their mode. // // The hash of a directory is always a 24-bytes slice of zero values func (n *node) Hash() []byte { return n.hash } func (n *node) Name() string { return path.Base(n.path) } func (n *node) IsDir() bool { return n.isDir } func (n *node) Children() ([]noder.Noder, error) { if err := n.calculateChildren(); err != nil { return nil, err } return n.children, nil } func (n *node) NumChildren() (int, error) { if err := n.calculateChildren(); err != nil { return -1, err } return len(n.children), nil } func (n *node) calculateChildren() error { if len(n.children) != 0 { return nil } files, err := n.fs.ReadDir(n.path) if err != nil { if os.IsNotExist(err)
return nil } for _, file := range files { if _, ok := ignore[file.Name()]; ok { continue } c, err := n.newChildNode(file) if err != nil { return err } n.children = append(n.children, c) } return nil } func (n *node) newChildNode(file os.FileInfo) (*node, error) { path := path.Join(n.path, file.Name()) hash, err := n.calculateHash(path, file) if err != nil { return nil, err } node := &node{ fs: n.fs, submodules: n.submodules, path: path, hash: hash, isDir: file.IsDir(), } if hash, isSubmodule := n.submodules[path]; isSubmodule { node.hash = append(hash[:], filemode.Submodule.Bytes()...) node.isDir = false } return node, nil } func (n *node) calculateHash(path string, file os.FileInfo) ([]byte, error) { if file.IsDir() { return make([]byte, 24), nil } var hash plumbing.Hash var err error if file.Mode()&os.ModeSymlink != 0 { hash, err = n.doCalculateHashForSymlink(path, file) } else { hash, err = n.doCalculateHashForRegular(path, file) } if err != nil { return nil, err } mode, err := filemode.NewFromOSFileMode(file.Mode()) if err != nil { return nil, err } return append(hash[:], mode.Bytes()...), nil } func (n *node) doCalculateHashForRegular(path string, file os.FileInfo) (plumbing.Hash, error) { f, err := n.fs.Open(path) if err != nil { return plumbing.ZeroHash, err } defer f.Close() h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) if _, err := io.Copy(h, f); err != nil { return plumbing.ZeroHash, err } return h.Sum(), nil } func (n *node) doCalculateHashForSymlink(path string, file os.FileInfo) (plumbing.Hash, error) { target, err := n.fs.Readlink(path) if err != nil { return plumbing.ZeroHash, err } h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) if _, err := h.Write([]byte(target)); err != nil { return plumbing.ZeroHash, err } return h.Sum(), nil } func (n *node) String() string { return n.path }
{ return nil }
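A sketch of typical usage, written as if it sat in a `_test.go` file in the same `filesystem` package. It assumes go-billy's in-memory filesystem (`gopkg.in/src-d/go-billy.v3/memfs`), which is not imported by `node.go` itself:

```go
package filesystem

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v3/memfs"
)

// ExampleNewRootNode sketches how the root node is typically used.
func ExampleNewRootNode() {
	fs := memfs.New() // in-memory billy filesystem
	f, _ := fs.Create("hello.txt")
	f.Write([]byte("hello world\n"))
	f.Close()

	// nil: no submodule HEAD hashes in this toy tree.
	// A ".git" entry, if present, would be skipped via the ignore map.
	root := NewRootNode(fs, nil)
	n, _ := root.NumChildren()
	fmt.Println(n)
	// Output: 1
}
```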
setup.rs
/// A parsed SETUP packet of a control transfer
#[derive(Clone, Copy, Debug, Default)]
pub struct SetupPacket {
    /// bmRequestType
    pub request_type: u8,
    /// bRequest
    pub request: u8,
    /// wValue
    pub value: u16,
    /// wIndex
    pub index: u16,
    /// wLength
    pub length: u16,
}

impl SetupPacket {
    /// Parse a [SetupPacket] from a raw setup packet
    pub fn parse(setup: &[u8; 8]) -> SetupPacket
}
{ SetupPacket { request_type: setup[0], request: setup[1], value: (setup[3] as u16) << 8 | (setup[2] as u16), index: (setup[5] as u16) << 8 | (setup[4] as u16), length: (setup[7] as u16) << 8 | (setup[6] as u16), } }
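The multi-byte fields (wValue, wIndex, wLength) are little-endian in the raw packet, which is why `parse` builds each `u16` from `(high << 8) | low`. A small test sketch, using the standard USB GET_DESCRIPTOR(Device) request bytes; the expected values follow directly from the parse code above:

```rust
#[cfg(test)]
mod tests {
    use super::SetupPacket;

    #[test]
    fn parses_get_descriptor() {
        // GET_DESCRIPTOR(Device) request, wLength = 64.
        let raw: [u8; 8] = [0x80, 0x06, 0x00, 0x01, 0x00, 0x00, 0x40, 0x00];
        let setup = SetupPacket::parse(&raw);
        assert_eq!(setup.request_type, 0x80); // IN | standard | device
        assert_eq!(setup.request, 0x06);      // GET_DESCRIPTOR
        assert_eq!(setup.value, 0x0100);      // descriptor type 1 (device), index 0
        assert_eq!(setup.index, 0);
        assert_eq!(setup.length, 64);
    }
}
```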
SchemaDirectiveVisitor.js
var __extends = (this && this.__extends) || (function () {
    var extendStatics = function (d, b) {
        extendStatics = Object.setPrototypeOf ||
            ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
            function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
        return extendStatics(d, b);
    };
    return function (d, b) {
        extendStatics(d, b);
        function __() { this.constructor = d; }
        d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
    };
})();
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
var values_1 = require("graphql/execution/values");
var each_1 = __importDefault(require("./each"));
var valueFromASTUntyped_1 = __importDefault(require("./valueFromASTUntyped"));
var SchemaVisitor_1 = require("./SchemaVisitor");
var visitSchema_1 = require("./visitSchema");
var hasOwn = Object.prototype.hasOwnProperty;
// This class represents a reusable implementation of a @directive that may
// appear in a GraphQL schema written in Schema Definition Language.
//
// By overriding one or more visit{Object,Union,...} methods, a subclass
// registers interest in certain schema types, such as GraphQLObjectType,
// GraphQLUnionType, etc. When SchemaDirectiveVisitor.visitSchemaDirectives is
// called with a GraphQLSchema object and a map of visitor subclasses, the
// overridden methods of those subclasses allow the visitors to obtain
// references to any type objects that have @directives attached to them,
// enabling visitors to inspect or modify the schema as appropriate.
//
// For example, if a directive called @rest(url: "...") appears after a field
// definition, a SchemaDirectiveVisitor subclass could provide meaning to that
// directive by overriding the visitFieldDefinition method (which receives a
// GraphQLField parameter), and then the body of that visitor method could
// manipulate the field's resolver function to fetch data from a REST endpoint
// described by the url argument passed to the @rest directive:
//
// const typeDefs = `
// type Query {
//   people: [Person] @rest(url: "/api/v1/people")
// }`;
//
// const schema = makeExecutableSchema({ typeDefs });
//
// SchemaDirectiveVisitor.visitSchemaDirectives(schema, {
//   rest: class extends SchemaDirectiveVisitor {
//     public visitFieldDefinition(field: GraphQLField<any, any>) {
//       const { url } = this.args;
//       field.resolve = () => fetch(url);
//     }
//   }
// });
//
// The subclass in this example is defined as an anonymous class expression,
// for brevity. A truly reusable SchemaDirectiveVisitor would most likely be
// defined in a library using a named class declaration, and then exported for
// consumption by other modules and packages.
//
// See below for a complete list of overridable visitor methods, their
// parameter types, and more details about the properties exposed by instances
// of the SchemaDirectiveVisitor class.
var SchemaDirectiveVisitor = /** @class */ (function (_super) {
    __extends(SchemaDirectiveVisitor, _super);
    // Mark the constructor protected to enforce passing SchemaDirectiveVisitor
    // subclasses (not instances) to visitSchemaDirectives.
function SchemaDirectiveVisitor(config) { var _this = _super.call(this) || this; _this.name = config.name; _this.args = config.args; _this.visitedType = config.visitedType; _this.schema = config.schema; _this.context = config.context; return _this; } // Override this method to return a custom GraphQLDirective (or modify one // already present in the schema) to enforce argument types, provide default // argument values, or specify schema locations where this @directive may // appear. By default, any declaration found in the schema will be returned. SchemaDirectiveVisitor.getDirectiveDeclaration = function (directiveName, schema) { return schema.getDirective(directiveName); }; // Call SchemaDirectiveVisitor.visitSchemaDirectives to visit every // @directive in the schema and create an appropriate SchemaDirectiveVisitor // instance to visit the object decorated by the @directive. SchemaDirectiveVisitor.visitSchemaDirectives = function (schema, directiveVisitors, // Optional context object that will be available to all visitor instances // via this.context. Defaults to an empty null-prototype object. context) { if (context === void 0) { context = Object.create(null); } // If the schema declares any directives for public consumption, record // them here so that we can properly coerce arguments when/if we encounter // an occurrence of the directive while walking the schema below. var declaredDirectives = this.getDeclaredDirectives(schema, directiveVisitors); // Map from directive names to lists of SchemaDirectiveVisitor instances // created while visiting the schema. var createdVisitors = Object.create(null); Object.keys(directiveVisitors).forEach(function (directiveName) { createdVisitors[directiveName] = []; }); function visitorSelector(type, methodName) { var visitors = []; var directiveNodes = type.astNode != null ? type.astNode.directives : null; if (!directiveNodes) { return visitors; } directiveNodes.forEach(function (directiveNode) { var directiveName = directiveNode.name.value; if (!hasOwn.call(directiveVisitors, directiveName)) { return; } var visitorClass = directiveVisitors[directiveName]; // Avoid creating visitor objects if visitorClass does not override // the visitor method named by methodName. if (!visitorClass.implementsVisitorMethod(methodName)) { return; } var decl = declaredDirectives[directiveName]; var args; if (decl != null) { // If this directive was explicitly declared, use the declared // argument types (and any default values) to check, coerce, and/or // supply default values for the given arguments. args = values_1.getArgumentValues(decl, directiveNode); } else { // If this directive was not explicitly declared, just convert the // argument nodes to their corresponding JavaScript values. args = Object.create(null); if (directiveNode.arguments != null) { directiveNode.arguments.forEach(function (arg) { args[arg.name.value] = valueFromASTUntyped_1.default(arg.value); }); } } // As foretold in comments near the top of the visitSchemaDirectives // method, this is where instances of the SchemaDirectiveVisitor class // get created and assigned names. While subclasses could override the // constructor method, the constructor is marked as protected, so // these are the only arguments that will ever be passed. 
visitors.push(new visitorClass({ name: directiveName, args: args, visitedType: type, schema: schema, context: context, })); }); if (visitors.length > 0) { visitors.forEach(function (visitor) { createdVisitors[visitor.name].push(visitor); }); } return visitors; } visitSchema_1.visitSchema(schema, visitorSelector); return createdVisitors; }; SchemaDirectiveVisitor.getDeclaredDirectives = function (schema, directiveVisitors) { var declaredDirectives = Object.create(null); each_1.default(schema.getDirectives(), function (decl) { declaredDirectives[decl.name] = decl; }); // If the visitor subclass overrides getDirectiveDeclaration, and it // returns a non-null GraphQLDirective, use that instead of any directive // declared in the schema itself. Reasoning: if a SchemaDirectiveVisitor // goes to the trouble of implementing getDirectiveDeclaration, it should // be able to rely on that implementation. each_1.default(directiveVisitors, function (visitorClass, directiveName) { var decl = visitorClass.getDirectiveDeclaration(directiveName, schema); if (decl != null) { declaredDirectives[directiveName] = decl;
}); each_1.default(declaredDirectives, function (decl, name) { if (!hasOwn.call(directiveVisitors, name)) { // SchemaDirectiveVisitors.visitSchemaDirectives might be called // multiple times with partial directiveVisitors maps, so it's not // necessarily an error for directiveVisitors to be missing an // implementation of a directive that was declared in the schema. return; } var visitorClass = directiveVisitors[name]; each_1.default(decl.locations, function (loc) { var visitorMethodName = directiveLocationToVisitorMethodName(loc); if (SchemaVisitor_1.SchemaVisitor.implementsVisitorMethod(visitorMethodName) && !visitorClass.implementsVisitorMethod(visitorMethodName)) { // While visitor subclasses may implement extra visitor methods, // it's definitely a mistake if the GraphQLDirective declares itself // applicable to certain schema locations, and the visitor subclass // does not implement all the corresponding methods. throw new Error("SchemaDirectiveVisitor for @" + name + " must implement " + visitorMethodName + " method"); } }); }); return declaredDirectives; }; return SchemaDirectiveVisitor; }(SchemaVisitor_1.SchemaVisitor)); exports.SchemaDirectiveVisitor = SchemaDirectiveVisitor; // Convert a string like "FIELD_DEFINITION" to "visitFieldDefinition". function directiveLocationToVisitorMethodName(loc) { return ('visit' + loc.replace(/([^_]*)_?/g, function (_wholeMatch, part) { return part.charAt(0).toUpperCase() + part.slice(1).toLowerCase(); })); } //# sourceMappingURL=SchemaDirectiveVisitor.js.map
}
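A small illustration of the location-to-method-name conversion that `directiveLocationToVisitorMethodName` performs, copied standalone so it runs outside the module:

```js
// Standalone copy of the conversion helper above, for illustration only.
function directiveLocationToVisitorMethodName(loc) {
    return ('visit' + loc.replace(/([^_]*)_?/g, function (_wholeMatch, part) {
        return part.charAt(0).toUpperCase() + part.slice(1).toLowerCase();
    }));
}

console.log(directiveLocationToVisitorMethodName('FIELD_DEFINITION')); // visitFieldDefinition
console.log(directiveLocationToVisitorMethodName('OBJECT'));           // visitObject
console.log(directiveLocationToVisitorMethodName('ENUM_VALUE'));       // visitEnumValue
```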
test_dates.py
# coding: utf-8 import pytest import mock import workdays import datetime import dmutils.dates as dates_package class TestPublishingDates(): def test_get_publishing_dates_formats_time(self): with mock.patch('dmutils.dates.datetime') as mock_date: mock_date.utcnow.return_value = datetime.datetime(2015, 5, 22, 20, 39, 39, 417900) brief = { 'requirementsLength': '1 week', } assert dates_package.datetime.utcnow() == datetime.datetime(2015, 5, 22, 20, 39, 39, 417900) assert dates_package.get_publishing_dates(brief)['closing_time'] == '11:59 pm' def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_monday(self): with mock.patch('dmutils.dates.datetime') as mock_date: mock_date.utcnow.return_value = datetime.datetime(2016, 7, 4, 15, 21, 39, 417900) brief = { 'requirementsLength': '1 week', } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 7, 6, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 7, 8, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 7, 11, 23, 59, 59) def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_tuesday(self): with mock.patch('dmutils.dates.datetime') as mock_date: mock_date.utcnow.return_value = datetime.datetime(2016, 7, 5, 15, 21, 39, 417900) brief = { 'requirementsLength': '1 week', } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 7, 7, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 7, 11, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 7, 12, 23, 59, 59) def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_wednesday(self): with mock.patch('dmutils.dates.datetime') as mock_date: mock_date.utcnow.return_value = datetime.datetime(2016, 7, 6, 15, 21, 39, 417900) brief = { 'requirementsLength': '1 week', } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 7, 8, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 7, 13, 23, 59, 59) def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_thursday(self): with mock.patch('dmutils.dates.datetime') as mock_date: mock_date.utcnow.return_value = datetime.datetime(2016, 7, 7, 15, 21, 39, 417900) brief = { 'requirementsLength': '1 week', } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 7, 11, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 7, 13, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 7, 14, 23, 59, 59) def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_friday(self): with mock.patch('dmutils.dates.datetime') as mock_date: mock_date.utcnow.return_value = datetime.datetime(2016, 7, 8, 15, 21, 39, 417900) brief = { 'requirementsLength': '1 week', 'lotSlug': 'digital-specialists' } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 7, 14, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 7, 15, 23, 59, 59) def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_saturday(self): with mock.patch('dmutils.dates.datetime') as mock_date: mock_date.utcnow.return_value = 
datetime.datetime(2016, 7, 9, 15, 21, 39, 417900) brief = { 'requirementsLength': '1 week', } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 7, 16, 23, 59, 59) def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_sunday(self): with mock.patch('dmutils.dates.datetime') as mock_date: mock_date.utcnow.return_value = datetime.datetime(2016, 7, 10, 15, 21, 39, 417900) brief = { 'requirementsLength': '1 week', } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 7, 17, 23, 59, 59) def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_monday(self): with mock.patch('dmutils.dates.datetime') as mock_date: mock_date.utcnow.return_value = datetime.datetime(2016, 7, 4, 15, 21, 39, 417900) brief = { 'requirementsLength': '2 weeks', } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 7, 11, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 7, 18, 23, 59, 59) def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_tuesday(self): with mock.patch('dmutils.dates.datetime') as mock_date: mock_date.utcnow.return_value = datetime.datetime(2016, 7, 5, 15, 21, 39, 417900) brief = { 'requirementsLength': '2 weeks', } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 7, 18, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 7, 19, 23, 59, 59) def
(self): with mock.patch('dmutils.dates.datetime') as mock_date: mock_date.utcnow.return_value = datetime.datetime(2016, 7, 6, 15, 21, 39, 417900) brief = { 'requirementsLength': '2 weeks', } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 7, 13, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 7, 19, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 7, 20, 23, 59, 59) def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_thursday(self): with mock.patch('dmutils.dates.datetime') as mock_date: mock_date.utcnow.return_value = datetime.datetime(2016, 7, 7, 15, 21, 39, 417900) brief = { 'requirementsLength': '2 weeks', } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 7, 14, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 7, 20, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 7, 21, 23, 59, 59) def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_friday(self): with mock.patch('dmutils.dates.datetime') as mock_date: mock_date.utcnow.return_value = datetime.datetime(2016, 7, 8, 15, 21, 39, 417900) brief = { 'requirementsLength': '2 weeks', } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 7, 21, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 7, 22, 23, 59, 59) def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_saturday(self): with mock.patch('dmutils.dates.datetime') as mock_date: mock_date.utcnow.return_value = datetime.datetime(2016, 7, 9, 15, 21, 39, 417900) brief = { 'requirementsLength': '2 weeks', } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 7, 22, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 7, 23, 23, 59, 59) def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_sunday(self): with mock.patch('dmutils.dates.datetime') as mock_date: mock_date.utcnow.return_value = datetime.datetime(2016, 7, 10, 15, 21, 39, 417900) brief = { 'requirementsLength': '2 weeks', } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 7, 22, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 7, 24, 23, 59, 59) def test_get_publishing_dates_returns_correct_dates_if_brief_is_published_with_no_requirementLength(self): brief = { 'publishedAt': u'2016-01-04T12:00:00.00000Z', } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 1, 11, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 1, 15, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 1, 18, 23, 59, 59) def test_get_publishing_dates_returns_correct_dates_if_brief_is_published_with_1_week_requirementsLength(self): brief = { 'publishedAt': u'2016-01-04T12:00:00.00000Z', 'requirementsLength': '1 week' } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 1, 6, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 1, 8, 23, 59, 59) assert 
dates['closing_date'] == datetime.datetime(2016, 1, 11, 23, 59, 59) def test_get_publishing_dates_returns_correct_dates_if_brief_is_published_with_2_week_requirementsLength(self): brief = { 'publishedAt': u'2016-01-04T12:00:00.00000Z', 'requirementsLength': '2 weeks' } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 1, 11, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 1, 15, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 1, 18, 23, 59, 59) def test_get_publishing_dates_returns_correct_dates_if_published_at_key_is_a_date_object(self): brief = { 'publishedAt': datetime.datetime(2016, 1, 4, 12, 0, 0), } dates = dates_package.get_publishing_dates(brief) assert dates['questions_close'] == datetime.datetime(2016, 1, 11, 23, 59, 59) assert dates['answers_close'] == datetime.datetime(2016, 1, 15, 23, 59, 59) assert dates['closing_date'] == datetime.datetime(2016, 1, 18, 23, 59, 59)
test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_wednesday
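The assertions above are all consistent with a simple rule: the closing date is publication plus 7 (or 14) calendar days, questions close 2 (or 5) working days after publication, and answers close on the last working day before closing, each at 23:59:59. The sketch below is inferred from the test data only, ignores holidays (the tests import workdays, which suggests the real code handles them), and is not the actual dmutils implementation:

```python
import datetime

def add_workdays(day, n):
    """Step forward n weekdays; holidays are ignored in this sketch."""
    while n > 0:
        day += datetime.timedelta(days=1)
        if day.weekday() < 5:  # Mon..Fri
            n -= 1
    return day

def previous_workday(day):
    day -= datetime.timedelta(days=1)
    while day.weekday() >= 5:  # skip Sat/Sun
        day -= datetime.timedelta(days=1)
    return day

def inferred_publishing_dates(published_at, requirements_length='2 weeks'):
    """Rule inferred from the assertions above; not the actual dmutils code."""
    one_week = requirements_length == '1 week'
    end_of_day = lambda d: datetime.datetime(d.year, d.month, d.day, 23, 59, 59)
    pub = published_at.date()
    closing = pub + datetime.timedelta(days=7 if one_week else 14)
    return {
        'questions_close': end_of_day(add_workdays(pub, 2 if one_week else 5)),
        'answers_close': end_of_day(previous_workday(closing)),
        'closing_date': end_of_day(closing),
    }

# Reproduces the Monday one-week case above:
assert inferred_publishing_dates(
    datetime.datetime(2016, 7, 4, 15, 21), '1 week'
)['closing_date'] == datetime.datetime(2016, 7, 11, 23, 59, 59)
```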
get_version.py
# -*- coding: utf-8 -*- # --------------------------------------------------------------------- # AlliedTelesis.AT8100.get_version # --------------------------------------------------------------------- # Copyright (C) 2007-2018 The NOC Project # See LICENSE for details # --------------------------------------------------------------------- """ """ from noc.core.script.base import BaseScript from noc.sa.interfaces.igetversion import IGetVersion import re class Script(BaseScript): name = "AlliedTelesis.AT8100.get_version" cache = True interface = IGetVersion
rx_boot = re.compile(r"^Bootloader version\s+:\s+(?P<bootprom>\S+)\s*\n", re.MULTILINE) rx_version = re.compile(r"^Software version\s+:\s+(?P<version>\S+)\s*\n", re.MULTILINE) def execute_cli(self): v = self.cli("show system") match1 = self.rx_plat.search(v) match2 = self.rx_boot.search(v) match3 = self.rx_version.search(v) return { "vendor": "Allied Telesis", "platform": match1.group("platform"), "version": match3.group("version"), "attributes": { "Boot PROM": match2.group("bootprom"), "HW version": match1.group("hardware"), "Serial Number": match1.group("serial"), }, }
rx_plat = re.compile( r"^Base\s+(?P<platform>AT-81\S+)\s+(?P<hardware>\S+)\s+(?P<serial>\S+)\s*\n", re.MULTILINE )
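A quick sketch of what the three regexes extract, run against a made-up "show system" snippet shaped only to satisfy the patterns above; real AT8100 output may differ:

```python
import re

# Hypothetical device output; field values are invented.
sample = (
    "Bootloader version : 1.1.0\n"
    "Software version   : 5.4.8-2.1\n"
    "Base  AT-8100S/24  A-0  A04550G101600022\n"
)

rx_boot = re.compile(r"^Bootloader version\s+:\s+(?P<bootprom>\S+)\s*\n", re.MULTILINE)
rx_version = re.compile(r"^Software version\s+:\s+(?P<version>\S+)\s*\n", re.MULTILINE)
rx_plat = re.compile(
    r"^Base\s+(?P<platform>AT-81\S+)\s+(?P<hardware>\S+)\s+(?P<serial>\S+)\s*\n",
    re.MULTILINE
)

print(rx_boot.search(sample).group("bootprom"))    # 1.1.0
print(rx_version.search(sample).group("version"))  # 5.4.8-2.1
print(rx_plat.search(sample).group("platform"))    # AT-8100S/24
```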
nonexhaustive.rs
/*! Contains `NonExhaustive<>` and related items. */ use std::{ cmp::{Ordering,PartialEq,Eq,Ord,PartialOrd}, fmt::{self,Debug,Display}, hash::{Hash,Hasher}, marker::PhantomData, mem::ManuallyDrop, ops::Deref, }; use crate::{ abi_stability::StableAbi, erased_types::{ c_functions, trait_objects::{ HasherObject, }, InterfaceBound, }, inline_storage::ScratchSpace, marker_type::ErasedObject, nonexhaustive_enum::{ vtable::NonExhaustiveVtable_Ref, GetVTable,GetEnumInfo,GetNonExhaustive, ValidDiscriminant,EnumInfo, SerializeEnum,DeserializeEnum, }, pointer_trait::{CanTransmuteElement,TransmuteElement}, type_level::{ impl_enum::Implemented, trait_marker, }, std_types::RBoxError, traits::IntoReprRust, }; use core_extensions::{ utils::transmute_ignore_size, }; use serde::{ser,de,Serialize,Deserialize,Serializer,Deserializer}; // #[cfg(test)] #[cfg(all(test,not(feature="only_new_tests")))] mod tests; /** A generic type for all ffi-safe non-exhaustive enums. This type allows adding variants to enums it wraps in ABI compatible versions of a library. # Generic parameters ### `E` This is the enum that this was constructed from, and can be unwrapped back into if it's one of the valid variants in this context. ### `S` The storage type,used to store the enum opaquely. This has to be at least the size and alignment of the wrapped enum. This is necessary because: - The compiler assumes that an enum cannot be a variant outside the ones it sees. - To give some flexibility to grow the enum in semver compatible versions of a library. ### `I` The interface of the enum(it implements `InterfaceType`), determining which traits are required when constructing `NonExhaustive<>` and which are available afterwards. ### Example Say that we define an error type for a library. Version 1.0. ``` use abi_stable::{ StableAbi, nonexhaustive_enum::{NonExhaustiveFor,NonExhaustive}, std_types::RString, sabi_trait, }; #[repr(u8)] #[derive(StableAbi,Debug,Clone,PartialEq)] #[sabi(kind(WithNonExhaustive( size="[usize;8]", traits(Debug,Clone,PartialEq), )))] pub enum Error{ #[doc(hidden)] __NonExhaustive, CouldNotFindItem{ name:RString, }, OutOfStock{ id:usize, name:RString, }, } fn returns_could_not_find_item(name:RString)->NonExhaustiveFor<Error>{ let e=Error::CouldNotFindItem{name}; NonExhaustive::new(e) } fn returns_out_of_stock(id:usize,name:RString)->NonExhaustiveFor<Error>{ let e=Error::OutOfStock{id,name}; NonExhaustive::new(e) } ``` Then in 1.1 we add another error variant,returned only by new library functions. ``` use abi_stable::{ StableAbi, nonexhaustive_enum::{NonExhaustiveFor,NonExhaustive}, std_types::RString, sabi_trait, }; #[repr(u8)] #[derive(StableAbi,Debug,Clone,PartialEq)] #[sabi(kind(WithNonExhaustive( size="[usize;8]", traits(Debug,Clone,PartialEq), )))] pub enum Error{ #[doc(hidden)] __NonExhaustive, CouldNotFindItem{ name:RString, }, OutOfStock{ id:usize, name:RString, }, InvalidItemId{ id:usize, }, } fn returns_invalid_item_id()->NonExhaustiveFor<Error>{ NonExhaustive::new(Error::InvalidItemId{id:100}) } ``` If a library user attempted to unwrap `Error::InvalidItemId` (using NonExhaustive::as_enum/as_enum_mut/into_enum) with the 1.0 version of `Error` they would get an `Err(..)` back. 
*/ #[repr(C)] #[derive(StableAbi)] #[sabi( //debug_print, not_stableabi(E,S,I), bound="NonExhaustiveVtable_Ref<E,S,I>:StableAbi", bound="E: GetNonExhaustive<S>", bound="I: InterfaceBound", extra_checks="<I as InterfaceBound>::EXTRA_CHECKS", phantom_type_param="<E as GetNonExhaustive<S>>::NonExhaustive", )] pub struct NonExhaustive<E,S,I>{ // This is an opaque field since we only care about its size and alignment #[sabi(unsafe_opaque_field)] fill:ScratchSpace<S>, vtable:NonExhaustiveVtable_Ref<E,S,I>, _marker:PhantomData<()>, } /// The type of a `NonExhaustive<>` wrapping the enum E, /// using the `E`'s default storage and interface. pub type NonExhaustiveFor<E>= NonExhaustive< E, <E as GetEnumInfo>::DefaultStorage, <E as GetEnumInfo>::DefaultInterface, >; /// The type of a `NonExhaustive<>` wrapping the enum E, /// using the `E`'s default storage and a custom interface. pub type NonExhaustiveWI<E,I>= NonExhaustive< E, <E as GetEnumInfo>::DefaultStorage, I, >; /// The type of a `NonExhaustive<>` wrapping the enum E, /// using a custom storage and the `E`'s default interface. pub type NonExhaustiveWS<E,S>= NonExhaustive< E, S, <E as GetEnumInfo>::DefaultInterface, >; impl<E,S,I> NonExhaustive<E,S,I>{ /** Constructs a `NonExhaustive<>` from `value` using its default interface and storage. # Panic This panics if the storage has an alignment or size smaller than that of `E`. */ #[inline] pub fn new(value:E)->Self where E:GetVTable<S,I,DefaultStorage=S,DefaultInterface=I>, { NonExhaustive::with_storage_and_interface(value) } /** Constructs a `NonExhaustive<>` from `value` using its default storage and a custom interface. # Panic This panics if the storage has an alignment or size smaller than that of `E`. */ #[inline] pub fn with_interface(value:E)->Self where E:GetVTable<S,I,DefaultStorage=S>, { NonExhaustive::with_storage_and_interface(value) } /** Constructs a `NonExhaustive<>` from `value` using its default interface and a custom storage. # Panic This panics if the storage has an alignment or size smaller than that of `E`. */ #[inline] pub fn with_storage(value:E)->Self where E:GetVTable<S,I,DefaultInterface=I>, { NonExhaustive::with_storage_and_interface(value) } /** Constructs a `NonExhaustive<>` from `value` using both a custom interface and storage. # Panic This panics if the storage has an alignment or size smaller than that of `E`. */ pub fn with_storage_and_interface(value:E)->Self where E:GetVTable<S,I>, { unsafe{ NonExhaustive::with_vtable(value,E::VTABLE_REF) } } pub(super) unsafe fn with_vtable( value:E, vtable:NonExhaustiveVtable_Ref<E,S,I> )->Self{ Self::assert_fits_within_storage(); let mut this=Self{ fill:unsafe{ // The fact that the vtable was constructed ensures that // `Inline` implements `InlineStorage` ScratchSpace::uninit_unbounded() }, vtable, _marker:PhantomData }; (&mut this.fill as *mut ScratchSpace<S> as *mut E).write(value); this } /// Checks that the alignment of `E` is correct,returning `true` if it is. pub fn check_alignment()->bool{ let align_enum=std::mem::align_of::<E>(); let align_storage=std::mem::align_of::<S>(); align_enum <= align_storage } /// Checks that the size of `E` is correct,returning `true` if it is. pub fn check_size()->bool{ let size_enum=std::mem::size_of::<E>(); let size_storage=std::mem::size_of::<S>(); size_enum <= size_storage } /// Asserts that `E` fits within `S`,with the correct alignment and size. 
pub fn assert_fits_within_storage(){ let align_enum=std::mem::align_of::<E>(); let align_storage=std::mem::align_of::<S>(); assert!( Self::check_alignment(), "The alignment of the storage is lower than the enum:\n\t{} < {}", align_storage,align_enum, ); let size_enum=std::mem::size_of::<E>(); let size_storage=std::mem::size_of::<S>(); assert!( Self::check_size(), "The size of the storage is smaller than the enum:\n\t{} < {}", size_storage,size_enum, ); } } impl<E,S,I> NonExhaustive<E,S,I> where E:GetEnumInfo { /** Unwraps a reference to this `NonExhaustive<>` into a reference to the original enum. # Errors This returns an error if the wrapped enum is of a variant that is not valid in this context. # Example This shows how some `NonExhaustive<enum>` can be unwrapped, and others cannot.<br> That enum comes from a newer version of the library than this knows. ``` use abi_stable::nonexhaustive_enum::{ doc_enums::example_2::{Foo,new_a,new_b,new_c}, }; assert_eq!(new_a() .as_enum().ok(),Some(&Foo::A) ); assert_eq!(new_b(10).as_enum().ok(),Some(&Foo::B(10))); assert_eq!(new_b(77).as_enum().ok(),Some(&Foo::B(77))); assert_eq!(new_c().as_enum().ok() ,None ); ``` */ pub fn as_enum(&self)->Result<&E,UnwrapEnumError<&Self>>{ let discriminant=self.get_discriminant(); if E::is_valid_discriminant(discriminant) { unsafe{ Ok(&*(&self.fill as *const ScratchSpace<S> as *const E)) } }else{ Err(UnwrapEnumError::new(self)) } } /** Unwraps a mutable reference to this `NonExhaustive<>` into a mutable reference to the original enum. # Errors This returns an error if the wrapped enum is of a variant that is not valid in this context. # Example This shows how some `NonExhaustive<enum>` can be unwrapped, and others cannot.<br> That enum comes from a newer version of the library than this knows. ``` use abi_stable::nonexhaustive_enum::{ doc_enums::example_1::{Foo,new_a,new_b,new_c}, }; assert_eq!(new_a() .as_enum_mut().ok(),Some(&mut Foo::A)); assert_eq!(new_b(10).as_enum_mut().ok(),None); assert_eq!(new_b(77).as_enum_mut().ok(),None); assert_eq!(new_c().as_enum_mut().ok() ,None); ``` */ pub fn as_enum_mut(&mut self)->Result<&mut E,UnwrapEnumError<&mut Self>> where E:GetVTable<S,I>, { let discriminant=self.get_discriminant(); if E::is_valid_discriminant(discriminant) { /* Must update the vtable every time as_enum_mut is called, because if the enum is replaced with a variant with a discriminant outside the valid range for the functions in the vtable, it would be undefined behavior to call those functions. */ self.vtable=E::VTABLE_REF; unsafe{ Ok(&mut *(&mut self.fill as *mut ScratchSpace<S> as *mut E)) } }else{ Err(UnwrapEnumError::new(self)) } } /** Unwraps this `NonExhaustive<>` into the original enum. # Errors This returns an error if the wrapped enum is of a variant that is not valid in this context. # Example This shows how some `NonExhaustive<enum>` can be unwrapped, and others cannot.<br> That enum comes from a newer version of the library than this knows. 
```
use abi_stable::nonexhaustive_enum::{
    doc_enums::example_2::{Foo,new_a,new_b,new_c},
};

assert_eq!(new_a()  .into_enum().ok(),Some(Foo::A));
assert_eq!(new_b(10).into_enum().ok(),Some(Foo::B(10)));
assert_eq!(new_b(77).into_enum().ok(),Some(Foo::B(77)));
assert_eq!(new_c().into_enum().ok()  ,None);

```

*/
    pub fn into_enum(self)->Result<E,UnwrapEnumError<Self>>{
        let discriminant=self.get_discriminant();

        if E::is_valid_discriminant(discriminant) {
            let this=ManuallyDrop::new(self);
            unsafe{
                Ok((&this.fill as *const ScratchSpace<S> as *const E).read())
            }
        }else{
            Err(UnwrapEnumError::new(self))
        }
    }

    /**
Returns whether the discriminant of this enum is valid in this context.

The only way for it to be invalid is if the dynamic library is a
newer version than this knows.
    */
    #[inline]
    pub fn is_valid_discriminant(&self)->bool{
        E::is_valid_discriminant(self.get_discriminant())
    }

    /**
Gets the value of the discriminant of the enum.
    */
    #[inline]
    pub fn get_discriminant(&self)->E::Discriminant{
        unsafe{
            *(&self.fill as *const ScratchSpace<S> as *const E::Discriminant)
        }
    }
}

impl<E,S,I> NonExhaustive<E,S,I>{
    /**
Transmute this `NonExhaustive<E,S,I>` into `NonExhaustive<F,S,I>`,
changing the type of the enum it wraps.

# Safety

This has the same safety requirements that `std::mem::transmute` has.

# Panics

This panics if the storage has an alignment or size smaller than that of `F`.
    */
    pub unsafe fn transmute_enum<F>(self)->NonExhaustive<F,S,I>{
        NonExhaustive::<F,S,I>::assert_fits_within_storage();
        transmute_ignore_size(self)
    }

    /**
Transmute this `&NonExhaustive<E,S,I>` into `&NonExhaustive<F,S,I>`,
changing the type of the enum it wraps.

# Safety

This has the same safety requirements that `std::mem::transmute` has.

# Panics

This panics if the storage has an alignment or size smaller than that of `F`.
    */
    pub unsafe fn transmute_enum_ref<F>(&self)->&NonExhaustive<F,S,I>{
        NonExhaustive::<F,S,I>::assert_fits_within_storage();
        &*(self as *const Self as *const _)
    }

    /**
Transmute this `&mut NonExhaustive<E,S,I>` into `&mut NonExhaustive<F,S,I>`,
changing the type of the enum it wraps.

# Safety

This has the same safety requirements that `std::mem::transmute` has.

# Panics

This panics if the storage has an alignment or size smaller than that of `F`.
    */
    pub unsafe fn transmute_enum_mut<F>(&mut self)->&mut NonExhaustive<F,S,I>{
        NonExhaustive::<F,S,I>::assert_fits_within_storage();
        &mut *(self as *mut Self as *mut _)
    }

    /**
Transmute this pointer to a `NonExhaustive<E,S,I>` into a pointer
(of the same kind) to a `NonExhaustive<F,S,I>`,
changing the type of the enum it wraps.

# Safety

This has the same safety requirements that
`abi_stable::pointer_traits::TransmuteElement::transmute_element` has.

# Panics

This panics if the storage has an alignment or size smaller than that of `F`.
    */
    pub unsafe fn transmute_enum_ptr<P,F>(this:P)->P::TransmutedPtr
    where
        P:Deref<Target=Self>,
        P:CanTransmuteElement<NonExhaustive<F,S,I>>
/// Gets a reference to the vtable of this `NonExhaustive<>`. pub(crate) fn vtable(&self)->NonExhaustiveVtable_Ref<E,S,I>{ self.vtable } fn sabi_erased_ref(&self)->&ErasedObject{ unsafe{ &*(&self.fill as *const ScratchSpace<S> as *const ErasedObject) } } fn as_erased_ref(&self)->&ErasedObject{ unsafe{ &*(self as *const Self as *const ErasedObject) } } fn sabi_erased_mut(&mut self)->&mut ErasedObject{ unsafe{ &mut *(&mut self.fill as *mut ScratchSpace<S> as *mut ErasedObject) } } } impl<E,S,I> Clone for NonExhaustive<E,S,I> where I: InterfaceBound<Clone = Implemented<trait_marker::Clone>>, { fn clone(&self)->Self{ unsafe{ self.vtable().clone_()(self.sabi_erased_ref(),self.vtable) } } } impl<E,S,I> Display for NonExhaustive<E,S,I> where I: InterfaceBound<Display = Implemented<trait_marker::Display>>, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { unsafe{ c_functions::adapt_std_fmt::<ErasedObject>( self.sabi_erased_ref(), self.vtable().display(), f ) } } } impl<E,S,I> Debug for NonExhaustive<E,S,I> where I: InterfaceBound<Debug = Implemented<trait_marker::Debug>>, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { unsafe{ c_functions::adapt_std_fmt::<ErasedObject>( self.sabi_erased_ref(), self.vtable().debug(), f ) } } } impl<E,S,I> Eq for NonExhaustive<E,S,I> where Self: PartialEq, I: InterfaceBound<Eq = Implemented<trait_marker::Eq>>, { } impl<E,S,I1,I2> PartialEq<NonExhaustive<E,S,I2>> for NonExhaustive<E,S,I1> where I1: InterfaceBound<PartialEq = Implemented<trait_marker::PartialEq>>, { fn eq(&self, other: &NonExhaustive<E,S,I2>) -> bool { unsafe{ self.vtable().partial_eq()(self.sabi_erased_ref(), other.as_erased_ref()) } } } impl<E,S,I> Ord for NonExhaustive<E,S,I> where I: InterfaceBound<Ord = Implemented<trait_marker::Ord>>, Self: PartialOrd + Eq, { fn cmp(&self, other: &Self) -> Ordering { unsafe{ self.vtable().cmp()(self.sabi_erased_ref(), other.as_erased_ref()).into() } } } impl<E,S,I1,I2> PartialOrd<NonExhaustive<E,S,I2>> for NonExhaustive<E,S,I1> where I1: InterfaceBound<PartialOrd = Implemented<trait_marker::PartialOrd>>, Self: PartialEq<NonExhaustive<E,S,I2>>, { fn partial_cmp(&self, other: &NonExhaustive<E,S,I2>) -> Option<Ordering> { unsafe{ self.vtable().partial_cmp()(self.sabi_erased_ref(), other.as_erased_ref()) .map(IntoReprRust::into_rust) .into() } } } ///////////////////// impl<E,S,I> PartialOrd<E> for NonExhaustive<E,S,I> where E: GetEnumInfo+PartialOrd, I: InterfaceBound<PartialOrd = Implemented<trait_marker::PartialOrd>>, Self: PartialEq<E>, { fn partial_cmp(&self, other: &E) -> Option<Ordering> { unsafe{ match self.as_enum() { Ok(this)=>this.partial_cmp(other), Err(_)=>Some(Ordering::Greater), } } } } impl<E,S,I> PartialEq<E> for NonExhaustive<E,S,I> where E: GetEnumInfo+PartialEq, I: InterfaceBound<PartialEq = Implemented<trait_marker::PartialEq>>, { fn eq(&self, other: &E) -> bool { match self.as_enum() { Ok(this)=>this==other, Err(_)=>false, } } } ///////////////////// impl<E,S,I> NonExhaustive<E,S,I>{ /// It serializes a `NonExhaustive<_>` into a proxy. pub fn serialize_into_proxy(&self) -> Result<I::Proxy, RBoxError> where I: InterfaceBound<Serialize=Implemented<trait_marker::Serialize>>, I: SerializeEnum<NonExhaustive<E,S,I>>, { unsafe{ self.vtable().serialize()(self.sabi_erased_ref()).into_result() } } /// Deserializes a `NonExhaustive<_>` from a proxy. 
    pub fn deserialize_from_proxy<'borr>(proxy: I::Proxy) -> Result<Self, RBoxError>
    where
        I: InterfaceBound<Deserialize= Implemented<trait_marker::Deserialize>>,
        I: DeserializeEnum<'borr,NonExhaustive<E,S,I>>,
        I::Proxy:'borr,
        E:GetEnumInfo,
    {
        I::deserialize_enum(proxy)
    }
}

/**
First it serializes a `NonExhaustive<_>` into a proxy,then it serializes that proxy.
*/
impl<E,S,I> Serialize for NonExhaustive<E,S,I>
where
    I: InterfaceBound<Serialize = Implemented<trait_marker::Serialize>>,
    I: SerializeEnum<NonExhaustive<E,S,I>>,
    I::Proxy:Serialize,
{
    fn serialize<Z>(&self, serializer: Z) -> Result<Z::Ok, Z::Error>
    where
        Z: Serializer,
    {
        unsafe{
            self.vtable().serialize()(self.sabi_erased_ref())
                .into_result()
                .map_err(ser::Error::custom)?
                .serialize(serializer)
        }
    }
}

/// First it deserializes a proxy,then it deserializes that proxy into a
/// `NonExhaustive<_>`,by using `<I as DeserializeEnum>::deserialize_enum`.
impl<'de,E,S,I> Deserialize<'de> for NonExhaustive<E,S,I>
where
    E: 'de+GetVTable<S,I>,
    S: 'de,
    I: 'de+InterfaceBound<Deserialize=Implemented<trait_marker::Deserialize>>,
    I: DeserializeEnum<'de,NonExhaustive<E,S,I>>,
    <I as DeserializeEnum<'de,NonExhaustive<E,S,I>>>::Proxy:Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let s = <
            <I as DeserializeEnum<'de,NonExhaustive<E,S,I>>>::Proxy as
            Deserialize
        >::deserialize(deserializer)?;
        I::deserialize_enum(s).map_err(de::Error::custom)
    }
}

/////////////////////

impl<E,S,I> Hash for NonExhaustive<E,S,I>
where
    I: InterfaceBound<Hash = Implemented<trait_marker::Hash>>,
{
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        unsafe{
            self.vtable().hash()(self.sabi_erased_ref(), HasherObject::new(state))
        }
    }
}

impl<E,S,I> std::error::Error for NonExhaustive<E,S,I>
where
    I: InterfaceBound<
        Debug = Implemented<trait_marker::Debug>,
        Display = Implemented<trait_marker::Display>,
        Error = Implemented<trait_marker::Error>
    >,
{}

/////////////////////

impl<E,S,I> Drop for NonExhaustive<E,S,I>{
    fn drop(&mut self){
        let drop=self.vtable()._sabi_drop();

        unsafe{
            drop(self.sabi_erased_mut());
        }
    }
}

///////////////////////////////////////////////////////////////////////////////

/// Used to abstract over the reference-ness of `NonExhaustive<>` inside UnwrapEnumError.
pub trait NonExhaustiveSharedOps{
    /// The type of the discriminant of the wrapped enum.
    type Discriminant:ValidDiscriminant;

    /// Gets the discriminant of the wrapped enum.
    fn get_discriminant_(&self)->Self::Discriminant;

    /// Gets miscellaneous information about the wrapped enum
    fn enum_info_(&self)->&'static EnumInfo;
}

/// A struct storing the discriminant and `EnumInfo` of some enum.
pub struct DiscrAndEnumInfo<E>{
    discr:E,
    enum_info:&'static EnumInfo,
}

impl<E> DiscrAndEnumInfo<E>{
    /// Constructs this `DiscrAndEnumInfo`.
    pub fn new(discr:E,enum_info:&'static EnumInfo)->Self{
        Self{discr,enum_info}
    }
    /// The value of the enum discriminant.
    pub fn discr(&self)->E
    where
        E:ValidDiscriminant
    {
        self.discr
    }
    /// The `EnumInfo` of an enum.
    pub fn enum_info(&self)->&'static EnumInfo{
        self.enum_info
    }
}

impl<E> NonExhaustiveSharedOps for DiscrAndEnumInfo<E>
where
    E:ValidDiscriminant
{
    type Discriminant=E;
    fn get_discriminant_(&self)->E{
        self.discr
    }
    fn enum_info_(&self)->&'static EnumInfo{
        self.enum_info
    }
}

macro_rules!
impl_neso { ( impl[$E:ident,$S:ident,$I:ident] ) => ( type Discriminant=$E::Discriminant; fn get_discriminant_(&self)->$E::Discriminant { self.get_discriminant() } fn enum_info_(&self)->&'static EnumInfo{ self.vtable().enum_info() } ) } impl<E,S,I> NonExhaustiveSharedOps for NonExhaustive<E,S,I> where E:GetEnumInfo, { impl_neso!{ impl[E,S,I] } } impl<'a,E,S,I> NonExhaustiveSharedOps for &'a NonExhaustive<E,S,I> where E:GetEnumInfo, { impl_neso!{ impl[E,S,I] } } impl<'a,E,S,I> NonExhaustiveSharedOps for &'a mut NonExhaustive<E,S,I> where E:GetEnumInfo, { impl_neso!{ impl[E,S,I] } } /////////////////////////////////////////////////////////////////////////////// /** An error for a situation where a `NonExhaustive<>` could not be unwrapped into the enum because the discriminant wasn't valid in this context (likely because it is from a newer version of the library). */ #[must_use] #[repr(transparent)] #[derive(Clone,PartialEq,Eq,PartialOrd,Ord,StableAbi)] pub struct UnwrapEnumError<N>{ /// This field is either a `NonExhaustive<>` or a `DiscrAndEnumInfo<>` pub non_exhaustive:N, _priv:(), } impl<N> UnwrapEnumError<N>{ /// Gets the `non_exhaustive` field. #[must_use] pub fn into_inner(self)->N{ self.non_exhaustive } /// Converts this into a boxed error. pub fn into_boxed(self)->RBoxError where N:NonExhaustiveSharedOps, { let x=DiscrAndEnumInfo{ discr:self.non_exhaustive.get_discriminant_(), enum_info:self.non_exhaustive.enum_info_(), }; let x=UnwrapEnumError::new(x); RBoxError::new(x) } } impl<N> UnwrapEnumError<N>{ #[inline] const fn new(non_exhaustive:N)->Self{ Self{ non_exhaustive, _priv:(), } } } impl<N> Display for UnwrapEnumError<N> where N:NonExhaustiveSharedOps, { fn fmt(&self,f:&mut fmt::Formatter<'_>)->fmt::Result{ write!( f, "Could not unwrap NonExhaustive into '{}'.\n\ Because its discriminant was {:?} .", self.non_exhaustive.enum_info_().type_name(), self.non_exhaustive.get_discriminant_(), ) } } impl<N> Debug for UnwrapEnumError<N> where N:NonExhaustiveSharedOps, { fn fmt(&self,f:&mut fmt::Formatter<'_>)->fmt::Result{ f.debug_struct("UnwrapEnumError") .field("non_exhaustive",&"<opaque>") .field("discriminant",&self.non_exhaustive.get_discriminant_()) .field("enum_info",&self.non_exhaustive.enum_info_()) .finish() } } impl<N> From<UnwrapEnumError<N>> for RBoxError where N:NonExhaustiveSharedOps { fn from(uee:UnwrapEnumError<N>)->RBoxError{ uee.into_boxed() } } impl<N> std::error::Error for UnwrapEnumError<N> where N:NonExhaustiveSharedOps, {}
{ NonExhaustive::<F,S,I>::assert_fits_within_storage(); this.transmute_element::<NonExhaustive<F,S,I>>() }
train_volleyball_stage2_dynamic.py
import sys sys.path.append(".") from train_net_dynamic import * cfg=Config('volleyball') cfg.inference_module_name = 'dynamic_volleyball' cfg.device_list = "0,1" cfg.use_gpu = True cfg.use_multi_gpu = True cfg.training_stage = 2 cfg.train_backbone = True cfg.test_before_train = False cfg.test_interval_epoch = 1 # vgg16 setup cfg.backbone = 'vgg16' cfg.stage1_model_path = 'result/basemodel_VD_vgg16.pth' cfg.out_size = 22, 40 cfg.emb_features = 512 # res18 setup # cfg.backbone = 'res18' # cfg.stage1_model_path = 'result/basemodel_VD_res18.pth' # cfg.out_size = 23, 40
# Dynamic Inference setup cfg.group = 1 cfg.stride = 1 cfg.ST_kernel_size = [(3, 3)] #[(3, 3),(3, 3),(3, 3),(3, 3)] cfg.dynamic_sampling = True cfg.sampling_ratio = [1] cfg.lite_dim = 128 # None # 128 cfg.scale_factor = True cfg.beta_factor = False cfg.hierarchical_inference = False cfg.parallel_inference = False cfg.num_DIM = 1 cfg.train_dropout_prob = 0.3 cfg.batch_size = 2 cfg.test_batch_size = 1 cfg.num_frames = 10 cfg.load_backbone_stage2 = True cfg.train_learning_rate = 1e-4 # cfg.lr_plan = {11: 3e-5, 21: 1e-5} # cfg.max_epoch = 60 # cfg.lr_plan = {11: 3e-5, 21: 1e-5} cfg.lr_plan = {11: 1e-5} cfg.max_epoch = 30 cfg.actions_weights = [[1., 1., 2., 3., 1., 2., 2., 0.2, 1.]] cfg.exp_note = 'Dynamic Volleyball_stage2_res18_litedim128_reproduce_1' train_net(cfg)
# cfg.emb_features = 512
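The lr_plan dict above maps a starting epoch to the learning rate used from that epoch onward. A minimal sketch of that convention (hedged: train_net's actual scheduler may differ; lr_at_epoch is a hypothetical helper, not part of the repo):

def lr_at_epoch(epoch, base_lr, lr_plan):
    # the largest plan epoch that is <= the current epoch wins
    lr = base_lr
    for start_epoch in sorted(lr_plan):
        if epoch >= start_epoch:
            lr = lr_plan[start_epoch]
    return lr

assert lr_at_epoch(10, 1e-4, {11: 1e-5}) == 1e-4  # before the plan kicks in
assert lr_at_epoch(20, 1e-4, {11: 1e-5}) == 1e-5  # from epoch 11 onward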
commands.py
# Copyright 2012, Nachi Ueno, NTT MCL, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cliff import lister from neutronclient.common import utils from neutronclient.neutron import v2_0 as client from neutronclient.neutron.v2_0 import port from neutron.i18n import _LI from neutron.openstack.common import log as logging class ProbeCommand(client.NeutronCommand): log = logging.getLogger(__name__ + '.ProbeCommand') def get_debug_agent(self): return self.app.debug_agent def run(self, parsed_args): self.log.debug('run(%s)', parsed_args) self.log.info(_('Unimplemented command')) class CreateProbe(ProbeCommand): """Create probe port and interface, then plug it in.""" log = logging.getLogger(__name__ + '.CreateProbe') def get_parser(self, prog_name): parser = super(CreateProbe, self).get_parser(prog_name) parser.add_argument( 'id', metavar='network_id', help=_('ID of network to probe')) parser.add_argument( '--device-owner', default='network', choices=['network', 'compute'], help=_('Owner type of the device: network/compute')) return parser def run(self, parsed_args): self.log.debug('run(%s)', parsed_args) debug_agent = self.get_debug_agent() probe_port = debug_agent.create_probe(parsed_args.id, parsed_args.device_owner) self.log.info(_('Probe created: %s'), probe_port.id) class DeleteProbe(ProbeCommand): """Delete probe - delete port, then unplug.""" log = logging.getLogger(__name__ + '.DeleteProbe') def get_parser(self, prog_name): parser = super(DeleteProbe, self).get_parser(prog_name) parser.add_argument( 'id', metavar='port_id', help=_('ID of probe port to delete')) return parser def run(self, parsed_args): self.log.debug('run(%s)', parsed_args) debug_agent = self.get_debug_agent() debug_agent.delete_probe(parsed_args.id) self.log.info(_('Probe %s deleted'), parsed_args.id) class ListProbe(client.NeutronCommand, lister.Lister): """List probes.""" log = logging.getLogger(__name__ + '.ListProbe') _formatters = {'fixed_ips': port._format_fixed_ips, } def get_debug_agent(self): return self.app.debug_agent def
(self, parsed_args): debug_agent = self.get_debug_agent() info = debug_agent.list_probes() columns = sorted(info[0].keys()) if info else [] return (columns, (utils.get_item_properties( s, columns, formatters=self._formatters, ) for s in info), ) class ClearProbe(ProbeCommand): """Clear all probes.""" log = logging.getLogger(__name__ + '.ClearProbe') def run(self, parsed_args): self.log.debug('run(%s)', parsed_args) debug_agent = self.get_debug_agent() cleared_probes_count = debug_agent.clear_probes() self.log.info(_LI('%d probe(s) deleted'), cleared_probes_count) class ExecProbe(ProbeCommand): """Exec commands in the namespace of the probe.""" log = logging.getLogger(__name__ + '.ExecProbe') def get_parser(self, prog_name): parser = super(ExecProbe, self).get_parser(prog_name) parser.add_argument( 'id', metavar='port_id', help=_('ID of probe port to execute command')) parser.add_argument( 'command', metavar='command', nargs='?', default=None, help=_('Command to execute')) return parser def run(self, parsed_args): self.log.debug('run(%s)', parsed_args) debug_agent = self.get_debug_agent() result = debug_agent.exec_command(parsed_args.id, parsed_args.command) self.app.stdout.write(result + '\n') class PingAll(ProbeCommand): """Ping all fixed_ip.""" log = logging.getLogger(__name__ + '.PingAll') def get_parser(self, prog_name): parser = super(PingAll, self).get_parser(prog_name) parser.add_argument( '--timeout', metavar='<timeout>', default=10, help=_('Ping timeout')) parser.add_argument( '--id', metavar='network_id', default=None, help=_('ID of network')) return parser def run(self, parsed_args): self.log.debug('run(%s)', parsed_args) debug_agent = self.get_debug_agent() result = debug_agent.ping_all(parsed_args.id, timeout=parsed_args.timeout) self.app.stdout.write(result + '\n')
get_data
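For context, cliff's Lister expects get_data to return a (column_names, rows) pair, which is exactly what the method above builds. A minimal sketch of that contract, with hypothetical probe dicts standing in for what debug_agent.list_probes() returns:

info = [{'id': 'p1', 'network_id': 'n1'}, {'id': 'p2', 'network_id': 'n1'}]
columns = sorted(info[0].keys()) if info else []
rows = (tuple(probe[c] for c in columns) for probe in info)
print(columns)       # ['id', 'network_id']
print(list(rows))    # [('p1', 'n1'), ('p2', 'n1')]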
msgpack.go
package uchatlib import ( "encoding/base64" "encoding/json" "errors" "time" "github.com/lvzhihao/goutils" ) type UchatMessage struct { MerchantNo string LogSerialNo string ChatRoomSerialNo string WxUserSerialNo string MsgTime time.Time MsgType int32 Content string VoiceTime int32 ShareTitle string ShareDesc string ShareUrl string ExtraData interface{} // supplementary data, not part of the API response } func ConvertUchatMessage(b []byte) ([]*UchatMessage, error) { var rst map[string]interface{} err := json.Unmarshal(b, &rst) if err != nil { return nil, err } merchantNo, ok := rst["vcMerchantNo"] if !ok { return nil, errors.New("empty merchantNo") } data, ok := rst["Data"] if !ok { return nil, errors.New("empty Data") } var list []map[string]interface{} err = json.Unmarshal([]byte(goutils.ToString(data)), &list) if err != nil { return nil, err } ret := make([]*UchatMessage, 0) for _, v := range list { msg := &UchatMessage{} msg.MerchantNo = goutils.ToString(merchantNo) msg.LogSerialNo = goutils.ToString(v["vcSerialNo"]) msg.ChatRoomSerialNo = goutils.ToString(v["vcChatRoomSerialNo"]) msg.WxUserSerialNo = goutils.ToString(v["vcFromWxUserSerialNo"]) msg.MsgTime, _ = time.ParseInLocation("2006-01-02 15:04:05", goutils.ToString(v["dtMsgTime"]), UchatTimeLocation) msg.MsgType = goutils.ToInt32(v["nMsgType"]) content, err := base64.StdEncoding.DecodeString(goutils.ToString(v["vcContent"])) if err != nil { msg.Content = goutils.ToString(v["vcContent"]) } else { msg.Content = goutils.ToString(content) } msg.VoiceTime = goutils.ToInt32(v["nVoiceTime"]) msg.ShareTitle = goutils.ToString(v["vcShareTitle"]) msg.ShareDesc = goutils.ToString(v["vcShareDesc"]) msg.ShareUrl = goutils.ToString(v["vcShareUrl"]) ret = append(ret, msg) } return ret, nil } // keyword msgpack type UchatKeyword struct { LogSerialNo string ChatRoomSerialNo string FromWxUserSerialNo string ToWxUserSerialNo string Content string ExtraData interface{} // supplementary data, not part of the API response } func ConvertUchatKeyword(b []byte) ([]*UchatKeywo
st map[string]interface{} err := json.Unmarshal(b, &rst) if err != nil { return nil, err } data, ok := rst["Data"] if !ok { return nil, errors.New("empty Data") } var list []map[string]interface{} err = json.Unmarshal([]byte(goutils.ToString(data)), &list) if err != nil { return nil, err } ret := make([]*UchatKeyword, 0) for _, v := range list { key := &UchatKeyword{} key.LogSerialNo = goutils.ToString(v["vcSerialNo"]) key.ChatRoomSerialNo = goutils.ToString(v["vcChatRoomSerialNo"]) key.FromWxUserSerialNo = goutils.ToString(v["vcFromWxUserSerialNo"]) key.ToWxUserSerialNo = goutils.ToString(v["vcToWxUserSerialNo"]) key.Content = goutils.ToString(v["vcContent"]) ret = append(ret, key) } return ret, nil }
rd, error) { var r
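A minimal usage sketch for ConvertUchatMessage, assuming the envelope shape implied by the parser ("Data" is a JSON-encoded array string; the merchant and serial numbers here are made up):

package main

import (
	"fmt"

	"github.com/lvzhihao/uchatlib"
)

func main() {
	// "aGVsbG8=" is base64 for "hello"; ConvertUchatMessage decodes vcContent.
	payload := []byte(`{"vcMerchantNo":"M001","Data":"[{\"vcSerialNo\":\"S1\",\"vcChatRoomSerialNo\":\"R1\",\"vcFromWxUserSerialNo\":\"U1\",\"dtMsgTime\":\"2017-01-02 15:04:05\",\"nMsgType\":2001,\"vcContent\":\"aGVsbG8=\"}]"}`)
	msgs, err := uchatlib.ConvertUchatMessage(payload)
	if err != nil {
		panic(err)
	}
	fmt.Println(msgs[0].MerchantNo, msgs[0].Content) // M001 hello
}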
layout.rs
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use crate::Adjustment; use crate::Align; use crate::Buildable; use crate::Container; use crate::ResizeMode; use crate::Scrollable; use crate::ScrollablePolicy; use crate::Widget; use glib::object::Cast; use glib::object::IsA; use glib::signal::connect_raw; use glib::signal::SignalHandlerId; use glib::translate::*; use glib::StaticType; use glib::ToValue; use std::boxed::Box as Box_; use std::fmt; use std::mem; use std::mem::transmute; glib::wrapper! { #[doc(alias = "GtkLayout")] pub struct Layout(Object<ffi::GtkLayout, ffi::GtkLayoutClass>) @extends Container, Widget, @implements Buildable, Scrollable; match fn { type_ => || ffi::gtk_layout_get_type(), } } impl Layout { #[doc(alias = "gtk_layout_new")] pub fn new( hadjustment: Option<&impl IsA<Adjustment>>, vadjustment: Option<&impl IsA<Adjustment>>, ) -> Layout { assert_initialized_main_thread!(); unsafe { Widget::from_glib_none(ffi::gtk_layout_new( hadjustment.map(|p| p.as_ref()).to_glib_none().0, vadjustment.map(|p| p.as_ref()).to_glib_none().0, )) .unsafe_cast() } } // rustdoc-stripper-ignore-next /// Creates a new builder-pattern struct instance to construct [`Layout`] objects. /// /// This method returns an instance of [`LayoutBuilder`] which can be used to create [`Layout`] objects. pub fn builder() -> LayoutBuilder { LayoutBuilder::default() } } impl Default for Layout { fn default() -> Self { glib::object::Object::new::<Self>(&[]) .expect("Can't construct Layout object with default parameters") } } #[derive(Clone, Default)] // rustdoc-stripper-ignore-next /// A [builder-pattern] type to construct [`Layout`] objects. /// /// [builder-pattern]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html pub struct LayoutBuilder { height: Option<u32>, width: Option<u32>, border_width: Option<u32>, child: Option<Widget>, resize_mode: Option<ResizeMode>, app_paintable: Option<bool>, can_default: Option<bool>, can_focus: Option<bool>, events: Option<gdk::EventMask>, expand: Option<bool>, #[cfg(any(feature = "v3_20", feature = "dox"))] #[cfg_attr(feature = "dox", doc(cfg(feature = "v3_20")))] focus_on_click: Option<bool>, halign: Option<Align>, has_default: Option<bool>, has_focus: Option<bool>, has_tooltip: Option<bool>, height_request: Option<i32>, hexpand: Option<bool>, hexpand_set: Option<bool>, is_focus: Option<bool>, margin: Option<i32>, margin_bottom: Option<i32>, margin_end: Option<i32>, margin_start: Option<i32>, margin_top: Option<i32>, name: Option<String>, no_show_all: Option<bool>, opacity: Option<f64>, parent: Option<Container>, receives_default: Option<bool>, sensitive: Option<bool>, tooltip_markup: Option<String>, tooltip_text: Option<String>, valign: Option<Align>, vexpand: Option<bool>, vexpand_set: Option<bool>, visible: Option<bool>, width_request: Option<i32>, hadjustment: Option<Adjustment>, hscroll_policy: Option<ScrollablePolicy>, vadjustment: Option<Adjustment>, vscroll_policy: Option<ScrollablePolicy>, } impl LayoutBuilder { // rustdoc-stripper-ignore-next /// Create a new [`LayoutBuilder`]. pub fn new() -> Self { Self::default() } // rustdoc-stripper-ignore-next /// Build the [`Layout`]. 
pub fn build(self) -> Layout { let mut properties: Vec<(&str, &dyn ToValue)> = vec![]; if let Some(ref height) = self.height { properties.push(("height", height)); } if let Some(ref width) = self.width { properties.push(("width", width)); } if let Some(ref border_width) = self.border_width { properties.push(("border-width", border_width)); } if let Some(ref child) = self.child { properties.push(("child", child)); } if let Some(ref resize_mode) = self.resize_mode { properties.push(("resize-mode", resize_mode)); } if let Some(ref app_paintable) = self.app_paintable { properties.push(("app-paintable", app_paintable)); } if let Some(ref can_default) = self.can_default { properties.push(("can-default", can_default)); } if let Some(ref can_focus) = self.can_focus { properties.push(("can-focus", can_focus)); } if let Some(ref events) = self.events { properties.push(("events", events)); } if let Some(ref expand) = self.expand { properties.push(("expand", expand)); } #[cfg(any(feature = "v3_20", feature = "dox"))] if let Some(ref focus_on_click) = self.focus_on_click { properties.push(("focus-on-click", focus_on_click)); } if let Some(ref halign) = self.halign { properties.push(("halign", halign)); } if let Some(ref has_default) = self.has_default { properties.push(("has-default", has_default)); } if let Some(ref has_focus) = self.has_focus { properties.push(("has-focus", has_focus)); } if let Some(ref has_tooltip) = self.has_tooltip { properties.push(("has-tooltip", has_tooltip)); } if let Some(ref height_request) = self.height_request { properties.push(("height-request", height_request)); } if let Some(ref hexpand) = self.hexpand { properties.push(("hexpand", hexpand)); } if let Some(ref hexpand_set) = self.hexpand_set { properties.push(("hexpand-set", hexpand_set)); } if let Some(ref is_focus) = self.is_focus { properties.push(("is-focus", is_focus)); } if let Some(ref margin) = self.margin { properties.push(("margin", margin)); } if let Some(ref margin_bottom) = self.margin_bottom { properties.push(("margin-bottom", margin_bottom)); } if let Some(ref margin_end) = self.margin_end { properties.push(("margin-end", margin_end)); } if let Some(ref margin_start) = self.margin_start { properties.push(("margin-start", margin_start)); } if let Some(ref margin_top) = self.margin_top { properties.push(("margin-top", margin_top)); } if let Some(ref name) = self.name { properties.push(("name", name)); } if let Some(ref no_show_all) = self.no_show_all { properties.push(("no-show-all", no_show_all)); } if let Some(ref opacity) = self.opacity { properties.push(("opacity", opacity)); } if let Some(ref parent) = self.parent { properties.push(("parent", parent)); } if let Some(ref receives_default) = self.receives_default { properties.push(("receives-default", receives_default)); } if let Some(ref sensitive) = self.sensitive { properties.push(("sensitive", sensitive)); } if let Some(ref tooltip_markup) = self.tooltip_markup { properties.push(("tooltip-markup", tooltip_markup)); } if let Some(ref tooltip_text) = self.tooltip_text { properties.push(("tooltip-text", tooltip_text)); } if let Some(ref valign) = self.valign { properties.push(("valign", valign)); } if let Some(ref vexpand) = self.vexpand { properties.push(("vexpand", vexpand)); } if let Some(ref vexpand_set) = self.vexpand_set { properties.push(("vexpand-set", vexpand_set)); } if let Some(ref visible) = self.visible { properties.push(("visible", visible)); } if let Some(ref width_request) = self.width_request { properties.push(("width-request", 
width_request)); } if let Some(ref hadjustment) = self.hadjustment { properties.push(("hadjustment", hadjustment)); } if let Some(ref hscroll_policy) = self.hscroll_policy { properties.push(("hscroll-policy", hscroll_policy)); } if let Some(ref vadjustment) = self.vadjustment { properties.push(("vadjustment", vadjustment)); } if let Some(ref vscroll_policy) = self.vscroll_policy { properties.push(("vscroll-policy", vscroll_policy)); } glib::Object::new::<Layout>(&properties).expect("Failed to create an instance of Layout") } pub fn height(mut self, height: u32) -> Self { self.height = Some(height); self } pub fn width(mut self, width: u32) -> Self { self.width = Some(width); self } pub fn border_width(mut self, border_width: u32) -> Self { self.border_width = Some(border_width); self } pub fn child(mut self, child: &impl IsA<Widget>) -> Self { self.child = Some(child.clone().upcast()); self } pub fn resize_mode(mut self, resize_mode: ResizeMode) -> Self { self.resize_mode = Some(resize_mode); self } pub fn app_paintable(mut self, app_paintable: bool) -> Self { self.app_paintable = Some(app_paintable); self } pub fn can_default(mut self, can_default: bool) -> Self { self.can_default = Some(can_default); self } pub fn can_focus(mut self, can_focus: bool) -> Self { self.can_focus = Some(can_focus); self } pub fn events(mut self, events: gdk::EventMask) -> Self { self.events = Some(events); self } pub fn expand(mut self, expand: bool) -> Self { self.expand = Some(expand); self } #[cfg(any(feature = "v3_20", feature = "dox"))] #[cfg_attr(feature = "dox", doc(cfg(feature = "v3_20")))] pub fn focus_on_click(mut self, focus_on_click: bool) -> Self { self.focus_on_click = Some(focus_on_click); self } pub fn halign(mut self, halign: Align) -> Self { self.halign = Some(halign); self } pub fn has_default(mut self, has_default: bool) -> Self { self.has_default = Some(has_default); self } pub fn has_focus(mut self, has_focus: bool) -> Self { self.has_focus = Some(has_focus); self } pub fn has_tooltip(mut self, has_tooltip: bool) -> Self { self.has_tooltip = Some(has_tooltip); self } pub fn height_request(mut self, height_request: i32) -> Self { self.height_request = Some(height_request); self } pub fn hexpand(mut self, hexpand: bool) -> Self { self.hexpand = Some(hexpand); self } pub fn hexpand_set(mut self, hexpand_set: bool) -> Self { self.hexpand_set = Some(hexpand_set); self } pub fn is_focus(mut self, is_focus: bool) -> Self { self.is_focus = Some(is_focus); self } pub fn margin(mut self, margin: i32) -> Self { self.margin = Some(margin); self } pub fn margin_bottom(mut self, margin_bottom: i32) -> Self { self.margin_bottom = Some(margin_bottom); self } pub fn margin_end(mut self, margin_end: i32) -> Self { self.margin_end = Some(margin_end); self } pub fn margin_start(mut self, margin_start: i32) -> Self { self.margin_start = Some(margin_start); self } pub fn margin_top(mut self, margin_top: i32) -> Self { self.margin_top = Some(margin_top); self } pub fn name(mut self, name: &str) -> Self { self.name = Some(name.to_string()); self } pub fn no_show_all(mut self, no_show_all: bool) -> Self { self.no_show_all = Some(no_show_all); self } pub fn opacity(mut self, opacity: f64) -> Self { self.opacity = Some(opacity); self } pub fn parent(mut self, parent: &impl IsA<Container>) -> Self { self.parent = Some(parent.clone().upcast()); self } pub fn receives_default(mut self, receives_default: bool) -> Self { self.receives_default = Some(receives_default); self } pub fn sensitive(mut self, sensitive: bool) 
-> Self { self.sensitive = Some(sensitive); self } pub fn tooltip_markup(mut self, tooltip_markup: &str) -> Self { self.tooltip_markup = Some(tooltip_markup.to_string()); self } pub fn tooltip_text(mut self, tooltip_text: &str) -> Self { self.tooltip_text = Some(tooltip_text.to_string()); self } pub fn valign(mut self, valign: Align) -> Self { self.valign = Some(valign); self } pub fn vexpand(mut self, vexpand: bool) -> Self
pub fn vexpand_set(mut self, vexpand_set: bool) -> Self { self.vexpand_set = Some(vexpand_set); self } pub fn visible(mut self, visible: bool) -> Self { self.visible = Some(visible); self } pub fn width_request(mut self, width_request: i32) -> Self { self.width_request = Some(width_request); self } pub fn hadjustment(mut self, hadjustment: &impl IsA<Adjustment>) -> Self { self.hadjustment = Some(hadjustment.clone().upcast()); self } pub fn hscroll_policy(mut self, hscroll_policy: ScrollablePolicy) -> Self { self.hscroll_policy = Some(hscroll_policy); self } pub fn vadjustment(mut self, vadjustment: &impl IsA<Adjustment>) -> Self { self.vadjustment = Some(vadjustment.clone().upcast()); self } pub fn vscroll_policy(mut self, vscroll_policy: ScrollablePolicy) -> Self { self.vscroll_policy = Some(vscroll_policy); self } } pub const NONE_LAYOUT: Option<&Layout> = None; pub trait LayoutExt: 'static { #[doc(alias = "gtk_layout_get_bin_window")] #[doc(alias = "get_bin_window")] fn bin_window(&self) -> Option<gdk::Window>; #[doc(alias = "gtk_layout_get_size")] #[doc(alias = "get_size")] fn size(&self) -> (u32, u32); #[doc(alias = "gtk_layout_move")] #[doc(alias = "move")] fn move_(&self, child_widget: &impl IsA<Widget>, x: i32, y: i32); #[doc(alias = "gtk_layout_put")] fn put(&self, child_widget: &impl IsA<Widget>, x: i32, y: i32); #[doc(alias = "gtk_layout_set_size")] fn set_size(&self, width: u32, height: u32); fn height(&self) -> u32; fn set_height(&self, height: u32); fn width(&self) -> u32; fn set_width(&self, width: u32); fn child_x<T: IsA<Widget>>(&self, item: &T) -> i32; fn set_child_x<T: IsA<Widget>>(&self, item: &T, x: i32); fn child_y<T: IsA<Widget>>(&self, item: &T) -> i32; fn set_child_y<T: IsA<Widget>>(&self, item: &T, y: i32); #[doc(alias = "height")] fn connect_height_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; #[doc(alias = "width")] fn connect_width_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; } impl<O: IsA<Layout>> LayoutExt for O { fn bin_window(&self) -> Option<gdk::Window> { unsafe { from_glib_none(ffi::gtk_layout_get_bin_window( self.as_ref().to_glib_none().0, )) } } fn size(&self) -> (u32, u32) { unsafe { let mut width = mem::MaybeUninit::uninit(); let mut height = mem::MaybeUninit::uninit(); ffi::gtk_layout_get_size( self.as_ref().to_glib_none().0, width.as_mut_ptr(), height.as_mut_ptr(), ); let width = width.assume_init(); let height = height.assume_init(); (width, height) } } fn move_(&self, child_widget: &impl IsA<Widget>, x: i32, y: i32) { unsafe { ffi::gtk_layout_move( self.as_ref().to_glib_none().0, child_widget.as_ref().to_glib_none().0, x, y, ); } } fn put(&self, child_widget: &impl IsA<Widget>, x: i32, y: i32) { unsafe { ffi::gtk_layout_put( self.as_ref().to_glib_none().0, child_widget.as_ref().to_glib_none().0, x, y, ); } } fn set_size(&self, width: u32, height: u32) { unsafe { ffi::gtk_layout_set_size(self.as_ref().to_glib_none().0, width, height); } } fn height(&self) -> u32 { unsafe { let mut value = glib::Value::from_type(<u32 as StaticType>::static_type()); glib::gobject_ffi::g_object_get_property( self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"height\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `height` getter") } } fn set_height(&self, height: u32) { unsafe { glib::gobject_ffi::g_object_set_property( self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"height\0".as_ptr() as *const _, height.to_value().to_glib_none().0, ); } } fn width(&self) 
-> u32 { unsafe { let mut value = glib::Value::from_type(<u32 as StaticType>::static_type()); glib::gobject_ffi::g_object_get_property( self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"width\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `width` getter") } } fn set_width(&self, width: u32) { unsafe { glib::gobject_ffi::g_object_set_property( self.to_glib_none().0 as *mut glib::gobject_ffi::GObject, b"width\0".as_ptr() as *const _, width.to_value().to_glib_none().0, ); } } fn child_x<T: IsA<Widget>>(&self, item: &T) -> i32 { unsafe { let mut value = glib::Value::from_type(<i32 as StaticType>::static_type()); crate::ffi::gtk_container_child_get_property( self.to_glib_none().0 as *mut crate::ffi::GtkContainer, item.to_glib_none().0 as *mut _, b"x\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value.get().expect("Return Value for property `x` getter") } } fn set_child_x<T: IsA<Widget>>(&self, item: &T, x: i32) { unsafe { crate::ffi::gtk_container_child_set_property( self.to_glib_none().0 as *mut crate::ffi::GtkContainer, item.to_glib_none().0 as *mut _, b"x\0".as_ptr() as *const _, x.to_value().to_glib_none().0, ); } } fn child_y<T: IsA<Widget>>(&self, item: &T) -> i32 { unsafe { let mut value = glib::Value::from_type(<i32 as StaticType>::static_type()); crate::ffi::gtk_container_child_get_property( self.to_glib_none().0 as *mut crate::ffi::GtkContainer, item.to_glib_none().0 as *mut _, b"y\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value.get().expect("Return Value for property `y` getter") } } fn set_child_y<T: IsA<Widget>>(&self, item: &T, y: i32) { unsafe { crate::ffi::gtk_container_child_set_property( self.to_glib_none().0 as *mut crate::ffi::GtkContainer, item.to_glib_none().0 as *mut _, b"y\0".as_ptr() as *const _, y.to_value().to_glib_none().0, ); } } fn connect_height_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_height_trampoline<P: IsA<Layout>, F: Fn(&P) + 'static>( this: *mut ffi::GtkLayout, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(Layout::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::height\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_height_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_width_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_width_trampoline<P: IsA<Layout>, F: Fn(&P) + 'static>( this: *mut ffi::GtkLayout, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(Layout::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::width\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_width_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } } impl fmt::Display for Layout { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("Layout") } }
{ self.vexpand = Some(vexpand); self }
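A short usage sketch of the generated builder and the LayoutExt methods above (assumes a working GTK3 environment; not tied to any particular application setup):

use gtk::prelude::*;
use gtk::Layout;

fn main() {
    gtk::init().expect("failed to initialize GTK");
    // Builder-pattern construction, as generated above.
    let layout = Layout::builder()
        .width(800)
        .height(600)
        .visible(true)
        .build();
    // LayoutExt getters/setters wrap the gtk_layout_* FFI calls.
    layout.set_size(1024, 768);
    println!("scrollable size = {:?}", layout.size());
}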
Complement.py
def comp_9th(n): """takes a decimal number (n) in string format; returns the 9's complement of the number""" n=str(n) result=[] for digit in n: a=str(9-int(digit)) result.append(a) return "".join(result) def comp_1st(n): """takes a binary number (n) in string format; returns the 1's complement of the number""" n=str(n) result=[] for digit in n: a=str(1-int(digit)) result.append(a) return "".join(result) def comp_2nd(n): """takes a binary number (n) in string format; returns the 2's complement of the number""" n=str(n) count = 0 for digit in n[::-1]: if digit == '1': break count += 1 change=n[:len(n)-(count+1)] unchange=n[len(n)-(count+1):] final=comp_1st(change) return final+unchange def comp_10th(n): """takes a decimal number (n) in string format; returns the 10's complement of the number""" n=str(n) count = 0 for digit in n[::-1]: if digit != '0': break count += 1 change=n[:len(n)-(count+1)] special=n[len(n)-(count+1):len(n)-count] var=str(10-int(special)) unchange=n[len(n)-count:] final=comp_9th(change) return final+var+unchange def decimalSub(m,n): """takes 2 decimal numbers in any format (string or integer); returns the result of the subtraction using complement rules""" m=str(m) n=str(n) req=max(len(m),len(n)) while len(m) < req: m="0"+m while len(n) < req: n="0"+n if int(n)> int(m): n_10th=int(comp_10th(str(n))) summation=int(m)+n_10th result=comp_10th(str(summation)) return "-"+result else: n_10th=int(comp_10th(str(n))) summation=int(m)+n_10th result=str(summation) return result[1:] def BinarySum(n,m): """adds two equal-length binary numbers given as strings; returns their sum as a binary string""" result=[] carry=0 x=str(n)[::-1] y=str(m)[::-1] for i in range(len(x)): a=int(x[i])+int(y[i])+carry if a==1 or a==0: result.append(str(a)) carry=0 elif a==2: result.append("0") carry=1 elif a==3: result.append("1") carry=1 if carry==1: result.append("1") result.reverse() return "".join(result) def binarySub(m,n): """takes 2 binary numbers in any format (string or integer); returns the result of the subtraction using complement rules""" m=str(m) n=str(n) req=max(len(m),len(n)) while len(m) < req: m="0"+m while len(n) < req: n="0"+n if int(n)> int(m): n_2nd=comp_2nd(str(n)) summation=BinarySum(m,n_2nd) result=comp_2nd(str(summation)) return "-"+result else: n_2nd=comp_2nd(str(n)) summation=BinarySum(m,n_2nd) result=str(summation) return result[1:] operations=[comp_1st,comp_2nd,comp_9th,comp_10th,decimalSub,binarySub] operation_names=["The first complement of the binary number:", "The second complement of the binary number:", "The ninth complement of the decimal number:", "The tenth complement of the decimal number:", "The difference between the two decimal numbers", "The difference between the two binary numbers"] print("This program deals with complements and the operations involving them") n=int(input("Enter number of operations: "))
print("2 to find the 9th complement of a decimal number") print("3 to find the 10th complement of a decimal number") print("4 to find the differnece between two decimal numbers") print("5 to find the differnece between two binary numbers") m=int(input("Enter the number of the required operation: ")) if m==0 or m==1 or m==2 or m==3: x=input("Enter the reqired number to convert: ") print(operation_names[m],x,"is",operations[m](x)) elif m==4 or m==5: x=input("Enter the first number: ") y=input("Enter the second number: ") print(operation_names[m],x,"and",y,"is",operations[m](x,y)) else: print("Wrong number of operation selected")
for num in range(n): print("Select the number of the operation: ") print("0 to find the 1st complement of a binary number") print("1 to find the 2nd complement of a binary number")
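A few worked examples of the routines above (values chosen for illustration; each line can be checked by hand):

assert comp_9th("123") == "876"              # 9's complement: digit-wise 9 - d
assert comp_10th("123") == "877"             # 10's complement: 9's complement + 1
assert decimalSub(523, 123) == "400"         # 523 + 877 = 1400, drop the carry digit
assert binarySub("1010", "0110") == "0100"   # 10 - 6 via the 2's complement of 0110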
format-log-output.js
import formatLogText from './format-log-text'; export default function
(text, prefix = '') { return formatLogText(text) .split('\n') .map(textItem => `${prefix}${textItem.replace(/^> /, '')}`); }
formatLogOutput
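A behavior sketch, with formatLogText stubbed as the identity function (its real implementation lives in './format-log-text' and is not shown here):

const formatLogText = (text) => text; // stand-in for the real helper

const formatLogOutput = (text, prefix = '') =>
  formatLogText(text)
    .split('\n')
    .map(item => `${prefix}${item.replace(/^> /, '')}`);

console.log(formatLogOutput('ready\n> compiled', '| '));
// -> [ '| ready', '| compiled' ]  (the leading "> " marker is stripped)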
app_dao.go
package dao import ( "fmt" "github.com/jinzhu/gorm" "kr/paasta/monitoring/paas/model" "kr/paasta/monitoring/paas/util" "strconv" ) type AppDao struct { txn *gorm.DB } func GetAppDao(txn *gorm.DB) *AppDao { return &AppDao{ txn: txn, } } func (b *AppDao) UpdatePaasAppAutoScalingPolicy(request model.AppAutoscalingPolicy) string { req := model.AppAutoScalingPolicies{ AppGuid: request.AppGuid, InstanceMinCnt: uint(request.InstanceMinCnt), InstanceMaxCnt: uint(request.InstanceMaxCnt), CpuMinThreshold: uint(request.CpuMinThreshold), CpuMaxThreshold: uint(request.CpuMaxThreshold), MemoryMinThreshold: uint(request.MemoryMinThreshold), MemoryMaxThreshold: uint(request.MemoryMaxThreshold), InstanceScalingUnit: uint(request.InstanceScalingUnit), MeasureTimeSec: uint(request.MeasureTimeSec), AutoScalingOutYn: request.AutoScalingOutYn, AutoScalingInYn: request.AutoScalingInYn, AutoScalingCpuYn: request.AutoScalingCpuYn, AutoScalingMemoryYn: request.AutoScalingMemoryYn, RegDate: util.GetDBCurrentTime(), RegUser: "system", ModiDate: util.GetDBCurrentTime(), ModiUser: "system", } updateQuery := "on duplicate key update " + "instance_min_cnt ='" + strconv.Itoa(request.InstanceMinCnt) + "'," + "instance_max_cnt ='" + strconv.Itoa(request.InstanceMaxCnt) + "'," + "cpu_min_threshold ='" + strconv.Itoa(request.CpuMinThreshold) + "'," + "cpu_max_threshold ='" + strconv.Itoa(request.CpuMaxThreshold) + "'," + "memory_min_threshold ='" + strconv.Itoa(request.MemoryMinThreshold) + "'," + "memory_max_threshold ='" + strconv.Itoa(request.MemoryMaxThreshold) + "'," + "instance_scaling_unit ='" + strconv.Itoa(request.InstanceScalingUnit) + "'," + "measure_time_sec ='" + strconv.Itoa(request.MeasureTimeSec) + "'," + "auto_scaling_out_yn ='" + request.AutoScalingOutYn + "'," + "auto_scaling_in_yn ='" + request.AutoScalingInYn + "'," + "auto_scaling_cpu_yn ='" + request.AutoScalingCpuYn + "'," + "auto_scaling_memory_yn ='" + request.AutoScalingMemoryYn + "'," + "modi_date = now(), modi_user = 'system'" status := b.txn.Debug().Table("app_auto_scaling_policies"). Set("gorm:insert_option", updateQuery).Create(&req) if status.Error != nil { return status.Error.Error() } else { return "" } } func (b *AppDao) GetPaasAppAutoScalingPolicy(request model.AppAlarmReq) (model.AppAutoscalingPolicy, model.ErrMessage) { t := model.AppAutoscalingPolicy{} status := b.txn.Debug().Table("app_auto_scaling_policies"). Select("app_guid, instance_min_cnt, instance_max_cnt, cpu_min_threshold, cpu_max_threshold, "+ "memory_min_threshold, memory_max_threshold, instance_scaling_unit, measure_time_sec, "+ "auto_scaling_out_yn, auto_scaling_in_yn, auto_scaling_cpu_yn, auto_scaling_memory_yn "). Where("app_guid = ? ", request.AppGuid). 
Find(&t) err := util.GetError().DbCheckError(status.Error) return t, err } func (b *AppDao) UpdatePaasAppPolicyInfo(request model.AppAlarmPolicy) string { req := model.AppAlarmPolicies{ AppGuid: request.AppGuid, CpuWarningThreshold: uint(request.CpuWarningThreshold), CpuCriticalThreshold: uint(request.CpuCriticalThreshold), MemoryWarningThreshold: uint(request.MemoryWarningThreshold), MemoryCriticalThreshold: uint(request.MemoryCriticalThreshold), MeasureTimeSec: uint(request.MeasureTimeSec), Email: request.Email, EmailSendYn: request.EmailSendYn, AlarmUseYn: request.AlarmUseYn, RegDate: util.GetDBCurrentTime(), RegUser: "system", ModiDate: util.GetDBCurrentTime(), ModiUser: "system", } updateQuery := "on duplicate key update " + "cpu_warning_threshold ='" + strconv.Itoa(request.CpuWarningThreshold) + "'," + "cpu_critical_threshold ='" + strconv.Itoa(request.CpuCriticalThreshold) + "'," + "memory_warning_threshold ='" + strconv.Itoa(request.MemoryWarningThreshold) + "'," + "memory_critical_threshold ='" + strconv.Itoa(request.MemoryCriticalThreshold) + "'," + "measure_time_sec ='" + strconv.Itoa(request.MeasureTimeSec) + "'," + "email ='" + request.Email + "'," + "email_send_yn ='" + request.EmailSendYn + "'," + "alarm_use_yn ='" + request.AlarmUseYn + "'," + "modi_date = now(), modi_user = 'system'" status := b.txn.Debug().Table("app_alarm_policies"). Set("gorm:insert_option", updateQuery).Create(&req) if status.Error != nil { return status.Error.Error() } else { return "" } } func (b *AppDao) GetPaasAppPolicyInfo(request model.AppAlarmReq) (model.AppAlarmPolicy, model.ErrMessage) { t := model.AppAlarmPolicy{} status := b.txn.Debug().Table("app_alarm_policies"). Select("app_guid, cpu_warning_threshold, cpu_critical_threshold, memory_warning_threshold, memory_critical_threshold, "+ "measure_time_sec, email, email_send_yn, alarm_use_yn"). Where("app_guid = ? ", request.AppGuid). Find(&t) err := util.GetError().DbCheckError(status.Error) return t, err } func (b *AppDao) GetPaasAppAlarmList(request model.AppAlarmReq) ([]model.AppAlarm, int, model.ErrMessage) { t := []model.AppAlarm{} if request.PageIndex != 0 && request.PageItems != 0 { // Compute the page window. // MySQL provides LIMIT: rows per page, OFFSET: first data row to fetch. var rowCount int var startDataRow int endDataRow := request.PageItems * request.PageIndex if request.PageIndex == 1 { startDataRow = 0 } else if request.PageIndex > 1 { startDataRow = endDataRow - request.PageItems } var queryWhere = " app_guid = '" + request.AppGuid + "' and" if request.ResourceType != "" { queryWhere += " resource_type = '" + request.ResourceType + "' and" } if request.AlarmLevel != "" { queryWhere += " alarm_level = '" + request.AlarmLevel + "' and" } if request.SearchDateFrom != "" && request.SearchDateTo != "" { // Times stored in the DB are in GMT. // The local time requested by the UI
:= b.txn.Debug().Limit(request.PageItems).Table("app_alarm_histories"). Select("alarm_id, app_guid, app_idx, app_name, resource_type, " + "alarm_level, alarm_title, alarm_message, " + "reg_date + INTERVAL " + strconv.Itoa(model.GmtTimeGap) + " HOUR as reg_date"). Order("reg_date desc"). Offset(startDataRow). Where(queryWhere). Find(&t) err := util.GetError().DbCheckError(status.Error) status = b.txn.Debug().Table("app_alarm_histories"). Where(queryWhere). Count(&rowCount) if err != nil { return nil, 0, err } return t, rowCount, err } else { return nil, 0, nil } } func (b *AppDao) DeletePaasAppPolicy(guid string) string { req := model.AppAlarmReq{ AppGuid: guid, } fmt.Println("===== DeletePaasAppAlarmHistory =====") status := b.txn.Debug().Table("app_alarm_histories"). Where("app_guid = ? ", req.AppGuid).Delete(&req) fmt.Println("===== DeletePaasAppPolicy =====") status = b.txn.Debug().Table("app_alarm_policies"). Where("app_guid = ? ", req.AppGuid).Delete(&req) fmt.Println("===== DeletePaasAppAutoScaling Policy =====") status = b.txn.Debug().Table("app_auto_scaling_policies"). Where("app_guid = ? ", req.AppGuid).Delete(&req) if status.Error != nil { return status.Error.Error() } else { return "" } }
is 9 hours (the GmtTimeGap) ahead of GMT. // Subtract 9 hours, in milliseconds to match unix_timestamp(reg_date)*1000, from the requested times before querying. dateFromUint, _ := strconv.ParseUint(request.SearchDateFrom, 10, 0) dateToUint, _ := strconv.ParseUint(request.SearchDateTo, 10, 0) gmtTimeGap := uint64(model.GmtTimeGap) dateFrom := strconv.FormatUint(dateFromUint-(60*60*1000*gmtTimeGap), 10) dateTo := strconv.FormatUint(dateToUint-(60*60*1000*gmtTimeGap), 10) queryWhere += " unix_timestamp(reg_date)*1000 between '" + dateFrom + "' and '" + dateTo + "' and" } // if at least one condition was added if queryWhere != "" { queryWhere = queryWhere[0 : len(queryWhere)-3] // strip the trailing 'and' } status
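A worked example of the GMT shift used above, assuming model.GmtTimeGap is 9 (hours) and that the UI sends epoch milliseconds, matching the unix_timestamp(reg_date)*1000 comparison in the query:

package main

import "fmt"

func main() {
	const gmtTimeGap = 9                                 // hours, as model.GmtTimeGap is assumed to be
	uiMillis := uint64(1600000000000)                    // hypothetical local-time epoch ms from the UI
	dbMillis := uiMillis - 60*60*1000*uint64(gmtTimeGap) // shift back 9 hours to GMT
	fmt.Println(dbMillis)                                // 1599967600000
}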
main.go
package main // import "libconf2j" import "C" import _ "c" import _ "restconf/c" func main()
{ // ignored }
test.js
const CachedRequest = require("../"); const request = require("request"); const nock = require("nock"); const temp = require('temp').track(); const { Readable } = require("stream"); const zlib = require("zlib"); const { expect } = require("chai"); class MockedResponseStream extends Readable { constructor(options, response) { super(options); this.response = response; } _read() { this.push(this.response); this.push(null); } } describe("CachedRequest", () => { function mock(method, times, response, headers) { method = method.toLowerCase(); times = times || 1; nock("http://ping.com") .filteringPath(/.+/, "/") [method]("/") .times(times) .reply(200, response, headers); }; before(() => { nock.disableNetConnect(); }); beforeEach(function () { const cacheDir = temp.mkdirSync("cache"); this.cachedRequest = CachedRequest(request); this.cachedRequest.setCacheDirectory(cacheDir); nock.cleanAll(); }); afterEach((done) => { temp.cleanup(done); }); describe("caching", () => { it("makes the request when the response isn't cached", function (done) { mock("GET", 1, () => { return new MockedResponseStream({}, "pong"); }); this.cachedRequest({uri: "http://ping.com/", ttl: 0}, (error, response, body) => { if (error) return done(error); expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.not.exist; expect(body).to.equal("pong"); done(); }); }); it("makes the request when the response isn't cached using the get extension method", function (done) { mock("GET", 1, () => { return new MockedResponseStream({}, "pong"); }); this.cachedRequest.get({uri: "http://ping.com/", ttl: 0, method: 'GET'}, (error, response, body) => { if (error) return done(error); expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.not.exist; expect(body).to.equal("pong"); done(); }); }); it("responds from the cache", function (done) { const responseBody = "pong"; const options = { uri: "http://ping.com/", method: "GET", ttl: 1000, }; mock(options.method, 1, () => new MockedResponseStream({}, responseBody)); this.cachedRequest(options, (error, response, body) => { if (error) return done(error); expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.not.exist; expect(body).to.equal(responseBody); this.cachedRequest(options, (error, response, body) => { if (error) return done(error); expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.equal(1); expect(body).to.equal(responseBody); done(); }); }); }); it("parses JSON objects from the cache", function (done) { const responseBody = {"a": 1, "b": {"c": 2}}; const options = { uri: "http://ping.com/", method: "POST", json: true, ttl: 5000 }; mock(options.method, 1, () => { return new MockedResponseStream({}, JSON.stringify(responseBody)); }); this.cachedRequest(options, (error, response, body) => { if (error) return done(error); expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.not.exist; expect(body).to.deep.equal(responseBody); this.cachedRequest(options, (error, response, body) => { if (error) return done(error); expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.equal(1); expect(body).to.deep.equal(responseBody); done(); }); }); }); it("responds from the cache using the get extension method", function (done) { const responseBody = {"a": 1, "b": {"c": 2}}; const options = { uri: "http://ping.com/", method: "GET", json: true, ttl: 5000 }; mock(options.method, 1, () => { return new MockedResponseStream({},
JSON.stringify(responseBody)); }); this.cachedRequest.get(options, (error, response, body) => { if (error) return done(error); expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.not.exist; expect(body).to.deep.equal(responseBody); this.cachedRequest.get(options, (error, response, body) => { if (error) return done(error); expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.equal(1); expect(body).to.deep.equal(responseBody); done(); }); }); }); it("responds the same from the cache if gzipped", function (done) { const responseBody = 'foo'; const options = { url: "http://ping.com/", ttl: 5000, gzip: true, }; //Return gzip compressed response with valid content encoding header mock("GET", 1, () => { return new MockedResponseStream({}, responseBody).pipe(zlib.createGzip()); }, { "Content-Encoding": "gzip" }); this.cachedRequest(options, (error, response, body) => { if (error) return done(error); expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.not.exist; expect(body).to.deep.equal(responseBody); this.cachedRequest(options, (error, response, body) => { if (error) return done(error); expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.equal(1); expect(body).to.deep.equal(responseBody); done(); }); }); }); it("responds a buffer from the cache", function (done) { const responseBody = 'foo'; const options = { url: "http://ping.com/", ttl: 5000, encoding: null // body will be returned as a buffer }; //Return gzip compressed response with valid content encoding header mock("GET", 1, () => { return new MockedResponseStream({}, responseBody).pipe(zlib.createGzip()); }, { "Content-Encoding": "gzip" }); this.cachedRequest(options, (error, response, body) => { if (error) return done(error); expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.not.exist; zlib.gunzip(body, (error, buffer) => { if (error) return done(error); expect(buffer).to.be.an.instanceof(Buffer); expect(buffer.toString()).to.deep.equal(responseBody); this.cachedRequest(options, (error, response, body) => { if (error) return done(error); expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.equal(1); zlib.gunzip(body, (error, buffer) => { if (error) return done(error); expect(buffer).to.be.an.instanceof(Buffer); expect(buffer.toString()).to.deep.equal(responseBody); done(); }); }); }); }); }); describe('when it cannot parse the cached response headers', () => { after(function () { if (this._parseHeaders) { this.cachedRequest.setValue('parseHeaders', this._parseHeaders); } }); it("makes the request", function (done) { const options = {uri: "http://ping.com/", ttl: 5000}; mock("GET", 2, () => { return new MockedResponseStream({}, "pong"); }, {foo: 'bar'}); this.cachedRequest(options, (error, response, body) => { if (error) return done(error); expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.not.exist; expect(body).to.equal("pong"); this._parseHeaders = this.cachedRequest.getValue('parseHeaders'); this.cachedRequest.setValue('parseHeaders', () => { throw new Error('Cannot parse headers'); }); this.cachedRequest(options, (error, response, body) => { if (error) return done(error); expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.not.exist; expect(body).to.equal("pong"); done(); }); }); }); }); }); describe("streaming", () => { it("allows using request as a stream",
function (done) { let responseBody = ""; for (let i = 0; i < 1000; i++) { responseBody += "this is a long response body"; }; mock("GET", 1, () => { return new MockedResponseStream({}, responseBody); }); const options = {url: "http://ping.com/", ttl: 5000}; let body = ""; //Make fresh request this.cachedRequest(options) .on("data", (data) => { body += data; }) .on("end", () => { expect(body).to.equal(responseBody); body = ""; //Make cached request this.cachedRequest(options) .on("response", (response) => { expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.equal(1); response.on("data", (data) => { body += data; }) .on("end", () => { expect(body).to.equal(responseBody); done(); }); }); }); }); it("allows using request with the get extension method as a stream", function (done) { let responseBody = ""; for (let i = 0; i < 1000; i++) { responseBody += "this is a long response body"; }; mock("GET", 1, () => { return new MockedResponseStream({}, responseBody); }); const options = {url: "http://ping.com/", ttl: 5000}; let body = ""; //Make fresh request this.cachedRequest.get(options) .on("data", (data) => { body += data; }) .on("end", () => { expect(body).to.equal(responseBody); body = ""; //Make cached request this.cachedRequest(options) .on("response", (response) => { expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.equal(1); response.on("data", (data) => { body += data; }) .on("end", () => { expect(body).to.equal(responseBody);
}); }); }); it("handles gzip response", function (done) { let responseBody = ""; for (let i = 0; i < 1000; i++) { responseBody += "this is a long response body"; }; //Return gzip compressed response with valid content encoding header mock("GET", 1, () => { return new MockedResponseStream({}, responseBody).pipe(zlib.createGzip()); }, { "Content-Encoding": "gzip" }); const options = {url: "http://ping.com/", ttl: 5000}; let body = ""; //Make fresh request this.cachedRequest(options) .on("data", (data) => { //Ignore first reply }) .on("end", () => { //Make cached request this.cachedRequest(options) .on("response", (response) => { expect(response.statusCode).to.equal(200); expect(response.headers["x-from-cache"]).to.equal(1); expect(response.headers["content-encoding"]).to.equal("gzip"); const gunzip = zlib.createGunzip(); gunzip.on("data", (data) => { body += data.toString(); }); gunzip.on("end", () => { expect(body).to.equal(responseBody); done(); }); gunzip.on('error', (error) => { done(error); }); response.pipe(gunzip); }); }); }); }); });
done(); });